Add Queens support
This patch adds initial support for the OpenStack Queens release.
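For reference, an illustrative pillar sketch that exercises the new Queens
templates (values are placeholders; key names follow what these templates
read from neutron:server and neutron:gateway, and the version key is
assumed to select the files/queens templates):

  # Illustrative only; adjust addresses and credentials for the target
  # deployment.
  neutron:
    server:
      version: queens
      bind:
        address: 172.16.10.101
        port: 9696
      dns_domain: novalocal
      global_physnet_mtu: 1500
      dvr: false
      backend:
        engine: ml2
        tenant_network_types: "flat,vxlan"
        mechanism:
          ovs:
            driver: openvswitch
    gateway:
      version: queens
      agent_mode: legacy
      dvr: false
      local_ip: 10.1.0.110
      metadata:
        host: 172.16.10.10
        password: password
      backend:
        engine: ml2
        tenant_network_types: "flat,vxlan"
        mechanism:
          ovs:
            driver: openvswitch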
Change-Id: I2000643de8e57839620179d9bfc8848806df43f4
Related-Prod: PROD-20175
diff --git a/neutron/files/queens/api-paste.ini.Debian b/neutron/files/queens/api-paste.ini.Debian
new file mode 100644
index 0000000..9036078
--- /dev/null
+++ b/neutron/files/queens/api-paste.ini.Debian
@@ -0,0 +1,57 @@
+{%- from "neutron/map.jinja" import server with context %}
+[composite:neutron]
+use = egg:Paste#urlmap
+/: neutronversions_composite
+/v2.0: neutronapi_v2_0
+
+[composite:neutronapi_v2_0]
+use = call:neutron.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
+keystone = cors http_proxy_to_wsgi {%- if server.backend.engine == "contrail" and server.backend.get('rbac', False) %} user_token {%- endif %} request_id catch_errors authtoken keystonecontext extensions {% if server.audit.enabled %}audit {% endif %}neutronapiapp_v2_0
+
+[composite:neutronversions_composite]
+use = call:neutron.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi neutronversions
+keystone = cors http_proxy_to_wsgi {% if server.audit.enabled %}audit {% endif %}neutronversions
+
+{%- if server.backend.engine == "contrail" and server.backend.get('rbac', False) %}
+[filter:user_token]
+paste.filter_factory = neutron_plugin_contrail.plugins.opencontrail.neutron_middleware:token_factory
+{%- endif %}
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:catch_errors]
+paste.filter_factory = oslo_middleware:CatchErrors.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = neutron
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
+
+[filter:keystonecontext]
+paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:extensions]
+paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
+
+[app:neutronversions]
+paste.app_factory = neutron.pecan_wsgi.app:versions_factory
+
+[app:neutronapiapp_v2_0]
+paste.app_factory = neutron.api.v2.router:APIRouter.factory
+
+{%- if server.audit.enabled %}
+[filter:audit]
+paste.filter_factory = {{ server.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory") }}
+audit_map_file = {{ server.get("audit", {}).get("map_file", "/etc/pycadf/neutron_api_audit_map.conf") }}
+{%- endif %}
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
diff --git a/neutron/files/queens/api-paste.ini.RedHat b/neutron/files/queens/api-paste.ini.RedHat
new file mode 120000
index 0000000..08fd76a
--- /dev/null
+++ b/neutron/files/queens/api-paste.ini.RedHat
@@ -0,0 +1 @@
+api-paste.ini.Debian
\ No newline at end of file
diff --git a/neutron/files/queens/dhcp_agent.ini b/neutron/files/queens/dhcp_agent.ini
new file mode 100644
index 0000000..ec530ed
--- /dev/null
+++ b/neutron/files/queens/dhcp_agent.ini
@@ -0,0 +1,160 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+[DEFAULT]
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+#ovs_integration_bridge = br-int
+
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) and rate limiting on router's gateway port so long as
+# ovs_use_veth is set to True. (boolean value)
+#ovs_use_veth = false
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+interface_driver = openvswitch
+
+#
+# From neutron.dhcp.agent
+#
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or RPC errors. The interval is number of seconds
+# between attempts. (integer value)
+#resync_interval = 5
+resync_interval = 30
+
+# The driver used to manage the DHCP server. (string value)
+#dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only be
+# activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+# This option doesn't have any effect when force_metadata is set to True.
+# (boolean value)
+#enable_isolated_metadata = false
+enable_isolated_metadata = True
+
+# In some cases the Neutron router is not present to provide the metadata IP
+# but the DHCP server can be used to provide this info. Setting this value will
+# force the DHCP server to append specific host routes to the DHCP request. If
+# this option is set, then the metadata service will be activated for all the
+# networks. (boolean value)
+#force_metadata = false
+{%- if neutron.backend.router is defined or neutron.force_metadata|default(False) %}
+force_metadata = True
+{%- endif %}
+
+# Allows for serving metadata requests coming from a dedicated metadata access
+# network whose CIDR is 169.254.169.254/16 (or larger prefix), and is connected
+# to a Neutron router from which the VMs send metadata:1 request. In this case
+# DHCP Option 121 will not be injected in VMs, as they will be able to reach
+# 169.254.169.254 through a router. This option requires
+# enable_isolated_metadata = True. (boolean value)
+#enable_metadata_network = false
+enable_metadata_network = False
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server. (integer value)
+#num_sync_threads = 4
+
+# Location to store DHCP server config files. (string value)
+#dhcp_confs = $state_path/dhcp
+
+# Override the default dnsmasq settings with this file. (string value)
+#dnsmasq_config_file =
+
+# Comma-separated list of the DNS servers which will be used as forwarders.
+# (list value)
+#dnsmasq_dns_servers =
+
+# Base log dir for dnsmasq logging. The log contains DHCP and DNS log
+# information and is useful for debugging issues with either DHCP or DNS. If
+# this section is null, disable dnsmasq log. (string value)
+#dnsmasq_base_log_dir = <None>
+
+# Enables the dnsmasq service to provide name resolution for instances via DNS
+# resolvers on the host running the DHCP agent. Effectively removes the '--no-
+# resolv' option from the dnsmasq process arguments. Adding custom DNS
+# resolvers to the 'dnsmasq_dns_servers' option disables this feature. (boolean
+# value)
+#dnsmasq_local_resolv = false
+
+# Limit number of leases to prevent a denial-of-service. (integer value)
+#dnsmasq_lease_max = 16777216
+
+# Use broadcast in DHCP replies. (boolean value)
+#dhcp_broadcast_reply = false
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+[agent]
+
+#
+# From neutron.az.agent
+#
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+[ovs]
+
+#
+# From neutron.base.agent
+#
+
+# DEPRECATED: The interface for interacting with the OVSDB (string value)
+# Possible values:
+# native - <No description provided>
+# vsctl - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ovsdb_interface = native
+
+# The connection string for the OVSDB backend. Will be used by ovsdb-client
+# when monitoring and used for all ovsdb commands when native
+# ovsdb_interface is enabled (string value)
+#ovsdb_connection = tcp:127.0.0.1:6640
+
+# The SSL private key file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_key_file = <None>
+
+# The SSL certificate file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_cert_file = <None>
+
+# The Certificate Authority (CA) certificate to use when interacting with
+# OVSDB. Required when using an "ssl:" prefixed ovsdb_connection (string
+# value)
+#ssl_ca_cert_file = <None>
+
+# Timeout in seconds for ovsdb commands. If the timeout expires, ovsdb commands
+# will fail with ALARMCLOCK error. (integer value)
+# Deprecated group/name - [DEFAULT]/ovs_vsctl_timeout
+#ovsdb_timeout = 10
diff --git a/neutron/files/queens/l3_agent.ini b/neutron/files/queens/l3_agent.ini
new file mode 100644
index 0000000..69e97be
--- /dev/null
+++ b/neutron/files/queens/l3_agent.ini
@@ -0,0 +1,254 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+{%- from "neutron/map.jinja" import fwaas with context %}
+
+[DEFAULT]
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+#ovs_integration_bridge = br-int
+
+# Uses veth for an OVS interface or not. Support kernels with limited namespace
+# support (e.g. RHEL 6.5) and rate limiting on router's gateway port so long as
+# ovs_use_veth is set to True. (boolean value)
+#ovs_use_veth = false
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+interface_driver = openvswitch
+
+#
+# From neutron.l3.agent
+#
+
+# The working mode for the agent. Allowed modes are: 'legacy' - this preserves
+# the existing behavior where the L3 agent is deployed on a centralized
+# networking node to provide L3 services like DNAT, and SNAT. Use this mode if
+# you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality and
+# must be used for an L3 agent that runs on a compute host. 'dvr_snat' - this
+# enables centralized SNAT support in conjunction with DVR. This mode must be
+# used for an L3 agent running on a centralized node (or in single-host
+# deployments, e.g. devstack). 'dvr_no_external' - this mode enables only
+# East/West DVR routing functionality for a L3 agent that runs on a compute
+# host, the North/South functionality such as DNAT and SNAT will be provided by
+# the centralized network node that is running in 'dvr_snat' mode. This mode
+# should be used when there is no external network connectivity on the compute
+# host. (string value)
+# Possible values:
+# dvr - <No description provided>
+# dvr_snat - <No description provided>
+# legacy - <No description provided>
+# dvr_no_external - <No description provided>
+
+agent_mode = {{ neutron.agent_mode }}
+
+# TCP Port used by Neutron metadata namespace proxy. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_port = 9697
+metadata_port = 8775
+
+# DEPRECATED: Send this many gratuitous ARPs for HA setup, if less than or
+# equal to 0, the feature is disabled (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#send_arp_for_ha = 3
+
+# Indicates that this L3 agent should also handle routers that do not have an
+# external network gateway configured. This option should be True only for a
+# single agent in a Neutron deployment, and may be False for all agents if all
+# routers must have an external network gateway. (boolean value)
+#handle_internal_only_routers = true
+
+# DEPRECATED: When external_network_bridge is set, each L3 agent can be
+# associated with no more than one external network. This value should be set
+# to the UUID of that external network. To allow L3 agent support multiple
+# external networks, both the external_network_bridge and
+# gateway_external_network_id must be left empty. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#gateway_external_network_id =
+
+# With IPv6, the network used for the external gateway does not need to have an
+# associated subnet, since the automatically assigned link-local address (LLA)
+# can be used. However, an IPv6 gateway address is needed for use as the next-
+# hop for the default route. If no IPv6 gateway address is configured here,
+# (and only then) the neutron router will be configured to get its default
+# route from router advertisements (RAs) from the upstream router; in which
+# case the upstream router must also be configured to send these RAs. The
+# ipv6_gateway, when configured, should be the LLA of the interface on the
+# upstream router. If a next-hop using a global unique address (GUA) is
+# desired, it needs to be done via a subnet allocated to the network and not
+# through this parameter. (string value)
+#ipv6_gateway =
+
+# Driver used for ipv6 prefix delegation. This needs to be an entry point
+# defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for
+# entry points included with the neutron source. (string value)
+#prefix_delegation_driver = dibbler
+
+# Allow running metadata proxy. (boolean value)
+#enable_metadata_proxy = true
+
+# Iptables mangle mark used to mark metadata valid requests. This mark will be
+# masked with 0xffff so that only the lower 16 bits will be used. (string
+# value)
+#metadata_access_mark = 0x1
+
+# Iptables mangle mark used to mark ingress from external network. This mark
+# will be masked with 0xffff so that only the lower 16 bits will be used.
+# (string value)
+#external_ingress_mark = 0x2
+
+# DEPRECATED: Name of bridge used for external network traffic. When this
+# parameter is set, the L3 agent will plug an interface directly into an
+# external bridge which will not allow any wiring by the L2 agent. Using this
+# will result in incorrect port statuses. This option is deprecated and will be
+# removed in Ocata. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#external_network_bridge =
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval = 40
+
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+#api_workers = <None>
+
+# Number of RPC worker processes for service. (integer value)
+#rpc_workers = 1
+
+# Number of RPC worker processes dedicated to state reports queue. (integer
+# value)
+#rpc_state_report_workers = 1
+
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
+
+# Location to store keepalived/conntrackd config files (string value)
+#ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type (string value)
+# Possible values:
+# AH - <No description provided>
+# PASS - <No description provided>
+#ha_vrrp_auth_type = PASS
+
+# VRRP authentication password (string value)
+#ha_vrrp_auth_password = <None>
+
+# The advertisement interval in seconds (integer value)
+#ha_vrrp_advert_int = 2
+
+# Number of concurrent threads for keepalived server connection requests. More
+# threads create a higher CPU load on the agent node. (integer value)
+# Minimum value: 1
+#ha_keepalived_state_change_server_threads = (1 + <num_of_cpus>) / 2
+
+# The VRRP health check interval in seconds. Values > 0 enable VRRP health
+# checks. Setting it to 0 disables VRRP health checks. Recommended value is 5.
+# This will cause pings to be sent to the gateway IP address(es) - requires
+# ICMP_ECHO_REQUEST to be enabled on the gateway. If gateway fails, all routers
+# will be reported as master, and master election will be repeated in round-
+# robin fashion, until one of the routers restores the gateway connection.
+# (integer value)
+#ha_vrrp_health_check_interval = 0
+
+# Location to store IPv6 PD files. (string value)
+#pd_confs = $state_path/pd
+
+# A decimal value as Vendor's Registered Private Enterprise Number as required
+# by RFC3315 DUID-EN. (string value)
+#vendor_pen = 8888
+
+# Location to store IPv6 RA config files (string value)
+#ra_confs = $state_path/ra
+
+# MinRtrAdvInterval setting for radvd.conf (integer value)
+#min_rtr_adv_interval = 30
+
+# MaxRtrAdvInterval setting for radvd.conf (integer value)
+#max_rtr_adv_interval = 100
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+[agent]
+
+#
+# From neutron.az.agent
+#
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+#
+# From neutron.l3.agent
+#
+
+# Extensions list to use (list value)
+#extensions =
+{%- if fwaas.get('enabled', False) %}
+extensions = {{ fwaas[fwaas.api_version]['l3_extension'] }}
+{%- endif %}
+
+
+[ovs]
+
+#
+# From neutron.base.agent
+#
+
+# DEPRECATED: The interface for interacting with the OVSDB (string value)
+# Possible values:
+# native - <No description provided>
+# vsctl - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ovsdb_interface = native
+
+# The connection string for the OVSDB backend. Will be used by ovsdb-client
+# when monitoring and used for all ovsdb commands when native
+# ovsdb_interface is enabled (string value)
+#ovsdb_connection = tcp:127.0.0.1:6640
+
+# The SSL private key file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_key_file = <None>
+
+# The SSL certificate file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_cert_file = <None>
+
+# The Certificate Authority (CA) certificate to use when interacting with
+# OVSDB. Required when using an "ssl:" prefixed ovsdb_connection (string
+# value)
+#ssl_ca_cert_file = <None>
+
+# Timeout in seconds for ovsdb commands. If the timeout expires, ovsdb commands
+# will fail with ALARMCLOCK error. (integer value)
+# Deprecated group/name - [DEFAULT]/ovs_vsctl_timeout
+#ovsdb_timeout = 10
diff --git a/neutron/files/queens/metadata_agent.ini b/neutron/files/queens/metadata_agent.ini
new file mode 100644
index 0000000..bb9c23d
--- /dev/null
+++ b/neutron/files/queens/metadata_agent.ini
@@ -0,0 +1,103 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+[DEFAULT]
+
+#
+# From neutron.metadata.agent
+#
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
+
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
+
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
+
+# Certificate Authority public key (CA cert) file for ssl (string value)
+#auth_ca_cert = <None>
+
+# IP address or DNS name of Nova metadata server. (unknown value)
+#nova_metadata_host = 127.0.0.1
+nova_metadata_host = {{ neutron.metadata.host }}
+
+# TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses the same config key, but in [neutron] section.
+# (string value)
+#metadata_proxy_shared_secret =
+metadata_proxy_shared_secret = {{ neutron.metadata.password }}
+
+# Protocol to access nova metadata, http or https (string value)
+# Possible values:
+# http - <No description provided>
+# https - <No description provided>
+#nova_metadata_protocol = http
+nova_metadata_protocol = http
+
+# Allow to perform insecure SSL (https) requests to nova metadata (boolean
+# value)
+#nova_metadata_insecure = false
+
+# Client certificate for nova metadata api server. (string value)
+#nova_client_cert =
+
+# Private key of client certificate. (string value)
+#nova_client_priv_key =
+
+# Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce
+# mode from metadata_proxy_user/group values, 'user': set metadata proxy socket
+# mode to 0o644, to use when metadata_proxy_user is agent effective user or
+# root, 'group': set metadata proxy socket mode to 0o664, to use when
+# metadata_proxy_group is agent effective group or root, 'all': set metadata
+# proxy socket mode to 0o666, to use otherwise. (string value)
+# Allowed values: deduce, user, group, all
+#metadata_proxy_socket_mode = deduce
+
+# Number of separate worker processes for metadata server (defaults to half of
+# the number of CPUs) (integer value)
+#metadata_workers = <num_of_cpus> / 2
+{%- if neutron.metadata.workers is defined %}
+metadata_workers = {{ neutron.metadata.workers }}
+{%- endif %}
+
+# Number of backlog requests to configure the metadata server socket with
+# (integer value)
+#metadata_backlog = 4096
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+[agent]
+
+#
+# From neutron.metadata.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+{%- if neutron.cache is defined %}
+[cache]
+{%- set _data = neutron.cache %}
+{%- include "oslo_templates/files/queens/oslo/_cache.conf" %}
+{%- endif %}
diff --git a/neutron/files/queens/ml2_conf.ini b/neutron/files/queens/ml2_conf.ini
new file mode 100644
index 0000000..e30dad1
--- /dev/null
+++ b/neutron/files/queens/ml2_conf.ini
@@ -0,0 +1,243 @@
+{%- from "neutron/map.jinja" import server with context %}
+[DEFAULT]
+
+{%- if server.logging is defined %}
+{%- set _data = server.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+[l2pop]
+
+#
+# From neutron.ml2
+#
+
+# Delay within which agent is expected to update existing ports when it
+# restarts (integer value)
+#agent_boot_time = 180
+
+
+[ml2]
+
+#
+# From neutron.ml2
+#
+
+# List of network type driver entrypoints to be loaded from the
+# neutron.ml2.type_drivers namespace. (list value)
+#type_drivers = local,flat,vlan,gre,vxlan,geneve
+
+# Ordered list of network_types to allocate as tenant networks. The default
+# value 'local' is useful for single-box testing but provides no connectivity
+# between hosts. (list value)
+#tenant_network_types = local
+tenant_network_types = {{ server.backend.tenant_network_types }}
+
+# An ordered list of networking mechanism driver entrypoints to be loaded from
+# the neutron.ml2.mechanism_drivers namespace. (list value)
+#mechanism_drivers =
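+{# Collect mechanism drivers from server:backend:mechanism; l2population is appended for VXLAN tenant networks unless the OpenDaylight driver is in use. #}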
+{%- set mechanism_drivers = [] %}
+{%- for mechanism_name, mechanism in server.get('backend', {}).get('mechanism', []).items() %}
+{%- do mechanism_drivers.append(mechanism.get('driver')) if 'driver' in mechanism %}
+{%- endfor %}
+{%- set opendaylight_enabled = true if 'opendaylight' in mechanism_drivers|join else false %}
+{%- if "vxlan" in server.backend.tenant_network_types and not opendaylight_enabled %}
+{%- do mechanism_drivers.append('l2population') %}
+{%- endif %}
+mechanism_drivers = {{ ','.join(mechanism_drivers) }}
+
+# An ordered list of extension driver entrypoints to be loaded from the
+# neutron.ml2.extension_drivers namespace. For example: extension_drivers =
+# port_security,qos (list value)
+#extension_drivers =
+{# Get the server:backend:extension mapping and build tmp_ext_list from the extensions with enabled = True #}
+{%- set tmp_ext_list = [] %}
+{%- for ext_name, ext_params in server.backend.get('extension', {}).iteritems() %}
+{%- do tmp_ext_list.append(ext_name) if ext_params.get('enabled', False) %}
+{%- endfor %}
+{# The section below keeps backward compatibility with setups where extensions were separate properties without the server:backend:extension pillar #}
+{%- do tmp_ext_list.append('port_security') if 'port_security' not in tmp_ext_list %}
+{%- do tmp_ext_list.append('qos') if server.get('qos', 'True') and 'qos' not in tmp_ext_list %}
+extension_drivers={{ tmp_ext_list|join(',') }}
+
+# Maximum size of an IP packet (MTU) that can traverse the underlying physical
+# network infrastructure without fragmentation when using an overlay/tunnel
+# protocol. This option allows specifying a physical network MTU value that
+# differs from the default global_physnet_mtu value. (integer value)
+#path_mtu = 0
+path_mtu = {{ server.get('path_mtu', server.get('global_physnet_mtu', '0')) }}
+
+# A list of mappings of physical networks to MTU values. The format of the
+# mapping is <physnet>:<mtu val>. This mapping allows specifying a physical
+# network MTU value that differs from the default global_physnet_mtu value.
+# (list value)
+#physical_network_mtus =
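+{# Build physical_network_mtus: physnet1 uses external_mtu when external access is enabled, physnet2 uses tenant_net_mtu for VLAN tenant networks, physnet3 uses ironic_net_mtu when Ironic is enabled; all fall back to external_mtu/1500. #}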
+{%- set physical_network_mtus = [] %}
+{%- if server.get('external_access', True) %}
+{%- do physical_network_mtus.append(['physnet1',server.backend.get('external_mtu', '1500')]|join(":")) %}
+{%- endif %}
+{%- if "vlan" in server.backend.tenant_network_types %}
+{%- do physical_network_mtus.append(['physnet2',server.backend.get('tenant_net_mtu', server.backend.get('external_mtu', '1500'))]|join(":")) %}
+{%- endif %}
+{%- if server.get('ironic_enabled', False) %}
+{%- do physical_network_mtus.append(['physnet3',server.backend.get('ironic_net_mtu', server.backend.get('external_mtu', '1500'))]|join(":")) %}
+{%- endif %}
+physical_network_mtus = {{ ','.join(physical_network_mtus) }}
+
+# Default network type for external networks when no provider attributes are
+# specified. By default it is None, which means that if provider attributes are
+# not specified while creating external networks then they will have the same
+# type as tenant networks. Allowed values for external_network_type config
+# option depend on the network type values configured in type_drivers config
+# option. (string value)
+#external_network_type = <None>
+
+# IP version of all overlay (tunnel) network endpoints. Use a value of 4 for
+# IPv4 or 6 for IPv6. (integer value)
+#overlay_ip_version = 4
+
+
+[ml2_type_flat]
+
+#
+# From neutron.ml2
+#
+
+# List of physical_network names with which flat networks can be created. Use
+# default '*' to allow flat networks with arbitrary physical_network names. Use
+# an empty list to disable flat networks. (list value)
+#flat_networks = *
+flat_networks = *
+
+
+[ml2_type_geneve]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
+# Geneve VNI IDs that are available for tenant network allocation (list value)
+#vni_ranges =
+vni_ranges = {{ server.get('geneve', {}).vni_ranges|default('1:65536') }}
+
+# Geneve encapsulation header size is dynamic, this value is used to calculate
+# the maximum MTU for the driver. This is the sum of the sizes of the outer ETH
+# + IP + UDP + GENEVE header sizes. The default size for this field is 50,
+# which is the size of the Geneve header without any additional option headers.
+# (integer value)
+#max_header_size = 30
+max_header_size = 38
+
+
+[ml2_type_gre]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE
+# tunnel IDs that are available for tenant network allocation (list value)
+#tunnel_id_ranges =
+tunnel_id_ranges = 2:65535
+
+
+[ml2_type_vlan]
+
+#
+# From neutron.ml2
+#
+
+# List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network>
+# specifying physical_network names usable for VLAN provider and tenant
+# networks, as well as ranges of VLAN tags on each available for allocation to
+# tenant networks. (list value)
+#network_vlan_ranges =
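+{# Build network_vlan_ranges: physnet1 from external_vlan_range when defined, physnet2 from tenant_vlan_range for VLAN tenant networks, physnet3 from ironic_vlan_range when Ironic is enabled. #}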
+{%- set network_vlan_ranges = [] %}
+{%- if server.backend.external_vlan_range is defined %}
+{%- do network_vlan_ranges.append(['physnet1',server.backend.get('external_vlan_range')]|join(":")) %}
+{%- endif %}
+{%- if "vlan" in server.backend.tenant_network_types %}
+{%- do network_vlan_ranges.append(['physnet2',server.backend.get('tenant_vlan_range')]|join(":")) %}
+{%- endif %}
+{%- if server.get('ironic_enabled', False) %}
+{%- do network_vlan_ranges.append(['physnet3',server.backend.get('ironic_vlan_range')]|join(":")) %}
+{%- endif %}
+network_vlan_ranges = {{ ','.join(network_vlan_ranges) }}
+
+[ml2_type_vxlan]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
+# VXLAN VNI IDs that are available for tenant network allocation (list value)
+#vni_ranges =
+vni_ranges = {{ server.get('vxlan', {}).vni_ranges|default('2:65535') }}
+
+# Multicast group for VXLAN. When configured, will enable sending all broadcast
+# traffic to this multicast group. When left unconfigured, will disable
+# multicast VXLAN mode. (string value)
+#vxlan_group = <None>
+vxlan_group = {{ server.get('vxlan', {}).group|default('224.0.0.1') }}
+
+
+[securitygroup]
+
+#
+# From neutron.ml2
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+#enable_security_group = true
+
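+{# Select the firewall driver: noop when security groups are disabled, the native 'openvswitch' firewall for DPDK or vlan_aware_vms setups, otherwise the iptables hybrid driver. #}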
+{%- if not server.get('security_groups_enabled', True) %}
+firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+enable_security_group = False
+{%- elif server.dpdk or server.get('vlan_aware_vms', False) %}
+firewall_driver = openvswitch
+enable_security_group = True
+{%- else %}
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+{%- endif %}
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
+
+{%- if server.backend.engine == "ovn" %}
+[ovn]
+ovn_nb_connection = tcp:{{ server.controller_vip }}:6641
+ovn_sb_connection = tcp:{{ server.controller_vip }}:6642
+ovn_l3_scheduler = leastloaded
+{%- endif %}
+
+{%- if opendaylight_enabled %}
+[ml2_odl]
+# HTTP URL of OpenDaylight REST interface. (string value)
+url = {{ server.backend.protocol|default('http') }}://{{ server.backend.host }}:{{ server.backend.rest_api_port }}/controller/nb/v2/neutron
+
+# HTTP username for authentication. (string value)
+username = {{ server.backend.user }}
+
+# HTTP password for authentication. (string value)
+password = {{ server.backend.password }}
+
+# Name of the controller to be used for port binding. (string value)
+port_binding_controller = pseudo-agentdb-binding
+
+# Enable websocket for pseudo-agent-port-binding. (boolean value)
+enable_websocket_pseudo_agentdb = {{ server.backend.enable_websocket|default('false') }}
+
+# Enables the networking-odl driver to supply special neutron ports of
+# "dhcp" type to OpenDaylight Controller for its use in providing DHCP
+# Service. (boolean value)
+enable_dhcp_service = {{ server.backend.enable_dhcp_service|default('false') }}
+{%- endif %}
diff --git a/neutron/files/queens/neutron-server b/neutron/files/queens/neutron-server
new file mode 100644
index 0000000..d147249
--- /dev/null
+++ b/neutron/files/queens/neutron-server
@@ -0,0 +1,24 @@
+# Generated by Salt.
+{%- from "neutron/map.jinja" import server with context %}
+
+# defaults for neutron-server
+
+# path to config file corresponding to the core_plugin specified in
+# neutron.conf
+#NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
+
+{%- if server.backend.engine in ["ml2", "ovn"] %}
+NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/ml2/ml2_conf.ini"
+{%- endif %}
+
+{%- if server.backend.engine == "contrail" %}
+NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
+{%- endif %}
+
+{%- if server.logging.log_appender %}
+DAEMON_ARGS="${DAEMON_ARGS} --log-config-append=/etc/neutron/logging/logging-neutron-server.conf"
+{%- endif %}
+
+{%- if server.l2gw is defined and server.l2gw.get('enabled', False) %}
+DAEMON_ARGS="${DAEMON_ARGS} --config-file=/etc/neutron/l2gw_plugin.ini"
+{%- endif %}
diff --git a/neutron/files/queens/neutron-server.conf.Debian b/neutron/files/queens/neutron-server.conf.Debian
new file mode 100644
index 0000000..320de0c
--- /dev/null
+++ b/neutron/files/queens/neutron-server.conf.Debian
@@ -0,0 +1,531 @@
+{%- from "neutron/map.jinja" import fwaas, server with context %}
+[DEFAULT]
+
+#
+# From neutron
+#
+
+# Where to store Neutron state files. This directory must be writable by the
+# agent. (string value)
+#state_path = /var/lib/neutron
+state_path = /var/lib/neutron
+
+# The host IP to bind to. (unknown value)
+#bind_host = 0.0.0.0
+bind_host = {{ server.bind.address }}
+
+# The port to bind to (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#bind_port = 9696
+bind_port = {{ server.bind.port }}
+
+# The path for API extensions. Note that this can be a colon-separated list of
+# paths. For example: api_extensions_path =
+# extensions:/path/to/more/exts:/even/more/exts. The __path__ of
+# neutron.extensions is appended to this, so if your extensions are in there
+# you don't need to specify them here. (string value)
+#api_extensions_path =
+
+# The type of authentication to use (string value)
+#auth_strategy = keystone
+auth_strategy = keystone
+
+{% if server.backend.engine == "contrail" %}
+
+api_extensions_path = extensions:/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions:/usr/lib/python2.7/dist-packages/neutron_lbaas/extensions
+# The core plugin Neutron will use (string value)
+core_plugin = neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
+
+service_plugins = neutron_plugin_contrail.plugins.opencontrail.loadbalancer.v2.plugin.LoadBalancerPluginV2
+
+{% elif server.backend.engine in ["ml2", "ovn"] %}
+
+core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+
+{% if server.backend.engine == "ml2" %}
+{% set l3_plugin = 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' %}
+{% elif server.backend.engine == "ovn" %}
+{% set l3_plugin = 'networking_ovn.l3.l3_ovn.OVNL3RouterPlugin' %}
+{% endif %}
+
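+{# Assemble service_plugins: the L3 router plugin (ML2 or OVN) plus metering, and optionally lbaasv2, FWaaS, QoS, trunk, L2 gateway and BGP VPN depending on the pillar. #}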
+service_plugins = {{ server.backend.get('router', l3_plugin)}},metering
+{%- if server.lbaas is defined -%},lbaasv2{%- endif -%}
+{%- if fwaas.get('enabled', False) -%},{{ fwaas[fwaas.api_version]['service_plugin'] }}{%- endif -%}
+{%- if server.get('qos', 'True') -%},neutron.services.qos.qos_plugin.QoSPlugin{%- endif -%}
+{%- if server.get('vlan_aware_vms', False) -%},trunk{%- endif -%}
+{%- if server.l2gw is defined and server.l2gw.get('enabled', False) -%},networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin{%- endif -%}
+{%- if server.get('bgp_vpn', {}).get('enabled', False) -%},bgpvpn{%- endif -%}
+
+{% endif %}
+
+# The service plugins Neutron will use (list value)
+#service_plugins =
+
+# The base MAC address Neutron will use for VIFs. The first 3 octets will
+# remain unchanged. If the 4th octet is not 00, it will also be used. The
+# others will be randomly generated. (string value)
+#base_mac = fa:16:3e:00:00:00
+
+# Allow the usage of the bulk API (boolean value)
+#allow_bulk = true
+
+# The maximum number of items returned in a single response, value was
+# 'infinite' or negative integer means no limit (string value)
+#pagination_max_limit = -1
+pagination_max_limit = {{ server.pagination_max_limit|default('-1') }}
+
+# Default value of availability zone hints. The availability zone aware
+# schedulers use this when the resources availability_zone_hints is empty.
+# Multiple availability zones can be specified by a comma separated string.
+# This value can be empty. In this case, even if availability_zone_hints for a
+# resource is empty, availability zone is considered for high availability
+# while scheduling the resource. (list value)
+#default_availability_zones =
+
+# Maximum number of DNS nameservers per subnet (integer value)
+#max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet (integer value)
+#max_subnet_host_routes = 20
+
+# Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to
+# True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable
+# environment. Users making subnet creation requests for IPv6 subnets without
+# providing a CIDR or subnetpool ID will be given a CIDR via the Prefix
+# Delegation mechanism. Note that enabling PD will override the behavior of the
+# default IPv6 subnetpool. (boolean value)
+#ipv6_pd_enabled = false
+
+# DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite
+# lease times. (integer value)
+#dhcp_lease_duration = 86400
+dhcp_lease_duration = 600
+
+# Domain to use for building the hostnames (string value)
+#dns_domain = openstacklocal
+dns_domain = {{ server.dns_domain }}
+
+# Driver for external DNS integration. (string value)
+#external_dns_driver = <None>
+{%- if server.backend.get('extension', {}).get('dns', {}).get('enabled', False) %}
+external_dns_driver={{ server.backend.get('extension', {}).get('dns', {}).get('engine', '') }}
+{%- endif %}
+
+# Allow sending resource operation notification to DHCP agent (boolean value)
+#dhcp_agent_notification = true
+
+# Allow overlapping IP support in Neutron. Attention: the following parameter
+# MUST be set to False if Neutron is being used in conjunction with Nova
+# security groups. (boolean value)
+#allow_overlapping_ips = false
+allow_overlapping_ips = True
+
+# Hostname to be used by the Neutron server, agents and services running on
+# this machine. All the agents and services running on this machine must use
+# the same host value. (unknown value)
+#host = example.domain
+
+# This string is prepended to the normal URL that is returned in links to the
+# OpenStack Network API. If it is empty (the default), the URLs are returned
+# unchanged. (string value)
+#network_link_prefix = <None>
+
+# Send notification to nova when port status changes (boolean value)
+#notify_nova_on_port_status_changes = true
+notify_nova_on_port_status_changes = true
+
+# Send notification to nova when port data (fixed_ips/floatingip) changes so
+# nova can update its cache. (boolean value)
+#notify_nova_on_port_data_changes = true
+notify_nova_on_port_data_changes = true
+
+# Number of seconds between sending events to nova if there are any events to
+# send. (integer value)
+#send_events_interval = 2
+
+# Neutron IPAM (IP address management) driver to use. By default, the reference
+# implementation of the Neutron IPAM driver is used. (string value)
+#ipam_driver = internal
+
+# If True, then allow plugins that support it to create VLAN transparent
+# networks. (boolean value)
+#vlan_transparent = false
+
+# MTU of the underlying physical network. Neutron uses this value to calculate
+# MTU for all virtual network components. For flat and VLAN networks, neutron
+# uses this value without modification. For overlay networks such as VXLAN,
+# neutron automatically subtracts the overlay protocol overhead from this
+# value. Defaults to 1500, the standard value for Ethernet. (integer value)
+# Deprecated group/name - [ml2]/segment_mtu
+#global_physnet_mtu = 1500
+global_physnet_mtu = {{ server.get('global_physnet_mtu', '1500') }}
+
+# Number of backlog requests to configure the socket with (integer value)
+#backlog = 4096
+
+# Number of seconds to keep retrying to listen (integer value)
+#retry_until_window = 30
+
+# Enable SSL on the API server (boolean value)
+#use_ssl = false
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval = 40
+
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+#api_workers = <None>
+{%- if server.api_workers is defined %}
+api_workers = {{ server.api_workers }}
+{%- endif %}
+
+# Number of RPC worker processes for service. (integer value)
+#rpc_workers = 1
+{%- if server.rpc_workers is defined %}
+rpc_workers = {{ server.rpc_workers }}
+{%- else %}
+rpc_workers = {{ grains.num_cpus }}
+{%- endif %}
+
+
+# Number of RPC worker processes dedicated to state reports queue. (integer
+# value)
+#rpc_state_report_workers = 1
+{%- if server.rpc_state_report_workers is defined %}
+rpc_state_report_workers = {{ server.rpc_state_report_workers }}
+{%- else %}
+rpc_state_report_workers = 4
+{%- endif %}
+
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
+
+#
+# From neutron.agent
+#
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
+
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
+
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
+
+#
+# From neutron.db
+#
+
+# Seconds to regard the agent is down; should be at least twice
+# report_interval, to be sure the agent is down for good. (integer value)
+#agent_down_time = 75
+
+# Representing the resource type whose load is being reported by the agent.
+# This can be "networks", "subnets" or "ports". When specified (Default is
+# networks), the server will extract particular load sent as part of its agent
+# configuration object from the agent report state, which is the number of
+# resources being consumed, at every report_interval. dhcp_load_type can be used
+# in combination with network_scheduler_driver =
+# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler When the
+# network_scheduler_driver is WeightScheduler, dhcp_load_type can be configured
+# to represent the choice for the resource being balanced. Example:
+# dhcp_load_type=networks (string value)
+# Possible values:
+# networks - <No description provided>
+# subnets - <No description provided>
+# ports - <No description provided>
+#dhcp_load_type = networks
+
+# Agent starts with admin_state_up=False when enable_new_agents=False. In the
+# case, user's resources will not be scheduled automatically to the agent until
+# admin changes admin_state_up to True. (boolean value)
+#enable_new_agents = true
+
+# Maximum number of routes per router (integer value)
+#max_routes = 30
+
+# Define the default value of enable_snat if not provided in
+# external_gateway_info. (boolean value)
+#enable_snat_by_default = true
+
+# Driver to use for scheduling network to DHCP agent (string value)
+#network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
+
+# Allow auto scheduling networks to DHCP agent. (boolean value)
+#network_auto_schedule = true
+
+# Automatically remove networks from offline DHCP agents. (boolean value)
+#allow_automatic_dhcp_failover = true
+
+# Number of DHCP agents scheduled to host a tenant network. If this number is
+# greater than 1, the scheduler automatically assigns multiple DHCP agents for
+# a given tenant network, providing high availability for DHCP service.
+# (integer value)
+#dhcp_agents_per_network = 1
+dhcp_agents_per_network = 2
+
+# Enable services on an agent with admin_state_up False. If this option is
+# False, when admin_state_up of an agent is turned False, services on it will
+# be disabled. Agents with admin_state_up False are not selected for automatic
+# scheduling regardless of this option. But manual scheduling to such agents is
+# available if this option is True. (boolean value)
+#enable_services_on_agents_with_admin_state_down = false
+
+# The base mac address used for unique DVR instances by Neutron. The first 3
+# octets will remain unchanged. If the 4th octet is not 00, it will also be
+# used. The others will be randomly generated. The 'dvr_base_mac' *must* be
+# different from 'base_mac' to avoid mixing them up with MAC's allocated for
+# tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00.
+# The default is 3 octet (string value)
+#dvr_base_mac = fa:16:3f:00:00:00
+
+# System-wide flag to determine the type of router that tenants can create.
+# Only admin can override. (boolean value)
+#router_distributed = false
+router_distributed = {{ server.get('dvr', 'False') }}
+
+# Determine if setup is configured for DVR. If False, DVR API extension will be
+# disabled. (boolean value)
+#enable_dvr = true
+enable_dvr = {{ server.get('dvr', 'False') }}
+
+# Driver to use for scheduling router to a default L3 agent (string value)
+#router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of routers to L3 agent. (boolean value)
+#router_auto_schedule = true
+
+# Automatically reschedule routers from offline L3 agents to online L3 agents.
+# (boolean value)
+#allow_automatic_l3agent_failover = false
+allow_automatic_l3agent_failover = true
+
+# Enable HA mode for virtual routers. (boolean value)
+#l3_ha = false
+l3_ha = {{ server.get('l3_ha', 'False') }}
+
+# Maximum number of L3 agents which a HA router will be scheduled on. If it is
+# set to 0 then the router will be scheduled on every agent. (integer value)
+#max_l3_agents_per_router = 3
+max_l3_agents_per_router = 0
+
+# Subnet used for the l3 HA admin network. (string value)
+#l3_ha_net_cidr = 169.254.192.0/18
+
+# The network type to use when creating the HA network for an HA router. By
+# default or if empty, the first 'tenant_network_types' is used. This is
+# helpful when the VRRP traffic should use a specific network which is not the
+# default one. (string value)
+#l3_ha_network_type =
+
+# The physical network name with which the HA network can be created. (string
+# value)
+#l3_ha_network_physical_name =
+
+#
+# From neutron.extensions
+#
+
+# Maximum number of allowed address pairs (integer value)
+#max_allowed_address_pair = 10
+
+{%- if server.logging is defined %}
+{%- set _data = server.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+
+{%- set _data = server.message_queue %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_default.conf" %}
+
+{%- set _data = {} %}
+{%- include "oslo_templates/files/queens/oslo/service/_wsgi_default.conf" %}
+
+nova_url = http://{{ server.compute.host }}:8774/v2
+
+
+[agent]
+
+#
+# From neutron.agent
+#
+
+# Root helper application. Use 'sudo neutron-rootwrap
+# /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to
+# 'sudo' to skip the filtering and just run the command directly. (string
+# value)
+#root_helper_daemon = <None>
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# Use the root helper when listing the namespaces on a system. This may not be
+# required depending on the security configuration. If the root helper is not
+# required, set this to False for a performance improvement. (boolean value)
+#use_helper_for_ns_read = true
+
+# Root helper daemon application to use when possible. For the agent which
+# needs to execute commands in Dom0 in the hypervisor of XenServer, this item
+# should be set to 'xenapi_root_helper', so that it will keep a XenAPI session
+# to pass commands to Dom0. (string value)
+#root_helper_daemon = <None>
+{%- if server.root_helper_daemon|default(True) %}
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
+{%- endif %}
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+report_interval = 10
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+# Add comments to iptables rules. Set to false to disallow the addition of
+# comments to generated iptables rules that describe each rule's purpose.
+# System must support the iptables comments module for addition of comments.
+# (boolean value)
+#comment_iptables_rules = true
+
+# Duplicate every iptables difference calculation to ensure the format being
+# generated matches the format of iptables-save. This option should not be
+# turned on for production systems because it imposes a performance penalty.
+# (boolean value)
+#debug_iptables_rules = false
+
+# Action to be executed when a child process dies (string value)
+# Possible values:
+# respawn - <No description provided>
+# exit - <No description provided>
+#check_child_processes_action = respawn
+
+# Interval between checks of child process liveness (seconds), use 0 to disable
+# (integer value)
+#check_child_processes_interval = 60
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+
+[cors]
+{%- if server.cors is defined %}
+{%- set _data = server.cors %}
+{%- include "oslo_templates/files/queens/oslo/_cors.conf" %}
+{%- endif %}
+
+
+[database]
+{%- set _data = server.database %}
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': server.cacert_file}) %}{% endif %}
+{%- include "oslo_templates/files/queens/oslo/_database.conf" %}
+
+[keystone_authtoken]
+{%- set _data = server.identity %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- include "oslo_templates/files/queens/keystonemiddleware/_auth_token.conf" %}
+{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
+
+
+[nova]
+{%- set _data = server.get('compute', server.get('identity', {})) %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- include "oslo_templates/files/queens/keystoneauth/_type_" + auth_type + ".conf" %}
+
+
+[oslo_concurrency]
+{%- set _data = server.get('concurrency', {}) %}
+{%- include "oslo_templates/files/queens/oslo/_concurrency.conf" %}
+
+
+{%- if server.message_queue is defined %}
+{%- set _data = server.message_queue %}
+{%- if _data.engine == 'rabbitmq' %}
+ {%- set messaging_engine = 'rabbit' %}
+{%- else %}
+ {%- set messaging_engine = _data.engine %}
+{%- endif %}
+[oslo_messaging_{{ messaging_engine }}]
+{%- include "oslo_templates/files/queens/oslo/messaging/_" + messaging_engine + ".conf" %}
+{%- endif %}
+
+[oslo_messaging_notifications]
+{%- set _data = server.notification %}
+{%- include "oslo_templates/files/queens/oslo/messaging/_notifications.conf" %}
+
+
+[oslo_middleware]
+{%- set _data = server %}
+{%- include "oslo_templates/files/queens/oslo/_middleware.conf" %}
+
+
+[oslo_policy]
+{%- if server.policy is defined %}
+{%- set _data = server.policy %}
+{%- include "oslo_templates/files/queens/oslo/_policy.conf" %}
+{%- endif %}
+
+
+[quotas]
+
+#
+# From neutron
+#
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited. (integer value)
+#default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_network = 100
+
+# Number of subnets allowed per tenant, A negative value means unlimited.
+# (integer value)
+#quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_port = 500
+
+# Default driver to use for quota checks. (string value)
+#quota_driver = neutron.db.quota.driver.DbQuotaDriver
+{% if server.backend.engine == "contrail" %}
+quota_driver = neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver
+{% endif %}
+# Keep in track in the database of current resource quota usage. Plugins which
+# do not leverage the neutron database should set this flag to False. (boolean
+# value)
+#track_quota_usage = true
+
+#
+# From neutron.extensions
+#
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_floatingip = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group = 10
+
+# Number of security rules allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group_rule = 100
+
+
+[ssl]
+{%- include "oslo_templates/files/queens/oslo/service/_ssl.conf" %}
diff --git a/neutron/files/queens/openvswitch_agent.ini b/neutron/files/queens/openvswitch_agent.ini
new file mode 100644
index 0000000..821b8b8
--- /dev/null
+++ b/neutron/files/queens/openvswitch_agent.ini
@@ -0,0 +1,285 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+
+[DEFAULT]
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/queens/oslo/_log.conf" %}
+{%- endif %}
+
+[agent]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Minimize polling by monitoring ovsdb for interface changes. (boolean value)
+#minimize_polling = true
+
+# The number of seconds to wait before respawning the ovsdb monitor after
+# losing communication with it. (integer value)
+#ovsdb_monitor_respawn_interval = 30
+
+# Network types supported by the agent (gre and/or vxlan). (list value)
+#tunnel_types =
+{%- if "vxlan" in neutron.backend.tenant_network_types %}
+tunnel_types = vxlan
+
+# The UDP port to use for VXLAN tunnels. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#vxlan_udp_port = 4789
+vxlan_udp_port = 4789
+
+# MTU size of veth interfaces (integer value)
+#veth_mtu = 9000
+
+# Use ML2 l2population mechanism driver to learn remote MAC and IPs and improve tunnel scalability. (boolean value)
+#l2_population = false
+l2_population = True
+
+# Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2
+# l2population driver. Allows the switch (when supporting an overlay) to
+# respond to an ARP request locally without performing a costly ARP broadcast
+# into the overlay. (boolean value)
+#arp_responder = false
+arp_responder = True
+{%- endif %}
+
+# Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying
+# GRE/VXLAN tunnel. (boolean value)
+#dont_fragment = true
+
+# Make the l2 agent run in DVR mode. (boolean value)
+#enable_distributed_routing = false
+enable_distributed_routing = {{ neutron.get('dvr', 'False') }}
+
+# Reset flow table on start. Setting this to True will cause brief traffic
+# interruption. (boolean value)
+#drop_flows_on_start = false
+drop_flows_on_start = False
+
+# Set or un-set the tunnel header checksum on outgoing IP packet carrying
+# GRE/VXLAN tunnel. (boolean value)
+#tunnel_csum = false
+
+# DEPRECATED: Selects the Agent Type reported (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#agent_type = Open vSwitch agent
+
+# Extensions list to use (list value)
+{# Get the neutron:backend:extension mapping and build tmp_ext_list from the extensions with enabled = True #}
+{%- set tmp_ext_list = [] %}
+
+{%- for ext_name, ext_params in neutron.backend.get('ovs_extension', {}).iteritems() %}
+{%- do tmp_ext_list.append(ext_name) if ext_params.get('enabled', False) %}
+{%- endfor %}
+
+{%- for ext_name, ext_params in neutron.backend.get('extension', {}).iteritems() %}
+{%- do tmp_ext_list.append(ext_name) if ext_params.get('enabled', False) and ext_name not in tmp_ext_list %}
+{%- endfor %}
+{# The section below keeps backward compatibility with setups where extensions were separate properties without the neutron:backend:extension pillar #}
+{%- do tmp_ext_list.append('qos') if neutron.get('qos', 'True') and 'qos' not in tmp_ext_list %}
+extensions={{ tmp_ext_list|join(',') }}
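+{#- Illustrative example (hypothetical pillar values, not part of this change): with
+    neutron:backend:ovs_extension set to {qos: {enabled: true}, log: {enabled: false}},
+    the loops above render "extensions=qos". #}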
+
+
+[network_log]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Maximum packets logging per second. (integer value)
+# Minimum value: 100
+#rate_limit = 100
+
+# Maximum number of packets per rate_limit. (integer value)
+# Minimum value: 25
+#burst_limit = 25
+
+# Output logfile path on agent side, default syslog file. (string value)
+#local_output_log_base = <None>
+
+
+[ovs]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Integration bridge to use. Do not change this parameter unless you have a
+# good reason to. This is the name of the OVS integration bridge. There is one
+# per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM
+# VIFs are attached to this bridge and then 'patched' according to their
+# network connectivity. (string value)
+#integration_bridge = br-int
+integration_bridge = br-int
+
+# Tunnel bridge to use. (string value)
+#tunnel_bridge = br-tun
+tunnel_bridge = br-tun
+
+# Peer patch port in integration bridge for tunnel bridge. (string value)
+#int_peer_patch_port = patch-tun
+
+# Peer patch port in tunnel bridge for integration bridge. (string value)
+#tun_peer_patch_port = patch-int
+
+# IP address of local overlay (tunnel) network endpoint. Use either an IPv4 or
+# IPv6 address that resides on one of the host network interfaces. The IP
+# version of this value must match the value of the 'overlay_ip_version' option
+# in the ML2 plug-in configuration file on the neutron server node(s). (IP
+# address value)
+#local_ip = <None>
+{%- if "vxlan" in neutron.backend.tenant_network_types %}
+local_ip = {{ neutron.local_ip }}
+{%- endif %}
+
+# Comma-separated list of <physical_network>:<bridge> tuples mapping physical
+# network names to the agent's node-specific Open vSwitch bridge names to be
+# used for flat and VLAN networks. The length of bridge names should be no more
+# than 11. Each bridge must exist, and should have a physical network interface
+# configured as a port. All physical networks configured on the server should
+# have mappings to appropriate bridges on each agent. Note: If you remove a
+# bridge from this mapping, make sure to disconnect it from the integration
+# bridge as it won't be managed by the agent anymore. (list value)
+{% set bridge_mappings=[] %}
+{%- if neutron.bridge_mappings is defined %}
+{%- for physnet,bridge in neutron.bridge_mappings.iteritems() %}{%- do bridge_mappings.append(physnet+':'+bridge) %}{%- endfor %}
+{%- endif %}
+{%- if 'br-floating' not in neutron.get('bridge_mappings', {}).values() %}{%- if neutron.get('external_access', True) %}
+{%- do bridge_mappings.append('physnet1:br-floating') %}{%- endif %}{%- endif %}
+{%- if 'br-prv' not in neutron.get('bridge_mappings', {}).values() %}{%- if "vlan" in neutron.backend.tenant_network_types %}
+{%- do bridge_mappings.append('physnet2:br-prv') %}{%- endif %}{%- endif %}
+{%- if 'br-baremetal' not in neutron.get('bridge_mappings', {}).values() %}{%- if neutron.get('ironic_enabled', False) %}
+{%- do bridge_mappings.append('physnet3:br-baremetal') %}{%- endif %}{%- endif %}
+{%- if bridge_mappings %}
+bridge_mappings = {{ ','.join(bridge_mappings) }}
+{%- else %}
+#bridge_mappings =
+{%- endif %}
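+{#- Illustrative example (hypothetical pillar, not part of this change): with
+    bridge_mappings {physnet1: br-floating, physnet2: br-prv} defined on the node, the
+    logic above renders "bridge_mappings = physnet1:br-floating,physnet2:br-prv"; with
+    no pillar mappings and none of the defaults applicable, the commented default is kept. #}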
+
+# Use veths instead of patch ports to interconnect the integration bridge to
+# physical networks. Support kernel without Open vSwitch patch port support so
+# long as it is set to True. (boolean value)
+#use_veth_interconnection = false
+
+# DEPRECATED: OpenFlow interface to use. (string value)
+# Possible values:
+# ovs-ofctl - <No description provided>
+# native - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#of_interface = native
+
+# OVS datapath to use. 'system' is the default value and corresponds to the
+# kernel datapath. To enable the userspace datapath set this value to 'netdev'.
+# (string value)
+# Possible values:
+# system - <No description provided>
+# netdev - <No description provided>
+#datapath_type = system
+{%- if neutron.dpdk %}
+datapath_type = netdev
+{%- endif %}
+
+# OVS vhost-user socket directory. (string value)
+#vhostuser_socket_dir = /var/run/openvswitch
+{%- if neutron.vhost_socket_dir is defined %}
+vhostuser_socket_dir = {{ neutron.vhost_socket_dir }}
+{%- endif %}
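+{#- Illustrative example (assumed pillar flags, not part of this change): a compute
+    pillar with dpdk: true and vhost_socket_dir: /var/run/openvswitch renders
+    "datapath_type = netdev" and "vhostuser_socket_dir = /var/run/openvswitch". #}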
+
+# Address to listen on for OpenFlow connections. Used only for 'native' driver.
+# (IP address value)
+#of_listen_address = 127.0.0.1
+
+# Port to listen on for OpenFlow connections. Used only for 'native' driver.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#of_listen_port = 6633
+
+# Timeout in seconds to wait for the local switch connecting the controller.
+# Used only for 'native' driver. (integer value)
+#of_connect_timeout = 30
+
+# Timeout in seconds to wait for a single OpenFlow request. Used only for
+# 'native' driver. (integer value)
+#of_request_timeout = 10
+
+# DEPRECATED: The interface for interacting with the OVSDB (string value)
+# Possible values:
+# native - <No description provided>
+# vsctl - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ovsdb_interface = native
+
+# The connection string for the OVSDB backend. Will be used by ovsdb-client
+# when monitoring and used for the all ovsdb commands when native
+# ovsdb_interface is enabled (string value)
+#ovsdb_connection = tcp:127.0.0.1:6640
+
+# The SSL private key file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_key_file = <None>
+
+# The SSL certificate file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_cert_file = <None>
+
+# The Certificate Authority (CA) certificate to use when interacting with
+# OVSDB. Required when using an "ssl:" prefixed ovsdb_connection (string
+# value)
+#ssl_ca_cert_file = <None>
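+
+# Illustrative example only (hypothetical endpoint and certificate paths; this change
+# keeps the commented defaults above): an SSL-backed OVSDB connection would look like:
+#ovsdb_connection = ssl:127.0.0.1:6640
+#ssl_key_file = /etc/neutron/ssl/ovsdb.key
+#ssl_cert_file = /etc/neutron/ssl/ovsdb.crt
+#ssl_ca_cert_file = /etc/neutron/ssl/ca.crt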
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+#enable_security_group = true
+
+{%- if not neutron.get('security_groups_enabled', True) %}
+firewall_driver = neutron.agent.firewall.NoopFirewallDriver
+enable_security_group = False
+{%- elif neutron.dpdk or neutron.get('vlan_aware_vms', False) %}
+firewall_driver = openvswitch
+enable_security_group = True
+{%- else %}
+firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+enable_security_group = True
+{%- endif %}
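+{#- Summary of the branch above (reference only, assuming the pillar keys it uses):
+    security_groups_enabled: false  -> NoopFirewallDriver, security groups disabled
+    dpdk or vlan_aware_vms enabled  -> native "openvswitch" firewall driver
+    otherwise                       -> OVSHybridIptablesFirewallDriver (hybrid iptables) #}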
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
+
+
+[xenapi]
+
+#
+# From neutron.ml2.xenapi
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_url = <None>
+
+# Username for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_username = <None>
+
+# Password for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_password = <None>