Add support for the OpenStack Rocky release

Change-Id: Ie9439cf50b5dcfb73f63acd1f01c1dcb034358ee
Related-Prod: PROD-23722
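
This change adds the Rocky generation of configuration templates under
neutron/files/rocky/ (carried forward from the earlier release templates and
adjusted for the Rocky option set), adds rocky-based control-ovn and
compute-ovn suites to the Travis matrix, repoints the test kitchen formula
dependencies from GitHub to the Gerrit mirrors, and pins an explicit
oslo.concurrency lock path in the compute, control and gateway service
metadata.

The concurrency block is plain reclass metadata. Assuming the per-release
neutron.conf templates map neutron:<role>:concurrency:lock_path onto the
[oslo_concurrency] section, a deployment model can override it per role,
for example:

    neutron:
      server:
        concurrency:
          lock_path: /var/lock/neutron
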
diff --git a/.kitchen.travis.yml b/.kitchen.travis.yml
index f847543..172f5b2 100644
--- a/.kitchen.travis.yml
+++ b/.kitchen.travis.yml
@@ -3,4 +3,4 @@
   - name: <%= ENV['SUITE'] %>
     provisioner:
       pillars-from-files:
-        neutron.sls: tests/pillar/<%= ENV['SUITE'] %>.sls
+        neutron.sls: tests/pillar/<%= ENV['SUITE'] %>.sls
\ No newline at end of file
diff --git a/.kitchen.yml b/.kitchen.yml
index 78a4834..5e091ff 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -23,13 +23,13 @@
   dependencies:
     - name: linux
       repo: git
-      source: https://github.com/salt-formulas/salt-formula-linux
+      source: https://gerrit.mcp.mirantis.com/salt-formulas/linux
     - name: keystone
       repo: git
-      source: https://github.com/salt-formulas/salt-formula-keystone
+      source: https://gerrit.mcp.mirantis.com/salt-formulas/keystone
     - name: oslo_templates
       repo: git
-      source: https://github.com/salt-formulas/salt-formula-oslo-templates
+      source: https://gerrit.mcp.mirantis.com/salt-formulas/oslo-templates
   state_top:
     base:
       "*":
@@ -326,4 +326,4 @@
           server:
             version: <%= ENV['OS_VERSION'] || 'ocata' %>
 
-# vim: ft=yaml sw=2 ts=2 sts=2 tw=125
+# vim: ft=yaml sw=2 ts=2 sts=2 tw=125
\ No newline at end of file
diff --git a/.travis.yml b/.travis.yml
index 0984d89..33253ee 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -41,6 +41,8 @@
     - PLATFORM=docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-stable/salt:2018_11_19 OS_VERSION=pike SUITE=gateway-legacy
     - PLATFORM=docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-stable/salt:2018_11_19 OS_VERSION=ocata SUITE=gateway-legacy-fwaas-v1
     - PLATFORM=docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-stable/salt:2018_11_19 OS_VERSION=pike SUITE=gateway-legacy-opendaylight
+    - PLATFORM=docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-stable/salt:2018_11_19 OS_VERSION=rocky SUITE=control-ovn
+    - PLATFORM=docker-dev-local.docker.mirantis.net/epcim/salt/saltstack-ubuntu-xenial-salt-2017.7/salt:2018_11_19 OS_VERSION=rocky SUITE=compute-ovn
 
 before_script:
   - set -o pipefail
@@ -59,4 +61,4 @@
     on_start: never     # options: [always|never|change] default: always
     on_cancel: never    # options: [always|never|change] default: always
     on_error: never    # options: [always|never|change] default: always
-  email: false
+  email: false
\ No newline at end of file
diff --git a/metadata/service/compute/ovn/single.yml b/metadata/service/compute/ovn/single.yml
index 1665ac8..187f180 100644
--- a/metadata/service/compute/ovn/single.yml
+++ b/metadata/service/compute/ovn/single.yml
@@ -33,3 +33,5 @@
             enabled: ${_param:openstack_fluentd_handler_enabled}
           ossyslog:
             enabled: ${_param:openstack_ossyslog_handler_enabled}
+      concurrency:
+        lock_path: /var/lock/neutron
diff --git a/metadata/service/compute/single.yml b/metadata/service/compute/single.yml
index 96f4268..baeb03c 100644
--- a/metadata/service/compute/single.yml
+++ b/metadata/service/compute/single.yml
@@ -47,3 +47,5 @@
             enabled: ${_param:openstack_fluentd_handler_enabled}
           ossyslog:
             enabled: ${_param:openstack_ossyslog_handler_enabled}
+      concurrency:
+        lock_path: /var/lock/neutron
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
index 33ddaa2..4753725 100644
--- a/metadata/service/control/cluster.yml
+++ b/metadata/service/control/cluster.yml
@@ -46,6 +46,8 @@
             enabled: ${_param:openstack_fluentd_handler_enabled}
           ossyslog:
             enabled: ${_param:openstack_ossyslog_handler_enabled}
+      concurrency:
+        lock_path: /var/lock/neutron
       message_queue:
         engine: rabbitmq
         host: ${_param:cluster_vip_address}
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
index 939856c..448d6fe 100644
--- a/metadata/service/control/single.yml
+++ b/metadata/service/control/single.yml
@@ -50,6 +50,8 @@
             enabled: ${_param:openstack_fluentd_handler_enabled}
           ossyslog:
             enabled: ${_param:openstack_ossyslog_handler_enabled}
+      concurrency:
+        lock_path: /var/lock/neutron
       message_queue:
         engine: rabbitmq
         host: ${_param:single_address}
diff --git a/metadata/service/gateway/single.yml b/metadata/service/gateway/single.yml
index 2649449..631e749 100644
--- a/metadata/service/gateway/single.yml
+++ b/metadata/service/gateway/single.yml
@@ -42,3 +42,5 @@
             enabled: ${_param:openstack_fluentd_handler_enabled}
           ossyslog:
             enabled: ${_param:openstack_ossyslog_handler_enabled}
+      concurrency:
+        lock_path: /var/lock/neutron
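
Note: the same two-line concurrency block lands in all five service metadata
classes touched above. Assuming the per-release neutron.conf template
consumes server.concurrency (as sketched in the commit message), the rendered
service configuration would carry:

    [oslo_concurrency]
    lock_path = /var/lock/neutron
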
diff --git a/neutron/files/rocky/ContrailPlugin.ini b/neutron/files/rocky/ContrailPlugin.ini
new file mode 100644
index 0000000..d8739d1
--- /dev/null
+++ b/neutron/files/rocky/ContrailPlugin.ini
@@ -0,0 +1,12 @@
+{% from "neutron/map.jinja" import server with context %}
+[APISERVER]
+api_server_ip = {{ server.backend.host }}
+api_server_port = 8082
+multi_tenancy = True
+contrail_extensions = ipam:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_ipam.NeutronPluginContrailIpam,policy:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_policy.NeutronPluginContrailPolicy,route-table:neutron_plugin_contrail.plugins.opencontrail.contrail_plugin_vpc.NeutronPluginContrailVpc,contrail:None
+
+[KEYSTONE]
+auth_url = http://{{ server.identity.host }}:5000
+admin_user={{ server.backend.user }}
+admin_password={{ server.backend.password }}
+admin_tenant_name={{ server.backend.tenant }}
\ No newline at end of file
diff --git a/neutron/files/rocky/api-paste.ini b/neutron/files/rocky/api-paste.ini
new file mode 100644
index 0000000..f15dee2
--- /dev/null
+++ b/neutron/files/rocky/api-paste.ini
@@ -0,0 +1,57 @@
+{%- from "neutron/map.jinja" import server with context %}
+[composite:neutron]
+use = egg:Paste#urlmap
+/: neutronversions_composite
+/v2.0: neutronapi_v2_0
+
+[composite:neutronapi_v2_0]
+use = call:neutron.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi request_id catch_errors extensions neutronapiapp_v2_0
+keystone = cors http_proxy_to_wsgi {%- if server.backend.engine == "contrail" and server.backend.get('rbac', False) %} user_token {%- endif %} request_id catch_errors authtoken keystonecontext {% if server.audit.enabled %}audit {% endif %}extensions neutronapiapp_v2_0
+
+[composite:neutronversions_composite]
+use = call:neutron.auth:pipeline_factory
+noauth = cors http_proxy_to_wsgi neutronversions
+keystone = cors http_proxy_to_wsgi {% if server.audit.enabled %}audit {% endif %}neutronversions
+
+{%- if server.backend.engine == "contrail" and server.backend.get('rbac', False) %}
+[filter:user_token]
+paste.filter_factory = neutron_plugin_contrail.plugins.opencontrail.neutron_middleware:token_factory
+{%- endif %}
+
+[filter:request_id]
+paste.filter_factory = oslo_middleware:RequestId.factory
+
+[filter:catch_errors]
+paste.filter_factory = oslo_middleware:CatchErrors.factory
+
+[filter:cors]
+paste.filter_factory = oslo_middleware.cors:filter_factory
+oslo_config_project = neutron
+
+[filter:http_proxy_to_wsgi]
+paste.filter_factory = oslo_middleware.http_proxy_to_wsgi:HTTPProxyToWSGI.factory
+
+[filter:keystonecontext]
+paste.filter_factory = neutron.auth:NeutronKeystoneContext.factory
+
+[filter:authtoken]
+paste.filter_factory = keystonemiddleware.auth_token:filter_factory
+
+[filter:extensions]
+paste.filter_factory = neutron.api.extensions:plugin_aware_extension_middleware_factory
+
+[app:neutronversions]
+paste.app_factory = neutron.pecan_wsgi.app:versions_factory
+
+[app:neutronapiapp_v2_0]
+paste.app_factory = neutron.api.v2.router:APIRouter.factory
+
+{%- if server.audit.enabled %}
+[filter:audit]
+paste.filter_factory = {{ server.get("audit", {}).get("filter_factory", "keystonemiddleware.audit:filter_factory")   }}
+audit_map_file = {{ server.get("audit", {}).get("map_file", "/etc/pycadf/neutron_api_audit_map.conf")  }}
+{%- endif %}
+
+[filter:osprofiler]
+paste.filter_factory = osprofiler.web:WsgiMiddleware.factory
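
Note: the audit filter above is only spliced into the keystone pipeline when
neutron:server:audit:enabled is set; its factory and map file fall back to
the keystonemiddleware defaults. A sketch of the pillar block the template
reads (the two override values shown are just the defaults again):

    neutron:
      server:
        audit:
          enabled: true
          filter_factory: keystonemiddleware.audit:filter_factory
          map_file: /etc/pycadf/neutron_api_audit_map.conf
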
diff --git a/neutron/files/rocky/bagpipe-bgp.conf b/neutron/files/rocky/bagpipe-bgp.conf
new file mode 100644
index 0000000..d4f8b5c
--- /dev/null
+++ b/neutron/files/rocky/bagpipe-bgp.conf
@@ -0,0 +1,109 @@
+{%- from "neutron/map.jinja" import compute with context -%}
+[BGP]
+local_address = {{ compute.bgp_vpn.bagpipe.local_address }}
+peers = {{ compute.bgp_vpn.bagpipe.peers }}
+my_as = {{ compute.bgp_vpn.bagpipe.get('autonomous_system', 64512) }}
+enable_rtc = {{ compute.bgp_vpn.bagpipe.get('enable_rtc', True) }}
+
+[COMMON]
+# Root helper and root helper application
+#   root_helper_daemon is designed to use oslo-rootwrap-daemon for commands
+#   that need to be run as root.
+#   If root_helper_daemon is unset, bagpipe-bgp will use 'root_helper' to call
+#   these commands, which defaults to 'sudo', and can be configured to use
+#   oslo.rootwrap.
+#root_helper = sudo
+#root_helper = sudo /usr/local/bin/oslo-rootwrap /etc/bagpipe-bgp/rootwrap.conf
+root_helper_daemon = sudo /usr/bin/oslo-rootwrap-daemon /etc/bagpipe-bgp/rootwrap.conf
+
+# local IP address (that others will use to send us encapsulated packets, and that
+# we will use to send)
+# Note: does not need to be specified unless different from the BGP local_address
+# Note: can be overwritten under each dataplane config
+# dataplane_local_address=eth1
+# dataplane_local_address=1.2.3.4
+
+[API]
+# BGP component API IP address and port
+host=localhost
+port=8082
+
+
+[DATAPLANE_DRIVER_IPVPN]
+
+# IP VPN dataplane driver class
+#  Example values:
+#dataplane_driver = ovs
+#dataplane_driver = linux
+dataplane_driver = ovs
+
+# OVS bridge to use (defaults to br-int)
+ovs_bridge = br-mpls
+
+# MPLS outgoing interface (for linux and ovs drivers)
+#
+# (not specifying an mpls_interface or using the '*gre*' special value
+# means that the driver will instantiate a tunnel interface and use an
+# MPLS/GRE encapsulation)
+# mpls_interface=*gre*
+mpls_interface=*gre*
+
+# GRE tunnel to use (default to mpls_gre)
+#gre_tunnel=mpls_gre
+
+# Uncomment the following to allow the use of standard MPLS-o-GRE in OVS
+# (rather than MPLS-o-Eth-o-GRE).
+#
+# gre_tunnel_options="options:layer3=true"
+
+# Support VXLAN encapsulation of IP VPN traffic with the
+# ovs driver.
+#
+# Note well: this is non-standard and aimed at making it easier
+# to test IP VPN until OVS 2.4 is shipping
+#
+# WARNING: this option does *not* co-exist with the EVPN
+#          linux driver (can't have both OVS and the linux
+#          kernel native VXLAN stack processing VXLAN)
+#
+#vxlan_encap=True
+
+# local IP address (that others will use to send us encapsulated packets, and that
+# we will use to send)
+# Note: does not need to be specified unless different from the BGP local_address
+# dataplane_local_address=eth1
+# dataplane_local_address=1.2.3.4
+
+# (obsolete:)
+ovsbr_interfaces_mtu=4000
+
+# for ovs driver, control whether or not the VRF will
+# reply to all ARP requests on the subnet and impersonate the gateway
+# (defaults to True)
+proxy_arp=False
+
+[DATAPLANE_DRIVER_EVPN]
+
+# EVPN dataplane driver class
+# Example values:
+#dataplane_driver = linux
+dataplane_driver = dummy
+
+# (obsolete)
+ovsbr_interfaces_mtu=4000
+
+# The linux driver allows forcing the VXLAN destination port, if iproute2 is
+# at version 3.14 or above (i.e. >= "ss140411" with "ip -V").
+#
+# to use standard IANA port for VXLAN:
+#vxlan_dst_port=4789
+#
+# to interoperate with switches or boxes not having the ability to
+# use another port than the linux kernel default 8472:
+#vxlan_dst_port=8472
+
+# local IP address (that others will use to send us encapsulated packets, and that
+# we will use to send)
+# Note: does not need to be specified unless different from the BGP local_address
+# dataplane_local_address=eth1
+# dataplane_local_address=1.2.3.4
\ No newline at end of file
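
Note: the [BGP] section of bagpipe-bgp.conf is driven entirely by the compute
pillar. A minimal sketch of the block it expects (addresses and the AS number
are illustrative; autonomous_system and enable_rtc fall back to the defaults
shown):

    neutron:
      compute:
        bgp_vpn:
          enabled: true
          driver: bagpipe
          bagpipe:
            local_address: 192.168.20.101   # local BGP/tunnel endpoint
            peers: 192.168.20.201           # BGP peer(s)
            autonomous_system: 64512        # default when omitted
            enable_rtc: true                # default when omitted
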
diff --git a/neutron/files/rocky/bgpvpn.conf b/neutron/files/rocky/bgpvpn.conf
new file mode 100644
index 0000000..974784e
--- /dev/null
+++ b/neutron/files/rocky/bgpvpn.conf
@@ -0,0 +1,14 @@
+# BGPVPN config
+
+[service_providers]
+{%- if server.get('bgp_vpn', {}).get('enabled', False) %}
+{%-
+  set _service_providers = {
+    'bagpipe': 'BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default',
+    'opencontrail': 'BGPVPN:OpenContrail:networking_bgpvpn.neutron.services.service_drivers.opencontrail.opencontrail.OpenContrailBGPVPNDriver:default',
+    'opendaylight': 'BGPVPN:OpenDaylight:networking_bgpvpn.neutron.services.service_drivers.opendaylight.odl.OpenDaylightBgpvpnDriver:default',
+    'opendaylight_v2': 'BGPVPN:OpenDaylight:networking_odl.bgpvpn.odl_v2.OpenDaylightBgpvpnDriver:default'
+  }
+%}
+service_provider = {{ _service_providers[server.bgp_vpn.driver] }}
+{%- endif %}
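
Note: unlike most templates here, bgpvpn.conf does not import server from
neutron/map.jinja itself; like lbaas.conf below, it is presumably rendered
through an include from the main server template and inherits server from
that context. With the bagpipe driver selected it renders to:

    [service_providers]
    service_provider = BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe.BaGPipeBGPVPNDriver:default
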
diff --git a/neutron/files/rocky/dhcp_agent.ini b/neutron/files/rocky/dhcp_agent.ini
new file mode 100644
index 0000000..9c5a739
--- /dev/null
+++ b/neutron/files/rocky/dhcp_agent.ini
@@ -0,0 +1,216 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+{%- set _dhcp = neutron.get('agents', {}).get('dhcp', {}) %}
+{%- set _dhcp_agent = _dhcp.get('agent', {}) %}
+[DEFAULT]
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+#ovs_integration_bridge = br-int
+{%- if _dhcp.ovs_integration_bridge is defined %}
+ovs_integration_bridge = {{ _dhcp.ovs_integration_bridge }}
+{%- endif %}
+
+# Use veth for an OVS interface or not. Supports kernels with limited namespace
+# support (e.g. RHEL 6.5) and rate limiting on the router's gateway port, so
+# long as ovs_use_veth is set to True. (boolean value)
+#ovs_use_veth = false
+{%- if _dhcp.ovs_use_veth is defined %}
+ovs_use_veth = {{ _dhcp.ovs_use_veth }}
+{%- endif %}
+
+# The driver used to manage the virtual interface. (string value)
+interface_driver = {{ _dhcp.interface_driver|default('openvswitch') }}
+
+#
+# From neutron.dhcp.agent
+#
+
+# The DHCP agent will resync its state with Neutron to recover from any
+# transient notification or RPC errors. The interval is the number of seconds
+# between attempts. (integer value)
+resync_interval = {{ _dhcp.resync_interval|default('30') }}
+
+# The driver used to manage the DHCP server. (string value)
+#dhcp_driver = neutron.agent.linux.dhcp.Dnsmasq
+{%- if _dhcp.dhcp_driver is defined %}
+dhcp_driver = {{ _dhcp.dhcp_driver }}
+{%- endif %}
+
+# The DHCP server can assist with providing metadata support on isolated
+# networks. Setting this value to True will cause the DHCP server to append
+# specific host routes to the DHCP request. The metadata service will only be
+# activated when the subnet does not contain any router port. The guest
+# instance must be configured to request host routes via DHCP (Option 121).
+# This option doesn't have any effect when force_metadata is set to True.
+# (boolean value)
+enable_isolated_metadata = {{ _dhcp.enable_isolated_metadata|default('true') }}
+
+# In some cases the Neutron router is not present to provide the metadata IP
+# but the DHCP server can be used to provide this info. Setting this value will
+# force the DHCP server to append specific host routes to the DHCP request. If
+# this option is set, then the metadata service will be activated for all the
+# networks. (boolean value)
+#force_metadata = false
+{%- if _dhcp.force_metadata is defined %}
+force_metadata = {{ _dhcp.force_metadata }}
+{%- elif neutron.backend.router is defined %}
+force_metadata = true
+{%- endif %}
+
+# Allows for serving metadata requests coming from a dedicated metadata access
+# network whose CIDR is 169.254.169.254/16 (or larger prefix), and is connected
+# to a Neutron router from which the VMs send metadata requests. In this case
+# DHCP Option 121 will not be injected in VMs, as they will be able to reach
+# 169.254.169.254 through a router. This option requires
+# enable_isolated_metadata = True. (boolean value)
+enable_metadata_network = {{ _dhcp.enable_metadata_network|default('false') }}
+
+# Number of threads to use during sync process. Should not exceed connection
+# pool size configured on server. (integer value)
+#num_sync_threads = 4
+{%- if _dhcp.num_sync_threads is defined %}
+num_sync_threads = {{ _dhcp.num_sync_threads }}
+{%- endif %}
+
+# Location to store DHCP server config files. (string value)
+#dhcp_confs = $state_path/dhcp
+{%- if _dhcp.dhcp_confs is defined %}
+dhcp_confs = {{ _dhcp.dhcp_confs }}
+{%- endif %}
+
+# Override the default dnsmasq settings with this file. (string value)
+#dnsmasq_config_file =
+{%- if _dhcp.dnsmasq_config_file is defined %}
+dnsmasq_config_file = {{ _dhcp.dnsmasq_config_file }}
+{%- endif %}
+
+# Comma-separated list of the DNS servers which will be used as forwarders.
+# (list value)
+#dnsmasq_dns_servers =
+
+# Base log dir for dnsmasq logging. The log contains DHCP and DNS log
+# information and is useful for debugging issues with either DHCP or DNS. If
+# this option is null, dnsmasq logging is disabled. (string value)
+#dnsmasq_base_log_dir = <None>
+{%- if _dhcp.dnsmasq_base_log_dir is defined %}
+dnsmasq_base_log_dir = {{ _dhcp.dnsmasq_base_log_dir }}
+{%- endif %}
+
+# Enables the dnsmasq service to provide name resolution for instances via DNS
+# resolvers on the host running the DHCP agent. Effectively removes the '--no-
+# resolv' option from the dnsmasq process arguments. Adding custom DNS
+# resolvers to the 'dnsmasq_dns_servers' option disables this feature. (boolean
+# value)
+#dnsmasq_local_resolv = false
+{%- if _dhcp.dnsmasq_local_resolv is defined %}
+dnsmasq_local_resolv = {{ _dhcp.dnsmasq_local_resolv }}
+{%- endif %}
+
+# Limit number of leases to prevent a denial-of-service. (integer value)
+#dnsmasq_lease_max = 16777216
+{%- if _dhcp.dnsmasq_lease_max is defined %}
+dnsmasq_lease_max = {{ _dhcp.dnsmasq_lease_max }}
+{%- endif %}
+
+# Use broadcast in DHCP replies. (boolean value)
+#dhcp_broadcast_reply = false
+{%- if _dhcp.dhcp_broadcast_reply is defined %}
+dhcp_broadcast_reply = {{ _dhcp.dhcp_broadcast_reply }}
+{%- endif %}
+
+# DHCP renewal time T1 (in seconds). If set to 0, it will default to half of
+# the lease time. (integer value)
+#dhcp_renewal_time = 0
+
+# DHCP rebinding time T2 (in seconds). If set to 0, it will default to 7/8 of
+# the lease time. (integer value)
+#dhcp_rebinding_time = 0
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+
+[agent]
+
+#
+# From neutron.az.agent
+#
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+{%- if _dhcp_agent.availability_zone is defined %}
+availability_zone = {{ _dhcp_agent.availability_zone }}
+{%- endif %}
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+{%- if _dhcp_agent.report_interval is defined %}
+report_interval = {{ _dhcp_agent.report_interval }}
+{%- endif %}
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+{%- if _dhcp_agent.log_agent_heartbeats is defined %}
+log_agent_heartbeats = {{ _dhcp_agent.log_agent_heartbeats }}
+{%- endif %}
+
+
+[ovs]
+
+#
+# From neutron.base.agent
+#
+
+# DEPRECATED: The interface for interacting with the OVSDB (string value)
+# Possible values:
+# native - <No description provided>
+# vsctl - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ovsdb_interface = native
+
+# The connection string for the OVSDB backend. Will be used by ovsdb-client
+# when monitoring and for all ovsdb commands when the native
+# ovsdb_interface is enabled (string value)
+#ovsdb_connection = tcp:127.0.0.1:6640
+
+# The SSL private key file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_key_file = <None>
+
+# The SSL certificate file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_cert_file = <None>
+
+# The Certificate Authority (CA) certificate to use when interacting with
+# OVSDB.  Required when using an "ssl:" prefixed ovsdb_connection (string
+# value)
+#ssl_ca_cert_file = <None>
+
+# Enable OVSDB debug logs (boolean value)
+#ovsdb_debug = false
+
+# Timeout in seconds for ovsdb commands. If the timeout expires, ovsdb commands
+# will fail with ALARMCLOCK error. (integer value)
+# Deprecated group/name - [DEFAULT]/ovs_vsctl_timeout
+#ovsdb_timeout = 10
+
+# The maximum number of MAC addresses to learn on a bridge managed by the
+# Neutron OVS agent. Values outside a reasonable range (10 to 1,000,000) might
+# be overridden by Open vSwitch according to the documentation. (integer value)
+#bridge_mac_table_size = 50000
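
Note: every tunable in dhcp_agent.ini is read from an optional agents:dhcp
pillar block and falls back to the commented upstream default when the key is
absent. A sketch of a gateway pillar overriding a few of them (values
illustrative):

    neutron:
      gateway:
        agents:
          dhcp:
            resync_interval: 60
            dnsmasq_lease_max: 16777216
            agent:
              availability_zone: nova
              report_interval: 30
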
diff --git a/neutron/files/rocky/l2gw/l2gateway_agent.ini b/neutron/files/rocky/l2gw/l2gateway_agent.ini
new file mode 100644
index 0000000..59a62fb
--- /dev/null
+++ b/neutron/files/rocky/l2gw/l2gateway_agent.ini
@@ -0,0 +1,68 @@
+{%- from "neutron/map.jinja" import gateway with context -%}
+[DEFAULT]
+# Show debugging output in log (sets DEBUG log level output)
+debug = {{ gateway.l2gw.debug|default('False') }}
+
+[ovsdb]
+# (StrOpt) OVSDB server tuples in the format
+# <ovsdb_name>:<ip address>:<port>[,<ovsdb_name>:<ip address>:<port>]
+# - ovsdb_name: a symbolic name that helps identify keys and certificate files
+# - ip address: the address or dns name for the ovsdb server
+# - port: the port (ssl is supported)
+{%- set ovsdb_hosts = [] %}
+{%- for opt, value in gateway.l2gw.get('ovsdb_hosts', {}).iteritems() %}
+{%- do ovsdb_hosts.append('%s:%s'|format(opt, value)) %}
+{%- endfor %}
+ovsdb_hosts = {{ ovsdb_hosts|join(',') }}
+# Example: ovsdb_hosts = 'ovsdb1:16.95.16.1:6632,ovsdb2:16.95.16.2:6632'
+
+# enable_manager = False
+# (BoolOpt) connection can be initiated by the ovsdb server.
+# By default 'enable_manager' value is False, turn on the variable to True
+# to initiate the connection from ovsdb server to l2gw agent.
+
+# manager_table_listening_port = 6632
+# (PortOpt) set the port number for the l2gateway agent, so that it can listen
+# for the ovsdb server whenever its IP is entered in the manager table of the ovsdb server.
+# by default it is set to port 6632.
+# you can use vtep-ctl utility to populate manager table of ovsdb.
+# For Example: sudo vtep-ctl set-manager tcp:x.x.x.x:6640,
+# where x.x.x.x is IP of l2gateway agent and 6640 is a port.
+
+# (StrOpt) Base path to private key file(s).
+# Agent will find key file named
+# $l2_gw_agent_priv_key_base_path/$ovsdb_name.key
+# l2_gw_agent_priv_key_base_path =
+# Example: l2_gw_agent_priv_key_base_path = '/home/someuser/keys'
+
+# (StrOpt) Base path to cert file(s).
+# Agent will find cert file named
+# $l2_gw_agent_cert_base_path/$ovsdb_name.cert
+# l2_gw_agent_cert_base_path =
+# Example: l2_gw_agent_cert_base_path = '/home/someuser/certs'
+
+# (StrOpt) Base path to ca cert file(s).
+# Agent will find ca cert file named
+# $l2_gw_agent_ca_cert_base_path/$ovsdb_name.ca_cert
+# l2_gw_agent_ca_cert_base_path =
+# Example: l2_gw_agent_ca_cert_base_path = '/home/someuser/ca_certs'
+
+# (IntOpt) The L2 gateway agent checks connection state with the OVSDB
+# servers.
+# The interval is the number of seconds between attempts.
+# periodic_interval =
+# Example: periodic_interval = 20
+
+# (IntOpt) The L2 gateway agent retries connecting to the OVSDB server
+# if a socket does not get opened on the first attempt.
+# max_connection_retries is the maximum number of such attempts
+# before giving up.
+# max_connection_retries =
+# Example: max_connection_retries = 10
+
+# (IntOpt) The remote OVSDB server sends echo requests every 4 seconds.
+# If there is no echo request on the socket for socket_timeout seconds,
+# the agent can safely assume that the connection with the remote OVSDB
+# server is lost. By default socket_timeout is set to 30 seconds.
+socket_timeout = {{ gateway.l2gw.socket_timeout|default('30') }}
+# Example: socket_timeout = 30
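
Note: ovsdb_hosts is assembled from a mapping in the gateway pillar, one
<name>: <ip>:<port> pair per OVSDB server (names and endpoints below are
illustrative):

    neutron:
      gateway:
        l2gw:
          debug: false
          socket_timeout: 30
          ovsdb_hosts:
            ovsdb1: 10.10.10.10:6632
            ovsdb2: 10.10.10.20:6632

which renders as ovsdb_hosts = ovsdb1:10.10.10.10:6632,ovsdb2:10.10.10.20:6632
(ordering follows dict iteration).
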
diff --git a/neutron/files/rocky/l2gw/l2gw_plugin.ini b/neutron/files/rocky/l2gw/l2gw_plugin.ini
new file mode 100644
index 0000000..8ea1bd7
--- /dev/null
+++ b/neutron/files/rocky/l2gw/l2gw_plugin.ini
@@ -0,0 +1,26 @@
+{%- from "neutron/map.jinja" import server with context -%}
+[DEFAULT]
+# (StrOpt) default interface name of the l2 gateway
+# default_interface_name =
+# Example: default_interface_name = "FortyGigE1/0/1"
+
+# (StrOpt) default device name of the l2 gateway
+# default_device_name =
+# Example: default_device_name = "Switch1"
+
+# (IntOpt) quota of the l2 gateway
+quota_l2_gateway = {{ server.l2gw.quota_l2_gateway|default('10') }}
+# Example: quota_l2_gateway = 10
+
+# (IntOpt) The periodic interval at which the plugin
+# checks for the monitoring L2 gateway agent
+periodic_monitoring_interval = {{ server.l2gw.periodic_monitoring_interval|default('5') }}
+# Example: periodic_monitoring_interval = 5
+
+[service_providers]
+# Must be in form:
+# service_provider=<service_type>:<name>:<driver>[:default]
+# List of allowed service types includes L2GW
+# Combination of <service type> and <name> must be unique; <driver> must also be unique
+# This is multiline option
+service_provider = {{ server.l2gw.service_provider|default('L2GW:l2gw:networking_l2gw.services.l2gateway.service_drivers.rpc_l2gw.L2gwRpcDriver:default') }}
diff --git a/neutron/files/rocky/l3_agent.ini b/neutron/files/rocky/l3_agent.ini
new file mode 100644
index 0000000..1cb9ad4
--- /dev/null
+++ b/neutron/files/rocky/l3_agent.ini
@@ -0,0 +1,260 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+{%- from "neutron/map.jinja" import fwaas with context %}
+[DEFAULT]
+
+#
+# From neutron.base.agent
+#
+
+# Name of Open vSwitch bridge to use (string value)
+#ovs_integration_bridge = br-int
+
+# Use veth for an OVS interface or not. Supports kernels with limited namespace
+# support (e.g. RHEL 6.5) and rate limiting on the router's gateway port, so
+# long as ovs_use_veth is set to True. (boolean value)
+#ovs_use_veth = false
+{%- if neutron.ovs_use_veth is defined %}
+ovs_use_veth = {{ neutron.ovs_use_veth }}
+{%- endif %}
+
+# The driver used to manage the virtual interface. (string value)
+{%- if neutron.get('agents', {}).get('l3', {}).interface_driver is defined %}
+interface_driver = {{ neutron.agents.l3.interface_driver }}
+{%- else %}
+interface_driver = openvswitch
+{%- endif %}
+
+#
+# From neutron.l3.agent
+#
+
+# The working mode for the agent. Allowed modes are: 'legacy' - this preserves
+# the existing behavior where the L3 agent is deployed on a centralized
+# networking node to provide L3 services like DNAT, and SNAT. Use this mode if
+# you do not want to adopt DVR. 'dvr' - this mode enables DVR functionality and
+# must be used for an L3 agent that runs on a compute host. 'dvr_snat' - this
+# enables centralized SNAT support in conjunction with DVR.  This mode must be
+# used for an L3 agent running on a centralized node (or in single-host
+# deployments, e.g. devstack). 'dvr_no_external' - this mode enables only
+# East/West DVR routing functionality for a L3 agent that runs on a compute
+# host, the North/South functionality such as DNAT and SNAT will be provided by
+# the centralized network node that is running in 'dvr_snat' mode. This mode
+# should be used when there is no external network connectivity on the compute
+# host. (string value)
+# Possible values:
+# dvr - <No description provided>
+# dvr_snat - <No description provided>
+# legacy - <No description provided>
+# dvr_no_external - <No description provided>
+agent_mode = {{ neutron.agent_mode }}
+
+# TCP Port used by Neutron metadata namespace proxy. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+metadata_port = 8775
+
+# Indicates that this L3 agent should also handle routers that do not have an
+# external network gateway configured. This option should be True only for a
+# single agent in a Neutron deployment, and may be False for all agents if all
+# routers must have an external network gateway. (boolean value)
+#handle_internal_only_routers = true
+
+# DEPRECATED: When external_network_bridge is set, each L3 agent can be
+# associated with no more than one external network. This value should be set
+# to the UUID of that external network. To allow L3 agent support multiple
+# external networks, both the external_network_bridge and
+# gateway_external_network_id must be left empty. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#gateway_external_network_id =
+
+# With IPv6, the network used for the external gateway does not need to have an
+# associated subnet, since the automatically assigned link-local address (LLA)
+# can be used. However, an IPv6 gateway address is needed for use as the next-
+# hop for the default route. If no IPv6 gateway address is configured here,
+# (and only then) the neutron router will be configured to get its default
+# route from router advertisements (RAs) from the upstream router; in which
+# case the upstream router must also be configured to send these RAs. The
+# ipv6_gateway, when configured, should be the LLA of the interface on the
+# upstream router. If a next-hop using a global unique address (GUA) is
+# desired, it needs to be done via a subnet allocated to the network and not
+# through this parameter.  (string value)
+#ipv6_gateway =
+
+# Driver used for ipv6 prefix delegation. This needs to be an entry point
+# defined in the neutron.agent.linux.pd_drivers namespace. See setup.cfg for
+# entry points included with the neutron source. (string value)
+#prefix_delegation_driver = dibbler
+
+# Allow running metadata proxy. (boolean value)
+#enable_metadata_proxy = true
+
+# Iptables mangle mark used to mark metadata valid requests. This mark will be
+# masked with 0xffff so that only the lower 16 bits will be used. (string
+# value)
+#metadata_access_mark = 0x1
+
+# Iptables mangle mark used to mark ingress from external network. This mark
+# will be masked with 0xffff so that only the lower 16 bits will be used.
+# (string value)
+#external_ingress_mark = 0x2
+
+# DEPRECATED: Name of bridge used for external network traffic. When this
+# parameter is set, the L3 agent will plug an interface directly into an
+# external bridge which will not allow any wiring by the L2 agent. Using this
+# will result in incorrect port statuses. This option is deprecated and will be
+# removed in Ocata. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#external_network_bridge =
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval = 40
+
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+#api_workers = <None>
+
+# Number of RPC worker processes for service. (integer value)
+#rpc_workers = 1
+
+# Number of RPC worker processes dedicated to state reports queue. (integer
+# value)
+#rpc_state_report_workers = 1
+
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
+
+# Location to store keepalived/conntrackd config files (string value)
+#ha_confs_path = $state_path/ha_confs
+
+# VRRP authentication type (string value)
+# Possible values:
+# AH - <No description provided>
+# PASS - <No description provided>
+#ha_vrrp_auth_type = PASS
+
+# VRRP authentication password (string value)
+#ha_vrrp_auth_password = <None>
+
+# The advertisement interval in seconds (integer value)
+#ha_vrrp_advert_int = 2
+
+# Number of concurrent threads for keepalived server connection requests. More
+# threads create a higher CPU load on the agent node. (integer value)
+# Minimum value: 1
+#ha_keepalived_state_change_server_threads = (1 + <num_of_cpus>) / 2
+
+# The VRRP health check interval in seconds. Values > 0 enable VRRP health
+# checks. Setting it to 0 disables VRRP health checks. Recommended value is 5.
+# This will cause pings to be sent to the gateway IP address(es) - requires
+# ICMP_ECHO_REQUEST to be enabled on the gateway. If gateway fails, all routers
+# will be reported as master, and master election will be repeated in round-
+# robin fashion, until one of the routers restores the gateway connection.
+# (integer value)
+#ha_vrrp_health_check_interval = 0
+
+# Location to store IPv6 PD files. (string value)
+#pd_confs = $state_path/pd
+
+# A decimal value as Vendor's Registered Private Enterprise Number as required
+# by RFC3315 DUID-EN. (string value)
+#vendor_pen = 8888
+
+# Location to store IPv6 RA config files (string value)
+#ra_confs = $state_path/ra
+
+# MinRtrAdvInterval setting for radvd.conf (integer value)
+#min_rtr_adv_interval = 30
+
+# MaxRtrAdvInterval setting for radvd.conf (integer value)
+#max_rtr_adv_interval = 100
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+
+[agent]
+
+#
+# From neutron.az.agent
+#
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+#
+# From neutron.base.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+#
+# From neutron.l3.agent
+#
+
+# Extensions list to use (list value)
+#extensions =
+{%- if fwaas.get('enabled', False) %}
+extensions = {{ fwaas[fwaas.api_version]['l3_extension'] }}
+{%- endif %}
+
+
+[ovs]
+
+#
+# From neutron.base.agent
+#
+
+# DEPRECATED: The interface for interacting with the OVSDB (string value)
+# Possible values:
+# native - <No description provided>
+# vsctl - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ovsdb_interface = native
+
+# The connection string for the OVSDB backend. Will be used by ovsdb-client
+# when monitoring and for all ovsdb commands when the native
+# ovsdb_interface is enabled (string value)
+#ovsdb_connection = tcp:127.0.0.1:6640
+
+# The SSL private key file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_key_file = <None>
+
+# The SSL certificate file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_cert_file = <None>
+
+# The Certificate Authority (CA) certificate to use when interacting with
+# OVSDB.  Required when using an "ssl:" prefixed ovsdb_connection (string
+# value)
+#ssl_ca_cert_file = <None>
+
+# Enable OVSDB debug logs (boolean value)
+#ovsdb_debug = false
+
+# Timeout in seconds for ovsdb commands. If the timeout expires, ovsdb commands
+# will fail with ALARMCLOCK error. (integer value)
+# Deprecated group/name - [DEFAULT]/ovs_vsctl_timeout
+#ovsdb_timeout = 10
+
+# The maximum number of MAC addresses to learn on a bridge managed by the
+# Neutron OVS agent. Values outside a reasonable range (10 to 1,000,000) might
+# be overridden by Open vSwitch according to the documentation. (integer value)
+#bridge_mac_table_size = 50000
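
Note: the l3 agent only sets [agent]/extensions when FWaaS is enabled; the
extension name is taken from map.jinja's fwaas map, keyed by api_version.
With the FWaaS v1 driver this presumably renders as:

    [agent]
    extensions = fwaas
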
diff --git a/neutron/files/rocky/lbaas.conf b/neutron/files/rocky/lbaas.conf
new file mode 100644
index 0000000..e6f48a8
--- /dev/null
+++ b/neutron/files/rocky/lbaas.conf
@@ -0,0 +1,41 @@
+# LBaaS config
+
+[service_providers]
+
+{%- if server.lbaas.enabled -%}
+
+{%- for lbaas_name, lbaas in server.lbaas.providers.iteritems() %}
+
+service_provider=LOADBALANCERV2:{{ lbaas_name }}:{{ lbaas.get('driver_path', 'avi_lbaasv2.avi_driver.AviDriver') }}:default
+
+[{{ lbaas_name }}]
+
+{% if lbaas.engine == "octavia" %}
+
+base_url = {{ lbaas.base_url }}
+request_poll_timeout = 3000
+
+[service_auth]
+auth_version = 2
+admin_password = {{ server.identity.password }}
+admin_user = {{ server.identity.user }}
+admin_tenant_name = {{ server.identity.tenant }}
+auth_url = http://{{ server.identity.host }}:35357/v2.0
+{%- endif -%}
+
+{% if lbaas.engine == "avinetworks" %}
+address={{ lbaas.controller_address }}
+user={{ lbaas.controller_user }}
+password={{ lbaas.controller_password }}
+cloud={{ lbaas.controller_cloud_name }}
+{%- endif -%}
+
+{%- endfor -%}
+
+{% elif server.backend.engine == "contrail" %}
+
+service_provider = LOADBALANCERV2:Opencontrail:neutron_plugin_contrail.plugins.opencontrail.loadbalancer.driver.OpencontrailLoadbalancerDummyDriver:default
+
+{% include "neutron/files/"+server.version+"/ContrailPlugin.ini" %}
+
+{%- endif -%}
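
Note: lbaas.conf iterates over neutron:server:lbaas:providers and emits one
service_provider line plus a per-provider section for each entry. A minimal
octavia-style sketch (provider name, driver path and URL are illustrative):

    neutron:
      server:
        lbaas:
          enabled: true
          providers:
            octavia:
              engine: octavia
              driver_path: neutron_lbaas.drivers.octavia.driver.OctaviaDriver
              base_url: http://10.0.0.10:9876
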
diff --git a/neutron/files/rocky/linuxbridge_agent.ini b/neutron/files/rocky/linuxbridge_agent.ini
new file mode 100644
index 0000000..17f1a15
--- /dev/null
+++ b/neutron/files/rocky/linuxbridge_agent.ini
@@ -0,0 +1,217 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+[DEFAULT]
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+
+[agent]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# The number of seconds the agent will wait between polling for local device
+# changes. (integer value)
+#polling_interval = 2
+
+# Set new timeout in seconds for new rpc calls after agent receives SIGTERM. If
+# value is set to 0, rpc timeout won't be changed (integer value)
+#quitting_rpc_timeout = 10
+
+# The DSCP value to use for outer headers during tunnel encapsulation. (integer
+# value)
+# Minimum value: 0
+# Maximum value: 63
+#dscp = <None>
+
+# If set to True, the DSCP value of tunnel interfaces is overwritten and set to
+# inherit. The DSCP value of the inner header is then copied to the outer
+# header. (boolean value)
+#dscp_inherit = false
+
+# Extensions list to use (list value)
+#extensions =
+
+
+[linux_bridge]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Comma-separated list of <physical_network>:<physical_interface> tuples
+# mapping physical network names to the agent's node-specific physical network
+# interfaces to be used for flat and VLAN networks. All physical networks
+# listed in network_vlan_ranges on the server should have mappings to
+# appropriate interfaces on each agent. (list value)
+#physical_interface_mappings =
+
+# List of <physical_network>:<physical_bridge> (list value)
+{% set bridge_mappings = [] %}
+{%- if neutron.bridge_mappings is defined %}
+{%-   for physnet, bridge in neutron.bridge_mappings.iteritems() %}
+{%-     do bridge_mappings.append(physnet ~ ':' ~ bridge) %}
+{%-   endfor %}
+{%- endif %}
+{%- if 'br-floating' not in neutron.get('bridge_mappings', {}).values() %}
+{%-   if neutron.get('external_access', True) %}
+{%-     do bridge_mappings.append('physnet1:br-floating') %}
+{%-   endif %}
+{%- endif %}
+{%- if 'br-prv' not in neutron.get('bridge_mappings', {}).values() %}
+{%-   if "vlan" in neutron.backend.tenant_network_types %}
+{%-     do bridge_mappings.append('physnet2:br-prv') %}
+{%-   endif %}
+{%- endif %}
+{%- if 'br-baremetal' not in neutron.get('bridge_mappings', {}).values() %}
+{%-   if neutron.get('ironic_enabled', False) %}
+{%-     do bridge_mappings.append('physnet3:br-baremetal') %}
+{%-   endif %}
+{%- endif %}
+{%- if bridge_mappings %}
+bridge_mappings = {{ bridge_mappings|join(',') }}
+{%- else %}
+#bridge_mappings =
+{%- endif %}
+
+
+[network_log]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Maximum packets logging per second. (integer value)
+# Minimum value: 100
+#rate_limit = 100
+
+# Maximum number of packets per rate_limit. (integer value)
+# Minimum value: 25
+#burst_limit = 25
+
+# Output logfile path on agent side, default syslog file. (string value)
+#local_output_log_base = <None>
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+#firewall_driver = <None>
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+#enable_security_group = true
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
+
+
+{%- if "vxlan" in neutron.backend.tenant_network_types %}
+[vxlan]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Enable VXLAN on the agent. Can be enabled when agent is managed by ml2 plugin
+# using linuxbridge mechanism driver (boolean value)
+enable_vxlan = true
+
+# TTL for vxlan interface protocol packets. (integer value)
+#ttl = <None>
+
+# DEPRECATED: TOS for vxlan interface protocol packets. This option is
+# deprecated in favor of the dscp option in the AGENT section and will be
+# removed in a future release. To convert the TOS value to DSCP, divide by 4.
+# (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#tos = <None>
+
+# Multicast group(s) for vxlan interface. A range of group addresses may be
+# specified by using CIDR notation. Specifying a range allows different VNIs to
+# use different group addresses, reducing or eliminating spurious broadcast
+# traffic to the tunnel endpoints. To reserve a unique group for each possible
+# (24-bit) VNI, use a /8 such as 239.0.0.0/8. This setting must be the same on
+# all the agents. (string value)
+#vxlan_group = 224.0.0.1
+
+# IP address of local overlay (tunnel) network endpoint. Use either an IPv4 or
+# IPv6 address that resides on one of the host network interfaces. The IP
+# version of this value must match the value of the 'overlay_ip_version' option
+# in the ML2 plug-in configuration file on the neutron server node(s). (IP
+# address value)
+local_ip = {{ neutron.local_ip }}
+
+# The minimum of the UDP source port range used for VXLAN communication. (port
+# value)
+# Minimum value: 0
+# Maximum value: 65535
+#udp_srcport_min = 0
+
+# The maximum of the UDP source port range used for VXLAN communication. (port
+# value)
+# Minimum value: 0
+# Maximum value: 65535
+#udp_srcport_max = 0
+
+# The UDP port used for VXLAN communication. By default, the Linux kernel
+# doesn't use the IANA assigned standard value, so if you want to use it, this
+# option must be set to 4789. It is not set by default because of backward
+# compatibility. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#udp_dstport = <None>
+
+# Extension to use alongside ml2 plugin's l2population mechanism driver. It
+# enables the plugin to populate VXLAN forwarding table. (boolean value)
+l2_population = true
+
+# Enable local ARP responder which provides local responses instead of
+# performing ARP broadcast into the overlay. Enabling local ARP responder is
+# not fully compatible with the allowed-address-pairs extension. (boolean
+# value)
+#arp_responder = false
+
+# Optional comma-separated list of <multicast address>:<vni_min>:<vni_max>
+# triples describing how to assign a multicast address to VXLAN according to
+# its VNI ID. (list value)
+#multicast_ranges =
+{%- endif %}
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.linuxbridge.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+{%- if not neutron.get('security_groups_enabled', True) %}
+{%-   set _firewall_driver = 'noop' %}
+{%- else %}
+{%-   set _firewall_driver = 'iptables' %}
+{%- endif %}
+firewall_driver = {{ neutron.get('firewall_driver', _firewall_driver) }}
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+enable_security_group = {{ neutron.get('security_groups_enabled', 'true') }}
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
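
Note: bridge_mappings is derived rather than taken verbatim: explicit
neutron:gateway:bridge_mappings entries come first, then physnet1:br-floating
is appended while external_access stays enabled, physnet2:br-prv when "vlan"
is among the tenant network types, and physnet3:br-baremetal when
ironic_enabled is set. With the defaults and VLAN tenant networks this
yields:

    bridge_mappings = physnet1:br-floating,physnet2:br-prv
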
diff --git a/neutron/files/rocky/metadata_agent.ini b/neutron/files/rocky/metadata_agent.ini
new file mode 100644
index 0000000..dadb500
--- /dev/null
+++ b/neutron/files/rocky/metadata_agent.ini
@@ -0,0 +1,106 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+[DEFAULT]
+
+#
+# From neutron.metadata.agent
+#
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
+
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
+
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
+
+# Certificate Authority public key (CA cert) file for ssl (string value)
+#auth_ca_cert = <None>
+
+# IP address or DNS name of Nova metadata server. (host address value)
+nova_metadata_host = {{ neutron.metadata.host }}
+
+# TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID header with a
+# shared secret to prevent spoofing. You may select any string for a secret,
+# but it must match here and in the configuration used by the Nova Metadata
+# Server. NOTE: Nova uses the same config key, but in [neutron] section.
+# (string value)
+metadata_proxy_shared_secret = {{ neutron.metadata.password }}
+
+# Protocol to access nova metadata, http or https (string value)
+# Possible values:
+# http - <No description provided>
+# https - <No description provided>
+nova_metadata_protocol = http
+
+# Allow to perform insecure SSL (https) requests to nova metadata (boolean
+# value)
+#nova_metadata_insecure = false
+
+# Client certificate for nova metadata api server. (string value)
+#nova_client_cert =
+
+# Private key of client certificate. (string value)
+#nova_client_priv_key =
+
+# Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce': deduce
+# mode from metadata_proxy_user/group values, 'user': set metadata proxy socket
+# mode to 0o644, to use when metadata_proxy_user is agent effective user or
+# root, 'group': set metadata proxy socket mode to 0o664, to use when
+# metadata_proxy_group is agent effective group or root, 'all': set metadata
+# proxy socket mode to 0o666, to use otherwise. (string value)
+# Possible values:
+# deduce - <No description provided>
+# user - <No description provided>
+# group - <No description provided>
+# all - <No description provided>
+#metadata_proxy_socket_mode = deduce
+
+# Number of separate worker processes for metadata server (defaults to half of
+# the number of CPUs) (integer value)
+#metadata_workers = <num_of_cpus> / 2
+{%- if neutron.metadata.workers is defined %}
+metadata_workers = {{ neutron.metadata.workers }}
+{%- endif %}
+
+# Number of backlog requests to configure the metadata server socket with
+# (integer value)
+#metadata_backlog = 4096
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+
+[agent]
+
+#
+# From neutron.metadata.agent
+#
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+
+[cache]
+{%- if neutron.cache is defined %}
+{%- set _data = neutron.cache %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_cache.conf" %}
+{%- endif %}
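
Note: the metadata agent template requires the metadata host and shared
secret in the pillar and optionally a worker count; a sketch (values
illustrative):

    neutron:
      gateway:
        metadata:
          host: 10.0.0.10      # nova_metadata_host
          password: secret     # metadata_proxy_shared_secret
          workers: 2           # metadata_workers, omitted when unset
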
diff --git a/neutron/files/rocky/ml2_conf.ini b/neutron/files/rocky/ml2_conf.ini
new file mode 100644
index 0000000..48ad7df
--- /dev/null
+++ b/neutron/files/rocky/ml2_conf.ini
@@ -0,0 +1,264 @@
+{%- from "neutron/map.jinja" import server with context %}
+[DEFAULT]
+
+{%- if server.logging is defined %}
+{%- set _data = server.logging %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+
+[l2pop]
+
+#
+# From neutron.ml2
+#
+
+# Delay within which agent is expected to update existing ports when it
+# restarts (integer value)
+agent_boot_time = {{ server.get('agent_boot_time', 180) }}
+
+
+[ml2]
+
+#
+# From neutron.ml2
+#
+
+# List of network type driver entrypoints to be loaded from the
+# neutron.ml2.type_drivers namespace. (list value)
+#type_drivers = local,flat,vlan,gre,vxlan,geneve
+
+# Ordered list of network_types to allocate as tenant networks. The default
+# value 'local' is useful for single-box testing but provides no connectivity
+# between hosts. (list value)
+tenant_network_types = {{ server.backend.tenant_network_types }}
+
+# An ordered list of networking mechanism driver entrypoints to be loaded from
+# the neutron.ml2.mechanism_drivers namespace. (list value)
+{%- set mechanism_drivers = [] %}
+{%- set default_order = 0 %}
+{%- for mechanism_name, mechanism in server.get('backend', {}).get('mechanism', []).items() %}
+{%-   if 'driver' in mechanism %}
+{%-     do mechanism.update({'order': default_order}) if 'order' not in mechanism %}
+{%-     do mechanism_drivers.append(mechanism) %}
+{%-   endif %}
+{%- endfor %}
+{%- if "vxlan" in server.backend.tenant_network_types and not server.backend.opendaylight|default(False) %}
+{%-   do mechanism_drivers.append({'driver': 'l2population', 'order': default_order}) %}
+{%- endif %}
+mechanism_drivers = {{ mechanism_drivers|sort(True, attribute='order')|join(',', attribute='driver') }}
+
+# An ordered list of extension driver entrypoints to be loaded from the
+# neutron.ml2.extension_drivers namespace. For example: extension_drivers =
+# port_security,qos (list value)
+#extension_drivers =
+{# Get the server:backend:extension mapping and build tmp_ext_list with extensions where enabled = True #}
+{%- set tmp_ext_list = [] %}
+{%- for ext_name, ext_params in server.backend.get('extension', {}).iteritems() %}
+{%-   do tmp_ext_list.append(ext_name) if ext_params.get('enabled', False)  %}
+{%- endfor %}
+{# The section below keeps backward compatibility from when extensions were separate properties, without the server:backend:extension pillar #}
+{%- do tmp_ext_list.append('port_security') if 'port_security' not in tmp_ext_list %}
+{%- do tmp_ext_list.append('qos') if server.get('qos', 'True') and 'qos' not in tmp_ext_list %}
+extension_drivers = {{ tmp_ext_list|join(',') }}
+
+# Maximum size of an IP packet (MTU) that can traverse the underlying physical
+# network infrastructure without fragmentation when using an overlay/tunnel
+# protocol. This option allows specifying a physical network MTU value that
+# differs from the default global_physnet_mtu value. (integer value)
+path_mtu = {{ server.get('path_mtu', server.get('global_physnet_mtu', '0')) }}
+
+# A list of mappings of physical networks to MTU values. The format of the
+# mapping is <physnet>:<mtu val>. This mapping allows specifying a physical
+# network MTU value that differs from the default global_physnet_mtu value.
+# (list value)
+{%- set physical_network_mtus = [] %}
+{%- for physnet, params in server.backend.get('physnets', {}).iteritems() %}
+{%-   do physical_network_mtus.append([physnet, params.get('mtu')]|join(":")) if params.get('mtu', False) %}
+{%- endfor %}
+{%- if not physical_network_mtus %}
+{%-   if server.get('external_access', True) %}
+{%-     do physical_network_mtus.append(['physnet1',server.backend.get('external_mtu', '1500')]|join(":")) %}
+{%-   endif %}
+{%-   if "vlan" in server.backend.tenant_network_types %}
+{%-     do physical_network_mtus.append(['physnet2',server.backend.get('tenant_net_mtu', server.backend.get('external_mtu', '1500'))]|join(":")) %}
+{%-   endif %}
+{%-   if server.get('ironic_enabled', False) %}
+{%-     do physical_network_mtus.append(['physnet3',server.backend.get('ironic_net_mtu', server.backend.get('external_mtu', '1500'))]|join(":")) %}
+{%-   endif %}
+{%- endif %}
+physical_network_mtus = {{ ','.join(physical_network_mtus) }}
+
+# Default network type for external networks when no provider attributes are
+# specified. By default it is None, which means that if provider attributes are
+# not specified while creating external networks then they will have the same
+# type as tenant networks. Allowed values for external_network_type config
+# option depend on the network type values configured in type_drivers config
+# option. (string value)
+#external_network_type = <None>
+
+# IP version of all overlay (tunnel) network endpoints. Use a value of 4 for
+# IPv4 or 6 for IPv6. (integer value)
+#overlay_ip_version = 4
+
+
+[ml2_type_flat]
+
+#
+# From neutron.ml2
+#
+
+# List of physical_network names with which flat networks can be created. Use
+# default '*' to allow flat networks with arbitrary physical_network names. Use
+# an empty list to disable flat networks. (list value)
+{%- set flat_nets = [] %}
+{%- for physnet, params in server.backend.get('physnets', {}).iteritems() %}
+{%-   do flat_nets.append(physnet) if 'flat' in params.get('types', []) %}
+{%- endfor %}
+{%- if not flat_nets %}
+{%-   do flat_nets.append(server.backend.get('flat_networks_default', '*')) %}
+{%- endif %}
+flat_networks = {{ ','.join(flat_nets) }}
+
+
+[ml2_type_geneve]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
+# Geneve VNI IDs that are available for tenant network allocation (list value)
+vni_ranges = {{ server.get('geneve', {}).vni_ranges|default('1:65536') }}
+
+# Geneve encapsulation header size is dynamic, this value is used to calculate
+# the maximum MTU for the driver. This is the sum of the sizes of the outer ETH
+# + IP + UDP + GENEVE header sizes. The default size for this field is 50,
+# which is the size of the Geneve header without any additional option headers.
+# (integer value)
+max_header_size = 38
+
+
+[ml2_type_gre]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <tun_min>:<tun_max> tuples enumerating ranges of GRE
+# tunnel IDs that are available for tenant network allocation (list value)
+tunnel_id_ranges = 2:65535
+
+
+[ml2_type_vlan]
+
+#
+# From neutron.ml2
+#
+
+# List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network>
+# specifying physical_network names usable for VLAN provider and tenant
+# networks, as well as ranges of VLAN tags on each available for allocation to
+# tenant networks. (list value)
+{%- set network_vlan_ranges = [] %}
+{%- for physnet, params in server.backend.get('physnets', {}).iteritems() %}
+{%-   if 'vlan' in params.get('types', ['vlan']) %}
+{%-     if params.get('vlan_range', False) %}
+{%-       for vlan_range in params.get('vlan_range').split(',') %}
+{%-         do network_vlan_ranges.append([physnet, vlan_range]|join(":")) %}
+{%-       endfor %}
+{%-     else %}
+{%-       do network_vlan_ranges.append(physnet) %}
+{%-     endif %}
+{%-   endif %}
+{%- endfor %}
+{%- if not network_vlan_ranges %}
+{%-   if server.backend.external_vlan_range is defined %}
+{%-     do network_vlan_ranges.append(['physnet1',server.backend.get('external_vlan_range')]|join(":")) %}
+{%-   endif %}
+{%-   if "vlan" in server.backend.tenant_network_types %}
+{%-     do network_vlan_ranges.append(['physnet2',server.backend.get('tenant_vlan_range')]|join(":")) %}
+{%-   endif %}
+{%-   if server.get('ironic_enabled', False) %}
+{%-     do network_vlan_ranges.append(['physnet3',server.backend.get('ironic_vlan_range')]|join(":")) %}
+{%-   endif %}
+{%- endif %}
+network_vlan_ranges = {{ ','.join(network_vlan_ranges) }}
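+{#- Illustration (hypothetical pillar): a physnet with types ['vlan'] and #}
+{#- vlan_range '100:200,300:400' renders #}
+{#- network_vlan_ranges = physnet2:100:200,physnet2:300:400; without a #}
+{#- physnets pillar the legacy physnet1/physnet2/physnet3 fallbacks apply. #}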
+
+
+[ml2_type_vxlan]
+
+#
+# From neutron.ml2
+#
+
+# Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
+# VXLAN VNI IDs that are available for tenant network allocation (list value)
+vni_ranges = {{ server.get('vxlan', {}).vni_ranges|default('2:65535') }}
+
+# Multicast group for VXLAN. When configured, will enable sending all broadcast
+# traffic to this multicast group. When left unconfigured, will disable
+# multicast VXLAN mode. (string value)
+vxlan_group = {{ server.get('vxlan', {}).group|default('224.0.0.1') }}
+
+
+[securitygroup]
+
+#
+# From neutron.ml2
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+{%- if not server.get('security_groups_enabled', True) %}
+{%-   set _firewall_driver = 'noop' %}
+{%- elif server.get('dpdk', False) or server.get('vlan_aware_vms', False) %}
+{%-   set _firewall_driver = 'openvswitch' %}
+{%- else %}
+{%-   set _firewall_driver = 'iptables_hybrid' %}
+{%- endif %}
+firewall_driver = {{ server.get('firewall_driver', _firewall_driver) }}
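+{#- Driver selection above: 'noop' when security groups are disabled, #}
+{#- 'openvswitch' for DPDK or trunk (vlan_aware_vms) setups, otherwise #}
+{#- 'iptables_hybrid'; an explicit firewall_driver pillar overrides it. #}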
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+enable_security_group = {{ server.get('security_groups_enabled', True) }}
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
+
+
+{%- if server.backend.engine == "ovn" %}
+{%- set _ovn = server.backend.ovn|default({}) %}
+[ovn]
+ovn_nb_connection = tcp:{{ server.controller_vip }}:6641
+ovn_sb_connection = tcp:{{ server.controller_vip }}:6642
+ovn_l3_scheduler = {{ _ovn.ovn_l3_scheduler|default('leastloaded') }}
+ovn_metadata_enabled = {{ _ovn.metadata_enabled|default('false') }}
+neutron_sync_mode = {{ _ovn.neutron_sync_mode|default('repair') }}
+enable_distributed_floating_ip = {{ server.dvr|default('false') }}
+{%- endif %}
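+{#- The [ovn] section renders only for backend engine "ovn". A minimal #}
+{#- pillar sketch (illustrative values): #}
+{#-   neutron:server:backend:engine: ovn #}
+{#-   neutron:server:controller_vip: 10.0.0.10 #}
+{#-   neutron:server:backend:ovn:metadata_enabled: true #}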
+
+
+{%- if server.backend.opendaylight|default(False) %}
+[ml2_odl]
+# HTTP URL of OpenDaylight REST interface. (string value)
+url = {{ server.backend.protocol|default('http') }}://{{ server.backend.host }}:{{ server.backend.rest_api_port }}/controller/nb/v2/neutron
+
+# HTTP username for authentication. (string value)
+username = {{ server.backend.user }}
+
+# HTTP password for authentication. (string value)
+password = {{ server.backend.password }}
+
+# Name of the controller to be used for port binding. (string value)
+port_binding_controller = pseudo-agentdb-binding
+
+# Enable websocket for pseudo-agent-port-binding. (boolean value)
+enable_websocket_pseudo_agentdb = {{ server.backend.enable_websocket|default('false') }}
+
+# Enables the networking-odl driver to supply special neutron ports of
+# "dhcp" type to OpenDaylight Controller for its use in providing DHCP
+# Service. (boolean value)
+enable_dhcp_service = {{ server.backend.enable_dhcp_service|default('false') }}
+{%- endif %}
diff --git a/neutron/files/rocky/neutron-generic.conf b/neutron/files/rocky/neutron-generic.conf
new file mode 100644
index 0000000..8b6fda7
--- /dev/null
+++ b/neutron/files/rocky/neutron-generic.conf
@@ -0,0 +1,510 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+[DEFAULT]
+
+#
+# From neutron
+#
+
+# Where to store Neutron state files. This directory must be writable by the
+# agent. (string value)
+#state_path = /var/lib/neutron
+
+# The host IP to bind to. (host address value)
+#bind_host = 0.0.0.0
+
+# The port to bind to (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#bind_port = 9696
+
+# The path for API extensions. Note that this can be a colon-separated list of
+# paths. For example: api_extensions_path =
+# extensions:/path/to/more/exts:/even/more/exts. The __path__ of
+# neutron.extensions is appended to this, so if your extensions are in there
+# you don't need to specify them here. (string value)
+#api_extensions_path =
+
+# The type of authentication to use (string value)
+#auth_strategy = keystone
+
+{%- if neutron.backend.engine == "ml2" %}
+# The core plugin Neutron will use (string value)
+core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+
+# The service plugins Neutron will use (list value)
+service_plugins = {{ neutron.backend.get('router', 'router') }},metering
+{%- endif %}
+
+# The base MAC address Neutron will use for VIFs. The first 3 octets will
+# remain unchanged. If the 4th octet is not 00, it will also be used. The
+# others will be randomly generated. (string value)
+#base_mac = fa:16:3e:00:00:00
+{%- if neutron.base_mac is defined %}
+base_mac = {{ neutron.base_mac }}
+{%- endif %}
+
+# Allow the usage of the bulk API (boolean value)
+#allow_bulk = true
+
+# The maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit. (string value)
+pagination_max_limit = {{ neutron.pagination_max_limit|default('-1') }}
+
+# Default value of availability zone hints. The availability zone aware
+# schedulers use this when the resources availability_zone_hints is empty.
+# Multiple availability zones can be specified by a comma separated string.
+# This value can be empty. In this case, even if availability_zone_hints for a
+# resource is empty, availability zone is considered for high availability
+# while scheduling the resource. (list value)
+#default_availability_zones =
+
+# Maximum number of DNS nameservers per subnet (integer value)
+#max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet (integer value)
+#max_subnet_host_routes = 20
+
+# Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to
+# True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable
+# environment. Users making subnet creation requests for IPv6 subnets without
+# providing a CIDR or subnetpool ID will be given a CIDR via the Prefix
+# Delegation mechanism. Note that enabling PD will override the behavior of the
+# default IPv6 subnetpool. (boolean value)
+#ipv6_pd_enabled = false
+
+# DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite
+# lease times. (integer value)
+dhcp_lease_duration = {{ neutron.dhcp_lease_duration|default('600') }}
+
+# Domain to use for building the hostnames (string value)
+#dns_domain = openstacklocal
+
+# Driver for external DNS integration. (string value)
+#external_dns_driver = <None>
+
+# Allow sending resource operation notification to DHCP agent (boolean value)
+#dhcp_agent_notification = true
+
+# Allow overlapping IP support in Neutron. Attention: the following parameter
+# MUST be set to False if Neutron is being used in conjunction with Nova
+# security groups. (boolean value)
+allow_overlapping_ips = true
+
+# Hostname to be used by the Neutron server, agents and services running on
+# this machine. All the agents and services running on this machine must use
+# the same host value. (host address value)
+#host = example.domain
+
+# This string is prepended to the normal URL that is returned in links to the
+# OpenStack Network API. If it is empty (the default), the URLs are returned
+# unchanged. (string value)
+#network_link_prefix = <None>
+
+# Send notification to nova when port status changes (boolean value)
+notify_nova_on_port_status_changes = true
+
+# Send notification to nova when port data (fixed_ips/floatingip) changes so
+# nova can update its cache. (boolean value)
+notify_nova_on_port_data_changes = true
+
+# Number of seconds between sending events to nova if there are any events to
+# send. (integer value)
+#send_events_interval = 2
+
+# Neutron IPAM (IP address management) driver to use. By default, the reference
+# implementation of the Neutron IPAM driver is used. (string value)
+#ipam_driver = internal
+
+# If True, then allow plugins that support it to create VLAN transparent
+# networks. (boolean value)
+#vlan_transparent = false
+
+# If True, then allow plugins to decide whether to perform validations on
+# filter parameters. Filter validation is enabled if this config is turned on
+# and it is supported by all plugins. (boolean value)
+#filter_validation = true
+
+# MTU of the underlying physical network. Neutron uses this value to calculate
+# MTU for all virtual network components. For flat and VLAN networks, neutron
+# uses this value without modification. For overlay networks such as VXLAN,
+# neutron automatically subtracts the overlay protocol overhead from this
+# value. Defaults to 1500, the standard value for Ethernet. (integer value)
+# Deprecated group/name - [ml2]/segment_mtu
+global_physnet_mtu = {{ neutron.get('global_physnet_mtu', '1500') }}
+
+# Number of backlog requests to configure the socket with (integer value)
+#backlog = 4096
+
+# Number of seconds to keep retrying to listen (integer value)
+#retry_until_window = 30
+
+# Enable SSL on the API server (boolean value)
+#use_ssl = false
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval = 40
+
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+#api_workers = <None>
+{%- if neutron.api_workers is defined %}
+api_workers = {{ neutron.api_workers }}
+{%- endif %}
+
+# Number of RPC worker processes for service. (integer value)
+rpc_workers = {{ neutron.rpc_workers|default(grains.num_cpus) }}
+
+# Number of RPC worker processes dedicated to state reports queue. (integer
+# value)
+rpc_state_report_workers = {{ neutron.rpc_state_report_workers|default('4') }}
+
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
+
+#
+# From neutron.agent
+#
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
+
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
+
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
+
+#
+# From neutron.db
+#
+
+# Seconds to regard the agent is down; should be at least twice
+# report_interval, to be sure the agent is down for good. (integer value)
+#agent_down_time = 75
+
+# Representing the resource type whose load is being reported by the agent.
+# This can be "networks", "subnets" or "ports". When specified (Default is
+# networks), the server will extract particular load sent as part of its agent
+# configuration object from the agent report state, which is the number of
+# resources being consumed, at every report_interval. dhcp_load_type can be
+# used in combination with network_scheduler_driver =
+# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler. When the
+# network_scheduler_driver is WeightScheduler, dhcp_load_type can be configured
+# to represent the choice for the resource being balanced. Example:
+# dhcp_load_type=networks (string value)
+# Possible values:
+# networks - <No description provided>
+# subnets - <No description provided>
+# ports - <No description provided>
+#dhcp_load_type = networks
+
+# Agent starts with admin_state_up=False when enable_new_agents=False. In the
+# case, user's resources will not be scheduled automatically to the agent until
+# admin changes admin_state_up to True. (boolean value)
+#enable_new_agents = true
+
+# Maximum number of routes per router (integer value)
+#max_routes = 30
+
+# Define the default value of enable_snat if not provided in
+# external_gateway_info. (boolean value)
+#enable_snat_by_default = true
+
+# Driver to use for scheduling network to DHCP agent (string value)
+#network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
+
+# Allow auto scheduling networks to DHCP agent. (boolean value)
+#network_auto_schedule = true
+
+# Automatically remove networks from offline DHCP agents. (boolean value)
+#allow_automatic_dhcp_failover = true
+
+# Number of DHCP agents scheduled to host a tenant network. If this number is
+# greater than 1, the scheduler automatically assigns multiple DHCP agents for
+# a given tenant network, providing high availability for DHCP service.
+# (integer value)
+dhcp_agents_per_network = 2
+
+# Enable services on an agent with admin_state_up False. If this option is
+# False, when admin_state_up of an agent is turned False, services on it will
+# be disabled. Agents with admin_state_up False are not selected for automatic
+# scheduling regardless of this option. But manual scheduling to such agents is
+# available if this option is True. (boolean value)
+#enable_services_on_agents_with_admin_state_down = false
+
+# The base MAC address used for unique DVR instances by Neutron. The first 3
+# octets will remain unchanged. If the 4th octet is not 00, it will also be
+# used. The others will be randomly generated. The 'dvr_base_mac' *must* be
+# different from 'base_mac' to avoid mixing them up with MACs allocated for
+# tenant ports. A 4-octet example would be dvr_base_mac = fa:16:3f:4f:00:00.
+# The default is 3 octets. (string value)
+#dvr_base_mac = fa:16:3f:00:00:00
+{%- if neutron.dvr_base_mac is defined %}
+  {%- if neutron.base_mac is defined %}
+    {%- if neutron.base_mac != neutron.dvr_base_mac %}
+dvr_base_mac = {{ neutron.dvr_base_mac }}
+    {%- endif %}
+  {%- else %}
+dvr_base_mac = {{ neutron.dvr_base_mac }}
+  {%- endif %}
+{%- endif %}
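+{#- dvr_base_mac is emitted only when defined and different from base_mac, #}
+{#- enforcing the "must be different" warning above at render time. #}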
+
+# System-wide flag to determine the type of router that tenants can create.
+# Only admin can override. (boolean value)
+router_distributed = {{ neutron.get('dvr', 'false') }}
+
+# Determine if setup is configured for DVR. If False, DVR API extension will be
+# disabled. (boolean value)
+enable_dvr = {{ neutron.get('dvr', 'false') }}
+
+# Driver to use for scheduling router to a default L3 agent (string value)
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of routers to L3 agent. (boolean value)
+#router_auto_schedule = true
+
+# Automatically reschedule routers from offline L3 agents to online L3 agents.
+# (boolean value)
+allow_automatic_l3agent_failover = true
+
+# Enable HA mode for virtual routers. (boolean value)
+l3_ha = {{ neutron.get('l3_ha', 'false') }}
+
+# Maximum number of L3 agents which a HA router will be scheduled on. If it is
+# set to 0 then the router will be scheduled on every agent. (integer value)
+max_l3_agents_per_router = 0
+
+# Subnet used for the l3 HA admin network. (string value)
+#l3_ha_net_cidr = 169.254.192.0/18
+
+# The network type to use when creating the HA network for an HA router. By
+# default or if empty, the first 'tenant_network_types' is used. This is
+# helpful when the VRRP traffic should use a specific network which is not the
+# default one. (string value)
+#l3_ha_network_type =
+
+# The physical network name with which the HA network can be created. (string
+# value)
+#l3_ha_network_physical_name =
+
+#
+# From neutron.extensions
+#
+
+# Maximum number of allowed address pairs (integer value)
+#max_allowed_address_pair = 10
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+{%- if neutron.message_queue|default(none) is not none %}
+{%- set _data = neutron.message_queue %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/messaging/_default.conf" %}
+{%- endif %}
+
+{%- set _data = {} %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/service/_wsgi_default.conf" %}
+
+
+[agent]
+
+#
+# From neutron.agent
+#
+
+# Root helper application. Use 'sudo neutron-rootwrap
+# /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to
+# 'sudo' to skip the filtering and just run the command directly. (string
+# value)
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# Use the root helper when listing the namespaces on a system. This may not be
+# required depending on the security configuration. If the root helper is not
+# required, set this to False for a performance improvement. (boolean value)
+#use_helper_for_ns_read = true
+
+#
+# Root helper daemon application to use when possible.
+#
+# Use 'sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf' to run rootwrap
+# in "daemon mode" which has been reported to improve performance at scale. For
+# more information on running rootwrap in "daemon mode", see:
+#
+# https://docs.openstack.org/oslo.rootwrap/latest/user/usage.html#daemon-mode
+#
+# For the agent which needs to execute commands in Dom0 in the hypervisor of
+# XenServer, this option should be set to 'xenapi_root_helper', so that it will
+# keep a XenAPI session to pass commands to Dom0.
+#  (string value)
+#root_helper_daemon = <None>
+{%- if neutron.root_helper_daemon|default(True) %}
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
+{%- endif %}
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+report_interval = 10
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+# Add comments to iptables rules. Set to false to disallow the addition of
+# comments to generated iptables rules that describe each rule's purpose.
+# System must support the iptables comments module for addition of comments.
+# (boolean value)
+#comment_iptables_rules = true
+
+# Duplicate every iptables difference calculation to ensure the format being
+# generated matches the format of iptables-save. This option should not be
+# turned on for production systems because it imposes a performance penalty.
+# (boolean value)
+#debug_iptables_rules = false
+
+# Action to be executed when a child process dies (string value)
+# Possible values:
+# respawn - <No description provided>
+# exit - <No description provided>
+#check_child_processes_action = respawn
+
+# Interval between checks of child process liveness (seconds), use 0 to disable
+# (integer value)
+#check_child_processes_interval = 60
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+
+[cors]
+{%- if neutron.cors is defined %}
+{%- set _data = neutron.cors %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_cors.conf" %}
+{%- endif %}
+
+
+[keystone_authtoken]
+{%- if neutron.identity is defined %}
+{%- set _data = neutron.identity %}
+{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': neutron.cacert_file}) %}{% endif %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- if neutron.get('cache',{}).members is defined and 'cache' not in _data.keys() %}
+{%- do _data.update({'cache': neutron.cache}) %}
+{%- endif %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/keystonemiddleware/_auth_token.conf" %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/keystoneauth/_type_" + auth_type + ".conf" %}
+{%- endif %}
+
+
+[oslo_concurrency]
+{%- set _data = neutron.get('concurrency', {}) %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_concurrency.conf" %}
+
+
+{%- if neutron.message_queue|default(none) is not none %}
+{%- set _data = neutron.message_queue %}
+{%- if _data.engine == 'rabbitmq' %}
+    {%- set messaging_engine = 'rabbit' %}
+{%- else %}
+    {%- set messaging_engine = _data.engine %}
+{%- endif %}
+[oslo_messaging_{{ messaging_engine }}]
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': neutron.cacert_file}) %}{% endif %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/messaging/_" + messaging_engine + ".conf" %}
+{%- endif %}
+
+
+[oslo_messaging_notifications]
+{%- set _data = neutron.notification %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/messaging/_notifications.conf" %}
+
+
+[oslo_middleware]
+{%- set _data = neutron %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_middleware.conf" %}
+
+
+[oslo_policy]
+{%- if neutron.policy is defined %}
+{%- set _data = neutron.policy %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_policy.conf" %}
+{%- endif %}
+
+
+[quotas]
+
+#
+# From neutron
+#
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited. (integer value)
+#default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_port = 500
+
+# Default driver to use for quota checks. (string value)
+#quota_driver = neutron.db.quota.driver.DbQuotaDriver
+{%- if neutron.backend.engine == "contrail" %}
+quota_driver = neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver
+{%- endif %}
+
+# Keep in track in the database of current resource quota usage. Plugins which
+# do not leverage the neutron database should set this flag to False. (boolean
+# value)
+#track_quota_usage = true
+
+#
+# From neutron.extensions
+#
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_floatingip = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group = 10
+
+# Number of security rules allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group_rule = 100
+
+
+[ssl]
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/service/_ssl.conf" %}
+
+
+[ovs]
+{%- if neutron.backend.ovsdb_interface is defined %}
+ovsdb_interface = {{ neutron.backend.ovsdb_interface }}
+{%- endif %}
+{%- if neutron.backend.ovsdb_connection is defined %}
+ovsdb_connection = {{ neutron.backend.ovsdb_connection }}
+{%- endif %}
diff --git a/neutron/files/rocky/neutron-server b/neutron/files/rocky/neutron-server
new file mode 100644
index 0000000..6f48434
--- /dev/null
+++ b/neutron/files/rocky/neutron-server
@@ -0,0 +1,28 @@
+# Generated by Salt.
+{%- from "neutron/map.jinja" import server with context %}
+
+# defaults for neutron-server
+
+# path to config file corresponding to the core_plugin specified in
+# neutron.conf
+#NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini"
+
+{%- if server.backend.engine in ["ml2", "ovn"] %}
+NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/ml2/ml2_conf.ini"
+{%- endif %}
+
+{%- if server.backend.engine == "contrail" %}
+NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
+{%- endif %}
+
+{%- if server.backend.engine == "vmware" %}
+NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/vmware/nsx.ini"
+{%- endif %}
+
+{%- if server.logging.log_appender %}
+DAEMON_ARGS="${DAEMON_ARGS} --log-config-append=/etc/neutron/logging/logging-neutron-server.conf"
+{%- endif %}
+
+{%- if server.l2gw is defined and server.l2gw.get('enabled', False) %}
+DAEMON_ARGS="${DAEMON_ARGS} --config-file=/etc/neutron/l2gw_plugin.ini"
+{%- endif %}
diff --git a/neutron/files/rocky/neutron-server.conf b/neutron/files/rocky/neutron-server.conf
new file mode 100644
index 0000000..d841b74
--- /dev/null
+++ b/neutron/files/rocky/neutron-server.conf
@@ -0,0 +1,613 @@
+{%- from "neutron/map.jinja" import fwaas, server with context %}
+[DEFAULT]
+
+#
+# From neutron
+#
+
+# Where to store Neutron state files. This directory must be writable by the
+# agent. (string value)
+#state_path = /var/lib/neutron
+
+# The host IP to bind to. (host address value)
+bind_host = {{ server.bind.address }}
+
+# The port to bind to (port value)
+# Minimum value: 0
+# Maximum value: 65535
+bind_port = {{ server.bind.port }}
+
+{%- if server.core_plugin is defined %}
+# The core plugin Neutron will use (string value)
+core_plugin = {{ server.core_plugin }}
+{%-   if server.service_plugins is defined %}
+{%-     set service_plugins = [] %}
+{%-     for sname, service in server.service_plugins.items() %}
+{%-       if service.enabled %}
+{%-         do service_plugins.append(sname) %}
+{%-       endif %}
+{%-     endfor %}
+# The service plugins Neutron will use (list value)
+service_plugins = {{ service_plugins|join(',') }}
+{%-   endif %}
+{%- elif server.backend.engine == "contrail" %}
+# The path for API extensions. Note that this can be a colon-separated list of
+# paths. For example: api_extensions_path =
+# extensions:/path/to/more/exts:/even/more/exts. The __path__ of
+# neutron.extensions is appended to this, so if your extensions are in there
+# you don't need to specify them here. (string value)
+api_extensions_path = extensions:/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions:/usr/lib/python2.7/dist-packages/neutron_lbaas/extensions
+
+# The core plugin Neutron will use (string value)
+core_plugin = neutron_plugin_contrail.plugins.opencontrail.contrail_plugin.NeutronPluginContrailCoreV2
+
+# The service plugins Neutron will use (list value)
+service_plugins = neutron_plugin_contrail.plugins.opencontrail.loadbalancer.v2.plugin.LoadBalancerPluginV2
+
+{%- elif server.backend.engine in ["ml2", "ovn"] %}
+# The core plugin Neutron will use (string value)
+core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
+
+{%-   if server.backend.engine == "ml2" %}
+{%-     set l3_plugin = 'neutron.services.l3_router.l3_router_plugin.L3RouterPlugin' %}
+{%-   elif server.backend.engine == "ovn" %}
+{%-     set l3_plugin = 'networking_ovn.l3.l3_ovn.OVNL3RouterPlugin' %}
+{%-   endif %}
+# The service plugins Neutron will use (list value)
+service_plugins = {{ server.backend.get('router', l3_plugin) }},metering
+{%-   if fwaas.get('enabled', False) -%},{{ fwaas[fwaas.api_version]['service_plugin'] }}{%- endif -%}
+{%-   if server.get('qos', True) -%},neutron.services.qos.qos_plugin.QoSPlugin{%- endif -%}
+{%-   if server.get('vlan_aware_vms', False) -%},trunk{%- endif -%}
+{%-   if server.l2gw is defined and server.l2gw.get('enabled', False) -%},networking_l2gw.services.l2gateway.plugin.L2GatewayPlugin{%- endif -%}
+{%-   if server.get('bgp_vpn', {}).get('enabled', False) -%},bgpvpn{%- endif -%}
+{%-   if server.get('sfc', {}).get('enabled', False) -%},flow_classifier,sfc{%- endif -%}
+
+{%- endif %}
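+{#- Illustration: a plain ml2 backend with qos and vlan_aware_vms enabled #}
+{#- renders something like: #}
+{#- service_plugins = ...L3RouterPlugin,metering,...QoSPlugin,trunk #}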
+
+# The type of authentication to use (string value)
+#auth_strategy = keystone
+
+# The base MAC address Neutron will use for VIFs. The first 3 octets will
+# remain unchanged. If the 4th octet is not 00, it will also be used. The
+# others will be randomly generated. (string value)
+#base_mac = fa:16:3e:00:00:00
+{%- if server.base_mac is defined %}
+base_mac = {{ server.base_mac }}
+{%- endif %}
+
+# Allow the usage of the bulk API (boolean value)
+#allow_bulk = true
+
+# The maximum number of items returned in a single response. A value of
+# 'infinite' or a negative integer means no limit. (string value)
+pagination_max_limit = {{ server.pagination_max_limit|default('-1') }}
+
+# Default value of availability zone hints. The availability zone aware
+# schedulers use this when the resources availability_zone_hints is empty.
+# Multiple availability zones can be specified by a comma separated string.
+# This value can be empty. In this case, even if availability_zone_hints for a
+# resource is empty, availability zone is considered for high availability
+# while scheduling the resource. (list value)
+#default_availability_zones =
+
+# Maximum number of DNS nameservers per subnet (integer value)
+#max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet (integer value)
+#max_subnet_host_routes = 20
+
+# Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to
+# True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable
+# environment. Users making subnet creation requests for IPv6 subnets without
+# providing a CIDR or subnetpool ID will be given a CIDR via the Prefix
+# Delegation mechanism. Note that enabling PD will override the behavior of the
+# default IPv6 subnetpool. (boolean value)
+#ipv6_pd_enabled = false
+
+# DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite
+# lease times. (integer value)
+dhcp_lease_duration = {{ server.dhcp_lease_duration|default('600') }}
+
+# Domain to use for building the hostnames (string value)
+dns_domain = {{ server.dns_domain }}
+
+# Driver for external DNS integration. (string value)
+#external_dns_driver = <None>
+{%- if server.backend.get('extension', {}).get('dns', {}).get('enabled', False) %}
+external_dns_driver = {{ server.backend.get('extension', {}).get('dns', {}).get('engine', '') }}
+{%- endif %}
+
+# Allow sending resource operation notification to DHCP agent (boolean value)
+#dhcp_agent_notification = true
+
+# Allow overlapping IP support in Neutron. Attention: the following parameter
+# MUST be set to False if Neutron is being used in conjunction with Nova
+# security groups. (boolean value)
+allow_overlapping_ips = true
+
+# Hostname to be used by the Neutron server, agents and services running on
+# this machine. All the agents and services running on this machine must use
+# the same host value. (host address value)
+#host = example.domain
+
+# This string is prepended to the normal URL that is returned in links to the
+# OpenStack Network API. If it is empty (the default), the URLs are returned
+# unchanged. (string value)
+#network_link_prefix = <None>
+
+# Send notification to nova when port status changes (boolean value)
+notify_nova_on_port_status_changes = true
+
+# Send notification to nova when port data (fixed_ips/floatingip) changes so
+# nova can update its cache. (boolean value)
+notify_nova_on_port_data_changes = true
+
+# Number of seconds between sending events to nova if there are any events to
+# send. (integer value)
+#send_events_interval = 2
+
+# Neutron IPAM (IP address management) driver to use. By default, the reference
+# implementation of the Neutron IPAM driver is used. (string value)
+#ipam_driver = internal
+
+# If True, then allow plugins that support it to create VLAN transparent
+# networks. (boolean value)
+#vlan_transparent = false
+
+# If True, then allow plugins to decide whether to perform validations on
+# filter parameters. Filter validation is enabled if this config is turned on
+# and it is supported by all plugins. (boolean value)
+#filter_validation = true
+
+# MTU of the underlying physical network. Neutron uses this value to calculate
+# MTU for all virtual network components. For flat and VLAN networks, neutron
+# uses this value without modification. For overlay networks such as VXLAN,
+# neutron automatically subtracts the overlay protocol overhead from this
+# value. Defaults to 1500, the standard value for Ethernet. (integer value)
+# Deprecated group/name - [ml2]/segment_mtu
+global_physnet_mtu = {{ server.get('global_physnet_mtu', '1500') }}
+
+# Number of backlog requests to configure the socket with (integer value)
+#backlog = 4096
+
+# Number of seconds to keep retrying to listen (integer value)
+#retry_until_window = 30
+
+# Enable SSL on the API server (boolean value)
+#use_ssl = false
+
+# Seconds between running periodic tasks. (integer value)
+#periodic_interval = 40
+
+# Number of separate API worker processes for service. If not specified, the
+# default is equal to the number of CPUs available for best performance.
+# (integer value)
+#api_workers = <None>
+{%- if server.api_workers is defined %}
+api_workers = {{ server.api_workers }}
+{%- endif %}
+
+# Number of RPC worker processes for service. (integer value)
+rpc_workers = {{ server.rpc_workers|default(grains.num_cpus) }}
+
+# Number of RPC worker processes dedicated to state reports queue. (integer
+# value)
+rpc_state_report_workers = {{ server.rpc_state_report_workers|default('4') }}
+
+# Range of seconds to randomly delay when starting the periodic task scheduler
+# to reduce stampeding. (Disable by setting to 0) (integer value)
+#periodic_fuzzy_delay = 5
+
+#
+# From neutron.agent
+#
+
+# The driver used to manage the virtual interface. (string value)
+#interface_driver = <None>
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
+
+# User (uid or name) running metadata proxy after its initialization (if empty:
+# agent effective user). (string value)
+#metadata_proxy_user =
+
+# Group (gid or name) running metadata proxy after its initialization (if
+# empty: agent effective group). (string value)
+#metadata_proxy_group =
+
+#
+# From neutron.db
+#
+
+# Seconds to regard the agent is down; should be at least twice
+# report_interval, to be sure the agent is down for good. (integer value)
+#agent_down_time = 75
+
+# Representing the resource type whose load is being reported by the agent.
+# This can be "networks", "subnets" or "ports". When specified (Default is
+# networks), the server will extract particular load sent as part of its agent
+# configuration object from the agent report state, which is the number of
+# resources being consumed, at every report_interval. dhcp_load_type can be
+# used in combination with network_scheduler_driver =
+# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler. When the
+# network_scheduler_driver is WeightScheduler, dhcp_load_type can be configured
+# to represent the choice for the resource being balanced. Example:
+# dhcp_load_type=networks (string value)
+# Possible values:
+# networks - <No description provided>
+# subnets - <No description provided>
+# ports - <No description provided>
+#dhcp_load_type = networks
+
+# Agent starts with admin_state_up=False when enable_new_agents=False. In the
+# case, user's resources will not be scheduled automatically to the agent until
+# admin changes admin_state_up to True. (boolean value)
+#enable_new_agents = true
+
+# Maximum number of routes per router (integer value)
+#max_routes = 30
+
+# Define the default value of enable_snat if not provided in
+# external_gateway_info. (boolean value)
+#enable_snat_by_default = true
+
+# Driver to use for scheduling network to DHCP agent (string value)
+#network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler
+
+# Allow auto scheduling networks to DHCP agent. (boolean value)
+#network_auto_schedule = true
+
+# Automatically remove networks from offline DHCP agents. (boolean value)
+#allow_automatic_dhcp_failover = true
+
+# Number of DHCP agents scheduled to host a tenant network. If this number is
+# greater than 1, the scheduler automatically assigns multiple DHCP agents for
+# a given tenant network, providing high availability for DHCP service.
+# (integer value)
+dhcp_agents_per_network = 2
+
+# Enable services on an agent with admin_state_up False. If this option is
+# False, when admin_state_up of an agent is turned False, services on it will
+# be disabled. Agents with admin_state_up False are not selected for automatic
+# scheduling regardless of this option. But manual scheduling to such agents is
+# available if this option is True. (boolean value)
+#enable_services_on_agents_with_admin_state_down = false
+
+# The base MAC address used for unique DVR instances by Neutron. The first 3
+# octets will remain unchanged. If the 4th octet is not 00, it will also be
+# used. The others will be randomly generated. The 'dvr_base_mac' *must* be
+# different from 'base_mac' to avoid mixing them up with MACs allocated for
+# tenant ports. A 4-octet example would be dvr_base_mac = fa:16:3f:4f:00:00.
+# The default is 3 octets. (string value)
+#dvr_base_mac = fa:16:3f:00:00:00
+{%- if server.dvr_base_mac is defined %}
+  {%- if server.base_mac is defined %}
+    {%- if server.base_mac != server.dvr_base_mac %}
+dvr_base_mac = {{ server.dvr_base_mac }}
+    {%- endif %}
+  {%- else %}
+dvr_base_mac = {{ server.dvr_base_mac }}
+  {%- endif %}
+{%- endif %}
+
+# System-wide flag to determine the type of router that tenants can create.
+# Only admin can override. (boolean value)
+router_distributed = {{ server.get('dvr', 'false') }}
+
+# Determine if setup is configured for DVR. If False, DVR API extension will be
+# disabled. (boolean value)
+enable_dvr = {{ server.get('dvr', 'false') }}
+
+# Driver to use for scheduling router to a default L3 agent (string value)
+router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.ChanceScheduler
+
+# Allow auto scheduling of routers to L3 agent. (boolean value)
+#router_auto_schedule = true
+
+# Automatically reschedule routers from offline L3 agents to online L3 agents.
+# (boolean value)
+allow_automatic_l3agent_failover = true
+
+# Enable HA mode for virtual routers. (boolean value)
+l3_ha = {{ server.get('l3_ha', 'false') }}
+
+# Maximum number of L3 agents which a HA router will be scheduled on. If it is
+# set to 0 then the router will be scheduled on every agent. (integer value)
+max_l3_agents_per_router = 0
+
+# Subnet used for the l3 HA admin network. (string value)
+#l3_ha_net_cidr = 169.254.192.0/18
+
+# The network type to use when creating the HA network for an HA router. By
+# default or if empty, the first 'tenant_network_types' is used. This is
+# helpful when the VRRP traffic should use a specific network which is not the
+# default one. (string value)
+#l3_ha_network_type =
+
+# The physical network name with which the HA network can be created. (string
+# value)
+#l3_ha_network_physical_name =
+
+#
+# From neutron.extensions
+#
+
+# Maximum number of allowed address pairs (integer value)
+#max_allowed_address_pair = 10
+
+{%- if server.logging is defined %}
+{%- set _data = server.logging %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+{%- set _data = server.message_queue %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/messaging/_default.conf" %}
+
+{%- set _data = {} %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/service/_wsgi_default.conf" %}
+
+
+[agent]
+
+#
+# From neutron.agent
+#
+
+# Root helper application. Use 'sudo neutron-rootwrap
+# /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to
+# 'sudo' to skip the filtering and just run the command directly. (string
+# value)
+root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
+
+# Use the root helper when listing the namespaces on a system. This may not be
+# required depending on the security configuration. If the root helper is not
+# required, set this to False for a performance improvement. (boolean value)
+#use_helper_for_ns_read = true
+
+#
+# Root helper daemon application to use when possible.
+#
+# Use 'sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf' to run rootwrap
+# in "daemon mode" which has been reported to improve performance at scale. For
+# more information on running rootwrap in "daemon mode", see:
+#
+# https://docs.openstack.org/oslo.rootwrap/latest/user/usage.html#daemon-mode
+#
+# For the agent which needs to execute commands in Dom0 in the hypervisor of
+# XenServer, this option should be set to 'xenapi_root_helper', so that it will
+# keep a XenAPI session to pass commands to Dom0.
+#  (string value)
+#root_helper_daemon = <None>
+{%- if server.root_helper_daemon|default(True) %}
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
+{%- endif %}
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+report_interval = 10
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+# Add comments to iptables rules. Set to false to disallow the addition of
+# comments to generated iptables rules that describe each rule's purpose.
+# System must support the iptables comments module for addition of comments.
+# (boolean value)
+#comment_iptables_rules = true
+
+# Duplicate every iptables difference calculation to ensure the format being
+# generated matches the format of iptables-save. This option should not be
+# turned on for production systems because it imposes a performance penalty.
+# (boolean value)
+#debug_iptables_rules = false
+
+# Action to be executed when a child process dies (string value)
+# Possible values:
+# respawn - <No description provided>
+# exit - <No description provided>
+#check_child_processes_action = respawn
+
+# Interval between checks of child process liveness (seconds), use 0 to disable
+# (integer value)
+#check_child_processes_interval = 60
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+
+[cors]
+{%- if server.cors is defined %}
+{%- set _data = server.cors %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/_cors.conf" %}
+{%- endif %}
+
+
+[database]
+{%- set _data = server.database %}
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': server.cacert_file}) %}{% endif %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/_database.conf" %}
+
+
+[keystone_authtoken]
+{%- set _data = server.identity %}
+{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': server.cacert_file}) %}{% endif %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- if server.get('cache',{}).members is defined and 'cache' not in _data.keys() %}
+{% do _data.update({'cache': server.cache}) %}
+{%- endif %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/keystonemiddleware/_auth_token.conf" %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/keystoneauth/_type_" + auth_type + ".conf" %}
+
+
+[matchmaker_redis]
+
+#
+# From oslo.messaging
+#
+
+# DEPRECATED: Host to locate redis. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#host = 127.0.0.1
+
+# DEPRECATED: Use this port to connect to redis host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#port = 6379
+
+# DEPRECATED: Password for Redis server (optional). (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#password =
+
+# DEPRECATED: List of Redis Sentinel hosts (fault tolerance mode), e.g.,
+# [host:port, host1:port ... ] (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#sentinel_hosts =
+
+# Redis replica set name. (string value)
+#sentinel_group_name = oslo-messaging-zeromq
+
+# Time in ms to wait between connection attempts. (integer value)
+#wait_timeout = 2000
+
+# Time in ms to wait before the transaction is killed. (integer value)
+#check_timeout = 20000
+
+# Timeout in ms on blocking socket operations. (integer value)
+#socket_timeout = 10000
+
+
+[nova]
+{%- set _data = server.get('compute', server.get('identity', {})) %}
+{%- if 'protocol' not in _data.keys() %}{% do _data.update({'protocol': server.get('identity', {}).get('protocol', 'http')}) %}{% endif %}
+{%- if 'cacert_file' not in _data.keys() %}{% do _data.update({'cacert_file': server.cacert_file}) %}{% endif %}
+{%- set auth_type = _data.get('auth_type', 'password') %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/keystoneauth/_type_" + auth_type + ".conf" %}
+
+
+[oslo_concurrency]
+{%- set _data = server.get('concurrency', {}) %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/_concurrency.conf" %}
+
+
+{%- if server.message_queue is defined %}
+{%- set _data = server.message_queue %}
+{%- if _data.engine == 'rabbitmq' %}
+    {%- set messaging_engine = 'rabbit' %}
+{%- else %}
+    {%- set messaging_engine = _data.engine %}
+{%- endif %}
+[oslo_messaging_{{ messaging_engine }}]
+{%- if _data.ssl is defined and 'cacert_file' not in _data.get('ssl', {}).keys() %}{% do _data['ssl'].update({'cacert_file': server.cacert_file}) %}{% endif %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/messaging/_" + messaging_engine + ".conf" %}
+{%- endif %}
+
+
+[oslo_messaging_notifications]
+{%- set _data = server.notification %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/messaging/_notifications.conf" %}
+
+
+[oslo_middleware]
+{%- set _data = server %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/_middleware.conf" %}
+
+
+[oslo_policy]
+{%- if server.policy is defined %}
+{%- set _data = server.policy %}
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/_policy.conf" %}
+{%- endif %}
+
+
+[quotas]
+
+#
+# From neutron
+#
+
+# Default number of resource allowed per tenant. A negative value means
+# unlimited. (integer value)
+#default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_port = 500
+
+# Default driver to use for quota checks. (string value)
+#quota_driver = neutron.db.quota.driver.DbQuotaDriver
+{%- if server.backend.engine == "contrail" %}
+quota_driver = neutron_plugin_contrail.plugins.opencontrail.quota.driver.QuotaDriver
+{%- endif %}
+
+# Keep in track in the database of current resource quota usage. Plugins which
+# do not leverage the neutron database should set this flag to False. (boolean
+# value)
+#track_quota_usage = true
+
+#
+# From neutron.extensions
+#
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_floatingip = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group = 10
+
+# Number of security rules allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group_rule = 100
+
+
+[ssl]
+{%- include "oslo_templates/files/" ~ server.version ~ "/oslo/service/_ssl.conf" %}
+
+
+[ovs]
+{%- if server.backend.ovsdb_interface is defined %}
+ovsdb_interface = {{ server.backend.ovsdb_interface }}
+{%- endif %}
+{%- if server.backend.ovsdb_connection is defined %}
+ovsdb_connection = {{ server.backend.ovsdb_connection }}
+{%- endif %}
+
+#
+# Advanced services configs
+#
+
+{% if server.lbaas is defined -%}
+{%- include "neutron/files/" ~ server.version ~ "/lbaas.conf" %}
+{% endif %}
+
+{% if server.bgp_vpn is defined -%}
+{%- include "neutron/files/" ~ server.version ~ "/bgpvpn.conf" %}
+{% endif %}
+
+{% if server.sfc is defined -%}
+{%- include "neutron/files/" ~ server.version ~ "/plugins/sfc.conf" %}
+{% endif %}
diff --git a/neutron/files/rocky/openvswitch_agent.ini b/neutron/files/rocky/openvswitch_agent.ini
new file mode 100644
index 0000000..5330ff0
--- /dev/null
+++ b/neutron/files/rocky/openvswitch_agent.ini
@@ -0,0 +1,285 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+[DEFAULT]
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+
+[agent]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Minimize polling by monitoring ovsdb for interface changes. (boolean value)
+#minimize_polling = true
+
+# The number of seconds to wait before respawning the ovsdb monitor after
+# losing communication with it. (integer value)
+#ovsdb_monitor_respawn_interval = 30
+
+{%- if "vxlan" in neutron.backend.tenant_network_types %}
+# Network types supported by the agent (gre, vxlan and/or geneve). (list value)
+tunnel_types = vxlan
+
+# The UDP port to use for VXLAN tunnels. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+vxlan_udp_port = 4789
+
+# MTU size of veth interfaces (integer value)
+#veth_mtu = 9000
+
+# Use ML2 l2population mechanism driver to learn remote MAC and IPs and improve
+# tunnel scalability. (boolean value)
+l2_population = true
+
+# Enable local ARP responder if it is supported. Requires OVS 2.1 and ML2
+# l2population driver. Allows the switch (when supporting an overlay) to
+# respond to an ARP request locally without performing a costly ARP broadcast
+# into the overlay. (boolean value)
+arp_responder = true
+
+# Set or un-set the don't fragment (DF) bit on outgoing IP packet carrying
+# GRE/VXLAN tunnel. (boolean value)
+#dont_fragment = true
+{%- endif %}
+
+# Make the l2 agent run in DVR mode. (boolean value)
+enable_distributed_routing = {{ neutron.get('dvr', 'false') }}
+
+# Reset flow table on start. Setting this to True will cause brief traffic
+# interruption. (boolean value)
+drop_flows_on_start = false
+
+# Set or un-set the tunnel header checksum on outgoing IP packet carrying
+# GRE/VXLAN tunnel. (boolean value)
+#tunnel_csum = false
+
+# DEPRECATED: Selects the Agent Type reported (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#agent_type = Open vSwitch agent
+
+# Extensions list to use (list value)
+{%- set extensions = [] %}
+{%- for section_key in ('ovs_extension', 'extension') %}
+{%-   for ext_name, ext_params in neutron.backend.get(section_key, {}).items() %}
+{%-     do extensions.append(ext_name) if ext_params.get('enabled', False) %}
+{%-   endfor %}
+{%- endfor %}
+{#- NOTE: The line below keeps backward compatibility from when extensions #}
+{#- were separate properties without the neutron:backend:extension pillar #}
+{%- do extensions.append('qos') if neutron.get('qos', True) %}
+extensions = {{ extensions|unique|join(',') }}
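+{#- Illustration (hypothetical pillar): enabling e.g. #}
+{#- neutron:compute:backend:extension:dns:enabled: true renders #}
+{#- "extensions = dns,qos" (qos is appended by the fallback above unless #}
+{#- the qos pillar is set to false). #}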
+
+
+[network_log]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Maximum packets logging per second. (integer value)
+# Minimum value: 100
+#rate_limit = 100
+
+# Maximum number of packets per rate_limit. (integer value)
+# Minimum value: 25
+#burst_limit = 25
+
+# Output logfile path on agent side, default syslog file. (string value)
+#local_output_log_base = <None>
+
+
+[ovs]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Integration bridge to use. Do not change this parameter unless you have a
+# good reason to. This is the name of the OVS integration bridge. There is one
+# per hypervisor. The integration bridge acts as a virtual 'patch bay'. All VM
+# VIFs are attached to this bridge and then 'patched' according to their
+# network connectivity. (string value)
+#integration_bridge = br-int
+
+# Tunnel bridge to use. (string value)
+#tunnel_bridge = br-tun
+
+# Peer patch port in integration bridge for tunnel bridge. (string value)
+#int_peer_patch_port = patch-tun
+
+# Peer patch port in tunnel bridge for integration bridge. (string value)
+#tun_peer_patch_port = patch-int
+
+# IP address of local overlay (tunnel) network endpoint. Use either an IPv4 or
+# IPv6 address that resides on one of the host network interfaces. The IP
+# version of this value must match the value of the 'overlay_ip_version' option
+# in the ML2 plug-in configuration file on the neutron server node(s). (IP
+# address value)
+#local_ip = <None>
+{%- if 'vxlan' in neutron.backend.tenant_network_types %}
+local_ip = {{ neutron.local_ip }}
+{%- endif %}
+
+# Comma-separated list of <physical_network>:<bridge> tuples mapping physical
+# network names to the agent's node-specific Open vSwitch bridge names to be
+# used for flat and VLAN networks. The length of bridge names should be no more
+# than 11. Each bridge must exist, and should have a physical network interface
+# configured as a port. All physical networks configured on the server should
+# have mappings to appropriate bridges on each agent. Note: If you remove a
+# bridge from this mapping, make sure to disconnect it from the integration
+# bridge as it won't be managed by the agent anymore. (list value)
+{%- set bridge_mappings = [] %}
+{%- if neutron.bridge_mappings is defined %}
+{%-   for physnet, bridge in neutron.bridge_mappings.items() %}
+{%-     do bridge_mappings.append(physnet ~ ':' ~ bridge) %}
+{%-   endfor %}
+{%- endif %}
+{%- if 'br-floating' not in neutron.get('bridge_mappings', {}).values() %}
+{%-   if neutron.get('external_access', True) %}
+{%-     do bridge_mappings.append('physnet1:br-floating') %}
+{%-   endif %}
+{%- endif %}
+{%- if 'br-prv' not in neutron.get('bridge_mappings', {}).values() %}
+{%-   if "vlan" in neutron.backend.tenant_network_types %}
+{%-     do bridge_mappings.append('physnet2:br-prv') %}
+{%-   endif %}
+{%- endif %}
+{%- if 'br-baremetal' not in neutron.get('bridge_mappings', {}).values() %}
+{%-   if neutron.get('ironic_enabled', False) %}
+{%-     do bridge_mappings.append('physnet3:br-baremetal') %}
+{%-   endif %}
+{%- endif %}
+{%- if bridge_mappings %}
+bridge_mappings = {{ bridge_mappings|join(',') }}
+{%- else %}
+#bridge_mappings =
+{%- endif %}
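+{#- Illustration: with external_access enabled and vlan tenant networks the #}
+{#- defaults render bridge_mappings = physnet1:br-floating,physnet2:br-prv; #}
+{#- explicit pillar mappings are emitted first and a physnetN fallback is #}
+{#- skipped when its bridge is already mapped. #}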
+
+# Use veths instead of patch ports to interconnect the integration bridge to
+# physical networks. Support kernel without Open vSwitch patch port support so
+# long as it is set to True. (boolean value)
+#use_veth_interconnection = false
+
+# DEPRECATED: OpenFlow interface to use. (string value)
+# Possible values:
+# ovs-ofctl - <No description provided>
+# native - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#of_interface = native
+
+# OVS datapath to use. 'system' is the default value and corresponds to the
+# kernel datapath. To enable the userspace datapath set this value to 'netdev'.
+# (string value)
+# Possible values:
+# system - <No description provided>
+# netdev - <No description provided>
+#datapath_type = system
+{%- if neutron.get('dpdk', False) %}
+datapath_type = netdev
+{%- endif %}
+
+# OVS vhost-user socket directory. (string value)
+#vhostuser_socket_dir = /var/run/openvswitch
+{%- if neutron.vhost_socket_dir is defined %}
+vhostuser_socket_dir = {{ neutron.vhost_socket_dir }}
+{%- endif %}
+
+# Address to listen on for OpenFlow connections. Used only for 'native' driver.
+# (IP address value)
+#of_listen_address = 127.0.0.1
+
+# Port to listen on for OpenFlow connections. Used only for 'native' driver.
+# (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#of_listen_port = 6633
+
+# Timeout in seconds to wait for the local switch connecting the controller.
+# Used only for 'native' driver. (integer value)
+#of_connect_timeout = 30
+
+# Timeout in seconds to wait for a single OpenFlow request. Used only for
+# 'native' driver. (integer value)
+#of_request_timeout = 10
+
+# DEPRECATED: The interface for interacting with the OVSDB (string value)
+# Possible values:
+# native - <No description provided>
+# vsctl - <No description provided>
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#ovsdb_interface = native
+
+# The connection string for the OVSDB backend. Will be used by ovsdb-client
+# when monitoring and used for the all ovsdb commands when native
+# ovsdb_interface is enabled (string value)
+#ovsdb_connection = tcp:127.0.0.1:6640
+
+# The SSL private key file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_key_file = <None>
+
+# The SSL certificate file to use when interacting with OVSDB. Required when
+# using an "ssl:" prefixed ovsdb_connection (string value)
+#ssl_cert_file = <None>
+
+# The Certificate Authority (CA) certificate to use when interacting with
+# OVSDB.  Required when using an "ssl:" prefixed ovsdb_connection (string
+# value)
+#ssl_ca_cert_file = <None>
+
+# Enable OVSDB debug logs (boolean value)
+#ovsdb_debug = false
+
+
+[securitygroup]
+
+#
+# From neutron.ml2.ovs.agent
+#
+
+# Driver for security groups firewall in the L2 agent (string value)
+{%- if not neutron.get('security_groups_enabled', True) %}
+{%-   set _firewall_driver = 'noop' %}
+{%- elif neutron.dpdk or neutron.get('vlan_aware_vms', False) %}
+{%-   set _firewall_driver = 'openvswitch' %}
+{%- else %}
+{%-   set _firewall_driver = 'iptables_hybrid' %}
+{%- endif %}
+firewall_driver = {{ neutron.get('firewall_driver', _firewall_driver) }}
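+# NOTE: the driver is derived from the pillar flags above; it can also be
+# pinned explicitly, e.g. (sketch, pillar path assumed):
+#   neutron:
+#     compute:
+#       firewall_driver: openvswitch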
+
+# Controls whether the neutron security group API is enabled in the server. It
+# should be false when using no security groups or using the nova security
+# group API. (boolean value)
+enable_security_group = {{ neutron.get('security_groups_enabled', 'true') }}
+
+# Use ipset to speed-up the iptables based security groups. Enabling ipset
+# support requires that ipset is installed on L2 agent node. (boolean value)
+#enable_ipset = true
+
+
+[xenapi]
+
+#
+# From neutron.ml2.xenapi
+#
+
+# URL for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_url = <None>
+
+# Username for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_username = <None>
+
+# Password for connection to XenServer/Xen Cloud Platform. (string value)
+#connection_password = <None>
diff --git a/neutron/files/rocky/ovn/metadata-agent.ini b/neutron/files/rocky/ovn/metadata-agent.ini
new file mode 100644
index 0000000..8a65292
--- /dev/null
+++ b/neutron/files/rocky/ovn/metadata-agent.ini
@@ -0,0 +1,97 @@
+{%- from "neutron/map.jinja" import compute with context -%}
+[DEFAULT]
+
+#
+# From networking_ovn.metadata.agent
+#
+
+# Location for Metadata Proxy UNIX domain socket. (string value)
+#metadata_proxy_socket = $state_path/metadata_proxy
+
+# User (uid or name) running metadata proxy after its initialization
+# (if empty: agent effective user). (string value)
+#metadata_proxy_user =
+
+# Group (gid or name) running metadata proxy after its initialization
+# (if empty: agent effective group). (string value)
+#metadata_proxy_group =
+
+# Name of Open vSwitch bridge to use (string value)
+#ovs_integration_bridge = br-int
+
+# Certificate Authority public key (CA cert) file for ssl (string
+# value)
+#auth_ca_cert = <None>
+
+# IP address or DNS name of Nova metadata server. (unknown value)
+# Deprecated group/name - [DEFAULT]/nova_metadata_ip
+nova_metadata_host = {{ compute.metadata.host }}
+
+# TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
+
+# When proxying metadata requests, Neutron signs the Instance-ID
+# header with a shared secret to prevent spoofing. You may select any
+# string for a secret, but it must match here and in the configuration
+# used by the Nova Metadata Server. NOTE: Nova uses the same config
+# key, but in [neutron] section. (string value)
+metadata_proxy_shared_secret = {{ compute.metadata.password }}
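+# NOTE: nova_metadata_host and metadata_proxy_shared_secret above are
+# rendered from pillar; a minimal sketch (values illustrative):
+#   neutron:
+#     compute:
+#       metadata:
+#         host: 10.10.10.10
+#         password: secret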
+
+# Protocol to access nova metadata, http or https (string value)
+# Possible values:
+# http - <No description provided>
+# https - <No description provided>
+#nova_metadata_protocol = http
+
+# Allow to perform insecure SSL (https) requests to nova metadata
+# (boolean value)
+#nova_metadata_insecure = false
+
+# Client certificate for nova metadata api server. (string value)
+#nova_client_cert =
+
+# Private key of client certificate. (string value)
+#nova_client_priv_key =
+
+# Metadata Proxy UNIX domain socket mode, 4 values allowed: 'deduce':
+# deduce mode from metadata_proxy_user/group values, 'user': set
+# metadata proxy socket mode to 0o644, to use when metadata_proxy_user
+# is agent effective user or root, 'group': set metadata proxy socket
+# mode to 0o664, to use when metadata_proxy_group is agent effective
+# group or root, 'all': set metadata proxy socket mode to 0o666, to
+# use otherwise. (string value)
+# Possible values:
+# deduce - <No description provided>
+# user - <No description provided>
+# group - <No description provided>
+# all - <No description provided>
+#metadata_proxy_socket_mode = deduce
+
+# Number of separate worker processes for metadata server (defaults to
+# half of the number of CPUs) (integer value)
+#metadata_workers = 16
+
+# Number of backlog requests to configure the metadata server socket
+# with (integer value)
+#metadata_backlog = 4096
+
+[ovs]
+
+#
+# From networking_ovn.metadata.agent
+#
+
+# The connection string for the native OVSDB backend.
+# Use tcp:IP:PORT for TCP connection.
+# Use unix:FILE for unix domain socket connection. (string value)
+#ovsdb_connection = unix:/usr/local/var/run/openvswitch/db.sock
+
+# Timeout in seconds for the OVSDB connection transaction (integer
+# value)
+#ovsdb_connection_timeout = 180
+
+[ovn]
+
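+# NOTE: the southbound endpoint below is rendered from the compute
+# context; a minimal pillar sketch (value illustrative):
+#   neutron:
+#     compute:
+#       controller_vip: 10.10.10.254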
+ovn_sb_connection = tcp:{{ compute.controller_vip }}:6642
diff --git a/neutron/files/rocky/ovn/metadata-agent.systemd b/neutron/files/rocky/ovn/metadata-agent.systemd
new file mode 100644
index 0000000..552f7d7
--- /dev/null
+++ b/neutron/files/rocky/ovn/metadata-agent.systemd
@@ -0,0 +1,22 @@
+[Unit]
+Description=OpenStack Networking OVN Metadata Agent
+After=rsyslog.target network.target openvswitch-switch.service
+Requires=openvswitch-switch.service
+
+[Service]
+Type=simple
+User=neutron
+Group=neutron
+PermissionsStartOnly=true
+WorkingDirectory=/var/lib/neutron
+ExecStartPre=/bin/mkdir -p /var/lock/neutron /var/log/neutron /var/lib/neutron
+ExecStartPre=/bin/chown neutron:neutron /var/lock/neutron /var/lib/neutron
+ExecStartPre=/bin/chown neutron:adm /var/log/neutron
+ExecStart=/usr/bin/networking-ovn-metadata-agent --config-file /etc/neutron/plugins/ovn/metadata-agent.ini --config-file /etc/neutron/neutron.conf --log-file /var/log/neutron/ovn-metadata-agent.log
+PrivateTmp=false
+KillMode=process
+Restart=on-failure
+TimeoutStopSec=15
+
+[Install]
+WantedBy=multi-user.target
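+
+# NOTE (sketch): assuming the formula installs this unit under a name such
+# as networking-ovn-metadata-agent.service, it can be enabled with:
+#   systemctl enable --now networking-ovn-metadata-agent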
diff --git a/neutron/files/rocky/plugins/nsx.ini b/neutron/files/rocky/plugins/nsx.ini
new file mode 100644
index 0000000..2fe53a2
--- /dev/null
+++ b/neutron/files/rocky/plugins/nsx.ini
@@ -0,0 +1,1243 @@
+{%- from "neutron/map.jinja" import server with context %}
+[DEFAULT]
+
+#
+# From nsx
+#
+
+# This is uuid of the default NSX Transport zone that will be used for
+# creating tunneled isolated "Neutron" networks. It needs to be
+# created in NSX before starting Neutron with the nsx plugin. (string
+# value)
+#default_tz_uuid = <None>
+{%- if server.vmware.default_tz_uuid is defined %}
+default_tz_uuid = {{ server.vmware.default_tz_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the NSX L3 Gateway service which will be used for
+# implementing routers and floating IPs (string value)
+#default_l3_gw_service_uuid = <None>
+{%- if server.vmware.default_l3_gw_service_uuid is defined %}
+default_l3_gw_service_uuid = {{ server.vmware.default_l3_gw_service_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the NSX L2 Gateway service which will be used by
+# default for network gateways (string value)
+#default_l2_gw_service_uuid = <None>
+{%- if server.vmware.default_l2_gw_service_uuid is defined %}
+default_l2_gw_service_uuid = {{ server.vmware.default_l2_gw_service_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the Service Cluster which will be used by logical
+# services like dhcp and metadata (string value)
+#default_service_cluster_uuid = <None>
+{%- if server.vmware.default_service_cluster_uuid is defined %}
+default_service_cluster_uuid = {{ server.vmware.default_service_cluster_uuid }}
+{%- endif %}
+
+# Name of the interface on a L2 Gateway transport node which should be
+# used by default when setting up a network connection (string value)
+# Deprecated group/name - [DEFAULT]/default_interface_name
+#nsx_default_interface_name = breth0
+{%- if server.vmware.get('nsx', {}).default_interface_name is defined %}
+nsx_default_interface_name = {{ server.vmware.nsx.default_interface_name }}
+{%- endif %}
+
+# User name for NSX controllers in this cluster (string value)
+# Deprecated group/name - [DEFAULT]/nvp_user
+#nsx_user = admin
+{%- if server.vmware.get('nsx', {}).user is defined %}
+nsx_user = {{ server.vmware.nsx.user }}
+{%- endif %}
+
+# Password for NSX controllers in this cluster (string value)
+# Deprecated group/name - [DEFAULT]/nvp_password
+#nsx_password = admin
+{%- if server.vmware.get('nsx', {}).password is defined %}
+nsx_password = {{ server.vmware.nsx.password }}
+{%- endif %}
+
+# Time before aborting a request on an unresponsive controller
+# (Seconds) (integer value)
+#http_timeout = 75
+{%- if server.vmware.http_timeout is defined %}
+http_timeout = {{ server.vmware.http_timeout }}
+{%- endif %}
+
+# Maximum number of times a particular request should be retried
+# (integer value)
+#retries = 2
+{%- if server.vmware.retries is defined %}
+retries = {{ server.vmware.retries }}
+{%- endif %}
+
+# Maximum number of times a redirect response should be followed
+# (integer value)
+#redirects = 2
+{%- if server.vmware.redirects is defined %}
+redirects = {{ server.vmware.redirects }}
+{%- endif %}
+
+# Comma-separated list of NSX controller endpoints (<ip>:<port>). When
+# port is omitted, 443 is assumed. This option MUST be specified.
+# e.g.: aa.bb.cc.dd, ee.ff.gg.hh.ee:80 (list value)
+# Deprecated group/name - [DEFAULT]/nvp_controllers
+#nsx_controllers =
+{%- set nsx_controllers = [] %}
+{%- for _, controller in server.vmware.get('controllers', {}).iteritems() %}
+{%- do nsx_controllers.append(controller.host + ":" + controller.get('port', '443')|string) %}
+{%- endfor %}
+{%- if nsx_controllers %}nsx_controllers = {{ ','.join(nsx_controllers) }}{% endif %}
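+# NOTE: the controller list above is built from pillar; a minimal sketch
+# (hosts illustrative):
+#   neutron:
+#     server:
+#       vmware:
+#         controllers:
+#           nsx01:
+#             host: 172.16.10.101
+#             port: '443'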
+
+# Reconnect connection to nsx if not used within this amount of time.
+# (integer value)
+#conn_idle_timeout = 900
+{%- if server.vmware.conn_idle_timeout is defined %}
+conn_idle_timeout = {{ server.vmware.conn_idle_timeout }}
+{%- endif %}
+
+# Specify the class path for the Layer 2 gateway backend driver (i.e.
+# NSXv3/NSX-V). This field will be used when a L2 Gateway service
+# plugin is configured. (string value)
+#nsx_l2gw_driver = <None>
+{%- if server.vmware.get('nsx', {}).l2gw_driver is defined %}
+nsx_l2gw_driver = {{ server.vmware.nsx.l2gw_driver }}
+{%- endif %}
+
+# (Optional) URL for distributed locking coordination resource for
+# lock manager. This value is passed as a parameter to tooz
+# coordinator. By default, value is None and oslo_concurrency is used
+# for single-node lock management. (string value)
+#locking_coordinator_url = <None>
+{%- if server.vmware.get('nsx', {}).locking_coordinator_url is defined %}
+locking_coordinator_url = {{ server.vmware.nsx.locking_coordinator_url }}
+{%- endif %}
+
+# If true, the server allows the caller to specify the id of
+# resources. This should only be enabled in order to allow one to
+# migrate an existing install of neutron to the nsx-v3 plugin.
+# (boolean value)
+#api_replay_mode = false
+{%- if server.vmware.get('nsx', {}).api_replay_mode is defined %}
+api_replay_mode = {{ server.vmware.nsx.api_replay_mode }}
+{%- endif %}
+
+# An ordered list of extension driver entrypoints to be loaded from
+# the vmware_nsx.extension_drivers namespace. (list value)
+#nsx_extension_drivers =
+{%- if server.vmware.get('nsx', {}).extension_drivers is defined %}
+nsx_extension_drivers = {{ ','.join(server.vmware.nsx.extension_drivers) }}
+{%- endif %}
+
+
+[dvs]
+
+#
+# From nsx
+#
+
+# Hostname or IP address for connection to VMware vCenter host.
+# (string value)
+#host_ip = <None>
+{%- if server.vmware.get('dvs', {}).host_ip is defined %}
+host_ip = {{ server.vmware.dvs.host_ip }}
+{%- endif %}
+
+# Port for connection to VMware vCenter host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+{%- if server.vmware.get('dvs', {}).host_port is defined %}
+host_port = {{ server.vmware.dvs.host_port }}
+{%- endif %}
+
+# Username for connection to VMware vCenter host. (string value)
+#host_username = <None>
+{%- if server.vmware.get('dvs', {}).host_username is defined %}
+host_username = {{ server.vmware.dvs.host_username }}
+{%- endif %}
+
+# Password for connection to VMware vCenter host. (string value)
+#host_password = <None>
+{%- if server.vmware.get('dvs', {}).host_password is defined %}
+host_password = {{ server.vmware.dvs.host_password }}
+{%- endif %}
+
+# The interval used for polling of remote tasks. (floating point
+# value)
+#task_poll_interval = 0.5
+{%- if server.vmware.get('dvs', {}).task_poll_interval is defined %}
+task_poll_interval = {{ server.vmware.dvs.task_poll_interval }}
+{%- endif %}
+
+# Specify a CA bundle file to use in verifying the vCenter server
+# certificate. (string value)
+#ca_file = <None>
+{%- if server.vmware.get('dvs', {}).ca_file is defined %}
+ca_file = {{ server.vmware.dvs.ca_file }}
+{%- endif %}
+
+# If true, the vCenter server certificate is not verified. If false,
+# then the default CA truststore is used for verification. This option
+# is ignored if "ca_file" is set. (boolean value)
+#insecure = false
+{%- if server.vmware.get('dvs', {}).insecure is defined %}
+insecure = {{ server.vmware.dvs.insecure }}
+{%- endif %}
+
+# The number of times we retry on failures, e.g., socket error, etc.
+# (integer value)
+#api_retry_count = 10
+{%- if server.vmware.get('dvs', {}).api_retry_count is defined %}
+api_retry_count = {{ server.vmware.dvs.api_retry_count }}
+{%- endif %}
+
+# The name of the preconfigured DVS. (string value)
+#dvs_name = <None>
+{%- if server.vmware.get('dvs', {}).dvs_name is defined %}
+dvs_name = {{ server.vmware.dvs.dvs_name }}
+{%- endif %}
+
+# This value should not be set. It is just required for ensuring that
+# the DVS plugin works with the generic NSX metadata code (string
+# value)
+#metadata_mode = <None>
+{%- if server.vmware.get('dvs', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.dvs.metadata_mode }}
+{%- endif %}
+
+
+[nsx]
+
+#
+# From nsx
+#
+
+# Maximum number of ports of a logical switch on a bridged transport
+# zone. The recommended value for this parameter varies with NSX
+# version.
+# Please use:
+# NSX 2.x -> 64
+# NSX 3.0, 3.1 -> 5000
+# NSX 3.2 -> 10000 (integer value)
+#max_lp_per_bridged_ls = 5000
+{%- if server.vmware.get('nsx', {}).max_lp_per_bridged_ls is defined %}
+max_lp_per_bridged_ls = {{ server.vmware.nsx.max_lp_per_bridged_ls }}
+{%- endif %}
+
+# Maximum number of ports of a logical switch on an overlay transport
+# zone (integer value)
+#max_lp_per_overlay_ls = 256
+{%- if server.vmware.get('nsx', {}).max_lp_per_overlay_ls is defined %}
+max_lp_per_overlay_ls = {{ server.vmware.nsx.max_lp_per_overlay_ls }}
+{%- endif %}
+
+# Maximum concurrent connections to each NSX controller. (integer
+# value)
+#concurrent_connections = 10
+{%- if server.vmware.get('nsx', {}).concurrent_connections is defined %}
+concurrent_connections = {{ server.vmware.nsx.concurrent_connections }}
+{%- endif %}
+
+# Number of seconds a generation id should be valid for (default -1
+# meaning do not time out) (integer value)
+# Deprecated group/name - [NVP]/nvp_gen_timeout
+#nsx_gen_timeout = -1
+{%- if server.vmware.get('nsx', {}).nsx_gen_timeout is defined %}
+nsx_gen_timeout = {{ server.vmware.nsx.nsx_gen_timeout }}
+{%- endif %}
+
+# If set to access_network this enables a dedicated connection to the
+# metadata proxy for metadata server access via Neutron router. If set
+# to dhcp_host_route this enables host route injection via the dhcp
+# agent. This option is only useful if running on a host that does not
+# support namespaces otherwise access_network should be used. (string
+# value)
+#metadata_mode = access_network
+{%- if server.vmware.get('nsx', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.nsx.metadata_mode }}
+{%- endif %}
+
+# The default network transport type to use (stt, gre, bridge,
+# ipsec_gre, or ipsec_stt) (string value)
+#default_transport_type = stt
+{%- if server.vmware.get('nsx', {}).default_transport_type is defined %}
+default_transport_type = {{ server.vmware.nsx.default_transport_type }}
+{%- endif %}
+
+# Specifies in which mode the plugin needs to operate in order to
+# provide DHCP and metadata proxy services to tenant instances. If
+# 'agent' is chosen (default) the NSX plugin relies on external RPC
+# agents (i.e. dhcp and metadata agents) to provide such services. In
+# this mode, the plugin supports API extensions 'agent' and
+# 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in
+# Icehouse), the plugin will use NSX logical services for DHCP and
+# metadata proxy. This simplifies the deployment model for Neutron, in
+# that the plugin no longer requires the RPC agents to operate. When
+# 'agentless' is chosen, the config option metadata_mode becomes
+# ineffective. The 'agentless' mode works only on NSX 4.1.
+# Furthermore, a 'combined' mode is also provided and is used to
+# support existing deployments that want to adopt the agentless mode.
+# With this mode, existing networks keep being served by the existing
+# infrastructure (thus preserving backward compatibility), whereas new
+# networks will be served by the new infrastructure. Migration tools
+# are provided to 'move' one network from one model to another; with
+# agent_mode set to 'combined', option 'network_auto_schedule' in
+# neutron.conf is ignored, as new networks will no longer be scheduled
+# to existing dhcp agents. (string value)
+#agent_mode = agent
+{%- if server.vmware.get('nsx', {}).agent_mode is defined %}
+agent_mode = {{ server.vmware.nsx.agent_mode }}
+{%- endif %}
+
+# Specifies which mode packet replication should be done in. If set to
+# service a service node is required in order to perform packet
+# replication. This can also be set to source if one wants replication
+# to be performed locally (NOTE: usually only useful for testing if
+# one does not want to deploy a service node). In order to leverage
+# distributed routers, replication_mode should be set to 'service'.
+# (string value)
+# Possible values:
+# service - <No description provided>
+# source - <No description provided>
+#replication_mode = service
+{%- if server.vmware.get('nsx', {}).replication_mode is defined %}
+replication_mode = {{ server.vmware.nsx.replication_mode }}
+{%- endif %}
+
+# The QoS rules peak bandwidth value will be the configured maximum
+# bandwidth of the QoS rule, multiplied by this value. Value must be
+# bigger than 1 (floating point value)
+# Minimum value: 1
+#qos_peak_bw_multiplier = 2.0
+{%- if server.vmware.get('nsx', {}).qos_peak_bw_multiplier is defined %}
+qos_peak_bw_multiplier = {{ server.vmware.nsx.qos_peak_bw_multiplier }}
+{%- endif %}
+
+
+[nsx_dhcp]
+
+#
+# From nsx
+#
+
+# Comma separated list of additional domain name servers (list value)
+#extra_domain_name_servers =
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).extra_domain_name_servers is defined %}
+extra_domain_name_servers = {{ server.vmware.nsx.dhcp.extra_domain_name_servers }}
+{%- endif %}
+
+# Domain to use for building the hostnames (string value)
+#domain_name = openstacklocal
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).domain_name is defined %}
+domain_name = {{ server.vmware.nsx.dhcp.domain_name }}
+{%- endif %}
+
+# Default DHCP lease time (integer value)
+#default_lease_time = 43200
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).default_lease_time is defined %}
+default_lease_time = {{ server.vmware.nsx.dhcp.default_lease_time }}
+{%- endif %}
+
+
+[nsx_lsn]
+
+#
+# From nsx
+#
+
+# Pull LSN information from NSX in case it is missing from the local
+# data store. This is useful to rebuild the local store in case of
+# server recovery. (boolean value)
+#sync_on_missing_data = false
+{%- if server.vmware.get('nsx', {}).get('lsn', {}).sync_on_missing_data is defined %}
+sync_on_missing_data = {{ server.vmware.nsx.lsn.sync_on_missing_data }}
+{%- endif %}
+
+
+[nsx_metadata]
+
+#
+# From nsx
+#
+
+# IP address used by Metadata server. (string value)
+#metadata_server_address = 127.0.0.1
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).server_address is defined %}
+metadata_server_address = {{ server.vmware.nsx.metadata.server_address }}
+{%- endif %}
+
+# TCP Port used by Metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_server_port = 8775
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).server_port is defined %}
+metadata_server_port = {{ server.vmware.nsx.metadata.server_port }}
+{%- endif %}
+
+# When proxying metadata requests, Neutron signs the Instance-ID
+# header with a shared secret to prevent spoofing. You may select any
+# string for a secret, but it MUST match with the configuration used
+# by the Metadata server. (string value)
+#metadata_shared_secret =
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).shared_secret is defined %}
+metadata_shared_secret = {{ server.vmware.nsx.metadata.shared_secret }}
+{%- endif %}
+
+
+[nsx_sync]
+
+#
+# From nsx
+#
+
+# Interval in seconds between runs of the status synchronization task.
+# The plugin will aim at resynchronizing operational status for all
+# resources in this interval, and it should therefore be large enough
+# to ensure the task is feasible. Otherwise the plugin will be
+# constantly synchronizing resource status, i.e. a new task is started
+# as soon as the previous one is completed. If this value is set to 0, the
+# state synchronization thread for this Neutron instance will be
+# disabled. (integer value)
+#state_sync_interval = 10
+{%- if server.vmware.get('nsx', {}).get('sync', {}).state_sync_interval is defined %}
+state_sync_interval = {{ server.vmware.nsx.sync.state_sync_interval }}
+{%- endif %}
+
+# Random additional delay between two runs of the state
+# synchronization task. An additional wait time between 0 and
+# max_random_sync_delay seconds will be added on top of
+# state_sync_interval. (integer value)
+#max_random_sync_delay = 0
+{%- if server.vmware.get('nsx', {}).get('sync', {}).max_random_sync_delay is defined %}
+max_random_sync_delay = {{ server.vmware.nsx.sync.max_random_sync_delay }}
+{%- endif %}
+
+# Minimum delay, in seconds, between two status synchronization
+# requests for NSX. Depending on chunk size, controller load, and
+# other factors, state synchronization requests might be pretty heavy.
+# This means the controller might take time to respond, and its load
+# might be quite increased by them. This parameter allows specifying a
+# minimum interval between two subsequent requests. The value for this
+# parameter must never exceed state_sync_interval. If this does, an
+# error will be raised at startup. (integer value)
+#min_sync_req_delay = 1
+{%- if server.vmware.get('nsx', {}).get('sync', {}).min_sync_req_delay is defined %}
+min_sync_req_delay = {{ server.vmware.nsx.sync.min_sync_req_delay }}
+{%- endif %}
+
+# Minimum number of resources to be retrieved from NSX in a single
+# status synchronization request. The actual size of the chunk will
+# increase if the number of resources is such that using the minimum
+# chunk size will cause the interval between two requests to be less
+# than min_sync_req_delay (integer value)
+#min_chunk_size = 500
+{%- if server.vmware.get('nsx', {}).get('sync', {}).min_chunk_size is defined %}
+min_chunk_size = {{ server.vmware.nsx.sync.min_chunk_size }}
+{%- endif %}
+
+# Enable this option to allow punctual state synchronization on show
+# operations. In this way, show operations will always fetch the
+# operational status of the resource from the NSX backend, and this
+# might have a considerable impact on overall performance. (boolean
+# value)
+#always_read_status = false
+{%- if server.vmware.get('nsx', {}).get('sync', {}).always_read_status is defined %}
+always_read_status = {{ server.vmware.nsx.sync.always_read_status }}
+{%- endif %}
+
+
+[nsx_v3]
+
+#
+# From nsx
+#
+
+# User names for the NSX managers (list value)
+#nsx_api_user = admin
+{%- if server.vmware.get('nsx', {}).get('v3', {}).api_user is defined %}
+nsx_api_user = {{ server.vmware.nsx.v3.api_user }}
+{%- endif %}
+
+# Passwords for the NSX managers (list value)
+#nsx_api_password = default
+{%- if server.vmware.get('nsx', {}).get('v3', {}).api_password is defined %}
+nsx_api_password = {{ server.vmware.nsx.v3.api_password }}
+{%- endif %}
+
+# IP address of one or more NSX managers separated by commas. The IP
+# address should be of the form:
+# [<scheme>://]<ip_address>[:<port>]
+# If scheme is not provided https is used. If port is not provided
+# port 80 is used for http and port 443 for https. (list value)
+#nsx_api_managers =
+{%- set nsx_api_managers = [] %}
+{%- for _, manager in server.vmware.get('nsx', {}).get('v3', {}).get('api_managers', {}).iteritems() %}
+{%- do nsx_api_managers.append(manager.scheme + "://" + manager.host + ":" + manager.port|string) %}
+{%- endfor %}
+{%- if nsx_api_managers %}nsx_api_managers = {{ ','.join(nsx_api_managers) }}{% endif %}
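+# NOTE: the manager list above is built from pillar; a minimal sketch
+# (values illustrative):
+#   neutron:
+#     server:
+#       vmware:
+#         nsx:
+#           v3:
+#             api_managers:
+#               nsxm01:
+#                 scheme: https
+#                 host: 172.16.10.111
+#                 port: '443'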
+
+# Use client certificate in NSX manager authentication (boolean value)
+#nsx_use_client_auth = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).use_client_auth is defined %}
+nsx_use_client_auth = {{ server.vmware.nsx.v3.use_client_auth }}
+{%- endif %}
+
+# File to contain client certificate and private key (string value)
+#nsx_client_cert_file =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_file is defined %}
+nsx_client_cert_file = {{ server.vmware.nsx.v3.client_cert_file }}
+{%- endif %}
+
+# password for private key encryption (string value)
+#nsx_client_cert_pk_password =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_pk_password is defined %}
+nsx_client_cert_pk_password = {{ server.vmware.nsx.v3.client_cert_pk_password }}
+{%- endif %}
+
+# Storage type for client certificate sensitive data (string value)
+# Possible values:
+# nsx-db - <No description provided>
+# none - <No description provided>
+#nsx_client_cert_storage = nsx-db
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_storage is defined %}
+nsx_client_cert_storage = {{ server.vmware.nsx.v3.client_cert_storage }}
+{%- endif %}
+
+# This is the name or UUID of the default NSX overlay transport zone
+# that will be used for creating tunneled isolated Neutron networks.
+# It needs to be created in NSX before starting Neutron with the NSX
+# plugin. (string value)
+#default_overlay_tz = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_overlay_tz is defined %}
+default_overlay_tz = {{ server.vmware.nsx.v3.default_overlay_tz }}
+{%- endif %}
+
+# (Optional) Only required when creating VLAN or flat provider
+# networks. Name or UUID of default NSX VLAN transport zone that will
+# be used for bridging between Neutron networks, if no physical
+# network has been specified (string value)
+#default_vlan_tz = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_vlan_tz is defined %}
+default_vlan_tz = {{ server.vmware.nsx.v3.default_vlan_tz }}
+{%- endif %}
+
+# (Optional) Name or UUID of the default NSX bridge cluster that will
+# be used to perform L2 gateway bridging between VXLAN and VLAN
+# networks. If default bridge cluster UUID is not specified, admin
+# will have to manually create a L2 gateway corresponding to a NSX
+# Bridge Cluster using L2 gateway APIs. This field must be specified
+# on one of the active neutron servers only. (string value)
+#default_bridge_cluster = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_bridge_cluster is defined %}
+default_bridge_cluster = {{ server.vmware.nsx.v3.default_bridge_cluster }}
+{%- endif %}
+
+# Maximum number of times to retry API requests upon stale revision
+# errors. (integer value)
+#retries = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).retries is defined %}
+retries = {{ server.vmware.nsx.v3.retries }}
+{%- endif %}
+
+# Specify CA bundle files to use in verifying the NSX Managers
+# server certificate. This option is ignored if "insecure" is set to
+# True. If "insecure" is set to False and ca_file is unset, the system
+# root CAs will be used to verify the server certificate. (list value)
+#ca_file = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).ca_file is defined %}
+ca_file = {{ server.vmware.nsx.v3.ca_file }}
+{%- endif %}
+
+# If true, the NSX Manager server certificate is not verified. If
+# false, the CA bundle specified via "ca_file" will be used or, if
+# unset, the default system root CAs will be used. (boolean value)
+#insecure = true
+{%- if server.vmware.get('nsx', {}).get('v3', {}).insecure is defined %}
+insecure = {{ server.vmware.nsx.v3.insecure }}
+{%- endif %}
+
+# The time in seconds before aborting a HTTP connection to a NSX
+# manager. (integer value)
+#http_timeout = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_timeout is defined %}
+http_timeout = {{ server.vmware.nsx.v3.http_timeout }}
+{%- endif %}
+
+
+# The time in seconds before aborting a HTTP read response from a NSX
+# manager. (integer value)
+#http_read_timeout = 180
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_read_timeout is defined %}
+http_read_timeout = {{ server.vmware.nsx.v3.http_read_timeout }}
+{%- endif %}
+
+# Maximum number of times to retry a HTTP connection. (integer value)
+#http_retries = 3
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_retries is defined %}
+http_retries = {{ server.vmware.nsx.v3.http_retries }}
+{%- endif %}
+
+# Maximum concurrent connections to each NSX manager. (integer value)
+#concurrent_connections = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).concurrent_connections is defined %}
+concurrent_connections = {{ server.vmware.nsx.v3.concurrent_connections }}
+{%- endif %}
+
+# The amount of time in seconds to wait before ensuring connectivity
+# to the NSX manager if no manager connection has been used. (integer
+# value)
+#conn_idle_timeout = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).conn_idle_timeout is defined %}
+conn_idle_timeout = {{ server.vmware.nsx.v3.conn_idle_timeout }}
+{%- endif %}
+
+# Number of times a HTTP redirect should be followed. (integer value)
+#redirects = 2
+{%- if server.vmware.get('nsx', {}).get('v3', {}).redirects is defined %}
+redirects = {{ server.vmware.nsx.v3.redirects }}
+{%- endif %}
+
+# Name or UUID of the default tier0 router that will be used for
+# connecting to tier1 logical routers and configuring external
+# networks (string value)
+#default_tier0_router = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_tier0_router is defined %}
+default_tier0_router = {{ server.vmware.nsx.v3.default_tier0_router }}
+{%- endif %}
+
+# (Optional) The number of nested groups which are used by the plugin;
+# each Neutron security-group is added to one nested group, and each
+# nested group can contain at most 500 security-groups. Therefore, the
+# maximum number of security groups that can be created is
+# 500 * number_of_nested_groups. The default is 8 nested groups, which
+# allows a maximum of 4k security-groups; to allow creation of more
+# security-groups, modify this figure. (integer value)
+#number_of_nested_groups = 8
+{%- if server.vmware.get('nsx', {}).get('v3', {}).number_of_nested_groups is defined %}
+number_of_nested_groups = {{ server.vmware.nsx.v3.number_of_nested_groups }}
+{%- endif %}
+
+# If set to access_network this enables a dedicated connection to the
+# metadata proxy for metadata server access via Neutron router. If set
+# to dhcp_host_route this enables host route injection via the dhcp
+# agent. This option is only useful if running on a host that does not
+# support namespaces otherwise access_network should be used. (string
+# value)
+#metadata_mode = access_network
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.nsx.v3.metadata_mode }}
+{%- endif %}
+
+# If true, an internal metadata network will be created for a router
+# only when the router is attached to a DHCP-disabled subnet. (boolean
+# value)
+#metadata_on_demand = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_on_demand is defined %}
+metadata_on_demand = {{ server.vmware.nsx.v3.metadata_on_demand }}
+{%- endif %}
+
+# If true, DHCP and metadata proxy services will be provided by NSX
+# backend. (boolean value)
+#native_dhcp_metadata = true
+{%- if server.vmware.get('nsx', {}).get('v3', {}).native_dhcp_metadata is defined %}
+native_dhcp_metadata = {{ server.vmware.nsx.v3.native_dhcp_metadata }}
+{%- endif %}
+
+# The metadata route used for native metadata proxy service. (string
+# value)
+#native_metadata_route = 169.254.169.254/31
+{%- if server.vmware.get('nsx', {}).get('v3', {}).native_metadata_route is defined %}
+native_metadata_route = {{ server.vmware.nsx.v3.native_metadata_route }}
+{%- endif %}
+
+# This is the name or UUID of the NSX DHCP Profile that will be used
+# to enable native DHCP service. It needs to be created in NSX before
+# starting Neutron with the NSX plugin (string value)
+#dhcp_profile = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_profile is defined %}
+dhcp_profile = {{ server.vmware.nsx.v3.dhcp_profile }}
+{%- endif %}
+
+# DHCP default lease time. (integer value)
+#dhcp_lease_time = 86400
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_lease_time is defined %}
+dhcp_lease_time = {{ server.vmware.nsx.v3.dhcp_lease_time }}
+{%- endif %}
+
+# Domain to use for building the hostnames. (string value)
+#dns_domain = openstacklocal
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dns_domain is defined %}
+dns_domain = {{ server.vmware.nsx.v3.dns_domain }}
+{%- endif %}
+
+# List of nameservers to configure for the DHCP binding entries. These
+# will be used if there are no nameservers defined on the subnet.
+# (list value)
+#nameservers =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).nameservers is defined %}
+nameservers = {{ ','.join(server.vmware.nsx.v3.nameservers) }}
+{%- endif %}
+
+
+# This is the name or UUID of the NSX Metadata Proxy that will be used
+# to enable native metadata service. It needs to be created in NSX
+# before starting Neutron with the NSX plugin. (string value)
+#metadata_proxy = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_proxy is defined%}
+metadata_proxy = {{ server.vmware.nsx.v3.metadata_proxy }}
+{%- endif %}
+
+# (Optional) This is the name or UUID of the NSX dhcp relay service
+# that will be used to enable DHCP relay on router ports. (string
+# value)
+#dhcp_relay_service = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_relay_service is defined %}
+dhcp_relay_service = {{ server.vmware.nsx.v3.dhcp_relay_service }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall rule for security-
+# groups blocked traffic is logged. (boolean value)
+#log_security_groups_blocked_traffic = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).log_security_groups_blocked_traffic is defined %}
+log_security_groups_blocked_traffic = {{ server.vmware.nsx.v3.log_security_groups_blocked_traffic }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall security-groups
+# rules are logged. (boolean value)
+#log_security_groups_allowed_traffic = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).log_security_groups_allowed_traffic is defined %}
+log_security_groups_allowed_traffic = {{ server.vmware.nsx.v3.log_security_groups_allowed_traffic }}
+{%- endif %}
+
+# Optional parameter defining the network availability zone names
+# for the native dhcp configuration. The configuration of each zone
+# will be under a group named [az:<name>] (list value)
+#availability_zones =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).availability_zones is defined %}
+availability_zones = {{ server.vmware.nsx.v3.availability_zones }}
+{%- endif %}
+
+# When True, the configured transport zones, router and profiles will
+# be found by tags on the NSX. The scope of the tag will be the value
+# of search_objects_scope. The value of the search tag will be the
+# name configured in each respective configuration. (boolean value)
+#init_objects_by_tags = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).init_objects_by_tags is defined %}
+init_objects_by_tags = {{ server.vmware.nsx.v3.init_objects_by_tags }}
+{%- endif %}
+
+# This is the scope of the tag that will be used for finding the
+# objects uuids on the NSX during plugin init. (string value)
+#search_objects_scope = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).search_objects_scope is defined %}
+search_objects_scope = {{ server.vmware.nsx.v3.search_objects_scope }}
+{%- endif %}
+
+# Optional parameter defining a list of switching profile uuids that
+# will be attached to all neutron-created nsx ports. (list value)
+#switching_profiles =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).switching_profiles is defined %}
+switching_profiles = {{ server.vmware.nsx.v3.switching_profiles }}
+{%- endif %}
+
+# (Optional) Indicates whether ENS transport zones can be used
+# (boolean value)
+#ens_support = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).ens_support is defined %}
+ens_support = {{ server.vmware.nsx.v3.ens_support }}
+{%- endif %}
+
+
+[nsxv]
+
+#
+# From nsx
+#
+
+# User name for NSXv manager (string value)
+#user = admin
+{%- if server.vmware.get('nsxv', {}).user is defined %}
+user = {{ server.vmware.nsxv.user }}
+{%- endif %}
+
+# Password for NSXv manager (string value)
+#password = default
+{%- if server.vmware.get('nsxv', {}).password is defined %}
+password = {{ server.vmware.nsxv.password }}
+{%- endif %}
+
+# URL for NSXv manager (string value)
+#manager_uri = <None>
+{%- if server.vmware.get('nsxv', {}).manager_uri is defined %}
+manager_uri = {{ server.vmware.nsxv.manager_uri }}
+{%- endif %}
+
+# Specify a CA bundle file to use in verifying the NSXv server
+# certificate. (string value)
+#ca_file = <None>
+{%- if server.vmware.get('nsxv', {}).ca_file is defined %}
+ca_file = {{ server.vmware.nsxv.ca_file }}
+{%- endif %}
+
+# If true, the NSXv server certificate is not verified. If false, then
+# the default CA truststore is used for verification. This option is
+# ignored if "ca_file" is set. (boolean value)
+#insecure = true
+{%- if server.vmware.get('nsxv', {}).insecure is defined %}
+insecure = {{ server.vmware.nsxv.insecure }}
+{%- endif %}
+
+# (Required) Parameter listing the IDs of the clusters which are used
+# by OpenStack. (list value)
+#cluster_moid =
+{%- if server.vmware.get('nsxv', {}).cluster_moid is defined %}
+cluster_moid = {{ server.vmware.nsxv.cluster_moid }}
+{%- endif %}
+
+# Required parameter identifying the ID of datacenter to deploy NSX
+# Edges (string value)
+#datacenter_moid = <None>
+{%- if server.vmware.get('nsxv', {}).datacenter_moid is defined %}
+datacenter_moid = {{ server.vmware.nsxv.datacenter_moid }}
+{%- endif %}
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges (string value)
+#deployment_container_id = <None>
+{%- if server.vmware.get('nsxv', {}).deployment_container_id is defined %}
+deployment_container_id = {{ server.vmware.nsxv.deployment_container_id }}
+{%- endif %}
+
+# Optional parameter identifying the ID of resource to deploy NSX
+# Edges (string value)
+#resource_pool_id = <None>
+{%- if server.vmware.get('nsxv', {}).resource_pool_id is defined %}
+resource_pool_id = {{ server.vmware.nsxv.resource_pool_id }}
+{%- endif %}
+
+# Optional parameter defining the availability zone names for
+# deploying NSX Edges. The configuration of each zone will be under a
+# group named [az:<name>] (list value)
+#availability_zones =
+
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges (string value)
+#datastore_id = <None>
+{%- if server.vmware.get('nsxv', {}).datastore_id is defined %}
+datastore_id = {{ server.vmware.nsxv.datastore_id }}
+{%- endif %}
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges in addition to datastore_id in case edge_ha is True (string
+# value)
+#ha_datastore_id = <None>
+{%- if server.vmware.get('nsxv', {}).ha_datastore_id is defined %}
+ha_datastore_id = {{ server.vmware.nsxv.ha_datastore_id }}
+{%- endif %}
+
+# When True and in case edge_ha is True, half of the edges will be
+# placed in the primary datastore as active and the other half will be
+# placed in the ha_datastore (boolean value)
+#ha_placement_random = false
+{%- if server.vmware.get('nsxv', {}).ha_placement_random is defined %}
+ha_placement_random = {{ server.vmware.nsxv.ha_placement_random }}
+{%- endif %}
+
+# (Optional) If edge HA is used then this will ensure that
+# active/backup edges are placed in the listed host groups. At least 2
+# predefined host groups need to be configured. (list value)
+#edge_host_groups =
+
+# (Required) Network ID for physical network connectivity (string
+# value)
+#external_network = <None>
+{%- if server.vmware.get('nsxv', {}).external_network is defined %}
+external_network = {{ server.vmware.nsxv.external_network }}
+{%- endif %}
+
+# (Optional) Asynchronous task status check interval. Default is 2000
+# (millisecond) (integer value)
+#task_status_check_interval = 2000
+{%- if server.vmware.get('nsxv', {}).task_status_check_interval is defined %}
+task_status_check_interval = {{ server.vmware.nsxv.task_status_check_interval }}
+{%- endif %}
+
+# (Optional) Network scope ID for VXLAN virtual wires (string value)
+#vdn_scope_id = <None>
+{%- if server.vmware.get('nsxv', {}).vdn_scope_id is defined %}
+vdn_scope_id = {{ server.vmware.nsxv.vdn_scope_id }}
+{%- endif %}
+
+# (Optional) DVS MoRef ID for DVS connected to Management / Edge
+# cluster (string value)
+#dvs_id = <None>
+{%- if server.vmware.get('nsxv', {}).dvs_id is defined %}
+dvs_id = {{ server.vmware.nsxv.dvs_id }}
+{%- endif %}
+
+# (Optional) Maximum number of sub interfaces supported per vnic in
+# edge. (integer value)
+# Minimum value: 1
+# Maximum value: 110
+#maximum_tunnels_per_vnic = 20
+{%- if server.vmware.get('nsxv', {}).maximum_tunnels_per_vnic is defined %}
+maximum_tunnels_per_vnic = {{ server.vmware.nsxv.maximum_tunnels_per_vnic }}
+{%- endif %}
+
+# Defines the edge pool's management range with the format:
+# <edge_type>:[edge_size]:<min_edges>:<max_edges>. edge_type:
+# service, vdr. edge_size: compact, large, xlarge, quadlarge;
+# default is compact. By default, the edge pool manager manages
+# service edges with compact size and distributed edges with compact
+# size as follows: service:compact:4:10,vdr:compact:4:10 (list
+# value)
+#backup_edge_pool = service:compact:4:10,vdr:compact:4:10
+
+# Maximum number of API retries on endpoint. (integer value)
+#retries = 20
+{%- if server.vmware.get('nsxv', {}).retries is defined %}
+retries = {{ server.vmware.nsxv.retries }}
+{%- endif %}
+
+# (Optional) Portgroup MoRef ID for metadata proxy management network
+# (string value)
+#mgt_net_moid = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_moid is defined %}
+mgt_net_moid = {{ server.vmware.nsxv.mgt_net_moid }}
+{%- endif %}
+
+# (Optional) Comma separated list of management network IP addresses
+# for metadata proxy. (list value)
+#mgt_net_proxy_ips =
+
+# (Optional) Management network netmask for metadata proxy. (string
+# value)
+#mgt_net_proxy_netmask = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_proxy_netmask is defined %}
+mgt_net_proxy_netmask = {{ server.vmware.nsxv.mgt_net_proxy_netmask }}
+{%- endif %}
+
+# (Optional) Management network default gateway for metadata proxy.
+# (string value)
+#mgt_net_default_gateway = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_default_gateway is defined %}
+mgt_net_default_gateway = {{ server.vmware.nsxv.mgt_net_default_gateway }}
+{%- endif %}
+
+# (Optional) IP addresses used by Nova metadata service. (list value)
+#nova_metadata_ips =
+
+# (Optional) TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
+{%- if server.vmware.get('nsxv', {}).nova_metadata_port is defined %}
+nova_metadata_port = {{ server.vmware.nsxv.nova_metadata_port }}
+{%- endif %}
+
+# (Optional) Shared secret to sign metadata requests. (string value)
+#metadata_shared_secret = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_shared_secret is defined %}
+metadata_shared_secret = {{ server.vmware.nsxv.metadata_shared_secret }}
+{%- endif %}
+
+# (Optional) If True, the end to end connection for metadata service
+# is not verified. If False, the default CA truststore is used for
+# verification. (boolean value)
+#metadata_insecure = true
+{%- if server.vmware.get('nsxv', {}).metadata_insecure is defined %}
+metadata_insecure = {{ server.vmware.nsxv.metadata_insecure }}
+{%- endif %}
+
+# (Optional) Client certificate to use when metadata connection is to
+# be verified. If not provided, a self signed certificate will be
+# used. (string value)
+#metadata_nova_client_cert = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_nova_client_cert is defined %}
+metadata_nova_client_cert = {{ server.vmware.nsxv.metadata_nova_client_cert }}
+{%- endif %}
+
+# (Optional) Private key of client certificate. (string value)
+#metadata_nova_client_priv_key = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_nova_client_priv_key is defined %}
+metadata_nova_client_priv_key = {{ server.vmware.nsxv.metadata_nova_client_priv_key }}
+{%- endif %}
+
+# (Optional) If True then plugin will use NSXV spoofguard component
+# for port-security feature. (boolean value)
+#spoofguard_enabled = true
+{%- if server.vmware.get('nsxv', {}).spoofguard_enabled is defined %}
+spoofguard_enabled = {{ server.vmware.nsxv.spoofguard_enabled }}
+{%- endif %}
+
+# (Optional) If True then plugin will use NSXV exclude list component
+# when port security is disabled and spoofguard is enabled. (boolean
+# value)
+#use_exclude_list = true
+{%- if server.vmware.get('nsxv', {}).use_exclude_list is defined %}
+use_exclude_list = {{ server.vmware.nsxv.use_exclude_list }}
+{%- endif %}
+
+# Ordered list of router_types to allocate as tenant routers. It
+# limits the router types that the Nsxv can support for tenants:
+# distributed: router is supported by distributed edge at the backend.
+# shared: multiple routers share the same service edge at the backend.
+# exclusive: router exclusively occupies one service edge at the
+# backend.
+# Nsxv would select the first available router type from
+# tenant_router_types list if router-type is not specified. If the
+# tenant defines the router type with '--distributed','--router_type
+# exclusive' or '--router_type shared', Nsxv would verify that the
+# router type is in tenant_router_types. Admin supports all these
+# three router types. (list value)
+#tenant_router_types = shared,distributed,exclusive
+
+# (Optional) Username to configure for Edge appliance login. (string
+# value)
+#edge_appliance_user = <None>
+{%- if server.vmware.get('nsxv', {}).edge_appliance_user is defined %}
+edge_appliance_user = {{ server.vmware.nsxv.edge_appliance_user }}
+{%- endif %}
+
+# (Optional) Password to configure for Edge appliance login. (string
+# value)
+#edge_appliance_password = <None>
+{%- if server.vmware.get('nsxv', {}).edge_appliance_password is defined %}
+edge_appliance_password = {{ server.vmware.nsxv.edge_appliance_password }}
+{%- endif %}
+
+# (Optional) DHCP default lease time. (integer value)
+#dhcp_lease_time = 86400
+{%- if server.vmware.get('nsxv', {}).dhcp_lease_time is defined %}
+dhcp_lease_time = {{ server.vmware.nsxv.dhcp_lease_time }}
+{%- endif %}
+
+# If True, the server instance will attempt to initialize the metadata
+# infrastructure (boolean value)
+#metadata_initializer = true
+{%- if server.vmware.get('nsxv', {}).metadata_initializer is defined %}
+metadata_initializer = {{ server.vmware.nsxv.metadata_initializer }}
+{%- endif %}
+
+# List of tcp ports, to be allowed access to the metadata proxy, in
+# addition to the default 80,443,8775 tcp ports (list value)
+#metadata_service_allowed_ports =
+{%- if server.vmware.get('nsxv', {}).metadata_service_allowed_ports is defined %}
+metadata_service_allowed_ports = {{ ','.join(server.vmware.nsxv.metadata_service_allowed_ports) }}
+{%- endif %}
+
+# (Optional) Enable HA for NSX Edges. (boolean value)
+#edge_ha = false
+{%- if server.vmware.get('nsxv', {}).edge_ha is defined %}
+edge_ha = {{ server.vmware.nsxv.edge_ha }}
+{%- endif %}
+
+# (Optional) Edge appliance size to be used for creating exclusive
+# router. Valid values: ['compact', 'large', 'xlarge', 'quadlarge'].
+# This exclusive_router_appliance_size will be picked up if --router-
+# size parameter is not specified while doing neutron router-create
+# (string value)
+# Possible values:
+# compact - <No description provided>
+# large - <No description provided>
+# xlarge - <No description provided>
+# quadlarge - <No description provided>
+#exclusive_router_appliance_size = compact
+{%- if server.vmware.get('nsxv', {}).exclusive_router_appliance_size is defined %}
+exclusive_router_appliance_size = {{ server.vmware.nsxv.exclusive_router_appliance_size }}
+{%- endif %}
+
+# (Optional) Edge appliance size to be used for creating shared router
+# edge. Valid values: ['compact', 'large', 'xlarge', 'quadlarge'].
+# (string value)
+# Possible values:
+# compact - <No description provided>
+# large - <No description provided>
+# xlarge - <No description provided>
+# quadlarge - <No description provided>
+#shared_router_appliance_size = compact
+{%- if server.vmware.get('nsxv', {}).shared_router_appliance_size is defined %}
+shared_router_appliance_size = {{ server.vmware.nsxv.shared_router_appliance_size }}
+{%- endif %}
+
+# (Optional) Use this search domain if there is no search domain
+# configured on the subnet. (string value)
+#dns_search_domain = <None>
+{%- if server.vmware.get('nsxv', {}).dns_search_domain is defined %}
+dns_search_domain = {{ server.vmware.nsxv.dns_search_domain }}
+{%- endif %}
+
+# List of nameservers to configure for the DHCP binding entries. These
+# will be used if there are no nameservers defined on the subnet.
+# (list value)
+#nameservers =
+{%- if server.vmware.get('nsxv', {}).nameservers is defined %}
+nameservers = {{ ','.join(server.vmware.nsxv.nameservers) }}
+{%- endif %}
+
+# If True, dvs features will be supported which involves configuring
+# the dvs backing nsx_v directly. If False, only features exposed via
+# nsx_v will be supported (boolean value)
+#use_dvs_features = false
+{%- if server.vmware.get('nsxv', {}).use_dvs_features is defined %}
+use_dvs_features = {{ server.vmware.nsxv.use_dvs_features }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall rule for security-
+# groups blocked traffic is logged. (boolean value)
+#log_security_groups_blocked_traffic = false
+{%- if server.vmware.get('nsxv', {}).log_security_groups_blocked_traffic is defined %}
+log_security_groups_blocked_traffic = {{ server.vmware.nsxv.log_security_groups_blocked_traffic }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall security-groups
+# allowed traffic is logged. (boolean value)
+#log_security_groups_allowed_traffic = false
+{%- if server.vmware.get('nsxv', {}).log_security_groups_allowed_traffic is defined %}
+log_security_groups_allowed_traffic = {{ server.vmware.nsxv.log_security_groups_allowed_traffic }}
+{%- endif %}
+
+# (Optional) The profile id of the redirect firewall rules that will
+# be used for the Service Insertion feature. (string value)
+#service_insertion_profile_id = <None>
+{%- if server.vmware.get('nsxv', {}).service_insertion_profile_id is defined %}
+service_insertion_profile_id = {{ server.vmware.nsxv.service_insertion_profile_id }}
+{%- endif %}
+
+# (Optional) If set to True, the plugin will create a redirect rule to
+# send all the traffic to the security partner (boolean value)
+#service_insertion_redirect_all = false
+{%- if server.vmware.get('nsxv', {}).service_insertion_redirect_all is defined %}
+service_insertion_redirect_all = {{ server.vmware.nsxv.service_insertion_redirect_all }}
+{%- endif %}
+
+# If set to True, the plugin will use NSX policies in the neutron
+# security groups. (boolean value)
+#use_nsx_policies = false
+{%- if server.vmware.get('nsxv', {}).use_nsx_policies is defined %}
+use_nsx_policies = {{ server.vmware.nsxv.use_nsx_policies }}
+{%- endif %}
+
+# (Optional) If use_nsx_policies is True, this policy will be used as
+# the default policy for new tenants. (string value)
+#default_policy_id = <None>
+{%- if server.vmware.get('nsxv', {}).default_policy_id is defined %}
+default_policy_id = {{ server.vmware.nsxv.default_policy_id }}
+{%- endif %}
+
+# (Optional) If use_nsx_policies is True, this value will determine if
+# tenants can add rules to their security groups. (boolean value)
+#allow_tenant_rules_with_policy = false
+{%- if server.vmware.get('nsxv', {}).allow_tenant_rules_with_policy is defined %}
+allow_tenant_rules_with_policy = {{ server.vmware.nsxv.allow_tenant_rules_with_policy }}
+{%- endif %}
+
+# (Optional) Sets the network address for distributed router TLR-PLR
+# connectivity, with <network IP>/<prefix> syntax (string value)
+#vdr_transit_network = 169.254.2.0/28
+{%- if server.vmware.get('nsxv', {}).vdr_transit_network is defined %}
+vdr_transit_network = {{ server.vmware.nsxv.vdr_transit_network }}
+{%- endif %}
+
+# If set to False, the router will associate a floating ip with its
+# external interface only, thus denying connectivity between hosts on
+# the same network via their floating ips. If True, a floating ip will
+# be associated with all router interfaces. (boolean value)
+#bind_floatingip_to_all_interfaces = false
+{%- if server.vmware.get('nsxv', {}).bind_floatingip_to_all_interfaces is defined %}
+bind_floatingip_to_all_interfaces = {{ server.vmware.nsxv.bind_floatingip_to_all_interfaces }}
+{%- endif %}
+
+# (Optional) Have exclusive DHCP edge per network. (boolean value)
+#exclusive_dhcp_edge = false
+{%- if server.vmware.get('nsxv', {}).exclusive_dhcp_edge is defined %}
+exclusive_dhcp_edge = {{ server.vmware.nsxv.exclusive_dhcp_edge }}
+{%- endif %}
+
+# (Optional) Set the interval (Seconds) for BGP neighbour hold down
+# time. (integer value)
+#bgp_neighbour_hold_down_timer = 4
+{%- if server.vmware.get('nsxv', {}).bgp_neighbour_hold_down_timer is defined %}
+bgp_neighbour_hold_down_timer = {{ server.vmware.nsxv.bgp_neighbour_hold_down_timer }}
+{%- endif %}
+
+# (Optional) Set the interval (Seconds) for BGP neighbour keep alive
+# time. (integer value)
+#bgp_neighbour_keep_alive_timer = 1
+{%- if server.vmware.get('nsxv', {}).bgp_neighbour_keep_alive_timer is defined %}
+bgp_neighbour_keep_alive_timer = {{ server.vmware.nsxv.bgp_neighbour_keep_alive_timer }}
+{%- endif %}
+
+# (Optional) Set the wait time (Seconds) between enablement of ECMP.
+# (integer value)
+#ecmp_wait_time = 2
+{%- if server.vmware.get('nsxv', {}).ecmp_wait_time is defined %}
+ecmp_wait_time = {{ server.vmware.nsxv.ecmp_wait_time }}
+{%- endif %}
+
+# List of <DVS MoRef ID>:<vlan_min>:<vlan_max> tuples specifying DVS
+# MoRef IDs usable for VLAN provider networks, as well as the ranges
+# of VLAN tags on each that are available for allocation to networks. (list value)
+#network_vlan_ranges =
+{%- set network_vlan_ranges = [] %}
+{%- for _, netrange in server.vmware.get('nsxv', {}).get('network_vlan_ranges', {}).iteritems() %}
+{%- do network_vlan_ranges.append(netrange.dvs_id + ":" + netrange.vlan_min|string + ":" + netrange.vlan_max|string) %}
+{%- endfor %}
+{%- if network_vlan_ranges %}network_vlan_ranges = {{ ','.join(network_vlan_ranges) }}{% endif %}
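+# NOTE: the ranges above are built from pillar; a minimal sketch (values
+# illustrative):
+#   neutron:
+#     server:
+#       vmware:
+#         nsxv:
+#           network_vlan_ranges:
+#             range01:
+#               dvs_id: dvs-101
+#               vlan_min: '100'
+#               vlan_max: '200'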
+
+# Timeout interval for NSX backend transactions. (integer value)
+#nsx_transaction_timeout = 240
+{%- if server.vmware.get('nsxv', {}).nsx_transaction_timeout is defined %}
+nsx_transaction_timeout = {{ server.vmware.nsxv.nsx_transaction_timeout }}
+{%- endif %}
+
+# If False, different tenants will not use the same DHCP edge or
+# router edge. (boolean value)
+#share_edges_between_tenants = true
+{%- if server.vmware.get('nsxv', {}).share_edges_between_tenants is defined %}
+share_edges_between_tenants = {{ server.vmware.nsxv.share_edges_between_tenants }}
+{%- endif %}
+
+# List of the enabled housekeeping jobs (list value)
+#housekeeping_jobs = error_dhcp_edge,error_backup_edge
+{%- if server.vmware.get('nsxv', {}).housekeeping_jobs is defined %}
+housekeeping_jobs = {{ ','.join(server.vmware.nsxv.housekeeping_jobs) }}
+{%- endif %}
+
+
+# List of housekeeping jobs which are enabled in read only mode (list
+# value)
+#housekeeping_readonly_jobs =
+{%- if server.vmware.get('nsxv', {}).housekeeping_readonly_jobs is defined %}
+housekeeping_readonly_jobs = {{ ','.join(server.vmware.nsxv.housekeeping_readonly_jobs) }}
+{%- endif %}
+
+
+# Housekeeping will only warn about breakage. (boolean value)
+#housekeeping_readonly = true
+{%- if server.vmware.get('nsxv', {}).housekeeping_readonly is defined %}
+housekeeping_readonly = {{ server.vmware.nsxv.housekeeping_readonly }}
+{%- endif %}
+
+# Use default block all rule when no security groups are set on a port
+# and port security is enabled (boolean value)
+#use_default_block_all = false
+{%- if server.vmware.get('nsxv', {}).use_default_block_all is defined %}
+use_default_block_all = {{ server.vmware.nsxv.use_default_block_all }}
+{%- endif %}
+
+
+[quotas]
+
+#
+# From nsx
+#
+
+# Number of network gateways allowed per tenant, -1 for unlimited
+# (integer value)
+#quota_network_gateway = 5
+{%- if server.vmware.get('nsxv', {}).quota_network_gateway is defined %}
+quota_network_gateway = {{ server.vmware.nsxv.quota_network_gateway }}
+{%- endif %}
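For reference, a minimal pillar sketch that would exercise the nsxv options templated above. The key names mirror the server.vmware.nsxv lookups in this file; the range01 key and all values are illustrative, not recommended settings:

neutron:
  server:
    vmware:
      nsxv:
        exclusive_dhcp_edge: true
        nsx_transaction_timeout: 240
        housekeeping_jobs:
        - error_dhcp_edge
        - error_backup_edge
        network_vlan_ranges:
          range01:              # arbitrary label; only the values are used
            dvs_id: dvs-101     # illustrative DVS MoRef ID
            vlan_min: 100
            vlan_max: 200

With these values the template renders network_vlan_ranges = dvs-101:100:200.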
diff --git a/neutron/files/rocky/plugins/sfc.conf b/neutron/files/rocky/plugins/sfc.conf
new file mode 100644
index 0000000..5d4dcf9
--- /dev/null
+++ b/neutron/files/rocky/plugins/sfc.conf
@@ -0,0 +1,9 @@
+{%- if server.sfc.enabled|default(False) %}
+[sfc]
+# Ordered list of sfc drivers
+drivers = {{ server.sfc.sfc_drivers|join(',') }}
+
+[flowclassifier]
+# Ordered list of flow classifier drivers
+drivers = {{ server.sfc.flow_classifier_drivers|join(',') }}
+{%- endif %}
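A pillar sketch matching this template's lookups, assuming the usual neutron:server mapping from map.jinja; the ovs driver name is an illustrative placeholder:

neutron:
  server:
    sfc:
      enabled: true
      sfc_drivers:              # ordered list, joined with ','
      - ovs
      flow_classifier_drivers:
      - ovs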
diff --git a/neutron/files/rocky/sriov_agent.ini b/neutron/files/rocky/sriov_agent.ini
new file mode 100644
index 0000000..388cec4
--- /dev/null
+++ b/neutron/files/rocky/sriov_agent.ini
@@ -0,0 +1,60 @@
+{%- if pillar.neutron.gateway is defined %}
+{%- from "neutron/map.jinja" import gateway as neutron with context %}
+{%- else %}
+{%- from "neutron/map.jinja" import compute as neutron with context %}
+{%- endif %}
+[DEFAULT]
+
+{%- if neutron.logging is defined %}
+{%- set _data = neutron.logging %}
+{%- include "oslo_templates/files/" ~ neutron.version ~ "/oslo/_log.conf" %}
+{%- endif %}
+
+
+[agent]
+
+#
+# From neutron.ml2.sriov.agent
+#
+
+# Extensions list to use (list value)
+{%- set extensions = [] %}
+{%- for section_key in ('sriov_extension', 'extension') %}
+{%-   for ext_name, ext_params in neutron.backend.get(section_key, {}).iteritems() %}
+{%-     if ext_name != 'bagpipe_bgpvpn' %}
+{%-       do extensions.append(ext_name) if ext_params.get('enabled', False) %}
+{%-     endif %}
+{%-   endfor %}
+{%- endfor %}
+{#- NOTE: The section below keeps backward compatibility from when extensions #}
+{#- were separate properties without the neutron:backend:extension pillar #}
+{%- do extensions.append('qos') if neutron.get('qos', True) %}
+extensions = {{ extensions|unique|join(',') }}
+
+
+[sriov_nic]
+
+#
+# From neutron.ml2.sriov.agent
+#
+
+# Comma-separated list of <physical_network>:<network_device> tuples mapping
+# physical network names to the agent's node-specific physical network device
+# interfaces of SR-IOV physical function to be used for VLAN networks. All
+# physical networks listed in network_vlan_ranges on the server should have
+# mappings to appropriate interfaces on each agent. (list value)
+physical_device_mappings = {%- for nic_name, sriov in neutron.backend.sriov.iteritems() %}{{ sriov.physical_network }}:{{ sriov.devname }}{% if not loop.last %},{% endif %}{%- endfor %}
+
+# Comma-separated list of <network_device>:<vfs_to_exclude> tuples, mapping
+# network_device to the agent's node-specific list of virtual functions that
+# should not be used for virtual networking. vfs_to_exclude is a semicolon-
+# separated list of virtual functions to exclude from network_device. The
+# network_device in the mapping should appear in the physical_device_mappings
+# list. (list value)
+#exclude_devices =
+
+
+{%- if pillar.neutron.compute is defined %}
+[securitygroup]
+firewall_driver = noop
+{%- endif %}
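A pillar sketch for the SR-IOV mapping above, assuming the neutron:compute mapping from map.jinja; the nic01 key and the device/network names are illustrative:

neutron:
  compute:
    backend:
      sriov:
        nic01:                       # arbitrary label; only the values are used
          physical_network: physnet2 # illustrative provider network name
          devname: enp5s0f1          # illustrative PF interface name

With these values the template renders physical_device_mappings = physnet2:enp5s0f1.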
diff --git a/neutron/map.jinja b/neutron/map.jinja
index 78e2867..1670a3f 100644
--- a/neutron/map.jinja
+++ b/neutron/map.jinja
@@ -1,3 +1,5 @@
+{%- set os_family = salt['grains.get']('os_family', '') %}
+
 {%- set default_params = {
     'cacert_file': salt['grains.filter_by']({
         'Debian': '/etc/ssl/certs/ca-certificates.crt',
@@ -6,7 +8,7 @@
     'enabled': false }
 %}
 
-{%- if grains.os_family == "Debian" %}
+{%- if os_family == "Debian" %}
 {%- set compute_pkgs_ovn = ['ovn-common', 'ovn-host'] %}
 {%- if pillar.neutron.compute is defined and pillar.neutron.compute.metadata is defined %}
 {%- do compute_pkgs_ovn.extend(['neutron-common', 'python-networking-ovn', 'haproxy']) %}
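The grains.get switch above is defensive: under Salt's strict undefined handling, direct attribute access on a missing grain aborts rendering with a "'dict object' has no attribute" error, while salt['grains.get']('os_family', '') falls back to an empty string so the Debian comparison is simply false. A minimal sketch of the pattern:

{%- set os_family = salt['grains.get']('os_family', '') %}
{%- if os_family == "Debian" %}
{#- Debian-specific package lists go here #}
{%- endif %}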
diff --git a/tests/pillar/compute_legacy.sls b/tests/pillar/compute_legacy.sls
index 1a8aa58..0864a01 100644
--- a/tests/pillar/compute_legacy.sls
+++ b/tests/pillar/compute_legacy.sls
@@ -23,4 +23,4 @@
       host: 127.0.0.1
       password: password
       workers: 2
-    version: mitaka
+    version: mitaka
\ No newline at end of file
diff --git a/tests/pillar/repo_pike.sls b/tests/pillar/repo_pike.sls
index bc006ea..6e29aab 100644
--- a/tests/pillar/repo_pike.sls
+++ b/tests/pillar/repo_pike.sls
@@ -5,4 +5,4 @@
       mirantis_openstack_repo:
         source: "deb http://mirror.mirantis.com/stable/openstack-pike/xenial xenial main"
         architectures: amd64
-        key_url: "http://mirror.mirantis.com/stable/openstack-pike/xenial/archive-pike.key"
+        key_url: "http://mirror.mirantis.com/stable/openstack-pike/xenial/archive-pike.key"
\ No newline at end of file
diff --git a/tests/pillar/repo_rocky.sls b/tests/pillar/repo_rocky.sls
new file mode 100644
index 0000000..fa26377
--- /dev/null
+++ b/tests/pillar/repo_rocky.sls
@@ -0,0 +1,8 @@
+linux:
+  system:
+    enabled: true
+    repo:
+      mirantis_openstack_rocky:
+        source: "deb http://mirror.mirantis.com/nightly/openstack-rocky/xenial xenial main"
+        architectures: amd64
+        key_url: "http://mirror.mirantis.com/nightly/openstack-rocky/xenial/archive-openstack-rocky.key"
\ No newline at end of file