[VMware] initial commit

This commit unhardcodes VMware-related options.

Change-Id: Ibeb35a4c50f6ce7e16885ed4180db110fc009724
Related-Prod: PROD-21446
diff --git a/README.rst b/README.rst
index 34d660e..4d3e86a 100644
--- a/README.rst
+++ b/README.rst
@@ -991,6 +991,29 @@
           user: admin
           password: password
 
+Neutron Server with NSX
+
+.. code-block:: yaml
+
+    neutron:
+      server:
+        backend:
+          engine: vmware
+        core_plugin: vmware_nsxv3
+        vmware:
+          nsx:
+            extension_drivers:
+              - vmware_nsxv3_dns
+            v3:
+              api_password: nsx_password
+              api_user: nsx_username
+              api_managers:
+                01:
+                  scheme: https
+                  host: 192.168.10.120
+                  port: '443'
+              insecure: true
+
 Neutron Keystone region
 
 .. code-block:: yaml
diff --git a/neutron/files/pike/neutron-server b/neutron/files/pike/neutron-server
index d147249..6f48434 100644
--- a/neutron/files/pike/neutron-server
+++ b/neutron/files/pike/neutron-server
@@ -15,6 +15,10 @@
 NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
 {%- endif %}
 
+{%- if server.backend.engine == "vmware" %}
+NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/vmware/nsx.ini"
+{%- endif %}
+
 {%- if server.logging.log_appender %}
 DAEMON_ARGS="${DAEMON_ARGS} --log-config-append=/etc/neutron/logging/logging-neutron-server.conf"
 {%- endif %}
diff --git a/neutron/files/pike/neutron-server.conf.Debian b/neutron/files/pike/neutron-server.conf.Debian
index 7118277..1fb1781 100644
--- a/neutron/files/pike/neutron-server.conf.Debian
+++ b/neutron/files/pike/neutron-server.conf.Debian
@@ -32,6 +32,19 @@
 #auth_strategy = keystone
 auth_strategy = keystone
 
+{%- if server.core_plugin is defined %}
+core_plugin = {{ server.core_plugin }}
+{%- if server.service_plugins is defined %}
+{%- set service_plugins = [] %}
+{%- for sname,service in server.service_plugins.iteritems() %}
+{%- if service.enabled%}
+{%- do service_plugins.append(sname)%}
+{%- endif %}
+{%- endfor %}
+service_plugins = {{ ','.join(service_plugins) }}
+{%- endif %}
+{%- else %}
+
 {% if server.backend.engine == "contrail" %}
 
 api_extensions_path = extensions:/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions:/usr/lib/python2.7/dist-packages/neutron_lbaas/extensions
@@ -59,6 +72,7 @@
 {%- if server.get('bgp_vpn', {}).get('enabled', False) -%},bgpvpn{%- endif -%}
 
 {% endif %}
+{%- endif %}
 
 # The service plugins Neutron will use (list value)
 #service_plugins =
diff --git a/neutron/files/pike/plugins/nsx.ini b/neutron/files/pike/plugins/nsx.ini
new file mode 100644
index 0000000..c8135fe
--- /dev/null
+++ b/neutron/files/pike/plugins/nsx.ini
@@ -0,0 +1,1179 @@
+{%- from "neutron/map.jinja" import server with context %}
+[DEFAULT]
+
+#
+# From nsx
+#
+
+# This is uuid of the default NSX Transport zone that will be used for
+# creating tunneled isolated "Neutron" networks. It needs to be
+# created in NSX before starting Neutron with the nsx plugin. (string
+# value)
+#default_tz_uuid = <None>
+{%- if server.vmware.default_tz_uuid is defined %}
+default_tz_uuid = {{ server.vmware.default_tz_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the NSX L3 Gateway service which will be used for
+# implementing routers and floating IPs (string value)
+#default_l3_gw_service_uuid = <None>
+{%- if server.vmware.default_l3_gw_service_uuid is defined %}
+default_l3_gw_service_uuid = {{ server.vmware.default_l3_gw_service_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the NSX L2 Gateway service which will be used by
+# default for network gateways (string value)
+#default_l2_gw_service_uuid = <None>
+{%- if server.vmware.default_l2_gw_service_uuid is defined %}
+default_l2_gw_service_uuid = {{ server.vmware.default_l2_gw_service_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the Service Cluster which will be used by logical
+# services like dhcp and metadata (string value)
+#default_service_cluster_uuid = <None>
+{%- if server.vmware.default_service_cluster_uuid is defined %}
+default_service_cluster_uuid = {{ server.vmware.default_service_cluster_uuid }}
+{%- endif %}
+
+# Name of the interface on a L2 Gateway transport node which should be
+# used by default when setting up a network connection (string value)
+# Deprecated group/name - [DEFAULT]/default_interface_name
+#nsx_default_interface_name = breth0
+{%- if server.vmware.get('nsx', {}).default_interface_name is defined %}
+nsx_default_interface_name = {{ server.vmware.nsx.default_interface_name }}
+{%- endif %}
+
+# User name for NSX controllers in this cluster (string value)
+# Deprecated group/name - [DEFAULT]/nvp_user
+#nsx_user = admin
+{%- if server.vmware.get('nsx', {}).user is defined %}
+nsx_user = {{ server.vmware.nsx.user }}
+{%- endif %}
+
+# Password for NSX controllers in this cluster (string value)
+# Deprecated group/name - [DEFAULT]/nvp_password
+#nsx_password = admin
+{%- if server.vmware.get('nsx', {}).password is defined %}
+nsx_password = {{ server.vmware.nsx.password }}
+{%- endif %}
+
+# Time before aborting a request on an unresponsive controller
+# (Seconds) (integer value)
+#http_timeout = 75
+{%- if server.vmware.http_timeout is defined %}
+http_timeout = {{ server.vmware.http_timeout }}
+{%- endif %}
+
+# Maximum number of times a particular request should be retried
+# (integer value)
+#retries = 2
+{%- if server.vmware.retries is defined %}
+retries = {{ server.vmware.retries }}
+{%- endif %}
+
+# Maximum number of times a redirect response should be followed
+# (integer value)
+#redirects = 2
+{%- if server.vmware.redirects is defined %}
+redirects = {{ server.vmware.redirects }}
+{%- endif %}
+
+# Comma-separated list of NSX controller endpoints (<ip>:<port>). When
+# port is omitted, 443 is assumed. This option MUST be specified.
+# e.g.: aa.bb.cc.dd, ee.ff.gg.hh.ee:80 (list value)
+# Deprecated group/name - [DEFAULT]/nvp_controllers
+#nsx_controllers =
+{%- set nsx_controllers = []%}
+{%- for _,controller in server.vmware.get('controllers', {}).iteritems() %}
+{%- do nsx_controllers.append(controller.host + ":" + controller.get('port', '443')) %}
+{%- endfor %}
+nsx_controllers = {{ ','.join(nsx_controllers) }}
+
+# Reconnect connection to nsx if not used within this amount of time.
+# (integer value)
+#conn_idle_timeout = 900
+{%- if server.vmware.conn_idle_timeout is defined %}
+conn_idle_timeout = {{ server.vmware.conn_idle_timeout }}
+{%- endif %}
+
+# Specify the class path for the Layer 2 gateway backend driver(i.e.
+# NSXv3/NSX-V). This field will be used when a L2 Gateway service
+# plugin is configured. (string value)
+#nsx_l2gw_driver = <None>
+{%- if server.vmware.get('nsx', {}).l2gw_driver is defined %}
+nsx_l2gw_driver = {{ server.vmware.nsx.l2gw_driver }}
+{%- endif %}
+
+# (Optional) URL for distributed locking coordination resource for
+# lock manager. This value is passed as a parameter to tooz
+# coordinator. By default, value is None and oslo_concurrency is used
+# for single-node lock management. (string value)
+#locking_coordinator_url = <None>
+{%- if server.vmware.get('nsx', {}).locking_coordinator_url is defined %}
+locking_coordinator_url = {{ server.vmware.nsx.locking_coordinator_url }}
+{%- endif %}
+
+# If true, the server then allows the caller to specify the id of
+# resources. This should only be enabled in order to allow one to
+# migrate an existing install of neutron to the nsx-v3 plugin.
+# (boolean value)
+#api_replay_mode = false
+{%- if server.vmware.get('nsx', {}).api_replay_mode is defined %}
+api_replay_mode = {{ server.vmware.nsx.api_replay_mode }}
+{%- endif %}
+
+# An ordered list of extension driver entrypoints to be loaded from
+# the vmware_nsx.extension_drivers namespace. (list value)
+#nsx_extension_drivers =
+{%- if server.vmware.get('nsx', {}).extension_drivers is defined %}
+nsx_extension_drivers = {{ ','.join(server.vmware.nsx.extension_drivers) }}
+{%- endif %}
+
+
+[dvs]
+
+#
+# From nsx
+#
+
+# Hostname or IP address for connection to VMware vCenter host.
+# (string value)
+#host_ip = <None>
+{%- if server.vmware.get('dvs', {}).host_ip is defined %}
+host_ip = {{ server.vmware.dvs.host_ip }}
+{%- endif %}
+
+# Port for connection to VMware vCenter host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+{%- if server.vmware.get('dvs', {}).host_port is defined %}
+host_port = {{ server.vmware.dvs.host_port }}
+{%- endif %}
+
+# Username for connection to VMware vCenter host. (string value)
+#host_username = <None>
+{%- if server.vmware.get('dvs', {}).host_username is defined %}
+host_username = {{ server.vmware.dvs.host_username }}
+{%- endif %}
+
+# Password for connection to VMware vCenter host. (string value)
+#host_password = <None>
+{%- if server.vmware.get('dvs', {}).host_password is defined %}
+host_password = {{ server.vmware.dvs.host_password }}
+{%- endif %}
+
+# The interval used for polling of remote tasks. (floating point
+# value)
+#task_poll_interval = 0.5
+{%- if server.vmware.get('dvs', {}).task_poll_interval is defined %}
+task_poll_interval = {{ server.vmware.dvs.task_poll_interval }}
+{%- endif %}
+
+# Specify a CA bundle file to use in verifying the vCenter server
+# certificate. (string value)
+#ca_file = <None>
+{%- if server.vmware.get('dvs', {}).ca_file is defined %}
+ca_file = {{ server.vmware.dvs.ca_file }}
+{%- endif %}
+
+# If true, the vCenter server certificate is not verified. If false,
+# then the default CA truststore is used for verification. This option
+# is ignored if "ca_file" is set. (boolean value)
+#insecure = false
+{%- if server.vmware.get('dvs', {}).insecure is defined %}
+insecure = {{ server.vmware.dvs.insecure }}
+{%- endif %}
+
+# The number of times we retry on failures, e.g., socket error, etc.
+# (integer value)
+#api_retry_count = 10
+{%- if server.vmware.get('dvs', {}).api_retry_count is defined %}
+api_retry_count = {{ server.vmware.dvs.api_retry_count }}
+{%- endif %}
+
+# The name of the preconfigured DVS. (string value)
+#dvs_name = <None>
+{%- if server.vmware.get('dvs', {}).dvs_name is defined %}
+dvs_name = {{ server.vmware.dvs.dvs_name }}
+{%- endif %}
+
+# This value should not be set. It is just required for ensuring that
+# the DVS plugin works with the generic NSX metadata code (string
+# value)
+#metadata_mode = <None>
+{%- if server.vmware.get('dvs', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.dvs.metadata_mode }}
+{%- endif %}
+
+
+[nsx]
+
+#
+# From nsx
+#
+
+# Maximum number of ports of a logical switch on a bridged transport
+# zone. The recommended value for this parameter varies with NSX
+# version.
+# Please use:
+# NSX 2.x -> 64
+# NSX 3.0, 3.1 -> 5000
+# NSX 3.2 -> 10000 (integer value)
+#max_lp_per_bridged_ls = 5000
+{%- if server.vmware.get('nsx', {}).max_lp_per_bridged_ls is defined %}
+max_lp_per_bridged_ls = {{ server.vmware.nsx.max_lp_per_bridged_ls }}
+{%- endif %}
+
+# Maximum number of ports of a logical switch on an overlay transport
+# zone (integer value)
+#max_lp_per_overlay_ls = 256
+{%- if server.vmware.get('nsx', {}).max_lp_per_overlay_ls is defined %}
+max_lp_per_overlay_ls = {{ server.vmware.nsx.max_lp_per_overlay_ls }}
+{%- endif %}
+
+# Maximum concurrent connections to each NSX controller. (integer
+# value)
+#concurrent_connections = 10
+{%- if server.vmware.get('nsx', {}).concurrent_connections is defined %}
+concurrent_connections = {{ server.vmware.nsx.concurrent_connections }}
+{%- endif %}
+
+# Number of seconds a generation id should be valid for (default -1
+# meaning do not time out) (integer value)
+# Deprecated group/name - [NVP]/nvp_gen_timeout
+#nsx_gen_timeout = -1
+{%- if server.vmware.get('nsx', {}).nsx_gen_timeout is defined %}
+nsx_gen_timeout = {{ server.vmware.nsx.nsx_gen_timeout }}
+{%- endif %}
+
+# If set to access_network this enables a dedicated connection to the
+# metadata proxy for metadata server access via Neutron router. If set
+# to dhcp_host_route this enables host route injection via the dhcp
+# agent. This option is only useful if running on a host that does not
+# support namespaces otherwise access_network should be used. (string
+# value)
+#metadata_mode = access_network
+{%- if server.vmware.get('nsx', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.nsx.metadata_mode }}
+{%- endif %}
+
+# The default network tranport type to use (stt, gre, bridge,
+# ipsec_gre, or ipsec_stt) (string value)
+#default_transport_type = stt
+{%- if server.vmware.get('nsx', {}).default_transport_type is defined %}
+default_transport_type = {{ server.vmware.nsx.default_transport_type }}
+{%- endif %}
+
+# Specifies in which mode the plugin needs to operate in order to
+# provide DHCP and metadata proxy services to tenant instances. If
+# 'agent' is chosen (default) the NSX plugin relies on external RPC
+# agents (i.e. dhcp and metadata agents) to provide such services. In
+# this mode, the plugin supports API extensions 'agent' and
+# 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in
+# Icehouse), the plugin will use NSX logical services for DHCP and
+# metadata proxy. This simplifies the deployment model for Neutron, in
+# that the plugin no longer requires the RPC agents to operate. When
+# 'agentless' is chosen, the config option metadata_mode becomes
+# ineffective. The 'agentless' mode works only on NSX 4.1.
+# Furthermore, a 'combined' mode is also provided and is used to
+# support existing deployments that want to adopt the agentless mode.
+# With this mode, existing networks keep being served by the existing
+# infrastructure (thus preserving backward compatibility, whereas new
+# networks will be served by the new infrastructure. Migration tools
+# are provided to 'move' one network from one model to another; with
+# agent_mode set to 'combined', option 'network_auto_schedule' in
+# neutron.conf is ignored, as new networks will no longer be scheduled
+# to existing dhcp agents. (string value)
+#agent_mode = agent
+{%- if server.vmware.get('nsx', {}).agent_mode is defined %}
+agent_mode = {{ server.vmware.nsx.agent_mode }}
+{%- endif %}
+
+# Specifies which mode packet replication should be done in. If set to
+# service a service node is required in order to perform packet
+# replication. This can also be set to source if one wants replication
+# to be performed locally (NOTE: usually only useful for testing if
+# one does not want to deploy a service node). In order to leverage
+# distributed routers, replication_mode should be set to 'service'.
+# (string value)
+# Allowed values: service, source
+#replication_mode = service
+{%- if server.vmware.get('nsx', {}).replication_mode is defined %}
+replication_mode = {{ server.vmware.nsx.replication_mode }}
+{%- endif %}
+
+# The QoS rules peak bandwidth value will be the configured maximum
+# bandwidth of the QoS rule, multiplied by this value. Value must be
+# bigger than 1 (floating point value)
+# Minimum value: 1
+#qos_peak_bw_multiplier = 2.0
+{%- if server.vmware.get('nsx', {}).qos_peak_bw_multiplier is defined %}
+qos_peak_bw_multiplier = {{ server.vmware.nsx.qos_peak_bw_multiplier }}
+{%- endif %}
+
+
+[nsx_dhcp]
+
+#
+# From nsx
+#
+
+# Comma separated list of additional domain name servers (list value)
+#extra_domain_name_servers =
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).extra_domain_name_servers is defined %}
+extra_domain_name_servers = {{ server.vmware.nsx.dhcp.extra_domain_name_servers }}
+{%- endif %}
+
+# Domain to use for building the hostnames (string value)
+#domain_name = openstacklocal
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).domain_name is defined %}
+domain_name = {{ server.vmware.nsx.dhcp.domain_name }}
+{%- endif %}
+
+# Default DHCP lease time (integer value)
+#default_lease_time = 43200
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).default_lease_time is defined %}
+default_lease_time = {{ server.vmware.nsx.dhcp.default_lease_time }}
+{%- endif %}
+
+
+[nsx_lsn]
+
+#
+# From nsx
+#
+
+# Pull LSN information from NSX in case it is missing from the local
+# data store. This is useful to rebuild the local store in case of
+# server recovery. (boolean value)
+#sync_on_missing_data = false
+{%- if server.vmware.get('nsx', {}).get('lsn', {}).sync_on_missing_data is defined %}
+sync_on_missing_data = {{ server.vmware.nsx.lsn.sync_on_missing_data }}
+{%- endif %}
+
+
+[nsx_metadata]
+
+#
+# From nsx
+#
+
+# IP address used by Metadata server. (string value)
+#metadata_server_address = 127.0.0.1
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).server_address is defined %}
+metadata_server_address = {{ server.vmware.nsx.metadata.server_address }}
+{%- endif %}
+
+# TCP Port used by Metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_server_port = 8775
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).server_port is defined %}
+metadata_server_port = {{ server.vmware.nsx.metadata.server_port }}
+{%- endif %}
+
+# When proxying metadata requests, Neutron signs the Instance-ID
+# header with a shared secret to prevent spoofing. You may select any
+# string for a secret, but it MUST match with the configuration used
+# by the Metadata server. (string value)
+#metadata_shared_secret =
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).shared_secret is defined %}
+metadata_shared_secret = {{ server.vmware.nsx.metadata.shared_secret }}
+{%- endif %}
+
+
+[nsx_sync]
+
+#
+# From nsx
+#
+
+# Interval in seconds between runs of the status synchronization task.
+# The plugin will aim at resynchronizing operational status for all
+# resources in this interval, and it should be therefore large enough
+# to ensure the task is feasible. Otherwise the plugin will be
+# constantly synchronizing resource status, ie: a new task is started
+# as soon as the previous is completed. If this value is set to 0, the
+# state synchronization thread for this Neutron instance will be
+# disabled. (integer value)
+#state_sync_interval = 10
+{%- if server.vmware.get('nsx', {}).get('sync', {}).state_sync_interval is defined %}
+state_sync_interval = {{ server.vmware.nsx.sync.state_sync_interval }}
+{%- endif %}
+
+# Random additional delay between two runs of the state
+# synchronization task. An additional wait time between 0 and
+# max_random_sync_delay seconds will be added on top of
+# state_sync_interval. (integer value)
+#max_random_sync_delay = 0
+{%- if server.vmware.get('nsx', {}).get('sync', {}).max_random_sync_delay is defined %}
+max_random_sync_delay = {{ server.vmware.nsx.sync.max_random_sync_delay }}
+{%- endif %}
+
+# Minimum delay, in seconds, between two status synchronization
+# requests for NSX. Depending on chunk size, controller load, and
+# other factors, state synchronization requests might be pretty heavy.
+# This means the controller might take time to respond, and its load
+# might be quite increased by them. This parameter allows to specify a
+# minimum interval between two subsequent requests. The value for this
+# parameter must never exceed state_sync_interval. If this does, an
+# error will be raised at startup. (integer value)
+#min_sync_req_delay = 1
+{%- if server.vmware.get('nsx', {}).get('sync', {}).min_sync_req_delay is defined %}
+min_sync_req_delay = {{ server.vmware.nsx.sync.min_sync_req_delay }}
+{%- endif %}
+
+# Minimum number of resources to be retrieved from NSX in a single
+# status synchronization request. The actual size of the chunk will
+# increase if the number of resources is such that using the minimum
+# chunk size will cause the interval between two requests to be less
+# than min_sync_req_delay (integer value)
+#min_chunk_size = 500
+{%- if server.vmware.get('nsx', {}).get('sync', {}).min_chunk_size is defined %}
+min_chunk_size = {{ server.vmware.nsx.sync.min_chunk_size }}
+{%- endif %}
+
+# Enable this option to allow punctual state synchronization on show
+# operations. In this way, show operations will always fetch the
+# operational status of the resource from the NSX backend, and this
+# might have a considerable impact on overall performance. (boolean
+# value)
+#always_read_status = false
+{%- if server.vmware.get('nsx', {}).get('sync', {}).always_read_status is defined %}
+always_read_status = {{ server.vmware.nsx.sync.always_read_status }}
+{%- endif %}
+
+
+[nsx_v3]
+
+#
+# From nsx
+#
+
+# User names for the NSX managers (list value)
+#nsx_api_user = admin
+{%- if server.vmware.get('nsx', {}).get('v3', {}).api_user is defined %}
+nsx_api_user = {{ server.vmware.nsx.v3.api_user }}
+{%- endif %}
+
+# Passwords for the NSX managers (list value)
+#nsx_api_password = default
+{%- if server.vmware.get('nsx', {}).get('v3', {}).api_password is defined %}
+nsx_api_password = {{ server.vmware.nsx.v3.api_password }}
+{%- endif %}
+
+# IP address of one or more NSX managers separated by commas. The IP
+# address should be of the form:
+# [<scheme>://]<ip_address>[:<port>]
+# If scheme is not provided https is used. If port is not provided
+# port 80 is used for http and port 443 for https. (list value)
+#nsx_api_managers =
+{%- set nsx_api_managers = [] %}
+{%- for _, manager in server.vmware.get('nsx', {}).get('v3', {'api_managers': {}}).api_managers.iteritems() %}
+{%- do nsx_api_managers.append(manager.scheme + "://" + manager.host + ":" + manager.port) %}
+{%- endfor %}
+nsx_api_managers = {{ ','.join(nsx_api_managers) }}
+
+# Use client certificate in NSX manager authentication (boolean value)
+#nsx_use_client_auth = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).use_client_auth is defined %}
+nsx_use_client_auth = {{ server.vmware.nsx.v3.use_client_auth }}
+{%- endif %}
+
+# File to contain client certificate and private key (string value)
+#nsx_client_cert_file =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_file is defined %}
+nsx_client_cert_file = {{ server.vmware.nsx.v3.client_cert_file }}
+{%- endif %}
+
+# password for private key encryption (string value)
+#nsx_client_cert_pk_password =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_pk_password is defined %}
+nsx_client_cert_pk_password = {{ server.vmware.nsx.v3.client_cert_pk_password }}
+{%- endif %}
+
+# Storage type for client certificate sensitive data (string value)
+# Allowed values: nsx-db, none
+#nsx_client_cert_storage = nsx-db
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_storage is defined %}
+nsx_client_cert_storage = {{ server.vmware.nsx.v3.client_cert_storage }}
+{%- endif %}
+
+# This is the name or UUID of the default NSX overlay transport zone
+# that will be used for creating tunneled isolated Neutron networks.
+# It needs to be created in NSX before starting Neutron with the NSX
+# plugin. (string value)
+#default_overlay_tz = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_overlay_tz is defined %}
+default_overlay_tz = {{ server.vmware.nsx.v3.default_overlay_tz }}
+{%- endif %}
+
+# (Optional) Only required when creating VLAN or flat provider
+# networks. Name or UUID of default NSX VLAN transport zone that will
+# be used for bridging between Neutron networks, if no physical
+# network has been specified (string value)
+#default_vlan_tz = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_vlan_tz is defined %}
+default_vlan_tz = {{ server.vmware.nsx.v3.default_vlan_tz }}
+{%- endif %}
+
+# (Optional) Name or UUID of the default NSX bridge cluster that will
+# be used to perform L2 gateway bridging between VXLAN and VLAN
+# networks. If default bridge cluster UUID is not specified, admin
+# will have to manually create a L2 gateway corresponding to a NSX
+# Bridge Cluster using L2 gateway APIs. This field must be specified
+# on one of the active neutron servers only. (string value)
+#default_bridge_cluster = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_bridge_cluster is defined %}
+default_bridge_cluster = {{ server.vmware.nsx.v3.default_bridge_cluster }}
+{%- endif %}
+
+# Maximum number of times to retry API requests upon stale revision
+# errors. (integer value)
+#retries = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).retries is defined %}
+retries = {{ server.vmware.nsx.v3.retries }}
+{%- endif %}
+
+# Specify a CA bundle files to use in verifying the NSX Managers
+# server certificate. This option is ignored if "insecure" is set to
+# True. If "insecure" is set to False and ca_file is unset, the system
+# root CAs will be used to verify the server certificate. (list value)
+#ca_file = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).ca_file is defined %}
+ca_file = {{ server.vmware.nsx.v3.ca_file }}
+{%- endif %}
+
+# If true, the NSX Manager server certificate is not verified. If
+# false the CA bundle specified via "ca_file" will be used or if
+# unsest the default system root CAs will be used. (boolean value)
+#insecure = true
+{%- if server.vmware.get('nsx', {}).get('v3', {}).insecure is defined %}
+insecure = {{ server.vmware.nsx.v3.insecure }}
+{%- endif %}
+
+# The time in seconds before aborting a HTTP connection to a NSX
+# manager. (integer value)
+#http_timeout = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_timeout is defined %}
+http_timeout = {{ server.vmware.nsx.v3.http_timeout }}
+{%- endif %}
+
+
+# The time in seconds before aborting a HTTP read response from a NSX
+# manager. (integer value)
+#http_read_timeout = 180
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_read_timeout is defined %}
+http_read_timeout = {{ server.vmware.nsx.v3.http_read_timeout }}
+{%- endif %}
+
+# Maximum number of times to retry a HTTP connection. (integer value)
+#http_retries = 3
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_retries is defined %}
+http_retries = {{ server.vmware.nsx.v3.http_retries }}
+{%- endif %}
+
+# Maximum concurrent connections to each NSX manager. (integer value)
+#concurrent_connections = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).concurrent_connections is defined %}
+concurrent_connections = {{ server.vmware.nsx.v3.concurrent_connections }}
+{%- endif %}
+
+# The amount of time in seconds to wait before ensuring connectivity
+# to the NSX manager if no manager connection has been used. (integer
+# value)
+#conn_idle_timeout = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).conn_idle_timeout is defined %}
+conn_idle_timeout = {{ server.vmware.nsx.v3.conn_idle_timeout }}
+{%- endif %}
+
+# Number of times a HTTP redirect should be followed. (integer value)
+#redirects = 2
+{%- if server.vmware.get('nsx', {}).get('v3', {}).redirects is defined %}
+redirects = {{ server.vmware.nsx.v3.redirects }}
+{%- endif %}
+
+# Name or UUID of the default tier0 router that will be used for
+# connecting to tier1 logical routers and configuring external
+# networks (string value)
+#default_tier0_router = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_tier0_router is defined %}
+default_tier0_router = {{ server.vmware.nsx.v3.default_tier0_router }}
+{%- endif %}
+
+# (Optional) The number of nested groups which are used by the plugin,
+# each Neutron security-groups is added to one nested group, and each
+# nested group can contain as maximum as 500 security-groups,
+# therefore, the maximum number of security groups that can be created
+# is 500 * number_of_nested_groups. The default is 8 nested groups,
+# which allows a maximum of 4k security-groups, to allow creation of
+# more security-groups, modify this figure. (integer value)
+#number_of_nested_groups = 8
+{%- if server.vmware.get('nsx', {}).get('v3', {}).number_of_nested_groups is defined %}
+number_of_nested_groups = {{ server.vmware.nsx.v3.number_of_nested_groups }}
+{%- endif %}
+
+# If set to access_network this enables a dedicated connection to the
+# metadata proxy for metadata server access via Neutron router. If set
+# to dhcp_host_route this enables host route injection via the dhcp
+# agent. This option is only useful if running on a host that does not
+# support namespaces otherwise access_network should be used. (string
+# value)
+#metadata_mode = access_network
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.nsx.v3.metadata_mode }}
+{%- endif %}
+
+# If true, an internal metadata network will be created for a router
+# only when the router is attached to a DHCP-disabled subnet. (boolean
+# value)
+#metadata_on_demand = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_on_demand is defined %}
+metadata_on_demand = {{ server.vmware.nsx.v3.metadata_on_demand }}
+{%- endif %}
+
+# If true, DHCP and metadata proxy services will be provided by NSX
+# backend. (boolean value)
+#native_dhcp_metadata = true
+{%- if server.vmware.get('nsx', {}).get('v3', {}).native_dhcp_metadata is defined %}
+native_dhcp_metadata = {{ server.vmware.nsx.v3.native_dhcp_metadata }}
+{%- endif %}
+
+# The metadata route used for native metadata proxy service. (string
+# value)
+#native_metadata_route = 169.254.169.254/31
+{%- if server.vmware.get('nsx', {}).get('v3', {}).native_metadata_route is defined %}
+native_metadata_route = {{ server.vmware.nsx.v3.native_metadata_route }}
+{%- endif %}
+
+# This is the name or UUID of the NSX DHCP Profile that will be used
+# to enable native DHCP service. It needs to be created in NSX before
+# starting Neutron with the NSX plugin (string value)
+#dhcp_profile = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_profile is defined %}
+dhcp_profile = {{ server.vmware.nsx.v3.dhcp_profile }}
+{%- endif %}
+
+# DHCP default lease time. (integer value)
+#dhcp_lease_time = 86400
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_lease_time is defined %}
+dhcp_lease_time = {{ server.vmware.nsx.v3.dhcp_lease_time }}
+{%- endif %}
+
+# Domain to use for building the hostnames. (string value)
+#dns_domain = openstacklocal
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dns_domain is defined %}
+dns_domain = {{ server.vmware.nsx.v3.dns_domain }}
+{%- endif %}
+
+# List of nameservers to configure for the DHCP binding entries. These
+# will be used if there are no nameservers defined on the subnet.
+# (list value)
+#nameservers =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).nameservers is defined %}
+nameservers = {{ ','.join(server.vmware.nsx.v3.nameservers) }}
+{%- endif %}
+
+
+# This is the name or UUID of the NSX Metadata Proxy that will be used
+# to enable native metadata service. It needs to be created in NSX
+# before starting Neutron with the NSX plugin. (string value)
+#metadata_proxy = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_proxy is defined%}
+metadata_proxy = {{ server.vmware.nsx.v3.metadata_proxy }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall rule for security-
+# groups blocked traffic is logged. (boolean value)
+#log_security_groups_blocked_traffic = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).log_security_groups_blocked_traffic is defined %}
+log_security_groups_blocked_traffic = {{ server.vmware.nsx.v3.log_security_groups_blocked_traffic }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall security-groups
+# rules are logged. (boolean value)
+#log_security_groups_allowed_traffic = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).log_security_groups_allowed_traffic is defined %}
+log_security_groups_allowed_traffic = {{ server.vmware.nsx.v3.log_security_groups_allowed_traffic }}
+{%- endif %}
+
+# Optional parameter defining the networks availability zones names
+# for the native dhcp configuration. The configuration of each zone
+# will be under a group names [az:<name>] (list value)
+#availability_zones =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).availability_zones is defined %}
+availability_zones = {{ server.vmware.nsx.v3.availability_zones }}
+{%- endif %}
+
+# When True, the configured transport zones, router and profiles will
+# be found by tags on the NSX. The scope of the tag will be the value
+# of search_objects_scope. The value of the search tag will be the
+# name configured in each respective configuration. (boolean value)
+#init_objects_by_tags = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).init_objects_by_tags is defined %}
+init_objects_by_tags = {{ server.vmware.nsx.v3.init_objects_by_tags }}
+{%- endif %}
+ 
+# This is the scope of the tag that will be used for finding the
+# objects uuids on the NSX during plugin init. (string value)
+#search_objects_scope = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).search_objects_scope is defined %}
+search_objects_scope = {{ server.vmware.nsx.v3.search_objects_scope }}
+{%- endif %}
+
+# Optional parameter defining a list switching profiles uuids that
+# will be attached to all neutron created nsx ports. (list value)
+#switching_profiles =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).switching_profiles is defined %}
+switching_profiles = {{ server.vmware.nsx.v3.switching_profiles }}
+{%- endif %}
+
+
+[nsxv]
+
+#
+# From nsx
+#
+
+# User name for NSXv manager (string value)
+#user = admin
+{%- if server.vmware.get('nsxv', {}).user is defined %}
+user = {{ server.vmware.nsxv.user }}
+{%- endif %}
+
+# Password for NSXv manager (string value)
+#password = default
+{%- if server.vmware.get('nsxv', {}).password is defined %}
+password = {{ server.vmware.nsxv.password }}
+{%- endif %}
+
+# URL for NSXv manager (string value)
+#manager_uri = <None>
+{%- if server.vmware.get('nsxv', {}).manager_uri is defined %}
+manager_uri = {{ server.vmware.nsxv.manager_uri }}
+{%- endif %}
+
+# Specify a CA bundle file to use in verifying the NSXv server
+# certificate. (string value)
+#ca_file = <None>
+{%- if server.vmware.get('nsxv', {}).ca_file is defined %}
+ca_file = {{ server.vmware.nsxv.ca_file }}
+{%- endif %}
+
+# If true, the NSXv server certificate is not verified. If false, then
+# the default CA truststore is used for verification. This option is
+# ignored if "ca_file" is set. (boolean value)
+#insecure = true
+{%- if server.vmware.get('nsxv', {}).insecure is defined %}
+insecure = {{ server.vmware.nsxv.insecure }}
+{%- endif %}
+
+# (Required) Parameter listing the IDs of the clusters which are used
+# by OpenStack. (list value)
+#cluster_moid =
+{%- if server.vmware.get('nsxv', {}).cluster_moid is defined %}
+cluster_moid = {{ server.vmware.nsxv.cluster_moid }}
+{%- endif %}
+
+# Required parameter identifying the ID of datacenter to deploy NSX
+# Edges (string value)
+#datacenter_moid = <None>
+{%- if server.vmware.get('nsxv', {}).datacenter_moid is defined %}
+datacenter_moid = {{ server.vmware.nsxv.datacenter_moid }}
+{%- endif %}
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges (string value)
+#deployment_container_id = <None>
+{%- if server.vmware.get('nsxv', {}).deployment_container_id is defined %}
+deployment_container_id = {{ server.vmware.nsxv.deployment_container_id }}
+{%- endif %}
+
+# Optional parameter identifying the ID of resource to deploy NSX
+# Edges (string value)
+#resource_pool_id = <None>
+{%- if server.vmware.get('nsxv', {}).resource_pool_id is defined %}
+resource_pool_id = {{ server.vmware.nsxv.resource_pool_id }}
+{%- endif %}
+
+# Optional parameter defining the availability zones names for
+# deploying NSX Edges. The configuration of each zone will be under a
+# group names [az:<name>] (list value)
+#availability_zones =
+
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges (string value)
+#datastore_id = <None>
+{%- if server.vmware.get('nsxv', {}).datastore_id is defined %}
+datastore_id = {{ server.vmware.nsxv.datastore_id }}
+{%- endif %}
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges in addition to datastore_id in case edge_ha is True (string
+# value)
+#ha_datastore_id = <None>
+{%- if server.vmware.get('nsxv', {}).ha_datastore_id is defined %}
+ha_datastore_id = {{ server.vmware.nsxv.ha_datastore_id }}
+{%- endif %}
+
+# When True and in case edge_ha is True, half of the edges will be
+# placed in the primary datastore as active and the other half will be
+# placed in the ha_datastore (boolean value)
+#ha_placement_random = false
+{%- if server.vmware.get('nsxv', {}).ha_placement_random is defined %}
+ha_placement_random = {{ server.vmware.nsxv.ha_placement_random }}
+{%- endif %}
+
+# (Optional) If edge HA is used then this will ensure that
+# active/backup edges are placed in the listed host groups. At least 2
+# predefined host groups need to be configured. (list value)
+#edge_host_groups =
+
+# (Required) Network ID for physical network connectivity (string
+# value)
+#external_network = <None>
+{%- if server.vmware.get('nsxv', {}).external_network is defined %}
+external_network = {{ server.vmware.nsxv.external_network }}
+{%- endif %}
+
+# (Optional) Asynchronous task status check interval. Default is 2000
+# (millisecond) (integer value)
+#task_status_check_interval = 2000
+{%- if server.vmware.get('nsxv', {}).task_status_check_interval is defined %}
+task_status_check_interval = {{ server.vmware.nsxv.task_status_check_interval }}
+{%- endif %}
+
+# (Optional) Network scope ID for VXLAN virtual wires (string value)
+#vdn_scope_id = <None>
+{%- if server.vmware.get('nsxv', {}).vdn_scope_id is defined %}
+vdn_scope_id = {{ server.vmware.nsxv.vdn_scope_id }}
+{%- endif %}
+
+# (Optional) DVS MoRef ID for DVS connected to Management / Edge
+# cluster (string value)
+#dvs_id = <None>
+{%- if server.vmware.get('nsxv', {}).dvs_id is defined %}
+dvs_id = {{ server.vmware.nsxv.dvs_id }}
+{%- endif %}
+
+# (Optional) Maximum number of sub interfaces supported per vnic in
+# edge. (integer value)
+# Minimum value: 1
+# Maximum value: 110
+#maximum_tunnels_per_vnic = 20
+{%- if server.vmware.get('nsxv', {}).maximum_tunnels_per_vnic is defined %}
+maximum_tunnels_per_vnic = {{ server.vmware.nsxv.maximum_tunnels_per_vnic }}
+{%- endif %}
+
+# Defines edge pool's management range with the format:
+# <edge_type>:[edge_size]:<min_edges>:<max_edges>.edge_type:
+# service,vdr. edge_size: compact, large, xlarge, quadlarge and
+# default is compact. By default, edge pool manager would manage
+# service edge with compact size and distributed edge with compact
+# size as following: service:compact:4:10,vdr:compact:4:10 (list
+# value)
+#backup_edge_pool = service:compact:4:10,vdr:compact:4:10
+
+# Maximum number of API retries on endpoint. (integer value)
+#retries = 20
+{%- if server.vmware.get('nsxv', {}).retries is defined %}
+retries = {{ server.vmware.nsxv.retries }}
+{%- endif %}
+
+# (Optional) Portgroup MoRef ID for metadata proxy management network
+# (string value)
+#mgt_net_moid = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_moid is defined %}
+mgt_net_moid = {{ server.vmware.nsxv.mgt_net_moid }}
+{%- endif %}
+
+# (Optional) Comma separated list of management network IP addresses
+# for metadata proxy. (list value)
+#mgt_net_proxy_ips =
+
+# (Optional) Management network netmask for metadata proxy. (string
+# value)
+#mgt_net_proxy_netmask = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_proxy_netmask is defined %}
+mgt_net_proxy_netmask = {{ server.vmware.nsxv.mgt_net_proxy_netmask }}
+{%- endif %}
+
+# (Optional) Management network default gateway for metadata proxy.
+# (string value)
+#mgt_net_default_gateway = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_default_gateway is defined %}
+mgt_net_default_gateway = {{ server.vmware.nsxv.mgt_net_default_gateway }}
+{%- endif %}
+
+# (Optional) IP addresses used by Nova metadata service. (list value)
+#nova_metadata_ips =
+
+# (Optional) TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
+{%- if server.vmware.get('nsxv', {}).nova_metadata_port is defined %}
+nova_metadata_port = {{ server.vmware.nsxv.nova_metadata_port }}
+{%- endif %}
+
+# (Optional) Shared secret to sign metadata requests. (string value)
+#metadata_shared_secret = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_shared_secret is defined %}
+metadata_shared_secret = {{ server.vmware.nsxv.metadata_shared_secret }}
+{%- endif %}
+
+# (Optional) If True, the end to end connection for metadata service
+# is not verified. If False, the default CA truststore is used for
+# verification. (boolean value)
+#metadata_insecure = true
+{%- if server.vmware.get('nsxv', {}).metadata_insecure is defined %}
+metadata_insecure = {{ server.vmware.nsxv.metadata_insecure }}
+{%- endif %}
+
+# (Optional) Client certificate to use when metadata connection is to
+# be verified. If not provided, a self signed certificate will be
+# used. (string value)
+#metadata_nova_client_cert = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_nova_client_cert is defined %}
+metadata_nova_client_cert = {{ server.vmware.nsxv.metadata_nova_client_cert }}
+{%- endif %}
+
+# (Optional) Private key of client certificate. (string value)
+#metadata_nova_client_priv_key = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_nova_client_priv_key is defined %}
+metadata_nova_client_priv_key = {{ server.vmware.nsxv.metadata_nova_client_priv_key }}
+{%- endif %}
+
+# (Optional) If True then plugin will use NSXV spoofguard component
+# for port-security feature. (boolean value)
+#spoofguard_enabled = true
+{%- if server.vmware.get('nsxv', {}).spoofguard_enabled is defined %}
+spoofguard_enabled = {{ server.vmware.nsxv.spoofguard_enabled }}
+{%- endif %}
+
+# (Optional) If True then plugin will use NSXV exclude list component
+# when port security is disabled and spoofguard is enabled. (boolean
+# value)
+#use_exclude_list = true
+{%- if server.vmware.get('nsxv', {}).use_exclude_list is defined %}
+use_exclude_list = {{ server.vmware.nsxv.use_exclude_list }}
+{%- endif %}
+
+# Ordered list of router_types to allocate as tenant routers. It
+# limits the router types that the Nsxv can support for tenants:
+# distributed: router is supported by distributed edge at the backend.
+# shared: multiple routers share the same service edge at the backend.
+# exclusive: router exclusively occupies one service edge at the
+# backend.
+# Nsxv would select the first available router type from
+# tenant_router_types list if router-type is not specified. If the
+# tenant defines the router type with '--distributed','--router_type
+# exclusive' or '--router_type shared', Nsxv would verify that the
+# router type is in tenant_router_types. Admin supports all these
+# three router types. (list value)
+#tenant_router_types = shared,distributed,exclusive
+
+# (Optional) Username to configure for Edge appliance login. (string
+# value)
+#edge_appliance_user = <None>
+{%- if server.vmware.get('nsxv', {}).edge_appliance_user is defined %}
+edge_appliance_user = {{ server.vmware.nsxv.edge_appliance_user }}
+{%- endif %}
+
+# (Optional) Password to configure for Edge appliance login. (string
+# value)
+#edge_appliance_password = <None>
+{%- if server.vmware.get('nsxv', {}).edge_appliance_password is defined %}
+edge_appliance_password = {{ server.vmware.nsxv.edge_appliance_password }}
+{%- endif %}
+
+# (Optional) DHCP default lease time. (integer value)
+#dhcp_lease_time = 86400
+{%- if server.vmware.get('nsxv', {}).dhcp_lease_time is defined %}
+dhcp_lease_time = {{ server.vmware.nsxv.dhcp_lease_time }}
+{%- endif %}
+
+# If True, the server instance will attempt to initialize the metadata
+# infrastructure (boolean value)
+#metadata_initializer = true
+{%- if server.vmware.get('nsxv', {}).metadata_initializer is defined %}
+metadata_initializer = {{ server.vmware.nsxv.metadata_initializer }}
+{%- endif %}
+
+# List of tcp ports, to be allowed access to the metadata proxy, in
+# addition to the default 80,443,8775 tcp ports (list value)
+#metadata_service_allowed_ports =
+{%- if server.vmware.get('nsxv', {}).metadata_service_allowed_ports is defined %}
+metadata_service_allowed_ports = {{ ','.join(server.vmware.nsxv.metadata_service_allowed_ports) }}
+{%- endif %}
+
+# (Optional) Enable HA for NSX Edges. (boolean value)
+#edge_ha = false
+{%- if server.vmware.get('nsxv', {}).edge_ha is defined %}
+edge_ha = {{ server.vmware.nsxv.edge_ha }}
+{%- endif %}
+
+# (Optional) Edge appliance size to be used for creating exclusive
+# router. Valid values: ['compact', 'large', 'xlarge', 'quadlarge'].
+# This exclusive_router_appliance_size will be picked up if --router-
+# size parameter is not specified while doing neutron router-create
+# (string value)
+# Allowed values: compact, large, xlarge, quadlarge
+#exclusive_router_appliance_size = compact
+{%- if server.vmware.get('nsxv', {}).exclusive_router_appliance_size is defined %}
+exclusive_router_appliance_size = {{ server.vmware.nsxv.exclusive_router_appliance_size }}
+{%- endif %}
+
+# (Optional) Edge appliance size to be used for creating shared router
+# edge. Valid values: ['compact', 'large', 'xlarge', 'quadlarge'].
+# (string value)
+# Possible values:
+# compact - <No description provided>
+# large - <No description provided>
+# xlarge - <No description provided>
+# quadlarge - <No description provided>
+#shared_router_appliance_size = compact
+{%- if server.vmware.get('nsxv', {}).shared_router_appliance_size is defined %}
+shared_router_appliance_size = {{ server.vmware.nsxv.shared_router_appliance_size }}
+{%- endif %}
+
+# (Optional) Use this search domain if there is no search domain
+# configured on the subnet. (string value)
+#dns_search_domain = <None>
+{%- if server.vmware.get('nsxv', {}).dns_search_domain is defined %}
+dns_search_domain = {{ server.vmware.nsxv.dns_search_domain }}
+{%- endif %}
+
+# List of nameservers to configure for the DHCP binding entries. These
+# will be used if there are no nameservers defined on the subnet.
+# (list value)
+#nameservers =
+{%- if server.vmware.get('nsxv', {}).nameservers is defined %}
+nameservers = {{ ','.join(server.vmware.nsxv.nameservers) }}
+{%- endif %}
+
+# If True, dvs features will be supported which involves configuring
+# the dvs backing nsx_v directly. If False, only features exposed via
+# nsx_v will be supported (boolean value)
+#use_dvs_features = false
+{%- if server.vmware.get('nsxv', {}).use_dvs_features is defined %}
+use_dvs_features = {{ server.vmware.nsxv.use_dvs_features }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall rule for security-
+# groups blocked traffic is logged. (boolean value)
+#log_security_groups_blocked_traffic = false
+{%- if server.vmware.get('nsxv', {}).log_security_groups_blocked_traffic is defined %}
+log_security_groups_blocked_traffic = {{ server.vmware.nsxv.log_security_groups_blocked_traffic }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall security-groups
+# allowed traffic is logged. (boolean value)
+#log_security_groups_allowed_traffic = false
+{%- if server.vmware.get('nsxv', {}).log_security_groups_allowed_traffic is defined %}
+log_security_groups_allowed_traffic = {{ server.vmware.nsxv.log_security_groups_allowed_traffic }}
+{%- endif %}
+
+# (Optional) The profile id of the redirect firewall rules that will
+# be used for the Service Insertion feature. (string value)
+#service_insertion_profile_id = <None>
+{%- if server.vmware.get('nsxv', {}).service_insertion_profile_id is defined %}
+service_insertion_profile_id = {{ server.vmware.nsxv.service_insertion_profile_id }}
+{%- endif %}
+
+# (Optional) If set to True, the plugin will create a redirect rule to
+# send all the traffic to the security partner (boolean value)
+#service_insertion_redirect_all = false
+{%- if server.vmware.get('nsxv', {}).service_insertion_redirect_all is defined %}
+service_insertion_redirect_all = {{ server.vmware.nsxv.service_insertion_redirect_all }}
+{%- endif %}
+
+# If set to True, the plugin will use NSX policies in the neutron
+# security groups. (boolean value)
+#use_nsx_policies = false
+{%- if server.vmware.get('nsxv', {}).use_nsx_policies is defined %}
+use_nsx_policies = {{ server.vmware.nsxv.use_nsx_policies }}
+{%- endif %}
+
+# (Optional) If use_nsx_policies is True, this policy will be used as
+# the default policy for new tenants. (string value)
+#default_policy_id = <None>
+{%- if server.vmware.get('nsxv', {}).default_policy_id is defined %}
+default_policy_id = {{ server.vmware.nsxv.default_policy_id }}
+{%- endif %}
+
+# (Optional) If use_nsx_policies is True, this value will determine if
+# a tenants can add rules to their security groups. (boolean value)
+#allow_tenant_rules_with_policy = false
+{%- if server.vmware.get('nsxv', {}).allow_tenant_rules_with_policy is defined %}
+allow_tenant_rules_with_policy = {{ server.vmware.nsxv.allow_tenant_rules_with_policy }}
+{%- endif %}
+
+# (Optional) Sets the network address for distributed router TLR-PLR
+# connectivity, with <network IP>/<prefix> syntax (string value)
+#vdr_transit_network = 169.254.2.0/28
+{%- if server.vmware.get('nsxv', {}).vdr_transit_network is defined %}
+vdr_transit_network = {{ server.vmware.nsxv.vdr_transit_network }}
+{%- endif %}
+
+# If set to False, router will associate floating ip with external
+# interface of only, thus denying connectivity between hosts on same
+# network via their floating ips. If True, floating ip will be
+# associated with all router interfaces. (boolean value)
+#bind_floatingip_to_all_interfaces = false
+{%- if server.vmware.get('nsxv', {}).bind_floatingip_to_all_interfaces is defined %}
+bind_floatingip_to_all_interfaces = {{ server.vmware.nsxv.bind_floatingip_to_all_interfaces }}
+{%- endif %}
+
+# (Optional) Have exclusive DHCP edge per network. (boolean value)
+#exclusive_dhcp_edge = false
+{%- if server.vmware.get('nsxv', {}).exclusive_dhcp_edge is defined %}
+exclusive_dhcp_edge = {{ server.vmware.nsxv.exclusive_dhcp_edge }}
+{%- endif %}
+
+# (Optional) Set the interval (Seconds) for BGP neighbour hold down
+# time. (integer value)
+#bgp_neighbour_hold_down_timer = 4
+{%- if server.vmware.get('nsxv', {}).bgp_neighbour_hold_down_timer is defined %}
+bgp_neighbour_hold_down_timer = {{ server.vmware.nsxv.bgp_neighbour_hold_down_timer }}
+{%- endif %}
+
+# (Optional) Set the interval (Seconds) for BGP neighbour keep alive
+# time. (integer value)
+#bgp_neighbour_keep_alive_timer = 1
+{%- if server.vmware.get('nsxv', {}).bgp_neighbour_keep_alive_timer is defined %}
+bgp_neighbour_keep_alive_timer = {{ server.vmware.nsxv.bgp_neighbour_keep_alive_timer }}
+{%- endif %}
+
+# (Optional) Set the wait time (Seconds) between enablement of ECMP.
+# (integer value)
+#ecmp_wait_time = 2
+{%- if server.vmware.get('nsxv', {}).ecmp_wait_time is defined %}
+ecmp_wait_time = {{ server.vmware.nsxv.ecmp_wait_time }}
+{%- endif %}
+
+# List of <DVS MoRef ID>:<vlan_min>:<vlan_max> specifying DVS MoRef ID
+# usable for VLAN provider networks, as well as ranges of VLAN tags on
+# each available for allocation to networks. (list value)
+#network_vlan_ranges =
+{%- set network_vlan_ranges = [] %}
+{%- for _,netrange in server.vmware.get('nsxv', {}).get('network_vlan_ranges', {}).iteritems() %}
+{%- do network_vlan_ranges.append(netrange.dvs_id ~ ":" ~ netrange.vlan_min ~ ":" ~ netrange.vlan_max) %}
+{%- endfor %}
+network_vlan_ranges = {{ ','.join(network_vlan_ranges) }}
+
+
+[quotas]
+
+#
+# From nsx
+#
+
+# Number of network gateways allowed per tenant, -1 for unlimited
+# (integer value)
+#quota_network_gateway = 5
+{%- if server.vmware.get('nsxv', {}).quota_network_gateway is defined %}
+quota_network_gateway = {{ server.vmware.nsxv.quota_network_gateway }}
+{%- endif %}
diff --git a/neutron/files/queens/neutron-server b/neutron/files/queens/neutron-server
index d147249..6f48434 100644
--- a/neutron/files/queens/neutron-server
+++ b/neutron/files/queens/neutron-server
@@ -15,6 +15,10 @@
 NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/opencontrail/ContrailPlugin.ini"
 {%- endif %}
 
+{%- if server.backend.engine == "vmware" %}
+NEUTRON_PLUGIN_CONFIG="/etc/neutron/plugins/vmware/nsx.ini"
+{%- endif %}
+
 {%- if server.logging.log_appender %}
 DAEMON_ARGS="${DAEMON_ARGS} --log-config-append=/etc/neutron/logging/logging-neutron-server.conf"
 {%- endif %}
diff --git a/neutron/files/queens/neutron-server.conf.Debian b/neutron/files/queens/neutron-server.conf.Debian
index bf4cddc..2edcdab 100644
--- a/neutron/files/queens/neutron-server.conf.Debian
+++ b/neutron/files/queens/neutron-server.conf.Debian
@@ -31,6 +31,19 @@
 #auth_strategy = keystone
 auth_strategy = keystone
 
+{%- if server.core_plugin is defined %}
+core_plugin = {{ server.core_plugin }}
+{%- if server.service_plugins is defined %}
+{%- set service_plugins = [] %}
+{%- for sname,service in server.service_plugins.iteritems() %}
+{%- if service.enabled%}
+{%- do service_plugins.append(sname)%}
+{%- endif %}
+{%- endfor %}
+service_plugins = {{ ','.join(service_plugins) }}
+{%- endif %}
+{%- else %}
+
 {% if server.backend.engine == "contrail" %}
 
 api_extensions_path = extensions:/usr/lib/python2.7/dist-packages/neutron_plugin_contrail/extensions:/usr/lib/python2.7/dist-packages/neutron_lbaas/extensions
@@ -58,6 +71,7 @@
 {%- if server.get('bgp_vpn', {}).get('enabled', False) -%},bgpvpn{%- endif -%}
 
 {% endif %}
+{%- endif %}
 
 # The service plugins Neutron will use (list value)
 #service_plugins =
diff --git a/neutron/files/queens/plugins/nsx.ini b/neutron/files/queens/plugins/nsx.ini
new file mode 100644
index 0000000..2fe53a2
--- /dev/null
+++ b/neutron/files/queens/plugins/nsx.ini
@@ -0,0 +1,1243 @@
+{%- from "neutron/map.jinja" import server with context %}
+[DEFAULT]
+
+#
+# From nsx
+#
+
+# This is uuid of the default NSX Transport zone that will be used for
+# creating tunneled isolated "Neutron" networks. It needs to be
+# created in NSX before starting Neutron with the nsx plugin. (string
+# value)
+#default_tz_uuid = <None>
+{%- if server.vmware.default_tz_uuid is defined %}
+default_tz_uuid = {{ server.vmware.default_tz_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the NSX L3 Gateway service which will be used for
+# implementing routers and floating IPs (string value)
+#default_l3_gw_service_uuid = <None>
+{%- if server.vmware.default_l3_gw_service_uuid is defined %}
+default_l3_gw_service_uuid = {{ server.vmware.default_l3_gw_service_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the NSX L2 Gateway service which will be used by
+# default for network gateways (string value)
+#default_l2_gw_service_uuid = <None>
+{%- if server.vmware.default_l2_gw_service_uuid is defined %}
+default_l2_gw_service_uuid = {{ server.vmware.default_l2_gw_service_uuid }}
+{%- endif %}
+
+# (Optional) UUID of the Service Cluster which will be used by logical
+# services like dhcp and metadata (string value)
+#default_service_cluster_uuid = <None>
+{%- if server.vmware.default_service_cluster_uuid is defined %}
+default_service_cluster_uuid = {{ server.vmware.default_service_cluster_uuid }}
+{%- endif %}
+
+# Name of the interface on a L2 Gateway transport node which should be
+# used by default when setting up a network connection (string value)
+# Deprecated group/name - [DEFAULT]/default_interface_name
+#nsx_default_interface_name = breth0
+{%- if server.vmware.get('nsx', {}).default_interface_name is defined %}
+nsx_default_interface_name = {{ server.vmware.nsx.default_interface_name }}
+{%- endif %}
+
+# User name for NSX controllers in this cluster (string value)
+# Deprecated group/name - [DEFAULT]/nvp_user
+#nsx_user = admin
+{%- if server.vmware.get('nsx', {}).user is defined %}
+nsx_user = {{ server.vmware.nsx.user }}
+{%- endif %}
+
+# Password for NSX controllers in this cluster (string value)
+# Deprecated group/name - [DEFAULT]/nvp_password
+#nsx_password = admin
+{%- if server.vmware.get('nsx', {}).password is defined %}
+nsx_password = {{ server.vmware.nsx.password }}
+{%- endif %}
+
+# Time before aborting a request on an unresponsive controller
+# (Seconds) (integer value)
+#http_timeout = 75
+{%- if server.vmware.http_timeout is defined %}
+http_timeout = {{ server.vmware.http_timeout }}
+{%- endif %}
+
+# Maximum number of times a particular request should be retried
+# (integer value)
+#retries = 2
+{%- if server.vmware.retries is defined %}
+retries = {{ server.vmware.retries }}
+{%- endif %}
+
+# Maximum number of times a redirect response should be followed
+# (integer value)
+#redirects = 2
+{%- if server.vmware.redirects is defined %}
+redirects = {{ server.vmware.redirects }}
+{%- endif %}
+
+# Comma-separated list of NSX controller endpoints (<ip>:<port>). When
+# port is omitted, 443 is assumed. This option MUST be specified.
+# e.g.: aa.bb.cc.dd, ee.ff.gg.hh.ee:80 (list value)
+# Deprecated group/name - [DEFAULT]/nvp_controllers
+#nsx_controllers =
+{%- set nsx_controllers = [] %}
+{%- for _,controller in server.vmware.get('controllers', {}).iteritems() %}
+{%- do nsx_controllers.append(controller.host ~ ":" ~ controller.get('port', '443')) %}
+{%- endfor %}
+nsx_controllers = {{ ','.join(nsx_controllers) }}
+
+# Reconnect connection to nsx if not used within this amount of time.
+# (integer value)
+#conn_idle_timeout = 900
+{%- if server.vmware.conn_idle_timeout is defined %}
+conn_idle_timeout = {{ server.vmware.conn_idle_timeout }}
+{%- endif %}
+
+# Specify the class path for the Layer 2 gateway backend driver(i.e.
+# NSXv3/NSX-V). This field will be used when a L2 Gateway service
+# plugin is configured. (string value)
+#nsx_l2gw_driver = <None>
+{%- if server.vmware.get('nsx', {}).l2gw_driver is defined %}
+nsx_l2gw_driver = {{ server.vmware.nsx.l2gw_driver }}
+{%- endif %}
+
+# (Optional) URL for distributed locking coordination resource for
+# lock manager. This value is passed as a parameter to tooz
+# coordinator. By default, value is None and oslo_concurrency is used
+# for single-node lock management. (string value)
+#locking_coordinator_url = <None>
+{%- if server.vmware.get('nsx', {}).locking_coordinator_url is defined %}
+locking_coordinator_url = {{ server.vmware.nsx.locking_coordinator_url }}
+{%- endif %}
+
+# If true, the server then allows the caller to specify the id of
+# resources. This should only be enabled in order to allow one to
+# migrate an existing install of neutron to the nsx-v3 plugin.
+# (boolean value)
+#api_replay_mode = false
+{%- if server.vmware.get('nsx', {}).api_replay_mode is defined %}
+api_replay_mode = {{ server.vmware.nsx.api_replay_mode }}
+{%- endif %}
+
+# An ordered list of extension driver entrypoints to be loaded from
+# the vmware_nsx.extension_drivers namespace. (list value)
+#nsx_extension_drivers =
+{%- if server.vmware.get('nsx', {}).extension_drivers is defined %}
+nsx_extension_drivers = {{ ','.join(server.vmware.nsx.extension_drivers) }}
+{%- endif %}
+
+
+[dvs]
+
+#
+# From nsx
+#
+
+# Hostname or IP address for connection to VMware vCenter host.
+# (string value)
+#host_ip = <None>
+{%- if server.vmware.get('dvs', {}).host_ip is defined %}
+host_ip = {{ server.vmware.dvs.host_ip }}
+{%- endif %}
+
+# Port for connection to VMware vCenter host. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#host_port = 443
+{%- if server.vmware.get('dvs', {}).host_port is defined %}
+host_port = {{ server.vmware.dvs.host_port }}
+{%- endif %}
+
+# Username for connection to VMware vCenter host. (string value)
+#host_username = <None>
+{%- if server.vmware.get('dvs', {}).host_username is defined %}
+host_username = {{ server.vmware.dvs.host_username }}
+{%- endif %}
+
+# Password for connection to VMware vCenter host. (string value)
+#host_password = <None>
+{%- if server.vmware.get('dvs', {}).host_password is defined %}
+host_password = {{ server.vmware.dvs.host_password }}
+{%- endif %}
+
+# The interval used for polling of remote tasks. (floating point
+# value)
+#task_poll_interval = 0.5
+{%- if server.vmware.get('dvs', {}).task_poll_interval is defined %}
+task_poll_interval = {{ server.vmware.dvs.task_poll_interval }}
+{%- endif %}
+
+# Specify a CA bundle file to use in verifying the vCenter server
+# certificate. (string value)
+#ca_file = <None>
+{%- if server.vmware.get('dvs', {}).ca_file is defined %}
+ca_file = {{ server.vmware.dvs.ca_file }}
+{%- endif %}
+
+# If true, the vCenter server certificate is not verified. If false,
+# then the default CA truststore is used for verification. This option
+# is ignored if "ca_file" is set. (boolean value)
+#insecure = false
+{%- if server.vmware.get('dvs', {}).insecure is defined %}
+insecure = {{ server.vmware.dvs.insecure }}
+{%- endif %}
+
+# The number of times we retry on failures, e.g., socket error, etc.
+# (integer value)
+#api_retry_count = 10
+{%- if server.vmware.get('dvs', {}).api_retry_count is defined %}
+api_retry_count = {{ server.vmware.dvs.api_retry_count }}
+{%- endif %}
+
+# The name of the preconfigured DVS. (string value)
+#dvs_name = <None>
+{%- if server.vmware.get('dvs', {}).dvs_name is defined %}
+dvs_name = {{ server.vmware.dvs.dvs_name }}
+{%- endif %}
+
+# This value should not be set. It is just required for ensuring that
+# the DVS plugin works with the generic NSX metadata code (string
+# value)
+#metadata_mode = <None>
+{%- if server.vmware.get('dvs', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.dvs.metadata_mode }}
+{%- endif %}
+
+
+[nsx]
+
+#
+# From nsx
+#
+
+# Maximum number of ports of a logical switch on a bridged transport
+# zone. The recommended value for this parameter varies with NSX
+# version.
+# Please use:
+# NSX 2.x -> 64
+# NSX 3.0, 3.1 -> 5000
+# NSX 3.2 -> 10000 (integer value)
+#max_lp_per_bridged_ls = 5000
+{%- if server.vmware.get('nsx', {}).max_lp_per_bridged_ls is defined %}
+max_lp_per_bridged_ls = {{ server.vmware.nsx.max_lp_per_bridged_ls }}
+{%- endif %}
+
+# Maximum number of ports of a logical switch on an overlay transport
+# zone (integer value)
+#max_lp_per_overlay_ls = 256
+{%- if server.vmware.get('nsx', {}).max_lp_per_overlay_ls is defined %}
+max_lp_per_overlay_ls = {{ server.vmware.nsx.max_lp_per_overlay_ls }}
+{%- endif %}
+
+# Maximum concurrent connections to each NSX controller. (integer
+# value)
+#concurrent_connections = 10
+{%- if server.vmware.get('nsx', {}).concurrent_connections is defined %}
+concurrent_connections = {{ server.vmware.nsx.concurrent_connections }}
+{%- endif %}
+
+# Number of seconds a generation id should be valid for (default -1
+# meaning do not time out) (integer value)
+# Deprecated group/name - [NVP]/nvp_gen_timeout
+#nsx_gen_timeout = -1
+{%- if server.vmware.get('nsx', {}).nsx_gen_timeout is defined %}
+nsx_gen_timeout = {{ server.vmware.nsx.nsx_gen_timeout }}
+{%- endif %}
+
+# If set to access_network this enables a dedicated connection to the
+# metadata proxy for metadata server access via Neutron router. If set
+# to dhcp_host_route this enables host route injection via the dhcp
+# agent. This option is only useful if running on a host that does not
+# support namespaces otherwise access_network should be used. (string
+# value)
+#metadata_mode = access_network
+{%- if server.vmware.get('nsx', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.nsx.metadata_mode }}
+{%- endif %}
+
+# The default network tranport type to use (stt, gre, bridge,
+# ipsec_gre, or ipsec_stt) (string value)
+#default_transport_type = stt
+{%- if server.vmware.get('nsx', {}).default_transport_type is defined %}
+default_transport_type = {{ server.vmware.nsx.default_transport_type }}
+{%- endif %}
+
+# Specifies in which mode the plugin needs to operate in order to
+# provide DHCP and metadata proxy services to tenant instances. If
+# 'agent' is chosen (default) the NSX plugin relies on external RPC
+# agents (i.e. dhcp and metadata agents) to provide such services. In
+# this mode, the plugin supports API extensions 'agent' and
+# 'dhcp_agent_scheduler'. If 'agentless' is chosen (experimental in
+# Icehouse), the plugin will use NSX logical services for DHCP and
+# metadata proxy. This simplifies the deployment model for Neutron, in
+# that the plugin no longer requires the RPC agents to operate. When
+# 'agentless' is chosen, the config option metadata_mode becomes
+# ineffective. The 'agentless' mode works only on NSX 4.1.
+# Furthermore, a 'combined' mode is also provided and is used to
+# support existing deployments that want to adopt the agentless mode.
+# With this mode, existing networks keep being served by the existing
+# infrastructure (thus preserving backward compatibility, whereas new
+# networks will be served by the new infrastructure. Migration tools
+# are provided to 'move' one network from one model to another; with
+# agent_mode set to 'combined', option 'network_auto_schedule' in
+# neutron.conf is ignored, as new networks will no longer be scheduled
+# to existing dhcp agents. (string value)
+#agent_mode = agent
+{%- if server.vmware.get('nsx', {}).agent_mode is defined %}
+agent_mode = {{ server.vmware.nsx.agent_mode }}
+{%- endif %}
+
+# Specifies which mode packet replication should be done in. If set to
+# service a service node is required in order to perform packet
+# replication. This can also be set to source if one wants replication
+# to be performed locally (NOTE: usually only useful for testing if
+# one does not want to deploy a service node). In order to leverage
+# distributed routers, replication_mode should be set to 'service'.
+# (string value)
+# Possible values:
+# service - <No description provided>
+# source - <No description provided>
+#replication_mode = service
+{%- if server.vmware.get('nsx', {}).replication_mode is defined %}
+replication_mode = {{ server.vmware.nsx.replication_mode }}
+{%- endif %}
+
+# The QoS rules peak bandwidth value will be the configured maximum
+# bandwidth of the QoS rule, multiplied by this value. Value must be
+# bigger than 1 (floating point value)
+# Minimum value: 1
+#qos_peak_bw_multiplier = 2.0
+{%- if server.vmware.get('nsx', {}).qos_peak_bw_multiplier is defined %}
+qos_peak_bw_multiplier = {{ server.vmware.nsx.qos_peak_bw_multiplier }}
+{%- endif %}
+
+
+[nsx_dhcp]
+
+#
+# From nsx
+#
+
+# Comma separated list of additional domain name servers (list value)
+#extra_domain_name_servers =
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).extra_domain_name_servers is defined %}
+extra_domain_name_servers = {{ server.vmware.nsx.dhcp.extra_domain_name_servers }}
+{%- endif %}
+
+# Domain to use for building the hostnames (string value)
+#domain_name = openstacklocal
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).domain_name is defined %}
+domain_name = {{ server.vmware.nsx.dhcp.domain_name }}
+{%- endif %}
+
+# Default DHCP lease time (integer value)
+#default_lease_time = 43200
+{%- if server.vmware.get('nsx', {}).get('dhcp', {}).default_lease_time is defined %}
+default_lease_time = {{ server.vmware.nsx.dhcp.default_lease_time }}
+{%- endif %}
+
+
+[nsx_lsn]
+
+#
+# From nsx
+#
+
+# Pull LSN information from NSX in case it is missing from the local
+# data store. This is useful to rebuild the local store in case of
+# server recovery. (boolean value)
+#sync_on_missing_data = false
+{%- if server.vmware.get('nsx', {}).get('lsn', {}).sync_on_missing_data is defined %}
+sync_on_missing_data = {{ server.vmware.nsx.lsn.sync_on_missing_data }}
+{%- endif %}
+
+
+[nsx_metadata]
+
+#
+# From nsx
+#
+
+# IP address used by Metadata server. (string value)
+#metadata_server_address = 127.0.0.1
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).server_address is defined %}
+metadata_server_address = {{ server.vmware.nsx.metadata.server_address }}
+{%- endif %}
+
+# TCP Port used by Metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#metadata_server_port = 8775
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).server_port is defined %}
+metadata_server_port = {{ server.vmware.nsx.metadata.server_port }}
+{%- endif %}
+
+# When proxying metadata requests, Neutron signs the Instance-ID
+# header with a shared secret to prevent spoofing. You may select any
+# string for a secret, but it MUST match with the configuration used
+# by the Metadata server. (string value)
+#metadata_shared_secret =
+{%- if server.vmware.get('nsx', {}).get('metadata', {}).shared_secret is defined %}
+metadata_shared_secret = {{ server.vmware.nsx.metadata.shared_secret }}
+{%- endif %}
+
+
+[nsx_sync]
+
+#
+# From nsx
+#
+
+# Interval in seconds between runs of the status synchronization task.
+# The plugin will aim at resynchronizing operational status for all
+# resources in this interval, and it should be therefore large enough
+# to ensure the task is feasible. Otherwise the plugin will be
+# constantly synchronizing resource status, ie: a new task is started
+# as soon as the previous is completed. If this value is set to 0, the
+# state synchronization thread for this Neutron instance will be
+# disabled. (integer value)
+#state_sync_interval = 10
+{%- if server.vmware.get('nsx', {}).get('sync', {}).state_sync_interval is defined %}
+state_sync_interval = {{ server.vmware.nsx.sync.state_sync_interval }}
+{%- endif %}
+
+# Random additional delay between two runs of the state
+# synchronization task. An additional wait time between 0 and
+# max_random_sync_delay seconds will be added on top of
+# state_sync_interval. (integer value)
+#max_random_sync_delay = 0
+{%- if server.vmware.get('nsx', {}).get('sync', {}).max_random_sync_delay is defined %}
+max_random_sync_delay = {{ server.vmware.nsx.sync.max_random_sync_delay }}
+{%- endif %}
+
+# Minimum delay, in seconds, between two status synchronization
+# requests for NSX. Depending on chunk size, controller load, and
+# other factors, state synchronization requests might be pretty heavy.
+# This means the controller might take time to respond, and its load
+# might be quite increased by them. This parameter allows to specify a
+# minimum interval between two subsequent requests. The value for this
+# parameter must never exceed state_sync_interval. If this does, an
+# error will be raised at startup. (integer value)
+#min_sync_req_delay = 1
+{%- if server.vmware.get('nsx', {}).get('sync', {}).min_sync_req_delay is defined %}
+min_sync_req_delay = {{ server.vmware.nsx.sync.min_sync_req_delay }}
+{%- endif %}
+
+# Minimum number of resources to be retrieved from NSX in a single
+# status synchronization request. The actual size of the chunk will
+# increase if the number of resources is such that using the minimum
+# chunk size will cause the interval between two requests to be less
+# than min_sync_req_delay (integer value)
+#min_chunk_size = 500
+{%- if server.vmware.get('nsx', {}).get('sync', {}).min_chunk_size is defined %}
+min_chunk_size = {{ server.vmware.nsx.sync.min_chunk_size }}
+{%- endif %}
+
+# Enable this option to allow punctual state synchronization on show
+# operations. In this way, show operations will always fetch the
+# operational status of the resource from the NSX backend, and this
+# might have a considerable impact on overall performance. (boolean
+# value)
+#always_read_status = false
+{%- if server.vmware.get('nsx', {}).get('sync', {}).always_read_status is defined %}
+always_read_status = {{ server.vmware.nsx.sync.always_read_status }}
+{%- endif %}
+
+
+[nsx_v3]
+
+#
+# From nsx
+#
+
+# User names for the NSX managers (list value)
+#nsx_api_user = admin
+{%- if server.vmware.get('nsx', {}).get('v3', {}).api_user is defined %}
+nsx_api_user = {{ server.vmware.nsx.v3.api_user }}
+{%- endif %}
+
+# Passwords for the NSX managers (list value)
+#nsx_api_password = default
+{%- if server.vmware.get('nsx', {}).get('v3', {}).api_password is defined %}
+nsx_api_password = {{ server.vmware.nsx.v3.api_password }}
+{%- endif %}
+
+# IP address of one or more NSX managers separated by commas. The IP
+# address should be of the form:
+# [<scheme>://]<ip_address>[:<port>]
+# If scheme is not provided https is used. If port is not provided
+# port 80 is used for http and port 443 for https. (list value)
+#nsx_api_managers =
+{%- set nsx_api_managers = [] %}
+{%- for _, manager in server.vmware.get('nsx', {}).get('v3', {'api_managers': {}}).api_managers.iteritems() %}
+{%- do nsx_api_managers.append(manager.scheme + "://" + manager.host + ":" + manager.port) %}
+{%- endfor %}
+nsx_api_managers = {{ ','.join(nsx_api_managers) }}
+
+# Use client certificate in NSX manager authentication (boolean value)
+#nsx_use_client_auth = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).use_client_auth is defined %}
+nsx_use_client_auth = {{ server.vmware.nsx.v3.use_client_auth }}
+{%- endif %}
+
+# File to contain client certificate and private key (string value)
+#nsx_client_cert_file =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_file is defined %}
+nsx_client_cert_file = {{ server.vmware.nsx.v3.client_cert_file }}
+{%- endif %}
+
+# password for private key encryption (string value)
+#nsx_client_cert_pk_password =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_pk_password is defined %}
+nsx_client_cert_pk_password = {{ server.vmware.nsx.v3.client_cert_pk_password }}
+{%- endif %}
+
+# Storage type for client certificate sensitive data (string value)
+# Possible values:
+# nsx-db - <No description provided>
+# none - <No description provided>
+#nsx_client_cert_storage = nsx-db
+{%- if server.vmware.get('nsx', {}).get('v3', {}).client_cert_storage is defined %}
+nsx_client_cert_storage = {{ server.vmware.nsx.v3.client_cert_storage }}
+{%- endif %}
+
+# This is the name or UUID of the default NSX overlay transport zone
+# that will be used for creating tunneled isolated Neutron networks.
+# It needs to be created in NSX before starting Neutron with the NSX
+# plugin. (string value)
+#default_overlay_tz = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_overlay_tz is defined %}
+default_overlay_tz = {{ server.vmware.nsx.v3.default_overlay_tz }}
+{%- endif %}
+
+# (Optional) Only required when creating VLAN or flat provider
+# networks. Name or UUID of default NSX VLAN transport zone that will
+# be used for bridging between Neutron networks, if no physical
+# network has been specified (string value)
+#default_vlan_tz = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_vlan_tz is defined %}
+default_vlan_tz = {{ server.vmware.nsx.v3.default_vlan_tz }}
+{%- endif %}
+
+# (Optional) Name or UUID of the default NSX bridge cluster that will
+# be used to perform L2 gateway bridging between VXLAN and VLAN
+# networks. If default bridge cluster UUID is not specified, admin
+# will have to manually create a L2 gateway corresponding to a NSX
+# Bridge Cluster using L2 gateway APIs. This field must be specified
+# on one of the active neutron servers only. (string value)
+#default_bridge_cluster = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_bridge_cluster is defined %}
+default_bridge_cluster = {{ server.vmware.nsx.v3.default_bridge_cluster }}
+{%- endif %}
+
+# Maximum number of times to retry API requests upon stale revision
+# errors. (integer value)
+#retries = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).retries is defined %}
+retries = {{ server.vmware.nsx.v3.retries }}
+{%- endif %}
+
+# Specify a CA bundle files to use in verifying the NSX Managers
+# server certificate. This option is ignored if "insecure" is set to
+# True. If "insecure" is set to False and ca_file is unset, the system
+# root CAs will be used to verify the server certificate. (list value)
+#ca_file = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).ca_file is defined %}
+ca_file = {{ server.vmware.nsx.v3.ca_file }}
+{%- endif %}
+
+# If true, the NSX Manager server certificate is not verified. If
+# false the CA bundle specified via "ca_file" will be used or if
+# unset the default system root CAs will be used. (boolean value)
+#insecure = true
+{%- if server.vmware.get('nsx', {}).get('v3', {}).insecure is defined %}
+insecure = {{ server.vmware.nsx.v3.insecure }}
+{%- endif %}
+
+# The time in seconds before aborting a HTTP connection to a NSX
+# manager. (integer value)
+#http_timeout = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_timeout is defined %}
+http_timeout = {{ server.vmware.nsx.v3.http_timeout }}
+{%- endif %}
+
+
+# The time in seconds before aborting a HTTP read response from a NSX
+# manager. (integer value)
+#http_read_timeout = 180
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_read_timeout is defined %}
+http_read_timeout = {{ server.vmware.nsx.v3.http_read_timeout }}
+{%- endif %}
+
+# Maximum number of times to retry a HTTP connection. (integer value)
+#http_retries = 3
+{%- if server.vmware.get('nsx', {}).get('v3', {}).http_retries is defined %}
+http_retries = {{ server.vmware.nsx.v3.http_retries }}
+{%- endif %}
+
+# Maximum concurrent connections to each NSX manager. (integer value)
+#concurrent_connections = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).concurrent_connections is defined %}
+concurrent_connections = {{ server.vmware.nsx.v3.concurrent_connections }}
+{%- endif %}
+
+# The amount of time in seconds to wait before ensuring connectivity
+# to the NSX manager if no manager connection has been used. (integer
+# value)
+#conn_idle_timeout = 10
+{%- if server.vmware.get('nsx', {}).get('v3', {}).conn_idle_timeout is defined %}
+conn_idle_timeout = {{ server.vmware.nsx.v3.conn_idle_timeout }}
+{%- endif %}
+
+# Number of times a HTTP redirect should be followed. (integer value)
+#redirects = 2
+{%- if server.vmware.get('nsx', {}).get('v3', {}).redirects is defined %}
+redirects = {{ server.vmware.nsx.v3.redirects }}
+{%- endif %}
+
+# Name or UUID of the default tier0 router that will be used for
+# connecting to tier1 logical routers and configuring external
+# networks (string value)
+#default_tier0_router = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).default_tier0_router is defined %}
+default_tier0_router = {{ server.vmware.nsx.v3.default_tier0_router }}
+{%- endif %}
+
+# (Optional) The number of nested groups which are used by the plugin,
+# each Neutron security-groups is added to one nested group, and each
+# nested group can contain as maximum as 500 security-groups,
+# therefore, the maximum number of security groups that can be created
+# is 500 * number_of_nested_groups. The default is 8 nested groups,
+# which allows a maximum of 4k security-groups, to allow creation of
+# more security-groups, modify this figure. (integer value)
+#number_of_nested_groups = 8
+{%- if server.vmware.get('nsx', {}).get('v3', {}).number_of_nested_groups is defined %}
+number_of_nested_groups = {{ server.vmware.nsx.v3.number_of_nested_groups }}
+{%- endif %}
+
+# If set to access_network this enables a dedicated connection to the
+# metadata proxy for metadata server access via Neutron router. If set
+# to dhcp_host_route this enables host route injection via the dhcp
+# agent. This option is only useful if running on a host that does not
+# support namespaces otherwise access_network should be used. (string
+# value)
+#metadata_mode = access_network
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_mode is defined %}
+metadata_mode = {{ server.vmware.nsx.v3.metadata_mode }}
+{%- endif %}
+
+# If true, an internal metadata network will be created for a router
+# only when the router is attached to a DHCP-disabled subnet. (boolean
+# value)
+#metadata_on_demand = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_on_demand is defined %}
+metadata_on_demand = {{ server.vmware.nsx.v3.metadata_on_demand }}
+{%- endif %}
+
+# If true, DHCP and metadata proxy services will be provided by NSX
+# backend. (boolean value)
+#native_dhcp_metadata = true
+{%- if server.vmware.get('nsx', {}).get('v3', {}).native_dhcp_metadata is defined %}
+native_dhcp_metadata = {{ server.vmware.nsx.v3.native_dhcp_metadata }}
+{%- endif %}
+
+# The metadata route used for native metadata proxy service. (string
+# value)
+#native_metadata_route = 169.254.169.254/31
+{%- if server.vmware.get('nsx', {}).get('v3', {}).native_metadata_route is defined %}
+native_metadata_route = {{ server.vmware.nsx.v3.native_metadata_route }}
+{%- endif %}
+
+# This is the name or UUID of the NSX DHCP Profile that will be used
+# to enable native DHCP service. It needs to be created in NSX before
+# starting Neutron with the NSX plugin (string value)
+#dhcp_profile = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_profile is defined %}
+dhcp_profile = {{ server.vmware.nsx.v3.dhcp_profile }}
+{%- endif %}
+
+# DHCP default lease time. (integer value)
+#dhcp_lease_time = 86400
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_lease_time is defined %}
+dhcp_lease_time = {{ server.vmware.nsx.v3.dhcp_lease_time }}
+{%- endif %}
+
+# Domain to use for building the hostnames. (string value)
+#dns_domain = openstacklocal
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dns_domain is defined %}
+dns_domain = {{ server.vmware.nsx.v3.dns_domain }}
+{%- endif %}
+
+# List of nameservers to configure for the DHCP binding entries. These
+# will be used if there are no nameservers defined on the subnet.
+# (list value)
+#nameservers =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).nameservers is defined %}
+nameservers = {{ ','.join(server.vmware.nsx.v3.nameservers) }}
+{%- endif %}
+
+
+# This is the name or UUID of the NSX Metadata Proxy that will be used
+# to enable native metadata service. It needs to be created in NSX
+# before starting Neutron with the NSX plugin. (string value)
+#metadata_proxy = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).metadata_proxy is defined %}
+metadata_proxy = {{ server.vmware.nsx.v3.metadata_proxy }}
+{%- endif %}
+
+# (Optional) This is the name or UUID of the NSX dhcp relay service
+# that will be used to enable DHCP relay on router ports. (string
+# value)
+#dhcp_relay_service = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).dhcp_relay_service is defined %}
+dhcp_relay_service = {{ server.vmware.nsx.v3.dhcp_relay_service }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall rule for security-
+# groups blocked traffic is logged. (boolean value)
+#log_security_groups_blocked_traffic = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).log_security_groups_blocked_traffic is defined %}
+log_security_groups_blocked_traffic = {{ server.vmware.nsx.v3.log_security_groups_blocked_traffic }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall security-groups
+# rules are logged. (boolean value)
+#log_security_groups_allowed_traffic = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).log_security_groups_allowed_traffic is defined %}
+log_security_groups_allowed_traffic = {{ server.vmware.nsx.v3.log_security_groups_allowed_traffic }}
+{%- endif %}
+
+# Optional parameter defining the networks availability zones names
+# for the native dhcp configuration. The configuration of each zone
+# will be under a group names [az:<name>] (list value)
+#availability_zones =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).availability_zones is defined %}
+availability_zones = {{ server.vmware.nsx.v3.availability_zones }}
+{%- endif %}
+
+# When True, the configured transport zones, router and profiles will
+# be found by tags on the NSX. The scope of the tag will be the value
+# of search_objects_scope. The value of the search tag will be the
+# name configured in each respective configuration. (boolean value)
+#init_objects_by_tags = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).init_objects_by_tags is defined %}
+init_objects_by_tags = {{ server.vmware.nsx.v3.init_objects_by_tags }}
+{%- endif %}
+
+# This is the scope of the tag that will be used for finding the
+# objects uuids on the NSX during plugin init. (string value)
+#search_objects_scope = <None>
+{%- if server.vmware.get('nsx', {}).get('v3', {}).search_objects_scope is defined %}
+search_objects_scope = {{ server.vmware.nsx.v3.search_objects_scope }}
+{%- endif %}
+
+# Optional parameter defining a list switching profiles uuids that
+# will be attached to all neutron created nsx ports. (list value)
+#switching_profiles =
+{%- if server.vmware.get('nsx', {}).get('v3', {}).switching_profiles is defined %}
+switching_profiles = {{ server.vmware.nsx.v3.switching_profiles }}
+{%- endif %}
+
+# (Optional) Indicates whether ENS transport zones can be used
+# (boolean value)
+#ens_support = false
+{%- if server.vmware.get('nsx', {}).get('v3', {}).ens_support is defined %}
+ens_support = {{ server.vmware.nsx.v3.ens_support }}
+{%- endif %}
+
+
+[nsxv]
+
+#
+# From nsx
+#
+
+# User name for NSXv manager (string value)
+#user = admin
+{%- if server.vmware.get('nsxv', {}).user is defined %}
+user = {{ server.vmware.nsxv.user }}
+{%- endif %}
+
+# Password for NSXv manager (string value)
+#password = default
+{%- if server.vmware.get('nsxv', {}).password is defined %}
+password = {{ server.vmware.nsxv.password }}
+{%- endif %}
+
+# URL for NSXv manager (string value)
+#manager_uri = <None>
+{%- if server.vmware.get('nsxv', {}).manager_uri is defined %}
+manager_uri = {{ server.vmware.nsxv.manager_uri }}
+{%- endif %}
+
+# Specify a CA bundle file to use in verifying the NSXv server
+# certificate. (string value)
+#ca_file = <None>
+{%- if server.vmware.get('nsxv', {}).ca_file is defined %}
+ca_file = {{ server.vmware.nsxv.ca_file }}
+{%- endif %}
+
+# If true, the NSXv server certificate is not verified. If false, then
+# the default CA truststore is used for verification. This option is
+# ignored if "ca_file" is set. (boolean value)
+#insecure = true
+{%- if server.vmware.get('nsxv', {}).insecure is defined %}
+insecure = {{ server.vmware.nsxv.insecure }}
+{%- endif %}
+
+# (Required) Parameter listing the IDs of the clusters which are used
+# by OpenStack. (list value)
+#cluster_moid =
+{%- if server.vmware.get('nsxv', {}).cluster_moid is defined %}
+cluster_moid = {{ server.vmware.nsxv.cluster_moid }}
+{%- endif %}
+
+# Required parameter identifying the ID of datacenter to deploy NSX
+# Edges (string value)
+#datacenter_moid = <None>
+{%- if server.vmware.get('nsxv', {}).datacenter_moid is defined %}
+datacenter_moid = {{ server.vmware.nsxv.datacenter_moid }}
+{%- endif %}
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges (string value)
+#deployment_container_id = <None>
+{%- if server.vmware.get('nsxv', {}).deployment_container_id is defined %}
+deployment_container_id = {{ server.vmware.nsxv.deployment_container_id }}
+{%- endif %}
+
+# Optional parameter identifying the ID of resource to deploy NSX
+# Edges (string value)
+#resource_pool_id = <None>
+{%- if server.vmware.get('nsxv', {}).resource_pool_id is defined %}
+resource_pool_id = {{ server.vmware.nsxv.resource_pool_id }}
+{%- endif %}
+
+# Optional parameter defining the availability zones names for
+# deploying NSX Edges. The configuration of each zone will be under a
+# group names [az:<name>] (list value)
+#availability_zones =
+
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges (string value)
+#datastore_id = <None>
+{%- if server.vmware.get('nsxv', {}).datastore_id is defined %}
+datastore_id = {{ server.vmware.nsxv.datastore_id }}
+{%- endif %}
+
+# Optional parameter identifying the ID of datastore to deploy NSX
+# Edges in addition to data_store_id in case edge_ha is True (string
+# value)
+#ha_datastore_id = <None>
+{%- if server.vmware.get('nsxv', {}).ha_datastore_id is defined %}
+ha_datastore_id = {{ server.vmware.nsxv.ha_datastore_id }}
+{%- endif %}
+
+# When True and in case edge_ha is True, half of the edges will be
+# placed in the primary datastore as active and the other half will be
+# placed in the ha_datastore (boolean value)
+#ha_placement_random = false
+{%- if server.vmware.get('nsxv', {}).ha_placement_random is defined %}
+ha_placement_random = {{ server.vmware.nsxv.ha_placement_random }}
+{%- endif %}
+
+# (Optional) If edge HA is used then this will ensure that
+# active/backup edges are placed in the listed host groups. At least 2
+# predefined host groups need to be configured. (list value)
+#edge_host_groups =
+
+# (Required) Network ID for physical network connectivity (string
+# value)
+#external_network = <None>
+{%- if server.vmware.get('nsxv', {}).external_network is defined %}
+external_network = {{ server.vmware.nsxv.external_network }}
+{%- endif %}
+
+# (Optional) Asynchronous task status check interval. Default is 2000
+# (millisecond) (integer value)
+#task_status_check_interval = 2000
+{%- if server.vmware.get('nsxv', {}).task_status_check_interval is defined %}
+task_status_check_interval = {{ server.vmware.nsxv.task_status_check_interval }}
+{%- endif %}
+
+# (Optional) Network scope ID for VXLAN virtual wires (string value)
+#vdn_scope_id = <None>
+{%- if server.vmware.get('nsxv', {}).vdn_scope_id is defined %}
+vdn_scope_id = {{ server.vmware.nsxv.vdn_scope_id }}
+{%- endif %}
+
+# (Optional) DVS MoRef ID for DVS connected to Management / Edge
+# cluster (string value)
+#dvs_id = <None>
+{%- if server.vmware.get('nsxv', {}).dvs_id is defined %}
+dvs_id = {{ server.vmware.nsxv.dvs_id }}
+{%- endif %}
+
+# (Optional) Maximum number of sub interfaces supported per vnic in
+# edge. (integer value)
+# Minimum value: 1
+# Maximum value: 110
+#maximum_tunnels_per_vnic = 20
+{%- if server.vmware.get('nsxv', {}).maximum_tunnels_per_vnic is defined %}
+maximum_tunnels_per_vnic = {{ server.vmware.nsxv.maximum_tunnels_per_vnic }}
+{%- endif %}
+
+# Defines edge pool's management range with the format:
+# <edge_type>:[edge_size]:<min_edges>:<max_edges>.edge_type:
+# service,vdr. edge_size: compact, large, xlarge, quadlarge and
+# default is compact. By default, edge pool manager would manage
+# service edge with compact size and distributed edge with compact
+# size as following: service:compact:4:10,vdr:compact:4:10 (list
+# value)
+#backup_edge_pool = service:compact:4:10,vdr:compact:4:10
+
+# Maximum number of API retries on endpoint. (integer value)
+#retries = 20
+{%- if server.vmware.get('nsxv', {}).retries is defined %}
+retries = {{ server.vmware.nsxv.retries }}
+{%- endif %}
+
+# (Optional) Portgroup MoRef ID for metadata proxy management network
+# (string value)
+#mgt_net_moid = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_moid is defined %}
+mgt_net_moid = {{ server.vmware.nsxv.mgt_net_moid }}
+{%- endif %}
+
+# (Optional) Comma separated list of management network IP addresses
+# for metadata proxy. (list value)
+#mgt_net_proxy_ips =
+
+# (Optional) Management network netmask for metadata proxy. (string
+# value)
+#mgt_net_proxy_netmask = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_proxy_netmask is defined %}
+mgt_net_proxy_netmask = {{ server.vmware.nsxv.mgt_net_proxy_netmask }}
+{%- endif %}
+
+# (Optional) Management network default gateway for metadata proxy.
+# (string value)
+#mgt_net_default_gateway = <None>
+{%- if server.vmware.get('nsxv', {}).mgt_net_default_gateway is defined %}
+mgt_net_default_gateway = {{ server.vmware.nsxv.mgt_net_default_gateway }}
+{%- endif %}
+
+# (Optional) IP addresses used by Nova metadata service. (list value)
+#nova_metadata_ips =
+
+# (Optional) TCP Port used by Nova metadata server. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#nova_metadata_port = 8775
+{%- if server.vmware.get('nsxv', {}).nova_metadata_port is defined %}
+nova_metadata_port = {{ server.vmware.nsxv.nova_metadata_port }}
+{%- endif %}
+
+# (Optional) Shared secret to sign metadata requests. (string value)
+#metadata_shared_secret = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_shared_secret is defined %}
+metadata_shared_secret = {{ server.vmware.nsxv.metadata_shared_secret }}
+{%- endif %}
+
+# (Optional) If True, the end to end connection for metadata service
+# is not verified. If False, the default CA truststore is used for
+# verification. (boolean value)
+#metadata_insecure = true
+{%- if server.vmware.get('nsxv', {}).metadata_insecure is defined %}
+metadata_insecure = {{ server.vmware.nsxv.metadata_insecure }}
+{%- endif %}
+
+# (Optional) Client certificate to use when metadata connection is to
+# be verified. If not provided, a self signed certificate will be
+# used. (string value)
+#metadata_nova_client_cert = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_nova_client_cert is defined %}
+metadata_nova_client_cert = {{ server.vmware.nsxv.metadata_nova_client_cert }}
+{%- endif %}
+
+# (Optional) Private key of client certificate. (string value)
+#metadata_nova_client_priv_key = <None>
+{%- if server.vmware.get('nsxv', {}).metadata_nova_client_priv_key is defined %}
+metadata_nova_client_priv_key = {{ server.vmware.nsxv.metadata_nova_client_priv_key }}
+{%- endif %}
+
+# (Optional) If True then plugin will use NSXV spoofguard component
+# for port-security feature. (boolean value)
+#spoofguard_enabled = true
+{%- if server.vmware.get('nsxv', {}).spoofguard_enabled is defined %}
+spoofguard_enabled = {{ server.vmware.nsxv.spoofguard_enabled }}
+{%- endif %}
+
+# (Optional) If True then plugin will use NSXV exclude list component
+# when port security is disabled and spoofguard is enabled. (boolean
+# value)
+#use_exclude_list = true
+{%- if server.vmware.get('nsxv', {}).use_exclude_list is defined %}
+use_exclude_list = {{ server.vmware.nsxv.use_exclude_list }}
+{%- endif %}
+
+# Ordered list of router_types to allocate as tenant routers. It
+# limits the router types that the Nsxv can support for tenants:
+# distributed: router is supported by distributed edge at the backend.
+# shared: multiple routers share the same service edge at the backend.
+# exclusive: router exclusively occupies one service edge at the
+# backend.
+# Nsxv would select the first available router type from
+# tenant_router_types list if router-type is not specified. If the
+# tenant defines the router type with '--distributed','--router_type
+# exclusive' or '--router_type shared', Nsxv would verify that the
+# router type is in tenant_router_types. Admin supports all these
+# three router types. (list value)
+#tenant_router_types = shared,distributed,exclusive
+
+# (Optional) Username to configure for Edge appliance login. (string
+# value)
+#edge_appliance_user = <None>
+{%- if server.vmware.get('nsxv', {}).edge_appliance_user is defined %}
+edge_appliance_user = {{ server.vmware.nsxv.edge_appliance_user }}
+{%- endif %}
+
+# (Optional) Password to configure for Edge appliance login. (string
+# value)
+#edge_appliance_password = <None>
+{%- if server.vmware.get('nsxv', {}).edge_appliance_password is defined %}
+edge_appliance_password = {{ server.vmware.nsxv.edge_appliance_password }}
+{%- endif %}
+
+# (Optional) DHCP default lease time. (integer value)
+#dhcp_lease_time = 86400
+{%- if server.vmware.get('nsxv', {}).dhcp_lease_time is defined %}
+dhcp_lease_time = {{ server.vmware.nsxv.dhcp_lease_time }}
+{%- endif %}
+
+# If True, the server instance will attempt to initialize the metadata
+# infrastructure (boolean value)
+#metadata_initializer = true
+{%- if server.vmware.get('nsxv', {}).metadata_initializer is defined %}
+metadata_initializer = {{ server.vmware.nsxv.metadata_initializer }}
+{%- endif %}
+
+# List of tcp ports, to be allowed access to the metadata proxy, in
+# addition to the default 80,443,8775 tcp ports (list value)
+#metadata_service_allowed_ports =
+{%- if server.vmware.get('nsxv', {}).metadata_service_allowed_ports is defined %}
+metadata_service_allowed_ports = {{ ','.join(server.vmware.nsxv.metadata_service_allowed_ports) }}
+{%- endif %}
+
+# (Optional) Enable HA for NSX Edges. (boolean value)
+#edge_ha = false
+{%- if server.vmware.get('nsxv', {}).edge_ha is defined %}
+edge_ha = {{ server.vmware.nsxv.edge_ha }}
+{%- endif %}
+
+# (Optional) Edge appliance size to be used for creating exclusive
+# router. Valid values: ['compact', 'large', 'xlarge', 'quadlarge'].
+# This exclusive_router_appliance_size will be picked up if --router-
+# size parameter is not specified while doing neutron router-create
+# (string value)
+# Possible values:
+# compact - <No description provided>
+# large - <No description provided>
+# xlarge - <No description provided>
+# quadlarge - <No description provided>
+#exclusive_router_appliance_size = compact
+{%- if server.vmware.get('nsxv', {}).exclusive_router_appliance_size is defined %}
+exclusive_router_appliance_size = {{ server.vmware.nsxv.exclusive_router_appliance_size }}
+{%- endif %}
+
+# (Optional) Edge appliance size to be used for creating shared router
+# edge. Valid values: ['compact', 'large', 'xlarge', 'quadlarge'].
+# (string value)
+# Possible values:
+# compact - <No description provided>
+# large - <No description provided>
+# xlarge - <No description provided>
+# quadlarge - <No description provided>
+#shared_router_appliance_size = compact
+{%- if server.vmware.get('nsxv', {}).shared_router_appliance_size is defined %}
+shared_router_appliance_size = {{ server.vmware.nsxv.shared_router_appliance_size }}
+{%- endif %}
+
+# (Optional) Use this search domain if there is no search domain
+# configured on the subnet. (string value)
+#dns_search_domain = <None>
+{%- if server.vmware.get('nsxv', {}).dns_search_domain is defined %}
+dns_search_domain = {{ server.vmware.nsxv.dns_search_domain }}
+{%- endif %}
+
+# List of nameservers to configure for the DHCP binding entries. These
+# will be used if there are no nameservers defined on the subnet.
+# (list value)
+#nameservers =
+{%- if server.vmware.get('nsxv', {}).nameservers is defined %}
+nameservers = {{ ','.join(server.vmware.nsxv.nameservers) }}
+{%- endif %}
+
+# If True, dvs features will be supported which involves configuring
+# the dvs backing nsx_v directly. If False, only features exposed via
+# nsx_v will be supported (boolean value)
+#use_dvs_features = false
+{%- if server.vmware.get('nsxv', {}).use_dvs_features is defined %}
+use_dvs_features = {{ server.vmware.nsxv.use_dvs_features }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall rule for security-
+# groups blocked traffic is logged. (boolean value)
+#log_security_groups_blocked_traffic = false
+{%- if server.vmware.get('nsxv', {}).log_security_groups_blocked_traffic is defined %}
+log_security_groups_blocked_traffic = {{ server.vmware.nsxv.log_security_groups_blocked_traffic }}
+{%- endif %}
+
+# (Optional) Indicates whether distributed-firewall security-groups
+# allowed traffic is logged. (boolean value)
+#log_security_groups_allowed_traffic = false
+{%- if server.vmware.get('nsxv', {}).log_security_groups_allowed_traffic is defined %}
+log_security_groups_allowed_traffic = {{ server.vmware.nsxv.log_security_groups_allowed_traffic }}
+{%- endif %}
+
+# (Optional) The profile id of the redirect firewall rules that will
+# be used for the Service Insertion feature. (string value)
+#service_insertion_profile_id = <None>
+{%- if server.vmware.get('nsxv', {}).service_insertion_profile_id is defined %}
+service_insertion_profile_id = {{ server.vmware.nsxv.service_insertion_profile_id }}
+{%- endif %}
+
+# (Optional) If set to True, the plugin will create a redirect rule to
+# send all the traffic to the security partner (boolean value)
+#service_insertion_redirect_all = false
+{%- if server.vmware.get('nsxv', {}).service_insertion_redirect_all is defined %}
+service_insertion_redirect_all = {{ server.vmware.nsxv.service_insertion_redirect_all }}
+{%- endif %}
+
+# If set to True, the plugin will use NSX policies in the neutron
+# security groups. (boolean value)
+#use_nsx_policies = false
+{%- if server.vmware.get('nsxv', {}).use_nsx_policies is defined %}
+use_nsx_policies = {{ server.vmware.nsxv.use_nsx_policies }}
+{%- endif %}
+
+# (Optional) If use_nsx_policies is True, this policy will be used as
+# the default policy for new tenants. (string value)
+#default_policy_id = <None>
+{%- if server.vmware.get('nsxv', {}).default_policy_id is defined %}
+default_policy_id = {{ server.vmware.nsxv.default_policy_id }}
+{%- endif %}
+
+# (Optional) If use_nsx_policies is True, this value will determine
+# whether tenants can add rules to their security groups. (boolean value)
+#allow_tenant_rules_with_policy = false
+{%- if server.vmware.get('nsxv', {}).allow_tenant_rules_with_policy is defined %}
+allow_tenant_rules_with_policy = {{ server.vmware.nsxv.allow_tenant_rules_with_policy }}
+{%- endif %}
+
+# (Optional) Sets the network address for distributed router TLR-PLR
+# connectivity, with <network IP>/<prefix> syntax (string value)
+#vdr_transit_network = 169.254.2.0/28
+{%- if server.vmware.get('nsxv', {}).vdr_transit_network is defined %}
+vdr_transit_network = {{ server.vmware.nsxv.vdr_transit_network }}
+{%- endif %}
+
+# If set to False, router will associate floating ip with the external
+# interface only, thus denying connectivity between hosts on the same
+# network via their floating ips. If True, floating ip will be
+# associated with all router interfaces. (boolean value)
+#bind_floatingip_to_all_interfaces = false
+{%- if server.vmware.get('nsxv', {}).bind_floatingip_to_all_interfaces is defined %}
+bind_floatingip_to_all_interfaces = {{ server.vmware.nsxv.bind_floatingip_to_all_interfaces }}
+{%- endif %}
+
+# (Optional) Have exclusive DHCP edge per network. (boolean value)
+#exclusive_dhcp_edge = false
+{%- if server.vmware.get('nsxv', {}).exclusive_dhcp_edge is defined %}
+exclusive_dhcp_edge = {{ server.vmware.nsxv.exclusive_dhcp_edge }}
+{%- endif %}
+
+# (Optional) Set the interval (Seconds) for BGP neighbour hold down
+# time. (integer value)
+#bgp_neighbour_hold_down_timer = 4
+{%- if server.vmware.get('nsxv', {}).bgp_neighbour_hold_down_timer is defined %}
+bgp_neighbour_hold_down_timer = {{ server.vmware.nsxv.bgp_neighbour_hold_down_timer }}
+{%- endif %}
+
+# (Optional) Set the interval (Seconds) for BGP neighbour keep alive
+# time. (integer value)
+#bgp_neighbour_keep_alive_timer = 1
+{%- if server.vmware.get('nsxv', {}).bgp_neighbour_keep_alive_timer is defined %}
+bgp_neighbour_keep_alive_timer = {{ server.vmware.nsxv.bgp_neighbour_keep_alive_timer }}
+{%- endif %}
+
+# (Optional) Set the wait time (Seconds) between enablement of ECMP.
+# (integer value)
+#ecmp_wait_time = 2
+{%- if server.vmware.get('nsxv', {}).ecmp_wait_time is defined %}
+ecmp_wait_time = {{ server.vmware.nsxv.ecmp_wait_time }}
+{%- endif %}
+
+# List of <DVS MoRef ID>:<vlan_min>:<vlan_max> specifying DVS MoRef ID
+# usable for VLAN provider networks, as well as ranges of VLAN tags on
+# each available for allocation to networks. (list value)
+#network_vlan_ranges =
+{%- set network_vlan_ranges = [] %}
+{%- for _,netrange in server.vmware.get('nsxv', {}).get('network_vlan_ranges', {}).iteritems() %}
+{%- do network_vlan_ranges.append(netrange.dvs_id ~ ":" ~ netrange.vlan_min ~ ":" ~ netrange.vlan_max) %}
+{%- endfor %}
+{%- if network_vlan_ranges %}network_vlan_ranges = {{ ','.join(network_vlan_ranges) }}{%- endif %}
+
+# Timeout interval for NSX backend transactions. (integer value)
+#nsx_transaction_timeout = 240
+{%- if server.vmware.get('nsxv', {}).nsx_transaction_timeout is defined %}
+nsx_transaction_timeout = {{ server.vmware.nsxv.nsx_transaction_timeout }}
+{%- endif %}
+
+# If False, different tenants will not use the same DHCP edge or
+# router edge. (boolean value)
+#share_edges_between_tenants = true
+{%- if server.vmware.get('nsxv', {}).share_edges_between_tenants is defined %}
+share_edges_between_tenants = {{ server.vmware.nsxv.share_edges_between_tenants }}
+{%- endif %}
+
+# List of the enabled housekeeping jobs (list value)
+#housekeeping_jobs = error_dhcp_edge,error_backup_edge
+{%- if server.vmware.get('nsxv', {}).housekeeping_jobs is defined %}
+housekeeping_jobs = {{ ','.join(server.vmware.nsxv.housekeeping_jobs) }}
+{%- endif %}
+
+
+# List of housekeeping jobs which are enabled in read only mode (list
+# value)
+#housekeeping_readonly_jobs =
+{%- if server.vmware.get('nsxv', {}).housekeeping_readonly_jobs is defined %}
+housekeeping_readonly_jobs = {{ ','.join(server.vmware.nsxv.housekeeping_readonly_jobs) }}
+{%- endif %}
+
+
+# Housekeeping will only warn about breakage. (boolean value)
+#housekeeping_readonly = true
+{%- if server.vmware.get('nsxv', {}).housekeeping_readonly is defined %}
+housekeeping_readonly = {{ server.vmware.nsxv.housekeeping_readonly }}
+{%- endif %}
+
+# Use default block all rule when no security groups are set on a port
+# and port security is enabled (boolean value)
+#use_default_block_all = false
+{%- if server.vmware.get('nsxv', {}).use_default_block_all is defined %}
+use_default_block_all = {{ server.vmware.nsxv.use_default_block_all }}
+{%- endif %}
+
+
+[quotas]
+
+#
+# From nsx
+#
+
+# Number of network gateways allowed per tenant, -1 for unlimited
+# (integer value)
+#quota_network_gateway = 5
+{%- if server.vmware.get('nsxv', {}).quota_network_gateway is defined %}
+quota_network_gateway = {{ server.vmware.nsxv.quota_network_gateway }}
+{%- endif %}
diff --git a/neutron/server.sls b/neutron/server.sls
index e0c423f..0b4c877 100644
--- a/neutron/server.sls
+++ b/neutron/server.sls
@@ -327,6 +327,41 @@
 {%- endif %}
 {%- endif %}
 
+{% if server.backend.engine == "vmware" %}
+
+vmware_neutron_packages:
+  pkg.installed:
+  - names:
+    - python-vmware-nsx
+
+/etc/neutron/plugins/vmware/nsx.ini:
+  file.managed:
+  - source: salt://neutron/files/{{ server.version }}/plugins/nsx.ini
+  - user: root
+  - group: root
+  - mode: 644
+  - makedirs: true
+  - dir_mode: 755
+  - template: jinja
+  - require:
+    - pkg: vmware_neutron_packages
+
+# NOTE: state id is prefixed with 'vmware_' to avoid a conflicting-ID render
+# error with any pre-existing neutron_db_manage state in this sls file.
+# WARNING: cmd.run is not idempotent; the schema upgrade runs on every state
+# application (neutron-db-manage 'upgrade head' is a no-op once current).
+vmware_neutron_db_manage:
+  cmd.run:
+  - name: neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/vmware/nsx.ini upgrade head
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
+  - require:
+    - file: /etc/neutron/neutron.conf
+    - file: /etc/neutron/plugins/vmware/nsx.ini
+
+{%- endif %}
+
 {% if server.get('bgp_vpn', {}).get('enabled', False) %}
 
 bgpvpn_packages: