Merge "Fix openstack_api_check_status service name"
diff --git a/.kitchen.travis.yml b/.kitchen.travis.yml
new file mode 100644
index 0000000..f847543
--- /dev/null
+++ b/.kitchen.travis.yml
@@ -0,0 +1,6 @@
+suites:
+
+  - name: <%= ENV['SUITE'] %>
+    provisioner:
+      pillars-from-files:
+        neutron.sls: tests/pillar/<%= ENV['SUITE'] %>.sls
diff --git a/.kitchen.yml b/.kitchen.yml
index d144018..b762e23 100644
--- a/.kitchen.yml
+++ b/.kitchen.yml
@@ -78,6 +78,17 @@
       pillars-from-files:
         neutron.sls: tests/pillar/compute_sriov.sls
 
+  - name: compute_qos_sriov
+    provisioner:
+      pillars-from-files:
+        neutron.sls: tests/pillar/compute_qos_sriov.sls
+
+  - name: compute_qos
+    provisioner:
+      pillars-from-files:
+        neutron.sls: tests/pillar/compute_qos.sls
+
+
   - name: control_cluster
     provisioner:
       pillars-from-files:
@@ -93,11 +104,21 @@
       pillars-from-files:
         neutron.sls: tests/pillar/control_nodvr.sls
 
+  - name: control_lbaas_octavia
+    provisioner:
+      pillars-from-files:
+        neutron.sls: tests/pillar/control_lbaas_octavia.sls
+
   - name: control_single
     provisioner:
       pillars-from-files:
         neutron.sls: tests/pillar/control_single.sls
 
+  - name: control_qos
+    provisioner:
+      pillars-from-files:
+        neutron.sls: tests/pillar/control_qos.sls
+
   - name: gateway_dvr
     provisioner:
       pillars-from-files:
@@ -107,4 +128,10 @@
     provisioner:
       pillars-from-files:
         neutron.sls: tests/pillar/gateway_legacy.sls
+
+  - name: gateway_qos
+    provisioner:
+      pillars-from-files:
+        neutron.sls: tests/pillar/gateway_qos.sls
+
 # vim: ft=yaml sw=2 ts=2 sts=2 tw=125
diff --git a/.travis.yml b/.travis.yml
index 7a77247..3925301 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -17,15 +17,35 @@
   - bundle install
 
 env:
-    - PLATFORM=trevorj/salty-whales:trusty
-    - PLATFORM=trevorj/salty-whales:xenial
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=compute_dpdk
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=compute_dpdk
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=compute_dvr
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=compute_dvr
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=compute_legacy
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=compute_legacy
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=compute_nonexternal_dvr
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=compute_nonexternal_dvr
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=compute_sriov
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=compute_sriov
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=control_cluster
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=control_cluster
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=control_dvr
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=control_dvr
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=control_nodvr
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=control_nodvr
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=control_single
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=control_single
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=gateway_dvr
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=gateway_dvr
+    - PLATFORM=trevorj/salty-whales:trusty SUITE=gateway_legacy
+    - PLATFORM=trevorj/salty-whales:xenial SUITE=gateway_legacy
 
 before_script:
   - set -o pipefail
   - make test | tail
 
 script:
-  - test ! -e .kitchen.yml || bundle exec kitchen test -t tests/integration
+  - KITCHEN_LOCAL_YAML=.kitchen.travis.yml bundle exec kitchen test -t tests/integration
 
 notifications:
   webhooks:
diff --git a/README.rst b/README.rst
index c01b822..66b2299 100644
--- a/README.rst
+++ b/README.rst
@@ -73,7 +73,8 @@
           # Add key without value to remove line from policy.json
           'create_network:shared':
 
-Neutron lbaas provides on the controller node
+Neutron LBaaSv2 enablement
+--------------------------
 
 .. code-block:: yaml
 
@@ -82,9 +83,13 @@
       lbaas:
         enabled: true
         providers:
+          octavia:
+            engine: octavia
+            driver_path: 'neutron_lbaas.drivers.octavia.driver.OctaviaDriver'
+            base_url: 'http://127.0.0.1:9876'
           avi_adc:
-            enabled: true
             engine: avinetworks
+            driver_path: 'avi_lbaasv2.avi_driver.AviDriver'
             controller_address: 10.182.129.239
             controller_user: admin
             controller_password: Cloudlab2016
@@ -93,15 +98,15 @@
             engine: avinetworks
             ...
 
-Note: If you want contrail lbaas then backend is only required. Lbaas in
-pillar should be define only if it should be disabled.
+Note: If the Contrail backend is set, Opencontrail loadbalancer would be enabled
+automatically. In this case lbaas should be disabled in pillar:
 
 .. code-block:: yaml
 
   neutron:
     server:
       lbaas:
-        enabled: disabled
+        enabled: false
 
 Enable CORS parameters
 
@@ -514,6 +519,22 @@
             use_ovs_ports:
             - float-to-ex
 
+Additional VXLAN tenant network settings
+----------------------------------------
+
+The default multicast group of 224.0.0.1 only multicasts to a single subnet.
+Allow overriding it to allow larger underlay network topologies.
+
+Neutron Server
+
+.. code-block:: yaml
+
+    neutron:
+      server:
+        vxlan:
+          group: 239.0.0.0/8
+          vni_ranges: "2:65535"
+
 Neutron VLAN tenant networks with Network Nodes
 -----------------------------------------------
 
@@ -620,6 +641,20 @@
             ovs:
               driver: openvswitch
 
+Neutron with VLAN-aware-VMs
+
+.. code-block:: yaml
+
+    neutron:
+      server:
+        vlan_aware_vms: true
+      ....
+      compute:
+        vlan_aware_vms: true
+      ....
+      gateway:
+        vlan_aware_vms: true
+
 Neutron Server
 --------------
 
diff --git a/_modules/neutronng.py b/_modules/neutronng.py
index 6a3eaf8..06f273e 100644
--- a/_modules/neutronng.py
+++ b/_modules/neutronng.py
@@ -328,9 +328,7 @@
     .. code-block:: bash
         salt '*' neutronng.create_port network_id='openstack-network-id'
     '''
-    response = neutron_interface.create_port({'port': port_params})
-    if 'port' in response and 'id' in response['port']:
-        return response['port']['id']
+    return neutron_interface.create_port({'port': port_params})
 
 
 @_autheticate
diff --git a/_states/neutronng.py b/_states/neutronng.py
index 89e0011..92afb7a 100644
--- a/_states/neutronng.py
+++ b/_states/neutronng.py
@@ -409,7 +409,7 @@
 
 def security_group_present(name=None,
                            tenant=None,
-                           description=None,
+                           description='',
                            rules=[],
                            profile=None,
                            endpoint_type=None):
@@ -527,6 +527,109 @@
     # Security group already exists, but the specified rules were added to it.
     return _updated(name, 'security_group', {'New Rules': new_rules})
 
+
+def port_present(network_name, profile=None, endpoint_type=None, name=None,
+                 tenant=None, description='', fixed_ips=None, device_id=None,
+                 device_owner=None, binding_host_id=None, admin_state_up=True,
+                 mac_address=None, vnic_type=None, binding_profile=None,
+                 security_groups=None, extra_dhcp_opt=None, qos_policy=None,
+                 allowed_address_pair=None, dns_name=None):
+    """
+    Ensure the port is present with specified parameters.
+
+    :param network_name: Name of the network to create port in
+    :param profile: Authentication profile
+    :param endpoint_type: Endpoint type
+    :param name: Name of this port
+    :param tenant: Tenant in which the port should be created, available for
+                   admin only.
+    :param description: Port description
+    :param fixed_ips: Desired IP and/or subnet for this port:
+                      subnet_id=<name_or_id>,ip_address=<ip>.
+    :param device_id: Device ID of this port
+    :param device_owner: Device owner of this port
+    :param binding_host_id: The ID of the host where the port resides.
+    :param admin_state_up: Admin state of this port
+    :param mac_address: MAC address of this port
+    :param vnic_type: VNIC type for this port
+    :param binding_profile: Custom data to be passed as binding:profile
+    :param security_groups: Security group associated with the port
+    :param extra_dhcp_opt: Extra dhcp options to be assigned to this port:
+                           opt_name=<dhcp_option_name>,opt_value=<value>,
+                                     ip_version={4, 6}
+    :param qos_policy: ID or name of the QoS policy that should be attached to
+                       the resource
+    :param allowed_address_pair: ip_address=IP_ADDR|CIDR[,mac_address=MAC_ADDR]
+                                 Allowed address pair associated with the port.
+                                 "ip_address" parameter is required. IP address
+                                 or CIDR can be specified for "ip_address".
+                                 "mac_address" parameter is optional.
+    :param dns_name: Assign DNS name to the port (requires DNS integration
+                     extension)
+    """
+
+    connection_args = _auth(profile, endpoint_type)
+    tenant_id = _get_tenant_id(tenant_name=tenant, **connection_args)
+    network_id = None
+    port_exists = False
+
+    port_arguments = _get_non_null_args(
+        name=name, tenant_id=tenant_id, description=description,
+        fixed_ips=fixed_ips, device_id=device_id, device_owner=device_owner,
+        admin_state_up=admin_state_up,
+        mac_address=mac_address, vnic_type=vnic_type,
+        binding_profile=binding_profile,
+        extra_dhcp_opt=extra_dhcp_opt, qos_policy=qos_policy,
+        allowed_address_pair=allowed_address_pair, dns_name=dns_name)
+    if binding_host_id:
+        port_arguments['binding:host_id'] = binding_host_id
+    if security_groups:
+        sec_group_list = []
+        for sec_group_name in security_groups:
+            security_group = _neutron_module_call(
+                'list_security_groups', name=sec_group_name, **connection_args)
+            if security_group:
+                sec_group_list.append(security_group[sec_group_name]['id'])
+        port_arguments['security_groups'] = sec_group_list
+
+    existing_networks = _neutron_module_call(
+        'list_networks', tenant_id=tenant_id, name=network_name,
+        **connection_args)['networks']
+    if len(existing_networks) == 0:
+        LOG.error("Can't find network with name: {0}".format(network_name))
+    elif len(existing_networks) == 1:
+        network_id = existing_networks[0]['id']
+    elif len(existing_networks) > 1:
+        LOG.error("Multiple networks with name: {0} found.".format(network_name))
+
+    if network_id is None:
+        return _create_failed(name, 'port')
+
+    port_arguments['network_id'] = network_id
+
+    existing_ports = _neutron_module_call(
+        'list_ports', network_id=network_id, tenant_id=tenant_id,
+        **connection_args)
+
+    if name:
+        for key, value in existing_ports.iteritems():
+            try:
+                if value['name'] == name and value['tenant_id'] == tenant_id:
+                    port_exists = True
+                    break
+            except KeyError:
+                pass
+
+    if not port_exists:
+        port_arguments.update(connection_args)
+        res = _neutron_module_call('create_port', **port_arguments)['port']
+        if res['name'] == name:
+            return _created(name, 'port', res)
+        return _create_failed(name, 'port')
+    else:
+        return _no_change('for instance {0}'.format(name), 'port')
+
+
 def _created(name, resource, resource_definition):
     changes_dict = {'name': name,
                     'changes': resource_definition,
diff --git a/metadata/service/client/init.yml b/metadata/service/client/init.yml
new file mode 100644
index 0000000..d156a6b
--- /dev/null
+++ b/metadata/service/client/init.yml
@@ -0,0 +1,6 @@
+applications:
+  - neutron
+parameters:
+  neutron:
+    client:
+      enabled: true
\ No newline at end of file
diff --git a/metadata/service/compute/single.yml b/metadata/service/compute/single.yml
index 3422e39..377ff39 100644
--- a/metadata/service/compute/single.yml
+++ b/metadata/service/compute/single.yml
@@ -18,6 +18,8 @@
       dvr: false
       external_access: false
       security_groups_enabled: true
+      qos: false
+      vlan_aware_vms: false
       metadata:
         host: ${_param:cluster_vip_address}
         password: ${_param:metadata_password}
diff --git a/metadata/service/control/cluster.yml b/metadata/service/control/cluster.yml
index eaa8707..c69a8f8 100644
--- a/metadata/service/control/cluster.yml
+++ b/metadata/service/control/cluster.yml
@@ -9,6 +9,7 @@
     server:
       enabled: true
       dns_domain: novalocal
+      vlan_aware_vms: false
       version: ${_param:neutron_version}
       bind:
         address: ${_param:cluster_local_address}
diff --git a/metadata/service/control/container.yml b/metadata/service/control/container.yml
index bca3d9e..367167b 100644
--- a/metadata/service/control/container.yml
+++ b/metadata/service/control/container.yml
@@ -12,6 +12,7 @@
               server:
                 enabled: true
                 dns_domain: novalocal
+                vlan_aware_vms: false
                 version: ${_param:neutron_version}
                 bind:
                   address: 0.0.0.0
diff --git a/metadata/service/control/single.yml b/metadata/service/control/single.yml
index 24b5a2f..12a603c 100644
--- a/metadata/service/control/single.yml
+++ b/metadata/service/control/single.yml
@@ -13,6 +13,8 @@
       dns_domain: novalocal
       tunnel_type: vxlan
       security_groups_enabled: true
+      qos: false
+      vlan_aware_vms: false
       version: ${_param:neutron_version}
       bind:
         address: ${_param:single_address}
diff --git a/metadata/service/gateway/single.yml b/metadata/service/gateway/single.yml
index a9628f0..1af2525 100644
--- a/metadata/service/gateway/single.yml
+++ b/metadata/service/gateway/single.yml
@@ -18,6 +18,8 @@
       dvr: false
       external_access: True
       security_groups_enabled: true
+      qos: false
+      vlan_aware_vms: false
       metadata:
         host: ${_param:cluster_vip_address}
         password: ${_param:metadata_password}
diff --git a/neutron/client.sls b/neutron/client.sls
index e31e969..44b03f7 100644
--- a/neutron/client.sls
+++ b/neutron/client.sls
@@ -7,6 +7,26 @@
 
 
 {%- for identity_name, identity in client.server.iteritems() %}
+
+{%- if identity.security_group is defined %}
+
+{%- for security_group_name, security_group in identity.security_group.iteritems() %}
+openstack_security_group_{{ security_group_name }}:
+  neutronng.security_group_present:
+    - name: {{ security_group_name }}
+    {%- if security_group.description is defined %}
+    - description: {{ security_group.description }}
+    {%- endif %}
+    - rules: {{ security_group.rules }}
+    - profile: {{ identity_name }}
+    - tenant: {{ security_group.tenant }}
+    {%- if identity.endpoint_type is defined %}
+    - endpoint_type: {{ identity.endpoint_type }}
+    {%- endif %}
+{%- endfor %}
+
+{%- endif %}
+
 {%- if identity.network is defined %}
 
 {%- for network_name, network in identity.network.iteritems() %}
@@ -80,6 +100,66 @@
 
 {%- endif %}
 
+{%- if network.port is defined %}
+
+{%- for port_name, port in network.port.iteritems() %}
+neutron_openstack_port_{{ port_name }}:
+  neutronng.port_present:
+    - network_name: {{ network_name }}
+    - name: {{ port_name }}
+    - profile: {{ identity_name }}
+    - tenant: {{ network.tenant }}
+    {%- if identity.endpoint_type is defined %}
+    - endpoint_type: {{ identity.endpoint_type }}
+    {%- endif %}
+    {%- if port.description is defined %}
+    - description: {{ port.description }}
+    {%- endif %}
+    {%- if port.fixed_ips is defined %}
+    - fixed_ips: {{ port.fixed_ips }}
+    {%- endif %}
+    {%- if port.device_id is defined %}
+    - device_id: {{ port.device_id }}
+    {%- endif %}
+    {%- if port.device_owner is defined %}
+    - device_owner: {{ port.device_owner }}
+    {%- endif %}
+    {%- if port.binding_host_id is defined %}
+    - binding_host_id: {{ port.binding_host_id }}
+    {%- endif %}
+    {%- if port.admin_state_up is defined %}
+    - admin_state_up: {{ port.admin_state_up }}
+    {%- endif %}
+    {%- if port.mac_address is defined %}
+    - mac_address: {{ port.mac_address }}
+    {%- endif %}
+    {%- if port.vnic_type is defined %}
+    - vnic_type: {{ port.vnic_type }}
+    {%- endif %}
+    {%- if port.binding_profile is defined %}
+    - binding_profile: {{ port.binding_profile }}
+    {%- endif %}
+    {%- if port.security_groups is defined %}
+    - security_groups: {{ port.security_groups }}
+    {%- endif %}
+    {%- if port.extra_dhcp_opt is defined %}
+    - extra_dhcp_opt: {{ port.extra_dhcp_opt }}
+    {%- endif %}
+    {%- if port.qos_policy is defined %}
+    - qos_policy: {{ port.qos_policy }}
+    {%- endif %}
+    {%- if port.allowed_address_pair is defined %}
+    - allowed_address_pair: {{ port.allowed_address_pair }}
+    {%- endif %}
+    {%- if port.dns_name is defined %}
+    - dns_name: {{ port.dns_name }}
+    {%- endif %}
+    - require:
+      - neutronng: neutron_openstack_network_{{ network_name }}
+{%- endfor %}
+
+{%- endif %}
+
 {%- endfor %}
 
 {%- endif %}
@@ -102,23 +182,6 @@
 
 {%- endif %}
 
-{%- if identity.security_group is defined %}
-
-{%- for security_group_name, security_group in identity.security_group.iteritems() %}
-openstack_security_group_{{ security_group_name }}:
-  neutronng.security_group_present:
-    - name: {{ security_group_name }}
-    - description: {{ security_group.description }}
-    - rules: {{ security_group.rules }}
-    - profile: {{ identity_name }}
-    - tenant: {{ security_group.tenant }}
-    {%- if identity.endpoint_type is defined %}
-    - endpoint_type: {{ identity.endpoint_type }}
-    {%- endif %}
-{%- endfor %}
-
-{%- endif %}
-
 {%- if identity.floating_ip is defined %}
 
 {%- for instance_name, instance in identity.floating_ip.iteritems() %}
diff --git a/neutron/compute.sls b/neutron/compute.sls
index bfefb53..9770b85 100644
--- a/neutron/compute.sls
+++ b/neutron/compute.sls
@@ -32,13 +32,15 @@
   service.running:
   - name: neutron-sriov-agent
   - enable: true
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
   - watch_in:
     - service: neutron_compute_services
   - watch:
     - file: /etc/neutron/neutron.conf
     - file: /etc/neutron/plugins/ml2/openvswitch_agent.ini
     - file: /etc/neutron/plugins/ml2/sriov_agent.ini
-  - unless: grains.get('noservices', False)
 
 {% endif %}
 
@@ -57,6 +59,7 @@
       - neutron-l3-agent
       - neutron-metadata-agent
     - watch:
+      - file: /etc/neutron/neutron.conf
       - file: /etc/neutron/l3_agent.ini
       - file: /etc/neutron/metadata_agent.ini
     - require:
diff --git a/neutron/files/grafana_dashboards/neutron_prometheus.json b/neutron/files/grafana_dashboards/neutron_prometheus.json
index e1826ec..d73664d 100755
--- a/neutron/files/grafana_dashboards/neutron_prometheus.json
+++ b/neutron/files/grafana_dashboards/neutron_prometheus.json
@@ -566,7 +566,7 @@
           "tableColumn": "",
           "targets": [
             {
-              "expr": "openstack_neutron_ports{owner=~\"compute:.*\"}",
+              "expr": "openstack_neutron_ports{owner=~\"compute:.*\",state=\"active\"}",
               "format": "time_series",
               "intervalFactor": 2,
               "refId": "A",
diff --git a/neutron/files/mitaka/dhcp_agent.ini b/neutron/files/mitaka/dhcp_agent.ini
index 908086d..17a01ce 100644
--- a/neutron/files/mitaka/dhcp_agent.ini
+++ b/neutron/files/mitaka/dhcp_agent.ini
@@ -19,7 +19,7 @@
 
 # The driver used to manage the virtual interface. (string value)
 #interface_driver = <None>
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = openvswitch
 
 # Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs commands will fail with ALARMCLOCK error. (integer value)
 #ovs_vsctl_timeout = 10
diff --git a/neutron/files/mitaka/l3_agent.ini b/neutron/files/mitaka/l3_agent.ini
index 0095ab7..ad79623 100644
--- a/neutron/files/mitaka/l3_agent.ini
+++ b/neutron/files/mitaka/l3_agent.ini
@@ -25,7 +25,7 @@
 
 # The driver used to manage the virtual interface. (string value)
 #interface_driver = <None>
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = openvswitch
 
 # Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs commands will fail with ALARMCLOCK error. (integer value)
 #ovs_vsctl_timeout = 10
diff --git a/neutron/files/mitaka/ml2_conf.ini b/neutron/files/mitaka/ml2_conf.ini
index 67841f2..9a8e79c 100644
--- a/neutron/files/mitaka/ml2_conf.ini
+++ b/neutron/files/mitaka/ml2_conf.ini
@@ -180,12 +180,12 @@
 # Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation
 # (list value)
 #vni_ranges =
-vni_ranges =2:65535
+vni_ranges = {{ server.get('vxlan', {}).vni_ranges|default('2:65535') }}
 
 # Multicast group for VXLAN. When configured, will enable sending all broadcast traffic to this multicast group. When left unconfigured,
 # will disable multicast VXLAN mode. (string value)
 #vxlan_group = <None>
-vxlan_group = 224.0.0.1
+vxlan_group = {{ server.get('vxlan', {}).group|default('224.0.0.1') }}
 
 
 [securitygroup]
@@ -199,7 +199,7 @@
 {%- if server.dpdk %}
 firewall_driver = openvswitch
 {%- else %}
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+firewall_driver = iptables_hybrid
 {%- endif %}
 
 # Controls whether the neutron security group API is enabled in the server. It should be false when using no security groups or using the
diff --git a/neutron/files/mitaka/neutron-generic.conf.Debian b/neutron/files/mitaka/neutron-generic.conf.Debian
index 0741f47..092fd56 100644
--- a/neutron/files/mitaka/neutron-generic.conf.Debian
+++ b/neutron/files/mitaka/neutron-generic.conf.Debian
@@ -518,7 +518,7 @@
 
 # Seconds to wait for a response from a call. (integer value)
 #rpc_response_timeout = 60
-rpc_response_timeout=60
+rpc_response_timeout=120
 
 # A URL representing the messaging driver to use and its full configuration. If
 # not set, we fall back to the rpc_backend option and driver specific
@@ -584,7 +584,7 @@
 #use_helper_for_ns_read = true
 
 # Root helper daemon application to use when possible. (string value)
-#root_helper_daemon = <None>
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
 
 # Seconds between nodes reporting state to server; should be less than
 # agent_down_time, best if it is half or less than agent_down_time. (floating
diff --git a/neutron/files/mitaka/neutron-server.conf.Debian b/neutron/files/mitaka/neutron-server.conf.Debian
index f36b5fa..c75d6a3 100644
--- a/neutron/files/mitaka/neutron-server.conf.Debian
+++ b/neutron/files/mitaka/neutron-server.conf.Debian
@@ -217,10 +217,12 @@
 
 # Number of RPC worker processes for service (integer value)
 #rpc_workers = 1
+rpc_workers = {{ grains.num_cpus }}
 
 # Number of RPC worker processes dedicated to state reports queue (integer
 # value)
 #rpc_state_report_workers = 1
+rpc_state_report_workers = 4
 
 # Range of seconds to randomly delay when starting the periodic task scheduler
 # to reduce stampeding. (Disable by setting to 0) (integer value)
@@ -332,6 +334,7 @@
 # Automatically reschedule routers from offline L3 agents to online L3 agents.
 # (boolean value)
 #allow_automatic_l3agent_failover = false
+allow_automatic_l3agent_failover = true
 
 # Enable HA mode for virtual routers. (boolean value)
 #l3_ha = false
@@ -340,6 +343,7 @@
 # Maximum number of L3 agents which a HA router will be scheduled on. If it is
 # set to 0 then the router will be scheduled on every agent. (integer value)
 #max_l3_agents_per_router = 3
+max_l3_agents_per_router = 0
 
 # Minimum number of L3 agents which a HA router will be scheduled on. If it is
 # set to 0 then the router will be scheduled on every agent. (integer value)
@@ -541,7 +545,7 @@
 
 # Seconds to wait for a response from a call. (integer value)
 #rpc_response_timeout = 60
-rpc_response_timeout=60
+rpc_response_timeout=120
 
 # A URL representing the messaging driver to use and its full configuration. If
 # not set, we fall back to the rpc_backend option and driver specific
@@ -609,7 +613,7 @@
 #use_helper_for_ns_read = true
 
 # Root helper daemon application to use when possible. (string value)
-#root_helper_daemon = <None>
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
 
 # Seconds between nodes reporting state to server; should be less than
 # agent_down_time, best if it is half or less than agent_down_time. (floating
@@ -775,7 +779,7 @@
 # Deprecated group/name - [DEFAULT]/sql_max_pool_size
 # Deprecated group/name - [DATABASE]/sql_max_pool_size
 #max_pool_size = <None>
-max_pool_size = 20
+max_pool_size = {{ [grains.num_cpus * 5, 30] | sort | first }}
 
 # Maximum number of database connection retries during startup. Set to -1 to
 # specify an infinite retry count. (integer value)
diff --git a/neutron/files/mitaka/openvswitch_agent.ini b/neutron/files/mitaka/openvswitch_agent.ini
index ed5231b..01162a0 100644
--- a/neutron/files/mitaka/openvswitch_agent.ini
+++ b/neutron/files/mitaka/openvswitch_agent.ini
@@ -250,7 +250,7 @@
 {%- if neutron.dpdk %}
 firewall_driver = openvswitch
 {%- else %}
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+firewall_driver = iptables_hybrid
 {%- endif %}
 
 # Controls whether the neutron security group API is enabled in the server. It should be false when using no security groups or using the
diff --git a/neutron/files/newton/dhcp_agent.ini b/neutron/files/newton/dhcp_agent.ini
index 908086d..17a01ce 100644
--- a/neutron/files/newton/dhcp_agent.ini
+++ b/neutron/files/newton/dhcp_agent.ini
@@ -19,7 +19,7 @@
 
 # The driver used to manage the virtual interface. (string value)
 #interface_driver = <None>
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = openvswitch
 
 # Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs commands will fail with ALARMCLOCK error. (integer value)
 #ovs_vsctl_timeout = 10
diff --git a/neutron/files/newton/l3_agent.ini b/neutron/files/newton/l3_agent.ini
index 0095ab7..56bf297 100644
--- a/neutron/files/newton/l3_agent.ini
+++ b/neutron/files/newton/l3_agent.ini
@@ -25,7 +25,7 @@
 
 # The driver used to manage the virtual interface. (string value)
 #interface_driver = <None>
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = openvswitch
 
 # Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs commands will fail with ALARMCLOCK error. (integer value)
 #ovs_vsctl_timeout = 10
@@ -90,10 +90,12 @@
 # be used. (string value)
 #external_ingress_mark = 0x2
 
-# Name of bridge used for external network traffic. This should be set to an empty value for the Linux Bridge. When this parameter is set,
-# each L3 agent can be associated with no more than one external network. (string value)
-#external_network_bridge = br-ex
-external_network_bridge = 
+# DEPRECATED: Name of bridge used for external network traffic. When this parameter is set, the L3 agent will plug an interface directly
+# into an external bridge which will not allow any wiring by the L2 agent. Using this will result in incorrect port statuses. This option is
+# deprecated and will be removed in Ocata. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#external_network_bridge =
 
 # Seconds between running periodic tasks (integer value)
 #periodic_interval = 40
diff --git a/neutron/files/newton/ml2_conf.ini b/neutron/files/newton/ml2_conf.ini
index 67841f2..9a8e79c 100644
--- a/neutron/files/newton/ml2_conf.ini
+++ b/neutron/files/newton/ml2_conf.ini
@@ -180,12 +180,12 @@
 # Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of VXLAN VNI IDs that are available for tenant network allocation
 # (list value)
 #vni_ranges =
-vni_ranges =2:65535
+vni_ranges = {{ server.get('vxlan', {}).vni_ranges|default('2:65535') }}
 
 # Multicast group for VXLAN. When configured, will enable sending all broadcast traffic to this multicast group. When left unconfigured,
 # will disable multicast VXLAN mode. (string value)
 #vxlan_group = <None>
-vxlan_group = 224.0.0.1
+vxlan_group = {{ server.get('vxlan', {}).group|default('224.0.0.1') }}
 
 
 [securitygroup]
@@ -199,7 +199,7 @@
 {%- if server.dpdk %}
 firewall_driver = openvswitch
 {%- else %}
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+firewall_driver = iptables_hybrid
 {%- endif %}
 
 # Controls whether the neutron security group API is enabled in the server. It should be false when using no security groups or using the
diff --git a/neutron/files/newton/neutron-generic.conf.Debian b/neutron/files/newton/neutron-generic.conf.Debian
index 2f4b89e..8c39aa6 100644
--- a/neutron/files/newton/neutron-generic.conf.Debian
+++ b/neutron/files/newton/neutron-generic.conf.Debian
@@ -518,7 +518,7 @@
 
 # Seconds to wait for a response from a call. (integer value)
 #rpc_response_timeout = 60
-rpc_response_timeout=60
+rpc_response_timeout=120
 
 # A URL representing the messaging driver to use and its full configuration. If
 # not set, we fall back to the rpc_backend option and driver specific
@@ -537,7 +537,6 @@
 # The messaging driver to use, defaults to rabbit. Other drivers include amqp
 # and zmq. (string value)
 #rpc_backend = rabbit
-rpc_backend = rabbit
 
 # The default exchange under which topics are scoped. May be overridden by an
 # exchange name specified in the transport_url option. (string value)
@@ -593,7 +592,7 @@
 #use_helper_for_ns_read = true
 
 # Root helper daemon application to use when possible. (string value)
-#root_helper_daemon = <None>
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
 
 # Seconds between nodes reporting state to server; should be less than
 # agent_down_time, best if it is half or less than agent_down_time. (floating
@@ -1316,7 +1315,6 @@
 # count). (integer value)
 # Deprecated group/name - [DEFAULT]/rabbit_max_retries
 #rabbit_max_retries = 0
-rabbit_max_retries = 0
 
 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
 # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
diff --git a/neutron/files/newton/neutron-server.conf.Debian b/neutron/files/newton/neutron-server.conf.Debian
index 1ae886b..2aacbe3 100644
--- a/neutron/files/newton/neutron-server.conf.Debian
+++ b/neutron/files/newton/neutron-server.conf.Debian
@@ -217,10 +217,12 @@
 
 # Number of RPC worker processes for service (integer value)
 #rpc_workers = 1
+rpc_workers = {{ grains.num_cpus }}
 
 # Number of RPC worker processes dedicated to state reports queue (integer
 # value)
 #rpc_state_report_workers = 1
+rpc_state_report_workers = 4
 
 # Range of seconds to randomly delay when starting the periodic task scheduler
 # to reduce stampeding. (Disable by setting to 0) (integer value)
@@ -332,6 +334,7 @@
 # Automatically reschedule routers from offline L3 agents to online L3 agents.
 # (boolean value)
 #allow_automatic_l3agent_failover = false
+allow_automatic_l3agent_failover = true
 
 # Enable HA mode for virtual routers. (boolean value)
 #l3_ha = false
@@ -340,6 +343,7 @@
 # Maximum number of L3 agents which a HA router will be scheduled on. If it is
 # set to 0 then the router will be scheduled on every agent. (integer value)
 #max_l3_agents_per_router = 3
+max_l3_agents_per_router = 0
 
 # Minimum number of L3 agents which a HA router will be scheduled on. If it is
 # set to 0 then the router will be scheduled on every agent. (integer value)
@@ -541,18 +545,12 @@
 
 # Seconds to wait for a response from a call. (integer value)
 #rpc_response_timeout = 60
-rpc_response_timeout=60
+rpc_response_timeout=120
 
 # A URL representing the messaging driver to use and its full configuration. If
 # not set, we fall back to the rpc_backend option and driver specific
 # configuration. (string value)
 #transport_url = <None>
-
-# The messaging driver to use, defaults to rabbit. Other drivers include amqp
-# and zmq. (string value)
-#rpc_backend = rabbit
-rpc_backend = rabbit
-
 {%- if server.message_queue.members is defined %}
 transport_url = rabbit://{% for member in server.message_queue.members -%}
                              {{ server.message_queue.user }}:{{ server.message_queue.password }}@{{ member.host }}:{{ member.get('port', 5672) }}
@@ -563,6 +561,11 @@
 transport_url = rabbit://{{ server.message_queue.user }}:{{ server.message_queue.password }}@{{ server.message_queue.host }}:{{ server.message_queue.port }}/{{ server.message_queue.virtual_host }}
 {%- endif %}
 
+# The messaging driver to use, defaults to rabbit. Other drivers include amqp
+# and zmq. (string value)
+#rpc_backend = rabbit
+
+
 # The default exchange under which topics are scoped. May be overridden by an
 # exchange name specified in the transport_url option. (string value)
 #control_exchange = neutron
@@ -619,7 +622,7 @@
 #use_helper_for_ns_read = true
 
 # Root helper daemon application to use when possible. (string value)
-#root_helper_daemon = <None>
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
 
 # Seconds between nodes reporting state to server; should be less than
 # agent_down_time, best if it is half or less than agent_down_time. (floating
@@ -785,7 +788,7 @@
 # Deprecated group/name - [DEFAULT]/sql_max_pool_size
 # Deprecated group/name - [DATABASE]/sql_max_pool_size
 #max_pool_size = <None>
-max_pool_size = 20
+max_pool_size = {{ [grains.num_cpus * 5, 30] | sort | first }}
 
 # Maximum number of database connection retries during startup. Set to -1 to
 # specify an infinite retry count. (integer value)
@@ -1398,7 +1401,6 @@
 # count). (integer value)
 # Deprecated group/name - [DEFAULT]/rabbit_max_retries
 #rabbit_max_retries = 0
-rabbit_max_retries = 0
 
 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
 # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
diff --git a/neutron/files/newton/openvswitch_agent.ini b/neutron/files/newton/openvswitch_agent.ini
index ed5231b..01162a0 100644
--- a/neutron/files/newton/openvswitch_agent.ini
+++ b/neutron/files/newton/openvswitch_agent.ini
@@ -250,7 +250,7 @@
 {%- if neutron.dpdk %}
 firewall_driver = openvswitch
 {%- else %}
-firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
+firewall_driver = iptables_hybrid
 {%- endif %}
 
 # Controls whether the neutron security group API is enabled in the server. It should be false when using no security groups or using the
diff --git a/neutron/files/ocata/dhcp_agent.ini b/neutron/files/ocata/dhcp_agent.ini
index 293b968..d327e64 100644
--- a/neutron/files/ocata/dhcp_agent.ini
+++ b/neutron/files/ocata/dhcp_agent.ini
@@ -19,7 +19,7 @@
 
 # The driver used to manage the virtual interface. (string value)
 #interface_driver = <None>
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = openvswitch
 
 # Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs commands will fail with ALARMCLOCK error. (integer value)
 #ovs_vsctl_timeout = 10
diff --git a/neutron/files/ocata/l3_agent.ini b/neutron/files/ocata/l3_agent.ini
index 44b27ff..41b87e6 100644
--- a/neutron/files/ocata/l3_agent.ini
+++ b/neutron/files/ocata/l3_agent.ini
@@ -20,7 +20,7 @@
 
 # The driver used to manage the virtual interface. (string value)
 #interface_driver = <None>
-interface_driver = neutron.agent.linux.interface.OVSInterfaceDriver
+interface_driver = openvswitch
 
 # Timeout in seconds for ovs-vsctl commands. If the timeout expires, ovs
 # commands will fail with ALARMCLOCK error. (integer value)
@@ -106,7 +106,6 @@
 # This option is deprecated for removal.
 # Its value may be silently ignored in the future.
 #external_network_bridge =
-external_network_bridge = 
 
 # Seconds between running periodic tasks. (integer value)
 #periodic_interval = 40
diff --git a/neutron/files/ocata/ml2_conf.ini b/neutron/files/ocata/ml2_conf.ini
index ad4e100..0d48951 100644
--- a/neutron/files/ocata/ml2_conf.ini
+++ b/neutron/files/ocata/ml2_conf.ini
@@ -137,7 +137,7 @@
 # neutron.ml2.extension_drivers namespace. For example: extension_drivers =
 # port_security,qos (list value)
 #extension_drivers =
-extension_drivers = port_security
+extension_drivers = port_security{% if server.get('qos', True) %},qos{% endif %}
 
 # Maximum size of an IP packet (MTU) that can traverse the underlying physical
 # network infrastructure without fragmentation when using an overlay/tunnel
@@ -232,13 +232,13 @@
 # Comma-separated list of <vni_min>:<vni_max> tuples enumerating ranges of
 # VXLAN VNI IDs that are available for tenant network allocation (list value)
 #vni_ranges =
-vni_ranges =2:65535
+vni_ranges = {{ server.get('vxlan', {}).vni_ranges|default('2:65535') }}
 
 # Multicast group for VXLAN. When configured, will enable sending all broadcast
 # traffic to this multicast group. When left unconfigured, will disable
 # multicast VXLAN mode. (string value)
 #vxlan_group = <None>
-vxlan_group = 224.0.0.1
+vxlan_group = {{ server.get('vxlan', {}).group|default('224.0.0.1') }}
 
 
 [securitygroup]
@@ -258,7 +258,7 @@
 {%- if not server.get('security_groups_enabled', True) %}
 firewall_driver = neutron.agent.firewall.NoopFirewallDriver
 enable_security_group = False
-{%- elif server.dpdk %}
+{%- elif server.dpdk or server.get('vlan_aware_vms', False) %}
 firewall_driver = openvswitch
 enable_security_group = True
 {%- else %}
diff --git a/neutron/files/ocata/neutron-generic.conf.Debian b/neutron/files/ocata/neutron-generic.conf.Debian
index 6275974..0dde78f 100644
--- a/neutron/files/ocata/neutron-generic.conf.Debian
+++ b/neutron/files/ocata/neutron-generic.conf.Debian
@@ -584,7 +584,7 @@
 
 # Seconds to wait for a response from a call. (integer value)
 #rpc_response_timeout = 60
-rpc_response_timeout=60
+rpc_response_timeout=120
 
 # A URL representing the messaging driver to use and its full configuration.
 # (string value)
@@ -605,7 +605,6 @@
 # Its value may be silently ignored in the future.
 # Reason: Replaced by [DEFAULT]/transport_url
 #rpc_backend = rabbit
-rpc_backend = rabbit
 
 # The default exchange under which topics are scoped. May be overridden by an
 # exchange name specified in the transport_url option. (string value)
@@ -666,7 +665,7 @@
 # needs to execute commands in Dom0 in the hypervisor of XenServer, this item
 # should be set to 'xenapi_root_helper', so that it will keep a XenAPI session
 # to pass commands to Dom0. (string value)
-#root_helper_daemon = <None>
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
 
 # Seconds between nodes reporting state to server; should be less than
 # agent_down_time, best if it is half or less than agent_down_time. (floating
@@ -1646,7 +1645,6 @@
 # This option is deprecated for removal.
 # Its value may be silently ignored in the future.
 #rabbit_max_retries = 0
-rabbit_max_retries = 0
 
 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
 # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
diff --git a/neutron/files/ocata/neutron-server.conf.Debian b/neutron/files/ocata/neutron-server.conf.Debian
index 0047550..229d342 100644
--- a/neutron/files/ocata/neutron-server.conf.Debian
+++ b/neutron/files/ocata/neutron-server.conf.Debian
@@ -43,9 +43,10 @@
 
 core_plugin = neutron.plugins.ml2.plugin.Ml2Plugin
 
-service_plugins =neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,neutron.services.metering.metering_plugin.MeteringPlugin,trunk{%- if server.lbaas is defined -%}
-,neutron_lbaas.services.loadbalancer.plugin.LoadBalancerPluginv2
-{%- endif -%}
+service_plugins =neutron.services.l3_router.l3_router_plugin.L3RouterPlugin,neutron.services.metering.metering_plugin.MeteringPlugin
+{%- if server.lbaas is defined -%},lbaasv2{%- endif -%}
+{%- if server.get('qos', True) -%},neutron.services.qos.qos_plugin.QoSPlugin{%- endif -%}
+{%- if server.get('vlan_aware_vms', False) -%},trunk{%- endif -%}
 
 {% endif %}
 
@@ -178,10 +179,12 @@
 
 # Number of RPC worker processes for service. (integer value)
 #rpc_workers = 1
+rpc_workers = {{ grains.num_cpus }}
 
 # Number of RPC worker processes dedicated to state reports queue. (integer
 # value)
 #rpc_state_report_workers = 1
+rpc_state_report_workers = 4
 
 # Range of seconds to randomly delay when starting the periodic task scheduler
 # to reduce stampeding. (Disable by setting to 0) (integer value)
@@ -288,6 +291,7 @@
 # Automatically reschedule routers from offline L3 agents to online L3 agents.
 # (boolean value)
 #allow_automatic_l3agent_failover = false
+allow_automatic_l3agent_failover = true
 
 # Enable HA mode for virtual routers. (boolean value)
 #l3_ha = false
@@ -296,6 +300,7 @@
 # Maximum number of L3 agents which a HA router will be scheduled on. If it is
 # set to 0 then the router will be scheduled on every agent. (integer value)
 #max_l3_agents_per_router = 3
+max_l3_agents_per_router = 0
 
 # Subnet used for the l3 HA admin network. (string value)
 #l3_ha_net_cidr = 169.254.192.0/18
@@ -603,7 +608,7 @@
 
 # Seconds to wait for a response from a call. (integer value)
 #rpc_response_timeout = 60
-rpc_response_timeout=60
+rpc_response_timeout=120
 
 # A URL representing the messaging driver to use and its full configuration.
 # (string value)
@@ -624,7 +629,6 @@
 # Its value may be silently ignored in the future.
 # Reason: Replaced by [DEFAULT]/transport_url
 #rpc_backend = rabbit
-rpc_backend = rabbit
 
 # The default exchange under which topics are scoped. May be overridden by an
 # exchange name specified in the transport_url option. (string value)
@@ -686,7 +690,7 @@
 # needs to execute commands in Dom0 in the hypervisor of XenServer, this item
 # should be set to 'xenapi_root_helper', so that it will keep a XenAPI session
 # to pass commands to Dom0. (string value)
-#root_helper_daemon = <None>
+root_helper_daemon = sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf
 
 # Seconds between nodes reporting state to server; should be less than
 # agent_down_time, best if it is half or less than agent_down_time. (floating
@@ -863,7 +867,7 @@
 # Deprecated group/name - [DEFAULT]/sql_max_pool_size
 # Deprecated group/name - [DATABASE]/sql_max_pool_size
 #max_pool_size = 5
-max_pool_size = 20
+max_pool_size = {{ [grains.num_cpus * 5, 30] | sort | first }}
 
 # Maximum number of database connection retries during startup. Set to -1 to
 # specify an infinite retry count. (integer value)
@@ -1721,7 +1725,6 @@
 # This option is deprecated for removal.
 # Its value may be silently ignored in the future.
 #rabbit_max_retries = 0
-rabbit_max_retries = 0
 
 # Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
 # option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
@@ -2160,10 +2163,24 @@
 
 {%- for lbaas_name, lbaas in server.lbaas.providers.iteritems() %}
 
-{%- if lbaas.engine == "avinetworks" -%}
 service_provider=LOADBALANCERV2:{{ lbaas_name }}:{{ lbaas.get('driver_path', 'avi_lbaasv2.avi_driver.AviDriver') }}:default
 
 [{{ lbaas_name }}]
+
+{% if lbaas.engine == "octavia" %}
+
+base_url = {{ lbaas.base_url }}
+request_poll_timeout = 3000
+
+[service_auth]
+auth_version = 2
+admin_password = {{ server.identity.password }}
+admin_user = {{ server.identity.user }}
+admin_tenant_name = {{ server.identity.tenant }}
+auth_url = http://{{ server.identity.host }}:35357/v2.0
+{%- endif -%}
+
+{% if lbaas.engine == "avinetworks" %}
 address={{ lbaas.controller_address }}
 user={{ lbaas.controller_user }}
 password={{ lbaas.controller_password }}
diff --git a/neutron/files/ocata/openvswitch_agent.ini b/neutron/files/ocata/openvswitch_agent.ini
index 2de0352..00c33b4 100644
--- a/neutron/files/ocata/openvswitch_agent.ini
+++ b/neutron/files/ocata/openvswitch_agent.ini
@@ -195,8 +195,9 @@
 #agent_type = Open vSwitch agent
 
 # Extensions list to use (list value)
-#extensions =
-
+{% if neutron.get('qos', True) %}
+extensions = qos
+{% endif %}
 
 [ovs]
 
@@ -316,7 +317,7 @@
 {%- if not neutron.get('security_groups_enabled', True) %}
 firewall_driver = neutron.agent.firewall.NoopFirewallDriver
 enable_security_group = False
-{%- elif neutron.dpdk %}
+{%- elif neutron.dpdk or neutron.get('vlan_aware_vms', False) %}
 firewall_driver = openvswitch
 enable_security_group = True
 {%- else %}
diff --git a/neutron/files/ocata/sriov_agent.ini b/neutron/files/ocata/sriov_agent.ini
index 5f78a0e..eebd662 100644
--- a/neutron/files/ocata/sriov_agent.ini
+++ b/neutron/files/ocata/sriov_agent.ini
@@ -122,7 +122,9 @@
 #
 
 # Extensions list to use (list value)
-#extensions =
+{% if neutron.get('qos', True) %}
+extensions = qos
+{% endif %}
 
 
 [sriov_nic]
diff --git a/neutron/map.jinja b/neutron/map.jinja
index f670fb0..1dd5f2a 100644
--- a/neutron/map.jinja
+++ b/neutron/map.jinja
@@ -65,6 +65,12 @@
     },
 }, merge=pillar.neutron.get('client', {})) %}
 
+{% set monitoring = salt['grains.filter_by']({
+    'default': {
+        'error_log_rate': 0.2,
+    },
+}, grain='os_family', merge=salt['pillar.get']('neutron:monitoring')) %}
+
 {%- if pillar.neutron.server is defined %}
 
 {%- set tmp_server = pillar.neutron.server %}
diff --git a/neutron/meta/grafana.yml b/neutron/meta/grafana.yml
index 5f68dcb..74c3432 100644
--- a/neutron/meta/grafana.yml
+++ b/neutron/meta/grafana.yml
@@ -1,5 +1,6 @@
 dashboard:
   service_level:
+    datasource: influxdb
     row:
       neutron-service-level:
         title: Neutron Service Levels
@@ -61,6 +62,7 @@
                 rawQuery: true
                 query: SELECT count(max) FROM openstack_neutron_http_response_times WHERE environment_label = '$environment' AND http_status = '5xx' AND $timeFilter
   main:
+    datasource: influxdb
     row:
       ost-control-plane:
         title: OpenStack Control Plane
diff --git a/neutron/meta/prometheus.yml b/neutron/meta/prometheus.yml
index e0fb755..9f747cb 100644
--- a/neutron/meta/prometheus.yml
+++ b/neutron/meta/prometheus.yml
@@ -1,4 +1,4 @@
-{%- from "neutron/map.jinja" import server with context %}
+{%- from "neutron/map.jinja" import server, monitoring with context %}
 
 {%- if server.get('enabled', False) %}
 {%- raw %}
@@ -51,8 +51,17 @@
         summary: "All {{ $labels.service }} agents down"
         description: >-
             All '{{ $labels.service}}' agents are down for 2 minutes
+    NeutronErrorLogsTooHigh:
 {%- endraw %}
+      {%- set log_threshold = monitoring.error_log_rate|float %}
+      if: >-
+        sum(rate(log_messages{service="neutron",level=~"error|emergency|fatal"}[5m])) without (level) > {{ log_threshold }}
+{%- raw %}
+      labels:
+        severity: warning
+        service: "{{ $labels.service }}"
+      annotations:
+        summary: 'Too many errors in {{ $labels.service }} logs'
+        description: 'The rate of errors in {{ $labels.service }} logs over the last 5 minutes is too high on node {{ $labels.host }} (current value={{ $value }}, threshold={%- endraw %}{{ log_threshold }}).'
 {%- endif %}
 {%- endif %}
-
-
diff --git a/neutron/meta/telegraf.yml b/neutron/meta/telegraf.yml
new file mode 100644
index 0000000..86c317a
--- /dev/null
+++ b/neutron/meta/telegraf.yml
@@ -0,0 +1,9 @@
+{%- from "neutron/map.jinja" import server with context %}
+{%- if server.get('enabled', False) %}
+agent:
+  input:
+    http_response:
+      neutron-api:
+        address: "http://{{ server.bind.address|replace('0.0.0.0', '127.0.0.1') }}:{{ server.bind.port }}/"
+        expected_code: 200
+{%- endif %}
diff --git a/neutron/server.sls b/neutron/server.sls
index 5bf73e3..0b3a6be 100644
--- a/neutron/server.sls
+++ b/neutron/server.sls
@@ -1,6 +1,31 @@
 {%- from "neutron/map.jinja" import server with context %}
 {%- if server.get('enabled', False) %}
 
+{% if grains.os_family == 'Debian' %}
+# This is here to avoid starting up wrongly configured service and to avoid
+# issue with restart limits on systemd.
+
+policy_rcd_present:
+  file.managed:
+  - name: /usr/sbin/policy-rc.d
+  - mode: 0775
+  - contents: "exit 101"
+  - require_in:
+    - pkg: neutron_server_packages
+
+policy_rcd_absent_ok:
+  file.absent:
+  - name: /usr/sbin/policy-rc.d
+  - require:
+    - pkg: neutron_server_packages
+
+policy_rcd_absent_onfail:
+  file.absent:
+  - name: /usr/sbin/policy-rc.d
+  - onfail:
+    - pkg: neutron_server_packages
+{% endif %}
+
 neutron_server_packages:
   pkg.installed:
   - names: {{ server.pkgs }}
@@ -27,14 +52,15 @@
   pkg.installed:
   - name: neutron-plugin-contrail
 
-{%- if not grains.get('noservices', False) %}
 neutron_server_service:
   service.running:
   - name: neutron-server
   - enable: true
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
   - watch:
     - file: /etc/neutron/neutron.conf
-{%- endif %}
 
 {%- endif %}
 
@@ -55,14 +81,15 @@
   - require:
     - file: /etc/neutron/plugins/ml2/ml2_conf.ini
 
-{%- if not grains.get('noservices', False) %}
 neutron_db_manage:
   cmd.run:
   - name: neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/ml2/ml2_conf.ini upgrade head
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
   - require:
     - file: /etc/neutron/neutron.conf
     - file: /etc/neutron/plugins/ml2/ml2_conf.ini
-{%- endif %}
 
 {%- endif %}
 
@@ -112,14 +139,11 @@
   - template: jinja
   - require:
     - pkg: neutron_server_packages
-{%- if not grains.get('noservices', False) %}
   - watch_in:
     - service: neutron_server_services
 
 {%- endif %}
 
-{%- endif %}
-
 {%- if server.backend.engine == "midonet" %}
 
 /etc/neutron/plugins/midonet/midonet.ini:
@@ -132,14 +156,15 @@
     - dir_mode: 755
     - template: jinja
 
-{%- if not grains.get('noservices', False) %}
 neutron_db_manage:
   cmd.run:
   - name: neutron-db-manage --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugins/midonet/midonet.ini upgrade head
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
   - require:
     - file: /etc/neutron/neutron.conf
     - file: /etc/neutron/plugins/midonet/midonet.ini
-{%- endif %}
 
 {%- if server.version == "kilo" %}
 
@@ -162,29 +187,29 @@
     - python-neutron-lbaas
     - python-neutron-fwaas
 
-{%- if not grains.get('noservices', False) %}
 neutron_db_manage:
   cmd.run:
   - name: neutron-db-manage --subproject networking-midonet upgrade head
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
   - require:
     - file: /etc/neutron/neutron.conf
     - file: /etc/neutron/plugins/midonet/midonet.ini
-{%- endif %}
 
 {%- endif %}
 {%- endif %}
 
-{%- if not grains.get('noservices', False) %}
-
 neutron_server_services:
   service.running:
   - names: {{ server.services }}
   - enable: true
+  {%- if grains.get('noservices') %}
+  - onlyif: /bin/false
+  {%- endif %}
   - watch:
     - file: /etc/neutron/neutron.conf
 
-{%- endif %}
-
 {%- if grains.get('virtual_subtype', None) == "Docker" %}
 
 neutron_entrypoint:
diff --git a/tests/pillar/compute_qos.sls b/tests/pillar/compute_qos.sls
new file mode 100644
index 0000000..2122e00
--- /dev/null
+++ b/tests/pillar/compute_qos.sls
@@ -0,0 +1,25 @@
+neutron:
+  compute:
+    agent_mode: legacy
+    backend:
+      engine: ml2
+      tenant_network_types: "flat,vxlan"
+      mechanism:
+        ovs:
+          driver: openvswitch
+    dvr: false
+    enabled: true
+    qos: true
+    external_access: false
+    local_ip: 10.1.0.105
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      password: workshop
+      port: 5672
+      user: openstack
+      virtual_host: /openstack
+    metadata:
+      host: 127.0.0.1
+      password: password
+    version: ocata
diff --git a/tests/pillar/compute_qos_sriov.sls b/tests/pillar/compute_qos_sriov.sls
new file mode 100644
index 0000000..7d4d4ab
--- /dev/null
+++ b/tests/pillar/compute_qos_sriov.sls
@@ -0,0 +1,29 @@
+neutron:
+  compute:
+    agent_mode: legacy
+    backend:
+      engine: ml2
+      tenant_network_types: "flat,vxlan"
+      sriov:
+        nic_one:
+          devname: eth1
+          physical_network: physnet3
+      mechanism:
+        ovs:
+          driver: openvswitch
+    dvr: false
+    enabled: true
+    qos: true
+    external_access: false
+    local_ip: 10.1.0.105
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      password: workshop
+      port: 5672
+      user: openstack
+      virtual_host: /openstack
+    metadata:
+      host: 127.0.0.1
+      password: password
+    version: ocata
diff --git a/tests/pillar/control_lbaas_octavia.sls b/tests/pillar/control_lbaas_octavia.sls
new file mode 100644
index 0000000..5209b2d
--- /dev/null
+++ b/tests/pillar/control_lbaas_octavia.sls
@@ -0,0 +1,59 @@
+neutron:
+  server:
+    backend:
+      engine: ml2
+      external_mtu: 1500
+      mechanism:
+        ovs:
+          driver: openvswitch
+      tenant_network_types: flat,vxlan
+    bind:
+      address: 172.16.10.101
+      port: 9696
+    compute:
+      host: 127.0.0.1
+      password: workshop
+      region: RegionOne
+      tenant: service
+      user: nova
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      name: neutron
+      password: workshop
+      port: 3306
+      user: neutron
+    version: ocata
+    dns_domain: novalocal
+    dvr: false
+    enabled: true
+    global_physnet_mtu: 1500
+    lbaas:
+      enabled: true
+      providers:
+        octavia:
+          engine: octavia
+          driver_path: 'neutron_lbaas.drivers.octavia.driver.OctaviaDriver'
+          base_url: 'http://127.0.0.1:9876'
+    identity:
+      engine: keystone
+      host: 127.0.0.1
+      password: workshop
+      port: 35357
+      region: RegionOne
+      tenant: service
+      user: neutron
+      endpoint_type: internal
+    l3_ha: false
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      password: workshop
+      port: 5672
+      user: openstack
+      virtual_host: /openstack
+    plugin: ml2
+    policy:
+      create_subnet: 'rule:admin_or_network_owner'
+      'get_network:queue_id': 'rule:admin_only'
+      'create_network:shared':
diff --git a/tests/pillar/control_qos.sls b/tests/pillar/control_qos.sls
new file mode 100644
index 0000000..3c24ccf
--- /dev/null
+++ b/tests/pillar/control_qos.sls
@@ -0,0 +1,53 @@
+neutron:
+  server:
+    backend:
+      engine: ml2
+      external_mtu: 1500
+      mechanism:
+        ovs:
+          driver: openvswitch
+      tenant_network_types: flat,vxlan
+    bind:
+      address: 172.16.10.101
+      port: 9696
+    compute:
+      host: 127.0.0.1
+      password: workshop
+      region: RegionOne
+      tenant: service
+      user: nova
+    database:
+      engine: mysql
+      host: 127.0.0.1
+      name: neutron
+      password: workshop
+      port: 3306
+      user: neutron
+    dns_domain: novalocal
+    dvr: false
+    enabled: true
+    qos: true
+    global_physnet_mtu: 1500
+    identity:
+      engine: keystone
+      host: 127.0.0.1
+      password: workshop
+      port: 35357
+      region: RegionOne
+      tenant: service
+      user: neutron
+      endpoint_type: internal
+    l3_ha: False
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      password: workshop
+      port: 5672
+      user: openstack
+      virtual_host: /openstack
+    plugin: ml2
+    version: ocata
+    policy:
+      create_subnet: 'rule:admin_or_network_owner'
+      'get_network:queue_id': 'rule:admin_only'
+      'create_network:shared':
diff --git a/tests/pillar/gateway_qos.sls b/tests/pillar/gateway_qos.sls
new file mode 100644
index 0000000..fea882e
--- /dev/null
+++ b/tests/pillar/gateway_qos.sls
@@ -0,0 +1,25 @@
+neutron:
+  gateway:
+    agent_mode: legacy
+    backend:
+      engine: ml2
+      tenant_network_types: "flat,vxlan"
+      mechanism:
+        ovs:
+          driver: openvswitch
+    dvr: false
+    enabled: true
+    qos: true
+    external_access: True
+    local_ip: 10.1.0.110
+    message_queue:
+      engine: rabbitmq
+      host: 127.0.0.1
+      password: workshop
+      port: 5672
+      user: openstack
+      virtual_host: /openstack
+    metadata:
+      host: 127.0.0.1
+      password: password
+    version: ocata