Merge "Create port with vnic type from config"
diff --git a/.zuul.yaml b/.zuul.yaml
index 45ef47a..1d7e323 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -14,6 +14,11 @@
       - ^releasenotes/.*$
       - ^doc/.*$
       - ^setup.cfg$
+      - ^.*\.rst$
+      - ^neutron/locale/.*$
+      - ^neutron/tests/unit/.*$
+      - ^tools/.*$
+      - ^tox.ini$
 
 - job:
     name: neutron-tempest-plugin-scenario-linuxbridge
@@ -31,7 +36,11 @@
       - ^releasenotes/.*$
       - ^doc/.*$
       - ^setup.cfg$
-    voting: false
+      - ^.*\.rst$
+      - ^neutron/locale/.*$
+      - ^neutron/tests/unit/.*$
+      - ^tools/.*$
+      - ^tox.ini$
 
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario
@@ -50,6 +59,45 @@
       - ^releasenotes/.*$
       - ^doc/.*$
       - ^setup.cfg$
+      - ^.*\.rst$
+      - ^neutron/locale/.*$
+      - ^neutron/tests/unit/.*$
+      - ^tools/.*$
+      - ^tox.ini$
+    voting: false
+
+- job:
+    name: neutron-tempest-plugin-designate-scenario
+    parent: devstack-tempest
+    description: Neutron designate integration scenario
+    required-projects:
+      - openstack/designate
+      - openstack/designate-dashboard
+      - openstack/designate-tempest-plugin
+      - openstack/neutron
+      - openstack/neutron-tempest-plugin
+      - openstack/tempest
+    timeout: 3600
+    roles:
+      - zuul: openstack-dev/devstack
+    vars:
+      devstack_localrc:
+        DESIGNATE_BACKEND_DRIVER: bind9
+      devstack_plugins:
+        designate: git://git.openstack.org/openstack/designate.git
+        neutron: git://git.openstack.org/openstack/neutron.git
+        neutron-tempest-plugin: git://git.openstack.org/openstack/neutron-tempest-plugin.git
+      devstack_services:
+        cinder: false
+        designate: true
+        neutron-dns: true
+        tempest: true
+      tempest_test_regex: ^neutron_tempest_plugin\.scenario\.test_dns_integration
+      tox_envlist: all-plugin
+    irrelevant-files:
+      - ^(test-|)requirements.txt$
+      - ^releasenotes/.*$
+      - ^setup.cfg$
     voting: false
 
 - project-template:
@@ -57,13 +105,15 @@
     check:
       jobs:
         - neutron-tempest-plugin-api
+        - neutron-tempest-plugin-designate-scenario
         - neutron-tempest-plugin-dvr-multinode-scenario
         - neutron-tempest-plugin-scenario-linuxbridge
+        - build-openstack-sphinx-docs
     gate:
       jobs:
         - neutron-tempest-plugin-api
+        - build-openstack-sphinx-docs
 
 - project:
-    name: openstack/neutron-tempest-plugin
     templates:
       - neutron-tempest-plugin-jobs
diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index a4755cc..30628c0 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -1,14 +1,14 @@
 If you would like to contribute to the development of OpenStack, you must
 follow the steps in this page:
 
-   http://docs.openstack.org/infra/manual/developers.html
+   https://docs.openstack.org/infra/manual/developers.html
 
 If you already have a good understanding of how the system works and your
 OpenStack accounts are set up, you can skip to the development workflow
 section of this documentation to learn how changes to OpenStack should be
 submitted for review via the Gerrit tool:
 
-   http://docs.openstack.org/infra/manual/developers.html#development-workflow
+   https://docs.openstack.org/infra/manual/developers.html#development-workflow
 
 Pull requests submitted through GitHub will be ignored.
 
diff --git a/HACKING.rst b/HACKING.rst
index 8c6d928..cd3c49c 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -1,4 +1,4 @@
 openstack Style Commandments
 ===============================================
 
-Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/
+Read the OpenStack Style Commandments https://docs.openstack.org/hacking/latest/
diff --git a/README.rst b/README.rst
index e81ffbe..b3883b8 100644
--- a/README.rst
+++ b/README.rst
@@ -7,6 +7,6 @@
 It contains the tempest plugin for the functional testing of Neutron Project.
 
 * Free software: Apache license
-* Documentation: http://docs.openstack.org/developer/neutron
-* Source: http://git.openstack.org/cgit/openstack/neutron-tempest-plugin
-* Bugs: http://bugs.launchpad.net/neutron
+* Documentation: https://docs.openstack.org/neutron/latest/
+* Source: https://git.openstack.org/cgit/openstack/neutron-tempest-plugin
+* Bugs: https://bugs.launchpad.net/neutron
diff --git a/doc/source/contributing.rst b/doc/source/contributing.rst
index 2aa0707..1728a61 100644
--- a/doc/source/contributing.rst
+++ b/doc/source/contributing.rst
@@ -1,4 +1,4 @@
 ============
 Contributing
 ============
-.. include:: ../../../CONTRIBUTING.rst
+.. include:: ../../CONTRIBUTING.rst
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 1bc38e1..91388a9 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -14,7 +14,6 @@
 
    readme
    installation
-   usage
    contributing
 
 Indices and tables
diff --git a/neutron_tempest_plugin/api/admin/test_logging.py b/neutron_tempest_plugin/api/admin/test_logging.py
new file mode 100644
index 0000000..f4cbe29
--- /dev/null
+++ b/neutron_tempest_plugin/api/admin/test_logging.py
@@ -0,0 +1,74 @@
+# Copyright 2017 Fujitsu Limited.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+import testscenarios
+
+from neutron_tempest_plugin.api import base
+
+load_tests = testscenarios.load_tests_apply_scenarios
+
+
+class LoggingTestJSON(base.BaseAdminNetworkTest):
+
+    required_extensions = ['logging', 'standard-attr-description']
+
+    @decorators.idempotent_id('8d2e1ba5-455b-4519-a88e-e587002faba6')
+    def test_log_lifecycle(self):
+        name = data_utils.rand_name('test-log')
+        description = data_utils.rand_name('test-log-desc')
+        log = self.create_log(name=name, description=description,
+                              resource_type='security_group', enabled=True)
+
+        # Test 'show log'
+        retrieved_log = self.admin_client.show_log(log['id'])['log']
+        self.assertEqual(name, retrieved_log['name'])
+        self.assertEqual(description, retrieved_log['description'])
+        self.assertEqual('security_group', retrieved_log['resource_type'])
+        self.assertTrue(retrieved_log['enabled'])
+
+        # Test 'list logs'
+        logs = self.admin_client.list_logs()['logs']
+        logs_ids = [log_object['id'] for log_object in logs]
+        self.assertIn(log['id'], logs_ids)
+
+        # Test 'update log'
+        update_description = data_utils.rand_name('test-log')
+        self.admin_client.update_log(log['id'],
+                                     description=update_description,
+                                     enabled=False)
+        retrieved_log = self.admin_client.show_log(log['id'])['log']
+        self.assertEqual(update_description, retrieved_log['description'])
+        self.assertFalse(retrieved_log['enabled'])
+
+        # Test 'delete log'
+        self.admin_client.delete_log(log['id'])
+        self.assertRaises(exceptions.NotFound,
+                          self.admin_client.show_log, log['id'])
+
+    @decorators.idempotent_id('1af6cdab-0eb0-4e13-8027-d89cf1c7a87a')
+    def test_list_supported_logging_types(self):
+        # List supported logging types
+        # The returned logging types depend on the loaded backend drivers,
+        # so this test only checks that the returned keys are as expected.
+        expected_log_keys = ['type']
+
+        log_types = self.admin_client.list_loggable_resources()
+        actual_list_log_types = log_types['loggable_resources']
+
+        # Verify that only the required fields are present in each log type
+        for log_type in actual_list_log_types:
+            self.assertEqual(tuple(expected_log_keys), tuple(log_type.keys()))
diff --git a/neutron_tempest_plugin/api/admin/test_logging_negative.py b/neutron_tempest_plugin/api/admin/test_logging_negative.py
new file mode 100644
index 0000000..b975cd6
--- /dev/null
+++ b/neutron_tempest_plugin/api/admin/test_logging_negative.py
@@ -0,0 +1,52 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_utils import uuidutils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.api import base
+
+
+class LoggingNegativeTestJSON(base.BaseAdminNetworkTest):
+
+    required_extensions = ['logging', 'standard-attr-description']
+
+    @decorators.attr(type='negative')
+    @decorators.idempotent_id('5fc61e24-cad5-4d86-a2d4-f40c0fa0a54c')
+    def test_create_log_with_invalid_resource_type(self):
+        log_args = {'name': data_utils.rand_name('test-log'),
+                    'description': data_utils.rand_name('test-log-desc'),
+                    'resource_type': 'fake_resource'}
+        self.assertRaises(lib_exc.BadRequest,
+                          self.admin_client.create_log, **log_args)
+
+    @decorators.attr(type='negative')
+    @decorators.idempotent_id('7ed63170-0748-44b7-b0a0-64bfd9390dac')
+    def test_create_log_with_nonexistent_port(self):
+        log_args = {'name': data_utils.rand_name('test-log'),
+                    'description': data_utils.rand_name('test-log-desc'),
+                    'resource_type': 'security_group',
+                    'target_id': uuidutils.generate_uuid()}
+        self.assertRaises(lib_exc.NotFound,
+                          self.admin_client.create_log, **log_args)
+
+    @decorators.attr(type='negative')
+    @decorators.idempotent_id('89194c6b-8f47-400b-979b-072b1c1f767b')
+    def test_create_log_with_nonexistent_sg(self):
+        log_args = {'name': data_utils.rand_name('test-log'),
+                    'description': data_utils.rand_name('test-log-desc'),
+                    'resource_type': 'security_group',
+                    'resource_id': uuidutils.generate_uuid()}
+        self.assertRaises(lib_exc.NotFound,
+                          self.admin_client.create_log, **log_args)
diff --git a/neutron_tempest_plugin/api/admin/test_routers_ha.py b/neutron_tempest_plugin/api/admin/test_routers_ha.py
index fafe209..b8227bd 100644
--- a/neutron_tempest_plugin/api/admin/test_routers_ha.py
+++ b/neutron_tempest_plugin/api/admin/test_routers_ha.py
@@ -10,6 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.common import utils as tutils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
@@ -19,6 +20,7 @@
 class RoutersTestHA(base.BaseRouterTest):
 
     required_extensions = ['router', 'l3-ha']
+    HA_NETWORK_NAME_TEMPL = "HA network tenant %s"
 
     @classmethod
     def resource_setup(cls):
@@ -31,8 +33,8 @@
         super(RoutersTestHA, cls).resource_setup()
         name = data_utils.rand_name('pretest-check')
         router = cls.admin_client.create_router(name)
+        cls.admin_client.delete_router(router['router']['id'])
         if 'ha' not in router['router']:
-            cls.admin_client.delete_router(router['router']['id'])
             msg = "'ha' attribute not found. HA Possibly not enabled"
             raise cls.skipException(msg)
 
@@ -46,10 +48,8 @@
         The router is created and the "ha" attribute is set to True
         """
         name = data_utils.rand_name('router')
-        router = self.admin_client.create_router(name, ha=True)
-        self.addCleanup(self.admin_client.delete_router,
-                        router['router']['id'])
-        self.assertTrue(router['router']['ha'])
+        router = self._create_admin_router(name, ha=True)
+        self.assertTrue(router['ha'])
 
     @decorators.idempotent_id('97b5f7ef-2192-4fa3-901e-979cd5c1097a')
     def test_legacy_router_creation(self):
@@ -63,10 +63,8 @@
         as opposed to a "High Availability Router"
         """
         name = data_utils.rand_name('router')
-        router = self.admin_client.create_router(name, ha=False)
-        self.addCleanup(self.admin_client.delete_router,
-                        router['router']['id'])
-        self.assertFalse(router['router']['ha'])
+        router = self._create_admin_router(name, ha=False)
+        self.assertFalse(router['ha'])
 
     @decorators.idempotent_id('5a6bfe82-5b23-45a4-b027-5160997d4753')
     def test_legacy_router_update_to_ha(self):
@@ -82,11 +80,36 @@
         """
         name = data_utils.rand_name('router')
         # router needs to be in admin state down in order to be upgraded to HA
-        router = self.admin_client.create_router(name, ha=False,
-                                                 admin_state_up=False)
-        self.addCleanup(self.admin_client.delete_router,
-                        router['router']['id'])
-        self.assertFalse(router['router']['ha'])
-        router = self.admin_client.update_router(router['router']['id'],
+        router = self._create_admin_router(name, ha=False,
+                                           admin_state_up=False)
+        self.assertFalse(router['ha'])
+        router = self.admin_client.update_router(router['id'],
                                                  ha=True)
         self.assertTrue(router['router']['ha'])
+
+    @decorators.idempotent_id('0d8c0c8f-3809-4acc-a2c8-e0941333ff6c')
+    @tutils.requires_ext(extension="provider", service="network")
+    def test_delete_ha_router_keeps_ha_network_segment_data(self):
+        """Test deleting an HA router keeps correct segment data for network.
+
+        Each tenant with an HA router has an HA network. The HA network is a
+        normal tenant network with segmentation data such as the type (vxlan)
+        and segmentation ID. This test makes sure that after an HA router is
+        deleted, the segmentation data is kept on the HA network. This guards
+        against regression of https://bugs.launchpad.net/neutron/+bug/1732543.
+        """
+        for i in range(2):
+            router = self._create_admin_router(
+                data_utils.rand_name('router%d' % i),
+                ha=True)
+        ha_net_name = self.HA_NETWORK_NAME_TEMPL % router['tenant_id']
+        ha_network_pre_delete = self.admin_client.list_networks(
+            name=ha_net_name)['networks'][0]
+        segmentation_id = ha_network_pre_delete['provider:segmentation_id']
+        self._delete_router(router['id'], self.admin_client)
+
+        ha_network_post_delete = self.admin_client.show_network(
+            ha_network_pre_delete['id'])['network']
+        self.assertEqual(
+            ha_network_post_delete['provider:segmentation_id'],
+            segmentation_id)
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index 903692e..51a7d3e 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -69,6 +69,9 @@
             force_new=force_new
         )
         # Neutron uses a different clients manager than the one in the Tempest
+        # save the original in case mixed tests need it
+        if credential_type == 'primary':
+            cls.os_tempest = manager
         return clients.Manager(manager.credentials)
 
     @classmethod
@@ -118,6 +121,7 @@
         cls.admin_subnetpools = []
         cls.security_groups = []
         cls.projects = []
+        cls.log_objects = []
 
     @classmethod
     def resource_cleanup(cls):
@@ -210,6 +214,11 @@
                 cls._try_delete_resource(cls.admin_client.delete_qos_policy,
                                          qos_policy['id'])
 
+            # Clean up log_objects
+            for log_object in cls.log_objects:
+                cls._try_delete_resource(cls.admin_client.delete_log,
+                                         log_object['id'])
+
         super(BaseNetworkTest, cls).resource_cleanup()
 
     @classmethod
@@ -414,17 +423,18 @@
         return qos_rule
 
     @classmethod
-    def delete_router(cls, router):
-        body = cls.client.list_router_interfaces(router['id'])
+    def delete_router(cls, router, client=None):
+        client = client or cls.client
+        body = client.list_router_interfaces(router['id'])
         interfaces = [port for port in body['ports']
                       if port['device_owner'] in const.ROUTER_INTERFACE_OWNERS]
         for i in interfaces:
             try:
-                cls.client.remove_router_interface_with_subnet_id(
+                client.remove_router_interface_with_subnet_id(
                     router['id'], i['fixed_ips'][0]['subnet_id'])
             except lib_exc.NotFound:
                 pass
-        cls.client.delete_router(router['id'])
+        client.delete_router(router['id'])
 
     @classmethod
     def create_address_scope(cls, name, is_admin=False, **kwargs):
@@ -514,6 +524,23 @@
         return service_profile
 
     @classmethod
+    def create_log(cls, name, description=None,
+                   resource_type='security_group', resource_id=None,
+                   target_id=None, event='ALL', enabled=True):
+        """Wrapper utility that returns a test log object."""
+        log_args = {'name': name,
+                    'description': description,
+                    'resource_type': resource_type,
+                    'resource_id': resource_id,
+                    'target_id': target_id,
+                    'event': event,
+                    'enabled': enabled}
+        body = cls.admin_client.create_log(**log_args)
+        log_object = body['log']
+        cls.log_objects.append(log_object)
+        return log_object
+
+    @classmethod
     def get_unused_ip(cls, net_id, ip_version=None):
         """Get an unused ip address in a allocation pool of net"""
         body = cls.admin_client.list_ports(network_id=net_id)
diff --git a/neutron_tempest_plugin/api/base_routers.py b/neutron_tempest_plugin/api/base_routers.py
index c8d3783..52db742 100644
--- a/neutron_tempest_plugin/api/base_routers.py
+++ b/neutron_tempest_plugin/api/base_routers.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.lib import exceptions
+
 from neutron_tempest_plugin.api import base
 
 
@@ -21,9 +23,12 @@
     # as some router operations, such as enabling or disabling SNAT
     # require admin credentials by default
 
-    def _cleanup_router(self, router):
-        self.delete_router(router)
-        self.routers.remove(router)
+    def _cleanup_router(self, router, client=None):
+        try:
+            self.delete_router(router, client)
+            self.routers.remove(router)
+        except exceptions.NotFound:
+            pass
 
     def _create_router(self, name, admin_state_up=False,
                        external_network_id=None, enable_snat=None):
@@ -33,6 +38,12 @@
         self.addCleanup(self._cleanup_router, router)
         return router
 
+    def _create_admin_router(self, *args, **kwargs):
+        router = self.create_admin_router(*args, **kwargs)
+        self.addCleanup(
+            self._cleanup_router, router, self.os_admin.network_client)
+        return router
+
     def _delete_router(self, router_id, network_client=None):
         client = network_client or self.client
         client.delete_router(router_id)
diff --git a/neutron_tempest_plugin/api/base_security_groups.py b/neutron_tempest_plugin/api/base_security_groups.py
index cda18b8..127bbd9 100644
--- a/neutron_tempest_plugin/api/base_security_groups.py
+++ b/neutron_tempest_plugin/api/base_security_groups.py
@@ -19,17 +19,45 @@
 from neutron_tempest_plugin.api import base
 
 
-V4_PROTOCOL_NAMES = set(key for key in constants.IP_PROTOCOL_MAP if
-                        'v6' not in key)
-V4_PROTOCOL_INTS = set(v for k, v in constants.IP_PROTOCOL_MAP.items()
-                       if 'v6' not in k)
+# NOTE(yamamoto): The list of protocols here is what we had in Ocata.
+# (neutron-lib 1.1.0)
+# Why don't we just use neutron_lib.constants.IP_PROTOCOL_MAP etc here?
+# Tempest is branchless and thus supposed to work against older deployments.
+# Also, it's supposed to work against other implementations, which might not
+# support the same set as the reference implementation. Ideally, security
+# groups would offer a way to discover the set of usable protocols, but for
+# now we need to be conservative.
+
+V4_PROTOCOL_NAMES = {
+    'ah',
+    'dccp',
+    'egp',
+    'esp',
+    'gre',
+    'icmp',
+    'igmp',
+    'ospf',
+    'pgm',
+    'rsvp',
+    'sctp',
+    'tcp',
+    'udp',
+    'udplite',
+    'vrrp',
+}
+V4_PROTOCOL_INTS = set(v for k, v in constants.IP_PROTOCOL_MAP.items() if
+                       k in V4_PROTOCOL_NAMES)
 V6_PROTOCOL_LEGACY = set([constants.PROTO_NAME_IPV6_ICMP_LEGACY])
-V6_PROTOCOL_NAMES = (
-    set(key for key in constants.IP_PROTOCOL_MAP if 'v6' in key) -
-    V6_PROTOCOL_LEGACY
-)
+V6_PROTOCOL_NAMES = {
+    'ipv6-encap',
+    'ipv6-frag',
+    'ipv6-icmp',
+    'ipv6-nonxt',
+    'ipv6-opts',
+    'ipv6-route',
+}
 V6_PROTOCOL_INTS = set(v for k, v in constants.IP_PROTOCOL_MAP.items() if
-                       'v6' in k)
+                       k in (V6_PROTOCOL_NAMES | V6_PROTOCOL_LEGACY))
 
 
 class BaseSecGroupTest(base.BaseNetworkTest):
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index ae0a420..ba7aad8 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -386,6 +386,19 @@
         self.assertEqual(
             policy['id'], retrieved_network['network']['qos_policy_id'])
 
+    @decorators.idempotent_id('06060880-2956-4c16-9a63-f284c3879229')
+    def test_user_create_port_with_admin_qos_policy(self):
+        qos_policy = self.create_qos_policy(
+            name='test-policy',
+            tenant_id=self.admin_client.tenant_id,
+            shared=False)
+        network = self.create_network(
+            'test network', client=self.admin_client,
+            project_id=self.client.tenant_id,
+            qos_policy_id=qos_policy['id'])
+        port = self.create_port(network)
+        self.assertEqual(network['id'], port['network_id'])
+
 
 class QosBandwidthLimitRuleTestJSON(base.BaseAdminNetworkTest):
 
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index 6c781ab..e02cf92 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -54,8 +54,20 @@
         trunks_cleanup(cls.client, cls.trunks)
         super(TrunkTestJSONBase, cls).resource_cleanup()
 
-    def _create_trunk_with_network_and_parent(self, subports, **kwargs):
-        network = self.create_network()
+    @classmethod
+    def is_type_driver_enabled(cls, type_driver):
+        return (type_driver in
+                config.CONF.neutron_plugin_options.available_type_drivers)
+
+    def _create_trunk_with_network_and_parent(
+            self, subports, parent_network_type=None, **kwargs):
+        client = None
+        network_kwargs = {}
+        if parent_network_type:
+            client = self.admin_client
+            network_kwargs = {"provider:network_type": parent_network_type,
+                              "tenant_id": self.client.tenant_id}
+        network = self.create_network(client=client, **network_kwargs)
         parent_port = self.create_port(network)
         trunk = self.client.create_trunk(parent_port['id'], subports, **kwargs)
         self.trunks.append(trunk['trunk'])
@@ -266,9 +278,7 @@
     @classmethod
     def skip_checks(cls):
         super(TrunkTestMtusJSONBase, cls).skip_checks()
-        if any(t
-               not in config.CONF.neutron_plugin_options.available_type_drivers
-               for t in ['gre', 'vxlan']):
+        if not all(cls.is_type_driver_enabled(t) for t in ['gre', 'vxlan']):
             msg = "Either vxlan or gre type driver not enabled."
             raise cls.skipException(msg)
 
diff --git a/neutron_tempest_plugin/api/test_trunk_negative.py b/neutron_tempest_plugin/api/test_trunk_negative.py
index 699b26f..4d7ead1 100644
--- a/neutron_tempest_plugin/api/test_trunk_negative.py
+++ b/neutron_tempest_plugin/api/test_trunk_negative.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 from oslo_utils import uuidutils
+from tempest.common import utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 import testtools
@@ -103,8 +104,14 @@
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('40aed9be-e976-47d0-dada-bde2c7e74e57')
+    @utils.requires_ext(extension="provider", service="network")
     def test_create_subport_invalid_inherit_network_segmentation_type(self):
-        trunk = self._create_trunk_with_network_and_parent([])
+        if not self.is_type_driver_enabled('vxlan'):
+            msg = "Vxlan type driver must be enabled for this test."
+            raise self.skipException(msg)
+
+        trunk = self._create_trunk_with_network_and_parent(
+            subports=[], parent_network_type='vxlan')
         subport_network = self.create_network()
         parent_port = self.create_port(subport_network)
         self.assertRaises(lib_exc.BadRequest, self.client.add_subports,
diff --git a/neutron_tempest_plugin/common/constants.py b/neutron_tempest_plugin/common/constants.py
index 4ad780d..4dc7844 100644
--- a/neutron_tempest_plugin/common/constants.py
+++ b/neutron_tempest_plugin/common/constants.py
@@ -123,10 +123,6 @@
 # agent has just returned to alive after being dead
 AGENT_REVIVED = 'revived'
 
-INGRESS_DIRECTION = 'ingress'
-EGRESS_DIRECTION = 'egress'
-
-VALID_DIRECTIONS = (INGRESS_DIRECTION, EGRESS_DIRECTION)
 VALID_ETHERTYPES = (lib_constants.IPv4, lib_constants.IPv6)
 
 IP_ALLOWED_VERSIONS = [lib_constants.IP_VERSION_4, lib_constants.IP_VERSION_6]
diff --git a/neutron_tempest_plugin/common/utils.py b/neutron_tempest_plugin/common/utils.py
index ecccd18..d6d0aee 100644
--- a/neutron_tempest_plugin/common/utils.py
+++ b/neutron_tempest_plugin/common/utils.py
@@ -19,6 +19,7 @@
 """Utilities and helper functions."""
 
 import eventlet
+import functools
 import threading
 import time
 
@@ -70,3 +71,19 @@
             #pylint: disable=raising-bad-type
             raise exception
         raise WaitTimeout("Timed out after %d seconds" % timeout)
+
+
+# TODO(haleyb): move to neutron-lib
+# code copied from neutron repository - neutron/tests/base.py
+def unstable_test(reason):
+    def decor(f):
+        @functools.wraps(f)
+        def inner(self, *args, **kwargs):
+            try:
+                return f(self, *args, **kwargs)
+            except Exception as e:
+                msg = ("%s was marked as unstable because of %s, "
+                       "failure was: %s") % (self.id(), reason, e)
+                raise self.skipTest(msg)
+        return inner
+    return decor
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index 804fece..d6db315 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -38,6 +38,11 @@
                help='The availability zone for all agents in the deployment. '
                     'Configure this only when the single value is used by '
                     'all agents in the deployment.'),
+    cfg.IntOpt('max_networks_per_project',
+               default=4,
+               help='Maximum number of networks per project. '
+                    'Configure this only when the project is limited by the '
+                    'number of real VLANs available in the deployment.'),
 ]
 
 # TODO(amuller): Redo configuration options registration as part of the planned
diff --git a/neutron_tempest_plugin/scenario/test_dns_integration.py b/neutron_tempest_plugin/scenario/test_dns_integration.py
new file mode 100644
index 0000000..923f013
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_dns_integration.py
@@ -0,0 +1,123 @@
+# Copyright (c) 2017 x-ion GmbH
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import ipaddress
+
+import testtools
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.scenario import constants
+
+
+CONF = config.CONF
+
+# Note(jh): Need to do a bit of juggling here in order to avoid failures
+# when designate_tempest_plugin is not available
+dns_base = testtools.try_import('designate_tempest_plugin.tests.base')
+dns_waiters = testtools.try_import('designate_tempest_plugin.common.waiters')
+if dns_base:
+    DNSMixin = dns_base.BaseDnsV2Test
+else:
+    DNSMixin = object
+
+
+class DNSIntegrationTests(base.BaseTempestTestCase, DNSMixin):
+    credentials = ['primary']
+
+    @classmethod
+    def setup_clients(cls):
+        super(DNSIntegrationTests, cls).setup_clients()
+        cls.dns_client = cls.os_tempest.zones_client
+        cls.query_client = cls.os_tempest.query_client
+        cls.query_client.build_timeout = 30
+
+    @classmethod
+    def skip_checks(cls):
+        super(DNSIntegrationTests, cls).skip_checks()
+        if not ('designate' in CONF.service_available and
+                CONF.service_available.designate):
+            raise cls.skipException("Designate support is required")
+        if not (dns_base and dns_waiters):
+            raise cls.skipException("Designate tempest plugin is missing")
+
+    @classmethod
+    @utils.requires_ext(extension="dns-integration", service="network")
+    def resource_setup(cls):
+        super(DNSIntegrationTests, cls).resource_setup()
+        _, cls.zone = cls.dns_client.create_zone()
+        cls.addClassResourceCleanup(cls.dns_client.delete_zone,
+            cls.zone['id'], ignore_errors=lib_exc.NotFound)
+        dns_waiters.wait_for_zone_status(
+            cls.dns_client, cls.zone['id'], 'ACTIVE')
+
+        cls.network = cls.create_network(dns_domain=cls.zone['name'])
+        cls.subnet = cls.create_subnet(cls.network)
+        cls.router = cls.create_router_by_client()
+        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+        cls.keypair = cls.create_keypair()
+
+    def _create_floatingip_with_dns(self, dns_name):
+        fip = self.os_primary.network_client.create_floatingip(
+            CONF.network.public_network_id, dns_name=dns_name,
+            dns_domain=self.zone['name'])['floatingip']
+        self.floating_ips.append(fip)
+        return fip
+
+    def _create_server(self, name=None):
+        port = self.create_port(self.network)
+        server = self.create_server(
+            flavor_ref=CONF.compute.flavor_ref,
+            image_ref=CONF.compute.image_ref,
+            key_name=self.keypair['name'], name=name,
+            networks=[{'port': port['id']}])['server']
+        waiters.wait_for_server_status(self.os_primary.servers_client,
+                                       server['id'],
+                                       constants.SERVER_STATUS_ACTIVE)
+        fip = self.create_and_associate_floatingip(port['id'])
+        return {'port': port, 'fip': fip, 'server': server}
+
+    def _verify_dns_records(self, address, name):
+        forward = name + '.' + self.zone['name']
+        reverse = ipaddress.ip_address(address).reverse_pointer
+        dns_waiters.wait_for_query(self.query_client, forward, 'A')
+        dns_waiters.wait_for_query(self.query_client, reverse, 'PTR')
+        fwd_response = self.query_client.query(forward, 'A')
+        rev_response = self.query_client.query(reverse, 'PTR')
+        for r in fwd_response:
+            for rr in r.answer:
+                self.assertIn(address, rr.to_text())
+        for r in rev_response:
+            for rr in r.answer:
+                self.assertIn(forward, rr.to_text())
+
+    @decorators.idempotent_id('850ee378-4b5a-4f71-960e-0e7b12e03a34')
+    def test_server_with_fip(self):
+        name = data_utils.rand_name('server-test')
+        server = self._create_server(name=name)
+        server_ip = server['fip']['floating_ip_address']
+        self._verify_dns_records(server_ip, name)
+
+    @decorators.idempotent_id('a8f2fade-8d5c-40f9-80f0-3de4b8d91985')
+    def test_fip(self):
+        name = data_utils.rand_name('fip-test')
+        fip = self._create_floatingip_with_dns(name)
+        self._verify_dns_records(fip['floating_ip_address'], name)
diff --git a/neutron_tempest_plugin/scenario/test_floatingip.py b/neutron_tempest_plugin/scenario/test_floatingip.py
index 5fcbdc0..b253890 100644
--- a/neutron_tempest_plugin/scenario/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/test_floatingip.py
@@ -21,7 +21,9 @@
 import testscenarios
 from testscenarios.scenarios import multiply_scenarios
 
+from neutron_lib import constants as lib_constants
 from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils as common_utils
 from neutron_tempest_plugin import config
 from neutron_tempest_plugin.scenario import base
 from neutron_tempest_plugin.scenario import constants
@@ -58,6 +60,17 @@
             cls._dest_network = cls._create_dest_network()
 
     @classmethod
+    def _get_external_gateway(cls):
+        if CONF.network.public_network_id:
+            subnets = cls.os_admin.network_client.list_subnets(
+                network_id=CONF.network.public_network_id)
+
+            for subnet in subnets['subnets']:
+                if (subnet['gateway_ip']
+                    and subnet['ip_version'] == lib_constants.IP_VERSION_4):
+                    return subnet['gateway_ip']
+
+    @classmethod
     def _create_dest_network(cls):
         network = cls.create_network()
         subnet = cls.create_subnet(network,
@@ -134,6 +147,7 @@
 
     same_network = True
 
+    @common_utils.unstable_test("bug 1717302")
     @decorators.idempotent_id('05c4e3b3-7319-4052-90ad-e8916436c23b')
     def test_east_west(self):
         self._test_east_west()
@@ -151,6 +165,34 @@
 
     same_network = False
 
+    @common_utils.unstable_test("bug 1717302")
     @decorators.idempotent_id('f18f0090-3289-4783-b956-a0f8ac511e8b')
     def test_east_west(self):
         self._test_east_west()
+
+
+class DefaultSnatToExternal(FloatingIpTestCasesMixin,
+                            base.BaseTempestTestCase):
+    same_network = True
+
+    @decorators.idempotent_id('3d73ea1a-27c6-45a9-b0f8-04a283d9d764')
+    def test_snat_external_ip(self):
+        """Check connectivity to an external IP"""
+        gateway_external_ip = self._get_external_gateway()
+
+        if not gateway_external_ip:
+            raise self.skipTest("IPv4 gateway is not configured for public "
+                                "network or public_network_id is not "
+                                "configured")
+        proxy = self._create_server()
+        proxy_client = ssh.Client(proxy['fip']['floating_ip_address'],
+                                  CONF.validation.image_ssh_user,
+                                  pkey=self.keypair['private_key'])
+        src_server = self._create_server(create_floating_ip=False)
+        src_server_ip = src_server['port']['fixed_ips'][0]['ip_address']
+        ssh_client = ssh.Client(src_server_ip,
+                                CONF.validation.image_ssh_user,
+                                pkey=self.keypair['private_key'],
+                                proxy_client=proxy_client)
+        self.check_remote_connectivity(ssh_client,
+                                       gateway_external_ip)
diff --git a/neutron_tempest_plugin/scenario/test_mtu.py b/neutron_tempest_plugin/scenario/test_mtu.py
index 9cbb4d8..932c645 100644
--- a/neutron_tempest_plugin/scenario/test_mtu.py
+++ b/neutron_tempest_plugin/scenario/test_mtu.py
@@ -17,6 +17,7 @@
 from neutron_lib.api.definitions import provider_net
 from tempest.common import utils
 from tempest.common import waiters
+from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
 from neutron_tempest_plugin.common import ssh
@@ -28,21 +29,8 @@
 
 
 class NetworkMtuBaseTest(base.BaseTempestTestCase):
-    credentials = ['primary', 'admin']
-    servers = []
-    networks = []
 
     @classmethod
-    def skip_checks(cls):
-        super(NetworkMtuBaseTest, cls).skip_checks()
-        if ("vxlan" not in
-                config.CONF.neutron_plugin_options.available_type_drivers
-            or "gre" not in
-                config.CONF.neutron_plugin_options.available_type_drivers):
-            raise cls.skipException("GRE or VXLAN type_driver is not enabled")
-
-    @classmethod
-    @utils.requires_ext(extension=provider_net.ALIAS, service="network")
     def resource_setup(cls):
         super(NetworkMtuBaseTest, cls).resource_setup()
         # setup basic topology for servers we can log into it
@@ -56,6 +44,42 @@
         cls.create_pingable_secgroup_rule(
             secgroup_id=cls.secgroup['security_group']['id'])
 
+    def create_pingable_vm(self, net, keypair, secgroup):
+        server = self.create_server(
+            flavor_ref=CONF.compute.flavor_ref,
+            image_ref=CONF.compute.image_ref,
+            key_name=keypair['name'],
+            networks=[{'uuid': net['id']}],
+            security_groups=[{'name': secgroup[
+                'security_group']['name']}])
+        waiters.wait_for_server_status(
+            self.os_primary.servers_client, server['server']['id'],
+            constants.SERVER_STATUS_ACTIVE)
+        port = self.client.list_ports(
+            network_id=net['id'], device_id=server['server']['id'])['ports'][0]
+        fip = self.create_and_associate_floatingip(port['id'])
+        return server, fip
+
+
+class NetworkMtuTest(NetworkMtuBaseTest):
+    credentials = ['primary', 'admin']
+    servers = []
+    networks = []
+
+    @classmethod
+    def skip_checks(cls):
+        super(NetworkMtuTest, cls).skip_checks()
+        if ("vxlan" not in
+                config.CONF.neutron_plugin_options.available_type_drivers
+            or "gre" not in
+                config.CONF.neutron_plugin_options.available_type_drivers):
+            raise cls.skipException("GRE or VXLAN type_driver is not enabled")
+
+    @classmethod
+    @utils.requires_ext(extension=provider_net.ALIAS, service="network")
+    def resource_setup(cls):
+        super(NetworkMtuTest, cls).resource_setup()
+
     def _create_setup(self):
         self.admin_client = self.os_admin.network_client
         net_kwargs = {'tenant_id': self.client.tenant_id}
@@ -75,12 +99,14 @@
         # check that MTUs are different for 2 networks
         self.assertNotEqual(self.networks[0]['mtu'], self.networks[1]['mtu'])
         self.networks.sort(key=lambda net: net['mtu'])
-        server1, fip1 = self.create_pingable_vm(self.networks[0])
+        server1, fip1 = self.create_pingable_vm(self.networks[0],
+            self.keypair, self.secgroup)
         server_ssh_client1 = ssh.Client(
             self.floating_ips[0]['floating_ip_address'],
             CONF.validation.image_ssh_user,
             pkey=self.keypair['private_key'])
-        server2, fip2 = self.create_pingable_vm(self.networks[1])
+        server2, fip2 = self.create_pingable_vm(self.networks[1],
+            self.keypair, self.secgroup)
         server_ssh_client2 = ssh.Client(
             self.floating_ips[0]['floating_ip_address'],
             CONF.validation.image_ssh_user,
@@ -91,22 +117,6 @@
                                     self.keypair['private_key'])
         return server_ssh_client1, fip1, server_ssh_client2, fip2
 
-    def create_pingable_vm(self, net):
-        server = self.create_server(
-            flavor_ref=CONF.compute.flavor_ref,
-            image_ref=CONF.compute.image_ref,
-            key_name=self.keypair['name'],
-            networks=[{'uuid': net['id']}],
-            security_groups=[{'name': self.secgroup[
-                'security_group']['name']}])
-        waiters.wait_for_server_status(
-            self.os_primary.servers_client, server['server']['id'],
-            constants.SERVER_STATUS_ACTIVE)
-        port = self.client.list_ports(
-            network_id=net['id'], device_id=server['server']['id'])['ports'][0]
-        fip = self.create_and_associate_floatingip(port['id'])
-        return server, fip
-
     @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a273d9d344')
     def test_connectivity_min_max_mtu(self):
         server_ssh_client, _, _, fip2 = self._create_setup()
@@ -132,3 +142,93 @@
         self.check_remote_connectivity(
             server_ssh_client, fip2['fixed_ip_address'], should_succeed=False,
             mtu=self.networks[1]['mtu'], fragmentation=False)
+
+
+class NetworkWritableMtuTest(NetworkMtuBaseTest):
+    credentials = ['primary', 'admin']
+    servers = []
+    networks = []
+
+    @classmethod
+    def skip_checks(cls):
+        super(NetworkWritableMtuTest, cls).skip_checks()
+        if ("vxlan" not in
+            config.CONF.neutron_plugin_options.available_type_drivers):
+            raise cls.skipException("VXLAN type_driver is not enabled")
+
+    @classmethod
+    @utils.requires_ext(extension="net-mtu-writable", service="network")
+    def resource_setup(cls):
+        super(NetworkWritableMtuTest, cls).resource_setup()
+
+    def _create_setup(self):
+        self.admin_client = self.os_admin.network_client
+        net_kwargs = {'tenant_id': self.client.tenant_id,
+                      'provider:network_type': 'vxlan'}
+        for sub in ('10.100.0.0/16', '10.200.0.0/16'):
+            net_kwargs['name'] = data_utils.rand_name('net')
+            network = self.admin_client.create_network(**net_kwargs)[
+                'network']
+            self.networks.append(network)
+            self.addCleanup(self.admin_client.delete_network, network['id'])
+            cidr = netaddr.IPNetwork(sub)
+            subnet = self.create_subnet(network, cidr=cidr)
+            self.create_router_interface(self.router['id'], subnet['id'])
+            self.addCleanup(self.client.remove_router_interface_with_subnet_id,
+                            self.router['id'], subnet['id'])
+
+        # Update network mtu.
+        net_mtu = self.admin_client.show_network(
+            self.networks[0]['id'])['network']['mtu']
+        self.admin_client.update_network(self.networks[0]['id'],
+            mtu=(net_mtu - 1))
+        self.networks[0]['mtu'] = (
+            self.admin_client.show_network(
+                self.networks[0]['id'])['network']['mtu'])
+
+        # check that MTUs are different for 2 networks
+        self.assertNotEqual(self.networks[0]['mtu'], self.networks[1]['mtu'])
+        self.networks.sort(key=lambda net: net['mtu'])
+        server1, fip1 = self.create_pingable_vm(self.networks[0],
+            self.keypair, self.secgroup)
+        server_ssh_client1 = ssh.Client(
+            self.floating_ips[0]['floating_ip_address'],
+            CONF.validation.image_ssh_user,
+            pkey=self.keypair['private_key'])
+        server2, fip2 = self.create_pingable_vm(self.networks[1],
+            self.keypair, self.secgroup)
+        server_ssh_client2 = ssh.Client(
+            self.floating_ips[0]['floating_ip_address'],
+            CONF.validation.image_ssh_user,
+            pkey=self.keypair['private_key'])
+        for fip in (fip1, fip2):
+            self.check_connectivity(fip['floating_ip_address'],
+                                    CONF.validation.image_ssh_user,
+                                    self.keypair['private_key'])
+        return server_ssh_client1, fip1, server_ssh_client2, fip2
+
+    @decorators.idempotent_id('bc470200-d8f4-4f07-b294-1b4cbaaa35b9')
+    def test_connectivity_min_max_mtu(self):
+        server_ssh_client, _, _, fip2 = self._create_setup()
+        # ping with min mtu of 2 networks succeeds even when
+        # fragmentation is disabled
+        self.check_remote_connectivity(
+            server_ssh_client, fip2['fixed_ip_address'],
+            mtu=self.networks[0]['mtu'], fragmentation=False)
+
+        # ping with the size above min mtu of 2 networks
+        # fails when fragmentation is disabled
+        self.check_remote_connectivity(
+            server_ssh_client, fip2['fixed_ip_address'], should_succeed=False,
+            mtu=self.networks[0]['mtu'] + 2, fragmentation=False)
+
+        # ping with max mtu of 2 networks succeeds when
+        # fragmentation is enabled
+        self.check_remote_connectivity(
+            server_ssh_client, fip2['fixed_ip_address'],
+            mtu=self.networks[1]['mtu'])
+
+        # ping with max mtu of 2 networks fails when fragmentation is disabled
+        self.check_remote_connectivity(
+            server_ssh_client, fip2['fixed_ip_address'], should_succeed=False,
+            mtu=self.networks[1]['mtu'], fragmentation=False)
diff --git a/neutron_tempest_plugin/scenario/test_security_groups.py b/neutron_tempest_plugin/scenario/test_security_groups.py
index 1244535..9503fe3 100644
--- a/neutron_tempest_plugin/scenario/test_security_groups.py
+++ b/neutron_tempest_plugin/scenario/test_security_groups.py
@@ -26,13 +26,13 @@
 CONF = config.CONF
 
 
-class NetworkDefaultSecGroupTest(base.BaseTempestTestCase):
+class NetworkSecGroupTest(base.BaseTempestTestCase):
     credentials = ['primary', 'admin']
     required_extensions = ['router', 'security-group']
 
     @classmethod
     def resource_setup(cls):
-        super(NetworkDefaultSecGroupTest, cls).resource_setup()
+        super(NetworkSecGroupTest, cls).resource_setup()
         # setup basic topology for servers we can log into it
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
@@ -40,15 +40,26 @@
         cls.create_router_interface(router['id'], cls.subnet['id'])
         cls.keypair = cls.create_keypair()
 
-    def create_vm_testing_sec_grp(self, num_servers=2, security_groups=None):
+    def create_vm_testing_sec_grp(self, num_servers=2, security_groups=None,
+                                  ports=None):
+        """Create instances for security group testing.
+        :param num_servers (int): number of servers to spawn
+        :param security_groups (list): list of security groups
+        :param ports* (list): list of ports
+        *Needs to be the same length as num_servers
+        """
         servers, fips, server_ssh_clients = ([], [], [])
         for i in range(num_servers):
-            servers.append(self.create_server(
-                flavor_ref=CONF.compute.flavor_ref,
-                image_ref=CONF.compute.image_ref,
-                key_name=self.keypair['name'],
-                networks=[{'uuid': self.network['id']}],
-                security_groups=security_groups))
+            server_args = {
+                'flavor_ref': CONF.compute.flavor_ref,
+                'image_ref': CONF.compute.image_ref,
+                'key_name': self.keypair['name'],
+                'networks': [{'uuid': self.network['id']}],
+                'security_groups': security_groups
+            }
+            if ports is not None:
+                server_args['networks'][0].update({'port': ports[i]['id']})
+            servers.append(self.create_server(**server_args))
         for i, server in enumerate(servers):
             waiters.wait_for_server_status(
                 self.os_primary.servers_client, server['server']['id'],
@@ -210,3 +221,70 @@
                       'direction': constants.INGRESS_DIRECTION,
                       'remote_ip_prefix': cidr}]
         self._test_ip_prefix(rule_list, should_succeed=False)
+
+    @decorators.idempotent_id('7ed39b86-006d-40fb-887a-ae46693dabc9')
+    def test_remote_group(self):
+        # create a new sec group
+        ssh_secgrp_name = data_utils.rand_name('ssh_secgrp')
+        ssh_secgrp = self.os_primary.network_client.create_security_group(
+            name=ssh_secgrp_name)
+        # add cleanup
+        self.security_groups.append(ssh_secgrp['security_group'])
+        # configure sec group to support SSH connectivity
+        self.create_loginable_secgroup_rule(
+            secgroup_id=ssh_secgrp['security_group']['id'])
+        # spawn two instances with the sec group created
+        server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
+            security_groups=[{'name': ssh_secgrp_name}])
+        # verify SSH functionality
+        for i in range(2):
+            self.check_connectivity(fips[i]['floating_ip_address'],
+                                    CONF.validation.image_ssh_user,
+                                    self.keypair['private_key'])
+        # try to ping instances without ICMP permissions
+        self.check_remote_connectivity(
+            server_ssh_clients[0], fips[1]['fixed_ip_address'],
+            should_succeed=False)
+        # add ICMP support to the remote group
+        rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
+                      'direction': constants.INGRESS_DIRECTION,
+                      'remote_group_id': ssh_secgrp['security_group']['id']}]
+        self.create_secgroup_rules(
+            rule_list, secgroup_id=ssh_secgrp['security_group']['id'])
+        # verify ICMP connectivity between instances works
+        self.check_remote_connectivity(
+            server_ssh_clients[0], fips[1]['fixed_ip_address'])
+        # make sure ICMP connectivity doesn't work from framework
+        self.ping_ip_address(fips[0]['floating_ip_address'],
+                             should_succeed=False)
+
+    @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad488')
+    def test_multiple_ports_secgroup_inheritance(self):
+        """This test creates two ports with security groups, then
+        boots two instances and verify that the security group was
+        inherited properly and enforced in these instances.
+        """
+        # create a security group and make it loginable and pingable
+        secgrp = self.os_primary.network_client.create_security_group(
+            name=data_utils.rand_name('secgrp'))
+        self.create_loginable_secgroup_rule(
+            secgroup_id=secgrp['security_group']['id'])
+        self.create_pingable_secgroup_rule(
+            secgroup_id=secgrp['security_group']['id'])
+        # add security group to cleanup
+        self.security_groups.append(secgrp['security_group'])
+        # create two ports with fixed IPs and the security group created
+        ports = []
+        for i in range(2):
+            ports.append(self.create_port(
+                self.network, fixed_ips=[{'subnet_id': self.subnets[0]['id']}],
+                security_groups=[secgrp['security_group']['id']]))
+        # spawn instances with the ports created
+        server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
+            ports=ports)
+        # verify ICMP reachability and ssh connectivity
+        for fip in fips:
+            self.ping_ip_address(fip['floating_ip_address'])
+            self.check_connectivity(fip['floating_ip_address'],
+                                    CONF.validation.image_ssh_user,
+                                    self.keypair['private_key'])
diff --git a/neutron_tempest_plugin/scenario/test_trunk.py b/neutron_tempest_plugin/scenario/test_trunk.py
index 0008b0a..44f5ba7 100644
--- a/neutron_tempest_plugin/scenario/test_trunk.py
+++ b/neutron_tempest_plugin/scenario/test_trunk.py
@@ -173,9 +173,12 @@
             exception=RuntimeError("Timed out waiting for trunk %s to "
                                    "transition to ACTIVE." % trunk2_id))
         # create a few more networks and ports for subports
+        # check limit of networks per project
+        max_vlan = 3 + CONF.neutron_plugin_options.max_networks_per_project
+        allowed_vlans = range(3, max_vlan)
         subports = [{'port_id': self.create_port(self.create_network())['id'],
                      'segmentation_type': 'vlan', 'segmentation_id': seg_id}
-                    for seg_id in range(3, 7)]
+                    for seg_id in allowed_vlans]
         # add all subports to server1
         self.client.add_subports(trunk1_id, subports)
         # ensure trunk transitions to ACTIVE
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 48b537d..a48db36 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -58,6 +58,8 @@
             'minimum_bandwidth_rules': 'qos',
             'rule_types': 'qos',
             'rbac-policies': '',
+            'logs': 'log',
+            'loggable_resources': 'log',
         }
         service_prefix = service_resource_prefix_map.get(
             plural_name)
diff --git a/playbooks/neutron-tempest-plugin-dvr-multinode-scenario/run.yaml b/playbooks/neutron-tempest-plugin-dvr-multinode-scenario/run.yaml
index c2d33a9..a9ce3e0 100644
--- a/playbooks/neutron-tempest-plugin-dvr-multinode-scenario/run.yaml
+++ b/playbooks/neutron-tempest-plugin-dvr-multinode-scenario/run.yaml
@@ -1,4 +1,4 @@
-- hosts: all
+- hosts: primary
   name: neutron-tempest-plugin-dvr-multinode-scenario
   tasks:
 
@@ -34,7 +34,6 @@
           export DEVSTACK_GATE_CONFIGDRIVE=0
           export DEVSTACK_GATE_TEMPEST_REGEX="(neutron_tempest_plugin.scenario)"
           export DEVSTACK_LOCAL_CONFIG="enable_plugin neutron-tempest-plugin git://git.openstack.org/openstack/neutron-tempest-plugin"
-          export TEMPEST_CONCURRENCY=2
           # Test DVR works multinode
           export DEVSTACK_GATE_NEUTRON_DVR=1
           export BRANCH_OVERRIDE=default
diff --git a/playbooks/neutron-tempest-plugin-scenario-linuxbridge/run.yaml b/playbooks/neutron-tempest-plugin-scenario-linuxbridge/run.yaml
index 65e8b12..02cdf83 100644
--- a/playbooks/neutron-tempest-plugin-scenario-linuxbridge/run.yaml
+++ b/playbooks/neutron-tempest-plugin-scenario-linuxbridge/run.yaml
@@ -53,7 +53,6 @@
               export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
           fi
 
-          export TEMPEST_CONCURRENCY=2
           export PROJECTS="openstack/neutron-tempest-plugin $PROJECTS"
           function gate_hook {
               bash -xe $BASE/new/neutron/neutron/tests/contrib/gate_hook.sh dsvm-scenario-linuxbridge dvrskip
diff --git a/requirements.txt b/requirements.txt
index 84ee391..77875ed 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,16 +2,17 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-pbr>=2.0 # Apache-2.0
-neutron-lib>=1.9.0 # Apache-2.0
-oslo.config!=4.3.0,!=4.4.0,>=4.0.0 # Apache-2.0
-netaddr!=0.7.16,>=0.7.13 # BSD
-oslo.log>=3.22.0 # Apache-2.0
-oslo.serialization!=2.19.1,>=1.10.0 # Apache-2.0
-oslo.utils>=3.20.0 # Apache-2.0
-six>=1.9.0 # MIT
-tempest>=16.1.0 # Apache-2.0
+pbr!=2.1.0,>=2.0.0 # Apache-2.0
+neutron-lib>=1.13.0 # Apache-2.0
+oslo.config>=5.1.0 # Apache-2.0
+ipaddress>=1.0.16;python_version<'3.3' # PSF
+netaddr>=0.7.18 # BSD
+oslo.log>=3.36.0 # Apache-2.0
+oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
+oslo.utils>=3.33.0 # Apache-2.0
+six>=1.10.0 # MIT
+tempest>=17.1.0 # Apache-2.0
 ddt>=1.0.1 # MIT
-testtools>=1.4.0 # MIT
+testtools>=2.2.0 # MIT
 testscenarios>=0.4 # Apache-2.0/BSD
 eventlet!=0.18.3,!=0.20.1,<0.21.0,>=0.18.2 # MIT
diff --git a/setup.cfg b/setup.cfg
index b0b7405..a54cc6c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -5,7 +5,7 @@
     README.rst
 author = OpenStack
 author-email = openstack-dev@lists.openstack.org
-home-page = http://www.openstack.org/
+home-page = https://git.openstack.org/cgit/openstack/neutron-tempest-plugin
 classifier =
     Environment :: OpenStack
     Intended Audience :: Information Technology
diff --git a/setup.py b/setup.py
index 056c16c..566d844 100644
--- a/setup.py
+++ b/setup.py
@@ -25,5 +25,5 @@
     pass
 
 setuptools.setup(
-    setup_requires=['pbr'],
+    setup_requires=['pbr>=2.0.0'],
     pbr=True)
diff --git a/test-requirements.txt b/test-requirements.txt
index f559c0e..f4f8c0a 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,14 +2,14 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-hacking>=0.12.0,<0.13 # Apache-2.0
+hacking<0.13,>=0.12.0 # Apache-2.0
 
-coverage>=4.0,!=4.4 # Apache-2.0
-python-subunit>=0.0.18 # Apache-2.0/BSD
-sphinx>=1.6.2 # BSD
-oslotest>=1.10.0 # Apache-2.0
-testrepository>=0.0.18  # Apache-2.0/BSD
-testtools>=1.4.0 # MIT
-openstackdocstheme>=1.11.0  # Apache-2.0
+coverage!=4.4,>=4.0 # Apache-2.0
+python-subunit>=1.0.0 # Apache-2.0/BSD
+sphinx!=1.6.6,>=1.6.2 # BSD
+oslotest>=3.2.0 # Apache-2.0
+testrepository>=0.0.18 # Apache-2.0/BSD
+testtools>=2.2.0 # MIT
+openstackdocstheme>=1.18.1 # Apache-2.0
 # releasenotes
-reno>=1.8.0 # Apache-2.0
+reno>=2.5.0 # Apache-2.0
diff --git a/tools/misc-sanity-checks.sh b/tools/misc-sanity-checks.sh
new file mode 100755
index 0000000..234cbb1
--- /dev/null
+++ b/tools/misc-sanity-checks.sh
@@ -0,0 +1,38 @@
+#! /bin/sh
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+TMPDIR=`mktemp -d /tmp/${0##*/}.XXXXXX` || exit 1
+export TMPDIR
+trap "rm -rf $TMPDIR" EXIT
+
+FAILURES=$TMPDIR/failures
+
+check_no_duplicate_api_test_idempotent_ids() {
+    # For API tests, an idempotent ID is assigned to each single API test,
+    # those IDs should be unique
+    output=$(check-uuid --package neutron_tempest_plugin)
+    if [ "$?" -ne 0 ]; then
+        echo "There are duplicate idempotent ids in the API tests" >>$FAILURES
+        echo "please, assign unique uuids to each API test:" >>$FAILURES
+        echo "$output" >>$FAILURES
+    fi
+}
+
+check_no_duplicate_api_test_idempotent_ids
+
+# Fail, if there are emitted failures
+if [ -f $FAILURES ]; then
+    cat $FAILURES
+    exit 1
+fi
diff --git a/tox.ini b/tox.ini
index 74c85e5..c16664d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -13,7 +13,11 @@
 commands = python setup.py test --slowest --testr-args='{posargs}'
 
 [testenv:pep8]
-commands = flake8 {posargs}
+commands =
+  sh ./tools/misc-sanity-checks.sh
+  flake8 {posargs}
+whitelist_externals =
+  sh
 
 [testenv:venv]
 commands = {posargs}
@@ -29,7 +33,7 @@
   sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
 
 [testenv:debug]
-commands = oslo_debug_helper {posargs}
+commands = oslo_debug_helper -t neutron_tempest_plugin/ {posargs}
 
 [flake8]
 # E125 continuation line does not distinguish itself from next logical line