Merge "Using "Designate API" in addition to DNS query client check"
diff --git a/neutron_tempest_plugin/api/admin/test_dhcp_agent_scheduler.py b/neutron_tempest_plugin/api/admin/test_dhcp_agent_scheduler.py
deleted file mode 100644
index 9dc4438..0000000
--- a/neutron_tempest_plugin/api/admin/test_dhcp_agent_scheduler.py
+++ /dev/null
@@ -1,110 +0,0 @@
-# Copyright 2013 IBM Corp.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from neutron_lib import constants
-from neutron_lib.utils import test
-from tempest.lib import decorators
-
-from neutron_tempest_plugin.api import base
-from neutron_tempest_plugin.common import utils
-
-
-class DHCPAgentSchedulersTestJSON(base.BaseAdminNetworkTest):
-
-    required_extensions = ['dhcp_agent_scheduler']
-
-    @classmethod
-    def resource_setup(cls):
-        super(DHCPAgentSchedulersTestJSON, cls).resource_setup()
-        # Create a network and make sure it will be hosted by a
-        # dhcp agent: this is done by creating a regular port
-        cls.network = cls.create_network()
-        cls.subnet = cls.create_subnet(cls.network)
-        cls.cidr = cls.subnet['cidr']
-        cls.port = cls.create_port(cls.network)
-
-    @test.unstable_test("bug 1906654")
-    @decorators.idempotent_id('f164801e-1dd8-4b8b-b5d3-cc3ac77cfaa5')
-    def test_dhcp_port_status_active(self):
-
-        def dhcp_port_active():
-            for p in self.client.list_ports(
-                    network_id=self.network['id'])['ports']:
-                if (p['device_owner'] == constants.DEVICE_OWNER_DHCP and
-                        p['status'] == constants.PORT_STATUS_ACTIVE):
-                    return True
-            return False
-        utils.wait_until_true(dhcp_port_active)
-
-    @decorators.idempotent_id('5032b1fe-eb42-4a64-8f3b-6e189d8b5c7d')
-    def test_list_dhcp_agent_hosting_network(self):
-        self.admin_client.list_dhcp_agent_hosting_network(
-            self.network['id'])
-
-    @decorators.idempotent_id('30c48f98-e45d-4ffb-841c-b8aad57c7587')
-    def test_list_networks_hosted_by_one_dhcp(self):
-        body = self.admin_client.list_dhcp_agent_hosting_network(
-            self.network['id'])
-        agents = body['agents']
-        self.assertIsNotNone(agents)
-        agent = agents[0]
-        self.assertTrue(self._check_network_in_dhcp_agent(
-            self.network['id'], agent))
-
-    def _check_network_in_dhcp_agent(self, network_id, agent):
-        network_ids = []
-        body = self.admin_client.list_networks_hosted_by_one_dhcp_agent(
-            agent['id'])
-        networks = body['networks']
-        for network in networks:
-            network_ids.append(network['id'])
-        return network_id in network_ids
-
-    @decorators.idempotent_id('a0856713-6549-470c-a656-e97c8df9a14d')
-    def test_add_remove_network_from_dhcp_agent(self):
-        # The agent is now bound to the network, we can free the port
-        self.client.delete_port(self.port['id'])
-        self.ports.remove(self.port)
-        agent = dict()
-        agent['agent_type'] = None
-        body = self.admin_client.list_agents()
-        agents = body['agents']
-        for a in agents:
-            if a['agent_type'] == 'DHCP agent':
-                agent = a
-                break
-        self.assertEqual(agent['agent_type'], 'DHCP agent', 'Could not find '
-                         'DHCP agent in agent list though dhcp_agent_scheduler'
-                         ' is enabled.')
-        network = self.create_network()
-        network_id = network['id']
-        if self._check_network_in_dhcp_agent(network_id, agent):
-            self._remove_network_from_dhcp_agent(network_id, agent)
-            self._add_dhcp_agent_to_network(network_id, agent)
-        else:
-            self._add_dhcp_agent_to_network(network_id, agent)
-            self._remove_network_from_dhcp_agent(network_id, agent)
-
-    def _remove_network_from_dhcp_agent(self, network_id, agent):
-        self.admin_client.remove_network_from_dhcp_agent(
-            agent_id=agent['id'],
-            network_id=network_id)
-        self.assertFalse(self._check_network_in_dhcp_agent(
-            network_id, agent))
-
-    def _add_dhcp_agent_to_network(self, network_id, agent):
-        self.admin_client.add_dhcp_agent_to_network(agent['id'],
-                                                    network_id)
-        self.assertTrue(self._check_network_in_dhcp_agent(
-            network_id, agent))
diff --git a/neutron_tempest_plugin/api/admin/test_external_network_extension.py b/neutron_tempest_plugin/api/admin/test_external_network_extension.py
index cf6c44d..c4b55c9 100644
--- a/neutron_tempest_plugin/api/admin/test_external_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_external_network_extension.py
@@ -53,7 +53,7 @@
         self.client.create_rbac_policy(
             object_type='network', object_id=net['id'],
             action='access_as_external',
-            target_tenant=self.client2.tenant_id)
+            target_tenant=self.client2.project_id)
         body = self.client2.list_networks()
         networks_list = [n['id'] for n in body['networks']]
         self.assertIn(net['id'], networks_list)
@@ -107,7 +107,7 @@
         # changing wildcard to specific tenant should be okay since its the
         # only one using the network
         self.admin_client.update_rbac_policy(
-            rbac_pol['id'], target_tenant=self.client2.tenant_id)
+            rbac_pol['id'], target_tenant=self.client2.project_id)
 
     @decorators.idempotent_id('a5539002-5bdb-48b5-b124-e9eedd5975e6')
     def test_external_conversion_on_policy_create(self):
@@ -115,7 +115,7 @@
         self.admin_client.create_rbac_policy(
             object_type='network', object_id=net_id,
             action='access_as_external',
-            target_tenant=self.client2.tenant_id)
+            target_tenant=self.client2.project_id)
         body = self.admin_client.show_network(net_id)['network']
         self.assertTrue(body['router:external'])
 
@@ -138,13 +138,13 @@
         self.admin_client.create_rbac_policy(
             object_type='network', object_id=net_id,
             action='access_as_external',
-            target_tenant=self.admin_client.tenant_id)
+            target_tenant=self.admin_client.project_id)
         body = self.admin_client.show_network(net_id)['network']
         self.assertTrue(body['router:external'])
         policy2 = self.admin_client.create_rbac_policy(
             object_type='network', object_id=net_id,
             action='access_as_external',
-            target_tenant=self.client2.tenant_id)
+            target_tenant=self.client2.project_id)
         self.admin_client.delete_rbac_policy(policy2['rbac_policy']['id'])
         body = self.admin_client.show_network(net_id)['network']
         self.assertTrue(body['router:external'])
@@ -168,14 +168,14 @@
         self.admin_client.create_rbac_policy(
             object_type='network', object_id=net['id'],
             action='access_as_external',
-            target_tenant=self.admin_client.tenant_id)
+            target_tenant=self.admin_client.project_id)
         self.create_subnet(net, client=self.admin_client, enable_dhcp=False)
         with testtools.ExpectedException(lib_exc.NotFound):
             self.create_floatingip(net['id'], client=self.client2)
         self.admin_client.create_rbac_policy(
             object_type='network', object_id=net['id'],
             action='access_as_external',
-            target_tenant=self.client2.tenant_id)
+            target_tenant=self.client2.project_id)
         self.create_floatingip(net['id'], client=self.client2)
 
     @decorators.idempotent_id('476be1e0-f72e-47dc-9a14-4435926bbe82')
@@ -185,7 +185,7 @@
         self.admin_client.create_rbac_policy(
             object_type='network', object_id=net['id'],
             action='access_as_external',
-            target_tenant=self.client2.tenant_id)
+            target_tenant=self.client2.project_id)
         r = self.client2.create_router(
             data_utils.rand_name('router'),
             external_gateway_info={'network_id': net['id']})['router']
@@ -209,7 +209,7 @@
         tenant = self.admin_client.create_rbac_policy(
             object_type='network', object_id=net['id'],
             action='access_as_external',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
+            target_tenant=self.client2.project_id)['rbac_policy']
         # now we can delete the policy because the tenant has its own policy
         # to allow it access
         self.admin_client.delete_rbac_policy(wildcard['id'])
diff --git a/neutron_tempest_plugin/api/admin/test_networks.py b/neutron_tempest_plugin/api/admin/test_networks.py
index 74e72ef..17a8990 100644
--- a/neutron_tempest_plugin/api/admin/test_networks.py
+++ b/neutron_tempest_plugin/api/admin/test_networks.py
@@ -25,7 +25,7 @@
     @decorators.idempotent_id('d3c76044-d067-4cb0-ae47-8cdd875c7f67')
     @utils.requires_ext(extension="project-id", service="network")
     def test_create_network_with_project(self):
-        project_id = self.client.tenant_id  # non-admin
+        project_id = self.client.project_id  # non-admin
 
         name = 'admin-created-with-project_id'
         network = self.create_network(name, project_id=project_id,
@@ -43,7 +43,7 @@
     @decorators.idempotent_id('8d21aaca-4364-4eb9-8b79-44b4fff6373b')
     @utils.requires_ext(extension="project-id", service="network")
     def test_create_network_with_project_and_tenant(self):
-        project_id = self.client.tenant_id  # non-admin
+        project_id = self.client.project_id  # non-admin
 
         name = 'created-with-project-and-tenant'
         network = self.create_network(name, project_id=project_id,
@@ -62,7 +62,7 @@
     @decorators.idempotent_id('08b92179-669d-45ee-8233-ef6611190809')
     @utils.requires_ext(extension="project-id", service="network")
     def test_create_network_with_project_and_other_tenant(self):
-        project_id = self.client.tenant_id  # non-admin
+        project_id = self.client.project_id  # non-admin
         other_tenant = uuidutils.generate_uuid()
 
         name = 'created-with-project-and-other-tenant'
diff --git a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
index 1444b2d..5522194 100644
--- a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
@@ -214,7 +214,7 @@
     @decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff1fff')
     def test_network_only_visible_to_policy_target(self):
         net = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)['network']
+            self.client.project_id)['network']
         self.client.show_network(net['id'])
         with testtools.ExpectedException(lib_exc.NotFound):
             # client2 has not been granted access
@@ -223,7 +223,7 @@
     @decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff2fff')
     def test_subnet_on_network_only_visible_to_policy_target(self):
         sub = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)['subnet']
+            self.client.project_id)['subnet']
         self.client.show_subnet(sub['id'])
         with testtools.ExpectedException(lib_exc.NotFound):
             # client2 has not been granted access
@@ -232,11 +232,11 @@
     @decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff2eee')
     def test_policy_target_update(self):
         res = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         # change to client2
         update_res = self.admin_client.update_rbac_policy(
-                res['policy']['id'], target_tenant=self.client2.tenant_id)
-        self.assertEqual(self.client2.tenant_id,
+                res['policy']['id'], target_tenant=self.client2.project_id)
+        self.assertEqual(self.client2.project_id,
                          update_res['rbac_policy']['target_tenant'])
         # make sure everything else stayed the same
         res['policy'].pop('target_tenant')
@@ -246,16 +246,17 @@
     @decorators.idempotent_id('86c3529b-1231-40de-803c-affefefef321')
     def test_duplicate_policy_error(self):
         res = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         with testtools.ExpectedException(lib_exc.Conflict):
             self.admin_client.create_rbac_policy(
                 object_type='network', object_id=res['network']['id'],
-                action='access_as_shared', target_tenant=self.client.tenant_id)
+                action='access_as_shared',
+                target_tenant=self.client.project_id)
 
     @decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff3fff')
     def test_port_presence_prevents_network_rbac_policy_deletion(self):
         res = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         port = self.create_port(res['network'])
         # a port on the network should prevent the deletion of a policy
         # required for it to exist
@@ -282,7 +283,7 @@
         net = self.create_network()  # owned by self.client
         self.client.create_rbac_policy(
             object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         port = self.client2.create_port(network_id=net['id'])['port']
         self.client.delete_port(port['id'])
 
@@ -300,7 +301,7 @@
             self.client2.show_network(net['id'])
         pol = self.client.create_rbac_policy(
             object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         self.client2.show_network(net['id'])
 
         self.assertIn(pol['rbac_policy'],
@@ -316,7 +317,7 @@
         net = self.create_network()
         self.client.create_rbac_policy(
             object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                       ('project_id', 'target_tenant'))
         for fields in field_args:
@@ -326,7 +327,7 @@
     @decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff5fff')
     def test_policy_show(self):
         res = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         p1 = res['policy']
         p2 = self.admin_client.create_rbac_policy(
             object_type='network', object_id=res['network']['id'],
@@ -344,11 +345,11 @@
         pol1 = self.client.create_rbac_policy(
             object_type='network', object_id=net['id'],
             action='access_as_shared',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
+            target_tenant=self.client2.project_id)['rbac_policy']
         pol2 = self.client.create_rbac_policy(
             object_type='network', object_id=net['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
         res1 = self.client.list_rbac_policies(id=pol1['id'])['rbac_policies']
         res2 = self.client.list_rbac_policies(id=pol2['id'])['rbac_policies']
         self.assertEqual(1, len(res1))
@@ -359,16 +360,17 @@
     @decorators.idempotent_id('86c3529b-1231-40de-803c-afffffff6fff')
     def test_regular_client_blocked_from_sharing_anothers_network(self):
         net = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)['network']
+            self.client.project_id)['network']
         with testtools.ExpectedException(lib_exc.BadRequest):
             self.client.create_rbac_policy(
                 object_type='network', object_id=net['id'],
-                action='access_as_shared', target_tenant=self.client.tenant_id)
+                action='access_as_shared',
+                target_tenant=self.client.project_id)
 
     @decorators.idempotent_id('c5f8f785-ce8d-4430-af7e-a236205862fb')
     @utils.requires_ext(extension="quotas", service="network")
     def test_rbac_policy_quota(self):
-        quota = self.client.show_quotas(self.client.tenant_id)['quota']
+        quota = self.client.show_quotas(self.client.project_id)['quota']
         max_policies = quota['rbac_policy']
         self.assertGreater(max_policies, 0)
         net = self.client.create_network(
@@ -391,7 +393,7 @@
         # ensure it works on update as well
         pol = self.client.create_rbac_policy(
             object_type='network', object_id=net['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         with testtools.ExpectedException(lib_exc.Forbidden):
             self.client.update_rbac_policy(pol['rbac_policy']['id'],
                                            target_tenant='*')
@@ -405,14 +407,14 @@
                          target_tenant=net['project_id'])['rbac_policy']
         port = self.create_port(net)
         self.client.update_rbac_policy(self_share['id'],
-                                       target_tenant=self.client2.tenant_id)
+                                       target_tenant=self.client2.project_id)
         self.client.delete_port(port['id'])
 
     @utils.requires_ext(extension="standard-attr-revisions", service="network")
     @decorators.idempotent_id('86c3529b-1231-40de-1234-89664291a4cb')
     def test_rbac_bumps_network_revision(self):
         resp = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         net_id = resp['network']['id']
         rev = self.client.show_network(net_id)['network']['revision_number']
         self.admin_client.create_rbac_policy(
@@ -426,7 +428,7 @@
     @decorators.idempotent_id('86c3529b-1231-40de-803c-aeeeeeee7fff')
     def test_filtering_works_with_rbac_records_present(self):
         resp = self._make_admin_net_and_subnet_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         net = resp['network']['id']
         sub = resp['subnet']['id']
         self.admin_client.create_rbac_policy(
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index e080d42..b66fe0d 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -386,7 +386,7 @@
                     # Keep this network visible from current project
                     project_id = (kwargs.get('project_id') or
                                   kwargs.get('tenant_id') or
-                                  cls.client.tenant_id)
+                                  cls.client.project_id)
                     kwargs.update(project_id=project_id, tenant_id=project_id)
             else:
                 # Use default client
@@ -994,8 +994,9 @@
         ip_version = ip_version or cls._ip_version
         default_params = (
             constants.DEFAULT_SECURITY_GROUP_RULE_PARAMS[ip_version])
-        if ('remote_address_group_id' in kwargs and 'remote_ip_prefix' in
-                default_params):
+        if (('remote_address_group_id' in kwargs or
+             'remote_group_id' in kwargs) and
+                'remote_ip_prefix' in default_params):
             default_params.pop('remote_ip_prefix')
         for key, value in default_params.items():
             kwargs.setdefault(key, value)
diff --git a/neutron_tempest_plugin/api/test_address_groups.py b/neutron_tempest_plugin/api/test_address_groups.py
index 69f22d0..ee0064c 100644
--- a/neutron_tempest_plugin/api/test_address_groups.py
+++ b/neutron_tempest_plugin/api/test_address_groups.py
@@ -171,11 +171,11 @@
     @decorators.idempotent_id('95f59a88-c47e-4dd9-a231-85f1782753a7')
     def test_policy_target_update(self):
         res = self._make_admin_ag_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         # change to client2
         update_res = self.admin_client.update_rbac_policy(
-                res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
-        self.assertEqual(self.client2.tenant_id,
+            res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+        self.assertEqual(self.client2.project_id,
                          update_res['rbac_policy']['target_tenant'])
         # make sure everything else stayed the same
         res['rbac_policy'].pop('target_tenant')
@@ -185,7 +185,7 @@
     @decorators.idempotent_id('35a214c9-5c99-468f-9242-34d0529cabfa')
     def test_secgrprule_presence_prevents_policy_rbac_policy_deletion(self):
         res = self._make_admin_ag_shared_to_project_id(
-            self.client2.tenant_id)
+            self.client2.project_id)
         ag_id = res['address_group']['id']
         security_group = self.create_security_group(client=self.client2)
         protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
@@ -213,7 +213,7 @@
         rbac_policy = self.admin_client.create_rbac_policy(
             object_type='address_group', object_id=ag['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
         self.client.show_address_group(ag['id'])
 
         self.assertIn(rbac_policy,
@@ -228,7 +228,7 @@
         ag = self._create_address_group()
         self.admin_client.create_rbac_policy(
             object_type='address_group', object_id=ag['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                       ('project_id', 'target_tenant'))
         for fields in field_args:
@@ -238,7 +238,7 @@
     @decorators.idempotent_id('20b2706b-1cea-4724-ab72-d7452ecb1fc4')
     def test_rbac_policy_show(self):
         res = self._make_admin_ag_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         p1 = res['rbac_policy']
         p2 = self.admin_client.create_rbac_policy(
             object_type='address_group',
@@ -257,11 +257,11 @@
         rbac_pol1 = self.admin_client.create_rbac_policy(
             object_type='address_group', object_id=ag['id'],
             action='access_as_shared',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
+            target_tenant=self.client2.project_id)['rbac_policy']
         rbac_pol2 = self.admin_client.create_rbac_policy(
             object_type='address_group', object_id=ag['id'],
             action='access_as_shared',
-            target_tenant=self.admin_client.tenant_id)['rbac_policy']
+            target_tenant=self.admin_client.project_id)['rbac_policy']
         res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
             'rbac_policies']
         res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -274,12 +274,12 @@
     @decorators.idempotent_id('a0f3a01a-e2c7-47d6-9385-0cd7a7f0c996')
     def test_regular_client_blocked_from_sharing_anothers_policy(self):
         ag = self._make_admin_ag_shared_to_project_id(
-            self.client.tenant_id)['address_group']
+            self.client.project_id)['address_group']
         with testtools.ExpectedException(exceptions.BadRequest):
             self.client.create_rbac_policy(
                 object_type='address_group', object_id=ag['id'],
                 action='access_as_shared',
-                target_tenant=self.client2.tenant_id)
+                target_tenant=self.client2.project_id)
 
         # make sure the rbac-policy is invisible to the tenant for which it's
         # being shared
@@ -292,7 +292,7 @@
         self.admin_client.create_rbac_policy(
             object_type='address_group', object_id=ag['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
         self.client.show_address_group(ag['id'])
         with testtools.ExpectedException(exceptions.NotFound):
             self.client.update_address_group(ag['id'], name='new_name')
diff --git a/neutron_tempest_plugin/api/test_address_scopes.py b/neutron_tempest_plugin/api/test_address_scopes.py
index b8c143a..76592a0 100644
--- a/neutron_tempest_plugin/api/test_address_scopes.py
+++ b/neutron_tempest_plugin/api/test_address_scopes.py
@@ -86,8 +86,8 @@
         show_addr_scope = body['address_scope']
         self.assertIn('project_id', show_addr_scope)
         self.assertIn('tenant_id', show_addr_scope)
-        self.assertEqual(self.client.tenant_id, show_addr_scope['project_id'])
-        self.assertEqual(self.client.tenant_id, show_addr_scope['tenant_id'])
+        self.assertEqual(self.client.project_id, show_addr_scope['project_id'])
+        self.assertEqual(self.client.project_id, show_addr_scope['tenant_id'])
 
     @decorators.idempotent_id('85a259b2-ace6-4e32-9657-a9a392b452aa')
     def test_tenant_update_address_scope(self):
@@ -142,11 +142,11 @@
     @decorators.idempotent_id('038e999b-cd4b-4021-a9ff-ebb734f6e056')
     def test_policy_target_update(self):
         res = self._make_admin_as_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         # change to client2
         update_res = self.admin_client.update_rbac_policy(
-                res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
-        self.assertEqual(self.client2.tenant_id,
+            res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+        self.assertEqual(self.client2.project_id,
                          update_res['rbac_policy']['target_tenant'])
         # make sure everything else stayed the same
         res['rbac_policy'].pop('target_tenant')
@@ -156,7 +156,7 @@
     @decorators.idempotent_id('798ac6c6-96cc-49ce-ba5c-c6eced7a09d3')
     def test_subnet_pool_presence_prevents_rbac_policy_deletion(self):
         res = self._make_admin_as_shared_to_project_id(
-            self.client2.tenant_id)
+            self.client2.project_id)
         snp = self.create_subnetpool(
             data_utils.rand_name("rbac-address-scope"),
             default_prefixlen=24, prefixes=['10.0.0.0/8'],
@@ -183,7 +183,7 @@
         rbac_policy = self.admin_client.create_rbac_policy(
             object_type='address_scope', object_id=a_s['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
         self.client.show_address_scope(a_s['id'])
 
         self.assertIn(rbac_policy,
@@ -198,7 +198,7 @@
         a_s = self._create_address_scope(ip_version=4)
         self.admin_client.create_rbac_policy(
             object_type='address_scope', object_id=a_s['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                       ('project_id', 'target_tenant'))
         for fields in field_args:
@@ -208,7 +208,7 @@
     @decorators.idempotent_id('19cbd62e-c6c3-4495-98b9-b9c6c6c9c127')
     def test_rbac_policy_show(self):
         res = self._make_admin_as_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         p1 = res['rbac_policy']
         p2 = self.admin_client.create_rbac_policy(
             object_type='address_scope',
@@ -227,11 +227,11 @@
         rbac_pol1 = self.admin_client.create_rbac_policy(
             object_type='address_scope', object_id=a_s['id'],
             action='access_as_shared',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
+            target_tenant=self.client2.project_id)['rbac_policy']
         rbac_pol2 = self.admin_client.create_rbac_policy(
             object_type='address_scope', object_id=a_s['id'],
             action='access_as_shared',
-            target_tenant=self.admin_client.tenant_id)['rbac_policy']
+            target_tenant=self.admin_client.project_id)['rbac_policy']
         res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
             'rbac_policies']
         res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -244,12 +244,12 @@
     @decorators.idempotent_id('222a638d-819e-41a7-a3fe-550265c06e79')
     def test_regular_client_blocked_from_sharing_anothers_policy(self):
         a_s = self._make_admin_as_shared_to_project_id(
-            self.client.tenant_id)['address_scope']
+            self.client.project_id)['address_scope']
         with testtools.ExpectedException(lib_exc.BadRequest):
             self.client.create_rbac_policy(
                 object_type='address_scope', object_id=a_s['id'],
                 action='access_as_shared',
-                target_tenant=self.client2.tenant_id)
+                target_tenant=self.client2.project_id)
 
         # make sure the rbac-policy is invisible to the tenant for which it's
         # being shared
diff --git a/neutron_tempest_plugin/api/test_networks.py b/neutron_tempest_plugin/api/test_networks.py
index c685256..d79b7ab 100644
--- a/neutron_tempest_plugin/api/test_networks.py
+++ b/neutron_tempest_plugin/api/test_networks.py
@@ -49,7 +49,7 @@
             fields.append('mtu')
         for key in fields:
             self.assertEqual(network[key], self.network[key])
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         self.assertEqual(project_id, network['tenant_id'])
         if utils.is_extension_enabled('project-id', 'network'):
             self.assertEqual(project_id, network['project_id'])
@@ -76,7 +76,7 @@
     @decorators.idempotent_id('0cc0552f-afaf-4231-b7a7-c2a1774616da')
     @utils.requires_ext(extension="project-id", service="network")
     def test_create_network_with_project(self):
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
 
         name = 'created-with-project_id'
         network = self.create_network(name, project_id=project_id)
diff --git a/neutron_tempest_plugin/api/test_ports.py b/neutron_tempest_plugin/api/test_ports.py
index f1dfe5c..8d5772b 100644
--- a/neutron_tempest_plugin/api/test_ports.py
+++ b/neutron_tempest_plugin/api/test_ports.py
@@ -80,7 +80,7 @@
                        service="network")
     def test_create_update_port_with_dns_name(self):
         # NOTE(manjeets) dns_domain is set to openstackgate.local
-        # so dns_name for port can be set
+        # (or any other configured value) so dns_name for port can be set
         self.create_subnet(self.network)
         body = self.create_port(self.network, dns_name='d1')
         self.assertEqual('d1', body['dns_name'])
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index 372bf1e..448f391 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -86,7 +86,7 @@
         body = self.admin_client.show_qos_policy(policy['id'])
         show_policy = body['policy']
         self.assertIn('project_id', show_policy)
-        self.assertEqual(self.admin_client.tenant_id,
+        self.assertEqual(self.admin_client.project_id,
                          show_policy['project_id'])
 
     @decorators.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815')
@@ -134,10 +134,11 @@
 
     @decorators.idempotent_id('8e88a54b-f0b2-4b7d-b061-a15d93c2c7d6')
     def test_policy_update(self):
-        policy = self.create_qos_policy(name=self.policy_name,
-                                        description='',
-                                        shared=False,
-                                        project_id=self.admin_client.tenant_id)
+        policy = self.create_qos_policy(
+            name=self.policy_name,
+            description='',
+            shared=False,
+            project_id=self.admin_client.project_id)
         self.admin_client.update_qos_policy(policy['id'],
                                             description='test policy desc2',
                                             shared=True)
@@ -153,7 +154,7 @@
         policy = self.create_qos_policy(name=self.policy_name,
                                         description='',
                                         shared=False,
-                                        project_id=self.client.tenant_id)
+                                        project_id=self.client.project_id)
         self.assertRaises(
             exceptions.Forbidden,
             self.client.update_qos_policy,
@@ -161,10 +162,11 @@
 
     @decorators.idempotent_id('4ecfd7e7-47b6-4702-be38-be9235901a87')
     def test_policy_update_forbidden_for_regular_tenants_foreign_policy(self):
-        policy = self.create_qos_policy(name=self.policy_name,
-                                        description='',
-                                        shared=False,
-                                        project_id=self.admin_client.tenant_id)
+        policy = self.create_qos_policy(
+            name=self.policy_name,
+            description='',
+            shared=False,
+            project_id=self.admin_client.project_id)
         self.assertRaises(
             exceptions.NotFound,
             self.client.update_qos_policy,
@@ -172,10 +174,11 @@
 
     @decorators.idempotent_id('ee263db4-009a-4641-83e5-d0e83506ba4c')
     def test_shared_policy_update(self):
-        policy = self.create_qos_policy(name=self.policy_name,
-                                        description='',
-                                        shared=True,
-                                        project_id=self.admin_client.tenant_id)
+        policy = self.create_qos_policy(
+            name=self.policy_name,
+            description='',
+            shared=True,
+            project_id=self.admin_client.project_id)
 
         self.admin_client.update_qos_policy(policy['id'],
                                             description='test policy desc2')
@@ -396,7 +399,7 @@
             name='test-policy-shared',
             description='shared policy',
             shared=True,
-            project_id=self.admin_client.tenant_id)
+            project_id=self.admin_client.project_id)
         obtained_policy = self.client.show_qos_policy(policy['id'])['policy']
         self.assertEqual(obtained_policy, policy)
 
@@ -438,11 +441,11 @@
     def test_user_create_port_with_admin_qos_policy(self):
         qos_policy = self.create_qos_policy(
             name=self.policy_name,
-            project_id=self.admin_client.tenant_id,
+            project_id=self.admin_client.project_id,
             shared=False)
         network = self.create_network(
             'test network', client=self.admin_client,
-            project_id=self.client.tenant_id,
+            project_id=self.client.project_id,
             qos_policy_id=qos_policy['id'])
         port = self.create_port(network)
         self.assertEqual(network['id'], port['network_id'])
@@ -603,7 +606,7 @@
         policy = self.create_qos_policy(name=self.policy_name,
                                         description='test policy',
                                         shared=False,
-                                        project_id=self.client.tenant_id)
+                                        project_id=self.client.project_id)
         rule = self._create_qos_bw_limit_rule(
             policy['id'],
             {'max_kbps': 1, 'max_burst_kbps': 1})
@@ -618,7 +621,7 @@
             name=self.policy_name,
             description='test policy',
             shared=False,
-            project_id=self.admin_client.tenant_id)
+            project_id=self.admin_client.project_id)
         rule = self._create_qos_bw_limit_rule(
             policy['id'], {'max_kbps': 1, 'max_burst_kbps': 1})
         self.assertRaises(
@@ -797,7 +800,7 @@
         qos_pol = self.create_qos_policy(
             name=data_utils.rand_name('test-policy'),
             description='test-shared-policy', shared=False,
-            project_id=self.admin_client.tenant_id)
+            project_id=self.admin_client.project_id)
         self.assertNotIn(qos_pol, self.client2.list_qos_policies()['policies'])
 
         # test update shared False -> True
@@ -805,8 +808,8 @@
         qos_pol['shared'] = True
         self.client2.show_qos_policy(qos_pol['id'])
         rbac_pol = {'target_tenant': '*',
-                    'tenant_id': self.admin_client.tenant_id,
-                    'project_id': self.admin_client.tenant_id,
+                    'tenant_id': self.admin_client.project_id,
+                    'project_id': self.admin_client.project_id,
                     'object_type': 'qos_policy',
                     'object_id': qos_pol['id'],
                     'action': 'access_as_shared'}
@@ -829,7 +832,7 @@
 
     def _create_net_bound_qos_rbacs(self):
         res = self._make_admin_policy_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         qos_policy, rbac_for_client_tenant = res['policy'], res['rbac_policy']
 
         # add a wildcard rbac rule - now the policy globally shared
@@ -862,7 +865,7 @@
     @decorators.idempotent_id('2ace9adc-da6e-11e5-aafe-54ee756c66df')
     def test_policy_sharing_with_wildcard_and_project_id(self):
         res = self._make_admin_policy_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         qos_policy, rbac = res['policy'], res['rbac_policy']
         qos_pol = self.client.show_qos_policy(qos_policy['id'])['policy']
         self.assertTrue(qos_pol['shared'])
@@ -885,11 +888,11 @@
     @decorators.idempotent_id('9f85c76a-a350-11e5-8ae5-54ee756c66df')
     def test_policy_target_update(self):
         res = self._make_admin_policy_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         # change to client2
         update_res = self.admin_client.update_rbac_policy(
-                res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
-        self.assertEqual(self.client2.tenant_id,
+            res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+        self.assertEqual(self.client2.project_id,
                          update_res['rbac_policy']['target_tenant'])
         # make sure everything else stayed the same
         res['rbac_policy'].pop('target_tenant')
@@ -899,7 +902,7 @@
     @decorators.idempotent_id('a9b39f46-a350-11e5-97c7-54ee756c66df')
     def test_network_presence_prevents_policy_rbac_policy_deletion(self):
         res = self._make_admin_policy_shared_to_project_id(
-            self.client2.tenant_id)
+            self.client2.project_id)
         qos_policy_id = res['policy']['id']
         self._create_network(qos_policy_id, self.client2)
         # a network with shared qos-policy should prevent the deletion of an
@@ -922,7 +925,7 @@
         # we can't update the policy to a different tenant
         with testtools.ExpectedException(exceptions.Conflict):
             self.admin_client.update_rbac_policy(
-                wild['id'], target_tenant=self.client2.tenant_id)
+                wild['id'], target_tenant=self.client2.project_id)
 
     @decorators.idempotent_id('b0fe87e8-a350-11e5-9f08-54ee756c66df')
     def test_regular_client_shares_to_another_regular_client(self):
@@ -933,7 +936,7 @@
         rbac_policy = self.admin_client.create_rbac_policy(
             object_type='qos_policy', object_id=policy['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
         self.client.show_qos_policy(policy['id'])
 
         self.assertIn(rbac_policy,
@@ -948,7 +951,7 @@
         policy = self._create_qos_policy()
         self.admin_client.create_rbac_policy(
             object_type='qos_policy', object_id=policy['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                       ('project_id', 'target_tenant'))
         for fields in field_args:
@@ -958,7 +961,7 @@
     @decorators.idempotent_id('c10d993a-a350-11e5-9c7a-54ee756c66df')
     def test_rbac_policy_show(self):
         res = self._make_admin_policy_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         p1 = res['rbac_policy']
         p2 = self.admin_client.create_rbac_policy(
             object_type='qos_policy', object_id=res['policy']['id'],
@@ -976,11 +979,11 @@
         rbac_pol1 = self.admin_client.create_rbac_policy(
             object_type='qos_policy', object_id=policy['id'],
             action='access_as_shared',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
+            target_tenant=self.client2.project_id)['rbac_policy']
         rbac_pol2 = self.admin_client.create_rbac_policy(
             object_type='qos_policy', object_id=policy['id'],
             action='access_as_shared',
-            target_tenant=self.admin_client.tenant_id)['rbac_policy']
+            target_tenant=self.admin_client.project_id)['rbac_policy']
         res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
             'rbac_policies']
         res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -993,12 +996,12 @@
     @decorators.idempotent_id('cd7d755a-a350-11e5-a344-54ee756c66df')
     def test_regular_client_blocked_from_sharing_anothers_policy(self):
         qos_policy = self._make_admin_policy_shared_to_project_id(
-            self.client.tenant_id)['policy']
+            self.client.project_id)['policy']
         with testtools.ExpectedException(exceptions.BadRequest):
             self.client.create_rbac_policy(
                 object_type='qos_policy', object_id=qos_policy['id'],
                 action='access_as_shared',
-                target_tenant=self.client2.tenant_id)
+                target_tenant=self.client2.project_id)
 
         # make sure the rbac-policy is invisible to the tenant for which it's
         # being shared
@@ -1195,6 +1198,7 @@
         dscp_policy_id = self.create_qos_policy(
             name=self.policy_name,
             description='test-qos-policy',
+            project_id=self.client.project_id,
             shared=True)['id']
 
         # Associate QoS to the network
diff --git a/neutron_tempest_plugin/api/test_revisions.py b/neutron_tempest_plugin/api/test_revisions.py
index 0d590f6..09bb3f1 100644
--- a/neutron_tempest_plugin/api/test_revisions.py
+++ b/neutron_tempest_plugin/api/test_revisions.py
@@ -344,7 +344,7 @@
     def test_update_router_extra_attributes_bumps_revision(self):
         # updates from CVR to CVR-HA are supported on every release,
         # but only the admin can forcibly create a non-HA router
-        router_args = {'tenant_id': self.client.tenant_id,
+        router_args = {'tenant_id': self.client.project_id,
                        'ha': False}
         router = self.admin_client.create_router('r1', True,
             **router_args)['router']
diff --git a/neutron_tempest_plugin/api/test_router_interface_fip.py b/neutron_tempest_plugin/api/test_router_interface_fip.py
index 4369838..5d8ab67 100644
--- a/neutron_tempest_plugin/api/test_router_interface_fip.py
+++ b/neutron_tempest_plugin/api/test_router_interface_fip.py
@@ -61,7 +61,7 @@
         subnet1 = self.create_subnet(net1, cidr=cidr1)
         self.create_router_interface(router1['id'], subnet1['id'])
         net2 = self.admin_client.create_network(
-            project_id=self.client.tenant_id,
+            project_id=self.client.project_id,
             **{'router:external': True})['network']
         self.networks.append(net2)
         subnet2 = self.create_subnet(net2, cidr=cidr2)
diff --git a/neutron_tempest_plugin/api/test_routers.py b/neutron_tempest_plugin/api/test_routers.py
index 5e916f5..4179e6d 100644
--- a/neutron_tempest_plugin/api/test_routers.py
+++ b/neutron_tempest_plugin/api/test_routers.py
@@ -316,7 +316,7 @@
     @decorators.idempotent_id('644d7a4a-01a1-4b68-bb8d-0c0042cb1729')
     def test_convert_distributed_router_back_to_centralized(self):
         # Convert a centralized router to distributed firstly
-        router_args = {'tenant_id': self.client.tenant_id,
+        router_args = {'tenant_id': self.client.project_id,
                        'distributed': False, 'ha': False}
         router = self._create_admin_router(
             data_utils.rand_name('router'), admin_state_up=False,
@@ -348,7 +348,7 @@
 
     @decorators.idempotent_id('0ffb9973-0c1a-4b76-a1f2-060178057661')
     def test_convert_centralized_router_to_distributed_extended(self):
-        router_args = {'tenant_id': self.client.tenant_id,
+        router_args = {'tenant_id': self.client.project_id,
                        'distributed': False, 'ha': False}
         router = self._create_admin_router(
             data_utils.rand_name('router'), admin_state_up=True,
@@ -371,7 +371,7 @@
 
     @decorators.idempotent_id('e9a8f55b-c535-44b7-8b0a-20af6a7c2921')
     def test_convert_distributed_router_to_centralized_extended(self):
-        router_args = {'tenant_id': self.client.tenant_id,
+        router_args = {'tenant_id': self.client.project_id,
                        'distributed': True, 'ha': False}
         router = self._create_admin_router(
             data_utils.rand_name('router'), admin_state_up=True,
diff --git a/neutron_tempest_plugin/api/test_routers_negative.py b/neutron_tempest_plugin/api/test_routers_negative.py
index 9c83fc7..b51485b 100644
--- a/neutron_tempest_plugin/api/test_routers_negative.py
+++ b/neutron_tempest_plugin/api/test_routers_negative.py
@@ -124,7 +124,7 @@
     @decorators.idempotent_id('5379fe06-e45e-4a4f-8b4a-9e28a924b451')
     def test_router_update_distributed_returns_exception(self):
         # create a centralized router
-        router_args = {'tenant_id': self.client.tenant_id,
+        router_args = {'tenant_id': self.client.project_id,
                        'distributed': False}
         router = self._create_admin_router(
             data_utils.rand_name('router'), admin_state_up=True,
@@ -141,7 +141,7 @@
     @decorators.idempotent_id('c277e945-3b39-442d-b149-e2e8cc6a2b40')
     def test_router_update_centralized_returns_exception(self):
         # create a centralized router
-        router_args = {'tenant_id': self.client.tenant_id,
+        router_args = {'tenant_id': self.client.project_id,
                        'distributed': False}
         router = self._create_admin_router(
             data_utils.rand_name('router'), admin_state_up=True,
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index d251f8c..14e0c66 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -29,13 +29,15 @@
 LOG = log.getLogger(__name__)
 
 
-class SecGroupTest(base.BaseAdminNetworkTest):
+class BaseSecGroupTest(base.BaseAdminNetworkTest):
 
     required_extensions = ['security-group']
 
-    @decorators.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802')
-    def test_create_list_update_show_delete_security_group(self):
-        security_group = self.create_security_group()
+    def _test_create_list_update_show_delete_security_group(self):
+        sg_kwargs = {}
+        if self.stateless_sg:
+            sg_kwargs['stateful'] = False
+        security_group = self.create_security_group(**sg_kwargs)
 
         # List security groups and verify if created group is there in response
         security_groups = self.client.list_security_groups()['security_groups']
@@ -61,13 +63,15 @@
         self.assertEqual(observed_security_group['description'],
                          new_description)
 
-    @decorators.idempotent_id('1fff0d57-bb6c-4528-9c1d-2326dce1c087')
-    def test_show_security_group_contains_all_rules(self):
-        security_group = self.create_security_group()
+    def _test_show_security_group_contains_all_rules(self):
+        sg_kwargs = {}
+        if self.stateless_sg:
+            sg_kwargs['stateful'] = False
+        security_group = self.create_security_group(**sg_kwargs)
         protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
         security_group_rule = self.create_security_group_rule(
             security_group=security_group,
-            project={'id': self.admin_client.tenant_id},
+            project={'id': self.admin_client.project_id},
             client=self.admin_client,
             protocol=protocol,
             direction=constants.INGRESS_DIRECTION)
@@ -80,30 +84,36 @@
         self.assertIn(
             security_group_rule['id'], observerd_security_group_rules_ids)
 
-    @decorators.idempotent_id('b5923b1a-4d33-44e1-af25-088dcb55b02b')
-    def test_list_security_group_rules_contains_all_rules(self):
+    def _test_list_security_group_rules_contains_all_rules(self):
         """Test list security group rules.
 
         This test checks if all SG rules which belongs to the tenant OR
         which belongs to the tenant's security group are listed.
         """
-        security_group = self.create_security_group()
+        sg_kwargs = {}
+        if self.stateless_sg:
+            sg_kwargs['stateful'] = False
+        security_group = self.create_security_group(**sg_kwargs)
         protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
         security_group_rule = self.create_security_group_rule(
             security_group=security_group,
-            project={'id': self.admin_client.tenant_id},
+            project={'id': self.admin_client.project_id},
             client=self.admin_client,
             protocol=protocol,
             direction=constants.INGRESS_DIRECTION)
 
         # Create also other SG with some custom rule to check that regular user
         # can't see this rule
-        admin_security_group = self.create_security_group(
-            project={'id': self.admin_client.tenant_id},
-            client=self.admin_client)
+        sg_kwargs = {
+            'project': {'id': self.admin_client.project_id},
+            'client': self.admin_client
+        }
+        if self.stateless_sg:
+            sg_kwargs['stateful'] = False
+        admin_security_group = self.create_security_group(**sg_kwargs)
         admin_security_group_rule = self.create_security_group_rule(
             security_group=admin_security_group,
-            project={'id': self.admin_client.tenant_id},
+            project={'id': self.admin_client.project_id},
             client=self.admin_client,
             protocol=protocol,
             direction=constants.INGRESS_DIRECTION)
@@ -113,12 +123,12 @@
         self.assertIn(security_group_rule['id'], rules_ids)
         self.assertNotIn(admin_security_group_rule['id'], rules_ids)
 
-    @decorators.idempotent_id('7c0ecb10-b2db-11e6-9b14-000c29248b0d')
-    def test_create_bulk_sec_groups(self):
+    def _test_create_bulk_sec_groups(self):
         # Creates 2 sec-groups in one request
         sec_nm = [data_utils.rand_name('secgroup'),
                   data_utils.rand_name('secgroup')]
-        body = self.client.create_bulk_security_groups(sec_nm)
+        body = self.client.create_bulk_security_groups(
+            sec_nm, stateless=self.stateless_sg)
         created_sec_grps = body['security_groups']
         self.assertEqual(2, len(created_sec_grps))
         for secgrp in created_sec_grps:
@@ -127,13 +137,16 @@
             self.assertIn(secgrp['name'], sec_nm)
             self.assertIsNotNone(secgrp['id'])
 
-    @decorators.idempotent_id('e93f33d8-57ea-11eb-b69b-74e5f9e2a801')
-    def test_create_sec_groups_with_the_same_name(self):
+    def _test_create_sec_groups_with_the_same_name(self):
         same_name_sg_number = 5
         sg_name = 'sg_zahlabut'
         sg_names = [sg_name] * same_name_sg_number
+        sg_kwargs = {}
+        if self.stateless_sg:
+            sg_kwargs['stateful'] = False
         for name in sg_names:
-            self.create_security_group(name=name)
+            sg_kwargs['name'] = name
+            self.create_security_group(**sg_kwargs)
         sec_groups = [item['id'] for item in
                       self.client.list_security_groups(
                           name=sg_name)['security_groups']]
@@ -143,9 +156,55 @@
             ' is: {}'.format(same_name_sg_number))
 
 
-class StatelessSecGroupTest(base.BaseAdminNetworkTest):
+class StatefulSecGroupTest(BaseSecGroupTest):
+
+    stateless_sg = False
+
+    @decorators.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802')
+    def test_create_list_update_show_delete_security_group(self):
+        self._test_create_list_update_show_delete_security_group()
+
+    @decorators.idempotent_id('1fff0d57-bb6c-4528-9c1d-2326dce1c087')
+    def test_show_security_group_contains_all_rules(self):
+        self._test_show_security_group_contains_all_rules()
+
+    @decorators.idempotent_id('b5923b1a-4d33-44e1-af25-088dcb55b02b')
+    def test_list_security_group_rules_contains_all_rules(self):
+        self._test_list_security_group_rules_contains_all_rules()
+
+    @decorators.idempotent_id('7c0ecb10-b2db-11e6-9b14-000c29248b0d')
+    def test_create_bulk_sec_groups(self):
+        self._test_create_bulk_sec_groups()
+
+    @decorators.idempotent_id('e93f33d8-57ea-11eb-b69b-74e5f9e2a801')
+    def test_create_sec_groups_with_the_same_name(self):
+        self._test_create_sec_groups_with_the_same_name()
+
+
+class StatelessSecGroupTest(BaseSecGroupTest):
 
     required_extensions = ['security-group', 'stateful-security-group']
+    stateless_sg = True
+
+    @decorators.idempotent_id('0214d58a-2177-47e1-af83-dcd45c024829')
+    def test_create_list_update_show_delete_security_group(self):
+        self._test_create_list_update_show_delete_security_group()
+
+    @decorators.idempotent_id('ddbc0e4c-840f-44ab-8718-0b95b7c7b575')
+    def test_show_security_group_contains_all_rules(self):
+        self._test_show_security_group_contains_all_rules()
+
+    @decorators.idempotent_id('cdf3a63a-08fe-4091-bab4-62180847990f')
+    def test_list_security_group_rules_contains_all_rules(self):
+        self._test_list_security_group_rules_contains_all_rules()
+
+    @decorators.idempotent_id('b33e612e-65f0-467b-9bf2-b5b2ce67f72f')
+    def test_create_bulk_sec_groups(self):
+        self._test_create_bulk_sec_groups()
+
+    @decorators.idempotent_id('a6896935-db18-413d-95f5-4f465e0e2209')
+    def test_create_sec_groups_with_the_same_name(self):
+        self._test_create_sec_groups_with_the_same_name()
 
     @decorators.idempotent_id('0a6c1476-3d1a-11ec-b0ec-0800277ac3d9')
     def test_stateless_security_group_update(self):
@@ -213,18 +272,18 @@
 
     def _set_sg_quota(self, val):
         sg_quota = self._get_sg_quota()
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         self.admin_client.update_quotas(project_id, **{'security_group': val})
         self.addCleanup(self.admin_client.update_quotas,
                         project_id, **{'security_group': sg_quota})
 
     def _get_sg_quota(self):
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         quotas = self.admin_client.show_quotas(project_id)
         return quotas['quota']['security_group']
 
     def _get_sg_amount(self):
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         filter_query = {'project_id': project_id}
         security_groups = self.client.list_security_groups(**filter_query)
         return len(security_groups['security_groups'])
@@ -282,7 +341,7 @@
     def _create_security_group_rules(self, amount, port_index=1):
         for i in range(amount):
             ingress_rule = self.create_security_group_rule(**{
-                'project_id': self.client.tenant_id,
+                'project_id': self.client.project_id,
                 'direction': 'ingress',
                 'port_range_max': port_index + i,
                 'port_range_min': port_index + i,
@@ -305,18 +364,18 @@
         return new_sg_rules_quota
 
     def _set_sg_rules_quota(self, val):
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         self.admin_client.update_quotas(project_id,
                                         **{'security_group_rule': val})
         LOG.info('Trying to update security group rule quota {} '.format(val))
 
     def _get_sg_rules_quota(self):
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         quotas = self.admin_client.show_quotas(project_id)
         return quotas['quota']['security_group_rule']
 
     def _get_sg_rules_amount(self):
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         filter_query = {'project_id': project_id}
         security_group_rules = self.client.list_security_group_rules(
                 **filter_query)
@@ -331,7 +390,7 @@
     def setUp(self):
         super(SecGroupRulesQuotaTest, self).setUp()
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.admin_client.reset_quotas, self.client.tenant_id)
+                        self.admin_client.reset_quotas, self.client.project_id)
         self._set_sg_rules_quota(10)
 
     @decorators.idempotent_id('77ec038c-5638-11ea-8e2d-0242ac130003')
@@ -357,7 +416,7 @@
         values, different values.
         """
         sg_rules_quota = self._get_sg_rules_quota()
-        project_id = self.client.tenant_id
+        project_id = self.client.project_id
         self.addCleanup(self.admin_client.update_quotas,
                         project_id, **{'security_group_rule': sg_rules_quota})
         values = [-1, 0, 10, 2147483647]
@@ -380,21 +439,16 @@
         self.assertEqual(self._get_sg_rules_quota(), new_quota)
 
 
-class SecGroupProtocolTest(base.BaseNetworkTest):
+class BaseSecGroupProtocolTest(base.BaseNetworkTest):
 
     protocol_names = base_security_groups.V4_PROTOCOL_NAMES
     protocol_ints = base_security_groups.V4_PROTOCOL_INTS
 
-    @decorators.idempotent_id('282e3681-aa6e-42a7-b05c-c341aa1e3cdf')
-    def test_security_group_rule_protocol_names(self):
-        self._test_security_group_rule_protocols(protocols=self.protocol_names)
-
-    @decorators.idempotent_id('66e47f1f-20b6-4417-8839-3cc671c7afa3')
-    def test_security_group_rule_protocol_ints(self):
-        self._test_security_group_rule_protocols(protocols=self.protocol_ints)
-
     def _test_security_group_rule_protocols(self, protocols):
-        security_group = self.create_security_group()
+        sg_kwargs = {}
+        if self.stateless_sg:
+            sg_kwargs['stateful'] = False
+        security_group = self.create_security_group(**sg_kwargs)
         for protocol in protocols:
             self._test_security_group_rule(
                 security_group=security_group,
@@ -414,14 +468,38 @@
                              "{!r} does not match.".format(key))
 
 
-class SecGroupProtocolIPv6Test(SecGroupProtocolTest):
+class StatefulSecGroupProtocolTest(BaseSecGroupProtocolTest):
+    stateless_sg = False
+
+    @decorators.idempotent_id('282e3681-aa6e-42a7-b05c-c341aa1e3cdf')
+    def test_security_group_rule_protocol_names(self):
+        self._test_security_group_rule_protocols(protocols=self.protocol_names)
+
+    @decorators.idempotent_id('66e47f1f-20b6-4417-8839-3cc671c7afa3')
+    def test_security_group_rule_protocol_ints(self):
+        self._test_security_group_rule_protocols(protocols=self.protocol_ints)
+
+
+class StatelessSecGroupProtocolTest(BaseSecGroupProtocolTest):
+    required_extensions = ['security-group', 'stateful-security-group']
+    stateless_sg = True
+
+    @decorators.idempotent_id('3a065cdd-99bd-409f-a08e-385c6674bec2')
+    def test_security_group_rule_protocol_names(self):
+        self._test_security_group_rule_protocols(protocols=self.protocol_names)
+
+    @decorators.idempotent_id('b0332b5d-6fac-49d5-a79d-ae4fe62600f7')
+    def test_security_group_rule_protocol_ints(self):
+        self._test_security_group_rule_protocols(protocols=self.protocol_ints)
+
+
+class BaseSecGroupProtocolIPv6Test(BaseSecGroupProtocolTest):
 
     _ip_version = constants.IP_VERSION_6
     protocol_names = base_security_groups.V6_PROTOCOL_NAMES
     protocol_ints = base_security_groups.V6_PROTOCOL_INTS
 
-    @decorators.idempotent_id('c7d17b41-3b4e-4add-bb3b-6af59baaaffa')
-    def test_security_group_rule_protocol_legacy_icmpv6(self):
+    def _test_security_group_rule_protocol_legacy_icmpv6(self):
         # These legacy protocols can be used to create security groups,
         # but they could be shown either with their passed protocol name,
         # or a canonical-ized version, depending on the neutron version.
@@ -439,7 +517,10 @@
                 ethertype=self.ethertype)
 
     def _test_security_group_rule_legacy(self, protocol_list, **kwargs):
-        security_group = self.create_security_group()
+        sg_kwargs = {}
+        if self.stateless_sg:
+            sg_kwargs['stateful'] = False
+        security_group = self.create_security_group(**sg_kwargs)
         security_group_rule = self.create_security_group_rule(
             security_group=security_group, **kwargs)
         observed_security_group_rule = self.client.show_security_group_rule(
@@ -457,6 +538,23 @@
                                  "{!r} does not match.".format(key))
 
 
+class StatefulSecGroupProtocolIPv6Test(BaseSecGroupProtocolIPv6Test):
+    stateless_sg = False
+
+    @decorators.idempotent_id('c7d17b41-3b4e-4add-bb3b-6af59baaaffa')
+    def test_security_group_rule_protocol_legacy_icmpv6(self):
+        self._test_security_group_rule_protocol_legacy_icmpv6()
+
+
+class StatelessSecGroupProtocolIPv6Test(BaseSecGroupProtocolIPv6Test):
+    required_extensions = ['security-group', 'stateful-security-group']
+    stateless_sg = True
+
+    @decorators.idempotent_id('a034814e-0fa5-4437-8e6f-0d2eebd668b3')
+    def test_security_group_rule_protocol_legacy_icmpv6(self):
+        self._test_security_group_rule_protocol_legacy_icmpv6()
+
+
 class RbacSharedSecurityGroupTest(base.BaseAdminNetworkTest):
 
     force_tenant_isolation = True
@@ -471,7 +569,7 @@
     def _create_security_group(self):
         return self.create_security_group(
             name=data_utils.rand_name('test-sg'),
-            project={'id': self.admin_client.tenant_id})
+            project={'id': self.admin_client.project_id})
 
     def _make_admin_sg_shared_to_project_id(self, project_id):
         sg = self._create_security_group()
@@ -486,11 +584,11 @@
     @decorators.idempotent_id('2a41eb8f-2a35-11e9-bae9-acde48001122')
     def test_policy_target_update(self):
         res = self._make_admin_sg_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         # change to client2
         update_res = self.admin_client.update_rbac_policy(
-                res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
-        self.assertEqual(self.client2.tenant_id,
+            res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+        self.assertEqual(self.client2.project_id,
                          update_res['rbac_policy']['target_tenant'])
         # make sure everything else stayed the same
         res['rbac_policy'].pop('target_tenant')
@@ -500,7 +598,7 @@
     @decorators.idempotent_id('2a619a8a-2a35-11e9-90d9-acde48001122')
     def test_port_presence_prevents_policy_rbac_policy_deletion(self):
         res = self._make_admin_sg_shared_to_project_id(
-            self.client2.tenant_id)
+            self.client2.project_id)
         sg_id = res['security_group']['id']
         net = self.create_network(client=self.client2)
         port = self.client2.create_port(
@@ -525,7 +623,7 @@
         rbac_policy = self.admin_client.create_rbac_policy(
             object_type='security_group', object_id=sg['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
         self.client.show_security_group(sg['id'])
 
         self.assertIn(rbac_policy,
@@ -540,7 +638,7 @@
         sg = self._create_security_group()
         self.admin_client.create_rbac_policy(
             object_type='security_group', object_id=sg['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                       ('project_id', 'target_tenant'))
         for fields in field_args:
@@ -550,7 +648,7 @@
     @decorators.idempotent_id('2abf8f9e-2a35-11e9-85f7-acde48001122')
     def test_rbac_policy_show(self):
         res = self._make_admin_sg_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         p1 = res['rbac_policy']
         p2 = self.admin_client.create_rbac_policy(
             object_type='security_group',
@@ -569,11 +667,11 @@
         rbac_pol1 = self.admin_client.create_rbac_policy(
             object_type='security_group', object_id=sg['id'],
             action='access_as_shared',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
+            target_tenant=self.client2.project_id)['rbac_policy']
         rbac_pol2 = self.admin_client.create_rbac_policy(
             object_type='security_group', object_id=sg['id'],
             action='access_as_shared',
-            target_tenant=self.admin_client.tenant_id)['rbac_policy']
+            target_tenant=self.admin_client.project_id)['rbac_policy']
         res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
             'rbac_policies']
         res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -586,12 +684,12 @@
     @decorators.idempotent_id('2aff3900-2a35-11e9-96b3-acde48001122')
     def test_regular_client_blocked_from_sharing_anothers_policy(self):
         sg = self._make_admin_sg_shared_to_project_id(
-            self.client.tenant_id)['security_group']
+            self.client.project_id)['security_group']
         with testtools.ExpectedException(exceptions.BadRequest):
             self.client.create_rbac_policy(
                 object_type='security_group', object_id=sg['id'],
                 action='access_as_shared',
-                target_tenant=self.client2.tenant_id)
+                target_tenant=self.client2.project_id)
 
         # make sure the rbac-policy is invisible to the tenant for which it's
         # being shared
diff --git a/neutron_tempest_plugin/api/test_security_groups_negative.py b/neutron_tempest_plugin/api/test_security_groups_negative.py
index 7efa70e..07fc606 100644
--- a/neutron_tempest_plugin/api/test_security_groups_negative.py
+++ b/neutron_tempest_plugin/api/test_security_groups_negative.py
@@ -182,10 +182,73 @@
     def setUp(self):
         super(NegativeSecGroupRulesQuotaTest, self).setUp()
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.admin_client.reset_quotas, self.client.tenant_id)
+                        self.admin_client.reset_quotas, self.client.project_id)
         self._set_sg_rules_quota(10)
 
     @decorators.idempotent_id('8336e6ea-2e0a-4a1a-8673-a6f81b577d57')
     def test_sg_creation_with_insufficient_sg_rules_quota(self):
         self._set_sg_rules_quota(0)
         self.assertRaises(lib_exc.Conflict, self.create_security_group)
+
+
+class NegativeStatelessSecGroupTest(base.BaseNetworkTest):
+
+    required_extensions = ['security-group', 'stateful-security-group']
+
+    @classmethod
+    def resource_setup(cls):
+        super().resource_setup()
+        cls.network = cls.create_network()
+        cls.stateless_sg = cls.create_security_group(stateful=False)
+        cls.stateful_sg = cls.create_security_group(stateful=True)
+
+    @decorators.idempotent_id('9e85ce0d-37b2-4044-88a8-09ae965069ba')
+    def test_create_port_with_stateful_and_stateless_sg(self):
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.create_port,
+            network=self.network,
+            security_groups=[self.stateful_sg['id'], self.stateless_sg['id']])
+
+    def _test_adding_sg_to_port_with_different_type_of_sg(
+            self, initial_sg, updated_sg):
+        port = self.create_port(
+            network=self.network,
+            security_groups=[initial_sg['id']]
+        )
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.update_port,
+            port,
+            security_groups=[initial_sg['id'], updated_sg['id']]
+        )
+
+    @decorators.idempotent_id('63374580-3154-410b-ab31-e98a136094f8')
+    def test_adding_stateful_sg_to_port_with_stateless_sg(self):
+        self._test_adding_sg_to_port_with_different_type_of_sg(
+            self.stateless_sg, self.stateful_sg)
+
+    @decorators.idempotent_id('3854a4c6-4ace-4133-be83-4a2820ede06f')
+    def test_adding_stateless_sg_to_port_with_stateful_sg(self):
+        self._test_adding_sg_to_port_with_different_type_of_sg(
+            self.stateful_sg, self.stateless_sg)
+
+    def _test_update_used_sg(self, security_group):
+        self.create_port(
+            network=self.network,
+            security_groups=[security_group['id']]
+        )
+        self.assertRaises(
+            lib_exc.Conflict,
+            self.client.update_security_group,
+            security_group['id'],
+            stateful=not security_group['stateful']
+        )
+
+    @decorators.idempotent_id('5e1e3053-16dc-4f0b-a327-ff953f527248')
+    def test_update_used_stateless_sg_to_stateful(self):
+        self._test_update_used_sg(self.stateless_sg)
+
+    @decorators.idempotent_id('afe4d777-7a98-44ed-a1dc-588861f6daba')
+    def test_update_used_stateful_sg_to_stateless(self):
+        self._test_update_used_sg(self.stateful_sg)
diff --git a/neutron_tempest_plugin/api/test_subnetpools.py b/neutron_tempest_plugin/api/test_subnetpools.py
index 38c721f..eaaee33 100644
--- a/neutron_tempest_plugin/api/test_subnetpools.py
+++ b/neutron_tempest_plugin/api/test_subnetpools.py
@@ -145,8 +145,8 @@
         show_subnetpool = body['subnetpool']
         self.assertIn('project_id', show_subnetpool)
         self.assertIn('tenant_id', show_subnetpool)
-        self.assertEqual(self.client.tenant_id, show_subnetpool['project_id'])
-        self.assertEqual(self.client.tenant_id, show_subnetpool['tenant_id'])
+        self.assertEqual(self.client.project_id, show_subnetpool['project_id'])
+        self.assertEqual(self.client.project_id, show_subnetpool['tenant_id'])
 
     @decorators.idempotent_id('764f1b93-1c4a-4513-9e7b-6c2fc5e9270c')
     def test_tenant_update_subnetpool(self):
@@ -446,11 +446,11 @@
     @decorators.idempotent_id('71b35ad0-51cd-40da-985d-89a51c95ec6a')
     def test_policy_target_update(self):
         res = self._make_admin_snp_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         # change to client2
         update_res = self.admin_client.update_rbac_policy(
-                res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
-        self.assertEqual(self.client2.tenant_id,
+            res['rbac_policy']['id'], target_tenant=self.client2.project_id)
+        self.assertEqual(self.client2.project_id,
                          update_res['rbac_policy']['target_tenant'])
         # make sure everything else stayed the same
         res['rbac_policy'].pop('target_tenant')
@@ -460,7 +460,7 @@
     @decorators.idempotent_id('451d9d38-65a0-4916-a805-1460d6a938d1')
     def test_subnet_presence_prevents_rbac_policy_deletion(self):
         res = self._make_admin_snp_shared_to_project_id(
-            self.client2.tenant_id)
+            self.client2.project_id)
         network = self.create_network(client=self.client2)
         subnet = self.client2.create_subnet(
             network_id=network['id'],
@@ -491,7 +491,7 @@
         rbac_policy = self.admin_client.create_rbac_policy(
             object_type='address_scope', object_id=a_s['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
 
         # Create subnet pool owned by client with shared AS
         snp = self._create_subnetpool(address_scope_id=a_s["id"])
@@ -500,7 +500,7 @@
             self.client.create_rbac_policy(
                 object_type='subnetpool', object_id=snp['id'],
                 action='access_as_shared',
-                target_tenant=self.client2.tenant_id
+                target_tenant=self.client2.project_id
             )
 
         # cleanup
@@ -517,7 +517,7 @@
         rbac_policy = self.admin_client.create_rbac_policy(
             object_type='subnetpool', object_id=snp['id'],
             action='access_as_shared',
-            target_tenant=self.client.tenant_id)['rbac_policy']
+            target_tenant=self.client.project_id)['rbac_policy']
         self.client.show_subnetpool(snp['id'])
 
         self.assertIn(rbac_policy,
@@ -532,7 +532,7 @@
         snp = self._create_subnetpool()
         self.admin_client.create_rbac_policy(
             object_type='subnetpool', object_id=snp['id'],
-            action='access_as_shared', target_tenant=self.client2.tenant_id)
+            action='access_as_shared', target_tenant=self.client2.project_id)
         field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
                       ('project_id', 'target_tenant'))
         for fields in field_args:
@@ -542,7 +542,7 @@
     @decorators.idempotent_id('e59e4502-4e6a-4e49-b446-a5d5642bbd69')
     def test_rbac_policy_show(self):
         res = self._make_admin_snp_shared_to_project_id(
-            self.client.tenant_id)
+            self.client.project_id)
         p1 = res['rbac_policy']
         p2 = self.admin_client.create_rbac_policy(
             object_type='subnetpool',
@@ -561,11 +561,11 @@
         rbac_pol1 = self.admin_client.create_rbac_policy(
             object_type='subnetpool', object_id=snp['id'],
             action='access_as_shared',
-            target_tenant=self.client2.tenant_id)['rbac_policy']
+            target_tenant=self.client2.project_id)['rbac_policy']
         rbac_pol2 = self.admin_client.create_rbac_policy(
             object_type='subnetpool', object_id=snp['id'],
             action='access_as_shared',
-            target_tenant=self.admin_client.tenant_id)['rbac_policy']
+            target_tenant=self.admin_client.project_id)['rbac_policy']
         res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
             'rbac_policies']
         res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
@@ -578,12 +578,12 @@
     @decorators.idempotent_id('63d9acbe-403c-4e77-9ffd-80e636a4621e')
     def test_regular_client_blocked_from_sharing_anothers_policy(self):
         snp = self._make_admin_snp_shared_to_project_id(
-            self.client.tenant_id)['subnetpool']
+            self.client.project_id)['subnetpool']
         with testtools.ExpectedException(lib_exc.BadRequest):
             self.client.create_rbac_policy(
                 object_type='subnetpool', object_id=snp['id'],
                 action='access_as_shared',
-                target_tenant=self.client2.tenant_id)
+                target_tenant=self.client2.project_id)
 
         # make sure the rbac-policy is invisible to the tenant for which it's
         # being shared
diff --git a/neutron_tempest_plugin/api/test_subnetpools_negative.py b/neutron_tempest_plugin/api/test_subnetpools_negative.py
index 1e222df..934d3cd 100644
--- a/neutron_tempest_plugin/api/test_subnetpools_negative.py
+++ b/neutron_tempest_plugin/api/test_subnetpools_negative.py
@@ -289,5 +289,5 @@
             lib_exc.BadRequest,
             self.admin_client.update_subnetpool,
             subnetpool['id'],
-            tenant_id=self.admin_client.tenant_id,
+            tenant_id=self.admin_client.project_id,
         )
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index 26f8de8..1006617 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -48,7 +48,7 @@
         if parent_network_type:
             client = cls.admin_client
             network_kwargs = {"provider:network_type": parent_network_type,
-                              "tenant_id": cls.client.tenant_id}
+                              "tenant_id": cls.client.project_id}
         network = cls.create_network(client=client, **network_kwargs)
         parent_port = cls.create_port(network)
         return cls.create_trunk(parent_port, subports, **kwargs)
@@ -98,7 +98,7 @@
         observed_trunk = self._show_trunk(trunk)
         for key in ['project_id', 'tenant_id']:
             self.assertIn(key, observed_trunk)
-            self.assertEqual(self.client.tenant_id, observed_trunk[key])
+            self.assertEqual(self.client.project_id, observed_trunk[key])
 
     @decorators.idempotent_id('4ce46c22-a2b6-4659-bc5a-0ef2463cab32')
     def test_create_update_trunk(self):
diff --git a/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py b/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
index f3a7b11..4610686 100644
--- a/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
+++ b/neutron_tempest_plugin/bgpvpn/api/test_bgpvpn.py
@@ -54,21 +54,21 @@
     @decorators.idempotent_id('709b23b0-9719-47df-9f53-b0812a5d5a48')
     def test_delete_bgpvpn(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         self.delete_bgpvpn(self.bgpvpn_admin_client, bgpvpn)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('596abfc2-fd89-491d-863d-25459db1df4b')
     def test_delete_bgpvpn_as_non_admin_fail(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         self.assertRaises(exceptions.Forbidden,
                           self.bgpvpn_client.delete_bgpvpn, bgpvpn['id'])
 
     @decorators.idempotent_id('9fa29db8-35d0-4beb-a986-23c369499ab1')
     def test_show_bgpvpn(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         bgpvpn_details = self.bgpvpn_client.show_bgpvpn(bgpvpn['id'])['bgpvpn']
         self.assertEqual(bgpvpn['id'], bgpvpn_details['id'])
 
@@ -76,14 +76,14 @@
     @decorators.idempotent_id('b20110bb-393b-4342-8b30-6486cd2b4fc6')
     def test_show_bgpvpn_as_non_owner_fail(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         self.assertRaises(exceptions.NotFound,
                           self.bgpvpn_alt_client.show_bgpvpn, bgpvpn['id'])
 
     @decorators.idempotent_id('7a7feca2-1c24-4f5d-ad4b-b0e5a712adb1')
     def test_list_bgpvpn(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         bgpvpns = self.bgpvpn_client.list_bgpvpns()['bgpvpns']
         self.assertIn(bgpvpn['id'],
                       [bgpvpn_alt['id'] for bgpvpn_alt in bgpvpns])
@@ -92,7 +92,7 @@
     @decorators.idempotent_id('4875e65d-0b65-40c0-9efd-309420686ab4')
     def test_list_bgpvpn_as_non_owner_fail(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         bgpvpns_alt = self.bgpvpn_alt_client.list_bgpvpns()['bgpvpns']
         self.assertNotIn(bgpvpn['id'],
                          [bgpvpn_alt['id'] for bgpvpn_alt in bgpvpns_alt])
@@ -100,7 +100,7 @@
     @decorators.idempotent_id('096281da-356d-4c04-bd55-784a26bb1b0c')
     def test_list_show_network_association(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         network = self.networks_client.create_network()['network']
 
         association = self.bgpvpn_client.create_network_association(
@@ -118,7 +118,7 @@
     @decorators.idempotent_id('57b0da93-8e37-459f-9aaf-f903acc36025')
     def test_show_netassoc_as_non_owner_fail(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         network = self.networks_client.create_network()['network']
 
         net_assoc = self.bgpvpn_client.create_network_association(
@@ -132,7 +132,7 @@
     @decorators.idempotent_id('2cbb10af-bf9c-4b32-b6a6-4066de783758')
     def test_list_netassoc_as_non_owner_fail(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         network = self.networks_client.create_network()['network']
 
         self.bgpvpn_client.create_network_association(bgpvpn['id'],
@@ -144,7 +144,7 @@
     @decorators.idempotent_id('51e1b079-aefa-4c37-8b1a-0567b3ef7954')
     def test_associate_disassociate_network(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         network = self.networks_client.create_network()
         network_id = network['network']['id']
 
@@ -187,7 +187,7 @@
     def test_update_route_target_non_admin_fail(self):
         bgpvpn = self.create_bgpvpn(
             self.bgpvpn_admin_client,
-            tenant_id=self.bgpvpn_client.tenant_id,
+            tenant_id=self.bgpvpn_client.project_id,
             route_targets=['64512:1'])
         with ExpectedException(exceptions.Forbidden):
             self.bgpvpn_client.update_bgpvpn(
@@ -206,21 +206,21 @@
         """
         postdata = {
             "name": "testbgpvpn",
-            "tenant_id": self.bgpvpn_client.tenant_id,
+            "tenant_id": self.bgpvpn_client.project_id,
             "route_targets": ["0"]
         }
         self.assertRaises(exceptions.BadRequest,
                           self.bgpvpn_admin_client.create_bgpvpn, **postdata)
         postdata = {
             "name": "testbgpvpn",
-            "tenant_id": self.bgpvpn_client.tenant_id,
+            "tenant_id": self.bgpvpn_client.project_id,
             "import_targets": ["test", " "]
         }
         self.assertRaises(exceptions.BadRequest,
                           self.bgpvpn_admin_client.create_bgpvpn, **postdata)
         postdata = {
             "name": "testbgpvpn",
-            "tenant_id": self.bgpvpn_client.tenant_id,
+            "tenant_id": self.bgpvpn_client.project_id,
             "export_targets": ["64512:1000000000000", "xyz"]
         }
         self.assertRaises(exceptions.BadRequest,
@@ -236,7 +236,7 @@
         """
         postdata = {
             "name": "testbgpvpn",
-            "tenant_id": self.bgpvpn_client.tenant_id,
+            "tenant_id": self.bgpvpn_client.project_id,
         }
         bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
         updatedata = {
@@ -268,7 +268,7 @@
         """
         postdata = {
             "name": "testbgpvpn",
-            "tenant_id": self.bgpvpn_client.tenant_id,
+            "tenant_id": self.bgpvpn_client.project_id,
         }
         bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
         network = self.networks_client.create_network()
@@ -290,7 +290,7 @@
         """
         postdata = {
             "name": "testbgpvpn",
-            "tenant_id": self.bgpvpn_client.tenant_id,
+            "tenant_id": self.bgpvpn_client.project_id,
         }
         bgpvpn = self.bgpvpn_admin_client.create_bgpvpn(**postdata)
         network = self.networks_client.create_network()
@@ -310,7 +310,7 @@
     @decorators.idempotent_id('de8d94b0-0239-4a48-9574-c3a4a4f7cacb')
     def test_associate_disassociate_router(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         router = self.routers_client.create_router()
         router_id = router['router']['id']
 
@@ -334,7 +334,7 @@
     @decorators.idempotent_id('3ae91755-b1b6-4c62-a699-a44eeb4ee522')
     def test_list_show_router_association(self):
         bgpvpn = self.create_bgpvpn(self.bgpvpn_admin_client,
-                                    tenant_id=self.bgpvpn_client.tenant_id)
+                                    tenant_id=self.bgpvpn_client.project_id)
         router = self.routers_client.create_router()
         router_id = router['router']['id']
 
@@ -355,7 +355,7 @@
         # Create a first bgpvpn and associate a network with a subnet to it
         bgpvpn_net = self.create_bgpvpn(
             self.bgpvpn_admin_client,
-            tenant_id=self.bgpvpn_client.tenant_id)
+            tenant_id=self.bgpvpn_client.project_id)
         network = self.create_network()
         subnet = self.create_subnet(network)
         self.bgpvpn_client.create_network_association(
@@ -364,7 +364,7 @@
         # Create a second bgpvpn and associate a router to it
         bgpvpn_router = self.create_bgpvpn(
             self.bgpvpn_admin_client,
-            tenant_id=self.bgpvpn_client.tenant_id)
+            tenant_id=self.bgpvpn_client.project_id)
 
         router = self.create_router(
             router_name=data_utils.rand_name('test-bgpvpn-'))
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/manager.py b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
index 90c2bb1..398c764 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/manager.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
@@ -147,9 +147,9 @@
     def _create_router(self, client=None, tenant_id=None,
                        namestart='router-smoke'):
         if not client:
-            client = self.routers_client
+            client = self.admin_routers_client
         if not tenant_id:
-            tenant_id = client.tenant_id
+            tenant_id = client.project_id
         name = data_utils.rand_name(namestart)
         result = client.create_router(name=name,
                                       admin_state_up=True,
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
index 0142045..9cca602 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
@@ -108,6 +108,20 @@
         self.RT3 = self.new_rt()
         self.RT4 = self.new_rt()
 
+    @classmethod
+    def setup_clients(cls):
+        """This setup the service clients for the tests"""
+        super(TestBGPVPNBasic, cls).setup_clients()
+        cls.admin_security_group_client = cls.os_admin.security_groups_client
+        cls.admin_security_group_rule_client = (
+            cls.os_admin.security_group_rules_client)
+        cls.admin_routers_client = cls.os_admin.routers_client
+        cls.admin_ports_client = cls.os_admin.ports_client
+        cls.admin_networks_client = cls.os_admin.networks_client
+        cls.admin_subnets_client = cls.os_admin.subnets_client
+        cls.admin_fips_client = cls.os_admin.floating_ips_client
+        cls.admin_keys_client = cls.os_admin.keypairs_client
+
     @decorators.idempotent_id('afdd6cad-871a-4343-b97b-6319c76c815d')
     @utils.services('compute', 'network')
     def test_bgpvpn_basic(self):
@@ -164,6 +178,7 @@
         self._create_networks_and_subnets()
         self._create_servers()
         self.router_b = self._create_fip_router(
+            client=self.admin_routers_client,
             subnet_id=self.subnets[NET_B][0]['id'])
         self._create_l3_bgpvpn()
         self._associate_all_nets_to_bgpvpn()
@@ -187,10 +202,13 @@
         self._create_networks_and_subnets()
         self._create_servers()
         self.router_b = self._create_fip_router(
+            client=self.admin_routers_client,
             subnet_id=self.subnets[NET_B][0]['id'])
         self._create_l3_bgpvpn()
         self._associate_all_nets_to_bgpvpn()
-        self.delete_router(self.router_b)
+        self._delete_router(self.router_b,
+                            routers_client=self.admin_routers_client,
+                            ports_client=self.admin_ports_client)
         self._associate_fip_and_check_l3_bgpvpn()
 
     @decorators.idempotent_id('973ab26d-c7d8-4a32-9aa9-2d7e6f406135')
@@ -212,6 +230,7 @@
         self._create_l3_bgpvpn()
         self._associate_all_nets_to_bgpvpn()
         self.router_b = self._create_fip_router(
+            client=self.admin_routers_client,
             subnet_id=self.subnets[NET_B][0]['id'])
         self._associate_fip_and_check_l3_bgpvpn()
 
@@ -231,6 +250,7 @@
         """
         self._create_networks_and_subnets()
         self.router_b = self._create_fip_router(
+            client=self.admin_routers_client,
             subnet_id=self.subnets[NET_B][0]['id'])
         self._create_l3_bgpvpn()
         self._associate_all_nets_to_bgpvpn()
@@ -255,6 +275,7 @@
         self._create_l3_bgpvpn()
         self._associate_all_nets_to_bgpvpn()
         self.router_b = self._create_fip_router(
+            client=self.admin_routers_client,
             subnet_id=self.subnets[NET_B][0]['id'])
         self._create_servers()
         self._associate_fip_and_check_l3_bgpvpn()
@@ -344,10 +365,10 @@
             0, self.subnets[NET_A][0])
         self._create_l3_bgpvpn(rts=[], export_rts=[self.RT1],
                                import_rts=[self.RT2])
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         self._check_l3_bgpvpn(should_succeed=False)
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_B]['id'])
         self._check_l3_bgpvpn(should_succeed=False)
         self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
@@ -388,13 +409,13 @@
                              [self.networks[NET_B], IP_B_S1_1],
                              [self.networks[NET_A], IP_A_S1_2],
                              [self.networks[NET_B], IP_B_S1_2]])
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         self.router_a = self._create_router_and_associate_fip(
             0, self.subnets[NET_A][0])
         self._check_l3_bgpvpn(should_succeed=False)
         self._check_l3_bgpvpn(self.servers[0], self.servers[2])
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_B]['id'])
         self.router_b = self._create_router_and_associate_fip(
             1, self.subnets[NET_B][0])
@@ -444,11 +465,11 @@
             0, self.subnets[NET_A][0])
         router_b = self._create_router_and_associate_fip(
             3, self.subnets[NET_B][0])
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         self._check_l3_bgpvpn(should_succeed=False)
         self._check_l3_bgpvpn(self.servers[0], self.servers[2])
-        self.bgpvpn_client.create_router_association(self.bgpvpn['id'],
+        self.bgpvpn_admin_client.create_router_association(self.bgpvpn['id'],
                                                      router_b['id'])
         self._check_l3_bgpvpn(should_succeed=False)
         self._check_l3_bgpvpn(self.servers[3], self.servers[1])
@@ -513,16 +534,16 @@
                                   'local_pref': 100,
                                   'prefix': NET_C_S1}]
 
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
 
         port_id_1 = self.ports[self.servers[1]['id']]['id']
-        body = self.bgpvpn_client.create_port_association(
+        body = self.bgpvpn_admin_client.create_port_association(
             self.bgpvpn['id'], port_id=port_id_1, routes=primary_port_routes)
         port_association_1 = body['port_association']
 
         port_id_2 = self.ports[self.servers[2]['id']]['id']
-        body = self.bgpvpn_client.create_port_association(
+        body = self.bgpvpn_admin_client.create_port_association(
             self.bgpvpn['id'], port_id=port_id_2, routes=alternate_port_routes)
         port_association_2 = body['port_association']
 
@@ -535,10 +556,10 @@
             to_server_ip=IP_C_S1_1,
             validate_server=destination_srv_1)
 
-        self.bgpvpn_client.update_port_association(
+        self.bgpvpn_admin_client.update_port_association(
             self.bgpvpn['id'], port_association_1['id'],
             routes=alternate_port_routes)
-        self.bgpvpn_client.update_port_association(
+        self.bgpvpn_admin_client.update_port_association(
             self.bgpvpn['id'], port_association_2['id'],
             routes=primary_port_routes)
 
@@ -581,9 +602,9 @@
                                           rts=[self.RT1])
         bgpvpn_a_bis = self._create_l3_bgpvpn(name='test-l3-bgpvpn-a-bis',
                                               rts=[self.RT2])
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             bgpvpn_a['id'], self.networks[NET_A]['id'])
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             bgpvpn_a_bis['id'], self.networks[NET_A_BIS]['id'])
         self._create_servers([[self.networks[NET_A], IP_A_S1_1],
                              [self.networks[NET_A_BIS], IP_A_BIS_S1_2],
@@ -652,18 +673,17 @@
 
         self._setup_ip_forwarding(1)
         self._setup_ip_address(1, IP_C_S1_1)
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         port_id = self.ports[self.servers[1]['id']]['id']
         port_routes = [{'type': 'prefix',
                         'prefix': NET_C_S1}]
-        body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
-                                                          port_id=port_id,
-                                                          routes=port_routes)
+        body = self.bgpvpn_admin_client.create_port_association(
+            self.bgpvpn['id'], port_id=port_id, routes=port_routes)
         port_association = body['port_association']
         self._check_l3_bgpvpn_by_specific_ip(
             to_server_ip=IP_C_S1_1)
-        self.bgpvpn_client.update_port_association(
+        self.bgpvpn_admin_client.update_port_association(
             self.bgpvpn['id'], port_association['id'], routes=[])
         self._check_l3_bgpvpn_by_specific_ip(
             should_succeed=False, to_server_ip=IP_C_S1_1)
@@ -717,16 +737,15 @@
 
         self._setup_range_ip_address(1, LOOPBACKS)
 
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         port_id = self.ports[self.servers[1]['id']]['id']
         port_routes = [{'type': 'prefix',
                         'prefix': ip + "/32"}
                        for ip in LOOPBACKS]
 
-        body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
-                                                          port_id=port_id,
-                                                          routes=port_routes)
+        body = self.bgpvpn_admin_client.create_port_association(
+            self.bgpvpn['id'], port_id=port_id, routes=port_routes)
         port_association = body['port_association']
 
         for ip in random.sample(LOOPBACKS, SAMPLE_SIZE):
@@ -735,7 +754,7 @@
             self._check_l3_bgpvpn_by_specific_ip(
                 to_server_ip=ip)
 
-        self.bgpvpn_client.update_port_association(
+        self.bgpvpn_admin_client.update_port_association(
             self.bgpvpn['id'], port_association['id'], routes=[])
 
         for ip in SUB_LOOPBACKS:
@@ -782,18 +801,17 @@
 
         self._setup_ip_forwarding(1)
         self._setup_ip_address(1, IP_C_S1_1)
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         port_id = self.ports[self.servers[1]['id']]['id']
         port_routes = [{'type': 'prefix',
                         'prefix': NET_C_S1}]
-        body = self.bgpvpn_client.create_port_association(self.bgpvpn['id'],
-                                                          port_id=port_id,
-                                                          routes=port_routes)
+        body = self.bgpvpn_admin_client.create_port_association(
+            self.bgpvpn['id'], port_id=port_id, routes=port_routes)
         port_association = body['port_association']
         self._check_l3_bgpvpn_by_specific_ip(
             to_server_ip=IP_C_S1_1)
-        self.bgpvpn_client.delete_port_association(
+        self.bgpvpn_admin_client.delete_port_association(
             self.bgpvpn['id'], port_association['id'])
         self._check_l3_bgpvpn_by_specific_ip(
             should_succeed=False, to_server_ip=IP_C_S1_1)
@@ -866,21 +884,21 @@
         self._setup_ip_forwarding(0)
 
         # connect network A to its BGPVPN
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             bgpvpn_a['id'], self.networks[NET_A]['id'])
 
         # connect network B to its BGPVPN
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             bgpvpn_b['id'], self.networks[NET_B]['id'])
 
         # connect network C to its BGPVPN
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             bgpvpn_c['id'], self.networks[NET_C]['id'])
 
         # create port associations for A->C traffic
         # (leak routes imported by BGPVPN B -- which happen to include the
         # routes net C -- into net A)
-        self.bgpvpn_client.create_port_association(
+        self.bgpvpn_admin_client.create_port_association(
             bgpvpn_to_a['id'],
             port_id=self.ports[vm2['id']]['id'],
             routes=[{'type': 'bgpvpn',
@@ -890,7 +908,7 @@
         # create port associations for C->A traffic
         # (leak routes imported by BGPVPN B -- which happen to include the
         # routes from net A -- into net C)
-        body = self.bgpvpn_client.create_port_association(
+        body = self.bgpvpn_admin_client.create_port_association(
             bgpvpn_to_c['id'],
             port_id=self.ports[vm2['id']]['id'],
             routes=[{'type': 'bgpvpn',
@@ -914,7 +932,7 @@
                                              should_succeed=True)
 
         # remove port association 1
-        self.bgpvpn_client.delete_port_association(self.bgpvpn['id'],
+        self.bgpvpn_admin_client.delete_port_association(self.bgpvpn['id'],
                                                    port_association['id'])
 
         # check that connectivity is actually interrupted
@@ -938,7 +956,7 @@
         """
         self._create_networks_and_subnets()
         self._create_l3_bgpvpn()
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         self._create_servers()
         self._associate_fip_and_check_l3_bgpvpn(should_succeed=False)
@@ -1007,10 +1025,10 @@
         """
         self._create_networks_and_subnets()
         self._create_l3_bgpvpn()
-        body = self.bgpvpn_client.create_network_association(
+        body = self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
         assoc_b = body['network_association']
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_B]['id'])
         self._create_servers()
         self._associate_fip_and_check_l3_bgpvpn()
@@ -1040,10 +1058,10 @@
         router_b = self._create_fip_router(
             subnet_id=self.subnets[NET_B][0]['id'])
         self._create_l3_bgpvpn()
-        self.bgpvpn_client.create_network_association(
+        self.bgpvpn_admin_client.create_network_association(
             self.bgpvpn['id'], self.networks[NET_A]['id'])
-        body = self.bgpvpn_client.create_router_association(self.bgpvpn['id'],
-                                                            router_b['id'])
+        body = self.bgpvpn_admin_client.create_router_association(
+            self.bgpvpn['id'], router_b['id'])
         assoc_b = body['router_association']
         self._create_servers()
         self._associate_fip_and_check_l3_bgpvpn()
@@ -1104,7 +1122,9 @@
 
     def _create_security_group_for_test(self):
         self.security_group = self.create_security_group(
-            project_id=self.bgpvpn_client.project_id)
+            project_id=self.bgpvpn_admin_client.project_id,
+            security_groups_client=self.admin_security_group_client,
+            security_group_rules_client=self.admin_security_group_rule_client)
 
     def _create_networks_and_subnets(self, names=None, subnet_cidrs=None,
                                      port_security=True):
@@ -1115,15 +1135,15 @@
         for (name, subnet_cidrs) in zip(names, subnet_cidrs):
             network = super(manager.NetworkScenarioTest,
                             self).create_network(namestart=name,
-                    port_security_enabled=port_security)
+                    port_security_enabled=port_security,
+                    networks_client=self.admin_networks_client)
             self.networks[name] = network
             self.subnets[name] = []
             for (j, cidr) in enumerate(subnet_cidrs):
                 sub_name = "subnet-%s-%d" % (name, j + 1)
-                subnet = self._create_subnet_with_cidr(network,
-                                                       namestart=sub_name,
-                                                       cidr=cidr,
-                                                       ip_version=4)
+                subnet = self._create_subnet_with_cidr(
+                    network, namestart=sub_name, cidr=cidr, ip_version=4,
+                    subnets_client=self.admin_subnets_client)
                 self.subnets[name].append(subnet)
 
     def _create_subnet_with_cidr(self, network, subnets_client=None,
@@ -1146,6 +1166,8 @@
 
     def _create_fip_router(self, client=None, public_network_id=None,
                            subnet_id=None):
+        if not client:
+            client = self.admin_routers_client
         router = self._create_router(client, namestart='router-')
         router_id = router['id']
         if public_network_id is None:
@@ -1165,12 +1187,14 @@
         server = self.servers[server_index]
         fip = self.create_floating_ip(
             server, external_network_id=CONF.network.public_network_id,
-            port_id=self.ports[server['id']]['id'])
+            port_id=self.ports[server['id']]['id'],
+            client=self.admin_fips_client)
         self.server_fips[server['id']] = fip
         return fip
 
     def _create_router_and_associate_fip(self, server_index, subnet):
-        router = self._create_fip_router(subnet_id=subnet['id'])
+        router = self._create_fip_router(client=self.admin_routers_client,
+                                         subnet_id=subnet['id'])
         self._associate_fip(server_index)
         return router
 
@@ -1185,7 +1209,7 @@
 
         port = super(manager.NetworkScenarioTest,
                     self).create_port(network_id=network['id'],
-                                    client=clients.ports_client,
+                                    client=self.admin_ports_client,
                                     **create_port_body)
 
         create_server_kwargs = {
@@ -1205,7 +1229,7 @@
         return server
 
     def _create_servers(self, ports_config=None, port_security=True):
-        keypair = self.create_keypair()
+        keypair = self.create_keypair(client=self.admin_keys_client)
         security_group_ids = [self.security_group['id']]
         if ports_config is None:
             ports_config = [[self.networks[NET_A], IP_A_S1_1],
@@ -1214,7 +1238,7 @@
             network = port_config[0]
             server = self._create_server(
                 'server-' + str(i + 1), keypair, network, port_config[1],
-                security_group_ids, self.os_primary, port_security)
+                security_group_ids, self.os_admin, port_security)
             self.servers.append(server)
             self.servers_keypairs[server['id']] = keypair
             self.server_fixed_ips[server['id']] = (
@@ -1228,7 +1252,8 @@
         import_rts = import_rts or []
         export_rts = export_rts or []
         self.bgpvpn = self.create_bgpvpn(
-            self.bgpvpn_admin_client, tenant_id=self.bgpvpn_client.tenant_id,
+            self.bgpvpn_admin_client,
+            tenant_id=self.bgpvpn_admin_client.project_id,
             name=name, route_targets=rts, export_targets=export_rts,
             import_targets=import_rts)
         return self.bgpvpn
@@ -1249,7 +1274,7 @@
     def _associate_all_nets_to_bgpvpn(self, bgpvpn=None):
         bgpvpn = bgpvpn or self.bgpvpn
         for network in self.networks.values():
-            self.bgpvpn_client.create_network_association(
+            self.bgpvpn_admin_client.create_network_association(
                 bgpvpn['id'], network['id'])
         LOG.debug('BGPVPN network associations completed')
 
@@ -1360,3 +1385,16 @@
         subnet = self.subnets[NET_A][0]
         self.router = self._create_router_and_associate_fip(0, subnet)
         self._check_l3_bgpvpn(should_succeed=should_succeed)
+
+    def _delete_router(self, router, routers_client=None, ports_client=None):
+        if not routers_client:
+            routers_client = self.routers_client
+        if not ports_client:
+            ports_client = self.ports_client
+        ports_rsp = ports_client.list_ports(device_id=router['id'])
+        interfaces = ports_rsp['ports']
+        for i in interfaces:
+            test_utils.call_and_ignore_notfound_exc(
+                routers_client.remove_router_interface, router['id'],
+                subnet_id=i['fixed_ips'][0]['subnet_id'])
+        routers_client.delete_router(router['id'])
diff --git a/neutron_tempest_plugin/common/ip.py b/neutron_tempest_plugin/common/ip.py
index e2f6a4a..07bbe69 100644
--- a/neutron_tempest_plugin/common/ip.py
+++ b/neutron_tempest_plugin/common/ip.py
@@ -391,9 +391,9 @@
 
 
 def list_iptables(version=constants.IP_VERSION_4, namespace=None):
-    cmd = ''
+    cmd = 'sudo '
     if namespace:
-        cmd = 'sudo ip netns exec %s ' % namespace
+        cmd += 'ip netns exec %s ' % namespace
     cmd += ('iptables-save' if version == constants.IP_VERSION_4 else
             'ip6tables-save')
     return shell.execute(cmd).stdout
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index aea79ad..4fad1fa 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -73,6 +73,10 @@
                choices=['None', 'openvswitch', 'ovn',
                         'iptables_hybrid', 'iptables'],
                help='Driver for security groups firewall in the L2 agent'),
+    cfg.StrOpt('dns_domain',
+               default='openstackgate.local',
+               help='dns_domain value configured at neutron.conf, which will '
+                    'be used for the DNS configuration of the instances'),
 
     # Multicast tests settings
     cfg.StrOpt('multicast_group_range',
diff --git a/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
index 0dd18f1..4341ec7 100644
--- a/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
+++ b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
@@ -336,7 +336,10 @@
             ports=[intf_2['port_id']])
         updated_fwg = body["firewall_group"]
         self.assertEqual([intf_2['port_id']], updated_fwg['ports'])
-
+        # Wait for the firewall resource to become ready
+        self._wait_until_ready(fwg_id)
+        # Disassociate all ports with this firewall group
+        self.firewall_groups_client.update_firewall_group(fwg_id, ports=[])
         # Delete firewall_group
         self.firewall_groups_client.delete_firewall_group(fwg_id)
 
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
index 517c96e..9cc0a6a 100644
--- a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
@@ -67,7 +67,7 @@
         if not client:
             client = self.routers_client
         if not tenant_id:
-            tenant_id = client.tenant_id
+            tenant_id = client.project_id
         name = data_utils.rand_name(namestart)
         result = client.create_router(name=name,
                                       admin_state_up=True,
diff --git a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
index 3ec231e..fe0f3fc 100644
--- a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
+++ b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
@@ -127,7 +127,7 @@
             admin_state_up=True,
             external_network_id=CONF.network.public_network_id,
             enable_snat=False,
-            project_id=cls.os_primary.network_client.tenant_id)
+            project_id=cls.os_primary.network_client.project_id)
         network = cls.create_network(network_name='right-network')
         subnet = cls.create_subnet(
             network,
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index b9bf36f..9d53f79 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -74,16 +74,16 @@
     return cmd
 
 
-def get_ncat_client_cmd(ip_address, port, protocol):
-    udp = ''
-    if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
-        udp = '-u'
+def get_ncat_client_cmd(ip_address, port, protocol, ssh_client=None):
     cmd = 'echo "knock knock" | nc '
-    ncat_version = get_ncat_version()
+    ncat_version = get_ncat_version(ssh_client=ssh_client)
     if ncat_version > packaging_version.Version('7.60'):
-        cmd += '-z '
-    cmd += '-w 1 %(udp)s %(host)s %(port)s' % {
-        'udp': udp, 'host': ip_address, 'port': port}
+        cmd += '-d 1 '
+    if protocol.lower() == neutron_lib_constants.PROTO_NAME_UDP:
+        cmd += '-u '
+        if ncat_version > packaging_version.Version('7.60'):
+            cmd += '-z '
+    cmd += '-w 1 %(host)s %(port)s' % {'host': ip_address, 'port': port}
     return cmd
 
 
@@ -190,6 +190,26 @@
             port_range_max=22)
 
     @classmethod
+    def create_ingress_metadata_secgroup_rule(cls, secgroup_id=None):
+        """This rule is intended to permit inbound metadata traffic
+
+        Allowing ingress traffic from metadata server, required only for
+        stateless security groups.
+        """
+        # NOTE(slaweq): in case of stateless security groups, there is no
+        # "related" or "established" traffic matching at all so even if
+        # egress traffic to 169.254.169.254 is allowed by default SG, we
+        # need to explicitly allow ingress traffic from the metadata server
+        # to be able to receive responses in the guest vm
+        cls.create_security_group_rule(
+            security_group_id=secgroup_id,
+            direction=neutron_lib_constants.INGRESS_DIRECTION,
+            protocol=neutron_lib_constants.PROTO_NAME_TCP,
+            remote_ip_prefix='169.254.169.254/32',
+            description='metadata out'
+        )
+
+    @classmethod
     def create_pingable_secgroup_rule(cls, secgroup_id=None,
                                       client=None):
         """This rule is intended to permit inbound ping
@@ -610,14 +630,15 @@
             self._log_local_network_status()
             raise
 
-    def nc_client(self, ip_address, port, protocol):
+    def nc_client(self, ip_address, port, protocol, ssh_client=None):
         """Check connectivity to TCP/UDP port at host via nc.
 
-        Client is always executed locally on host where tests are executed.
+        If ssh_client is not given, it is executed locally on host where tests
+        are executed. Otherwise ssh_client object is used to execute it.
         """
-        cmd = get_ncat_client_cmd(ip_address, port, protocol)
-        result = shell.execute_local_command(cmd)
-        self.assertEqual(0, result.exit_status)
+        cmd = get_ncat_client_cmd(ip_address, port, protocol,
+                                  ssh_client=ssh_client)
+        result = shell.execute(cmd, ssh_client=ssh_client, check=False)
         return result.stdout
 
     def _ensure_public_router(self, client=None, tenant_id=None):
@@ -632,7 +653,7 @@
         if not client:
             client = self.client
         if not tenant_id:
-            tenant_id = client.tenant_id
+            tenant_id = client.project_id
         router_id = CONF.network.public_router_id
         network_id = CONF.network.public_network_id
         if router_id:
@@ -654,3 +675,15 @@
         router = self.client.update_router(
             router['id'], **kwargs)['router']
         self.assertEqual(admin_state_up, router['admin_state_up'])
+
+    def _check_cmd_installed_on_server(self, ssh_client, server, cmd):
+        try:
+            ssh_client.execute_script('which %s' % cmd)
+        except SSH_EXC_TUPLE as ssh_e:
+            LOG.debug(ssh_e)
+            self._log_console_output([server])
+            self._log_local_network_status()
+            raise
+        except exceptions.SSHScriptFailed:
+            raise self.skipException(
+                "%s is not available on server %s" % (cmd, server['id']))
diff --git a/neutron_tempest_plugin/scenario/test_connectivity.py b/neutron_tempest_plugin/scenario/test_connectivity.py
index ca7d755..5608dae 100644
--- a/neutron_tempest_plugin/scenario/test_connectivity.py
+++ b/neutron_tempest_plugin/scenario/test_connectivity.py
@@ -211,7 +211,7 @@
             network, cidr=str(subnet_cidr), gateway=str(gw_ip))
 
         non_dvr_router = self.create_router_by_client(
-            tenant_id=self.client.tenant_id,
+            tenant_id=self.client.project_id,
             is_admin=True,
             router_name=data_utils.rand_name("nondvr-2-routers-same-network"),
             admin_state_up=True,
@@ -219,7 +219,7 @@
         self.create_router_interface(non_dvr_router['id'], subnet['id'])
 
         dvr_router = self.create_router_by_client(
-            tenant_id=self.client.tenant_id,
+            tenant_id=self.client.project_id,
             is_admin=True,
             router_name=data_utils.rand_name("dvr-2-rotuers-same-network"),
             admin_state_up=True,
diff --git a/neutron_tempest_plugin/scenario/test_dns_integration.py b/neutron_tempest_plugin/scenario/test_dns_integration.py
index 67824fc..be9a477 100644
--- a/neutron_tempest_plugin/scenario/test_dns_integration.py
+++ b/neutron_tempest_plugin/scenario/test_dns_integration.py
@@ -290,7 +290,7 @@
 
         name = data_utils.rand_name('test-domain')
         zone_name = "%s.%s.%s.zone." % (cls.client.user_id,
-                                        cls.client.tenant_id,
+                                        cls.client.project_id,
                                         name)
         dns_domain_template = "<user_id>.<project_id>.%s.zone." % name
 
diff --git a/neutron_tempest_plugin/scenario/test_dvr.py b/neutron_tempest_plugin/scenario/test_dvr.py
index fa2e9d4..a37abf4 100644
--- a/neutron_tempest_plugin/scenario/test_dvr.py
+++ b/neutron_tempest_plugin/scenario/test_dvr.py
@@ -64,7 +64,7 @@
         The test is done by putting the SNAT port down on controller node.
         """
         router = self.create_router_by_client(
-            distributed=True, tenant_id=self.client.tenant_id, is_admin=True,
+            distributed=True, tenant_id=self.client.project_id, is_admin=True,
             ha=False)
         self.setup_network_and_server(router=router)
         self._check_snat_port_connectivity()
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
index 692bb70..e705241 100644
--- a/neutron_tempest_plugin/scenario/test_internal_dns.py
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -129,12 +129,13 @@
             servers=[self.server, leia])
 
         resolv_conf = ssh_client.exec_command('cat /etc/resolv.conf')
-        self.assertIn('openstackgate.local', resolv_conf)
+        dns_domain = CONF.neutron_plugin_options.dns_domain
+        self.assertIn(dns_domain, resolv_conf)
         self.assertNotIn('starwars', resolv_conf)
 
         self.check_remote_connectivity(ssh_client, 'leia',
                                        servers=[self.server, leia])
-        self.check_remote_connectivity(ssh_client, 'leia.openstackgate.local',
+        self.check_remote_connectivity(ssh_client, 'leia.' + dns_domain,
                                        servers=[self.server, leia])
 
     @utils.requires_ext(extension="dns-integration", service="network")
diff --git a/neutron_tempest_plugin/scenario/test_mac_learning.py b/neutron_tempest_plugin/scenario/test_mac_learning.py
index 409a6d8..db340ed 100644
--- a/neutron_tempest_plugin/scenario/test_mac_learning.py
+++ b/neutron_tempest_plugin/scenario/test_mac_learning.py
@@ -116,18 +116,6 @@
                                           pkey=self.keypair['private_key'])
         return server
 
-    def _check_cmd_installed_on_server(self, ssh_client, server, cmd):
-        try:
-            ssh_client.execute_script('which %s' % cmd)
-        except base.SSH_EXC_TUPLE as ssh_e:
-            LOG.debug(ssh_e)
-            self._log_console_output([server])
-            self._log_local_network_status()
-            raise
-        except exceptions.SSHScriptFailed:
-            raise self.skipException(
-                "%s is not available on server %s" % (cmd, server['id']))
-
     def _prepare_sender(self, server, address):
         check_script = get_sender_script(self.sender_output_file, address,
                                          self.completed_message)
diff --git a/neutron_tempest_plugin/scenario/test_migration.py b/neutron_tempest_plugin/scenario/test_migration.py
index 410c64e..3c0d384 100644
--- a/neutron_tempest_plugin/scenario/test_migration.py
+++ b/neutron_tempest_plugin/scenario/test_migration.py
@@ -125,7 +125,7 @@
     def _test_migration(self, before_dvr, before_ha, after_dvr, after_ha):
         router = self.create_router_by_client(
             distributed=before_dvr, ha=before_ha,
-            tenant_id=self.client.tenant_id, is_admin=True)
+            tenant_id=self.client.project_id, is_admin=True)
 
         self.setup_network_and_server(router=router)
         self._wait_until_router_ports_ready(
diff --git a/neutron_tempest_plugin/scenario/test_mtu.py b/neutron_tempest_plugin/scenario/test_mtu.py
index 31319ec..ea62fcf 100644
--- a/neutron_tempest_plugin/scenario/test_mtu.py
+++ b/neutron_tempest_plugin/scenario/test_mtu.py
@@ -101,7 +101,7 @@
 
     def _create_setup(self):
         self.admin_client = self.os_admin.network_client
-        net_kwargs = {'tenant_id': self.client.tenant_id}
+        net_kwargs = {'tenant_id': self.client.project_id}
         for net_type in ['vxlan', 'gre']:
             net_kwargs['name'] = '-'.join([net_type, 'net'])
             net_kwargs['provider:network_type'] = net_type
@@ -186,7 +186,7 @@
     def _create_setup(self):
         self.admin_client = self.os_admin.network_client
         for test_net in self._get_network_params():
-            test_net['tenant_id'] = self.client.tenant_id
+            test_net['tenant_id'] = self.client.project_id
             test_net['name'] = data_utils.rand_name('net')
             cidr = None if 'cidr' not in test_net else test_net.pop('cidr')
             network = self.admin_client.create_network(**test_net)[
diff --git a/neutron_tempest_plugin/scenario/test_multicast.py b/neutron_tempest_plugin/scenario/test_multicast.py
index 4fd41cf..390e0f0 100644
--- a/neutron_tempest_plugin/scenario/test_multicast.py
+++ b/neutron_tempest_plugin/scenario/test_multicast.py
@@ -213,18 +213,6 @@
                                             server, PYTHON3_BIN)
         return server
 
-    def _check_cmd_installed_on_server(self, ssh_client, server, cmd):
-        try:
-            ssh_client.execute_script('which %s' % cmd)
-        except base.SSH_EXC_TUPLE as ssh_e:
-            LOG.debug(ssh_e)
-            self._log_console_output([server])
-            self._log_local_network_status()
-            raise
-        except exceptions.SSHScriptFailed:
-            raise self.skipException(
-                "%s is not available on server %s" % (cmd, server['id']))
-
     def _prepare_sender(self, server, mcast_address):
         check_script = get_sender_script(
             group=mcast_address, port=self.multicast_port,
diff --git a/neutron_tempest_plugin/scenario/test_security_groups.py b/neutron_tempest_plugin/scenario/test_security_groups.py
index 5af84db..ed9ff8c 100644
--- a/neutron_tempest_plugin/scenario/test_security_groups.py
+++ b/neutron_tempest_plugin/scenario/test_security_groups.py
@@ -17,22 +17,37 @@
 from neutron_lib import constants
 import testtools
 
+from oslo_log import log
 from tempest.common import utils as tempest_utils
 from tempest.common import waiters
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 
+from neutron_tempest_plugin.common import ip
 from neutron_tempest_plugin.common import ssh
 from neutron_tempest_plugin.common import utils
 from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions
 from neutron_tempest_plugin.scenario import base
 from neutron_tempest_plugin.scenario import constants as const
 
 CONF = config.CONF
+LOG = log.getLogger(__name__)
+EPHEMERAL_PORT_RANGE = {'min': 32768, 'max': 65535}
 
 
-class NetworkSecGroupTest(base.BaseTempestTestCase):
+def get_capture_script(interface, tcp_port, packet_types, result_file):
+    return """#!/bin/bash
+tcpdump -i %(interface)s -vvneA -s0 -l -c1 \
+"dst port %(port)s and tcp[tcpflags] == %(packet_types)s" &> %(result)s &
+    """ % {'interface': interface,
+           'port': tcp_port,
+           'packet_types': packet_types,
+           'result': result_file}
+
+
+class BaseNetworkSecGroupTest(base.BaseTempestTestCase):
     credentials = ['primary', 'admin']
     required_extensions = ['router', 'security-group']
 
@@ -70,46 +85,69 @@
 
     @classmethod
     def setup_credentials(cls):
-        super(NetworkSecGroupTest, cls).setup_credentials()
+        super(BaseNetworkSecGroupTest, cls).setup_credentials()
         cls.network_client = cls.os_admin.network_client
 
     @classmethod
     def setup_clients(cls):
-        super(NetworkSecGroupTest, cls).setup_clients()
+        super(BaseNetworkSecGroupTest, cls).setup_clients()
         cls.project_id = cls.os_primary.credentials.tenant_id
 
     @classmethod
     def resource_setup(cls):
-        super(NetworkSecGroupTest, cls).resource_setup()
+        super(BaseNetworkSecGroupTest, cls).resource_setup()
         # setup basic topology for servers we can log into it
+        cls.reserve_external_subnet_cidrs()
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
-        router = cls.create_router_by_client()
-        cls.create_router_interface(router['id'], cls.subnet['id'])
+        cls.router = cls.create_router_by_client()
+        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+        if cls.ipv6_mode:
+            cls.subnet_v6 = cls.create_subnet(
+                cls.network,
+                ip_version=constants.IP_VERSION_6,
+                ipv6_ra_mode=cls.ipv6_mode,
+                ipv6_address_mode=cls.ipv6_mode)
+            cls.create_router_interface(cls.router['id'], cls.subnet_v6['id'])
         cls.keypair = cls.create_keypair()
 
     def setUp(self):
-        super(NetworkSecGroupTest, self).setUp()
+        super(BaseNetworkSecGroupTest, self).setUp()
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
                         self.network_client.reset_quotas, self.project_id)
         self.network_client.update_quotas(self.project_id, security_group=-1)
+        self.network_client.update_quotas(self.project_id,
+                                          security_group_rule=-1)
 
     def create_vm_testing_sec_grp(self, num_servers=2, security_groups=None,
-                                  ports=None):
+                                  ports=None, network_id=None,
+                                  use_advanced_image=False):
         """Create instance for security group testing
 
         :param num_servers (int): number of servers to spawn
         :param security_groups (list): list of security groups
         :param ports* (list): list of ports
+        :param use_advanced_image (bool): use Cirros (False) or
+                advanced guest image
         *Needs to be the same length as num_servers
         """
+        if (not use_advanced_image or
+                CONF.neutron_plugin_options.default_image_is_advanced):
+            flavor_ref = CONF.compute.flavor_ref
+            image_ref = CONF.compute.image_ref
+            username = CONF.validation.image_ssh_user
+        else:
+            flavor_ref = CONF.neutron_plugin_options.advanced_image_flavor_ref
+            image_ref = CONF.neutron_plugin_options.advanced_image_ref
+            username = CONF.neutron_plugin_options.advanced_image_ssh_user
+        network_id = network_id or self.network['id']
         servers, fips, server_ssh_clients = ([], [], [])
         for i in range(num_servers):
             server_args = {
-                'flavor_ref': CONF.compute.flavor_ref,
-                'image_ref': CONF.compute.image_ref,
+                'flavor_ref': flavor_ref,
+                'image_ref': image_ref,
                 'key_name': self.keypair['name'],
-                'networks': [{'uuid': self.network['id']}],
+                'networks': [{'uuid': network_id}],
                 'security_groups': security_groups
             }
             if ports is not None:
@@ -120,30 +158,135 @@
                 self.os_primary.servers_client, server['server']['id'],
                 const.SERVER_STATUS_ACTIVE)
             port = self.client.list_ports(
-                network_id=self.network['id'], device_id=server['server'][
+                network_id=network_id, device_id=server['server'][
                     'id'])['ports'][0]
             fips.append(self.create_floatingip(port=port))
             server_ssh_clients.append(ssh.Client(
-                fips[i]['floating_ip_address'], CONF.validation.image_ssh_user,
+                fips[i]['floating_ip_address'], username,
                 pkey=self.keypair['private_key']))
         return server_ssh_clients, fips, servers
 
+    def _get_default_security_group(self):
+        sgs = self.os_primary.network_client.list_security_groups(
+            project_id=self.project_id)['security_groups']
+        for sg in sgs:
+            if sg['name'] == 'default':
+                return sg
+
+    def _create_security_group(self, name_prefix, **kwargs):
+        if self.stateless_sg:
+            kwargs['stateful'] = False
+        return super(BaseNetworkSecGroupTest, self).create_security_group(
+            name=data_utils.rand_name(name_prefix), **kwargs)
+
+    def _create_client_and_server_vms(
+            self, allowed_tcp_port=None, use_advanced_image=False):
+        networks = {
+            'server': self.network,
+            'client': self.create_network()}
+        subnet = self.create_subnet(networks['client'])
+        self.create_router_interface(self.router['id'], subnet['id'])
+
+        security_groups = {}
+        for sg_name in ["server", "client"]:
+            sg = self._create_security_group('vm_%s_secgrp' % sg_name)
+            self.create_loginable_secgroup_rule(
+                secgroup_id=sg['id'])
+            if allowed_tcp_port:
+                self.create_security_group_rule(
+                    security_group_id=sg['id'],
+                    protocol=constants.PROTO_NAME_TCP,
+                    direction=constants.INGRESS_DIRECTION,
+                    port_range_min=allowed_tcp_port,
+                    port_range_max=allowed_tcp_port)
+            else:
+                self.create_pingable_secgroup_rule(sg['id'])
+            if self.stateless_sg:
+                self.create_ingress_metadata_secgroup_rule(
+                    secgroup_id=sg['id'])
+            security_groups[sg_name] = sg
+        # NOTE(slaweq): we need to iterate over create_vm_testing_sec_grp as
+        # this method plugs all SGs to all VMs and we need each vm to use other
+        # SGs
+        ssh_clients = {}
+        fips = {}
+        servers = {}
+        for server_name, sg in security_groups.items():
+            _ssh_clients, _fips, _servers = self.create_vm_testing_sec_grp(
+                num_servers=1,
+                security_groups=[{'name': sg['name']}],
+                network_id=networks[server_name]['id'],
+                use_advanced_image=use_advanced_image)
+            ssh_clients[server_name] = _ssh_clients[0]
+            fips[server_name] = _fips[0]
+            servers[server_name] = _servers[0]
+        return ssh_clients, fips, servers, security_groups
+
+    def _test_connectivity_between_vms_using_different_sec_groups(self):
+        TEST_TCP_PORT = 1022
+        ssh_clients, fips, servers, security_groups = (
+            self._create_client_and_server_vms(TEST_TCP_PORT))
+
+        # make sure tcp connectivity between vms works fine
+        for fip in fips.values():
+            self.check_connectivity(fip['floating_ip_address'],
+                                    CONF.validation.image_ssh_user,
+                                    self.keypair['private_key'])
+        # Check connectivity between servers
+        def _message_received(server_ssh_client, client_ssh_client,
+                              dest_fip, servers):
+            expected_msg = "Test_msg"
+            utils.kill_nc_process(server_ssh_client)
+            self.nc_listen(server_ssh_client,
+                           TEST_TCP_PORT,
+                           constants.PROTO_NAME_TCP,
+                           expected_msg,
+                           list(servers.values()))
+            try:
+                received_msg = self.nc_client(
+                    dest_fip,
+                    TEST_TCP_PORT,
+                    constants.PROTO_NAME_TCP,
+                    ssh_client=client_ssh_client)
+                return received_msg and expected_msg in received_msg
+            except exceptions.ShellCommandFailed:
+                return False
+
+        if self.stateless_sg:
+            # In case of stateless SG connectivity will not work without
+            # explicit allow ingress response from server to client
+            utils.wait_until_true(
+                lambda: not _message_received(
+                    ssh_clients['server'], ssh_clients['client'],
+                    fips['server']['fixed_ip_address'], servers))
+            self.create_security_group_rule(
+                security_group_id=security_groups['client']['id'],
+                protocol=constants.PROTO_NAME_TCP,
+                direction=constants.INGRESS_DIRECTION,
+                port_range_min=EPHEMERAL_PORT_RANGE['min'],
+                port_range_max=EPHEMERAL_PORT_RANGE['max'])
+
+        utils.wait_until_true(
+            lambda: _message_received(
+                ssh_clients['server'], ssh_clients['client'],
+                fips['server']['fixed_ip_address'], servers))
+
     def _test_ip_prefix(self, rule_list, should_succeed):
         # Add specific remote prefix to VMs and check connectivity
-        ssh_secgrp_name = data_utils.rand_name('ssh_secgrp')
-        icmp_secgrp_name = data_utils.rand_name('icmp_secgrp_with_cidr')
-        ssh_secgrp = self.os_primary.network_client.create_security_group(
-            name=ssh_secgrp_name)
+        ssh_secgrp = self._create_security_group('ssh_secgrp')
         self.create_loginable_secgroup_rule(
-            secgroup_id=ssh_secgrp['security_group']['id'])
-        icmp_secgrp = self.os_primary.network_client.create_security_group(
-            name=icmp_secgrp_name)
+            secgroup_id=ssh_secgrp['id'])
+        if self.stateless_sg:
+            self.create_ingress_metadata_secgroup_rule(
+                secgroup_id=ssh_secgrp['id'])
+        icmp_secgrp = self._create_security_group('icmp_secgrp')
         self.create_secgroup_rules(
-            rule_list, secgroup_id=icmp_secgrp['security_group']['id'])
+            rule_list, secgroup_id=icmp_secgrp['id'])
         for sec_grp in (ssh_secgrp, icmp_secgrp):
-            self.security_groups.append(sec_grp['security_group'])
-        security_groups_list = [{'name': ssh_secgrp_name},
-                                {'name': icmp_secgrp_name}]
+            self.security_groups.append(sec_grp)
+        security_groups_list = [
+            {'name': ssh_secgrp['name']},
+            {'name': icmp_secgrp['name']}]
         server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
             security_groups=security_groups_list)
 
@@ -157,14 +300,18 @@
             'fixed_ip_address'], should_succeed=should_succeed,
             servers=servers)
 
-    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d764')
-    def test_default_sec_grp_scenarios(self):
+    def _test_default_sec_grp_scenarios(self):
+        # Ensure that SG used in tests is stateful or stateless as required
+        default_sg_id = self._get_default_security_group()['id']
+        self.os_primary.network_client.update_security_group(
+            default_sg_id, stateful=not self.stateless_sg)
+        if self.stateless_sg:
+            self.create_ingress_metadata_secgroup_rule(
+                secgroup_id=default_sg_id)
         server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp()
+
         # Check ssh connectivity when you add sec group rule, enabling ssh
-        self.create_loginable_secgroup_rule(
-            self.os_primary.network_client.list_security_groups()[
-                'security_groups'][0]['id']
-        )
+        self.create_loginable_secgroup_rule(default_sg_id)
         self.check_connectivity(fips[0]['floating_ip_address'],
                                 CONF.validation.image_ssh_user,
                                 self.keypair['private_key'])
@@ -180,6 +327,10 @@
             servers=servers)
 
         # Check ICMP connectivity from VM to external network
+        if self.stateless_sg:
+            # NOTE(slaweq): in case of stateless SG explicit ingress rule for
+            # the ICMP replies needs to be added too
+            self.create_pingable_secgroup_rule(default_sg_id)
         subnets = self.os_admin.network_client.list_subnets(
             network_id=CONF.network.public_network_id)['subnets']
         ext_net_ip = None
@@ -190,12 +341,12 @@
         self.assertTrue(ext_net_ip)
         self.check_remote_connectivity(server_ssh_clients[0], ext_net_ip,
                                        servers=servers)
+        return server_ssh_clients, fips, servers
 
-    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d864')
-    def test_protocol_number_rule(self):
+    def _test_protocol_number_rule(self):
         # protocol number is added instead of str in security rule creation
         name = data_utils.rand_name("test_protocol_number_rule")
-        security_group = self.create_security_group(name=name)
+        security_group = self._create_security_group(name)
         port = self.create_port(network=self.network, name=name,
                                 security_groups=[security_group['id']])
         _, fips, _ = self.create_vm_testing_sec_grp(num_servers=1,
@@ -208,23 +359,22 @@
         self.create_secgroup_rules(rule_list, secgroup_id=security_group['id'])
         self.ping_ip_address(fips[0]['floating_ip_address'])
 
-    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d964')
-    def test_two_sec_groups(self):
+    def _test_two_sec_groups(self):
         # add 2 sec groups to VM and test rules of both are working
-        ssh_secgrp_name = data_utils.rand_name('ssh_secgrp')
-        icmp_secgrp_name = data_utils.rand_name('icmp_secgrp')
-        ssh_secgrp = self.os_primary.network_client.create_security_group(
-            name=ssh_secgrp_name)
+        ssh_secgrp = self._create_security_group('ssh_secgrp')
         self.create_loginable_secgroup_rule(
-            secgroup_id=ssh_secgrp['security_group']['id'])
-        icmp_secgrp = self.os_primary.network_client.create_security_group(
-            name=icmp_secgrp_name)
+            secgroup_id=ssh_secgrp['id'])
+        icmp_secgrp = self._create_security_group('icmp_secgrp')
         self.create_pingable_secgroup_rule(
-            secgroup_id=icmp_secgrp['security_group']['id'])
+            secgroup_id=icmp_secgrp['id'])
+        if self.stateless_sg:
+            self.create_ingress_metadata_secgroup_rule(
+                secgroup_id=ssh_secgrp['id'])
         for sec_grp in (ssh_secgrp, icmp_secgrp):
-            self.security_groups.append(sec_grp['security_group'])
-        security_groups_list = [{'name': ssh_secgrp_name},
-                                {'name': icmp_secgrp_name}]
+            self.security_groups.append(sec_grp)
+        security_groups_list = [
+            {'name': ssh_secgrp['name']},
+            {'name': icmp_secgrp['name']}]
         server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
             num_servers=1, security_groups=security_groups_list)
         # make sure ssh connectivity works
@@ -239,7 +389,7 @@
 
         # update port with ssh security group only
         self.os_primary.network_client.update_port(
-            port_id, security_groups=[ssh_secgrp['security_group']['id']])
+            port_id, security_groups=[ssh_secgrp['id']])
 
         # make sure ssh connectivity works
         self.check_connectivity(fips[0]['floating_ip_address'],
@@ -252,9 +402,7 @@
 
         # update port with ssh and ICMP security groups
         self.os_primary.network_client.update_port(
-            port_id, security_groups=[
-                icmp_secgrp['security_group']['id'],
-                ssh_secgrp['security_group']['id']])
+            port_id, security_groups=[icmp_secgrp['id'], ssh_secgrp['id']])
 
         # make sure ssh connectivity  works after update
         self.check_connectivity(fips[0]['floating_ip_address'],
@@ -264,82 +412,18 @@
         # make sure ICMP connectivity works after update
         self.ping_ip_address(fips[0]['floating_ip_address'])
 
-    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d664')
-    def test_ip_prefix(self):
-        cidr = self.subnet['cidr']
-        rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
-                      'direction': constants.INGRESS_DIRECTION,
-                      'remote_ip_prefix': cidr}]
-        self._test_ip_prefix(rule_list, should_succeed=True)
-
-    @decorators.attr(type='negative')
-    @decorators.idempotent_id('a01cd2ef-3cfc-4614-8aac-9d1333ea21dd')
-    def test_ip_prefix_negative(self):
-        # define bad CIDR
-        cidr = '10.100.0.254/32'
-        rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
-                      'direction': constants.INGRESS_DIRECTION,
-                      'remote_ip_prefix': cidr}]
-        self._test_ip_prefix(rule_list, should_succeed=False)
-
-    @decorators.idempotent_id('01f0ddca-b049-47eb-befd-82acb502c9ec')
-    def test_established_tcp_session_after_re_attachinging_sg(self):
-        """Test existing connection remain open after sg has been re-attached
-
-        Verifies that new packets can pass over the existing connection when
-        the security group has been removed from the server and then added
-        back
-        """
-
-        ssh_sg = self.create_security_group()
-        self.create_loginable_secgroup_rule(secgroup_id=ssh_sg['id'])
-        vm_ssh, fips, vms = self.create_vm_testing_sec_grp(
-                security_groups=[{'name': ssh_sg['name']}])
-        sg = self.create_security_group()
-        nc_rule = [{'protocol': constants.PROTO_NUM_TCP,
-                    'direction': constants.INGRESS_DIRECTION,
-                    'port_range_min': 6666,
-                    'port_range_max': 6666}]
-        self.create_secgroup_rules(nc_rule, secgroup_id=sg['id'])
-        srv_port = self.client.list_ports(network_id=self.network['id'],
-                device_id=vms[1]['server']['id'])['ports'][0]
-        srv_ip = srv_port['fixed_ips'][0]['ip_address']
-        with utils.StatefulConnection(
-                vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
-            self.client.update_port(srv_port['id'],
-                    security_groups=[ssh_sg['id'], sg['id']])
-            con.test_connection()
-        with utils.StatefulConnection(
-                vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
-            self.client.update_port(
-                    srv_port['id'], security_groups=[ssh_sg['id']])
-            con.test_connection(should_pass=False)
-        with utils.StatefulConnection(
-                vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
-            self.client.update_port(srv_port['id'],
-                    security_groups=[ssh_sg['id'], sg['id']])
-            con.test_connection()
-            self.client.update_port(srv_port['id'],
-                    security_groups=[ssh_sg['id']])
-            con.test_connection(should_pass=False)
-            self.client.update_port(srv_port['id'],
-                    security_groups=[ssh_sg['id'], sg['id']])
-            con.test_connection()
-
-    @decorators.idempotent_id('7ed39b86-006d-40fb-887a-ae46693dabc9')
-    def test_remote_group(self):
+    def _test_remote_group(self):
         # create a new sec group
-        ssh_secgrp_name = data_utils.rand_name('ssh_secgrp')
-        ssh_secgrp = self.os_primary.network_client.create_security_group(
-            name=ssh_secgrp_name)
-        # add cleanup
-        self.security_groups.append(ssh_secgrp['security_group'])
+        ssh_secgrp = self._create_security_group('ssh_secgrp')
         # configure sec group to support SSH connectivity
         self.create_loginable_secgroup_rule(
-            secgroup_id=ssh_secgrp['security_group']['id'])
+            secgroup_id=ssh_secgrp['id'])
+        if self.stateless_sg:
+            self.create_ingress_metadata_secgroup_rule(
+                secgroup_id=ssh_secgrp['id'])
         # spawn two instances with the sec group created
         server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
-            security_groups=[{'name': ssh_secgrp_name}])
+            security_groups=[{'name': ssh_secgrp['name']}])
         # verify SSH functionality
         for i in range(2):
             self.check_connectivity(fips[i]['floating_ip_address'],
@@ -352,9 +436,9 @@
         # add ICMP support to the remote group
         rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
                       'direction': constants.INGRESS_DIRECTION,
-                      'remote_group_id': ssh_secgrp['security_group']['id']}]
+                      'remote_group_id': ssh_secgrp['id']}]
         self.create_secgroup_rules(
-            rule_list, secgroup_id=ssh_secgrp['security_group']['id'])
+            rule_list, secgroup_id=ssh_secgrp['id'])
         # verify ICMP connectivity between instances works
         self.check_remote_connectivity(
             server_ssh_clients[0], fips[1]['fixed_ip_address'],
@@ -363,14 +447,7 @@
         self.ping_ip_address(fips[0]['floating_ip_address'],
                              should_succeed=False)
 
-    @testtools.skipUnless(
-        CONF.neutron_plugin_options.firewall_driver == 'openvswitch',
-        "Openvswitch agent is required to run this test")
-    @decorators.idempotent_id('678dd4c0-2953-4626-b89c-8e7e4110ec4b')
-    @tempest_utils.requires_ext(extension="address-group", service="network")
-    @tempest_utils.requires_ext(
-        extension="security-groups-remote-address-group", service="network")
-    def test_remote_group_and_remote_address_group(self):
+    def _test_remote_group_and_remote_address_group(self):
         """Test SG rules with remote group and remote address group
 
         This test checks the ICMP connection among two servers using a security
@@ -380,17 +457,13 @@
         them should not disable the connection.
         """
         # create a new sec group
-        ssh_secgrp_name = data_utils.rand_name('ssh_secgrp')
-        ssh_secgrp = self.os_primary.network_client.create_security_group(
-            name=ssh_secgrp_name)
-        # add cleanup
-        self.security_groups.append(ssh_secgrp['security_group'])
+        ssh_secgrp = self._create_security_group('ssh_secgrp')
         # configure sec group to support SSH connectivity
         self.create_loginable_secgroup_rule(
-            secgroup_id=ssh_secgrp['security_group']['id'])
+            secgroup_id=ssh_secgrp['id'])
         # spawn two instances with the sec group created
         server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
-            security_groups=[{'name': ssh_secgrp_name}])
+            security_groups=[{'name': ssh_secgrp['name']}])
         # verify SSH functionality
         for i in range(2):
             self.check_connectivity(fips[i]['floating_ip_address'],
@@ -403,9 +476,9 @@
         # add ICMP support to the remote group
         rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
                       'direction': constants.INGRESS_DIRECTION,
-                      'remote_group_id': ssh_secgrp['security_group']['id']}]
+                      'remote_group_id': ssh_secgrp['id']}]
         remote_sg_rid = self.create_secgroup_rules(
-            rule_list, secgroup_id=ssh_secgrp['security_group']['id'])[0]['id']
+            rule_list, secgroup_id=ssh_secgrp['id'])[0]['id']
         # verify ICMP connectivity between instances works
         self.check_remote_connectivity(
             server_ssh_clients[0], fips[1]['fixed_ip_address'],
@@ -422,7 +495,7 @@
                       'direction': constants.INGRESS_DIRECTION,
                       'remote_address_group_id': test_ag['id']}]
         remote_ag_rid = self.create_secgroup_rules(
-            rule_list, secgroup_id=ssh_secgrp['security_group']['id'])[0]['id']
+            rule_list, secgroup_id=ssh_secgrp['id'])[0]['id']
         # verify ICMP connectivity between instances still works
         self.check_remote_connectivity(
             server_ssh_clients[0], fips[1]['fixed_ip_address'],
@@ -452,8 +525,7 @@
         self.ping_ip_address(fips[0]['floating_ip_address'],
                              should_succeed=False)
 
-    @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad488')
-    def test_multiple_ports_secgroup_inheritance(self):
+    def _test_multiple_ports_secgroup_inheritance(self):
         """Test multiple port security group inheritance
 
         This test creates two ports with security groups, then
@@ -461,20 +533,20 @@
         inherited properly and enforced in these instances.
         """
         # create a security group and make it loginable and pingable
-        secgrp = self.os_primary.network_client.create_security_group(
-            name=data_utils.rand_name('secgrp'))
+        secgrp = self._create_security_group('secgrp')
         self.create_loginable_secgroup_rule(
-            secgroup_id=secgrp['security_group']['id'])
+            secgroup_id=secgrp['id'])
         self.create_pingable_secgroup_rule(
-            secgroup_id=secgrp['security_group']['id'])
-        # add security group to cleanup
-        self.security_groups.append(secgrp['security_group'])
+            secgroup_id=secgrp['id'])
+        if self.stateless_sg:
+            self.create_ingress_metadata_secgroup_rule(
+                secgroup_id=secgrp['id'])
         # create two ports with fixed IPs and the security group created
         ports = []
         for i in range(2):
             ports.append(self.create_port(
                 self.network, fixed_ips=[{'subnet_id': self.subnets[0]['id']}],
-                security_groups=[secgrp['security_group']['id']]))
+                security_groups=[secgrp['id']]))
         # spawn instances with the ports created
         server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
             ports=ports)
@@ -485,17 +557,24 @@
                                     CONF.validation.image_ssh_user,
                                     self.keypair['private_key'])
 
-    @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad489')
-    def test_multiple_ports_portrange_remote(self):
+    def _test_multiple_ports_portrange_remote(self):
+        initial_security_groups = []
+        if self.stateless_sg:
+            md_secgrp = self._create_security_group('metadata_secgrp')
+            self.create_ingress_metadata_secgroup_rule(
+                secgroup_id=md_secgrp['id'])
+            initial_security_groups.append(
+                {'name': md_secgrp['name']})
+
         ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
-            num_servers=3)
+            num_servers=3, security_groups=initial_security_groups)
         secgroups = []
         ports = []
 
         # Create remote and test security groups
         for i in range(0, 2):
             secgroups.append(
-                self.create_security_group(name='secgrp-%d' % i))
+                self._create_security_group('secgrp-%d' % i))
             # configure sec groups to support SSH connectivity
             self.create_loginable_secgroup_rule(
                 secgroup_id=secgroups[-1]['id'])
@@ -535,6 +614,21 @@
                       'port_range_min': '82',
                       'port_range_max': '83',
                       'remote_group_id': secgroups[0]['id']}]
+        if self.stateless_sg:
+            rule_list.append({
+                'protocol': constants.PROTO_NUM_TCP,
+                'direction': constants.EGRESS_DIRECTION,
+                'remote_group_id': secgroups[0]['id']})
+            # NOTE(slaweq): in case of stateless SG, the client also needs a
+            # rule which explicitly accepts ingress connections from
+            # secgroup[1]
+
+            self.create_security_group_rule(
+                security_group_id=secgroups[0]['id'],
+                protocol=constants.PROTO_NAME_TCP,
+                direction=constants.INGRESS_DIRECTION,
+                remote_group_id=secgroups[1]['id'])
+
         self.create_secgroup_rules(
             rule_list, secgroup_id=secgroups[1]['id'])
 
@@ -558,84 +652,19 @@
                     ssh_clients[0], ssh_clients[2], test_ip, port) as con:
                 con.test_connection(should_pass=False)
 
-    @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad490')
-    def test_intra_sg_isolation(self):
-        """Test intra security group isolation
-
-        This test creates a security group that does not allow ingress
-        packets from vms of the same security group. The purpose of this
-        test is to verify that intra SG traffic is properly blocked, while
-        traffic like metadata and DHCP remains working due to the
-        allow-related behavior of the egress rules (added via default).
-        """
-        # create a security group and make it loginable
-        secgrp_name = data_utils.rand_name('secgrp')
-        secgrp = self.os_primary.network_client.create_security_group(
-            name=secgrp_name)
-        secgrp_id = secgrp['security_group']['id']
-        # add security group to cleanup
-        self.security_groups.append(secgrp['security_group'])
-
-        # remove all rules and add ICMP, DHCP and metadata as egress,
-        # and ssh as ingress.
-        for sgr in secgrp['security_group']['security_group_rules']:
-            self.client.delete_security_group_rule(sgr['id'])
-
-        self.create_loginable_secgroup_rule(secgroup_id=secgrp_id)
-        rule_list = [{'direction': constants.EGRESS_DIRECTION,
-                      'protocol': constants.PROTO_NAME_TCP,
-                      'remote_ip_prefix': '169.254.169.254/32',
-                      'description': 'metadata out',
-                      },
-                     {'direction': constants.EGRESS_DIRECTION,
-                      'protocol': constants.PROTO_NAME_UDP,
-                      'port_range_min': '67',
-                      'port_range_max': '67',
-                      'description': 'dhcpv4 out',
-                      },
-                     {'direction': constants.EGRESS_DIRECTION,
-                      'protocol': constants.PROTO_NAME_ICMP,
-                      'description': 'ping out',
-                      },
-                     ]
-        self.create_secgroup_rules(rule_list, secgroup_id=secgrp_id)
-
-        # go vms, go!
-        ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
-            num_servers=2, security_groups=[{'name': secgrp_name}])
-
-        # verify SSH functionality. This will ensure that servers were
-        # able to reach dhcp + metadata servers
-        for fip in fips:
-            self.check_connectivity(fip['floating_ip_address'],
-                                    CONF.validation.image_ssh_user,
-                                    self.keypair['private_key'])
-
-        # try to ping instances without intra SG permission (should fail)
-        self.check_remote_connectivity(
-            ssh_clients[0], fips[1]['fixed_ip_address'],
-            should_succeed=False)
-        self.check_remote_connectivity(
-            ssh_clients[1], fips[0]['fixed_ip_address'],
-            should_succeed=False)
-
-        # add intra sg rule. This will allow packets from servers that
-        # are in the same sg
-        rule_list = [{'direction': constants.INGRESS_DIRECTION,
-                      'remote_group_id': secgrp_id}]
-        self.create_secgroup_rules(rule_list, secgroup_id=secgrp_id)
-
-        # try to ping instances with intra SG permission
-        self.check_remote_connectivity(
-            ssh_clients[0], fips[1]['fixed_ip_address'])
-        self.check_remote_connectivity(
-            ssh_clients[1], fips[0]['fixed_ip_address'])
-
-    @decorators.idempotent_id('cd66b826-d86c-4fb4-ab37-17c8391753cb')
-    def test_overlapping_sec_grp_rules(self):
+    def _test_overlapping_sec_grp_rules(self):
         """Test security group rules with overlapping port ranges"""
-        client_ssh, _, vms = self.create_vm_testing_sec_grp(num_servers=2)
-        tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(num_servers=1)
+        initial_security_groups = []
+        if self.stateless_sg:
+            md_secgrp = self._create_security_group('metadata_secgrp')
+            self.create_ingress_metadata_secgroup_rule(
+                secgroup_id=md_secgrp['id'])
+            initial_security_groups.append(
+                {'name': md_secgrp['name']})
+        client_ssh, _, vms = self.create_vm_testing_sec_grp(
+            num_servers=2, security_groups=initial_security_groups)
+        tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(
+            num_servers=1, security_groups=initial_security_groups)
         srv_ssh = tmp_ssh[0]
         srv_vm = tmp_vm[0]
         srv_port = self.client.list_ports(network_id=self.network['id'],
@@ -643,7 +672,7 @@
         srv_ip = srv_port['fixed_ips'][0]['ip_address']
         secgrps = []
         for i, vm in enumerate(vms):
-            sg = self.create_security_group(name='secgrp-%d' % i)
+            sg = self._create_security_group('secgrp-%d' % i)
             self.create_loginable_secgroup_rule(secgroup_id=sg['id'])
             port = self.client.list_ports(network_id=self.network['id'],
                     device_id=vm['server']['id'])['ports'][0]
@@ -663,6 +692,22 @@
         self.client.update_port(srv_port['id'],
                 security_groups=[secgrps[0]['id'], secgrps[1]['id']])
         self.create_secgroup_rules(rule_list, secgroup_id=secgrps[0]['id'])
+
+        if self.stateless_sg:
+            # NOTE(slaweq): in case of stateless SG, client needs to have also
+            # rule which will explicitly accept ingress TCP connections which
+            # will be replies from the TCP server so it will use random
+            # destination port (depends on the src port chosen by client while
+            # establishing connection)
+            self.create_security_group_rule(
+                security_group_id=secgrps[0]['id'],
+                protocol=constants.PROTO_NAME_TCP,
+                direction=constants.INGRESS_DIRECTION)
+            self.create_security_group_rule(
+                security_group_id=secgrps[1]['id'],
+                protocol=constants.PROTO_NAME_TCP,
+                direction=constants.INGRESS_DIRECTION)
+
         # The conntrack entries are ruled by the OF definitions but conntrack
         # status can change the datapath. Let's check the rules in two
         # attempts
@@ -675,8 +720,7 @@
                         client_ssh[1], srv_ssh, srv_ip, port) as con:
                     con.test_connection()
 
-    @decorators.idempotent_id('96dcd5ff-9d45-4e0d-bea0-0b438cbd388f')
-    def test_remove_sec_grp_from_active_vm(self):
+    def _test_remove_sec_grp_from_active_vm(self):
         """Tests the following:
 
         1. Create SG associated with ICMP rule
@@ -685,18 +729,15 @@
         4. Remove the security group from VM by Port update
         5. Ping the VM, expected should be FAIL
         """
-        sec_grp_name = data_utils.rand_name('test_sg')
-        secgrp = self.os_primary.network_client.create_security_group(
-            name=sec_grp_name)
-        self.security_groups.append(secgrp['security_group'])
-        sec_grp_id = secgrp['security_group']['id']
-        self.create_pingable_secgroup_rule(sec_grp_id)
+        secgrp = self._create_security_group('test_sg')
+        self.security_groups.append(secgrp)
+        self.create_pingable_secgroup_rule(secgrp['id'])
 
         ex_port = self.create_port(
             self.network, fixed_ips=[{'subnet_id': self.subnet['id']}],
-            security_groups=[sec_grp_id])
+            security_groups=[secgrp['id']])
         fip = self.create_vm_testing_sec_grp(
-            num_servers=1, security_groups=[{'name': sec_grp_name}],
+            num_servers=1, security_groups=[{'name': secgrp['name']}],
             ports=[ex_port])[1][0]
 
         self.ping_ip_address(fip['floating_ip_address'])
@@ -704,3 +745,447 @@
                                 security_groups=[])
         self.ping_ip_address(fip['floating_ip_address'],
                              should_succeed=False)
+
+
+class StatefulNetworkSecGroupTest(BaseNetworkSecGroupTest):
+    stateless_sg = False
+    ipv6_mode = None
+
+    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d764')
+    def test_default_sec_grp_scenarios(self):
+        self._test_default_sec_grp_scenarios()
+
+    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d864')
+    def test_protocol_number_rule(self):
+        self._test_protocol_number_rule()
+
+    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d964')
+    def test_two_sec_groups(self):
+        self._test_two_sec_groups()
+
+    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d664')
+    def test_ip_prefix(self):
+        cidr = self.subnet['cidr']
+        rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
+                      'direction': constants.INGRESS_DIRECTION,
+                      'remote_ip_prefix': cidr}]
+        self._test_ip_prefix(rule_list, should_succeed=True)
+
+    @decorators.attr(type='negative')
+    @decorators.idempotent_id('a01cd2ef-3cfc-4614-8aac-9d1333ea21dd')
+    def test_ip_prefix_negative(self):
+        # define bad CIDR
+        cidr = '10.100.0.254/32'
+        rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
+                      'direction': constants.INGRESS_DIRECTION,
+                      'remote_ip_prefix': cidr}]
+        self._test_ip_prefix(rule_list, should_succeed=False)
+
+    @decorators.idempotent_id('7ed39b86-006d-40fb-887a-ae46693dabc9')
+    def test_remote_group(self):
+        self._test_remote_group()
+
+    @testtools.skipUnless(
+        CONF.neutron_plugin_options.firewall_driver == 'openvswitch',
+        "Openvswitch agent is required to run this test")
+    @decorators.idempotent_id('678dd4c0-2953-4626-b89c-8e7e4110ec4b')
+    @tempest_utils.requires_ext(extension="address-group", service="network")
+    @tempest_utils.requires_ext(
+        extension="security-groups-remote-address-group", service="network")
+    def test_remote_group_and_remote_address_group(self):
+        self._test_remote_group_and_remote_address_group()
+
+    @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad488')
+    def test_multiple_ports_secgroup_inheritance(self):
+        self._test_multiple_ports_secgroup_inheritance()
+
+    @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad489')
+    def test_multiple_ports_portrange_remote(self):
+        self._test_multiple_ports_portrange_remote()
+
+    @decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad490')
+    def test_intra_sg_isolation(self):
+        """Test intra security group isolation
+
+        This test creates a security group that does not allow ingress
+        packets from vms of the same security group. The purpose of this
+        test is to verify that intra SG traffic is properly blocked, while
+        traffic like metadata and DHCP remains working due to the
+        allow-related behavior of the egress rules (added via default).
+        """
+        # create a security group and make it loginable
+        secgrp = self._create_security_group('secgrp')
+
+        # remove all rules and add ICMP, DHCP and metadata as egress,
+        # and ssh as ingress.
+        for sgr in secgrp['security_group_rules']:
+            self.client.delete_security_group_rule(sgr['id'])
+
+        self.create_loginable_secgroup_rule(secgroup_id=secgrp['id'])
+        rule_list = [{'direction': constants.EGRESS_DIRECTION,
+                      'protocol': constants.PROTO_NAME_TCP,
+                      'remote_ip_prefix': '169.254.169.254/32',
+                      'description': 'metadata out',
+                      },
+                     {'direction': constants.EGRESS_DIRECTION,
+                      'protocol': constants.PROTO_NAME_UDP,
+                      'port_range_min': '67',
+                      'port_range_max': '67',
+                      'description': 'dhcpv4 out',
+                      },
+                     {'direction': constants.EGRESS_DIRECTION,
+                      'protocol': constants.PROTO_NAME_ICMP,
+                      'description': 'ping out',
+                      },
+                     ]
+        self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])
+
+        # go vms, go!
+        ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
+            num_servers=2,
+            security_groups=[{'name': secgrp['name']}])
+
+        # verify SSH functionality. This will ensure that servers were
+        # able to reach dhcp + metadata servers
+        for fip in fips:
+            self.check_connectivity(fip['floating_ip_address'],
+                                    CONF.validation.image_ssh_user,
+                                    self.keypair['private_key'])
+
+        # try to ping instances without intra SG permission (should fail)
+        self.check_remote_connectivity(
+            ssh_clients[0], fips[1]['fixed_ip_address'],
+            should_succeed=False)
+        self.check_remote_connectivity(
+            ssh_clients[1], fips[0]['fixed_ip_address'],
+            should_succeed=False)
+
+        # add intra sg rule. This will allow packets from servers that
+        # are in the same sg
+        rule_list = [{'direction': constants.INGRESS_DIRECTION,
+                      'remote_group_id': secgrp['id']}]
+        self.create_secgroup_rules(rule_list, secgroup_id=secgrp['id'])
+
+        # try to ping instances with intra SG permission
+        self.check_remote_connectivity(
+            ssh_clients[0], fips[1]['fixed_ip_address'])
+        self.check_remote_connectivity(
+            ssh_clients[1], fips[0]['fixed_ip_address'])
+
+    @decorators.idempotent_id('cd66b826-d86c-4fb4-ab37-17c8391753cb')
+    def test_overlapping_sec_grp_rules(self):
+        self._test_overlapping_sec_grp_rules()
+
+    @decorators.idempotent_id('96dcd5ff-9d45-4e0d-bea0-0b438cbd388f')
+    def test_remove_sec_grp_from_active_vm(self):
+        self._test_remove_sec_grp_from_active_vm()
+
+    @decorators.idempotent_id('01f0ddca-b049-47eb-befd-82acb502c9ec')
+    def test_established_tcp_session_after_re_attachinging_sg(self):
+        """Test existing connection remain open after sg has been re-attached
+
+        Verifies that new packets can pass over the existing connection when
+        the security group has been removed from the server and then added
+        back
+        """
+
+        ssh_sg = self._create_security_group('ssh_sg')
+        self.create_loginable_secgroup_rule(secgroup_id=ssh_sg['id'])
+        vm_ssh, fips, vms = self.create_vm_testing_sec_grp(
+                security_groups=[{'name': ssh_sg['name']}])
+        sg = self._create_security_group('sg')
+        nc_rule = [{'protocol': constants.PROTO_NUM_TCP,
+                    'direction': constants.INGRESS_DIRECTION,
+                    'port_range_min': 6666,
+                    'port_range_max': 6666}]
+        self.create_secgroup_rules(nc_rule, secgroup_id=sg['id'])
+        srv_port = self.client.list_ports(network_id=self.network['id'],
+                device_id=vms[1]['server']['id'])['ports'][0]
+        srv_ip = srv_port['fixed_ips'][0]['ip_address']
+        with utils.StatefulConnection(
+                vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
+            self.client.update_port(srv_port['id'],
+                    security_groups=[ssh_sg['id'], sg['id']])
+            con.test_connection()
+        with utils.StatefulConnection(
+                vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
+            self.client.update_port(
+                    srv_port['id'], security_groups=[ssh_sg['id']])
+            con.test_connection(should_pass=False)
+        with utils.StatefulConnection(
+                vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
+            self.client.update_port(srv_port['id'],
+                    security_groups=[ssh_sg['id'], sg['id']])
+            con.test_connection()
+            self.client.update_port(srv_port['id'],
+                    security_groups=[ssh_sg['id']])
+            con.test_connection(should_pass=False)
+            self.client.update_port(srv_port['id'],
+                    security_groups=[ssh_sg['id'], sg['id']])
+            con.test_connection()
+
+    @decorators.idempotent_id('4a724164-bbc0-4029-a844-644ece66c026')
+    def test_connectivity_between_vms_using_different_sec_groups(self):
+        self._test_connectivity_between_vms_using_different_sec_groups()
+
+
+@testtools.skipIf(
+    CONF.neutron_plugin_options.firewall_driver in ['openvswitch', 'None'],
+    "Firewall driver other than 'openvswitch' is required to use "
+    "stateless security groups.")
+class StatelessNetworkSecGroupIPv4Test(BaseNetworkSecGroupTest):
+    required_extensions = ['security-group', 'stateful-security-group']
+    stateless_sg = True
+    ipv6_mode = None
+
+    @decorators.idempotent_id('9e193e3f-56f2-4f4e-886c-988a147958ef')
+    def test_default_sec_grp_scenarios(self):
+        self._test_default_sec_grp_scenarios()
+
+    @decorators.idempotent_id('afae8654-a389-4887-b21d-7f07ec350177')
+    def test_protocol_number_rule(self):
+        self._test_protocol_number_rule()
+
+    @decorators.idempotent_id('b51cc0eb-8f9a-49e7-96ab-61cd31243b67')
+    def test_two_sec_groups(self):
+        self._test_two_sec_groups()
+
+    @decorators.idempotent_id('07985496-58da-4c1f-a6ef-2fdd88128a81')
+    def test_ip_prefix(self):
+        cidr = self.subnet['cidr']
+        rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
+                      'direction': constants.INGRESS_DIRECTION,
+                      'remote_ip_prefix': cidr}]
+        self._test_ip_prefix(rule_list, should_succeed=True)
+
+    @decorators.attr(type='negative')
+    @decorators.idempotent_id('1ad469c4-0d8f-42ae-8ec3-46cc424565c4')
+    def test_ip_prefix_negative(self):
+        # define bad CIDR
+        cidr = '10.100.0.254/32'
+        rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
+                      'direction': constants.INGRESS_DIRECTION,
+                      'remote_ip_prefix': cidr}]
+        self._test_ip_prefix(rule_list, should_succeed=False)
+
+    @decorators.idempotent_id('fa1e93bf-67c5-4590-9962-38ee1f43a46a')
+    def test_remote_group(self):
+        self._test_remote_group()
+
+    @testtools.skipUnless(
+        CONF.neutron_plugin_options.firewall_driver == 'openvswitch',
+        "Openvswitch agent is required to run this test")
+    @decorators.idempotent_id('9fae530d-2711-4c61-a4a5-8efe6e58ab14')
+    @tempest_utils.requires_ext(extension="address-group", service="network")
+    @tempest_utils.requires_ext(
+        extension="security-groups-remote-address-group", service="network")
+    def test_remote_group_and_remote_address_group(self):
+        self._test_remote_group_and_remote_address_group()
+
+    @decorators.idempotent_id('4f1eb6db-ae7f-4f26-b371-cbd8363f9b0b')
+    def test_multiple_ports_secgroup_inheritance(self):
+        self._test_multiple_ports_secgroup_inheritance()
+
+    @decorators.idempotent_id('4043ca0a-eabb-4198-be53-3d3051cc0804')
+    def test_multiple_ports_portrange_remote(self):
+        self._test_multiple_ports_portrange_remote()
+
+    @decorators.idempotent_id('bfe25138-ceac-4944-849a-b9b90aff100f')
+    def test_overlapping_sec_grp_rules(self):
+        self._test_overlapping_sec_grp_rules()
+
+    @decorators.idempotent_id('e4340e47-39cd-49ed-967c-fc2c40b47c5a')
+    def test_remove_sec_grp_from_active_vm(self):
+        self._test_remove_sec_grp_from_active_vm()
+
+    @decorators.idempotent_id('8d4753cc-cd7a-48a0-8ece-e11efce2af10')
+    def test_reattach_sg_with_changed_mode(self):
+        sg_kwargs = {'stateful': True}
+        secgrp = self.os_primary.network_client.create_security_group(
+            name=data_utils.rand_name('secgrp'), **sg_kwargs)['security_group']
+        # add cleanup
+        self.security_groups.append(secgrp)
+
+        # now configure sec group to support required connectivity
+        self.create_pingable_secgroup_rule(secgroup_id=secgrp['id'])
+        # and create server
+        ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
+            num_servers=1, security_groups=[{'name': secgrp['name']}])
+        server_ports = self.network_client.list_ports(
+            device_id=servers[0]['server']['id'])['ports']
+
+        # make sure connectivity works
+        self.ping_ip_address(fips[0]['floating_ip_address'],
+                             should_succeed=True)
+        # remove SG from ports
+        for port in server_ports:
+            self.network_client.update_port(port['id'], security_groups=[])
+        # make sure there is now no connectivity as there's no SG attached
+        # to the port
+        self.ping_ip_address(fips[0]['floating_ip_address'],
+                             should_succeed=False)
+
+        # Update SG to be stateless
+        self.os_primary.network_client.update_security_group(
+            secgrp['id'], stateful=False)
+        # Add SG back to the ports
+        for port in server_ports:
+            self.network_client.update_port(
+                port['id'], security_groups=[secgrp['id']])
+        # Make sure connectivity works fine again
+        self.ping_ip_address(fips[0]['floating_ip_address'],
+                             should_succeed=True)
+
+    @decorators.idempotent_id('7ede9ab5-a615-46c5-9dea-cf2aa1ea43cb')
+    def test_connectivity_between_vms_using_different_sec_groups(self):
+        self._test_connectivity_between_vms_using_different_sec_groups()
+
+    @testtools.skipUnless(
+        (CONF.neutron_plugin_options.advanced_image_ref or
+         CONF.neutron_plugin_options.default_image_is_advanced),
+        "Advanced image is required to run this test.")
+    @decorators.idempotent_id('c3bb8073-97a2-4bea-a6fb-0a9d2e4df13f')
+    def test_packets_of_any_connection_state_can_reach_dest(self):
+        TEST_TCP_PORT = 1022
+        PKT_TYPES = [
+            {'nping': 'syn', 'tcpdump': 'tcp-syn'},
+            {'nping': 'ack', 'tcpdump': 'tcp-ack'},
+            {'nping': 'syn,ack', 'tcpdump': 'tcp-syn|tcp-ack'},
+            {'nping': 'rst', 'tcpdump': 'tcp-rst'},
+            {'nping': 'fin', 'tcpdump': 'tcp-fin'},
+            {'nping': 'psh', 'tcpdump': 'tcp-push'}]
+        ssh_clients, fips, servers, _ = self._create_client_and_server_vms(
+            TEST_TCP_PORT, use_advanced_image=True)
+
+        self._check_cmd_installed_on_server(
+            ssh_clients['server'], servers['server']['server'], 'nping')
+        self._check_cmd_installed_on_server(
+            ssh_clients['client'], servers['client']['server'], 'tcpdump')
+        server_port = self.network_client.show_port(
+            fips['server']['port_id'])['port']
+        server_ip_command = ip.IPCommand(ssh_client=ssh_clients['server'])
+        addresses = server_ip_command.list_addresses(port=server_port)
+        port_iface = ip.get_port_device_name(addresses, server_port)
+
+        def _get_file_suffix(pkt_type):
+            return pkt_type['tcpdump'].replace(
+                'tcp-', '').replace('|', '')
+
+        for pkt_type in PKT_TYPES:
+            file_suffix = _get_file_suffix(pkt_type)
+            capture_script_path = "/tmp/capture_%s.sh" % file_suffix
+            capture_out = "/tmp/capture_%s.out" % file_suffix
+            capture_script = get_capture_script(
+                port_iface, TEST_TCP_PORT, pkt_type['tcpdump'], capture_out)
+            ssh_clients['server'].execute_script(
+                'echo \'%s\' > %s' % (capture_script, capture_script_path))
+            ssh_clients['server'].execute_script(
+                "bash %s" % capture_script_path, become_root=True)
+
+        for pkt_type in PKT_TYPES:
+            ssh_clients['client'].execute_script(
+                "nping --tcp -p %(tcp_port)s --flags %(tcp_flag)s --ttl 10 "
+                "%(ip_address)s -c 3" % {
+                    'tcp_port': TEST_TCP_PORT,
+                    'tcp_flag': pkt_type['nping'],
+                    'ip_address': fips['server']['fixed_ip_address']},
+                become_root=True)
+
+        def _packtet_received(pkt_type):
+            file_suffix = _get_file_suffix(pkt_type)
+            expected_msg = "1 packet captured"
+            result = ssh_clients['server'].execute_script(
+                "cat {path} || echo '{path} not exists yet'".format(
+                    path="/tmp/capture_%s.out" % file_suffix))
+            return expected_msg in result
+
+        for pkt_type in PKT_TYPES:
+            utils.wait_until_true(
+                lambda: _packtet_received(pkt_type),
+                timeout=10,
+                exception=RuntimeError(
+                    'No TCP packet of type %s received by server %s' % (
+                        pkt_type['nping'],
+                        fips['server']['fixed_ip_address'])))
+
+    @testtools.skipUnless(
+        (CONF.neutron_plugin_options.advanced_image_ref or
+         CONF.neutron_plugin_options.default_image_is_advanced),
+        "Advanced image is required to run this test.")
+    @decorators.idempotent_id('14c4af2c-8077-4756-a6e3-6bebd642ed92')
+    def test_fragmented_traffic_is_accepted(self):
+        ssh_clients, fips, servers, security_groups = (
+            self._create_client_and_server_vms(use_advanced_image=True))
+
+        # make sure tcp connectivity to vms works fine
+        for fip in fips.values():
+            self.check_connectivity(
+                fip['floating_ip_address'],
+                CONF.neutron_plugin_options.advanced_image_ssh_user,
+                self.keypair['private_key'])
+
+        # Check that ICMP packets bigger than MTU aren't working without
+        # fragmentation allowed
+        self.check_remote_connectivity(
+            ssh_clients['client'], fips['server']['fixed_ip_address'],
+            mtu=self.network['mtu'] + 1, fragmentation=False,
+            should_succeed=False)
+        # and are working fine with fragmentation enabled:
+        self.check_remote_connectivity(
+            ssh_clients['client'], fips['server']['fixed_ip_address'],
+            mtu=self.network['mtu'] + 1, fragmentation=True,
+            should_succeed=True)
+
+
+class StatelessSecGroupDualStackSlaacTest(BaseNetworkSecGroupTest):
+    required_extensions = ['security-group', 'stateful-security-group']
+    stateless_sg = True
+    ipv6_mode = 'slaac'
+
+    def _get_port_cidrs(self, port):
+        ips = []
+        subnet_cidrs = {}
+        for fixed_ip in port['fixed_ips']:
+            subnet_id = fixed_ip['subnet_id']
+            subnet_cidr = subnet_cidrs.get('subnet_id')
+            if not subnet_cidr:
+                subnet = self.client.show_subnet(subnet_id)['subnet']
+                subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
+                subnet_cidrs[subnet_id] = subnet_cidr
+            ips.append(
+                netaddr.IPNetwork(
+                    "%s/%s" % (fixed_ip['ip_address'], subnet_cidr.prefixlen)))
+        LOG.debug("On port %s found IP cidrs: %s", port['id'], ips)
+        return ips
+
+    def _test_default_sec_grp_scenarios(self):
+        # Make "regular" test like for IPv4 case
+        server_ssh_clients, _, servers = (
+            super()._test_default_sec_grp_scenarios())
+
+        # And additionally ensure that IPv6 addresses are configured properly
+        # in the VM
+        for ssh_client, server in zip(server_ssh_clients, servers):
+            ip_cmd = ip.IPCommand(ssh_client=ssh_client)
+            ports = self.client.list_ports(
+                device_id=server['server']['id'])['ports']
+            for port in ports:
+                configured_cidrs = [ip.network for ip in
+                                    ip_cmd.list_addresses(port=port)]
+                for port_cidr in self._get_port_cidrs(port):
+                    self.assertIn(port_cidr, configured_cidrs)
+
+    @decorators.idempotent_id('e7d64384-ea6a-40aa-b454-854f0990153c')
+    def test_default_sec_grp_scenarios(self):
+        self._test_default_sec_grp_scenarios()
+
+
+class StatelessSecGroupDualStackDHCPv6StatelessTest(
+        StatelessSecGroupDualStackSlaacTest):
+    required_extensions = ['security-group', 'stateful-security-group']
+    stateless_sg = True
+    ipv6_mode = 'dhcpv6-stateless'
+
+    @decorators.idempotent_id('c61c127c-e08f-4ddf-87a3-58b3c86e5476')
+    def test_default_sec_grp_scenarios(self):
+        self._test_default_sec_grp_scenarios()
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index a917b4f..0666297 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -273,9 +273,13 @@
         self.expected_success(201, resp.status)
         return service_client.ResponseBody(resp, body)
 
-    def create_bulk_security_groups(self, security_group_list):
+    def create_bulk_security_groups(self, security_group_list,
+                                    stateless=False):
         group_list = [{'security_group': {'name': name}}
                       for name in security_group_list]
+        if stateless:
+            for group in group_list:
+                group['security_group']['stateful'] = False
         post_data = {'security_groups': group_list}
         body = self.serialize_list(post_data, 'security_groups',
                                    'security_group')
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/manager.py b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
index 394fb02..4861931 100644
--- a/neutron_tempest_plugin/sfc/tests/scenario/manager.py
+++ b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
@@ -89,7 +89,7 @@
         if not client:
             client = self.routers_client
         if not tenant_id:
-            tenant_id = client.tenant_id
+            tenant_id = client.project_id
         name = data_utils.rand_name(namestart)
         result = client.create_router(name=name,
                                       admin_state_up=True,
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
index 5598fbe..92f515a 100644
--- a/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
@@ -82,7 +82,7 @@
         if not networks_client:
             networks_client = self.networks_client
         if not tenant_id:
-            tenant_id = networks_client.tenant_id
+            tenant_id = networks_client.project_id
         name = data_utils.rand_name(namestart)
         network_kwargs = dict(name=name, tenant_id=tenant_id)
         # Neutron disables port security by default so we have to check the
diff --git a/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py b/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
index ab48a2f..953360e 100644
--- a/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
+++ b/neutron_tempest_plugin/vpnaas/api/test_vpnaas.py
@@ -137,7 +137,7 @@
 
     def _get_tenant_id(self):
         """Returns the tenant_id of the client current user"""
-        return self.client.tenant_id
+        return self.client.project_id
 
     @decorators.attr(type='smoke')
     @decorators.idempotent_id('74dcf2d3-a40e-4a6c-a25a-747d764bee81')
diff --git a/tools/customize_ubuntu_image b/tools/customize_ubuntu_image
index fdd2d12..cb96b17 100755
--- a/tools/customize_ubuntu_image
+++ b/tools/customize_ubuntu_image
@@ -19,6 +19,7 @@
    iperf3
    iputils-ping
    ncat
+   nmap
    psmisc  # provides killall command
    python3
    tcpdump
diff --git a/tox.ini b/tox.ini
index ff50b9d..c2fc078 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,11 +1,10 @@
 [tox]
 minversion = 3.18.0
 envlist = pep8
-skipsdist = True
 ignore_basepython_conflict = True
 
 [testenv]
-basepython = python3
+basepython = {env:TOX_PYTHON:python3}
 usedevelop = True
 setenv =
    VIRTUAL_ENV={envdir}
@@ -15,15 +14,16 @@
    OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true}
 deps =
   -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+  -r{toxinidir}/requirements.txt
   -r{toxinidir}/test-requirements.txt
 commands = stestr run --slowest {posargs}
 
 [testenv:pep8]
 commands =
-  sh ./tools/misc-sanity-checks.sh
+  bash ./tools/misc-sanity-checks.sh
   flake8
 allowlist_externals =
-  sh
+  bash
 
 [testenv:venv]
 commands = {posargs}
diff --git a/zuul.d/2023_1_jobs.yaml b/zuul.d/2023_1_jobs.yaml
new file mode 100644
index 0000000..b9f293f
--- /dev/null
+++ b/zuul.d/2023_1_jobs.yaml
@@ -0,0 +1,274 @@
+- job:
+    name: neutron-tempest-plugin-openvswitch-2023-1
+    parent: neutron-tempest-plugin-openvswitch
+    override-checkout: stable/2023.1
+    vars:
+      network_api_extensions_openvswitch:
+        - dhcp_agent_scheduler
+        - local_ip
+        - qos-bw-minimum-ingress
+        - port-resource-request
+        - port-resource-request-groups
+      tempest_test_regex: "\
+          (^neutron_tempest_plugin.api)|\
+          (^neutron_tempest_plugin.scenario)|\
+          (^tempest.api.compute.servers.test_attach_interfaces)|\
+          (^tempest.api.compute.servers.test_multiple_create)"
+      network_available_features: &available_features
+        - ipv6_metadata
+      network_api_extensions_common: &api_extensions
+        - address-group
+        - address-scope
+        - agent
+        - allowed-address-pairs
+        - auto-allocated-topology
+        - availability_zone
+        - binding
+        - default-subnetpools
+        - dns-domain-ports
+        - dns-integration
+        - dns-integration-domain-keywords
+        - empty-string-filtering
+        - expose-port-forwarding-in-fip
+        - expose-l3-conntrack-helper
+        - ext-gw-mode
+        - external-net
+        - extra_dhcp_opt
+        - extraroute
+        - extraroute-atomic
+        - filter-validation
+        - fip-port-details
+        - flavors
+        - floating-ip-port-forwarding
+        - floating-ip-port-forwarding-detail
+        - floatingip-pools
+        - ip-substring-filtering
+        - l3-conntrack-helper
+        - l3-ext-ndp-proxy
+        - l3-flavors
+        - l3-ha
+        - l3-ndp-proxy
+        - l3_agent_scheduler
+        - metering
+        - multi-provider
+        - net-mtu
+        - net-mtu-writable
+        - network-ip-availability
+        - network_availability_zone
+        - network-segment-range
+        - pagination
+        - port-device-profile
+        - port-mac-address-regenerate
+        - port-security
+        - port-security-groups-filtering
+        - project-id
+        - provider
+        - qos
+        - qos-fip
+        - quotas
+        - quota_details
+        - rbac-address-group
+        - rbac-address-scope
+        - rbac-policies
+        - rbac-security-groups
+        - rbac-subnetpool
+        - router
+        - router_availability_zone
+        - security-group
+        - security-groups-remote-address-group
+        - segment
+        - service-type
+        - sorting
+        - standard-attr-description
+        - standard-attr-revisions
+        - standard-attr-segment
+        - standard-attr-tag
+        - standard-attr-timestamp
+        - stateful-security-group
+        - subnet_allocation
+        - subnet-dns-publish-fixed-ip
+        - subnet-service-types
+        - subnetpool-prefix-ops
+        - tag-ports-during-bulk-creation
+        - trunk
+        - trunk-details
+        - uplink-status-propagation
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_openvswitch) | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+    name: neutron-tempest-plugin-openvswitch-iptables_hybrid-2023-1
+    parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
+    override-checkout: stable/2023.1
+    vars:
+      network_api_extensions_common: *api_extensions
+      network_api_extensions_openvswitch:
+        - dhcp_agent_scheduler
+        - local_ip
+        - logging
+        - port-resource-request
+        - port-resource-request-groups
+      network_available_features: *available_features
+      tempest_test_regex: "\
+          (^neutron_tempest_plugin.api)|\
+          (^neutron_tempest_plugin.scenario)|\
+          (^tempest.api.compute.servers.test_attach_interfaces)|\
+          (^tempest.api.compute.servers.test_multiple_create)"
+      # TODO(slaweq): remove trunks subport_connectivity test from blacklist
+      # when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
+      # TODO(akatz): remove established tcp session verification test when the
+      # bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 will be fixed
+      tempest_exclude_regex: "\
+          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_openvswitch) | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: "{{ network_available_features | join(',') }}"
+            neutron_plugin_options:
+              available_type_drivers: flat,vlan,local,vxlan
+              firewall_driver: iptables_hybrid
+
+- job:
+    name: neutron-tempest-plugin-openvswitch-enforce-scope-new-defaults-2023-1
+    parent: neutron-tempest-plugin-openvswitch-2023-1
+    override-checkout: stable/2023.1
+    vars:
+      devstack_localrc:
+        # Enabling the scope and new defaults for services.
+        # NOTE: (gmann) We need to keep the keystone scope check disabled as
+        # services (except ironic) do not support the system scope and
+        # they need keystone to continue working with project scope. Until
+        # Keystone policies are changed to work for both system and
+        # project scope, we need to keep the scope check disabled for
+        # keystone.
+        NOVA_ENFORCE_SCOPE: true
+        GLANCE_ENFORCE_SCOPE: true
+        NEUTRON_ENFORCE_SCOPE: true
+
+- job:
+    name: neutron-tempest-plugin-linuxbridge-2023-1
+    parent: neutron-tempest-plugin-linuxbridge
+    override-checkout: stable/2023.1
+    vars:
+      network_api_extensions_common: *api_extensions
+      network_api_extensions_linuxbridge:
+        - dhcp_agent_scheduler
+        - vlan-transparent
+      network_available_features: *available_features
+      tempest_test_regex: "\
+          (^neutron_tempest_plugin.api)|\
+          (^neutron_tempest_plugin.scenario)|\
+          (^tempest.api.compute.servers.test_attach_interfaces)|\
+          (^tempest.api.compute.servers.test_multiple_create)"
+      # TODO(eolivare): remove VLAN Transparency tests from blacklist
+      # when bug https://bugs.launchpad.net/neutron/+bug/1907548 will be fixed
+      # TODO(slaweq): remove
+      # test_established_tcp_session_after_re_attachinging_sg from the
+      # exclude regex when bug https://bugs.launchpad.net/neutron/+bug/1936911
+      # will be fixed
+      # TODO(slaweq) remove test_floatingip_port_details from the exclude
+      # regex when bug https://bugs.launchpad.net/neutron/+bug/1799790 will be
+      # fixed
+      tempest_exclude_regex: "\
+          (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPPortDetailsTest.test_floatingip_port_details)"
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_linuxbridge) | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: "{{ network_available_features | join(',') }}"
+            neutron_plugin_options:
+              available_type_drivers: flat,vlan,local,vxlan
+              q_agent: linuxbridge
+              firewall_driver: iptables
+
+- job:
+    name: neutron-tempest-plugin-ovn-2023-1
+    parent: neutron-tempest-plugin-ovn
+    override-checkout: stable/2023.1
+    vars:
+      network_api_extensions_ovn:
+        - vlan-transparent
+      tempest_test_regex: "\
+          (^neutron_tempest_plugin.api)|\
+          (^neutron_tempest_plugin.scenario)|\
+          (^tempest.api.compute.servers.test_attach_interfaces)|\
+          (^tempest.api.compute.servers.test_multiple_create)"
+      # TODO(jlibosva): Remove the NetworkWritableMtuTest test from the list
+      # once east/west fragmentation is supported in core OVN
+      tempest_exclude_regex: "\
+          (^neutron_tempest_plugin.scenario.test_mtu.NetworkWritableMtuTest)"
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_ovn) | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: ""
+            neutron_plugin_options:
+              available_type_drivers: local,flat,vlan,geneve
+              is_igmp_snooping_enabled: True
+              firewall_driver: ovn
+
+- job:
+    name: neutron-tempest-plugin-dvr-multinode-scenario-2023-1
+    parent: neutron-tempest-plugin-dvr-multinode-scenario
+    override-checkout: stable/2023.1
+    vars:
+      network_api_extensions_common: *api_extensions
+      network_api_extensions_dvr:
+        - dhcp_agent_scheduler
+        - dvr
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
+
+- job:
+    name: neutron-tempest-plugin-designate-scenario-2023-1
+    parent: neutron-tempest-plugin-designate-scenario
+    override-checkout: stable/2023.1
+    vars:
+      network_api_extensions_common: *api_extensions
+
+- job:
+    name: neutron-tempest-plugin-sfc-2023-1
+    parent: neutron-tempest-plugin-sfc
+    override-checkout: stable/2023.1
+
+- job:
+    name: neutron-tempest-plugin-bgpvpn-bagpipe-2023-1
+    parent: neutron-tempest-plugin-bgpvpn-bagpipe
+    override-checkout: stable/2023.1
+
+- job:
+    name: neutron-tempest-plugin-dynamic-routing-2023-1
+    parent: neutron-tempest-plugin-dynamic-routing
+    override-checkout: stable/2023.1
+
+- job:
+    name: neutron-tempest-plugin-fwaas-2023-1
+    parent: neutron-tempest-plugin-fwaas
+    override-checkout: stable/2023.1
+
+- job:
+    name: neutron-tempest-plugin-vpnaas-2023-1
+    parent: neutron-tempest-plugin-vpnaas
+    override-checkout: stable/2023.1
+
+- job:
+    name: neutron-tempest-plugin-tap-as-a-service-2023-1
+    parent: neutron-tempest-plugin-tap-as-a-service
+    override-checkout: stable/2023.1
diff --git a/zuul.d/base-nested-switch.yaml b/zuul.d/base-nested-switch.yaml
index dcc0175..760e5a1 100644
--- a/zuul.d/base-nested-switch.yaml
+++ b/zuul.d/base-nested-switch.yaml
@@ -8,25 +8,46 @@
         nodes:
           - controller
 
-# Base nested switch job for non EM releases
+- nodeset:
+    name: neutron-nested-virt-ubuntu-jammy
+    nodes:
+      - name: controller
+        label: nested-virt-ubuntu-jammy
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+# Base nested switch job for 2023.1 and later
 - job:
     name: neutron-tempest-plugin-base-nested-switch
     parent: neutron-tempest-plugin-base
     abstract: true
-    branches: ^(?!stable/(queens|rocky|stein|train|ussuri)).*$
+    branches: ^(?!stable/(train|ussuri|victoria|wallaby|xena|yoga|zed)).*$
     # Comment nodeset and vars to switch back to non nested nodes
-    nodeset: neutron-nested-virt-ubuntu-focal
-    vars:
+    nodeset: neutron-nested-virt-ubuntu-jammy
+    vars: &nested_virt_vars
       devstack_localrc:
         LIBVIRT_TYPE: kvm
-        LIBVIRT_CPU_MODE: host-passthrough
-        CIRROS_VERSION: 0.5.1
-        DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-disk
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-disk.img
+        # cirros 0.6.1 not booting when host-passthrough is used
+        # LIBVIRT_CPU_MODE: host-passthrough
+        CIRROS_VERSION: 0.6.1
+        DEFAULT_IMAGE_NAME: cirros-0.6.1-x86_64-disk
+        DEFAULT_IMAGE_FILE_NAME: cirros-0.6.1-x86_64-disk.img
+
+# Base nested switch job for yoga and zed
+- job:
+    name: neutron-tempest-plugin-base-nested-switch
+    parent: neutron-tempest-plugin-base
+    abstract: true
+    branches: ^stable/(yoga|zed)$
+    # Comment nodeset and vars to switch back to non nested nodes
+    nodeset: neutron-nested-virt-ubuntu-focal
+    vars: *nested_virt_vars
 
 # Base nested switch job for EM releases
 - job:
-    name: neutron-tempest-plugin-scenario-nested-switch
+    name: neutron-tempest-plugin-base-nested-switch
     parent: neutron-tempest-plugin-base
     abstract: true
-    branches: ^(stable/(queens|rocky|stein|train|ussuri)).*$
+    branches: ^(stable/(train|ussuri|victoria|wallaby|xena)).*$
diff --git a/zuul.d/master_jobs.yaml b/zuul.d/master_jobs.yaml
index 48b146d..11bdd9d 100644
--- a/zuul.d/master_jobs.yaml
+++ b/zuul.d/master_jobs.yaml
@@ -27,9 +27,9 @@
         NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
         PHYSICAL_NETWORK: public
         IMAGE_URLS: https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img
-        CIRROS_VERSION: 0.5.1
-        DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
+        CIRROS_VERSION: 0.6.1
+        DEFAULT_IMAGE_NAME: cirros-0.6.1-x86_64-uec
+        DEFAULT_IMAGE_FILE_NAME: cirros-0.6.1-x86_64-uec.tar.gz
         ADVANCED_IMAGE_NAME: ubuntu-20.04-minimal-cloudimg-amd64
         ADVANCED_INSTANCE_TYPE: ntp_image_256M
         ADVANCED_INSTANCE_USER: ubuntu
@@ -38,11 +38,13 @@
         # TODO(lucasagomes): Re-enable MOD_WSGI after
         # https://bugs.launchpad.net/neutron/+bug/1912359 is implemented
         NEUTRON_DEPLOY_MOD_WSGI: false
-        # TODO(ralonsoh): remove OVN_BUILD_FROM_SOURCE once the OS packages
-        # include at least OVN v20.12.0.
+        # TODO(ihrachys): remove OVN_BUILD_FROM_SOURCE once the OS packages
+        # include at least OVN v22.03.3.
         OVN_BUILD_FROM_SOURCE: True
-        OVN_BRANCH: "v21.03.0"
-        OVS_BRANCH: "8dc1733eaea866dce033b3c44853e1b09bf59fc7"
+        # TODO(ihrachys): switch back to a tagged version when it's released
+        # OVN_BRANCH: "v22.03.3"
+        OVN_BRANCH: "36e3ab9b47e93af0599a818e9d6b2930e49473f0"
+        OVS_BRANCH: "2410b95597fcec5f733caf77febdb46f4ffacd27"
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron.git
         neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
@@ -164,9 +166,6 @@
               quota_floatingip: 500
               quota_security_group: 150
               quota_security_group_rule: 1000
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           /$NEUTRON_CORE_PLUGIN_CONF:
             ml2:
               type_drivers: flat,geneve,vlan,gre,local,vxlan
@@ -207,6 +206,7 @@
       - ^neutron/tests/functional/.*
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^neutron/agent/.*$
       - ^neutron/privileged/.*$
       - ^neutron_lib/tests/unit/.*$
@@ -256,9 +256,6 @@
             DEFAULT:
               enable_dvr: false
               l3_ha: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           /$NEUTRON_CORE_PLUGIN_CONF:
             agent:
               tunnel_types: vxlan,gre
@@ -288,6 +285,7 @@
       - ^neutron/tests/functional/.*
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^neutron/agent/ovn/.*$
       - ^neutron/agent/windows/.*$
       - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
@@ -344,7 +342,8 @@
       # bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 will be fixed
       tempest_exclude_regex: "\
           (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
       devstack_localrc:
         Q_AGENT: openvswitch
         Q_ML2_TENANT_NETWORK_TYPE: vxlan
@@ -356,9 +355,6 @@
             DEFAULT:
               enable_dvr: false
               l3_ha: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           /$NEUTRON_CORE_PLUGIN_CONF:
             agent:
               tunnel_types: vxlan,gre
@@ -389,6 +385,7 @@
       - ^neutron/tests/functional/.*
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^neutron/agent/linux/openvswitch_firewall/.*$
       - ^neutron/agent/ovn/.*$
       - ^neutron/agent/windows/.*$
@@ -413,6 +410,23 @@
       - ^vagrant/.*$
       - ^zuul.d/(?!(project)).*\.yaml
 
+- job:
+    name: neutron-tempest-plugin-openvswitch-enforce-scope-new-defaults
+    parent: neutron-tempest-plugin-openvswitch
+    vars:
+      devstack_localrc:
+        # Enabling the scope and new defaults for services.
+        # NOTE: (gmann) We need to keep the keystone scope check disabled as
+        # services (except ironic) do not support the system scope and
+        # they need keystone to continue working with project scope. Until
+        # Keystone policies are changed to work for both system and
+        # project scope, we need to keep the scope check disabled for
+        # keystone.
+        NOVA_ENFORCE_SCOPE: true
+        GLANCE_ENFORCE_SCOPE: true
+        NEUTRON_ENFORCE_SCOPE: true
+
+
 # TODO(slaweq): remove that job's definition as soon as new job
 # "neutron-tempest-plugin-openvswitch-iptables_hybrid" will be used in the
 # neutron repo as a parent for a
@@ -496,7 +510,8 @@
       # fixed
       tempest_exclude_regex: "\
           (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
           (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPPortDetailsTest.test_floatingip_port_details)"
       devstack_localrc:
         Q_AGENT: linuxbridge
@@ -514,9 +529,6 @@
               debug_iptables_rules: true
             EXPERIMENTAL:
               linuxbridge: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           /$NEUTRON_CORE_PLUGIN_CONF:
             ml2:
               type_drivers: flat,vlan,local,vxlan
@@ -544,6 +556,7 @@
       - ^neutron/tests/functional/.*
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^neutron/agent/linux/openvswitch_firewall/.*$
       - ^neutron/agent/ovn/.*$
       - ^neutron/agent/windows/.*$
@@ -594,24 +607,18 @@
         Q_ML2_PLUGIN_TYPE_DRIVERS: local,flat,vlan,geneve
         Q_ML2_TENANT_NETWORK_TYPE: geneve
         Q_USE_PROVIDERNET_FOR_PUBLIC: true
-        # NOTE(slaweq): In the job with OVN backend we can't use Ubuntu minimal
-        # image because kernel in that image don't supports MULTICAST traffic
-        # thus multicast scenario test with IGMP snooping enabled would fail
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ntp_image_384M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
         ENABLE_CHASSIS_AS_GW: true
         OVN_L3_CREATE_PUBLIC_NETWORK: true
         OVN_DBS_LOG_LEVEL: dbg
         ENABLE_TLS: True
         OVN_IGMP_SNOOPING_ENABLE: True
-        # TODO(eolivare): Remove OVN_BUILD_FROM_SOURCE once vlan-transparency
-        # is included in an ovn released version
+        # TODO(ihrachys): remove OVN_BUILD_FROM_SOURCE once the OS packages
+        # include at least OVN v22.03.3.
         OVN_BUILD_FROM_SOURCE: True
-        OVN_BRANCH: "v21.06.0"
-        OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+        # TODO(ihrachys): switch back to a tagged version when it's released
+        # OVN_BRANCH: "v22.03.3"
+        OVN_BRANCH: "36e3ab9b47e93af0599a818e9d6b2930e49473f0"
+        OVS_BRANCH: "2410b95597fcec5f733caf77febdb46f4ffacd27"
         OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
       devstack_services:
         br-ex-tcpdump: true
@@ -678,6 +685,7 @@
       - ^neutron/tests/functional/.*
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^neutron/agent/dhcp/.*$
       - ^neutron/agent/l2/.*$
       - ^neutron/agent/l3/.*$
@@ -742,9 +750,9 @@
         USE_PYTHON3: true
         NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
         PHYSICAL_NETWORK: default
-        CIRROS_VERSION: 0.5.1
-        DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
+        CIRROS_VERSION: 0.6.1
+        DEFAULT_IMAGE_NAME: cirros-0.6.1-x86_64-uec
+        DEFAULT_IMAGE_FILE_NAME: cirros-0.6.1-x86_64-uec.tar.gz
         IMAGE_URLS: https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img
         ADVANCED_IMAGE_NAME: ubuntu-20.04-minimal-cloudimg-amd64
         ADVANCED_INSTANCE_TYPE: ntp_image_256M
@@ -803,9 +811,6 @@
               quota_security_group_rule: 1000
             DEFAULT:
               router_distributed: True
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           "/$NEUTRON_CORE_PLUGIN_CONF":
             ml2:
               type_drivers: flat,geneve,vlan,gre,local,vxlan
@@ -885,9 +890,6 @@
             $NEUTRON_CONF:
               DEFAULT:
                 router_distributed: True
-            # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-            # devstack-tempest job will be switched to use lib/neutron instead of
-            # lib/neutron-legacy
             "/$NEUTRON_CORE_PLUGIN_CONF":
               agent:
                 enable_distributed_routing: True
@@ -944,6 +946,7 @@
       - ^neutron/tests/functional/.*
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^neutron/agent/.*$
       - ^neutron/cmd/.*$
       - ^neutron/privileged/.*$
@@ -992,6 +995,11 @@
         - flow_classifier
         - sfc
       devstack_localrc:
+        # TODO(slaweq): check why traceroute output is different in Cirros >
+        # 0.6.1 which is causing failures of the networking-sfc jobs
+        CIRROS_VERSION: 0.5.1
+        DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
+        DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
         Q_AGENT: openvswitch
         Q_ML2_TENANT_NETWORK_TYPE: vxlan
         Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
@@ -1014,10 +1022,13 @@
       - ^neutron/tests/fullstack/.*
       - ^neutron/tests/functional/.*
       - ^neutron_tempest_plugin/api/test_.*$
+      - ^neutron_tempest_plugin/scenario/admin/.*$
+      - ^neutron_tempest_plugin/scenario/test_.*$
       - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|tap_as_a_service|vpnaas).*$
       - ^neutron_tempest_plugin/services/bgp/.*$
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^rally-jobs/.*$
       - ^roles/.*functional.*$
       - ^playbooks/.*functional.*$
@@ -1076,10 +1087,13 @@
       - ^neutron/tests/fullstack/.*
       - ^neutron/tests/functional/.*
       - ^neutron_tempest_plugin/api/test_.*$
+      - ^neutron_tempest_plugin/scenario/admin/.*$
+      - ^neutron_tempest_plugin/scenario/test_.*$
       - ^neutron_tempest_plugin/(fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
       - ^neutron_tempest_plugin/services/bgp/.*$
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^rally-jobs/.*$
       - ^roles/.*functional.*$
       - ^playbooks/.*functional.*$
@@ -1107,24 +1121,7 @@
         - bgp_4byte_asn
       devstack_localrc:
         NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
-        Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
       devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-meta: true
-        q-metering: true
-        q-l3: true
         neutron-dr: true
         neutron-dr-agent: true
       tempest_concurrency: 1
@@ -1143,9 +1140,12 @@
       - ^neutron/tests/fullstack/.*
       - ^neutron/tests/functional/.*
       - ^neutron_tempest_plugin/api/test_.*$
+      - ^neutron_tempest_plugin/scenario/admin/.*$
+      - ^neutron_tempest_plugin/scenario/test_.*$
       - ^neutron_tempest_plugin/(bgpvpn|fwaas|sfc|tap_as_a_service|vpnaas).*$
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^rally-jobs/.*$
       - ^roles/.*functional.*$
       - ^playbooks/.*functional.*$
@@ -1202,10 +1202,13 @@
       - ^neutron/tests/fullstack/.*
       - ^neutron/tests/functional/.*
       - ^neutron_tempest_plugin/api/test_.*$
+      - ^neutron_tempest_plugin/scenario/admin/.*$
+      - ^neutron_tempest_plugin/scenario/test_.*$
       - ^neutron_tempest_plugin/(bgpvpn|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
       - ^neutron_tempest_plugin/services/bgp/.*$
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^rally-jobs/.*$
       - ^roles/.*functional.*$
       - ^playbooks/.*functional.*$
@@ -1265,10 +1268,13 @@
       - ^neutron/tests/fullstack/.*
       - ^neutron/tests/functional/.*
       - ^neutron_tempest_plugin/api/test_.*$
+      - ^neutron_tempest_plugin/scenario/admin/.*$
+      - ^neutron_tempest_plugin/scenario/test_.*$
       - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service).*$
       - ^neutron_tempest_plugin/services/bgp/.*$
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^rally-jobs/.*$
       - ^roles/.*functional.*$
       - ^playbooks/.*functional.*$
@@ -1297,11 +1303,6 @@
         - taas-vlan-filter
       devstack_localrc:
         NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ntp_image_384M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
         BUILD_TIMEOUT: 784
         Q_AGENT: openvswitch
         Q_ML2_TENANT_NETWORK_TYPE: vxlan,vlan
@@ -1375,10 +1376,13 @@
       - ^neutron/tests/fullstack/.*
       - ^neutron/tests/functional/.*
       - ^neutron_tempest_plugin/api/test_.*$
+      - ^neutron_tempest_plugin/scenario/admin/.*$
+      - ^neutron_tempest_plugin/scenario/test_.*$
       - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|vpnaas).*$
       - ^neutron_tempest_plugin/services/bgp/.*$
       - ^tools/.*$
       - ^tox.ini$
+      - ^plugin.spec$
       - ^rally-jobs/.*$
       - ^roles/.*functional.*$
       - ^playbooks/.*functional.*$
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 6adf860..9ce0212 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -2,82 +2,28 @@
     name: neutron-tempest-plugin-jobs
     check:
       jobs:
-        - neutron-tempest-plugin-linuxbridge
         - neutron-tempest-plugin-openvswitch
         - neutron-tempest-plugin-openvswitch-iptables_hybrid
+        - neutron-tempest-plugin-openvswitch-enforce-scope-new-defaults
         - neutron-tempest-plugin-ovn
         - neutron-tempest-plugin-designate-scenario
     gate:
       jobs:
-        - neutron-tempest-plugin-linuxbridge
         - neutron-tempest-plugin-openvswitch
         - neutron-tempest-plugin-ovn
         - neutron-tempest-plugin-openvswitch-iptables_hybrid
+        - neutron-tempest-plugin-openvswitch-enforce-scope-new-defaults
     #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
     #              the experimental queue when it will be more stable
     experimental:
       jobs:
+        - neutron-tempest-plugin-linuxbridge
         - neutron-tempest-plugin-dvr-multinode-scenario
         - neutron-tempest-plugin-openvswitch-distributed-dhcp
         - neutron-tempest-plugin-openvswitch-iptables_hybrid-distributed-dhcp
 
 
 - project-template:
-    name: neutron-tempest-plugin-jobs-queens
-    check:
-      jobs:
-        - neutron-tempest-plugin-api-queens
-        - neutron-tempest-plugin-scenario-linuxbridge-queens
-        - neutron-tempest-plugin-scenario-openvswitch-queens
-    gate:
-      jobs:
-        - neutron-tempest-plugin-api-queens
-    #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
-    #              the experimental queue when it will be more stable
-    experimental:
-      jobs:
-        - neutron-tempest-plugin-dvr-multinode-scenario-queens
-
-
-- project-template:
-    name: neutron-tempest-plugin-jobs-rocky
-    check:
-      jobs:
-        - neutron-tempest-plugin-api-rocky
-        - neutron-tempest-plugin-scenario-linuxbridge-rocky
-        - neutron-tempest-plugin-scenario-openvswitch-rocky
-        - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
-        - neutron-tempest-plugin-designate-scenario-rocky
-    gate:
-      jobs:
-        - neutron-tempest-plugin-api-rocky
-    #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
-    #              the experimental queue when it will be more stable
-    experimental:
-      jobs:
-        - neutron-tempest-plugin-dvr-multinode-scenario-rocky
-
-
-- project-template:
-    name: neutron-tempest-plugin-jobs-stein
-    check:
-      jobs:
-        - neutron-tempest-plugin-api-stein
-        - neutron-tempest-plugin-scenario-linuxbridge-stein
-        - neutron-tempest-plugin-scenario-openvswitch-stein
-        - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-stein
-        - neutron-tempest-plugin-designate-scenario-stein
-    gate:
-      jobs:
-        - neutron-tempest-plugin-api-stein
-    #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
-    #              the experimental queue when it will be more stable
-    experimental:
-      jobs:
-        - neutron-tempest-plugin-dvr-multinode-scenario-stein
-
-
-- project-template:
     name: neutron-tempest-plugin-jobs-train
     check:
       jobs:
@@ -85,7 +31,6 @@
         - neutron-tempest-plugin-scenario-linuxbridge-train
         - neutron-tempest-plugin-scenario-openvswitch-train
         - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-train
-        - neutron-tempest-plugin-designate-scenario-train
     gate:
       jobs:
         - neutron-tempest-plugin-api-train
@@ -212,45 +157,59 @@
       jobs:
         - neutron-tempest-plugin-dvr-multinode-scenario-zed
 
+- project-template:
+    name: neutron-tempest-plugin-jobs-2023-1
+    check:
+      jobs:
+        - neutron-tempest-plugin-linuxbridge-2023-1
+        - neutron-tempest-plugin-openvswitch-2023-1
+        - neutron-tempest-plugin-openvswitch-iptables_hybrid-2023-1
+        - neutron-tempest-plugin-ovn-2023-1
+        - neutron-tempest-plugin-designate-scenario-2023-1
+    gate:
+      jobs:
+        - neutron-tempest-plugin-ovn-2023-1
+    #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
+    #              the experimental queue when it will be more stable
+    experimental:
+      jobs:
+        - neutron-tempest-plugin-dvr-multinode-scenario-2023-1
+
 - project:
     templates:
       - build-openstack-docs-pti
       - neutron-tempest-plugin-jobs
-      - neutron-tempest-plugin-jobs-wallaby
-      - neutron-tempest-plugin-jobs-xena
       - neutron-tempest-plugin-jobs-yoga
       - neutron-tempest-plugin-jobs-zed
+      - neutron-tempest-plugin-jobs-2023-1
       - check-requirements
       - tempest-plugin-jobs
       - release-notes-jobs-python3
     check:
       jobs:
         - neutron-tempest-plugin-sfc
-        - neutron-tempest-plugin-sfc-wallaby
-        - neutron-tempest-plugin-sfc-xena
         - neutron-tempest-plugin-sfc-yoga
         - neutron-tempest-plugin-sfc-zed
+        - neutron-tempest-plugin-sfc-2023-1
         - neutron-tempest-plugin-bgpvpn-bagpipe
-        - neutron-tempest-plugin-bgpvpn-bagpipe-wallaby
-        - neutron-tempest-plugin-bgpvpn-bagpipe-xena
         - neutron-tempest-plugin-bgpvpn-bagpipe-yoga
         - neutron-tempest-plugin-bgpvpn-bagpipe-zed
+        - neutron-tempest-plugin-bgpvpn-bagpipe-2023-1
         - neutron-tempest-plugin-dynamic-routing
-        - neutron-tempest-plugin-dynamic-routing-wallaby
-        - neutron-tempest-plugin-dynamic-routing-xena
         - neutron-tempest-plugin-dynamic-routing-yoga
         - neutron-tempest-plugin-dynamic-routing-zed
+        - neutron-tempest-plugin-dynamic-routing-2023-1
         - neutron-tempest-plugin-fwaas
         - neutron-tempest-plugin-fwaas-zed
+        - neutron-tempest-plugin-fwaas-2023-1
         - neutron-tempest-plugin-vpnaas
-        - neutron-tempest-plugin-vpnaas-wallaby
-        - neutron-tempest-plugin-vpnaas-xena
         - neutron-tempest-plugin-vpnaas-yoga
         - neutron-tempest-plugin-vpnaas-zed
+        - neutron-tempest-plugin-vpnaas-2023-1
         - neutron-tempest-plugin-tap-as-a-service
-        - neutron-tempest-plugin-tap-as-a-service-xena
         - neutron-tempest-plugin-tap-as-a-service-yoga
         - neutron-tempest-plugin-tap-as-a-service-zed
+        - neutron-tempest-plugin-tap-as-a-service-2023-1
 
     gate:
       jobs:
diff --git a/zuul.d/queens_jobs.yaml b/zuul.d/queens_jobs.yaml
deleted file mode 100644
index 483d11d..0000000
--- a/zuul.d/queens_jobs.yaml
+++ /dev/null
@@ -1,275 +0,0 @@
-- job:
-    name: neutron-tempest-plugin-api-queens
-    nodeset: openstack-single-node-xenial
-    parent: neutron-tempest-plugin-base
-    override-checkout: stable/queens
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 0.3.0
-      - openstack/tempest
-    vars:
-      devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-l3: true
-        q-meta: true
-        q-metering: true
-      tempest_concurrency: 4
-      tempest_test_regex: ^neutron_tempest_plugin\.api
-      # TODO(slaweq): find a way to put this list of extensions in
-      # neutron repository and keep it different per branch,
-      # then it could be removed from here
-      network_api_extensions_common: &api_extensions
-        - address-scope
-        - agent
-        - allowed-address-pairs
-        - auto-allocated-topology
-        - availability_zone
-        - binding
-        - default-subnetpools
-        - dhcp_agent_scheduler
-        - dns-domain-ports
-        - dns-integration
-        - ext-gw-mode
-        - external-net
-        - extra_dhcp_opt
-        - extraroute
-        - flavors
-        - ip-substring-filtering
-        - l3-flavors
-        - l3-ha
-        - l3_agent_scheduler
-        - logging
-        - metering
-        - multi-provider
-        - net-mtu
-        - net-mtu-writable
-        - network-ip-availability
-        - network_availability_zone
-        - pagination
-        - port-security
-        - project-id
-        - provider
-        - qos
-        - qos-fip
-        - quotas
-        - quota_details
-        - rbac-policies
-        - router
-        - router_availability_zone
-        - security-group
-        - segment
-        - service-type
-        - sorting
-        - standard-attr-description
-        - standard-attr-revisions
-        - standard-attr-timestamp
-        - standard-attr-tag
-        - subnet_allocation
-        - subnet-service-types
-        - trunk
-        - trunk-details
-      network_api_extensions_tempest:
-        - dvr
-      network_available_features: &available_features
-        -
-      devstack_localrc:
-        NEUTRON_DEPLOY_MOD_WSGI: false
-        USE_PYTHON3: false
-        CIRROS_VERSION: 0.3.5
-        DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
-        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
-        ML2_L3_PLUGIN: router
-      devstack_local_conf:
-        post-config:
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            AGENT:
-              tunnel_types: gre,vxlan
-            ml2:
-              type_drivers: flat,geneve,vlan,gre,local,vxlan
-        test-config:
-          $TEMPEST_CONFIG:
-            neutron_plugin_options:
-              available_type_drivers: flat,geneve,vlan,gre,local,vxlan
-
-
-
-- job:
-    name: neutron-tempest-plugin-scenario-openvswitch-queens
-    parent: neutron-tempest-plugin-openvswitch
-    nodeset: openstack-single-node-xenial
-    override-checkout: stable/queens
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 0.3.0
-      - openstack/tempest
-    vars:
-      tempest_test_regex: "\
-          (^neutron_tempest_plugin.scenario)|\
-          (^tempest.api.compute.servers.test_attach_interfaces)|\
-          (^tempest.api.compute.servers.test_multiple_create)"
-      network_api_extensions: *api_extensions
-      network_available_features: *available_features
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Xenial keepalived don't knows this option yet
-              keepalived_use_no_track: False
-      # TODO(slaweq): remove trunks subport_connectivity test from blacklist
-      # when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
-      # NOTE(bcafarel): remove DNS test as queens pinned version does not have
-      # fix for https://bugs.launchpad.net/neutron/+bug/1826419
-      tempest_black_regex: "\
-          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
-          (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
-      devstack_localrc:
-        USE_PYTHON3: false
-        CIRROS_VERSION: 0.3.5
-        DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
-        # Queens
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-
-- job:
-    name: neutron-tempest-plugin-scenario-linuxbridge-queens
-    parent: neutron-tempest-plugin-base
-    nodeset: openstack-single-node-xenial
-    timeout: 10000
-    roles:
-      - zuul: openstack/neutron
-    override-checkout: stable/queens
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 0.3.0
-      - openstack/tempest
-    vars:
-      tempest_test_regex: "\
-          (^neutron_tempest_plugin.scenario)|\
-          (^tempest.api.compute.servers.test_attach_interfaces)|\
-          (^tempest.api.compute.servers.test_multiple_create)"
-      network_api_extensions: *api_extensions
-      network_available_features: *available_features
-      # NOTE(bcafarel): remove DNS test as queens pinned version does not have
-      # fix for https://bugs.launchpad.net/neutron/+bug/1826419
-      tempest_black_regex: "\
-          (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
-      devstack_localrc:
-        USE_PYTHON3: false
-        CIRROS_VERSION: 0.3.5
-        DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
-        Q_AGENT: linuxbridge
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
-        # Queens
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_CONF:
-            DEFAULT:
-              enable_dvr: false
-            AGENT:
-              debug_iptables_rules: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            ml2:
-              type_drivers: flat,vlan,local,vxlan
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Xenial keepalived don't knows this option yet
-              keepalived_use_no_track: False
-        test-config:
-          # NOTE: ignores linux bridge's trunk delete on bound port test
-          # for rocky branch (as https://review.opendev.org/#/c/605589/
-          # fix will not apply for rocky branch)
-          $TEMPEST_CONFIG:
-            neutron_plugin_options:
-              available_type_drivers: flat,vlan,local,vxlan
-              q_agent: None
-
-- job:
-    name: neutron-tempest-plugin-dvr-multinode-scenario-queens
-    parent: neutron-tempest-plugin-dvr-multinode-scenario
-    nodeset: openstack-two-node-xenial
-    override-checkout: stable/queens
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 0.3.0
-      - openstack/tempest
-    vars:
-      network_api_extensions_common: *api_extensions
-      # TODO(slaweq): remove trunks subport_connectivity test from blacklist
-      # when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
-      # NOTE(bcafarel): remove DNS test as queens pinned version does not have
-      # fix for https://bugs.launchpad.net/neutron/+bug/1826419
-      tempest_black_regex: "\
-          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
-          (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
-      devstack_localrc:
-        USE_PYTHON3: false
-        CIRROS_VERSION: 0.3.5
-        DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-
-- job:
-    name: neutron-tempest-plugin-designate-scenario-queens
-    parent: neutron-tempest-plugin-designate-scenario
-    nodeset: openstack-single-node-xenial
-    override-checkout: stable/queens
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 0.3.0
-      - name: openstack/designate-tempest-plugin
-        override-checkout: 0.7.0
-      - openstack/tempest
-    vars:
-      network_api_extensions_common: *api_extensions
-      # NOTE(bcafarel): remove DNS test as queens pinned version does not have
-      # fix for https://bugs.launchpad.net/neutron/+bug/1826419
-      tempest_black_regex: "\
-          (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
-      devstack_localrc:
-        USE_PYTHON3: false
-        CIRROS_VERSION: 0.3.5
-        DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
-        TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin /opt/stack/neutron-tempest-plugin"'
-        ADVANCED_INSTANCE_TYPE: ds512M
diff --git a/zuul.d/rocky_jobs.yaml b/zuul.d/rocky_jobs.yaml
deleted file mode 100644
index c6bbca8..0000000
--- a/zuul.d/rocky_jobs.yaml
+++ /dev/null
@@ -1,667 +0,0 @@
-- job:
-    name: neutron-tempest-plugin-api-rocky
-    nodeset: openstack-single-node-xenial
-    parent: neutron-tempest-plugin-base
-    description: |
-      This job run on py2 for stable/rocky gate.
-    override-checkout: stable/rocky
-    required-projects: &required-projects-rocky
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 0.9.0
-      - openstack/tempest
-    vars: &api_vars_rocky
-      devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-l3: true
-        q-meta: true
-        q-metering: true
-      tempest_concurrency: 4
-      tempest_test_regex: ^neutron_tempest_plugin\.api
-      # TODO(slaweq): find a way to put this list of extensions in
-      # neutron repository and keep it different per branch,
-      # then it could be removed from here
-      network_api_extensions_common: &api_extensions
-        - address-scope
-        - agent
-        - allowed-address-pairs
-        - auto-allocated-topology
-        - availability_zone
-        - binding
-        - default-subnetpools
-        - dhcp_agent_scheduler
-        - dns-domain-ports
-        - dns-integration
-        - empty-string-filtering
-        - expose-port-forwarding-in-fip
-        - ext-gw-mode
-        - external-net
-        - extra_dhcp_opt
-        - extraroute
-        - fip-port-details
-        - flavors
-        - floating-ip-port-forwarding
-        - ip-substring-filtering
-        - l3-flavors
-        - l3-ha
-        - l3_agent_scheduler
-        - logging
-        - metering
-        - multi-provider
-        - net-mtu
-        - net-mtu-writable
-        - network-ip-availability
-        - network_availability_zone
-        - pagination
-        - port-mac-address-regenerate
-        - port-security
-        - port-security-groups-filtering
-        - project-id
-        - provider
-        - qos
-        - qos-fip
-        - quotas
-        - quota_details
-        - rbac-policies
-        - router
-        - router_availability_zone
-        - security-group
-        - segment
-        - service-type
-        - sorting
-        - standard-attr-description
-        - standard-attr-revisions
-        - standard-attr-segment
-        - standard-attr-timestamp
-        - standard-attr-tag
-        - subnet_allocation
-        - subnet-service-types
-        - trunk
-        - trunk-details
-      network_api_extensions_tempest:
-        - dvr
-      devstack_localrc:
-        NEUTRON_DEPLOY_MOD_WSGI: false
-        USE_PYTHON3: false
-        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
-        ML2_L3_PLUGIN: router
-      devstack_local_conf:
-        post-config:
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            AGENT:
-              tunnel_types: gre,vxlan
-            ml2:
-              type_drivers: flat,geneve,vlan,gre,local,vxlan
-        test-config:
-          $TEMPEST_CONFIG:
-            neutron_plugin_options:
-              available_type_drivers: flat,geneve,vlan,gre,local,vxlan
-    # NOTE(gmann): This job run on py2 for stable/rocky gate.
-    branches:
-      - stable/rocky
-
-- job:
-    name: neutron-tempest-plugin-api-rocky
-    nodeset: openstack-single-node-xenial
-    parent: neutron-tempest-plugin-base
-    description: |
-      This job run on py3 for other than stable/rocky gate
-      which is nothing but neutron-tempest-pluign master gate.
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars:
-      <<: *api_vars_rocky
-      devstack_localrc:
-        USE_PYTHON3: True
-    branches: ^(?!stable/rocky).*$
-
-- job:
-    name: neutron-tempest-plugin-scenario-openvswitch-rocky
-    parent: neutron-tempest-plugin-base
-    description: |
-      This job run on py2 for stable/rocky gate.
-    nodeset: openstack-single-node-xenial
-    timeout: 10000
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars: &scenario_vars_rocky
-      tempest_test_regex: "\
-          (^neutron_tempest_plugin.scenario)|\
-          (^tempest.api.compute.servers.test_attach_interfaces)|\
-          (^tempest.api.compute.servers.test_multiple_create)"
-      devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-l3: true
-        q-meta: true
-        q-metering: true
-      network_api_extensions: *api_extensions
-      network_available_features: &available_features
-        -
-      devstack_localrc:
-        USE_PYTHON3: false
-        Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
-        # Rocky
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_CONF:
-            DEFAULT:
-              enable_dvr: false
-              l3_ha: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            agent:
-              tunnel_types: vxlan,gre
-            ovs:
-              tunnel_bridge: br-tun
-              bridge_mappings: public:br-ex
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Xenial keepalived don't knows this option yet
-              keepalived_use_no_track: False
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              available_features: "{{ network_available_features | join(',') }}"
-            neutron_plugin_options:
-              available_type_drivers: flat,vlan,local,vxlan
-              firewall_driver: openvswitch
-      # NOTE(bcafarel): filtering out unstable tests or tests with known
-      # issues in the used pinned version for this EM branch
-      tempest_black_regex: &rocky_tempest_exclude "\
-          (^neutron_tempest_plugin.scenario.admin.test_floatingip.FloatingIpTestCasesAdmin.test_two_vms_fips)|\
-          (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPQosTest.test_qos)|\
-          (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)|\
-          (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
-          (^neutron_tempest_plugin.scenario.test_ports.PortsTest.test_previously_used_port)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_ip_prefix)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_secgroup_inheritance)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_remote_group)|\
-          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
-          (^tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_reassign_port_between_servers)|\
-          (^tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesUnderV243Test.test_add_remove_fixed_ip)"
-    branches:
-      - stable/rocky
-    irrelevant-files: &openvswitch-scenario-irrelevant-files
-      - ^(test-|)requirements.txt$
-      - ^releasenotes/.*$
-      - ^doc/.*$
-      - ^setup.cfg$
-      - ^.*\.rst$
-      - ^neutron/locale/.*$
-      - ^neutron/tests/unit/.*$
-      - ^neutron/tests/fullstack/.*
-      - ^neutron/tests/functional/.*
-      - ^tools/.*$
-      - ^tox.ini$
-      - ^neutron/agent/windows/.*$
-      - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
-      - ^neutron/plugins/ml2/drivers/macvtap/.*$
-      - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
-
-- job:
-    name: neutron-tempest-plugin-scenario-openvswitch-rocky
-    parent: neutron-tempest-plugin-openvswitch
-    nodeset: openstack-single-node-xenial
-    description: |
-      This job run on py3 for other than stable/rocky gate
-      which is nothing but neutron-tempest-pluign master gate.
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars:
-      <<: *scenario_vars_rocky
-      devstack_localrc:
-        USE_PYTHON3: True
-        ADVANCED_INSTANCE_TYPE: ds512M
-    branches: ^(?!stable/rocky).*$
-    irrelevant-files: *openvswitch-scenario-irrelevant-files
-
-- job:
-    name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
-    parent: neutron-tempest-plugin-base
-    nodeset: openstack-single-node-xenial
-    timeout: 10000
-    description: |
-      This job run on py2 for stable/rocky gate.
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars: &openvswitch_vars_rocky
-      devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-l3: true
-        q-meta: true
-        q-metering: true
-      network_api_extensions: *api_extensions
-      network_available_features: *available_features
-      devstack_localrc:
-        USE_PYTHON3: false
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
-        # Rocky
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_CONF:
-            DEFAULT:
-              enable_dvr: false
-              l3_ha: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            agent:
-              tunnel_types: vxlan,gre
-            ovs:
-              tunnel_bridge: br-tun
-              bridge_mappings: public:br-ex
-            securitygroup:
-              firewall_driver: iptables_hybrid
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Xenial keepalived don't knows this option yet
-              keepalived_use_no_track: False
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              available_features: "{{ network_available_features | join(',') }}"
-            neutron_plugin_options:
-              available_type_drivers: flat,vlan,local,vxlan
-              firewall_driver: iptables_hybrid
-      tempest_black_regex: *rocky_tempest_exclude
-    branches:
-      - stable/rocky
-    irrelevant-files: &iptables_hybrid_irrelevant_files
-      - ^(test-|)requirements.txt$
-      - ^releasenotes/.*$
-      - ^doc/.*$
-      - ^setup.cfg$
-      - ^.*\.rst$
-      - ^neutron/locale/.*$
-      - ^neutron/tests/unit/.*$
-      - ^neutron/tests/fullstack/.*
-      - ^neutron/tests/functional/.*
-      - ^tools/.*$
-      - ^tox.ini$
-      - ^neutron/agent/linux/openvswitch_firewall/.*$
-      - ^neutron/agent/windows/.*$
-      - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
-      - ^neutron/plugins/ml2/drivers/macvtap/.*$
-      - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
-
-- job:
-    name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
-    parent: neutron-tempest-plugin-base
-    nodeset: openstack-single-node-xenial
-    timeout: 10000
-    description: |
-      This job run on py3 for other than stable/rocky gate
-      which is nothing but neutron-tempest-pluign master gate.
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars:
-      <<: *openvswitch_vars_rocky
-      devstack_localrc:
-        USE_PYTHON3: True
-        ADVANCED_INSTANCE_TYPE: ds512M
-    branches: ^(?!stable/rocky).*$
-    irrelevant-files: *iptables_hybrid_irrelevant_files
-
-- job:
-    name: neutron-tempest-plugin-scenario-linuxbridge-rocky
-    parent: neutron-tempest-plugin-base
-    timeout: 10000
-    description: |
-      This job run on py2 for stable/rocky gate.
-    nodeset: openstack-single-node-xenial
-    roles:
-      - zuul: openstack/neutron
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars: &linuxbridge_vars_rocky
-      tempest_test_regex: "\
-          (^neutron_tempest_plugin.scenario)|\
-          (^tempest.api.compute.servers.test_attach_interfaces)|\
-          (^tempest.api.compute.servers.test_multiple_create)"
-      network_api_extensions: *api_extensions
-      devstack_localrc:
-        USE_PYTHON3: false
-        Q_AGENT: linuxbridge
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
-        # Rocky
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_CONF:
-            DEFAULT:
-              enable_dvr: false
-            AGENT:
-              debug_iptables_rules: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            ml2:
-              type_drivers: flat,vlan,local,vxlan
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Xenial keepalived don't knows this option yet
-              keepalived_use_no_track: False
-        test-config:
-          # NOTE: ignores linux bridge's trunk delete on bound port test
-          # for rocky branch (as https://review.opendev.org/#/c/605589/
-          # fix will not apply for rocky branch)
-          $TEMPEST_CONFIG:
-            neutron_plugin_options:
-              available_type_drivers: flat,vlan,local,vxlan
-              q_agent: None
-      tempest_black_regex: *rocky_tempest_exclude
-    branches:
-      - stable/rocky
-
-- job:
-    name: neutron-tempest-plugin-scenario-linuxbridge-rocky
-    parent: neutron-tempest-plugin-base
-    nodeset: openstack-single-node-xenial
-    timeout: 10000
-    description: |
-      This job run on py3 for other than stable/rocky gate
-      which is nothing but neutron-tempest-pluign master gate.
-    roles:
-      - zuul: openstack/neutron
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars:
-      <<: *linuxbridge_vars_rocky
-      devstack_localrc:
-        USE_PYTHON3: True
-        Q_AGENT: linuxbridge
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        ADVANCED_INSTANCE_TYPE: ds512M
-    branches: ^(?!stable/rocky).*$
-
-- job:
-    name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
-    parent: tempest-multinode-full
-    description: |
-      This job run on py2 for stable/rocky gate.
-    nodeset: openstack-two-node-xenial
-    override-checkout: stable/rocky
-    roles:
-      - zuul: openstack/devstack
-    required-projects: *required-projects-rocky
-    pre-run: playbooks/dvr-multinode-scenario-pre-run.yaml
-    voting: false
-    vars: &multinode_scenario_vars_rocky
-      tempest_concurrency: 4
-      tox_envlist: all
-      tempest_test_regex: ^neutron_tempest_plugin\.scenario
-      # NOTE(slaweq): in case of some tests, which requires advanced image,
-      # default test timeout set to 1200 seconds may be not enough if job is
-      # run on slow node
-      tempest_test_timeout: 2400
-      network_api_extensions_common: *api_extensions
-      network_api_extensions_dvr:
-        - dvr
-      devstack_localrc:
-        USE_PYTHON3: false
-        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
-        PHYSICAL_NETWORK: default
-        CIRROS_VERSION: 0.5.1
-        DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
-        DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        BUILD_TIMEOUT: 784
-        TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-      devstack_plugins:
-        neutron: https://opendev.org/openstack/neutron.git
-        neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
-      tempest_plugins:
-        - neutron-tempest-plugin
-      devstack_services:
-        tls-proxy: false
-        tempest: true
-        neutron-dns: true
-        neutron-qos: true
-        neutron-segments: true
-        neutron-trunk: true
-        neutron-log: true
-        neutron-port-forwarding: true
-        # Cinder services
-        c-api: false
-        c-bak: false
-        c-sch: false
-        c-vol: false
-        cinder: false
-        # We don't need Swift to be run in the Neutron jobs
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_CONF:
-            quotas:
-              quota_router: 100
-              quota_floatingip: 500
-              quota_security_group: 100
-              quota_security_group_rule: 1000
-            DEFAULT:
-              router_distributed: True
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ml2:
-              type_drivers: flat,geneve,vlan,gre,local,vxlan
-              mechanism_drivers: openvswitch,l2population
-            ml2_type_vlan:
-              network_vlan_ranges: foo:1:10
-            ml2_type_vxlan:
-              vni_ranges: 1:2000
-            ml2_type_gre:
-              tunnel_id_ranges: 1:1000
-            agent:
-              enable_distributed_routing: True
-              l2_population: True
-              tunnel_types: vxlan,gre
-            ovs:
-              tunnel_bridge: br-tun
-              bridge_mappings: public:br-ex
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              agent_mode: dvr_snat
-            agent:
-              availability_zone: nova
-          $NEUTRON_DHCP_CONF:
-            agent:
-              availability_zone: nova
-          "/etc/neutron/api-paste.ini":
-            "composite:neutronapi_v2_0":
-              use: "call:neutron.auth:pipeline_factory"
-              noauth: "cors request_id catch_errors osprofiler extensions neutronapiapp_v2_0"
-              keystone: "cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0"
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              available_features: *available_features
-            neutron_plugin_options:
-              provider_vlans: foo,
-              agent_availability_zone: nova
-              image_is_advanced: true
-              available_type_drivers: flat,geneve,vlan,gre,local,vxlan
-              l3_agent_mode: dvr_snat
-              firewall_driver: openvswitch
-      tempest_black_regex: *rocky_tempest_exclude
-    branches:
-      - stable/rocky
-    group-vars: &multinode_scenario_group_vars_rocky
-      subnode:
-        devstack_services:
-          tls-proxy: false
-          q-agt: true
-          q-l3: true
-          q-meta: true
-          neutron-qos: true
-          neutron-trunk: true
-          neutron-log: true
-          neutron-port-forwarding: true
-          # Cinder services
-          c-bak: false
-          c-vol: false
-          # We don't need Swift to be run in the Neutron jobs
-          s-account: false
-          s-container: false
-          s-object: false
-          s-proxy: false
-        devstack_localrc:
-          USE_PYTHON3: true
-        devstack_local_conf:
-          post-config:
-            $NEUTRON_CONF:
-              DEFAULT:
-                router_distributed: True
-            # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-            # devstack-tempest job will be switched to use lib/neutron instead of
-            # lib/neutron-legacy
-            "/$NEUTRON_CORE_PLUGIN_CONF":
-              agent:
-                enable_distributed_routing: True
-                l2_population: True
-                tunnel_types: vxlan,gre
-              ovs:
-                tunnel_bridge: br-tun
-                bridge_mappings: public:br-ex
-            $NEUTRON_L3_CONF:
-              DEFAULT:
-                agent_mode: dvr_snat
-              agent:
-                availability_zone: nova
-    irrelevant-files: *openvswitch-scenario-irrelevant-files
-
-- job:
-    name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
-    parent: tempest-multinode-full
-    nodeset: openstack-two-node-xenial
-    description: |
-      This job run on py3 for other than stable/rocky gate
-      which is nothing but neutron-tempest-pluign master gate.
-    override-checkout: stable/rocky
-    vars:
-      <<: *multinode_scenario_vars_rocky
-      devstack_localrc:
-        USE_PYTHON3: True
-    required-projects: *required-projects-rocky
-    group-vars:
-      <<: *multinode_scenario_group_vars_rocky
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: True
-    branches: ^(?!stable/rocky).*$
-
-- job:
-    name: neutron-tempest-plugin-designate-scenario-rocky
-    parent: neutron-tempest-plugin-designate-scenario
-    description: |
-      This job run on py2 for stable/rocky gate.
-    nodeset: openstack-single-node-xenial
-    override-checkout: stable/rocky
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 0.9.0
-      - name: openstack/designate-tempest-plugin
-        override-checkout: 0.7.0
-      - openstack/tempest
-    vars: &designate_scenario_vars_rocky
-      network_api_extensions_common: *api_extensions
-      devstack_localrc:
-        USE_PYTHON3: false
-        TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin /opt/stack/neutron-tempest-plugin"'
-        ADVANCED_INSTANCE_TYPE: ds512M
-      # NOTE(bcafarel): filtering out unstable tests or tests with known
-      # issues in the used pinned version for this EM branch
-      tempest_black_regex: "(^neutron_tempest_plugin.scenario.test_dns_integration.DNSIntegrationAdminTests.test_port_on_special_network)"
-    branches:
-      - stable/rocky
-
-- job:
-    name: neutron-tempest-plugin-designate-scenario-rocky
-    parent: neutron-tempest-plugin-designate-scenario
-    nodeset: openstack-single-node-xenial
-    description: |
-      This job run on py3 for other than stable/rocky gate
-      which is nothing but neutron-tempest-plugin master gate.
-    override-checkout: stable/rocky
-    required-projects: *required-projects-rocky
-    vars:
-      <<: *designate_scenario_vars_rocky
-      devstack_localrc:
-        USE_PYTHON3: True
-    branches: ^(?!stable/rocky).*$
diff --git a/zuul.d/stein_jobs.yaml b/zuul.d/stein_jobs.yaml
deleted file mode 100644
index dc77ad3..0000000
--- a/zuul.d/stein_jobs.yaml
+++ /dev/null
@@ -1,383 +0,0 @@
-- job:
-    name: neutron-tempest-plugin-api-stein
-    parent: neutron-tempest-plugin-base
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/stein
-    required-projects: &required-projects-stein
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 1.1.0
-      - openstack/tempest
-    vars:
-      devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-l3: true
-        q-meta: true
-        q-metering: true
-      tempest_concurrency: 4
-      tempest_test_regex: ^neutron_tempest_plugin\.api
-      # TODO(slaweq): find a way to put this list of extensions in
-      # neutron repository and keep it different per branch,
-      # then it could be removed from here
-      network_api_extensions_common: &api_extensions
-        - address-scope
-        - agent
-        - allowed-address-pairs
-        - auto-allocated-topology
-        - availability_zone
-        - binding
-        - default-subnetpools
-        - dhcp_agent_scheduler
-        - dns-domain-ports
-        - dns-integration
-        - empty-string-filtering
-        - expose-port-forwarding-in-fip
-        - ext-gw-mode
-        - external-net
-        - extra_dhcp_opt
-        - extraroute
-        - filter-validation
-        - fip-port-details
-        - flavors
-        - floatingip-pools
-        - floating-ip-port-forwarding
-        - ip-substring-filtering
-        - l3-flavors
-        - l3-ha
-        - l3_agent_scheduler
-        - logging
-        - metering
-        - multi-provider
-        - net-mtu
-        - net-mtu-writable
-        - network-ip-availability
-        - network_availability_zone
-        - network-segment-range
-        - pagination
-        - port-resource-request
-        - port-mac-address-regenerate
-        - port-security
-        - port-security-groups-filtering
-        - project-id
-        - provider
-        - qos
-        - qos-bw-minimum-ingress
-        - qos-fip
-        - quotas
-        - quota_details
-        - rbac-policies
-        - rbac-security-groups
-        - router
-        - router_availability_zone
-        - security-group
-        - segment
-        - service-type
-        - sorting
-        - standard-attr-description
-        - standard-attr-revisions
-        - standard-attr-segment
-        - standard-attr-tag
-        - standard-attr-timestamp
-        - subnet_allocation
-        - subnet-service-types
-        - trunk
-        - trunk-details
-        - uplink-status-propagation
-      network_api_extensions_tempest:
-        - dvr
-      network_available_features: &available_features
-        -
-      # NOTE(bcafarel): filtering out unstable tests or tests with known
-      # issues in the used pinned version for this EM branch
-      tempest_black_regex: &stein_tempest_exclude "\
-          (^neutron_tempest_plugin.scenario.test_mtu.NetworkWritableMtuTest.test_connectivity_min_max_mtu)|\
-          (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
-          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_parent_port_connectivity_after_trunk_deleted_lb)|\
-          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
-          (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)"
-      devstack_localrc:
-        NEUTRON_DEPLOY_MOD_WSGI: false
-        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
-        Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
-        ML2_L3_PLUGIN: router
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-      devstack_local_conf:
-        post-config:
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            AGENT:
-              tunnel_types: gre,vxlan
-            ml2:
-              type_drivers: flat,geneve,vlan,gre,local,vxlan
-        test-config:
-          $TEMPEST_CONFIG:
-            neutron_plugin_options:
-              available_type_drivers: flat,geneve,vlan,gre,local,vxlan
-
-- job:
-    name: neutron-tempest-plugin-scenario-openvswitch-stein
-    parent: neutron-tempest-plugin-openvswitch
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/stein
-    required-projects: *required-projects-stein
-    vars:
-      tempest_test_regex: "\
-          (^neutron_tempest_plugin.scenario)|\
-          (^tempest.api.compute.servers.test_attach_interfaces)|\
-          (^tempest.api.compute.servers.test_multiple_create)"
-      network_api_extensions: *api_extensions
-      network_available_features: *available_features
-      tempest_black_regex: *stein_tempest_exclude
-      devstack_localrc:
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        # NOTE(bcafarel) guestmount binary not available on host OS
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Bionic keepalived don't knows this option yet
-              keepalived_use_no_track: False
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              available_features: ""
-            neutron_plugin_options:
-              ipv6_metadata: False
-
-- job:
-    name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-stein
-    parent: neutron-tempest-plugin-base
-    nodeset: openstack-single-node-bionic
-    timeout: 10000
-    override-checkout: stable/stein
-    required-projects: *required-projects-stein
-    vars:
-      tempest_test_regex: "\
-          (^neutron_tempest_plugin.scenario)|\
-          (^tempest.api.compute.servers.test_attach_interfaces)|\
-          (^tempest.api.compute.servers.test_multiple_create)"
-      devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-l3: true
-        q-meta: true
-        q-metering: true
-      network_api_extensions: *api_extensions
-      network_available_features: *available_features
-      tempest_black_regex: *stein_tempest_exclude
-      devstack_localrc:
-        Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
-        # NOTE(bcafarel) guestmount binary not available on host OS
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_CONF:
-            DEFAULT:
-              enable_dvr: false
-              l3_ha: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            agent:
-              tunnel_types: vxlan,gre
-            ovs:
-              tunnel_bridge: br-tun
-              bridge_mappings: public:br-ex
-            securitygroup:
-              firewall_driver: iptables_hybrid
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Bionic keepalived don't knows this option yet
-              keepalived_use_no_track: False
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              available_features: "{{ network_available_features | join(',') }}"
-            neutron_plugin_options:
-              available_type_drivers: flat,vlan,local,vxlan
-              firewall_driver: iptables_hybrid
-              ipv6_metadata: False
-    irrelevant-files:
-      - ^(test-|)requirements.txt$
-      - ^releasenotes/.*$
-      - ^doc/.*$
-      - ^setup.cfg$
-      - ^.*\.rst$
-      - ^neutron/locale/.*$
-      - ^neutron/tests/unit/.*$
-      - ^neutron/tests/fullstack/.*
-      - ^neutron/tests/functional/.*
-      - ^tools/.*$
-      - ^tox.ini$
-      - ^neutron/agent/linux/openvswitch_firewall/.*$
-      - ^neutron/agent/ovn/.*$
-      - ^neutron/agent/windows/.*$
-      - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
-      - ^neutron/plugins/ml2/drivers/macvtap/.*$
-      - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
-      - ^neutron/plugins/ml2/drivers/ovn/.*$
-
-- job:
-    name: neutron-tempest-plugin-scenario-linuxbridge-stein
-    parent: neutron-tempest-plugin-base
-    nodeset: openstack-single-node-bionic
-    timeout: 10000
-    roles:
-      - zuul: openstack/neutron
-    pre-run: playbooks/linuxbridge-scenario-pre-run.yaml
-    override-checkout: stable/stein
-    required-projects: *required-projects-stein
-    vars:
-      tempest_test_regex: "\
-          (^neutron_tempest_plugin.scenario)|\
-          (^tempest.api.compute.servers.test_attach_interfaces)|\
-          (^tempest.api.compute.servers.test_multiple_create)"
-      devstack_services:
-        # Disable OVN services
-        br-ex-tcpdump: false
-        br-int-flows: false
-        ovn-controller: false
-        ovn-northd: false
-        ovs-vswitchd: false
-        ovsdb-server: false
-        q-ovn-metadata-agent: false
-        # Neutron services
-        q-agt: true
-        q-dhcp: true
-        q-l3: true
-        q-meta: true
-        q-metering: true
-        # SG logging isn't supported by linuxbridge backend
-        neutron-log: false
-      network_api_extensions: *api_extensions
-      network_api_extensions_linuxbridge:
-        - vlan-transparent
-      network_available_features: *available_features
-      tempest_black_regex: *stein_tempest_exclude
-      devstack_localrc:
-        Q_AGENT: linuxbridge
-        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_linuxbridge) | join(',') }}"
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
-        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
-        # NOTE(bcafarel) guestmount binary not available on host OS
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-      devstack_local_conf:
-        post-config:
-          $NEUTRON_CONF:
-            DEFAULT:
-              enable_dvr: false
-              vlan_transparent: true
-              l3_ha: true
-            AGENT:
-              debug_iptables_rules: true
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
-          /$NEUTRON_CORE_PLUGIN_CONF:
-            ml2:
-              type_drivers: flat,vlan,local,vxlan
-              mechanism_drivers: linuxbridge
-          $NEUTRON_L3_CONF:
-            DEFAULT:
-              # NOTE(slaweq): on Bionic keepalived don't knows this option yet
-              keepalived_use_no_track: False
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              available_features: "{{ network_available_features | join(',') }}"
-            neutron_plugin_options:
-              available_type_drivers: flat,vlan,local,vxlan
-              q_agent: linuxbridge
-              firewall_driver: iptables
-              ipv6_metadata: False
-    irrelevant-files:
-      - ^(test-|)requirements.txt$
-      - ^releasenotes/.*$
-      - ^doc/.*$
-      - ^setup.cfg$
-      - ^.*\.rst$
-      - ^neutron/locale/.*$
-      - ^neutron/tests/unit/.*$
-      - ^neutron/tests/fullstack/.*
-      - ^neutron/tests/functional/.*
-      - ^tools/.*$
-      - ^tox.ini$
-      - ^neutron/agent/linux/openvswitch_firewall/.*$
-      - ^neutron/agent/ovn/.*$
-      - ^neutron/agent/windows/.*$
-      - ^neutron/plugins/ml2/drivers/openvswitch/.*$
-      - ^neutron/plugins/ml2/drivers/macvtap/.*$
-      - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
-      - ^neutron/plugins/ml2/drivers/ovn/.*$
-
-- job:
-    name: neutron-tempest-plugin-dvr-multinode-scenario-stein
-    parent: neutron-tempest-plugin-dvr-multinode-scenario
-    nodeset: openstack-two-node-bionic
-    override-checkout: stable/stein
-    required-projects: *required-projects-stein
-    vars:
-      network_api_extensions_common: *api_extensions
-
-- job:
-    name: neutron-tempest-plugin-designate-scenario-stein
-    parent: neutron-tempest-plugin-designate-scenario
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/stein
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 1.3.0
-      - name: openstack/designate-tempest-plugin
-        override-checkout: 0.7.0
-      - openstack/tempest
-    vars:
-      network_api_extensions_common: *api_extensions
-      devstack_localrc:
-        # NOTE(bcafarel) guestmount binary not available on host OS
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
diff --git a/zuul.d/train_jobs.yaml b/zuul.d/train_jobs.yaml
index 1cb5801..159feb2 100644
--- a/zuul.d/train_jobs.yaml
+++ b/zuul.d/train_jobs.yaml
@@ -117,9 +117,6 @@
         ML2_L3_PLUGIN: router
       devstack_local_conf:
         post-config:
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           /$NEUTRON_CORE_PLUGIN_CONF:
             AGENT:
               tunnel_types: gre,vxlan
@@ -242,28 +239,6 @@
       network_api_extensions_common: *api_extensions
 
 - job:
-    name: neutron-tempest-plugin-designate-scenario-train
-    parent: neutron-tempest-plugin-designate-scenario
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/train
-    required-projects:
-      - openstack/neutron
-      - name: openstack/neutron-tempest-plugin
-        override-checkout: 1.5.0
-      - openstack/tempest
-      - name: openstack/designate-tempest-plugin
-        override-checkout: 0.7.0
-    vars:
-      network_api_extensions_common: *api_extensions
-      devstack_localrc:
-        # NOTE(bcafarel) guestmount binary not available on host OS
-        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
-        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
-        ADVANCED_INSTANCE_TYPE: ds512M
-        ADVANCED_INSTANCE_USER: ubuntu
-        CUSTOMIZE_IMAGE: false
-
-- job:
     name: neutron-tempest-plugin-sfc-train
     parent: neutron-tempest-plugin-sfc
     nodeset: openstack-single-node-bionic
diff --git a/zuul.d/ussuri_jobs.yaml b/zuul.d/ussuri_jobs.yaml
index 8614fa9..5abc741 100644
--- a/zuul.d/ussuri_jobs.yaml
+++ b/zuul.d/ussuri_jobs.yaml
@@ -121,9 +121,6 @@
         CUSTOMIZE_IMAGE: false
       devstack_local_conf:
         post-config:
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           /$NEUTRON_CORE_PLUGIN_CONF:
             AGENT:
               tunnel_types: gre,vxlan
@@ -148,8 +145,14 @@
           (^tempest.api.compute.servers.test_multiple_create)"
       network_api_extensions: *api_extensions
       network_available_features: *available_features
-      devstack_localrc:
+      devstack_localrc: &localrc_scenarios_common
         NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+        # NOTE(bcafarel) guestmount binary not available on host OS
+        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+        ADVANCED_INSTANCE_TYPE: ds512M
+        ADVANCED_INSTANCE_USER: ubuntu
+        CUSTOMIZE_IMAGE: false
       devstack_local_conf:
         post-config:
           $NEUTRON_L3_CONF:
@@ -177,8 +180,11 @@
           (^tempest.api.compute.servers.test_multiple_create)"
       network_api_extensions: *api_extensions
       network_available_features: *available_features
-      devstack_localrc:
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+      # TODO(akatz): remove the established TCP session verification test
+      # once bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 is fixed
+      tempest_exclude_regex: "\
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
+      devstack_localrc: *localrc_scenarios_common
       devstack_local_conf:
         post-config:
           $NEUTRON_L3_CONF:
@@ -205,8 +211,7 @@
           (^tempest.api.compute.servers.test_multiple_create)"
       network_api_extensions: *api_extensions
       network_available_features: *available_features
-      devstack_localrc:
-        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+      devstack_localrc: *localrc_scenarios_common
       devstack_local_conf:
         post-config:
           $NEUTRON_L3_CONF:
@@ -254,6 +259,12 @@
         OVN_BRANCH: "v20.03.0"
         # NOTE(slaweq): IGMP Snooping requires OVN 20.12
         OVN_IGMP_SNOOPING_ENABLE: False
+        # NOTE(bcafarel) guestmount binary not available on host OS
+        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+        ADVANCED_INSTANCE_TYPE: ds512M
+        ADVANCED_INSTANCE_USER: ubuntu
+        CUSTOMIZE_IMAGE: false
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -279,9 +290,11 @@
       - name: openstack/neutron-tempest-plugin
         override-checkout: 1.6.0
       - openstack/tempest
-      - openstack/designate-tempest-plugin
+      - name: openstack/designate-tempest-plugin
+        override-checkout: 0.7.0
     vars:
-      network_api_extensions_common: *api_extensions
+      network_api_extensions: *api_extensions
+      devstack_localrc: *localrc_scenarios_common
 
 - job:
     name: neutron-tempest-plugin-sfc-ussuri
diff --git a/zuul.d/victoria_jobs.yaml b/zuul.d/victoria_jobs.yaml
index f7cbc3f..1f611db 100644
--- a/zuul.d/victoria_jobs.yaml
+++ b/zuul.d/victoria_jobs.yaml
@@ -1,6 +1,7 @@
 - job:
     name: neutron-tempest-plugin-api-victoria
     parent: neutron-tempest-plugin-base
+    nodeset: openstack-single-node-focal
     override-checkout: stable/victoria
     required-projects: &required-projects-victoria
       - openstack/neutron
@@ -120,9 +121,6 @@
         CUSTOMIZE_IMAGE: false
       devstack_local_conf:
         post-config:
-          # NOTE(slaweq): We can get rid of this hardcoded absolute path when
-          # devstack-tempest job will be switched to use lib/neutron instead of
-          # lib/neutron-legacy
           /$NEUTRON_CORE_PLUGIN_CONF:
             AGENT:
               tunnel_types: gre,vxlan
@@ -173,6 +171,10 @@
           (^tempest.api.compute.servers.test_multiple_create)"
       network_api_extensions: *api_extensions
       network_available_features: *available_features
+      # TODO(akatz): remove the established TCP session verification test
+      # once bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 is fixed
+      tempest_exclude_regex: "\
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
       devstack_localrc:
         NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
         # NOTE(bcafarel) guestmount binary not available on host OS
@@ -236,6 +238,8 @@
         ADVANCED_INSTANCE_TYPE: ds512M
         ADVANCED_INSTANCE_USER: ubuntu
         CUSTOMIZE_IMAGE: false
+        OVN_BRANCH: "v21.06.0"
+        OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -245,6 +249,7 @@
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario-victoria
     parent: neutron-tempest-plugin-dvr-multinode-scenario
+    nodeset: openstack-two-node-focal
     override-checkout: stable/victoria
     required-projects: *required-projects-victoria
     vars:
@@ -274,6 +279,7 @@
 - job:
     name: neutron-tempest-plugin-sfc-victoria
     parent: neutron-tempest-plugin-sfc
+    nodeset: openstack-single-node-focal
     override-checkout: stable/victoria
     required-projects: *required-projects-victoria
     vars:
@@ -282,6 +288,7 @@
 - job:
     name: neutron-tempest-plugin-bgpvpn-bagpipe-victoria
     parent: neutron-tempest-plugin-bgpvpn-bagpipe
+    nodeset: openstack-single-node-focal
     override-checkout: stable/victoria
     required-projects: *required-projects-victoria
     vars:
@@ -290,6 +297,7 @@
 - job:
     name: neutron-tempest-plugin-dynamic-routing-victoria
     parent: neutron-tempest-plugin-dynamic-routing
+    nodeset: openstack-single-node-focal
     override-checkout: stable/victoria
     required-projects: *required-projects-victoria
     vars:
@@ -298,6 +306,7 @@
 - job:
     name: neutron-tempest-plugin-vpnaas-victoria
     parent: neutron-tempest-plugin-vpnaas
+    nodeset: openstack-single-node-focal
     override-checkout: stable/victoria
     required-projects: *required-projects-victoria
     vars:
diff --git a/zuul.d/wallaby_jobs.yaml b/zuul.d/wallaby_jobs.yaml
index 17a5931..92a3e50 100644
--- a/zuul.d/wallaby_jobs.yaml
+++ b/zuul.d/wallaby_jobs.yaml
@@ -1,7 +1,13 @@
 - job:
     name: neutron-tempest-plugin-api-wallaby
     parent: neutron-tempest-plugin-base
+    nodeset: openstack-single-node-focal
     override-checkout: stable/wallaby
+    required-projects: &required-projects-wallaby
+      - openstack/neutron
+      - name: openstack/neutron-tempest-plugin
+        override-checkout: 1.8.0
+      - openstack/tempest
     vars:
       tempest_concurrency: 4
       tempest_test_regex: ^neutron_tempest_plugin\.api
@@ -93,6 +99,7 @@
     name: neutron-tempest-plugin-scenario-openvswitch-wallaby
     parent: neutron-tempest-plugin-openvswitch
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -112,6 +119,7 @@
     name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-wallaby
     parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -119,6 +127,13 @@
           (^tempest.api.compute.servers.test_multiple_create)"
       network_api_extensions: *api_extensions
       network_available_features: *available_features
+      # TODO(slaweq): remove trunks subport_connectivity test from blacklist
+      # when bug https://bugs.launchpad.net/neutron/+bug/1838760 is fixed
+      # TODO(akatz): remove established tcp session verification test when
+      # bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 is fixed
+      tempest_exclude_regex: "\
+          (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
       devstack_localrc:
         NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
       devstack_local_conf:
@@ -131,6 +146,7 @@
     name: neutron-tempest-plugin-scenario-linuxbridge-wallaby
     parent: neutron-tempest-plugin-linuxbridge
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -150,6 +166,7 @@
     name: neutron-tempest-plugin-scenario-ovn-wallaby
     parent: neutron-tempest-plugin-ovn
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -158,6 +175,8 @@
       network_api_extensions: *api_extensions
       devstack_localrc:
         NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+        OVN_BRANCH: "v21.06.0"
+        OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -167,7 +186,9 @@
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario-wallaby
     parent: neutron-tempest-plugin-dvr-multinode-scenario
+    nodeset: openstack-two-node-focal
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       network_api_extensions_common: *api_extensions
 
@@ -175,33 +196,48 @@
     name: neutron-tempest-plugin-designate-scenario-wallaby
     parent: neutron-tempest-plugin-designate-scenario
     override-checkout: stable/wallaby
+    required-projects:
+      - openstack/neutron
+      - name: openstack/neutron-tempest-plugin
+        override-checkout: 1.8.0
+      - openstack/tempest
+      - name: openstack/designate-tempest-plugin
+        override-checkout: 0.16.0
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-sfc-wallaby
     parent: neutron-tempest-plugin-sfc
+    nodeset: openstack-single-node-focal
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-bgpvpn-bagpipe-wallaby
     parent: neutron-tempest-plugin-bgpvpn-bagpipe
+    nodeset: openstack-single-node-focal
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       network_api_extensions: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-dynamic-routing-wallaby
     parent: neutron-tempest-plugin-dynamic-routing
+    nodeset: openstack-single-node-focal
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-vpnaas-wallaby
     parent: neutron-tempest-plugin-vpnaas
+    nodeset: openstack-single-node-focal
     override-checkout: stable/wallaby
+    required-projects: *required-projects-wallaby
     vars:
       network_api_extensions_common: *api_extensions
diff --git a/zuul.d/xena_jobs.yaml b/zuul.d/xena_jobs.yaml
index 2cd82d1..25d63a9 100644
--- a/zuul.d/xena_jobs.yaml
+++ b/zuul.d/xena_jobs.yaml
@@ -1,7 +1,13 @@
 - job:
     name: neutron-tempest-plugin-api-xena
     parent: neutron-tempest-plugin-base
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
+    required-projects: &required-projects-xena
+      - openstack/neutron
+      - name: openstack/neutron-tempest-plugin
+        override-checkout: 2.3.0
+      - openstack/tempest
     vars:
       tempest_concurrency: 4
       tempest_test_regex: ^neutron_tempest_plugin\.api
@@ -95,6 +101,7 @@
     name: neutron-tempest-plugin-scenario-openvswitch-xena
     parent: neutron-tempest-plugin-openvswitch
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -114,6 +121,7 @@
     name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-xena
     parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -133,6 +141,7 @@
     name: neutron-tempest-plugin-scenario-linuxbridge-xena
     parent: neutron-tempest-plugin-linuxbridge
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -152,6 +161,7 @@
     name: neutron-tempest-plugin-scenario-ovn-xena
     parent: neutron-tempest-plugin-ovn
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -169,7 +179,9 @@
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario-xena
     parent: neutron-tempest-plugin-dvr-multinode-scenario
+    nodeset: openstack-two-node-focal
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       network_api_extensions_common: *api_extensions
 
@@ -177,40 +189,73 @@
     name: neutron-tempest-plugin-designate-scenario-xena
     parent: neutron-tempest-plugin-designate-scenario
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-sfc-xena
     parent: neutron-tempest-plugin-sfc
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-bgpvpn-bagpipe-xena
     parent: neutron-tempest-plugin-bgpvpn-bagpipe
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       network_api_extensions: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-dynamic-routing-xena
     parent: neutron-tempest-plugin-dynamic-routing
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       network_api_extensions_common: *api_extensions
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
+        Q_AGENT: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+      devstack_services:
+        # Disable OVN services
+        br-ex-tcpdump: false
+        br-int-flows: false
+        ovn-controller: false
+        ovn-northd: false
+        ovs-vswitchd: false
+        ovsdb-server: false
+        q-ovn-metadata-agent: false
+        # Neutron services
+        q-agt: true
+        q-dhcp: true
+        q-meta: true
+        q-metering: true
+        q-l3: true
+        neutron-dr: true
+        neutron-dr-agent: true
 
 - job:
     name: neutron-tempest-plugin-vpnaas-xena
     parent: neutron-tempest-plugin-vpnaas
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-tap-as-a-service-xena
     parent: neutron-tempest-plugin-tap-as-a-service
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
+    required-projects: *required-projects-xena
     vars:
       network_api_extensions_common: *api_extensions
diff --git a/zuul.d/yoga_jobs.yaml b/zuul.d/yoga_jobs.yaml
index 46b9ca2..04c9ddd 100644
--- a/zuul.d/yoga_jobs.yaml
+++ b/zuul.d/yoga_jobs.yaml
@@ -1,6 +1,7 @@
 - job:
     name: neutron-tempest-plugin-api-yoga
     parent: neutron-tempest-plugin-base
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
     vars:
       tempest_concurrency: 4
@@ -97,6 +98,7 @@
     name: neutron-tempest-plugin-scenario-openvswitch-yoga
     parent: neutron-tempest-plugin-openvswitch
     override-checkout: stable/yoga
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -116,6 +118,7 @@
     name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga
     parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
     override-checkout: stable/yoga
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -135,6 +138,7 @@
     name: neutron-tempest-plugin-scenario-linuxbridge-yoga
     parent: neutron-tempest-plugin-linuxbridge
     override-checkout: stable/yoga
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -154,6 +158,7 @@
     name: neutron-tempest-plugin-scenario-ovn-yoga
     parent: neutron-tempest-plugin-ovn
     override-checkout: stable/yoga
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.scenario)|\
@@ -173,6 +178,7 @@
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario-yoga
     parent: neutron-tempest-plugin-dvr-multinode-scenario
+    nodeset: openstack-two-node-focal
     override-checkout: stable/yoga
     vars:
       network_api_extensions_common: *api_extensions
@@ -181,12 +187,14 @@
     name: neutron-tempest-plugin-designate-scenario-yoga
     parent: neutron-tempest-plugin-designate-scenario
     override-checkout: stable/yoga
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-sfc-yoga
     parent: neutron-tempest-plugin-sfc
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
     vars:
       network_api_extensions_common: *api_extensions
@@ -194,6 +202,7 @@
 - job:
     name: neutron-tempest-plugin-bgpvpn-bagpipe-yoga
     parent: neutron-tempest-plugin-bgpvpn-bagpipe
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
     vars:
       network_api_extensions: *api_extensions
@@ -201,13 +210,37 @@
 - job:
     name: neutron-tempest-plugin-dynamic-routing-yoga
     parent: neutron-tempest-plugin-dynamic-routing
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
     vars:
       network_api_extensions_common: *api_extensions
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
+        Q_AGENT: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+      devstack_services:
+        # Disable OVN services
+        br-ex-tcpdump: false
+        br-int-flows: false
+        ovn-controller: false
+        ovn-northd: false
+        ovs-vswitchd: false
+        ovsdb-server: false
+        q-ovn-metadata-agent: false
+        # Neutron services
+        q-agt: true
+        q-dhcp: true
+        q-meta: true
+        q-metering: true
+        q-l3: true
+        neutron-dr: true
+        neutron-dr-agent: true
 
 - job:
     name: neutron-tempest-plugin-vpnaas-yoga
     parent: neutron-tempest-plugin-vpnaas
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
     vars:
       network_api_extensions_common: *api_extensions
@@ -215,6 +248,7 @@
 - job:
     name: neutron-tempest-plugin-tap-as-a-service-yoga
     parent: neutron-tempest-plugin-tap-as-a-service
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
     vars:
       network_api_extensions_common: *api_extensions
diff --git a/zuul.d/zed_jobs.yaml b/zuul.d/zed_jobs.yaml
index acbd234..8c70a66 100644
--- a/zuul.d/zed_jobs.yaml
+++ b/zuul.d/zed_jobs.yaml
@@ -2,9 +2,12 @@
     name: neutron-tempest-plugin-openvswitch-zed
     parent: neutron-tempest-plugin-openvswitch
     override-checkout: stable/zed
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       network_api_extensions_openvswitch:
         - local_ip
+        - port-resource-request
+        - port-resource-request-groups
         - qos-bw-minimum-ingress
       tempest_test_regex: "\
           (^neutron_tempest_plugin.api)|\
@@ -45,7 +48,6 @@
         - l3-ha
         - l3-ndp-proxy
         - l3_agent_scheduler
-        - logging
         - metering
         - multi-provider
         - net-mtu
@@ -55,8 +57,6 @@
         - network-segment-range
         - pagination
         - port-device-profile
-        - port-resource-request
-        - port-resource-request-groups
         - port-mac-address-regenerate
         - port-security
         - port-security-groups-filtering
@@ -107,10 +107,14 @@
     name: neutron-tempest-plugin-openvswitch-iptables_hybrid-zed
     parent: neutron-tempest-plugin-openvswitch-iptables_hybrid
     override-checkout: stable/zed
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
+      network_api_extensions_common: *api_extensions
       network_api_extensions_openvswitch:
         - local_ip
         - logging
+        - port-resource-request
+        - port-resource-request-groups
       tempest_test_regex: "\
           (^neutron_tempest_plugin.api)|\
           (^neutron_tempest_plugin.scenario)|\
@@ -122,7 +126,8 @@
       # bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 will be fixed
       tempest_exclude_regex: "\
           (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
       network_available_features: *available_features
       devstack_localrc:
         NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_openvswitch) | join(',') }}"
@@ -136,7 +141,9 @@
     name: neutron-tempest-plugin-linuxbridge-zed
     parent: neutron-tempest-plugin-linuxbridge
     override-checkout: stable/zed
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
+      network_api_extensions_common: *api_extensions
       network_api_extensions_linuxbridge:
         - vlan-transparent
       tempest_test_regex: "\
@@ -146,7 +153,8 @@
           (^tempest.api.compute.servers.test_multiple_create)"
       tempest_exclude_regex: "\
           (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatefulNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_security_groups.StatelessNetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
           (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPPortDetailsTest.test_floatingip_port_details)"
       network_available_features: *available_features
       devstack_localrc:
@@ -161,6 +169,7 @@
     name: neutron-tempest-plugin-ovn-zed
     parent: neutron-tempest-plugin-ovn
     override-checkout: stable/zed
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       tempest_test_regex: "\
           (^neutron_tempest_plugin.api)|\
@@ -183,6 +192,7 @@
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario-zed
     parent: neutron-tempest-plugin-dvr-multinode-scenario
+    nodeset: openstack-two-node-focal
     override-checkout: stable/zed
     vars:
       network_api_extensions_common: *api_extensions
@@ -191,12 +201,14 @@
     name: neutron-tempest-plugin-designate-scenario-zed
     parent: neutron-tempest-plugin-designate-scenario
     override-checkout: stable/zed
+    nodeset: neutron-nested-virt-ubuntu-focal
     vars:
       network_api_extensions_common: *api_extensions
 
 - job:
     name: neutron-tempest-plugin-sfc-zed
     parent: neutron-tempest-plugin-sfc
+    nodeset: openstack-single-node-focal
     override-checkout: stable/zed
     vars:
       network_api_extensions_common: *api_extensions
@@ -204,6 +216,7 @@
 - job:
     name: neutron-tempest-plugin-bgpvpn-bagpipe-zed
     parent: neutron-tempest-plugin-bgpvpn-bagpipe
+    nodeset: openstack-single-node-focal
     override-checkout: stable/zed
     vars:
       network_api_extensions: *api_extensions
@@ -211,13 +224,37 @@
 - job:
     name: neutron-tempest-plugin-dynamic-routing-zed
     parent: neutron-tempest-plugin-dynamic-routing
+    nodeset: openstack-single-node-focal
     override-checkout: stable/zed
     vars:
       network_api_extensions_common: *api_extensions
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
+        Q_AGENT: openvswitch
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+        Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+      devstack_services:
+        # Disable OVN services
+        br-ex-tcpdump: false
+        br-int-flows: false
+        ovn-controller: false
+        ovn-northd: false
+        ovs-vswitchd: false
+        ovsdb-server: false
+        q-ovn-metadata-agent: false
+        # Neutron services
+        q-agt: true
+        q-dhcp: true
+        q-meta: true
+        q-metering: true
+        q-l3: true
+        neutron-dr: true
+        neutron-dr-agent: true
 
 - job:
     name: neutron-tempest-plugin-fwaas-zed
     parent: neutron-tempest-plugin-fwaas
+    nodeset: openstack-single-node-focal
     override-checkout: stable/zed
     vars:
       network_api_extensions_common: *api_extensions
@@ -225,6 +262,7 @@
 - job:
     name: neutron-tempest-plugin-vpnaas-zed
     parent: neutron-tempest-plugin-vpnaas
+    nodeset: openstack-single-node-focal
     override-checkout: stable/zed
     vars:
       network_api_extensions_common: *api_extensions
@@ -232,6 +270,7 @@
 - job:
     name: neutron-tempest-plugin-tap-as-a-service-zed
     parent: neutron-tempest-plugin-tap-as-a-service
+    nodeset: openstack-single-node-focal
     override-checkout: stable/zed
     vars:
       network_api_extensions_common: *api_extensions