Merge "[sfc] Remove setup_clients method"
diff --git a/neutron_tempest_plugin/api/clients.py b/neutron_tempest_plugin/api/clients.py
index 2855a7a..053e5ea 100644
--- a/neutron_tempest_plugin/api/clients.py
+++ b/neutron_tempest_plugin/api/clients.py
@@ -89,7 +89,8 @@
         self.interfaces_client = interfaces_client.InterfacesClient(
             self.auth_provider, **params)
         self.keypairs_client = keypairs_client.KeyPairsClient(
-            self.auth_provider, **params)
+            self.auth_provider, ssh_key_type=CONF.validation.ssh_key_type,
+            **params)
         self.hv_client = hypervisor_client.HypervisorClient(
             self.auth_provider, **params)
         self.az_client = availability_zone_client.AvailabilityZoneClient(
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index 59a0eb6..2929542 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -1399,8 +1399,6 @@
 
 
 class QosMinimumPpsRuleTestJSON(base.BaseAdminNetworkTest):
-    RULE_NAME = qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE + "_rule"
-    RULES_NAME = RULE_NAME + "s"
     required_extensions = [qos_apidef.ALIAS]
 
     @classmethod
@@ -1419,6 +1417,8 @@
     def setUp(self):
         super(QosMinimumPpsRuleTestJSON, self).setUp()
         self.policy_name = data_utils.rand_name(name='test', prefix='policy')
+        self.RULE_NAME = qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE + "_rule"
+        self.RULES_NAME = self.RULE_NAME + "s"
 
     def _create_qos_min_pps_rule(self, policy_id, rule_data):
         rule = self.min_pps_client.create_minimum_packet_rate_rule(
diff --git a/neutron_tempest_plugin/api/test_routers_negative.py b/neutron_tempest_plugin/api/test_routers_negative.py
index a4b3619..9c83fc7 100644
--- a/neutron_tempest_plugin/api/test_routers_negative.py
+++ b/neutron_tempest_plugin/api/test_routers_negative.py
@@ -13,6 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from neutron_lib import constants
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
@@ -91,7 +92,13 @@
         self.create_router_interface(self.router['id'], subnet['id'])
         port_ids = [
             item['id'] for item in self.admin_client.list_ports(
-                device_id=self.router['id'])['ports']]
+                device_id=self.router['id'])['ports']
+            if item['device_owner'] not in [
+                constants.DEVICE_OWNER_ROUTER_HA_INTF,
+                constants.DEVICE_OWNER_HA_REPLICATED_INT]]
+        if not port_ids:
+            self.fail("No ports other than HA ports found for the router %s" %
+                      self.router['id'])
         for port_id in port_ids:
             with testtools.ExpectedException(lib_exc.Conflict):
                 self.admin_client.delete_port(port_id)
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/manager.py b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
index 4ff1c0d..90c2bb1 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/manager.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
@@ -14,325 +14,25 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import subprocess
-
-import netaddr
 from oslo_log import log
-from oslo_utils import netutils
 
-from tempest.common import compute
 from tempest.common import utils
-from tempest.common.utils.linux import remote_client
-from tempest.common.utils import net_utils
-from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import exceptions as lib_exc
-import tempest.test
+from tempest.scenario import manager
 
 CONF = config.CONF
 
 LOG = log.getLogger(__name__)
 
 
-class ScenarioTest(tempest.test.BaseTestCase):
+class ScenarioTest(manager.NetworkScenarioTest):
     """Base class for scenario tests. Uses tempest own clients. """
 
     credentials = ['primary']
 
-    @classmethod
-    def setup_clients(cls):
-        super(ScenarioTest, cls).setup_clients()
-        # Clients (in alphabetical order)
-        cls.keypairs_client = cls.os_primary.keypairs_client
-        cls.servers_client = cls.os_primary.servers_client
-        # Neutron network client
-        cls.networks_client = cls.os_primary.networks_client
-        cls.ports_client = cls.os_primary.ports_client
-        cls.routers_client = cls.os_primary.routers_client
-        cls.subnets_client = cls.os_primary.subnets_client
-        cls.floating_ips_client = cls.os_primary.floating_ips_client
-        cls.security_groups_client = cls.os_primary.security_groups_client
-        cls.security_group_rules_client = (
-            cls.os_primary.security_group_rules_client)
-
-    # ## Test functions library
-    #
-    # The create_[resource] functions only return body and discard the
-    # resp part which is not used in scenario tests
-
-    def _create_port(self, network_id, client=None, namestart='port-quotatest',
-                     **kwargs):
-        if not client:
-            client = self.ports_client
-        name = data_utils.rand_name(namestart)
-        result = client.create_port(
-            name=name,
-            network_id=network_id,
-            **kwargs)
-        self.assertIsNotNone(result, 'Unable to allocate port')
-        port = result['port']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_port, port['id'])
-        return port
-
-    def create_keypair(self, client=None):
-        if not client:
-            client = self.keypairs_client
-        name = data_utils.rand_name(self.__class__.__name__)
-        # We don't need to create a keypair by pubkey in scenario
-        body = client.create_keypair(name=name)
-        self.addCleanup(client.delete_keypair, name)
-        return body['keypair']
-
-    def create_server(self, name=None, image_id=None, flavor=None,
-                      validatable=False, wait_until='ACTIVE',
-                      clients=None, **kwargs):
-        """Wrapper utility that returns a test server.
-
-        This wrapper utility calls the common create test server and
-        returns a test server. The purpose of this wrapper is to minimize
-        the impact on the code of the tests already using this
-        function.
-        """
-
-        # NOTE(jlanoux): As a first step, ssh checks in the scenario
-        # tests need to be run regardless of the run_validation and
-        # validatable parameters and thus until the ssh validation job
-        # becomes voting in CI. The test resources management and IP
-        # association are taken care of in the scenario tests.
-        # Therefore, the validatable parameter is set to false in all
-        # those tests. In this way create_server just return a standard
-        # server and the scenario tests always perform ssh checks.
-
-        # Needed for the cross_tenant_traffic test:
-        if clients is None:
-            clients = self.os_primary
-
-        if name is None:
-            name = data_utils.rand_name(self.__class__.__name__ + "-server")
-
-        vnic_type = CONF.network.port_vnic_type
-
-        # If vnic_type is configured create port for
-        # every network
-        if vnic_type:
-            ports = []
-
-            create_port_body = {'binding:vnic_type': vnic_type,
-                                'namestart': 'port-smoke'}
-            if kwargs:
-                # Convert security group names to security group ids
-                # to pass to create_port
-                if 'security_groups' in kwargs:
-                    security_groups = \
-                        clients.security_groups_client.list_security_groups(
-                        ).get('security_groups')
-                    sec_dict = dict([(s['name'], s['id'])
-                                    for s in security_groups])
-
-                    sec_groups_names = [s['name'] for s in kwargs.pop(
-                        'security_groups')]
-                    security_groups_ids = [sec_dict[s]
-                                           for s in sec_groups_names]
-
-                    if security_groups_ids:
-                        create_port_body[
-                            'security_groups'] = security_groups_ids
-                networks = kwargs.pop('networks', [])
-            else:
-                networks = []
-
-            # If there are no networks passed to us we look up
-            # for the project's private networks and create a port.
-            # The same behaviour as we would expect when passing
-            # the call to the clients with no networks
-            if not networks:
-                networks = clients.networks_client.list_networks(
-                    **{'router:external': False, 'fields': 'id'})['networks']
-
-            # It's net['uuid'] if networks come from kwargs
-            # and net['id'] if they come from
-            # clients.networks_client.list_networks
-            for net in networks:
-                net_id = net.get('uuid', net.get('id'))
-                if 'port' not in net:
-                    port = self._create_port(network_id=net_id,
-                                             client=clients.ports_client,
-                                             **create_port_body)
-                    ports.append({'port': port['id']})
-                else:
-                    ports.append({'port': net['port']})
-            if ports:
-                kwargs['networks'] = ports
-            self.ports = ports
-
-        tenant_network = self.get_tenant_network()
-
-        body, servers = compute.create_test_server(
-            clients,
-            tenant_network=tenant_network,
-            wait_until=wait_until,
-            name=name, flavor=flavor,
-            image_id=image_id, **kwargs)
-
-        self.addCleanup(waiters.wait_for_server_termination,
-                        clients.servers_client, body['id'])
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        clients.servers_client.delete_server, body['id'])
-        server = clients.servers_client.show_server(body['id'])['server']
-        return server
-
-    def get_remote_client(self, ip_address, username=None, private_key=None):
-        """Get a SSH client to a remote server
-
-        @param ip_address the server floating or fixed IP address to use
-                          for ssh validation
-        @param username name of the Linux account on the remote server
-        @param private_key the SSH private key to use
-        @return a RemoteClient object
-        """
-
-        if username is None:
-            username = CONF.validation.image_ssh_user
-        # Set this with 'keypair' or others to log in with keypair or
-        # username/password.
-        if CONF.validation.auth_method == 'keypair':
-            password = None
-            if private_key is None:
-                private_key = self.keypair['private_key']
-        else:
-            password = CONF.validation.image_ssh_password
-            private_key = None
-        linux_client = remote_client.RemoteClient(ip_address, username,
-                                                  pkey=private_key,
-                                                  password=password)
-        try:
-            linux_client.validate_authentication()
-        except Exception as e:
-            message = ('Initializing SSH connection to %(ip)s failed. '
-                       'Error: %(error)s' % {'ip': ip_address,
-                                             'error': e})
-            caller = test_utils.find_test_caller()
-            if caller:
-                message = '(%s) %s' % (caller, message)
-            LOG.exception(message)
-            self._log_console_output()
-            raise
-
-        return linux_client
-
-    def _log_console_output(self, servers=None):
-        if not CONF.compute_feature_enabled.console_output:
-            LOG.debug('Console output not supported, cannot log')
-            return
-        if not servers:
-            servers = self.servers_client.list_servers()
-            servers = servers['servers']
-        for server in servers:
-            try:
-                console_output = self.servers_client.get_console_output(
-                    server['id'])['output']
-                LOG.debug('Console output for %s\nbody=\n%s',
-                          server['id'], console_output)
-            except lib_exc.NotFound:
-                LOG.debug("Server %s disappeared(deleted) while looking "
-                          "for the console log", server['id'])
-
-    def _log_net_info(self, exc):
-        # network debug is called as part of ssh init
-        if not isinstance(exc, lib_exc.SSHTimeout):
-            LOG.debug('Network information on a devstack host')
-
-    def ping_ip_address(self, ip_address, should_succeed=True,
-                        ping_timeout=None, mtu=None):
-        timeout = ping_timeout or CONF.validation.ping_timeout
-        cmd = ['ping', '-c1', '-w1']
-
-        if mtu:
-            cmd += [
-                # don't fragment
-                '-M', 'do',
-                # ping receives just the size of ICMP payload
-                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
-            ]
-        cmd.append(ip_address)
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.communicate()
-
-            return (proc.returncode == 0) == should_succeed
-
-        caller = test_utils.find_test_caller()
-        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
-                  ' expected result is %(should_succeed)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'should_succeed':
-                      'reachable' if should_succeed else 'unreachable'
-                  })
-        result = test_utils.call_until_true(ping, timeout, 1)
-        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
-                  'ping result is %(result)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'result': 'expected' if result else 'unexpected'
-                  })
-        return result
-
-    def check_vm_connectivity(self, ip_address,
-                              username=None,
-                              private_key=None,
-                              should_connect=True,
-                              mtu=None):
-        """Check server connectivity
-
-        :param ip_address: server to test against
-        :param username: server's ssh username
-        :param private_key: server's ssh private key to be used
-        :param should_connect: True/False indicates positive/negative test
-            positive - attempt ping and ssh
-            negative - attempt ping and fail if succeed
-        :param mtu: network MTU to use for connectivity validation
-
-        :raises: AssertError if the result of the connectivity check does
-            not match the value of the should_connect param
-        """
-        if should_connect:
-            msg = "Timed out waiting for %s to become reachable" % ip_address
-        else:
-            msg = "ip address %s is reachable" % ip_address
-        self.assertTrue(self.ping_ip_address(ip_address,
-                                             should_succeed=should_connect,
-                                             mtu=mtu),
-                        msg=msg)
-        if should_connect:
-            # no need to check ssh for negative connectivity
-            self.get_remote_client(ip_address, username, private_key)
-
-    def check_public_network_connectivity(self, ip_address, username,
-                                          private_key, should_connect=True,
-                                          msg=None, servers=None, mtu=None):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        LOG.debug('checking network connections to IP %s with user: %s',
-                  ip_address, username)
-        try:
-            self.check_vm_connectivity(ip_address,
-                                       username,
-                                       private_key,
-                                       should_connect=should_connect,
-                                       mtu=mtu)
-        except Exception:
-            ex_msg = 'Public network connectivity check failed'
-            if msg:
-                ex_msg += ": " + msg
-            LOG.exception(ex_msg)
-            self._log_console_output(servers)
-            raise
-
 
 class NetworkScenarioTest(ScenarioTest):
     """Base class for network scenario tests.
@@ -357,222 +57,6 @@
             msg = "Bgpvpn extension not enabled."
             raise cls.skipException(msg)
 
-    def _create_network(self, networks_client=None,
-                        tenant_id=None,
-                        namestart='network-smoke-',
-                        port_security_enabled=True):
-        if not networks_client:
-            networks_client = self.networks_client
-        if not tenant_id:
-            tenant_id = networks_client.tenant_id
-        name = data_utils.rand_name(namestart)
-        network_kwargs = dict(name=name, tenant_id=tenant_id)
-        # Neutron disables port security by default so we have to check the
-        # config before trying to create the network with port_security_enabled
-        if CONF.network_feature_enabled.port_security:
-            network_kwargs['port_security_enabled'] = port_security_enabled
-        result = networks_client.create_network(**network_kwargs)
-        network = result['network']
-
-        self.assertEqual(network['name'], name)
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        networks_client.delete_network,
-                        network['id'])
-        return network
-
-    def _create_subnet(self, network, subnets_client=None,
-                       routers_client=None, namestart='subnet-smoke',
-                       **kwargs):
-        """Create a subnet for the given network
-
-        within the cidr block configured for tenant networks.
-        """
-        if not subnets_client:
-            subnets_client = self.subnets_client
-        if not routers_client:
-            routers_client = self.routers_client
-
-        def cidr_in_use(cidr, tenant_id):
-            """Check cidr existence
-
-            :returns: True if subnet with cidr already exist in tenant
-                  False else
-            """
-            cidr_in_use = self.os_admin.subnets_client.list_subnets(
-                tenant_id=tenant_id, cidr=cidr)['subnets']
-            return len(cidr_in_use) != 0
-
-        ip_version = kwargs.pop('ip_version', 4)
-
-        if ip_version == 6:
-            tenant_cidr = netaddr.IPNetwork(
-                CONF.network.project_network_v6_cidr)
-            num_bits = CONF.network.project_network_v6_mask_bits
-        else:
-            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            num_bits = CONF.network.project_network_mask_bits
-
-        result = None
-        str_cidr = None
-        # Repeatedly attempt subnet creation with sequential cidr
-        # blocks until an unallocated block is found.
-        for subnet_cidr in tenant_cidr.subnet(num_bits):
-            str_cidr = str(subnet_cidr)
-            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
-                continue
-
-            subnet = dict(
-                name=data_utils.rand_name(namestart),
-                network_id=network['id'],
-                tenant_id=network['tenant_id'],
-                cidr=str_cidr,
-                ip_version=ip_version,
-                **kwargs
-            )
-            try:
-                result = subnets_client.create_subnet(**subnet)
-                break
-            except lib_exc.Conflict as e:
-                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
-                if not is_overlapping_cidr:
-                    raise
-        self.assertIsNotNone(result, 'Unable to allocate tenant network')
-
-        subnet = result['subnet']
-        self.assertEqual(subnet['cidr'], str_cidr)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        subnets_client.delete_subnet, subnet['id'])
-
-        return subnet
-
-    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
-        ports = self.os_admin.ports_client.list_ports(
-            device_id=server['id'], fixed_ip=ip_addr)['ports']
-        # A port can have more than one IP address in some cases.
-        # If the network is dual-stack (IPv4 + IPv6), this port is associated
-        # with 2 subnets
-        p_status = ['ACTIVE']
-        # NOTE(vsaienko) With Ironic, instances live on separate hardware
-        # servers. Neutron does not bind ports for Ironic instances, as a
-        # result the port remains in the DOWN state.
-        # TODO(vsaienko) remove once bug: #1599836 is resolved.
-        if getattr(CONF.service_available, 'ironic', False):
-            p_status.append('DOWN')
-        port_map = [(p["id"], fxip["ip_address"])
-                    for p in ports
-                    for fxip in p["fixed_ips"]
-                    if netutils.is_valid_ipv4(fxip["ip_address"]) and
-                    p['status'] in p_status]
-        inactive = [p for p in ports if p['status'] != 'ACTIVE']
-        if inactive:
-            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
-        self.assertNotEqual(0, len(port_map),
-                            "No IPv4 addresses found in: %s" % ports)
-        self.assertEqual(len(port_map), 1,
-                         "Found multiple IPv4 addresses: %s. "
-                         "Unable to determine which port to target."
-                         % port_map)
-        return port_map[0]
-
-    def _get_network_by_name(self, network_name):
-        net = self.os_admin.networks_client.list_networks(
-            name=network_name)['networks']
-        self.assertNotEqual(len(net), 0,
-                            "Unable to get network by name: %s" % network_name)
-        return net[0]
-
-    def create_floating_ip(self, thing, external_network_id=None,
-                           port_id=None, client=None):
-        """Create a floating IP and associates to a resource/port on Neutron"""
-        if not external_network_id:
-            external_network_id = CONF.network.public_network_id
-        if not client:
-            client = self.floating_ips_client
-        if not port_id:
-            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
-        else:
-            ip4 = None
-        result = client.create_floatingip(
-            floating_network_id=external_network_id,
-            port_id=port_id,
-            tenant_id=thing['tenant_id'],
-            fixed_ip_address=ip4
-        )
-        floating_ip = result['floatingip']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_floatingip,
-                        floating_ip['id'])
-        return floating_ip
-
-    def _associate_floating_ip(self, floating_ip, server):
-        port_id, _ = self._get_server_port_id_and_ip4(server)
-        kwargs = dict(port_id=port_id)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertEqual(port_id, floating_ip['port_id'])
-        return floating_ip
-
-    def _disassociate_floating_ip(self, floating_ip):
-        """:param floating_ip: floating_ips_client.create_floatingip"""
-        kwargs = dict(port_id=None)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertIsNone(floating_ip['port_id'])
-        return floating_ip
-
-    def check_floating_ip_status(self, floating_ip, status):
-        """Verifies floatingip reaches the given status
-
-        :param dict floating_ip: floating IP dict to check status
-        :param status: target status
-        :raises: AssertionError if status doesn't match
-        """
-        floatingip_id = floating_ip['id']
-
-        def refresh():
-            result = (self.floating_ips_client.
-                      show_floatingip(floatingip_id)['floatingip'])
-            return status == result['status']
-
-        test_utils.call_until_true(refresh,
-                                   CONF.network.build_timeout,
-                                   CONF.network.build_interval)
-        floating_ip = self.floating_ips_client.show_floatingip(
-            floatingip_id)['floatingip']
-        self.assertEqual(status, floating_ip['status'],
-                         message="FloatingIP: {fp} is at status: {cst}. "
-                                 "failed  to reach status: {st}"
-                         .format(fp=floating_ip, cst=floating_ip['status'],
-                                 st=status))
-        LOG.info("FloatingIP: {fp} is at status: {st}"
-                 .format(fp=floating_ip, st=status))
-
-    def _check_tenant_network_connectivity(self, server,
-                                           username,
-                                           private_key,
-                                           should_connect=True,
-                                           servers_for_debug=None):
-        if not CONF.network.project_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            for net_name, ip_addresses in server['addresses'].items():
-                for ip_address in ip_addresses:
-                    self.check_vm_connectivity(ip_address['addr'],
-                                               username,
-                                               private_key,
-                                               should_connect=should_connect)
-        except Exception as e:
-            LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers_for_debug)
-            self._log_net_info(e)
-            raise
-
     def _check_remote_connectivity(self, source, dest, should_succeed=True,
                                    nic=None):
         """check ping server via source ssh connection
@@ -597,124 +81,9 @@
                                           CONF.validation.ping_timeout,
                                           1)
 
-    def _create_security_group(self, security_group_rules_client=None,
-                               tenant_id=None,
-                               namestart='secgroup-smoke',
-                               security_groups_client=None):
-        if security_group_rules_client is None:
-            security_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if tenant_id is None:
-            tenant_id = security_groups_client.tenant_id
-        secgroup = self._create_empty_security_group(
-            namestart=namestart, client=security_groups_client,
-            tenant_id=tenant_id)
-
-        # Add rules to the security group
-        rules = self._create_loginable_secgroup_rule(
-            security_group_rules_client=security_group_rules_client,
-            secgroup=secgroup,
-            security_groups_client=security_groups_client)
-        for rule in rules:
-            self.assertEqual(tenant_id, rule['tenant_id'])
-            self.assertEqual(secgroup['id'], rule['security_group_id'])
-        return secgroup
-
-    def _create_empty_security_group(self, client=None, tenant_id=None,
-                                     namestart='secgroup-smoke'):
-        """Create a security group without rules.
-
-        Default rules will be created:
-         - IPv4 egress to any
-         - IPv6 egress to any
-
-        :param tenant_id: secgroup will be created in this tenant
-        :returns: the created security group
-        """
-        if client is None:
-            client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        sg_name = data_utils.rand_name(namestart)
-        sg_desc = sg_name + " description"
-        sg_dict = dict(name=sg_name,
-                       description=sg_desc)
-        sg_dict['tenant_id'] = tenant_id
-        result = client.create_security_group(**sg_dict)
-
-        secgroup = result['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(tenant_id, secgroup['tenant_id'])
-        self.assertEqual(secgroup['description'], sg_desc)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_security_group, secgroup['id'])
-        return secgroup
-
-    def _default_security_group(self, client=None, tenant_id=None):
-        """Get default secgroup for given tenant_id.
-
-        :returns: default secgroup for given tenant
-        """
-        if client is None:
-            client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        sgs = [
-            sg for sg in list(client.list_security_groups().values())[0]
-            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
-        ]
-        msg = "No default security group for tenant %s." % (tenant_id)
-        self.assertGreater(len(sgs), 0, msg)
-        return sgs[0]
-
-    def _create_security_group_rule(self, secgroup=None,
-                                    sec_group_rules_client=None,
-                                    tenant_id=None,
-                                    security_groups_client=None, **kwargs):
-        """Create a rule from a dictionary of rule parameters.
-
-        Create a rule in a secgroup. if secgroup not defined will search for
-        default secgroup in tenant_id.
-
-        :param secgroup: the security group.
-        :param tenant_id: if secgroup not passed -- the tenant in which to
-            search for default secgroup
-        :param kwargs: a dictionary containing rule parameters:
-            for example, to allow incoming ssh:
-            rule = {
-                    direction: 'ingress'
-                    protocol:'tcp',
-                    port_range_min: 22,
-                    port_range_max: 22
-                    }
-        """
-        if sec_group_rules_client is None:
-            sec_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = security_groups_client.tenant_id
-        if secgroup is None:
-            secgroup = self._default_security_group(
-                client=security_groups_client, tenant_id=tenant_id)
-
-        ruleset = dict(security_group_id=secgroup['id'],
-                       tenant_id=secgroup['tenant_id'])
-        ruleset.update(kwargs)
-
-        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
-        sg_rule = sg_rule['security_group_rule']
-
-        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
-        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
-
-        return sg_rule
-
-    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
-                                        secgroup=None,
-                                        security_groups_client=None):
+    def create_loginable_secgroup_rule(self, security_group_rules_client=None,
+                                       secgroup=None,
+                                       security_groups_client=None):
         """Create loginable security group rule
 
         This function will create:
@@ -759,7 +128,7 @@
             for r_direction in ['ingress', 'egress']:
                 ruleset['direction'] = r_direction
                 try:
-                    sg_rule = self._create_security_group_rule(
+                    sg_rule = self.create_security_group_rule(
                         sec_group_rules_client=sec_group_rules_client,
                         secgroup=secgroup,
                         security_groups_client=security_groups_client,
@@ -775,33 +144,6 @@
 
         return rules
 
-    def _get_router(self, client=None, tenant_id=None):
-        """Retrieve a router for the given tenant id.
-
-        If a public router has been configured, it will be returned.
-
-        If a public router has not been configured, but a public
-        network has, a tenant router will be created and returned that
-        routes traffic to the public network.
-        """
-        if not client:
-            client = self.routers_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        router_id = CONF.network.public_router_id
-        network_id = CONF.network.public_network_id
-        if router_id:
-            body = client.show_router(router_id)
-            return body['router']
-        elif network_id:
-            router = self._create_router(client, tenant_id)
-            kwargs = {'external_gateway_info': dict(network_id=network_id)}
-            router = client.update_router(router['id'], **kwargs)['router']
-            return router
-        else:
-            raise Exception("Neither of 'public_router_id' or "
-                            "'public_network_id' has been defined.")
-
     def _create_router(self, client=None, tenant_id=None,
                        namestart='router-smoke'):
         if not client:
@@ -818,62 +160,3 @@
                         client.delete_router,
                         router['id'])
         return router
-
-    def _update_router_admin_state(self, router, admin_state_up):
-        kwargs = dict(admin_state_up=admin_state_up)
-        router = self.routers_client.update_router(
-            router['id'], **kwargs)['router']
-        self.assertEqual(admin_state_up, router['admin_state_up'])
-
-    def create_networks(self, networks_client=None,
-                        routers_client=None, subnets_client=None,
-                        tenant_id=None, dns_nameservers=None,
-                        port_security_enabled=True):
-        """Create a network with a subnet connected to a router.
-
-        The baremetal driver is a special case since all nodes are
-        on the same shared network.
-
-        :param tenant_id: id of tenant to create resources in.
-        :param dns_nameservers: list of dns servers to send to subnet.
-        :returns: network, subnet, router
-        """
-        if CONF.network.shared_physical_network:
-            # NOTE(Shrews): This exception is for environments where tenant
-            # credential isolation is available, but network separation is
-            # not (the current baremetal case). Likely can be removed when
-            # test account mgmt is reworked:
-            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
-            if not CONF.compute.fixed_network_name:
-                m = 'fixed_network_name must be specified in config'
-                raise lib_exc.InvalidConfiguration(m)
-            network = self._get_network_by_name(
-                CONF.compute.fixed_network_name)
-            router = None
-            subnet = None
-        else:
-            network = self._create_network(
-                networks_client=networks_client,
-                tenant_id=tenant_id,
-                port_security_enabled=port_security_enabled)
-            router = self._get_router(client=routers_client,
-                                      tenant_id=tenant_id)
-            subnet_kwargs = dict(network=network,
-                                 subnets_client=subnets_client,
-                                 routers_client=routers_client)
-            # use explicit check because empty list is a valid option
-            if dns_nameservers is not None:
-                subnet_kwargs['dns_nameservers'] = dns_nameservers
-            subnet = self._create_subnet(**subnet_kwargs)
-            if not routers_client:
-                routers_client = self.routers_client
-            router_id = router['id']
-            routers_client.add_router_interface(router_id,
-                                                subnet_id=subnet['id'])
-
-            # save a cleanup job to remove this association between
-            # router and subnet
-            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                            routers_client.remove_router_interface, router_id,
-                            subnet_id=subnet['id'])
-        return network, subnet, router
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
index 14b0572..0142045 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
@@ -1103,8 +1103,8 @@
         self._check_l3_bgpvpn(should_succeed=False)
 
     def _create_security_group_for_test(self):
-        self.security_group = self._create_security_group(
-            tenant_id=self.bgpvpn_client.tenant_id)
+        self.security_group = self.create_security_group(
+            project_id=self.bgpvpn_client.project_id)
 
     def _create_networks_and_subnets(self, names=None, subnet_cidrs=None,
                                      port_security=True):
@@ -1113,8 +1113,9 @@
         if not subnet_cidrs:
             subnet_cidrs = [[NET_A_S1], [NET_B_S1], [NET_C_S1]]
         for (name, subnet_cidrs) in zip(names, subnet_cidrs):
-            network = self._create_network(
-                namestart=name, port_security_enabled=port_security)
+            network = super(manager.NetworkScenarioTest, self).create_network(
+                namestart=name,
+                port_security_enabled=port_security)
             self.networks[name] = network
             self.subnets[name] = []
             for (j, cidr) in enumerate(subnet_cidrs):
@@ -1181,9 +1182,12 @@
         create_port_body = {'fixed_ips': [{'ip_address': ip_address}],
                             'namestart': 'port-smoke',
                             'security_groups': security_groups}
-        port = self._create_port(network_id=network['id'],
-                                 client=clients.ports_client,
-                                 **create_port_body)
+
+        port = super(manager.NetworkScenarioTest, self).create_port(
+            network_id=network['id'],
+            client=clients.ports_client,
+            **create_port_body)
+
         create_server_kwargs = {
             'key_name': keypair['name'],
             'networks': [{'uuid': network['id'], 'port': port['id']}]
@@ -1255,7 +1259,8 @@
         private_key = self.servers_keypairs[server['id']][
             'private_key']
         ssh_client = self.get_remote_client(server_fip,
-                                            private_key=private_key)
+                                            private_key=private_key,
+                                            server=server)
         return ssh_client
 
     def _setup_http_server(self, server_index):
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index 8334521..4cb1474 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -62,7 +62,8 @@
             host=host, username=username, password=password, timeout=timeout,
             pkey=pkey, channel_timeout=channel_timeout,
             look_for_keys=look_for_keys, key_filename=key_filename, port=port,
-            proxy_client=proxy_client)
+            proxy_client=proxy_client,
+            ssh_key_type=CONF.validation.ssh_key_type)
 
     @classmethod
     def create_proxy_client(cls, look_for_keys=True, **kwargs):
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index a40fb0d..aea79ad 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -220,6 +220,25 @@
 CONF.register_group(sfc_group)
 CONF.register_opts(SfcGroup, group="sfc")
 
+
+TaasGroup = [
+    cfg.StrOpt('provider_physical_network',
+               default='',
+               help='Physical network used to create the SRIOV network.'),
+    cfg.StrOpt('provider_segmentation_id',
+               default='',
+               help='Segmentation ID used to create the SRIOV network.'),
+    cfg.StrOpt('vlan_filter',
+               default='',
+               help='Comma-separated list of VLANs to be mirrored '
+                    'for a Tap-Flow.'),
+]
+taas_group = cfg.OptGroup(name='taas',
+                          title='TaaS Tempest Options')
+CONF.register_group(taas_group)
+CONF.register_opts(TaasGroup, group="taas")
+
+
 config_opts_translator = {
     'project_network_cidr': 'tenant_network_cidr',
     'project_network_v6_cidr': 'tenant_network_v6_cidr',
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
index 00cdf2c..f8eb44c 100644
--- a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
@@ -46,6 +46,7 @@
             try:
                 client = ssh.Client(ip_address, username, pkey=private_key,
                                     channel_timeout=connect_timeout,
+                                    ssh_key_type=CONF.validation.ssh_key_type,
                                     **kwargs)
                 client.test_connection_auth()
                 self.assertTrue(should_connect, "Unexpectedly reachable")
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
index 5ead2a7..517c96e 100644
--- a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
@@ -14,28 +14,19 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import subprocess
-
-import netaddr
 from oslo_log import log
-from oslo_utils import netutils
 
-from tempest.common import compute
-from tempest.common.utils.linux import remote_client
-from tempest.common.utils import net_utils
-from tempest.common import waiters
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
-from tempest.lib import exceptions as lib_exc
-import tempest.test
+from tempest.scenario import manager
 
 CONF = config.CONF
 
 LOG = log.getLogger(__name__)
 
 
-class ScenarioTest(tempest.test.BaseTestCase):
+class ScenarioTest(manager.NetworkScenarioTest):
     """Base class for scenario tests. Uses tempest own clients. """
 
     credentials = ['primary']
@@ -50,298 +41,6 @@
         if msg:
             raise cls.skipException(msg)
 
-    @classmethod
-    def setup_clients(cls):
-        super(ScenarioTest, cls).setup_clients()
-        # Clients (in alphabetical order)
-        cls.keypairs_client = cls.os_primary.keypairs_client
-        cls.servers_client = cls.os_primary.servers_client
-        # Neutron network client
-        cls.networks_client = cls.os_primary.networks_client
-        cls.ports_client = cls.os_primary.ports_client
-        cls.routers_client = cls.os_primary.routers_client
-        cls.subnets_client = cls.os_primary.subnets_client
-        cls.floating_ips_client = cls.os_primary.floating_ips_client
-        cls.security_groups_client = cls.os_primary.security_groups_client
-        cls.security_group_rules_client = (
-            cls.os_primary.security_group_rules_client)
-
-    # Test functions library
-    #
-    # The create_[resource] functions only return body and discard the
-    # resp part which is not used in scenario tests
-
-    def _create_port(self, network_id, client=None, namestart='port-quotatest',
-                     **kwargs):
-        if not client:
-            client = self.ports_client
-        name = data_utils.rand_name(namestart)
-        result = client.create_port(
-            name=name,
-            network_id=network_id,
-            **kwargs)
-        self.assertIsNotNone(result, 'Unable to allocate port')
-        port = result['port']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_port, port['id'])
-        return port
-
-    def create_keypair(self, client=None):
-        if not client:
-            client = self.keypairs_client
-        name = data_utils.rand_name(self.__class__.__name__)
-        # We don't need to create a keypair by pubkey in scenario
-        body = client.create_keypair(name=name)
-        self.addCleanup(client.delete_keypair, name)
-        return body['keypair']
-
-    def create_server(self, name=None, image_id=None, flavor=None,
-                      validatable=False, wait_until='ACTIVE',
-                      clients=None, **kwargs):
-        """Wrapper utility that returns a test server.
-
-        This wrapper utility calls the common create test server and
-        returns a test server. The purpose of this wrapper is to minimize
-        the impact on the code of the tests already using this
-        function.
-        """
-
-        # NOTE(jlanoux): As a first step, ssh checks in the scenario
-        # tests need to be run regardless of the run_validation and
-        # validatable parameters and thus until the ssh validation job
-        # becomes voting in CI. The test resources management and IP
-        # association are taken care of in the scenario tests.
-        # Therefore, the validatable parameter is set to false in all
-        # those tests. In this way create_server just return a standard
-        # server and the scenario tests always perform ssh checks.
-
-        # Needed for the cross_tenant_traffic test:
-        if clients is None:
-            clients = self.os_primary
-
-        if name is None:
-            name = data_utils.rand_name(self.__class__.__name__ + "-server")
-
-        vnic_type = CONF.network.port_vnic_type
-
-        # If vnic_type is configured create port for
-        # every network
-        if vnic_type:
-            ports = []
-
-            create_port_body = {'binding:vnic_type': vnic_type,
-                                'namestart': 'port-smoke'}
-            if kwargs:
-                # Convert security group names to security group ids
-                # to pass to create_port
-                if 'security_groups' in kwargs:
-                    security_groups = \
-                        clients.security_groups_client.list_security_groups(
-                        ).get('security_groups')
-                    sec_dict = dict([(s['name'], s['id'])
-                                    for s in security_groups])
-
-                    sec_groups_names = [s['name'] for s in kwargs.pop(
-                        'security_groups')]
-                    security_groups_ids = [sec_dict[s]
-                                           for s in sec_groups_names]
-
-                    if security_groups_ids:
-                        create_port_body[
-                            'security_groups'] = security_groups_ids
-                networks = kwargs.pop('networks', [])
-            else:
-                networks = []
-
-            # If there are no networks passed to us we look up
-            # for the project's private networks and create a port.
-            # The same behaviour as we would expect when passing
-            # the call to the clients with no networks
-            if not networks:
-                networks = clients.networks_client.list_networks(
-                    **{'router:external': False, 'fields': 'id'})['networks']
-
-            # It's net['uuid'] if networks come from kwargs
-            # and net['id'] if they come from
-            # clients.networks_client.list_networks
-            for net in networks:
-                net_id = net.get('uuid', net.get('id'))
-                if 'port' not in net:
-                    port = self._create_port(network_id=net_id,
-                                             client=clients.ports_client,
-                                             **create_port_body)
-                    ports.append({'port': port['id']})
-                else:
-                    ports.append({'port': net['port']})
-            if ports:
-                kwargs['networks'] = ports
-            self.ports = ports
-
-        tenant_network = self.get_tenant_network()
-
-        body, servers = compute.create_test_server(
-            clients,
-            tenant_network=tenant_network,
-            wait_until=wait_until,
-            name=name, flavor=flavor,
-            image_id=image_id, **kwargs)
-
-        self.addCleanup(waiters.wait_for_server_termination,
-                        clients.servers_client, body['id'])
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        clients.servers_client.delete_server, body['id'])
-        server = clients.servers_client.show_server(body['id'])['server']
-        return server
-
-    def get_remote_client(self, ip_address, username=None, private_key=None):
-        """Get a SSH client to a remote server
-
-        @param ip_address the server floating or fixed IP address to use
-                          for ssh validation
-        @param username name of the Linux account on the remote server
-        @param private_key the SSH private key to use
-        @return a RemoteClient object
-        """
-
-        if username is None:
-            username = CONF.validation.image_ssh_user
-        # Set this with 'keypair' or others to log in with keypair or
-        # username/password.
-        if CONF.validation.auth_method == 'keypair':
-            password = None
-            if private_key is None:
-                private_key = self.keypair['private_key']
-        else:
-            password = CONF.validation.image_ssh_password
-            private_key = None
-        linux_client = remote_client.RemoteClient(ip_address, username,
-                                                  pkey=private_key,
-                                                  password=password)
-        try:
-            linux_client.validate_authentication()
-        except Exception as e:
-            message = ('Initializing SSH connection to %(ip)s failed. '
-                       'Error: %(error)s' % {'ip': ip_address,
-                                             'error': e})
-            caller = test_utils.find_test_caller()
-            if caller:
-                message = '(%s) %s' % (caller, message)
-            LOG.exception(message)
-            self._log_console_output()
-            raise
-
-        return linux_client
-
-    def _log_console_output(self, servers=None):
-        if not CONF.compute_feature_enabled.console_output:
-            LOG.debug('Console output not supported, cannot log')
-            return
-        if not servers:
-            servers = self.servers_client.list_servers()
-            servers = servers['servers']
-        for server in servers:
-            try:
-                console_output = self.servers_client.get_console_output(
-                    server['id'])['output']
-                LOG.debug('Console output for %s\nbody=\n%s',
-                          server['id'], console_output)
-            except lib_exc.NotFound:
-                LOG.debug("Server %s disappeared(deleted) while looking "
-                          "for the console log", server['id'])
-
-    def _log_net_info(self, exc):
-        # network debug is called as part of ssh init
-        if not isinstance(exc, lib_exc.SSHTimeout):
-            LOG.debug('Network information on a devstack host')
-
-    def ping_ip_address(self, ip_address, should_succeed=True,
-                        ping_timeout=None, mtu=None):
-        timeout = ping_timeout or CONF.validation.ping_timeout
-        cmd = ['ping', '-c1', '-w1']
-
-        if mtu:
-            cmd += [
-                # don't fragment
-                '-M', 'do',
-                # ping receives just the size of ICMP payload
-                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
-            ]
-        cmd.append(ip_address)
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.communicate()
-
-            return (proc.returncode == 0) == should_succeed
-
-        caller = test_utils.find_test_caller()
-        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
-                  ' expected result is %(should_succeed)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'should_succeed':
-                      'reachable' if should_succeed else 'unreachable'
-                  })
-        result = test_utils.call_until_true(ping, timeout, 1)
-        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
-                  'ping result is %(result)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'result': 'expected' if result else 'unexpected'
-                  })
-        return result
-
-    def check_vm_connectivity(self, ip_address,
-                              username=None,
-                              private_key=None,
-                              should_connect=True,
-                              mtu=None):
-        """Check server connectivity
-
-        :param ip_address: server to test against
-        :param username: server's ssh username
-        :param private_key: server's ssh private key to be used
-        :param should_connect: True/False indicates positive/negative test
-            positive - attempt ping and ssh
-            negative - attempt ping and fail if succeed
-        :param mtu: network MTU to use for connectivity validation
-
-        :raises: AssertError if the result of the connectivity check does
-            not match the value of the should_connect param
-        """
-        if should_connect:
-            msg = "Timed out waiting for %s to become reachable" % ip_address
-        else:
-            msg = "ip address %s is reachable" % ip_address
-        self.assertTrue(self.ping_ip_address(ip_address,
-                                             should_succeed=should_connect,
-                                             mtu=mtu),
-                        msg=msg)
-        if should_connect:
-            # no need to check ssh for negative connectivity
-            self.get_remote_client(ip_address, username, private_key)
-
-    def check_public_network_connectivity(self, ip_address, username,
-                                          private_key, should_connect=True,
-                                          msg=None, servers=None, mtu=None):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        LOG.debug('checking network connections to IP %s with user: %s',
-                  ip_address, username)
-        try:
-            self.check_vm_connectivity(ip_address,
-                                       username,
-                                       private_key,
-                                       should_connect=should_connect,
-                                       mtu=mtu)
-        except Exception:
-            ex_msg = 'Public network connectivity check failed'
-            if msg:
-                ex_msg += ": " + msg
-            LOG.exception(ex_msg)
-            self._log_console_output(servers)
-            raise
-
 
 class NetworkScenarioTest(ScenarioTest):
     """Base class for network scenario tests.
@@ -363,443 +62,6 @@
         if not CONF.service_available.neutron:
             raise cls.skipException('Neutron not available')
 
-    def _create_network(self, networks_client=None,
-                        tenant_id=None,
-                        namestart='network-smoke-',
-                        port_security_enabled=True):
-        if not networks_client:
-            networks_client = self.networks_client
-        if not tenant_id:
-            tenant_id = networks_client.tenant_id
-        name = data_utils.rand_name(namestart)
-        network_kwargs = dict(name=name, tenant_id=tenant_id)
-        # Neutron disables port security by default so we have to check the
-        # config before trying to create the network with port_security_enabled
-        if CONF.network_feature_enabled.port_security:
-            network_kwargs['port_security_enabled'] = port_security_enabled
-        result = networks_client.create_network(**network_kwargs)
-        network = result['network']
-
-        self.assertEqual(network['name'], name)
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        networks_client.delete_network,
-                        network['id'])
-        return network
-
-    def _create_subnet(self, network, subnets_client=None,
-                       routers_client=None, namestart='subnet-smoke',
-                       **kwargs):
-        """Create a subnet for the given network
-
-        within the cidr block configured for tenant networks.
-        """
-        if not subnets_client:
-            subnets_client = self.subnets_client
-        if not routers_client:
-            routers_client = self.routers_client
-
-        def cidr_in_use(cidr, tenant_id):
-            """Check cidr existence
-
-            :returns: True if subnet with cidr already exist in tenant
-                  False else
-            """
-            cidr_in_use = self.os_admin.subnets_client.list_subnets(
-                tenant_id=tenant_id, cidr=cidr)['subnets']
-            return len(cidr_in_use) != 0
-
-        ip_version = kwargs.pop('ip_version', 4)
-
-        if ip_version == 6:
-            tenant_cidr = netaddr.IPNetwork(
-                CONF.network.project_network_v6_cidr)
-            num_bits = CONF.network.project_network_v6_mask_bits
-        else:
-            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
-            num_bits = CONF.network.project_network_mask_bits
-
-        result = None
-        str_cidr = None
-        # Repeatedly attempt subnet creation with sequential cidr
-        # blocks until an unallocated block is found.
-        for subnet_cidr in tenant_cidr.subnet(num_bits):
-            str_cidr = str(subnet_cidr)
-            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
-                continue
-
-            subnet = dict(
-                name=data_utils.rand_name(namestart),
-                network_id=network['id'],
-                tenant_id=network['tenant_id'],
-                cidr=str_cidr,
-                ip_version=ip_version,
-                **kwargs
-            )
-            try:
-                result = subnets_client.create_subnet(**subnet)
-                break
-            except lib_exc.Conflict as e:
-                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
-                if not is_overlapping_cidr:
-                    raise
-        self.assertIsNotNone(result, 'Unable to allocate tenant network')
-
-        subnet = result['subnet']
-        self.assertEqual(subnet['cidr'], str_cidr)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        subnets_client.delete_subnet, subnet['id'])
-
-        return subnet
-
-    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
-        ports = self.os_admin.ports_client.list_ports(
-            device_id=server['id'], fixed_ip=ip_addr)['ports']
-        # A port can have more than one IP address in some cases.
-        # If the network is dual-stack (IPv4 + IPv6), this port is associated
-        # with 2 subnets
-        p_status = ['ACTIVE']
-        # NOTE(vsaienko) With Ironic, instances live on separate hardware
-        # servers. Neutron does not bind ports for Ironic instances, as a
-        # result the port remains in the DOWN state.
-        # TODO(vsaienko) remove once bug: #1599836 is resolved.
-        if getattr(CONF.service_available, 'ironic', False):
-            p_status.append('DOWN')
-        port_map = [(p["id"], fxip["ip_address"])
-                    for p in ports
-                    for fxip in p["fixed_ips"]
-                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
-                        p['status'] in p_status)]
-        inactive = [p for p in ports if p['status'] != 'ACTIVE']
-        if inactive:
-            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
-        self.assertNotEqual(0, len(port_map),
-                            "No IPv4 addresses found in: %s" % ports)
-        self.assertEqual(len(port_map), 1,
-                         "Found multiple IPv4 addresses: %s. "
-                         "Unable to determine which port to target."
-                         % port_map)
-        return port_map[0]
-
-    def _get_network_by_name(self, network_name):
-        net = self.os_admin.networks_client.list_networks(
-            name=network_name)['networks']
-        self.assertNotEqual(len(net), 0,
-                            "Unable to get network by name: %s" % network_name)
-        return net[0]
-
-    def create_floating_ip(self, thing, external_network_id=None,
-                           port_id=None, client=None):
-        """Create a floating IP and associates to a resource/port on Neutron"""
-        if not external_network_id:
-            external_network_id = CONF.network.public_network_id
-        if not client:
-            client = self.floating_ips_client
-        if not port_id:
-            port_id, ip4 = self._get_server_port_id_and_ip4(thing)
-        else:
-            ip4 = None
-        result = client.create_floatingip(
-            floating_network_id=external_network_id,
-            port_id=port_id,
-            tenant_id=thing['tenant_id'],
-            fixed_ip_address=ip4
-        )
-        floating_ip = result['floatingip']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_floatingip,
-                        floating_ip['id'])
-        return floating_ip
-
-    def _associate_floating_ip(self, floating_ip, server):
-        port_id, _ = self._get_server_port_id_and_ip4(server)
-        kwargs = dict(port_id=port_id)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertEqual(port_id, floating_ip['port_id'])
-        return floating_ip
-
-    def _disassociate_floating_ip(self, floating_ip):
-        """:param floating_ip: floating_ips_client.create_floatingip"""
-        kwargs = dict(port_id=None)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertIsNone(floating_ip['port_id'])
-        return floating_ip
-
-    def check_floating_ip_status(self, floating_ip, status):
-        """Verifies floatingip reaches the given status
-
-        :param dict floating_ip: floating IP dict to check status
-        :param status: target status
-        :raises: AssertionError if status doesn't match
-        """
-        floatingip_id = floating_ip['id']
-
-        def refresh():
-            result = (self.floating_ips_client.
-                      show_floatingip(floatingip_id)['floatingip'])
-            return status == result['status']
-
-        test_utils.call_until_true(refresh,
-                                   CONF.network.build_timeout,
-                                   CONF.network.build_interval)
-        floating_ip = self.floating_ips_client.show_floatingip(
-            floatingip_id)['floatingip']
-        self.assertEqual(status, floating_ip['status'],
-                         message="FloatingIP: {fp} is at status: {cst}. "
-                                 "failed  to reach status: {st}"
-                         .format(fp=floating_ip, cst=floating_ip['status'],
-                                 st=status))
-        LOG.info("FloatingIP: {fp} is at status: {st}"
-                 .format(fp=floating_ip, st=status))
-
-    def _check_tenant_network_connectivity(self, server,
-                                           username,
-                                           private_key,
-                                           should_connect=True,
-                                           servers_for_debug=None):
-        if not CONF.network.project_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            for net_name, ip_addresses in server['addresses'].items():
-                for ip_address in ip_addresses:
-                    self.check_vm_connectivity(ip_address['addr'],
-                                               username,
-                                               private_key,
-                                               should_connect=should_connect)
-        except Exception as e:
-            LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers_for_debug)
-            self._log_net_info(e)
-            raise
-
-    def _check_remote_connectivity(self, source, dest, should_succeed=True,
-                                   nic=None):
-        """check ping server via source ssh connection
-
-        :param source: RemoteClient: an ssh connection from which to ping
-        :param dest: and IP to ping against
-        :param should_succeed: boolean should ping succeed or not
-        :param nic: specific network interface to ping from
-        :returns: boolean -- should_succeed == ping
-        :returns: ping is false if ping failed
-        """
-        def ping_remote():
-            try:
-                source.ping_host(dest, nic=nic)
-            except lib_exc.SSHExecCommandFailed:
-                LOG.warning('Failed to ping IP: %s via a ssh connection '
-                            'from: %s.', dest, source.ssh_client.host)
-                return not should_succeed
-            return should_succeed
-
-        return test_utils.call_until_true(ping_remote,
-                                          CONF.validation.ping_timeout,
-                                          1)
-
-    def _create_security_group(self, security_group_rules_client=None,
-                               tenant_id=None,
-                               namestart='secgroup-smoke',
-                               security_groups_client=None):
-        if security_group_rules_client is None:
-            security_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if tenant_id is None:
-            tenant_id = security_groups_client.tenant_id
-        secgroup = self._create_empty_security_group(
-            namestart=namestart, client=security_groups_client,
-            tenant_id=tenant_id)
-
-        # Add rules to the security group
-        rules = self._create_loginable_secgroup_rule(
-            security_group_rules_client=security_group_rules_client,
-            secgroup=secgroup,
-            security_groups_client=security_groups_client)
-        for rule in rules:
-            self.assertEqual(tenant_id, rule['tenant_id'])
-            self.assertEqual(secgroup['id'], rule['security_group_id'])
-        return secgroup
-
-    def _create_empty_security_group(self, client=None, tenant_id=None,
-                                     namestart='secgroup-smoke'):
-        """Create a security group without rules.
-
-        Default rules will be created:
-         - IPv4 egress to any
-         - IPv6 egress to any
-
-        :param tenant_id: secgroup will be created in this tenant
-        :returns: the created security group
-        """
-        if client is None:
-            client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        sg_name = data_utils.rand_name(namestart)
-        sg_desc = sg_name + " description"
-        sg_dict = dict(name=sg_name,
-                       description=sg_desc)
-        sg_dict['tenant_id'] = tenant_id
-        result = client.create_security_group(**sg_dict)
-
-        secgroup = result['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(tenant_id, secgroup['tenant_id'])
-        self.assertEqual(secgroup['description'], sg_desc)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_security_group, secgroup['id'])
-        return secgroup
-
-    def _default_security_group(self, client=None, tenant_id=None):
-        """Get default secgroup for given tenant_id.
-
-        :returns: default secgroup for given tenant
-        """
-        if client is None:
-            client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        sgs = [
-            sg for sg in list(client.list_security_groups().values())[0]
-            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
-        ]
-        msg = "No default security group for tenant %s." % (tenant_id)
-        self.assertGreater(len(sgs), 0, msg)
-        return sgs[0]
-
-    def _create_security_group_rule(self, secgroup=None,
-                                    sec_group_rules_client=None,
-                                    tenant_id=None,
-                                    security_groups_client=None, **kwargs):
-        """Create a rule from a dictionary of rule parameters.
-
-        Create a rule in a secgroup. if secgroup not defined will search for
-        default secgroup in tenant_id.
-
-        :param secgroup: the security group.
-        :param tenant_id: if secgroup not passed -- the tenant in which to
-            search for default secgroup
-        :param kwargs: a dictionary containing rule parameters:
-            for example, to allow incoming ssh:
-            rule = {
-                    direction: 'ingress'
-                    protocol:'tcp',
-                    port_range_min: 22,
-                    port_range_max: 22
-                    }
-        """
-        if sec_group_rules_client is None:
-            sec_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = security_groups_client.tenant_id
-        if secgroup is None:
-            secgroup = self._default_security_group(
-                client=security_groups_client, tenant_id=tenant_id)
-
-        ruleset = dict(security_group_id=secgroup['id'],
-                       tenant_id=secgroup['tenant_id'])
-        ruleset.update(kwargs)
-
-        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
-        sg_rule = sg_rule['security_group_rule']
-
-        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
-        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
-
-        return sg_rule
-
-    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
-                                        secgroup=None,
-                                        security_groups_client=None):
-        """Create loginable security group rule
-
-        This function will create:
-        1. egress and ingress tcp port 22 allow rule in order to allow ssh
-        access for ipv4.
-        2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
-        3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
-        """
-
-        if security_group_rules_client is None:
-            security_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        rules = []
-        rulesets = [
-            dict(
-                # ssh
-                protocol='tcp',
-                port_range_min=22,
-                port_range_max=22,
-            ),
-            dict(
-                # ping
-                protocol='icmp',
-            ),
-            dict(
-                # ipv6-icmp for ping6
-                protocol='icmp',
-                ethertype='IPv6',
-            )
-        ]
-        sec_group_rules_client = security_group_rules_client
-        for ruleset in rulesets:
-            for r_direction in ['ingress', 'egress']:
-                ruleset['direction'] = r_direction
-                try:
-                    sg_rule = self._create_security_group_rule(
-                        sec_group_rules_client=sec_group_rules_client,
-                        secgroup=secgroup,
-                        security_groups_client=security_groups_client,
-                        **ruleset)
-                except lib_exc.Conflict as ex:
-                    # if rule already exist - skip rule and continue
-                    msg = 'Security group rule already exists'
-                    if msg not in ex._error_string:
-                        raise ex
-                else:
-                    self.assertEqual(r_direction, sg_rule['direction'])
-                    rules.append(sg_rule)
-
-        return rules
-
-    def _get_router(self, client=None, tenant_id=None):
-        """Retrieve a router for the given tenant id.
-
-        If a public router has been configured, it will be returned.
-
-        If a public router has not been configured, but a public
-        network has, a tenant router will be created and returned that
-        routes traffic to the public network.
-        """
-        if not client:
-            client = self.routers_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        router_id = CONF.network.public_router_id
-        network_id = CONF.network.public_network_id
-        if router_id:
-            body = client.show_router(router_id)
-            return body['router']
-        elif network_id:
-            router = self._create_router(client, tenant_id)
-            kwargs = {'external_gateway_info': dict(network_id=network_id)}
-            router = client.update_router(router['id'], **kwargs)['router']
-            return router
-        else:
-            raise Exception("Neither of 'public_router_id' or "
-                            "'public_network_id' has been defined.")
-
     def _create_router(self, client=None, tenant_id=None,
                        namestart='router-smoke'):
         if not client:
@@ -816,62 +78,3 @@
                         client.delete_router,
                         router['id'])
         return router
-
-    def _update_router_admin_state(self, router, admin_state_up):
-        kwargs = dict(admin_state_up=admin_state_up)
-        router = self.routers_client.update_router(
-            router['id'], **kwargs)['router']
-        self.assertEqual(admin_state_up, router['admin_state_up'])
-
-    def create_networks(self, networks_client=None,
-                        routers_client=None, subnets_client=None,
-                        tenant_id=None, dns_nameservers=None,
-                        port_security_enabled=True):
-        """Create a network with a subnet connected to a router.
-
-        The baremetal driver is a special case since all nodes are
-        on the same shared network.
-
-        :param tenant_id: id of tenant to create resources in.
-        :param dns_nameservers: list of dns servers to send to subnet.
-        :returns: network, subnet, router
-        """
-        if CONF.network.shared_physical_network:
-            # NOTE(Shrews): This exception is for environments where tenant
-            # credential isolation is available, but network separation is
-            # not (the current baremetal case). Likely can be removed when
-            # test account mgmt is reworked:
-            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
-            if not CONF.compute.fixed_network_name:
-                m = 'fixed_network_name must be specified in config'
-                raise lib_exc.InvalidConfiguration(m)
-            network = self._get_network_by_name(
-                CONF.compute.fixed_network_name)
-            router = None
-            subnet = None
-        else:
-            network = self._create_network(
-                networks_client=networks_client,
-                tenant_id=tenant_id,
-                port_security_enabled=port_security_enabled)
-            router = self._get_router(client=routers_client,
-                                      tenant_id=tenant_id)
-            subnet_kwargs = dict(network=network,
-                                 subnets_client=subnets_client,
-                                 routers_client=routers_client)
-            # use explicit check because empty list is a valid option
-            if dns_nameservers is not None:
-                subnet_kwargs['dns_nameservers'] = dns_nameservers
-            subnet = self._create_subnet(**subnet_kwargs)
-            if not routers_client:
-                routers_client = self.routers_client
-            router_id = router['id']
-            routers_client.add_router_interface(router_id,
-                                                subnet_id=subnet['id'])
-
-            # save a cleanup job to remove this association between
-            # router and subnet
-            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                            routers_client.remove_router_interface, router_id,
-                            subnet_id=subnet['id'])
-        return network, subnet, router
diff --git a/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py b/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
index 4681b88..4d5fdac 100644
--- a/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
+++ b/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
@@ -130,9 +130,9 @@
         return resp
 
     def _create_network_subnet(self):
-        network = self._create_network()
+        network = self.create_network()
         subnet_kwargs = dict(network=network)
-        subnet = self._create_subnet(**subnet_kwargs)
+        subnet = self.create_subnet(**subnet_kwargs)
         return network, subnet
 
     def _create_test_server(self, network, security_group):
@@ -196,7 +196,7 @@
         resp['router_portid_2'] = router_portid_2
 
         # Create a VM on each of the network and assign it a floating IP.
-        security_group = self._create_security_group()
+        security_group = self.create_security_group()
         server1, private_key1, server_fixed_ip_1, server_floating_ip_1 = (
             self._create_test_server(network1, security_group))
         server2, private_key2, server_fixed_ip_2, server_floating_ip_2 = (
diff --git a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
index 85cc810..3ec231e 100644
--- a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
+++ b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
@@ -214,7 +214,8 @@
         left_server = self._create_server()
         ssh_client = ssh.Client(left_server['fip']['floating_ip_address'],
                                 CONF.validation.image_ssh_user,
-                                pkey=self.keypair['private_key'])
+                                pkey=self.keypair['private_key'],
+                                ssh_key_type=CONF.validation.ssh_key_type)
 
         # check LEFT -> RIGHT connectivity via BGP advertised routes
         self.check_remote_connectivity(
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index cbe5df6..cf68224 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -38,8 +38,11 @@
 from neutron_tempest_plugin.scenario import constants
 
 CONF = config.CONF
-
 LOG = log.getLogger(__name__)
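+# Exceptions raised when the SSH connection to a guest VM fails; tests
+# catch them to log the console output and the local network state
+# before re-raising.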
+SSH_EXC_TUPLE = (lib_exc.SSHTimeout,
+                 ssh_exc.AuthenticationException,
+                 ssh_exc.NoValidConnectionsError,
+                 ConnectionResetError)
 
 
 def get_ncat_version(ssh_client=None):
@@ -308,7 +311,7 @@
                                     pkey=ssh_key, timeout=ssh_timeout)
         try:
             ssh_client.test_connection_auth()
-        except (lib_exc.SSHTimeout, ssh_exc.AuthenticationException) as ssh_e:
+        except SSH_EXC_TUPLE as ssh_e:
             LOG.debug(ssh_e)
             self._log_console_output(servers)
             self._log_local_network_status()
@@ -448,7 +451,7 @@
                 timeout=timeout, pattern=pattern,
                 forbid_packet_loss=forbid_packet_loss,
                 check_response_ip=check_response_ip))
-        except (lib_exc.SSHTimeout, ssh_exc.AuthenticationException) as ssh_e:
+        except SSH_EXC_TUPLE as ssh_e:
             LOG.debug(ssh_e)
             self._log_console_output(servers)
             self._log_local_network_status()
@@ -565,7 +568,7 @@
                     **kwargs)
                 self.assertIn(server['name'],
                               ssh_client.get_hostname())
-        except (lib_exc.SSHTimeout, ssh_exc.AuthenticationException) as ssh_e:
+        except SSH_EXC_TUPLE as ssh_e:
             LOG.debug(ssh_e)
             if log_errors:
                 self._log_console_output(servers)
@@ -600,7 +603,7 @@
             return ssh_client.execute_script(
                 get_ncat_server_cmd(port, protocol, echo_msg),
                 become_root=True, combine_stderr=True)
-        except (lib_exc.SSHTimeout, ssh_exc.AuthenticationException) as ssh_e:
+        except SSH_EXC_TUPLE as ssh_e:
             LOG.debug(ssh_e)
             self._log_console_output(servers)
             self._log_local_network_status()
@@ -615,3 +618,38 @@
         result = shell.execute_local_command(cmd)
         self.assertEqual(0, result.exit_status)
         return result.stdout
+
+    def _ensure_public_router(self, client=None, tenant_id=None):
+        """Retrieve a router for the given tenant id.
+
+        If a public router has been configured, it will be returned.
+
+        If a public router has not been configured, but a public
+        network has, a tenant router will be created and returned that
+        routes traffic to the public network.
+        """
+        if not client:
+            client = self.client
+        if not tenant_id:
+            tenant_id = client.tenant_id
+        router_id = CONF.network.public_router_id
+        network_id = CONF.network.public_network_id
+        if router_id:
+            body = client.show_router(router_id)
+            return body['router']
+        elif network_id:
+            router = self.create_router_by_client()
+            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                            client.delete_router, router['id'])
+            kwargs = {'external_gateway_info': dict(network_id=network_id)}
+            router = client.update_router(router['id'], **kwargs)['router']
+            return router
+        else:
+            raise Exception("Neither 'public_router_id' nor "
+                            "'public_network_id' has been defined.")
+
+    def _update_router_admin_state(self, router, admin_state_up):
+        kwargs = dict(admin_state_up=admin_state_up)
+        router = self.client.update_router(
+            router['id'], **kwargs)['router']
+        self.assertEqual(admin_state_up, router['admin_state_up'])
diff --git a/neutron_tempest_plugin/scenario/test_dns_integration.py b/neutron_tempest_plugin/scenario/test_dns_integration.py
index 240a5a8..6f2756c 100644
--- a/neutron_tempest_plugin/scenario/test_dns_integration.py
+++ b/neutron_tempest_plugin/scenario/test_dns_integration.py
@@ -147,11 +147,16 @@
                 provider_segmentation_id=12345)
         cls.subnet2 = cls.create_subnet(cls.network2)
 
+    def _verify_dns_assignment(self, port):
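+        # The FQDN reported in the port's dns_assignment should be built
+        # from its dns_name and the DNS zone used by the test.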
+        expected_fqdn = '%s.%s' % (port['dns_name'], self.zone['name'])
+        self.assertEqual(expected_fqdn, port['dns_assignment'][0]['fqdn'])
+
     @decorators.idempotent_id('fa6477ce-a12b-41da-b671-5a3bbdafab07')
     def test_port_on_special_network(self):
         name = data_utils.rand_name('port-test')
         port = self.create_port(self.network2,
                                 dns_name=name)
+        self._verify_dns_assignment(port)
         addr = port['fixed_ips'][0]['ip_address']
         self._verify_dns_records(addr, name)
         self.client.delete_port(port['id'])
diff --git a/neutron_tempest_plugin/scenario/test_ipv6.py b/neutron_tempest_plugin/scenario/test_ipv6.py
index d9d1a22..41ac2e6 100644
--- a/neutron_tempest_plugin/scenario/test_ipv6.py
+++ b/neutron_tempest_plugin/scenario/test_ipv6.py
@@ -33,17 +33,47 @@
 LOG = log.getLogger(__name__)
 
 
-def turn_nic6_on(ssh, ipv6_port):
+def turn_nic6_on(ssh, ipv6_port, config_nic=True):
     """Turns the IPv6 vNIC on
 
     Required because guest images usually set only the first vNIC on boot.
     Searches for the IPv6 vNIC's MAC and brings it up.
+    # NOTE(slaweq): on RHEL based OSes an ifcfg file for the new interface
+    # is needed to make IPv6 work on it, so if the
+    # /etc/sysconfig/network-scripts directory exists an ifcfg-%(nic)s
+    # file should be added in it
 
     @param ssh: RemoteClient ssh instance to server
     @param ipv6_port: port from IPv6 network attached to the server
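+    @param config_nic: if True, also create an ifcfg file or NetworkManager
+        profile for the NIC before bringing the link up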
     """
     ip_command = ip.IPCommand(ssh)
     nic = ip_command.get_nic_name_by_mac(ipv6_port['mac_address'])
+
+    if config_nic:
+        try:
+            if sysconfig_network_scripts_dir_exists(ssh):
+                ssh.execute_script(
+                    'echo -e "DEVICE=%(nic)s\\nNAME=%(nic)s\\nIPV6INIT=yes" | '
+                    'tee /etc/sysconfig/network-scripts/ifcfg-%(nic)s; '
+                    % {'nic': nic}, become_root=True)
+            if nmcli_command_exists(ssh):
+                ssh.execute_script('nmcli connection reload %s' % nic,
+                                   become_root=True)
+                ssh.execute_script('nmcli con mod %s ipv6.addr-gen-mode eui64'
+                                   % nic, become_root=True)
+                ssh.execute_script('nmcli connection up %s' % nic,
+                                   become_root=True)
+
+        except lib_exc.SSHExecCommandFailed as e:
+            # NOTE(slaweq): Sometimes this SSH command can fail because of
+            # some error from NetworkManager in the guest OS.
+            # But even then running "ip link set up" below is fine and the
+            # IP address should be configured properly.
+            LOG.debug("Error creating NetworkManager profile. "
+                      "Error message: %(error)s",
+                      {'error': e})
+
     ip_command.set_link(nic, "up")
 
 
@@ -76,6 +106,11 @@
                       {'error': e})
 
 
+def sysconfig_network_scripts_dir_exists(ssh):
+    return "False" not in ssh.execute_script(
+        'test -d /etc/sysconfig/network-scripts/ || echo "False"')
+
+
 def nmcli_command_exists(ssh):
     return "False" not in ssh.execute_script(
         'if ! type nmcli > /dev/null ; then echo "False"; fi')
@@ -122,24 +157,45 @@
                 if expected_address in ip_address:
                     return True
             return False
-
+        # Set the NIC with IPv6 to be UP, without creating a NetworkManager
+        # profile for it yet
+        turn_nic6_on(ssh_client, ipv6_port, False)
+        # And check if the IPv6 address gets properly configured
+        # on this NIC
         try:
-            # Set NIC with IPv6 to be UP and wait until IPv6 address will be
-            # configured on this NIC
-            turn_nic6_on(ssh_client, ipv6_port)
-            # And check if IPv6 address will be properly configured on this NIC
             utils.wait_until_true(
                 lambda: guest_has_address(ipv6_address),
-                timeout=120,
-                exception=RuntimeError(
-                    "Timed out waiting for IP address {!r} to be configured "
-                    "in the VM {!r}.".format(ipv6_address, vm['id'])))
-        except (lib_exc.SSHTimeout, ssh_exc.AuthenticationException) as ssh_e:
+                timeout=60)
+        except utils.WaitTimeout:
+            LOG.debug('Timeout waiting for the IPv6 address; retrying '
+                      'with the NIC configured by NetworkManager')
+        except (lib_exc.SSHTimeout,
+                ssh_exc.AuthenticationException) as ssh_e:
             LOG.debug(ssh_e)
             self._log_console_output([vm])
             self._log_local_network_status()
             raise
 
+        if not guest_has_address(ipv6_address):
+            try:
+                # Set NIC with IPv6 to be UP, this time also creating a
+                # NetworkManager profile for it
+                turn_nic6_on(ssh_client, ipv6_port)
+                # And wait until the IPv6 address is properly configured
+                # on this NIC
+                utils.wait_until_true(
+                    lambda: guest_has_address(ipv6_address),
+                    timeout=90,
+                    exception=RuntimeError(
+                        "Timed out waiting for IP address {!r} to be "
+                        "configured in the VM {!r}.".format(
+                            ipv6_address, vm['id'])))
+            except (lib_exc.SSHTimeout,
+                    ssh_exc.AuthenticationException) as ssh_e:
+                LOG.debug(ssh_e)
+                self._log_console_output([vm])
+                self._log_local_network_status()
+                raise
+
     def _test_ipv6_hotplug(self, ra_mode, address_mode):
         ipv6_networks = [self.create_network() for _ in range(2)]
         for net in ipv6_networks:
diff --git a/neutron_tempest_plugin/scenario/test_mac_learning.py b/neutron_tempest_plugin/scenario/test_mac_learning.py
index 6cd894f..409a6d8 100644
--- a/neutron_tempest_plugin/scenario/test_mac_learning.py
+++ b/neutron_tempest_plugin/scenario/test_mac_learning.py
@@ -14,10 +14,8 @@
 #    under the License.
 
 from oslo_log import log
-from paramiko import ssh_exception as ssh_exc
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
 
 from neutron_tempest_plugin.common import ssh
 from neutron_tempest_plugin.common import utils
@@ -121,7 +119,7 @@
     def _check_cmd_installed_on_server(self, ssh_client, server, cmd):
         try:
             ssh_client.execute_script('which %s' % cmd)
-        except (lib_exc.SSHTimeout, ssh_exc.AuthenticationException) as ssh_e:
+        except base.SSH_EXC_TUPLE as ssh_e:
             LOG.debug(ssh_e)
             self._log_console_output([server])
             self._log_local_network_status()
diff --git a/neutron_tempest_plugin/scenario/test_metadata.py b/neutron_tempest_plugin/scenario/test_metadata.py
index 05f0f04..af6bd09 100644
--- a/neutron_tempest_plugin/scenario/test_metadata.py
+++ b/neutron_tempest_plugin/scenario/test_metadata.py
@@ -19,11 +19,11 @@
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
+from tempest.lib import exceptions
 import testtools
 
 from neutron_tempest_plugin.common import ssh
 from neutron_tempest_plugin import config
-from neutron_tempest_plugin import exceptions
 from neutron_tempest_plugin.scenario import base
 
 LOG = logging.getLogger(__name__)
diff --git a/neutron_tempest_plugin/scenario/test_multicast.py b/neutron_tempest_plugin/scenario/test_multicast.py
index acfb75c..4fd41cf 100644
--- a/neutron_tempest_plugin/scenario/test_multicast.py
+++ b/neutron_tempest_plugin/scenario/test_multicast.py
@@ -16,10 +16,8 @@
 import netaddr
 from neutron_lib import constants
 from oslo_log import log
-from paramiko import ssh_exception as ssh_exc
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
 
 from neutron_tempest_plugin.common import ip
 from neutron_tempest_plugin.common import ssh
@@ -218,7 +216,7 @@
     def _check_cmd_installed_on_server(self, ssh_client, server, cmd):
         try:
             ssh_client.execute_script('which %s' % cmd)
-        except (lib_exc.SSHTimeout, ssh_exc.AuthenticationException) as ssh_e:
+        except base.SSH_EXC_TUPLE as ssh_e:
             LOG.debug(ssh_e)
             self._log_console_output([server])
             self._log_local_network_status()
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/__init__.py b/neutron_tempest_plugin/tap_as_a_service/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/__init__.py
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/manager.py b/neutron_tempest_plugin/tap_as_a_service/scenario/manager.py
new file mode 100644
index 0000000..80389c1
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/manager.py
@@ -0,0 +1,293 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+from oslo_log import log
+from oslo_utils import netutils
+
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.tap_as_a_service.services import taas_client
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class BaseTaasScenarioTests(base.BaseTempestTestCase):
+
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(BaseTaasScenarioTests, cls).setup_clients()
+
+        cls.client = cls.os_primary.network_client
+        cls.admin_network_client = cls.os_admin.network_client
+
+        # Setup taas clients
+        cls.tap_services_client = taas_client.TapServicesClient(
+            cls.os_primary.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **cls.os_primary.default_params)
+        cls.tap_flows_client = taas_client.TapFlowsClient(
+            cls.os_primary.auth_provider,
+            CONF.network.catalog_type,
+            CONF.network.region or CONF.identity.region,
+            endpoint_type=CONF.network.endpoint_type,
+            build_interval=CONF.network.build_interval,
+            build_timeout=CONF.network.build_timeout,
+            **cls.os_primary.default_params)
+
+    def _create_subnet(self, network, subnets_client=None,
+                       namestart='subnet-smoke', **kwargs):
+        """Create a subnet for the given network
+
+        within the cidr block configured for tenant networks.
+        """
+        if not subnets_client:
+            subnets_client = self.client
+
+        def cidr_in_use(cidr, tenant_id):
+            """Check cidr existence
+
+            :returns: True if subnet with cidr already exist in tenant
+                  False else
+            """
+            cidr_in_use = self.os_admin.network_client.list_subnets(
+                tenant_id=tenant_id, cidr=cidr)['subnets']
+            return len(cidr_in_use) != 0
+
+        ip_version = kwargs.pop('ip_version', 4)
+
+        if ip_version == 6:
+            tenant_cidr = netaddr.IPNetwork(
+                CONF.network.project_network_v6_cidr)
+            num_bits = CONF.network.project_network_v6_mask_bits
+        else:
+            tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+            num_bits = CONF.network.project_network_mask_bits
+
+        result = None
+        str_cidr = None
+        # Repeatedly attempt subnet creation with sequential cidr
+        # blocks until an unallocated block is found.
+        for subnet_cidr in tenant_cidr.subnet(num_bits):
+            str_cidr = str(subnet_cidr)
+            if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
+                continue
+
+            subnet = dict(
+                name=data_utils.rand_name(namestart),
+                network_id=network['id'],
+                tenant_id=network['tenant_id'],
+                cidr=str_cidr,
+                ip_version=ip_version,
+                **kwargs
+            )
+            try:
+                result = subnets_client.create_subnet(**subnet)
+                break
+            except lib_exc.Conflict as e:
+                is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+                if not is_overlapping_cidr:
+                    raise
+        self.assertIsNotNone(result, 'Unable to allocate tenant network')
+
+        subnet = result['subnet']
+        self.assertEqual(subnet['cidr'], str_cidr)
+
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        subnets_client.delete_subnet, subnet['id'])
+
+        return subnet
+
+    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
+        ports = self.os_admin.network_client.list_ports(
+            device_id=server['id'], fixed_ip=ip_addr)['ports']
+        # A port can have more than one IP address in some cases.
+        # If the network is dual-stack (IPv4 + IPv6), this port is associated
+        # with 2 subnets
+        p_status = ['ACTIVE']
+        # NOTE(vsaienko) With Ironic, instances live on separate hardware
+        # servers. Neutron does not bind ports for Ironic instances, as a
+        # result the port remains in the DOWN state.
+        # TODO(vsaienko) remove once bug: #1599836 is resolved.
+        if getattr(CONF.service_available, 'ironic', False):
+            p_status.append('DOWN')
+        port_map = [(p["id"], fxip["ip_address"])
+                    for p in ports
+                    for fxip in p["fixed_ips"]
+                    if netutils.is_valid_ipv4(fxip["ip_address"]) and
+                    p['status'] in p_status]
+        inactive = [p for p in ports if p['status'] != 'ACTIVE']
+        if inactive:
+            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
+
+        self.assertNotEqual(0, len(port_map),
+                            "No IPv4 addresses found in: %s" % ports)
+        self.assertEqual(len(port_map), 1,
+                         "Found multiple IPv4 addresses: %s. "
+                         "Unable to determine which port to target."
+                         % port_map)
+        return port_map[0]
+
+    def _get_network_by_name(self, network_name):
+        net = self.os_admin.network_client.list_networks(
+            name=network_name)['networks']
+        self.assertNotEqual(len(net), 0,
+                            "Unable to get network by name: %s" % network_name)
+        return net[0]
+
+    def _run_in_background(self, sshclient, cmd):
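+        # Run the command detached from the SSH session (nohup + "&") so
+        # it keeps running after exec_command returns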
+        runInBg = "nohup %s 2>&1 &" % cmd
+        sshclient.exec_command(runInBg)
+
+    def create_networks(self, networks_client=None,
+                        routers_client=None, subnets_client=None,
+                        dns_nameservers=None, port_security_enabled=True):
+        """Create a network with a subnet connected to a router.
+
+        The baremetal driver is a special case since all nodes are
+        on the same shared network.
+
+        :param dns_nameservers: list of dns servers to send to subnet.
+        :returns: network, subnet, router
+        """
+        if CONF.network.shared_physical_network:
+            # NOTE(Shrews): This exception is for environments where tenant
+            # credential isolation is available, but network separation is
+            # not (the current baremetal case). Likely can be removed when
+            # test account mgmt is reworked:
+            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+            if not CONF.compute.fixed_network_name:
+                m = 'fixed_network_name must be specified in config'
+                raise lib_exc.InvalidConfiguration(m)
+            network = self._get_network_by_name(
+                CONF.compute.fixed_network_name)
+            router = None
+            subnet = None
+        else:
+            network = self.create_network(
+                client=networks_client,
+                port_security_enabled=port_security_enabled)
+            router = self._ensure_public_router(client=routers_client)
+            subnet_kwargs = dict(network=network,
+                                 subnets_client=subnets_client)
+            # use explicit check because empty list is a valid option
+            if dns_nameservers is not None:
+                subnet_kwargs['dns_nameservers'] = dns_nameservers
+            subnet = self._create_subnet(**subnet_kwargs)
+            if not routers_client:
+                routers_client = self.client
+            router_id = router['id']
+            routers_client.add_router_interface_with_subnet_id(
+                router_id=router_id, subnet_id=subnet['id'])
+
+            # save a cleanup job to remove this association between
+            # router and subnet
+            self.addCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                routers_client.remove_router_interface_with_subnet_id,
+                router_id=router_id, subnet_id=subnet['id'])
+        return network, subnet, router
+
+    def _create_server_with_floatingip(self, use_taas_cloud_image=False,
+                                       provider_net=False, **kwargs):
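+        """Boot a server on a pre-created port and attach a floating IP
+
+        :returns: the created port and the floating IP associated with it
+        """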
+        network = self.network
+        if use_taas_cloud_image:
+            image = CONF.neutron_plugin_options.advanced_image_ref
+            flavor = CONF.neutron_plugin_options.advanced_image_flavor_ref
+        else:
+            flavor = CONF.compute.flavor_ref
+            image = CONF.compute.image_ref
+
+        if provider_net:
+            network = self.provider_network
+
+        port = self.create_port(
+            network=network, security_groups=[self.secgroup['id']], **kwargs)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.client.delete_port, port['id'])
+
+        params = {
+            'flavor_ref': flavor,
+            'image_ref': image,
+            'key_name': self.keypair['name']
+        }
+        vm = self.create_server(networks=[{'port': port['id']}], **params)
+        self.wait_for_server_active(vm['server'])
+        self.wait_for_guest_os_ready(vm['server'])
+
+        fip = self.create_and_associate_floatingip(
+            port_id=port['id'])
+
+        return port, fip
+
+    def _setup_provider_network(self):
+        net = self._create_provider_network()
+        self._create_provider_subnet(net["id"])
+        return net
+
+    def _create_provider_network(self):
+        network_kwargs = {
+            "admin_state_up": True,
+            "shared": True,
+            "provider:network_type": "vlan",
+            "provider:physical_network":
+                CONF.taas.provider_physical_network,
+        }
+
+        segmentation_id = CONF.taas.provider_segmentation_id
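+        # A provider_segmentation_id of "0" means an untagged (flat)
+        # network; any other value is used as the VLAN segmentation ID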
+        if segmentation_id == "0":
+            network_kwargs['provider:network_type'] = 'flat'
+        elif segmentation_id:
+            network_kwargs['provider:segmentation_id'] = segmentation_id
+
+        network = self.admin_network_client.create_network(
+            **network_kwargs)['network']
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.admin_network_client.delete_network,
+                        network['id'])
+
+        return network
+
+    def _create_provider_subnet(self, net_id):
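+        # Use a fixed, test-only CIDR for the provider subnet and plug it
+        # into the test router so the provider network is routable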
+        subnet = dict(
+            network_id=net_id,
+            cidr="172.25.100.0/24",
+            ip_version=4,
+        )
+        result = self.admin_network_client.create_subnet(**subnet)
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.admin_network_client.delete_subnet, result['subnet']['id'])
+
+        self.admin_network_client.add_router_interface_with_subnet_id(
+            self.router['id'], subnet_id=result['subnet']['id'])
+
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.admin_network_client.remove_router_interface_with_subnet_id,
+            self.router['id'], subnet_id=result['subnet']['id'])
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
new file mode 100644
index 0000000..5598fbe
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
@@ -0,0 +1,249 @@
+# Copyright (c) 2015 Midokura SARL
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+import testtools
+
+from neutron_tempest_plugin.tap_as_a_service.scenario import manager
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+# pylint: disable=too-many-ancestors
+class TestTaaS(manager.BaseTaasScenarioTests):
+    """Config Requirement in tempest.conf:
+
+    - project_network_cidr_bits- specifies the subnet range for each network
+    - project_network_cidr
+    - public_network_id.
+    """
+
+    @classmethod
+    @utils.requires_ext(extension='taas', service='network')
+    @utils.requires_ext(extension='security-group', service='network')
+    @utils.requires_ext(extension='router', service='network')
+    def skip_checks(cls):
+        super(TestTaaS, cls).skip_checks()
+
+    @classmethod
+    def resource_setup(cls):
+        super(TestTaaS, cls).resource_setup()
+        cls.keypair = cls.create_keypair()
+        cls.secgroup = cls.create_security_group(
+            name=data_utils.rand_name('secgroup'))
+        cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+        LOG.debug("TaaSScenarioTest Setup done.")
+
+    def _create_server(self, network, security_group=None):
+        """Create a server
+
+        Creates a server having a port on given network and security group.
+        """
+        keys = self.create_keypair()
+        kwargs = {}
+        if security_group is not None:
+            kwargs['security_groups'] = [{'name': security_group['name']}]
+        server = self.create_server(
+            key_name=keys['name'],
+            networks=[{'uuid': network['id']}],
+            flavor_ref=CONF.compute.flavor_ref,
+            image_ref=CONF.compute.image_ref,
+            **kwargs)
+        self.wait_for_server_active(server['server'])
+        self.wait_for_guest_os_ready(server['server'])
+        return server, keys
+
+    @testtools.skipUnless(CONF.taas.provider_physical_network,
+                          'Provider physical network parameter not provided.')
+    @utils.requires_ext(extension="provider", service="network")
+    def _create_network_sriov(self, networks_client=None,
+                              tenant_id=None,
+                              namestart='network-smoke-sriov-',
+                              port_security_enabled=True):
+        if not networks_client:
+            networks_client = self.networks_client
+        if not tenant_id:
+            tenant_id = networks_client.tenant_id
+        name = data_utils.rand_name(namestart)
+        network_kwargs = dict(name=name, tenant_id=tenant_id)
+        # Neutron disables port security by default so we have to check the
+        # config before trying to create the network with
+        # port_security_enabled
+        if CONF.network_feature_enabled.port_security:
+            network_kwargs['port_security_enabled'] = port_security_enabled
+
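+        # For SR-IOV (direct vNIC) ports the network must be a provider
+        # VLAN or flat network, so fill in the provider attributes from
+        # the taas_plugin_options config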
+        if CONF.network.port_vnic_type == 'direct':
+            network_kwargs['provider:network_type'] = 'vlan'
+            if CONF.taas_plugin_options.provider_segmentation_id:
+                if CONF.taas_plugin_options.provider_segmentation_id == '0':
+                    network_kwargs['provider:network_type'] = 'flat'
+                else:
+                    network_kwargs['provider:segmentation_id'] = \
+                        CONF.taas_plugin_options.provider_segmentation_id
+
+            network_kwargs['provider:physical_network'] = \
+                CONF.taas_plugin_options.provider_physical_network
+
+        result = networks_client.create_network(**network_kwargs)
+        network = result['network']
+        self.assertEqual(network['name'], name)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        networks_client.delete_network,
+                        network['id'])
+        return network
+
+    @testtools.skipUnless(CONF.taas.provider_physical_network,
+                          'Provider physical network parameter not provided.')
+    @utils.requires_ext(extension="provider", service="network")
+    def create_networks_sriov(self, networks_client=None,
+                              routers_client=None, subnets_client=None,
+                              tenant_id=None, dns_nameservers=None,
+                              port_security_enabled=True):
+        """Create a network with a subnet connected to a router.
+
+        The baremetal driver is a special case since all nodes are
+        on the same shared network.
+
+        :param tenant_id: id of tenant to create resources in.
+        :param dns_nameservers: list of dns servers to send to subnet.
+        :returns: network, subnet, router
+        """
+        router = None
+        if CONF.network.shared_physical_network:
+            # NOTE(Shrews): This exception is for environments where tenant
+            # credential isolation is available, but network separation is
+            # not (the current baremetal case). Likely can be removed when
+            # test account mgmt is reworked:
+            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+            if not CONF.compute.fixed_network_name:
+                msg = 'fixed_network_name must be specified in config'
+                raise lib_exc.InvalidConfiguration(msg)
+            network = self._get_network_by_name(
+                CONF.compute.fixed_network_name)
+            subnet = None
+        else:
+            network = self._create_network_sriov(
+                networks_client=networks_client,
+                tenant_id=tenant_id,
+                port_security_enabled=port_security_enabled)
+            subnet_kwargs = dict(network=network,
+                                 subnets_client=subnets_client,
+                                 routers_client=routers_client)
+            # use explicit check because empty list is a valid option
+            if dns_nameservers is not None:
+                subnet_kwargs['dns_nameservers'] = dns_nameservers
+            subnet = self._create_subnet(**subnet_kwargs)
+        return network, subnet, router
+
+    def _create_topology(self):
+        """Topology
+
+        +----------+             +----------+
+        | "server" |             | "server" |
+        |  VM-1    |             |  VM-2    |
+        |          |             |          |
+        +----+-----+             +----+-----+
+             |                        |
+             |                        |
+        +----+----+----+----+----+----+-----+
+                            |
+                            |
+                            |
+                     +------+------+
+                     | "server"    |
+                     | tap-service |
+                     +-------------+
+        """
+        LOG.debug('Starting Topology Creation')
+        resp = {}
+        # Create Network1 and Subnet1.
+        vnic_type = CONF.network.port_vnic_type
+        if vnic_type == 'direct':
+            self.network1, self.subnet1, self.router1 = \
+                self.create_networks_sriov()
+        else:
+            self.network1, self.subnet1, self.router1 = self.create_networks()
+        resp['network1'] = self.network1
+        resp['subnet1'] = self.subnet1
+        resp['router1'] = self.router1
+
+        # Create a security group allowing icmp and ssh traffic.
+        self.security_group = self.create_security_group(
+            name=data_utils.rand_name('secgroup'))
+        self.create_loginable_secgroup_rule(
+            secgroup_id=self.security_group['id'])
+
+        # Create 3 VMs and assign them a floating IP each.
+        port1, server_floating_ip_1 = self._create_server_with_floatingip()
+        port2, server_floating_ip_2 = self._create_server_with_floatingip()
+        port3, server_floating_ip_3 = self._create_server_with_floatingip()
+
+        # Store the received information to be used later
+        resp['port1'] = port1
+        resp['server_floating_ip_1'] = server_floating_ip_1
+
+        resp['port2'] = port2
+        resp['server_floating_ip_2'] = server_floating_ip_2
+
+        resp['port3'] = port3
+        resp['server_floating_ip_3'] = server_floating_ip_3
+
+        return resp
+
+    @utils.services('network')
+    @utils.requires_ext(extension="taas-vlan-filter", service="network")
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('40903cbd-0e3c-464d-b311-dc77d3894e65')
+    def test_tap_flow_data_mirroring(self):
+        """Create test topology and TaaS resources
+
+        Creates test topology consisting of 3 servers, one routable network,
+        ports and TaaS resources, i.e. tap-service and tap-flow using those
+        ports.
+        """
+        self.network, self.subnet, self.router = self.create_networks()
+        topology = self._create_topology()
+
+        # Create Tap-Service.
+        tap_service = self.tap_services_client.create_tap_service(
+            port_id=topology['port1']['id'])['tap_service']
+
+        LOG.debug('TaaS Config options: vlan-filter: %s',
+                  CONF.taas.vlan_filter)
+
+        # Create Tap-Flow.
+        vnic_type = CONF.network.port_vnic_type
+        vlan_filter = None
+        if vnic_type == 'direct':
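+            # VLAN filter precedence for the tap-flow: an explicitly
+            # configured CONF.taas.vlan_filter wins, otherwise the network's
+            # provider segmentation ID is used; the range below is only a
+            # fallback default.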
+            vlan_filter = '108-117,126,135-144'
+            if CONF.taas.vlan_filter:
+                vlan_filter = CONF.taas.vlan_filter
+            elif topology['network1']['provider:segmentation_id'] != '0':
+                vlan_filter = topology['network1']['provider:segmentation_id']
+
+        tap_flow = self.tap_flows_client.create_tap_flow(
+            tap_service_id=tap_service['id'], direction='BOTH',
+            source_port=topology['port3']['id'],
+            vlan_filter=vlan_filter)['tap_flow']
+
+        self.assertEqual(tap_flow['vlan_filter'], vlan_filter)
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/test_traffic_impact.py b/neutron_tempest_plugin/tap_as_a_service/scenario/test_traffic_impact.py
new file mode 100644
index 0000000..e2b14c7
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/test_traffic_impact.py
@@ -0,0 +1,261 @@
+# Copyright (c) 2019 AT&T
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from contextlib import contextmanager
+from oslo_log import log
+import testtools
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils.linux import remote_client
+from tempest.lib.common.utils import test_utils
+
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.tap_as_a_service.scenario import manager
+
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+class TestTaaSTrafficScenarios(manager.BaseTaasScenarioTests):
+
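+    # Applying requires_ext to skip_checks makes the whole class skip when
+    # the taas, security-group or router API extensions are not available.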
+    @classmethod
+    @utils.requires_ext(extension='taas', service='network')
+    @utils.requires_ext(extension='security-group', service='network')
+    @utils.requires_ext(extension='router', service='network')
+    def skip_checks(cls):
+        super(TestTaaSTrafficScenarios, cls).skip_checks()
+
+    @classmethod
+    def resource_setup(cls):
+        super(TestTaaSTrafficScenarios, cls).resource_setup()
+        cls.provider_network = None
+        cls.keypair = cls.create_keypair()
+        cls.secgroup = cls.create_security_group(
+            name=data_utils.rand_name('secgroup'))
+        cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+        cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+
+    @contextmanager
+    def _setup_topology(self, taas=True, use_taas_cloud_image=False,
+                        provider_net=False):
+        """Setup topology for the test
+
+           +------------+
+           | monitor vm |
+           +-----+------+
+                 |
+           +-----v---+
+        +--+ network <--+
+        |  +----^----+  |
+        |       |       |
+        |  +----+-+ +---+--+
+        |  | vm 1 | | vm 2 |
+        |  +------+ +------+
+        |
+        |  +--------+
+        +--> router |
+           +-----+--+
+                 |
+           +-----v------+
+           | public net |
+           +------------+
+       """
+        self.network, self.subnet, self.router = self.create_networks()
+        LOG.debug('Setup topology subnet details: %s', self.subnet)
+        if provider_net:
+            if CONF.taas.provider_physical_network:
+                self.provider_network = self._setup_provider_network()
+            else:
+                msg = "provider_physical_network not provided"
+                raise self.skipException(msg)
+
+        self.mon_port, mon_fip = self._create_server_with_floatingip(
+            use_taas_cloud_image=use_taas_cloud_image,
+            provider_net=provider_net)
+        LOG.debug('Setup topology monitor port: %s  ###  monitor FIP: %s ',
+                  self.mon_port, mon_fip)
+        self.left_port, self.left_fip = self._create_server_with_floatingip(
+            provider_net=provider_net)
+        LOG.debug('Setup topology left port: %s  ###  left FIP: %s ',
+                  self.left_port, self.left_fip)
+        self.right_port, self.right_fip = self._create_server_with_floatingip(
+            provider_net=provider_net)
+        LOG.debug('Setup topology right port: %s  ###  right FIP: %s ',
+                  self.right_port, self.right_fip)
+
+        if taas:
+            LOG.debug("Create TAAS service")
+            tap_service = self.tap_services_client.create_tap_service(
+                port_id=self.mon_port['id'])['tap_service']
+            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                            self.client.delete_tap_service, tap_service['id'])
+            tap_flow = self.tap_flows_client.create_tap_flow(
+                tap_service_id=tap_service['id'], direction='BOTH',
+                source_port=self.left_port['id'])['tap_flow']
+            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                            self.client.delete_tap_flow, tap_flow['id'])
+            tap_flow = self.tap_flows_client.create_tap_flow(
+                tap_service_id=tap_service['id'], direction='BOTH',
+                source_port=self.right_port['id'])['tap_flow']
+            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                            self.client.delete_tap_flow, tap_flow['id'])
+
+        user = CONF.validation.image_ssh_user
+        if use_taas_cloud_image:
+            user = CONF.neutron_plugin_options.advanced_image_ssh_user
+
+        self.monitor_client = remote_client.RemoteClient(
+            mon_fip['floating_ip_address'], user,
+            pkey=self.keypair['private_key'])
+        self.monitor_client.validate_authentication()
+        self.left_client = remote_client.RemoteClient(
+            self.left_fip['floating_ip_address'],
+            CONF.validation.image_ssh_user,
+            pkey=self.keypair['private_key'])
+        self.left_client.validate_authentication()
+        self.right_client = remote_client.RemoteClient(
+            self.right_fip['floating_ip_address'],
+            CONF.validation.image_ssh_user,
+            pkey=self.keypair['private_key'])
+        self.right_client.validate_authentication()
+        yield
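+        # Tap service/flow cleanup is registered via addCleanup above, so
+        # there is nothing to tear down after the yield.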
+
+    def _check_icmp_traffic(self):
+        log_location = "/tmp/tcpdumplog"
+
+        right_ip = self.right_port['fixed_ips'][0]['ip_address']
+        left_ip = self.left_port['fixed_ips'][0]['ip_address']
+
+        # Run tcpdump in background
+        self._run_in_background(self.monitor_client,
+                                "sudo tcpdump -n -nn > %s" % log_location)
+
+        # Ensure tcpdump is up and running
+        psax = self.monitor_client.exec_command("ps -ax")
+        self.assertTrue("tcpdump" in psax)
+
+        # Run traffic from left_vm to right_vm
+        LOG.debug('Check ICMP traffic: ping %s ', right_ip)
+        self.check_remote_connectivity(self.left_client, right_ip,
+                                       ping_count=50)
+
+        # Collect tcpdump results
+        output = self.monitor_client.exec_command("cat %s" % log_location)
+        self.assertLess(0, len(output))
+
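+        # The monitor VM should have received both directions of the
+        # mirrored flow: the echo request from left to right and the
+        # corresponding reply.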
+        looking_for = ["IP %s > %s: ICMP echo request" % (left_ip, right_ip),
+                       "IP %s > %s: ICMP echo reply" % (right_ip, left_ip)]
+
+        results = []
+        for tcpdump_line in looking_for:
+            results.append(tcpdump_line in output)
+
+        return all(results)
+
+    def _test_taas_connectivity(self, use_provider_net=False):
+        """Ensure TAAS doesn't break connectivity
+
+        This test creates TAAS service between two servers and checks that
+        it doesn't break basic connectivity between them.
+        """
+        # Check uninterrupted traffic between VMs
+        with self._setup_topology(provider_net=use_provider_net):
+            # Left to right
+            self.check_remote_connectivity(
+                self.left_client,
+                self.right_port['fixed_ips'][0]['ip_address'])
+
+            # Right to left
+            self.check_remote_connectivity(
+                self.right_client,
+                self.left_port['fixed_ips'][0]['ip_address'])
+
+            # TAAS vm to right
+            self.check_remote_connectivity(
+                self.monitor_client,
+                self.right_port['fixed_ips'][0]['ip_address'])
+
+            # TAAS vm to left
+            self.check_remote_connectivity(
+                self.monitor_client,
+                self.left_port['fixed_ips'][0]['ip_address'])
+
+    @decorators.idempotent_id('ff414b7d-e81c-47f2-b6c8-53bc2f1e9b00')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_taas_provider_network_connectivity(self):
+        self._test_taas_connectivity(use_provider_net=True)
+
+    @decorators.idempotent_id('e3c52e91-7abf-4dfd-8687-f7c071cdd333')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_taas_network_connectivity(self):
+        self._test_taas_connectivity(use_provider_net=False)
+
+    @decorators.idempotent_id('fcb15ca3-ef61-11e9-9792-f45c89c47e11')
+    @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+                          'Cloud image not found.')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_taas_forwarded_traffic_positive(self):
+        """Check that TAAS forwards traffic as expected"""
+
+        with self._setup_topology(use_taas_cloud_image=True):
+            # Check that traffic was forwarded to TAAS service
+            self.assertTrue(self._check_icmp_traffic())
+
+    @decorators.idempotent_id('6c54d9c5-075a-4a1f-bbe6-12c3c9abf1e2')
+    @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+                          'Cloud image not found.')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_taas_forwarded_traffic_negative(self):
+        """Check that TAAS doesn't forward traffic"""
+
+        with self._setup_topology(taas=False, use_taas_cloud_image=True):
+            # Check that traffic was NOT forwarded to TAAS service
+            self.assertFalse(self._check_icmp_traffic())
+
+    @decorators.idempotent_id('fcb15ca3-ef61-11e9-9792-f45c89c47e12')
+    @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+                          'Cloud image not found.')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_taas_forwarded_traffic_provider_net_positive(self):
+        """Check that TAAS forwards traffic as expected in provider network"""
+
+        with self._setup_topology(use_taas_cloud_image=True,
+                                  provider_net=True):
+            # Check that traffic was forwarded to TAAS service
+            self.assertTrue(self._check_icmp_traffic())
+
+    @decorators.idempotent_id('6c54d9c5-075a-4a1f-bbe6-12c3c9abf1e3')
+    @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+                          'Cloud image not found.')
+    @decorators.attr(type='slow')
+    @utils.services('compute', 'network')
+    def test_taas_forwarded_traffic_provider_net_negative(self):
+        """Check that TAAS doesn't forward traffic in provider network"""
+
+        with self._setup_topology(taas=False, use_taas_cloud_image=True,
+                                  provider_net=True):
+            # Check that traffic was NOT forwarded to TAAS service
+            self.assertFalse(self._check_icmp_traffic())
diff --git a/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py b/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
index 1a51198..92eed9e 100644
--- a/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
+++ b/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
@@ -233,7 +233,8 @@
         left_server = self._create_server()
         ssh_client = ssh.Client(left_server['fip']['floating_ip_address'],
                                 CONF.validation.image_ssh_user,
-                                pkey=self.keypair['private_key'])
+                                pkey=self.keypair['private_key'],
+                                ssh_key_type=CONF.validation.ssh_key_type)
 
         # check LEFT -> RIGHT connectivity via VPN
         self.check_remote_connectivity(ssh_client, right_ip,
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index e524c0b..4efcd0c 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -125,3 +125,7 @@
         CUSTOMIZE_IMAGE: true
         BUILD_TIMEOUT: 784
       tempest_concurrency: 3  # out of 4
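+      # Copy OVN and Open vSwitch logs and the OVN databases into the job
+      # output to help debug failures.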
+      zuul_copy_output:
+        '/var/log/ovn': 'logs'
+        '/var/log/openvswitch': 'logs'
+        '/var/lib/ovn': 'logs'
diff --git a/zuul.d/master_jobs.yaml b/zuul.d/master_jobs.yaml
index 9e36810..8b7623a 100644
--- a/zuul.d/master_jobs.yaml
+++ b/zuul.d/master_jobs.yaml
@@ -381,9 +381,13 @@
       # test_established_tcp_session_after_re_attachinging_sg from the
       # exclude regex when bug https://bugs.launchpad.net/neutron/+bug/1936911
       # will be fixed
+      # TODO(slaweq) remove test_floatingip_port_details from the exclude
+      # regex when bug https://bugs.launchpad.net/neutron/+bug/1799790 is
+      # fixed
       tempest_exclude_regex: "\
           (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)|\
-          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
+          (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+          (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPPortDetailsTest.test_floatingip_port_details)"
       devstack_localrc:
         Q_AGENT: linuxbridge
         NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_linuxbridge) | join(',') }}"
@@ -488,7 +492,7 @@
         # is included in an ovn released version
         OVN_BUILD_FROM_SOURCE: True
         OVN_BRANCH: "v21.06.0"
-        OVS_BRANCH: "branch-2.15"
+        OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
         OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
       devstack_services:
         br-ex-tcpdump: true
@@ -1124,24 +1128,28 @@
         - taas-vlan-filter
       devstack_localrc:
         NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
-        DOWNLOAD_DEFAULT_IMAGES: false
-        IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img"
-        DEFAULT_IMAGE_NAME: cirros-0.3.4-i386-disk
-        ADVANCED_IMAGE_NAME: ubuntu-20.04-minimal-cloudimg-amd64
+        IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+        ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+        ADVANCED_INSTANCE_TYPE: ntp_image_384M
+        ADVANCED_INSTANCE_USER: ubuntu
+        CUSTOMIZE_IMAGE: false
         BUILD_TIMEOUT: 784
         Q_AGENT: openvswitch
-        Q_ML2_TENANT_NETWORK_TYPE: vxlan
+        Q_ML2_TENANT_NETWORK_TYPE: vxlan,vlan
         Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
       devstack_local_conf:
         post-config:
           /$NEUTRON_CORE_PLUGIN_CONF:
             AGENT:
-              tunnel_types: vxlan,gre
+              tunnel_types: vxlan
+            ml2_type_vlan:
+              network_vlan_ranges: public
         test-config:
           $TEMPEST_CONFIG:
-            taas_plugin_options:
-              advanced_image_ref: ubuntu-20.04-minimal-cloudimg-amd64
-              advanced_image_ssh_user: ubuntu
+            neutron_plugin_options:
+              image_is_advanced: true
+              advanced_image_flavor_ref: d1
+            taas:
               provider_physical_network: public
               provider_segmentation_id: 100
             image_feature_enabled:
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 3463d84..0584523 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -177,6 +177,24 @@
       jobs:
         - neutron-tempest-plugin-dvr-multinode-scenario-xena
 
+- project-template:
+    name: neutron-tempest-plugin-jobs-yoga
+    check:
+      jobs:
+        - neutron-tempest-plugin-api-yoga
+        - neutron-tempest-plugin-scenario-linuxbridge-yoga
+        - neutron-tempest-plugin-scenario-openvswitch-yoga
+        - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga
+        - neutron-tempest-plugin-scenario-ovn-yoga
+        - neutron-tempest-plugin-designate-scenario-yoga
+    gate:
+      jobs:
+        - neutron-tempest-plugin-api-yoga
+    # TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
+    #               the experimental queue when it is more stable
+    experimental:
+      jobs:
+        - neutron-tempest-plugin-dvr-multinode-scenario-yoga
 
 - project:
     templates:
@@ -185,6 +203,7 @@
       - neutron-tempest-plugin-jobs-victoria
       - neutron-tempest-plugin-jobs-wallaby
       - neutron-tempest-plugin-jobs-xena
+      - neutron-tempest-plugin-jobs-yoga
       - check-requirements
       - tempest-plugin-jobs
       - release-notes-jobs-python3
@@ -194,21 +213,26 @@
         - neutron-tempest-plugin-sfc-victoria
         - neutron-tempest-plugin-sfc-wallaby
         - neutron-tempest-plugin-sfc-xena
+        - neutron-tempest-plugin-sfc-yoga
         - neutron-tempest-plugin-bgpvpn-bagpipe
         - neutron-tempest-plugin-bgpvpn-bagpipe-victoria
         - neutron-tempest-plugin-bgpvpn-bagpipe-wallaby
         - neutron-tempest-plugin-bgpvpn-bagpipe-xena
+        - neutron-tempest-plugin-bgpvpn-bagpipe-yoga
         - neutron-tempest-plugin-dynamic-routing
         - neutron-tempest-plugin-dynamic-routing-victoria
         - neutron-tempest-plugin-dynamic-routing-wallaby
         - neutron-tempest-plugin-dynamic-routing-xena
+        - neutron-tempest-plugin-dynamic-routing-yoga
         - neutron-tempest-plugin-fwaas
         - neutron-tempest-plugin-vpnaas
         - neutron-tempest-plugin-vpnaas-victoria
         - neutron-tempest-plugin-vpnaas-wallaby
         - neutron-tempest-plugin-vpnaas-xena
+        - neutron-tempest-plugin-vpnaas-yoga
         - neutron-tempest-plugin-tap-as-a-service
         - neutron-tempest-plugin-tap-as-a-service-xena
+        - neutron-tempest-plugin-tap-as-a-service-yoga
 
     gate:
       jobs:
diff --git a/zuul.d/victoria_jobs.yaml b/zuul.d/victoria_jobs.yaml
index 832d242..d648aa8 100644
--- a/zuul.d/victoria_jobs.yaml
+++ b/zuul.d/victoria_jobs.yaml
@@ -142,9 +142,9 @@
 - job:
     name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-victoria
     parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
-    override-checkout: stable-victoria
+    override-checkout: stable/victoria
     vars:
-      branch_override: stable-victoria
+      branch_override: stable/victoria
       network_api_extensions: *api_extensions
       network_available_features: *available_features
       devstack_localrc:
diff --git a/zuul.d/wallaby_jobs.yaml b/zuul.d/wallaby_jobs.yaml
index 13a192e..c79667a 100644
--- a/zuul.d/wallaby_jobs.yaml
+++ b/zuul.d/wallaby_jobs.yaml
@@ -106,9 +106,9 @@
 - job:
     name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-wallaby
     parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
-    override-checkout: stable-wallaby
+    override-checkout: stable/wallaby
     vars:
-      branch_override: stable-wallaby
+      branch_override: stable/wallaby
       network_api_extensions: *api_extensions
       network_available_features: *available_features
       devstack_localrc:
diff --git a/zuul.d/xena_jobs.yaml b/zuul.d/xena_jobs.yaml
index 5ad63f0..3d8ce43 100644
--- a/zuul.d/xena_jobs.yaml
+++ b/zuul.d/xena_jobs.yaml
@@ -108,9 +108,9 @@
 - job:
     name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-xena
     parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
-    override-checkout: stable-xena
+    override-checkout: stable/xena
     vars:
-      branch_override: stable-xena
+      branch_override: stable/xena
       network_api_extensions: *api_extensions
       network_available_features: *available_features
       devstack_localrc:
diff --git a/zuul.d/yoga_jobs.yaml b/zuul.d/yoga_jobs.yaml
new file mode 100644
index 0000000..35720a0
--- /dev/null
+++ b/zuul.d/yoga_jobs.yaml
@@ -0,0 +1,213 @@
+- job:
+    name: neutron-tempest-plugin-api-yoga
+    parent: neutron-tempest-plugin-api
+    override-checkout: stable/yoga
+    vars:
+      # TODO(slaweq): find a way to put this list of extensions in the
+      # neutron repository and keep it different per branch,
+      # then it could be removed from here
+      network_api_extensions_common: &api_extensions
+        - address-group
+        - address-scope
+        - agent
+        - allowed-address-pairs
+        - auto-allocated-topology
+        - availability_zone
+        - binding
+        - default-subnetpools
+        - dhcp_agent_scheduler
+        - dns-domain-ports
+        - dns-integration
+        - dns-integration-domain-keywords
+        - empty-string-filtering
+        - expose-port-forwarding-in-fip
+        - expose-l3-conntrack-helper
+        - ext-gw-mode
+        - external-net
+        - extra_dhcp_opt
+        - extraroute
+        - extraroute-atomic
+        - filter-validation
+        - fip-port-details
+        - flavors
+        - floating-ip-port-forwarding
+        - floatingip-pools
+        - ip-substring-filtering
+        - l3-conntrack-helper
+        - l3-flavors
+        - l3-ha
+        - l3_agent_scheduler
+        - logging
+        - metering
+        - multi-provider
+        - net-mtu
+        - net-mtu-writable
+        - network-ip-availability
+        - network_availability_zone
+        - network-segment-range
+        - pagination
+        - port-device-profile
+        - port-resource-request
+        - port-resource-request-groups
+        - port-mac-address-regenerate
+        - port-security
+        - port-security-groups-filtering
+        - project-id
+        - provider
+        - qos
+        - qos-bw-minimum-ingress
+        - qos-fip
+        - quotas
+        - quota_details
+        - rbac-address-group
+        - rbac-address-scope
+        - rbac-policies
+        - rbac-security-groups
+        - rbac-subnetpool
+        - router
+        - router-admin-state-down-before-update
+        - router_availability_zone
+        - security-group
+        - security-groups-remote-address-group
+        - segment
+        - service-type
+        - sorting
+        - standard-attr-description
+        - standard-attr-revisions
+        - standard-attr-segment
+        - standard-attr-tag
+        - standard-attr-timestamp
+        - stateful-security-group
+        - subnet_allocation
+        - subnet-dns-publish-fixed-ip
+        - subnet-service-types
+        - subnetpool-prefix-ops
+        - tag-ports-during-bulk-creation
+        - trunk
+        - trunk-details
+        - uplink-status-propagation
+      network_api_extensions_tempest:
+        - dvr
+      network_available_features: &available_features
+        - ipv6_metadata
+
+- job:
+    name: neutron-tempest-plugin-scenario-openvswitch-yoga
+    parent: neutron-tempest-plugin-scenario-openvswitch
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions: *api_extensions
+      network_available_features: *available_features
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_openvswitch) | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+    name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga
+    parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions: *api_extensions
+      network_available_features: *available_features
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_openvswitch) | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+    name: neutron-tempest-plugin-scenario-linuxbridge-yoga
+    parent: neutron-tempest-plugin-scenario-linuxbridge
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions: *api_extensions
+      network_available_features: *available_features
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+    name: neutron-tempest-plugin-scenario-ovn-yoga
+    parent: neutron-tempest-plugin-scenario-ovn
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions: *api_extensions
+      network_api_extensions_ovn:
+        - vlan-transparent
+      devstack_localrc:
+        NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_ovn) | join(',') }}"
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            network-feature-enabled:
+              available_features: ""
+
+- job:
+    name: neutron-tempest-plugin-dvr-multinode-scenario-yoga
+    parent: neutron-tempest-plugin-dvr-multinode-scenario
+    override-checkout: stable/yoga
+    vars:
+      network_api_extensions_common: *api_extensions
+      branch_override: stable/yoga
+
+- job:
+    name: neutron-tempest-plugin-designate-scenario-yoga
+    parent: neutron-tempest-plugin-designate-scenario
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions_common: *api_extensions
+
+- job:
+    name: neutron-tempest-plugin-sfc-yoga
+    parent: neutron-tempest-plugin-sfc
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions_common: *api_extensions
+
+- job:
+    name: neutron-tempest-plugin-bgpvpn-bagpipe-yoga
+    parent: neutron-tempest-plugin-bgpvpn-bagpipe
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions: *api_extensions
+
+- job:
+    name: neutron-tempest-plugin-dynamic-routing-yoga
+    parent: neutron-tempest-plugin-dynamic-routing
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions_common: *api_extensions
+
+- job:
+    name: neutron-tempest-plugin-vpnaas-yoga
+    parent: neutron-tempest-plugin-vpnaas
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions_common: *api_extensions
+
+- job:
+    name: neutron-tempest-plugin-tap-as-a-service-yoga
+    parent: neutron-tempest-plugin-tap-as-a-service
+    override-checkout: stable/yoga
+    vars:
+      branch_override: stable/yoga
+      network_api_extensions_common: *api_extensions