Refactored Neutron tempest plugin directory structure

* Switched from neutron.tests.tempest to neutron_tempest_plugin
* Cleaned up README.rst and setup.cfg
* Used neutron_tempest_plugin as the tempest plugin package
  (wiring illustrated in the sketch below)
* Fixed .gitreview
* Kept the flake8 ignores in tox.ini because the tempest plugin was
  imported from the neutron codebase
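
For reference, tempest discovers such a package through a
'tempest.test_plugins' entry point that names a plugin class derived
from tempest.test_discover.plugins.TempestPlugin. A minimal sketch of
that wiring is shown below; the module path, class name and entry point
name are illustrative assumptions, not taken from this change:

    # setup.cfg (illustrative):
    # [entry_points]
    # tempest.test_plugins =
    #     neutron_tests = neutron_tempest_plugin.plugin:NeutronTempestPlugin

    # neutron_tempest_plugin/plugin.py (illustrative sketch)
    import os

    from tempest.test_discover import plugins


    class NeutronTempestPlugin(plugins.TempestPlugin):
        def load_tests(self):
            # Tell tempest where this plugin's tests live.
            base_path = os.path.split(os.path.dirname(
                os.path.abspath(__file__)))[0]
            test_dir = "neutron_tempest_plugin"
            full_test_dir = os.path.join(base_path, test_dir)
            return full_test_dir, base_path

        def register_opts(self, conf):
            # Plugin-specific config options would be registered here.
            pass

        def get_opt_lists(self):
            return []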

Change-Id: I42d389836e72813fdeebc797a577f4a8ac2ee603
diff --git a/neutron_tempest_plugin/scenario/__init__.py b/neutron_tempest_plugin/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/__init__.py
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
new file mode 100644
index 0000000..e810490
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -0,0 +1,278 @@
+# Copyright 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+from oslo_log import log
+from tempest.common.utils import net_utils
+from tempest.common import waiters
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.api import base as base_api
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import constants
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class BaseTempestTestCase(base_api.BaseNetworkTest):
+    @classmethod
+    def resource_setup(cls):
+        super(BaseTempestTestCase, cls).resource_setup()
+
+        cls.keypairs = []
+
+    @classmethod
+    def resource_cleanup(cls):
+        for keypair in cls.keypairs:
+            cls.os_primary.keypairs_client.delete_keypair(
+                keypair_name=keypair['name'])
+
+        super(BaseTempestTestCase, cls).resource_cleanup()
+
+    def create_server(self, flavor_ref, image_ref, key_name, networks,
+                      name=None, security_groups=None):
+        """Create a server using tempest lib.
+
+        All the parameters are the ones used in the Compute API.
+
+        Args:
+           flavor_ref(str): The flavor of the server to be provisioned.
+           image_ref(str): The image of the server to be provisioned.
+           key_name(str): SSH key to be used to connect to the
+                          provisioned server.
+           networks(list): List of dictionaries, each representing an
+               interface to be attached to the server. For a network it
+               should be {'uuid': network_uuid} and for a port it should
+               be {'port': port_uuid}.
+           name(str): Name of the server to be provisioned.
+           security_groups(list): List of dictionaries where the key is
+                'name' and the value is the name of the security group.
+                If it is not passed, the default security group is used.
+        """
+
+        name = name or data_utils.rand_name('server-test')
+        if not security_groups:
+            security_groups = [{'name': 'default'}]
+
+        server = self.os_primary.servers_client.create_server(
+            name=name,
+            flavorRef=flavor_ref,
+            imageRef=image_ref,
+            key_name=key_name,
+            networks=networks,
+            security_groups=security_groups)
+
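+        # addCleanup callbacks run in LIFO order: the server is deleted
+        # first, then we wait for its termination.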
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+            waiters.wait_for_server_termination,
+            self.os_primary.servers_client, server['server']['id'])
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.os_primary.servers_client.delete_server,
+                        server['server']['id'])
+        return server
+
+    @classmethod
+    def create_keypair(cls, client=None):
+        client = client or cls.os_primary.keypairs_client
+        name = data_utils.rand_name('keypair-test')
+        body = client.create_keypair(name=name)
+        cls.keypairs.append(body['keypair'])
+        return body['keypair']
+
+    @classmethod
+    def create_secgroup_rules(cls, rule_list, secgroup_id=None):
+        client = cls.os_primary.network_client
+        if not secgroup_id:
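+            # No security group given: fall back to the project's
+            # 'default' security group.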
+            sgs = client.list_security_groups()['security_groups']
+            for sg in sgs:
+                if sg['name'] == constants.DEFAULT_SECURITY_GROUP:
+                    secgroup_id = sg['id']
+                    break
+
+        for rule in rule_list:
+            direction = rule.pop('direction')
+            client.create_security_group_rule(
+                direction=direction,
+                security_group_id=secgroup_id,
+                **rule)
+
+    @classmethod
+    def create_loginable_secgroup_rule(cls, secgroup_id=None):
+        """This rule is intended to permit inbound ssh
+
+        Allowing ssh traffic from all sources, so no group_id is
+        provided. Setting a group_id would only permit traffic from
+        ports belonging to the same security group.
+        """
+
+        rule_list = [{'protocol': 'tcp',
+                      'direction': 'ingress',
+                      'port_range_min': 22,
+                      'port_range_max': 22,
+                      'remote_ip_prefix': '0.0.0.0/0'}]
+        cls.create_secgroup_rules(rule_list, secgroup_id=secgroup_id)
+
+    @classmethod
+    def create_pingable_secgroup_rule(cls, secgroup_id=None):
+        """This rule is intended to permit inbound ping"""
+
+        rule_list = [{'protocol': 'icmp',
+                      'direction': 'ingress',
+                      'port_range_min': 8,  # type
+                      'port_range_max': 0,  # code
+                      'remote_ip_prefix': '0.0.0.0/0'}]
+        cls.create_secgroup_rules(rule_list, secgroup_id=secgroup_id)
+
+    @classmethod
+    def create_router_by_client(cls, is_admin=False, **kwargs):
+        kwargs.update({'router_name': data_utils.rand_name('router'),
+                       'admin_state_up': True,
+                       'external_network_id': CONF.network.public_network_id})
+        if not is_admin:
+            router = cls.create_router(**kwargs)
+        else:
+            router = cls.create_admin_router(**kwargs)
+        LOG.debug("Created router %s", router['name'])
+        cls.routers.append(router)
+        return router
+
+    def create_and_associate_floatingip(self, port_id):
+        fip = self.os_primary.network_client.create_floatingip(
+            CONF.network.public_network_id,
+            port_id=port_id)['floatingip']
+        self.floating_ips.append(fip)
+        return fip
+
+    def setup_network_and_server(self, router=None, **kwargs):
+        """Create network resources and a server.
+
+        Creates a network, a subnet, a router, a keypair, a security
+        group and a server.
+        """
+        self.network = self.create_network()
+        LOG.debug("Created network %s", self.network['name'])
+        self.subnet = self.create_subnet(self.network)
+        LOG.debug("Created subnet %s", self.subnet['id'])
+
+        secgroup = self.os_primary.network_client.create_security_group(
+            name=data_utils.rand_name('secgroup-'))
+        LOG.debug("Created security group %s",
+                  secgroup['security_group']['name'])
+        self.security_groups.append(secgroup['security_group'])
+        if not router:
+            router = self.create_router_by_client(**kwargs)
+        self.create_router_interface(router['id'], self.subnet['id'])
+        self.keypair = self.create_keypair()
+        self.create_loginable_secgroup_rule(
+            secgroup_id=secgroup['security_group']['id'])
+        self.server = self.create_server(
+            flavor_ref=CONF.compute.flavor_ref,
+            image_ref=CONF.compute.image_ref,
+            key_name=self.keypair['name'],
+            networks=[{'uuid': self.network['id']}],
+            security_groups=[{'name': secgroup['security_group']['name']}])
+        waiters.wait_for_server_status(self.os_primary.servers_client,
+                                       self.server['server']['id'],
+                                       constants.SERVER_STATUS_ACTIVE)
+        self.port = self.client.list_ports(network_id=self.network['id'],
+                                           device_id=self.server[
+                                               'server']['id'])['ports'][0]
+        self.fip = self.create_and_associate_floatingip(self.port['id'])
+
+    def check_connectivity(self, host, ssh_user, ssh_key, servers=None):
+        ssh_client = ssh.Client(host, ssh_user, pkey=ssh_key)
+        try:
+            ssh_client.test_connection_auth()
+        except lib_exc.SSHTimeout as ssh_e:
+            LOG.debug(ssh_e)
+            self._log_console_output(servers)
+            raise
+
+    def _log_console_output(self, servers=None):
+        if not CONF.compute_feature_enabled.console_output:
+            LOG.debug('Console output not supported, cannot log')
+            return
+        if not servers:
+            servers = self.os_primary.servers_client.list_servers()
+            servers = servers['servers']
+        for server in servers:
+            try:
+                console_output = (
+                    self.os_primary.servers_client.get_console_output(
+                        server['id'])['output'])
+                LOG.debug('Console output for %s\nbody=\n%s',
+                          server['id'], console_output)
+            except lib_exc.NotFound:
+                LOG.debug("Server %s disappeared (deleted) while looking "
+                          "for the console log", server['id'])
+
+    def _check_remote_connectivity(self, source, dest, should_succeed=True,
+                                   nic=None, mtu=None, fragmentation=True):
+        """Check whether dest can be pinged over the source ssh connection.
+
+        :param source: RemoteClient: an ssh connection from which to ping
+        :param dest: an IP to ping against
+        :param should_succeed: boolean: whether the ping should succeed
+        :param nic: specific network interface to ping from
+        :param mtu: mtu size for the packet to be sent
+        :param fragmentation: flag to allow packet fragmentation
+        :returns: boolean -- True if the observed ping result matched
+            should_succeed within the timeout, False otherwise
+        """
+        def ping_host(source, host, count=CONF.validation.ping_count,
+                      size=CONF.validation.ping_size, nic=None, mtu=None,
+                      fragmentation=True):
+            addr = netaddr.IPAddress(host)
+            cmd = 'ping6' if addr.version == 6 else 'ping'
+            if nic:
+                cmd = 'sudo {cmd} -I {nic}'.format(cmd=cmd, nic=nic)
+            if mtu:
+                if not fragmentation:
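+                    # ping -M do forbids fragmentation, so an oversized
+                    # probe fails instead of being split into fragments.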
+                    cmd += ' -M do'
+                size = str(net_utils.get_ping_payload_size(
+                    mtu=mtu, ip_version=addr.version))
+            cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
+            return source.exec_command(cmd)
+
+        def ping_remote():
+            try:
+                result = ping_host(source, dest, nic=nic, mtu=mtu,
+                                   fragmentation=fragmentation)
+
+            except lib_exc.SSHExecCommandFailed:
+                LOG.warning('Failed to ping IP %s via an ssh connection '
+                            'from %s.', dest, source.host)
+                return not should_succeed
+            LOG.debug('ping result: %s', result)
+            # Assert that the return traffic was from the correct
+            # source address.
+            from_source = 'from %s' % dest
+            self.assertIn(from_source, result)
+            return should_succeed
+
+        return test_utils.call_until_true(ping_remote,
+                                          CONF.validation.ping_timeout,
+                                          1)
+
+    def check_remote_connectivity(self, source, dest, should_succeed=True,
+                                  nic=None, mtu=None, fragmentation=True):
+        self.assertTrue(self._check_remote_connectivity(
+            source, dest, should_succeed, nic, mtu, fragmentation))
diff --git a/neutron_tempest_plugin/scenario/constants.py b/neutron_tempest_plugin/scenario/constants.py
new file mode 100644
index 0000000..258c587
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/constants.py
@@ -0,0 +1,18 @@
+# Copyright 2016 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+SERVER_STATUS_ACTIVE = 'ACTIVE'
+DEFAULT_SECURITY_GROUP = 'default'
+LIMIT_KILO_BITS_PER_SECOND = 1000
+SOCKET_CONNECT_TIMEOUT = 60
diff --git a/neutron_tempest_plugin/scenario/exceptions.py b/neutron_tempest_plugin/scenario/exceptions.py
new file mode 100644
index 0000000..369a85b
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/exceptions.py
@@ -0,0 +1,33 @@
+# Copyright 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from tempest.lib import exceptions
+
+TempestException = exceptions.TempestException
+
+
+class QoSLimitReached(TempestException):
+    message = "Limit reached, limit = %(limit)d"
+
+
+class SocketConnectionRefused(TempestException):
+    message = ("Unable to connect to %(host)s port %(port)d: "
+               "Connection refused")
+
+
+class ConnectionTimeoutException(TempestException):
+    message = "Timeout connecting to %(host)s port %(port)d"
+
+
+class FileCreationFailedException(TempestException):
+    message = "File %(file)s has not been created or has the wrong size"
diff --git a/neutron_tempest_plugin/scenario/test_basic.py b/neutron_tempest_plugin/scenario/test_basic.py
new file mode 100644
index 0000000..d825e15
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_basic.py
@@ -0,0 +1,35 @@
+# Copyright 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from tempest.lib import decorators
+
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+
+class NetworkBasicTest(base.BaseTempestTestCase):
+    credentials = ['primary']
+    force_tenant_isolation = False
+
+    # Default to ipv4.
+    _ip_version = 4
+
+    @decorators.idempotent_id('de07fe0a-e955-449e-b48b-8641c14cd52e')
+    def test_basic_instance(self):
+        self.setup_network_and_server()
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
diff --git a/neutron_tempest_plugin/scenario/test_dvr.py b/neutron_tempest_plugin/scenario/test_dvr.py
new file mode 100644
index 0000000..3da0694
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_dvr.py
@@ -0,0 +1,66 @@
+# Copyright 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from tempest.lib import decorators
+from tempest import test
+
+from neutron_lib import constants
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+
+class NetworkTestMixin(object):
+    def _check_connectivity(self):
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+
+    def _check_snat_port_connectivity(self):
+        self._check_connectivity()
+
+        # Put the router SNAT port down to make sure the traffic flows
+        # through the compute node.
+        self._put_snat_port_down(self.network['id'])
+        self._check_connectivity()
+
+    def _put_snat_port_down(self, network_id):
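+        # Find the router's centralized SNAT port on this network and put
+        # it administratively down, forcing traffic through the compute
+        # node.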
+        port_id = self.client.list_ports(
+            network_id=network_id,
+            device_owner=constants.DEVICE_OWNER_ROUTER_SNAT)['ports'][0]['id']
+        self.os_admin.network_client.update_port(
+            port_id, admin_state_up=False)
+
+
+class NetworkDvrTest(base.BaseTempestTestCase, NetworkTestMixin):
+    credentials = ['primary', 'admin']
+    force_tenant_isolation = False
+
+    @classmethod
+    @test.requires_ext(extension="dvr", service="network")
+    def skip_checks(cls):
+        super(NetworkDvrTest, cls).skip_checks()
+
+    @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d344')
+    def test_vm_reachable_through_compute(self):
+        """Check that the VM is reachable through the compute node.
+
+        The test is done by putting the SNAT port down on the controller
+        node.
+        """
+        router = self.create_router_by_client(
+            distributed=True, tenant_id=self.client.tenant_id, is_admin=True,
+            ha=False)
+        self.setup_network_and_server(router=router)
+        self._check_snat_port_connectivity()
diff --git a/neutron_tempest_plugin/scenario/test_floatingip.py b/neutron_tempest_plugin/scenario/test_floatingip.py
new file mode 100644
index 0000000..97bfcc5
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_floatingip.py
@@ -0,0 +1,156 @@
+# Copyright (c) 2017 Midokura SARL
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+from tempest.common import waiters
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest import test
+import testscenarios
+from testscenarios.scenarios import multiply_scenarios
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.scenario import constants
+
+
+CONF = config.CONF
+
+
+load_tests = testscenarios.load_tests_apply_scenarios
+
+
+class FloatingIpTestCasesMixin(object):
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    @test.requires_ext(extension="router", service="network")
+    def resource_setup(cls):
+        super(FloatingIpTestCasesMixin, cls).resource_setup()
+        cls.network = cls.create_network()
+        cls.subnet = cls.create_subnet(cls.network)
+        cls.router = cls.create_router_by_client()
+        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+        cls.keypair = cls.create_keypair()
+
+        cls.secgroup = cls.os_primary.network_client.create_security_group(
+            name=data_utils.rand_name('secgroup-'))['security_group']
+        cls.security_groups.append(cls.secgroup)
+        cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+        cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+
+        if cls.same_network:
+            cls._dest_network = cls.network
+        else:
+            cls._dest_network = cls._create_dest_network()
+
+    @classmethod
+    def _create_dest_network(cls):
+        network = cls.create_network()
+        subnet = cls.create_subnet(network,
+            cidr=netaddr.IPNetwork('10.10.0.0/24'))
+        cls.create_router_interface(cls.router['id'], subnet['id'])
+        return network
+
+    def _create_server(self, create_floating_ip=True, network=None):
+        if network is None:
+            network = self.network
+        port = self.create_port(network, security_groups=[self.secgroup['id']])
+        if create_floating_ip:
+            fip = self.create_and_associate_floatingip(port['id'])
+        else:
+            fip = None
+        server = self.create_server(
+            flavor_ref=CONF.compute.flavor_ref,
+            image_ref=CONF.compute.image_ref,
+            key_name=self.keypair['name'],
+            networks=[{'port': port['id']}])['server']
+        waiters.wait_for_server_status(self.os_primary.servers_client,
+                                       server['id'],
+                                       constants.SERVER_STATUS_ACTIVE)
+        return {'port': port, 'fip': fip, 'server': server}
+
+    def _test_east_west(self):
+        # The proxy VM is used to control the source VM when it doesn't
+        # have a floating-ip.
+        if self.src_has_fip:
+            proxy = None
+            proxy_client = None
+        else:
+            proxy = self._create_server()
+            proxy_client = ssh.Client(proxy['fip']['floating_ip_address'],
+                                      CONF.validation.image_ssh_user,
+                                      pkey=self.keypair['private_key'])
+
+        # Source VM
+        if self.src_has_fip:
+            src_server = self._create_server()
+            src_server_ip = src_server['fip']['floating_ip_address']
+        else:
+            src_server = self._create_server(create_floating_ip=False)
+            src_server_ip = src_server['port']['fixed_ips'][0]['ip_address']
+        ssh_client = ssh.Client(src_server_ip,
+                                CONF.validation.image_ssh_user,
+                                pkey=self.keypair['private_key'],
+                                proxy_client=proxy_client)
+
+        # Destination VM
+        if self.dest_has_fip:
+            dest_server = self._create_server(network=self._dest_network)
+        else:
+            dest_server = self._create_server(create_floating_ip=False,
+                                              network=self._dest_network)
+
+        # Check connectivity
+        self.check_remote_connectivity(ssh_client,
+            dest_server['port']['fixed_ips'][0]['ip_address'])
+        if self.dest_has_fip:
+            self.check_remote_connectivity(ssh_client,
+                dest_server['fip']['floating_ip_address'])
+
+
+class FloatingIpSameNetwork(FloatingIpTestCasesMixin,
+                            base.BaseTempestTestCase):
+    scenarios = multiply_scenarios([
+        ('SRC with FIP', dict(src_has_fip=True)),
+        ('SRC without FIP', dict(src_has_fip=False)),
+    ], [
+        ('DEST with FIP', dict(dest_has_fip=True)),
+        ('DEST without FIP', dict(dest_has_fip=False)),
+    ])
+
+    same_network = True
+
+    @decorators.idempotent_id('05c4e3b3-7319-4052-90ad-e8916436c23b')
+    def test_east_west(self):
+        self._test_east_west()
+
+
+class FloatingIpSeparateNetwork(FloatingIpTestCasesMixin,
+                                base.BaseTempestTestCase):
+    scenarios = multiply_scenarios([
+        ('SRC with FIP', dict(src_has_fip=True)),
+        ('SRC without FIP', dict(src_has_fip=False)),
+    ], [
+        ('DEST with FIP', dict(dest_has_fip=True)),
+        ('DEST without FIP', dict(dest_has_fip=False)),
+    ])
+
+    same_network = False
+
+    @decorators.idempotent_id('f18f0090-3289-4783-b956-a0f8ac511e8b')
+    def test_east_west(self):
+        self._test_east_west()
diff --git a/neutron_tempest_plugin/scenario/test_migration.py b/neutron_tempest_plugin/scenario/test_migration.py
new file mode 100644
index 0000000..291611c
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_migration.py
@@ -0,0 +1,127 @@
+# Copyright 2017 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib import decorators
+from tempest import test
+
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.scenario import test_dvr
+
+
+class NetworkMigrationTestBase(base.BaseTempestTestCase,
+                               test_dvr.NetworkTestMixin):
+    credentials = ['primary', 'admin']
+    force_tenant_isolation = False
+
+    @classmethod
+    @test.requires_ext(extension="dvr", service="network")
+    @test.requires_ext(extension="l3-ha", service="network")
+    def skip_checks(cls):
+        super(NetworkMigrationTestBase, cls).skip_checks()
+
+    def _check_update(self, router, is_dvr, is_ha):
+        router = self.os_admin.network_client.show_router(router['id'])
+        self.assertEqual(is_dvr, router['router']['distributed'])
+        self.assertEqual(is_ha, router['router']['ha'])
+
+    def _test_migration(self, before_dvr, before_ha, after_dvr, after_ha):
+        router = self.create_router_by_client(
+            distributed=before_dvr, ha=before_ha,
+            tenant_id=self.client.tenant_id, is_admin=True)
+
+        self.setup_network_and_server(router=router)
+        self._check_connectivity()
+
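+        # The router must be administratively down before its
+        # distributed/ha flags can be changed.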
+        self.os_admin.network_client.update_router(
+            router_id=router['id'], admin_state_up=False)
+        self.os_admin.network_client.update_router(
+            router_id=router['id'], distributed=after_dvr, ha=after_ha)
+        self._check_update(router, after_dvr, after_ha)
+
+        self.os_admin.network_client.update_router(
+            router_id=router['id'], admin_state_up=True)
+        self._check_connectivity()
+
+
+class NetworkMigrationFromLegacy(NetworkMigrationTestBase):
+
+    @decorators.idempotent_id('23724222-483a-4129-bc15-7a9278f3828b')
+    def test_from_legacy_to_dvr(self):
+        self._test_migration(before_dvr=False, before_ha=False,
+                             after_dvr=True, after_ha=False)
+
+    @decorators.idempotent_id('09d85102-994f-4ff9-bf3e-17051145ca12')
+    def test_from_legacy_to_ha(self):
+        self._test_migration(before_dvr=False, before_ha=False,
+                             after_dvr=False, after_ha=True)
+
+    @decorators.idempotent_id('fe169f2c-6ed3-4eb0-8afe-2d540c4b49e2')
+    def test_from_legacy_to_dvr_ha(self):
+        self._test_migration(before_dvr=False, before_ha=False,
+                             after_dvr=True, after_ha=True)
+
+
+class NetworkMigrationFromHA(NetworkMigrationTestBase):
+
+    @decorators.idempotent_id('b4e68ac0-3b76-4306-ae8a-51cf4d363b22')
+    def test_from_ha_to_legacy(self):
+        self._test_migration(before_dvr=False, before_ha=True,
+                             after_dvr=False, after_ha=False)
+
+    @decorators.idempotent_id('42260eea-5d56-4d30-b62a-a62694dfe4d5')
+    def test_from_ha_to_dvr(self):
+        self._test_migration(before_dvr=False, before_ha=True,
+                             after_dvr=True, after_ha=False)
+
+    @decorators.idempotent_id('e4149576-248b-43fa-9d0b-a5c2f51967ce')
+    def test_from_ha_to_dvr_ha(self):
+        self._test_migration(before_dvr=False, before_ha=True,
+                             after_dvr=True, after_ha=True)
+
+
+class NetworkMigrationFromDVR(NetworkMigrationTestBase):
+
+    @decorators.idempotent_id('e5cac02c-248d-4aac-bd5e-9d47c5197307')
+    def test_from_dvr_to_legacy(self):
+        self._test_migration(before_dvr=True, before_ha=False,
+                             after_dvr=False, after_ha=False)
+
+    @decorators.idempotent_id('a00d5ad7-8509-4bb0-bdd2-7f1ee052d1cd')
+    def test_from_dvr_to_ha(self):
+        self._test_migration(before_dvr=True, before_ha=False,
+                             after_dvr=False, after_ha=True)
+
+    @decorators.idempotent_id('25304a51-93a8-4cf3-9523-bce8b4eaecf8')
+    def test_from_dvr_to_dvr_ha(self):
+        self._test_migration(before_dvr=True, before_ha=False,
+                             after_dvr=True, after_ha=True)
+
+
+class NetworkMigrationFromDVRHA(NetworkMigrationTestBase):
+
+    @decorators.idempotent_id('1be9b2e2-379c-40a4-a269-6687b81df691')
+    def test_from_dvr_ha_to_legacy(self):
+        self._test_migration(before_dvr=True, before_ha=True,
+                             after_dvr=False, after_ha=False)
+
+    @decorators.idempotent_id('55957267-4e84-4314-a2f7-7cd36a2df04b')
+    def test_from_dvr_ha_to_ha(self):
+        self._test_migration(before_dvr=True, before_ha=True,
+                             after_dvr=False, after_ha=True)
+
+    @decorators.idempotent_id('d6bedff1-72be-4a9a-8ea2-dc037cd838e0')
+    def test_from_dvr_ha_to_dvr(self):
+        self._test_migration(before_dvr=True, before_ha=True,
+                             after_dvr=True, after_ha=False)
diff --git a/neutron_tempest_plugin/scenario/test_portsecurity.py b/neutron_tempest_plugin/scenario/test_portsecurity.py
new file mode 100644
index 0000000..257627c
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_portsecurity.py
@@ -0,0 +1,53 @@
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib import decorators
+
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+
+class PortSecurityTest(base.BaseTempestTestCase):
+    credentials = ['primary']
+    required_extensions = ['port-security']
+
+    @decorators.idempotent_id('61ab176e-d48b-42b7-b38a-1ba571ecc033')
+    def test_port_security_removed_added(self):
+        """Test that the connection works after port security is removed.
+
+        First check that the VM is accessible. Then remove port security
+        and check connectivity again. Finally, re-enable port security and
+        check connectivity once more.
+        """
+        self.setup_network_and_server()
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+        sec_group_id = self.security_groups[0]['id']
+
+        self.port = self.update_port(port=self.port,
+                                     port_security_enabled=False,
+                                     security_groups=[])
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+
+        self.port = self.update_port(port=self.port,
+                                     port_security_enabled=True,
+                                     security_groups=[sec_group_id])
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
diff --git a/neutron_tempest_plugin/scenario/test_qos.py b/neutron_tempest_plugin/scenario/test_qos.py
new file mode 100644
index 0000000..d93f57f
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_qos.py
@@ -0,0 +1,175 @@
+# Copyright 2016 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+import errno
+import socket
+import time
+
+from oslo_log import log as logging
+from tempest.lib import decorators
+from tempest.lib import exceptions
+from tempest import test
+
+from neutron_tempest_plugin.api import base as base_api
+from neutron_tempest_plugin.common import qos_consts
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.scenario import constants
+from neutron_tempest_plugin.scenario import exceptions as sc_exceptions
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+def _try_connect(host_ip, port):
+    try:
+        client_socket = socket.socket(socket.AF_INET,
+                                      socket.SOCK_STREAM)
+        client_socket.connect((host_ip, port))
+        return client_socket
+    except socket.error as serr:
+        if serr.errno == errno.ECONNREFUSED:
+            raise sc_exceptions.SocketConnectionRefused(host=host_ip,
+                                                        port=port)
+        else:
+            raise
+
+
+def _connect_socket(host, port):
+    """Try to initiate a connection to a host on a given port.
+
+    Retries until the timeout is reached, in case the listening host is
+    not ready yet.
+    """
+
+    start = time.time()
+    while True:
+        try:
+            return _try_connect(host, port)
+        except sc_exceptions.SocketConnectionRefused:
+            if time.time() - start > constants.SOCKET_CONNECT_TIMEOUT:
+                raise sc_exceptions.ConnectionTimeoutException(host=host,
+                                                               port=port)
+
+
+class QoSTest(base.BaseTempestTestCase):
+    credentials = ['primary', 'admin']
+    force_tenant_isolation = False
+
+    BUFFER_SIZE = 1024 * 1024
+    TOLERANCE_FACTOR = 1.5
+    BS = 512
+    COUNT = BUFFER_SIZE // BS
+    FILE_SIZE = BS * COUNT
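+    # The configured limit in kilobits/s converted to bytes/s, with some
+    # headroom (TOLERANCE_FACTOR) for measurement jitter.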
+    LIMIT_BYTES_SEC = (constants.LIMIT_KILO_BITS_PER_SECOND * 1024
+                       * TOLERANCE_FACTOR / 8.0)
+    FILE_PATH = "/tmp/img"
+
+    @classmethod
+    @test.requires_ext(extension="qos", service="network")
+    @base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
+    def resource_setup(cls):
+        super(QoSTest, cls).resource_setup()
+
+    def _create_file_for_bw_tests(self, ssh_client):
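+        # Create a FILE_SIZE byte file of zeros on the guest with dd and
+        # verify its size with stat.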
+        cmd = ("(dd if=/dev/zero bs=%(bs)d count=%(count)d of=%(file_path)s) "
+               % {'bs': QoSTest.BS, 'count': QoSTest.COUNT,
+               'file_path': QoSTest.FILE_PATH})
+        ssh_client.exec_command(cmd)
+        cmd = "stat -c %%s %s" % QoSTest.FILE_PATH
+        filesize = ssh_client.exec_command(cmd)
+        if int(filesize.strip()) != QoSTest.FILE_SIZE:
+            raise sc_exceptions.FileCreationFailedException(
+                file=QoSTest.FILE_PATH)
+
+    def _check_bw(self, ssh_client, host, port):
+        cmd = "killall -q nc"
+        try:
+            ssh_client.exec_command(cmd)
+        except exceptions.SSHExecCommandFailed:
+            pass
+        cmd = ("(nc -ll -p %(port)d < %(file_path)s > /dev/null &)" % {
+                'port': port, 'file_path': QoSTest.FILE_PATH})
+        ssh_client.exec_command(cmd)
+
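+        # Read the whole file back over the TCP connection and measure
+        # the effective throughput.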
+        start_time = time.time()
+        client_socket = _connect_socket(host, port)
+        total_bytes_read = 0
+
+        while total_bytes_read < QoSTest.FILE_SIZE:
+            data = client_socket.recv(QoSTest.BUFFER_SIZE)
+            total_bytes_read += len(data)
+
+        time_elapsed = time.time() - start_time
+        bytes_per_second = total_bytes_read / time_elapsed
+
+        LOG.debug("time_elapsed = %(time_elapsed)d, "
+                  "total_bytes_read = %(total_bytes_read)d, "
+                  "bytes_per_second = %(bytes_per_second)d",
+                  {'time_elapsed': time_elapsed,
+                   'total_bytes_read': total_bytes_read,
+                   'bytes_per_second': bytes_per_second})
+
+        return bytes_per_second <= QoSTest.LIMIT_BYTES_SEC
+
+    @decorators.idempotent_id('1f7ed39b-428f-410a-bd2b-db9f465680df')
+    def test_qos(self):
+        """Basic test of a QoS bandwidth limit policy.
+
+        Check that a QoS policy with a bandwidth limit rule is applied
+        correctly by sending a file from the instance to the test node,
+        then calculating the bandwidth every ~1 sec as the number of
+        bytes received divided by the elapsed time.
+        """
+
+        NC_PORT = 1234
+
+        self.setup_network_and_server()
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+        rulesets = [{'protocol': 'tcp',
+                     'direction': 'ingress',
+                     'port_range_min': NC_PORT,
+                     'port_range_max': NC_PORT,
+                     'remote_ip_prefix': '0.0.0.0/0'}]
+        self.create_secgroup_rules(rulesets,
+                                   self.security_groups[-1]['id'])
+
+        ssh_client = ssh.Client(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                pkey=self.keypair['private_key'])
+        policy = self.os_admin.network_client.create_qos_policy(
+                                       name='test-policy',
+                                       description='test-qos-policy',
+                                       shared=True)
+        policy_id = policy['policy']['id']
+        self.os_admin.network_client.create_bandwidth_limit_rule(
+            policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
+            max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)
+        port = self.client.list_ports(network_id=self.network['id'],
+                                      device_id=self.server[
+                                      'server']['id'])['ports'][0]
+        self.os_admin.network_client.update_port(port['id'],
+                                                 qos_policy_id=policy_id)
+        self._create_file_for_bw_tests(ssh_client)
+        utils.wait_until_true(lambda: self._check_bw(
+            ssh_client,
+            self.fip['floating_ip_address'],
+            port=NC_PORT),
+            timeout=120,
+            sleep=1)
diff --git a/neutron_tempest_plugin/scenario/test_trunk.py b/neutron_tempest_plugin/scenario/test_trunk.py
new file mode 100644
index 0000000..95906a0
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_trunk.py
@@ -0,0 +1,266 @@
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+from oslo_log import log as logging
+from tempest.common import waiters
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest import test
+import testtools
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.scenario import constants
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
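+# Executed on the guest: find the first ethernet interface, create a VLAN
+# subinterface for the given tag on top of it, bring it up and run dhclient.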
+CONFIGURE_VLAN_INTERFACE_COMMANDS = (
+    'IFACE=$(PATH=$PATH:/usr/sbin ip l | grep "^[0-9]*: e" |'
+    'cut -d \: -f 2) && '
+    'sudo su -c '
+    '"ip l a link $IFACE name $IFACE.%(tag)d type vlan id %(tag)d &&'
+    'ip l s up dev $IFACE.%(tag)d && '
+    'dhclient $IFACE.%(tag)d"')
+
+
+def get_next_subnet(cidr):
+    return netaddr.IPNetwork(cidr).next()
+
+
+class TrunkTest(base.BaseTempestTestCase):
+    credentials = ['primary']
+    force_tenant_isolation = False
+
+    @classmethod
+    @test.requires_ext(extension="trunk", service="network")
+    def resource_setup(cls):
+        super(TrunkTest, cls).resource_setup()
+        # setup basic topology for servers we can log into
+        cls.network = cls.create_network()
+        cls.subnet = cls.create_subnet(cls.network)
+        router = cls.create_router_by_client()
+        cls.create_router_interface(router['id'], cls.subnet['id'])
+        cls.keypair = cls.create_keypair()
+        cls.secgroup = cls.os_primary.network_client.create_security_group(
+            name=data_utils.rand_name('secgroup-'))
+        cls.security_groups.append(cls.secgroup['security_group'])
+        cls.create_loginable_secgroup_rule(
+            secgroup_id=cls.secgroup['security_group']['id'])
+
+    def _create_server_with_trunk_port(self):
+        port = self.create_port(self.network, security_groups=[
+            self.secgroup['security_group']['id']])
+        trunk = self.client.create_trunk(port['id'], subports=[])['trunk']
+        server, fip = self._create_server_with_fip(port['id'])
+        self.addCleanup(self._detach_and_delete_trunk, server, trunk)
+        return {'port': port, 'trunk': trunk, 'fip': fip,
+                'server': server}
+
+    def _create_server_with_fip(self, port_id, **server_kwargs):
+        fip = self.create_and_associate_floatingip(port_id)
+        return (
+            self.create_server(
+                flavor_ref=CONF.compute.flavor_ref,
+                image_ref=CONF.compute.image_ref,
+                key_name=self.keypair['name'],
+                networks=[{'port': port_id}],
+                security_groups=[{'name': self.secgroup[
+                    'security_group']['name']}],
+                **server_kwargs)['server'],
+            fip)
+
+    def _detach_and_delete_trunk(self, server, trunk):
+        # we have to detach the interface from the server before
+        # the trunk can be deleted.
+        self.os_primary.compute.InterfacesClient().delete_interface(
+            server['id'], trunk['port_id'])
+
+        def is_port_detached():
+            p = self.client.show_port(trunk['port_id'])['port']
+            return p['device_id'] == ''
+        utils.wait_until_true(is_port_detached)
+        self.client.delete_trunk(trunk['id'])
+
+    def _is_port_down(self, port_id):
+        p = self.client.show_port(port_id)['port']
+        return p['status'] == 'DOWN'
+
+    def _is_port_active(self, port_id):
+        p = self.client.show_port(port_id)['port']
+        return p['status'] == 'ACTIVE'
+
+    def _is_trunk_active(self, trunk_id):
+        t = self.client.show_trunk(trunk_id)['trunk']
+        return t['status'] == 'ACTIVE'
+
+    def _create_server_with_port_and_subport(self, vlan_network, vlan_tag):
+        parent_port = self.create_port(self.network, security_groups=[
+            self.secgroup['security_group']['id']])
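+        # The subport reuses the parent port's MAC address so that the VLAN
+        # subinterface created inside the guest (which inherits the parent's
+        # MAC) matches the subport's address.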
+        port_for_subport = self.create_port(
+            vlan_network,
+            security_groups=[self.secgroup['security_group']['id']],
+            mac_address=parent_port['mac_address'])
+        subport = {
+            'port_id': port_for_subport['id'],
+            'segmentation_type': 'vlan',
+            'segmentation_id': vlan_tag}
+        trunk = self.client.create_trunk(
+            parent_port['id'], subports=[subport])['trunk']
+
+        server, fip = self._create_server_with_fip(parent_port['id'])
+        self.addCleanup(self._detach_and_delete_trunk, server, trunk)
+
+        server_ssh_client = ssh.Client(
+            fip['floating_ip_address'],
+            CONF.validation.image_ssh_user,
+            pkey=self.keypair['private_key'])
+
+        return {
+            'server': server,
+            'fip': fip,
+            'ssh_client': server_ssh_client,
+            'subport': port_for_subport,
+        }
+
+    def _wait_for_server(self, server):
+        waiters.wait_for_server_status(self.os_primary.servers_client,
+                                       server['server']['id'],
+                                       constants.SERVER_STATUS_ACTIVE)
+        self.check_connectivity(server['fip']['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+
+    @decorators.idempotent_id('bb13fe28-f152-4000-8131-37890a40c79e')
+    def test_trunk_subport_lifecycle(self):
+        """Test trunk creation and subport transition to ACTIVE status.
+
+        This is a basic test for the trunk extension to ensure that we
+        can create a trunk, attach it to a server, add/remove subports,
+        while ensuring the status transitions as appropriate.
+
+        This test does not assert any dataplane behavior for the subports.
+        It's just a high-level check to ensure the agents claim to have
+        wired the port correctly and that the trunk port itself maintains
+        connectivity.
+        """
+        server1 = self._create_server_with_trunk_port()
+        server2 = self._create_server_with_trunk_port()
+        for server in (server1, server2):
+            self._wait_for_server(server)
+        trunk1_id, trunk2_id = server1['trunk']['id'], server2['trunk']['id']
+        # trunks should transition to ACTIVE without any subports
+        utils.wait_until_true(
+            lambda: self._is_trunk_active(trunk1_id),
+            exception=RuntimeError("Timed out waiting for trunk %s to "
+                                   "transition to ACTIVE." % trunk1_id))
+        utils.wait_until_true(
+            lambda: self._is_trunk_active(trunk2_id),
+            exception=RuntimeError("Timed out waiting for trunk %s to "
+                                   "transition to ACTIVE." % trunk2_id))
+        # create a few more networks and ports for subports
+        subports = [{'port_id': self.create_port(self.create_network())['id'],
+                     'segmentation_type': 'vlan', 'segmentation_id': seg_id}
+                    for seg_id in range(3, 7)]
+        # add all subports to server1
+        self.client.add_subports(trunk1_id, subports)
+        # ensure trunk transitions to ACTIVE
+        utils.wait_until_true(
+            lambda: self._is_trunk_active(trunk1_id),
+            exception=RuntimeError("Timed out waiting for trunk %s to "
+                                   "transition to ACTIVE." % trunk1_id))
+        # ensure all underlying subports transitioned to ACTIVE
+        for s in subports:
+            utils.wait_until_true(lambda: self._is_port_active(s['port_id']))
+        # ensure main dataplane wasn't interrupted
+        self.check_connectivity(server1['fip']['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+        # move subports over to other server
+        self.client.remove_subports(trunk1_id, subports)
+        # ensure all subports go down
+        for s in subports:
+            utils.wait_until_true(
+                lambda: self._is_port_down(s['port_id']),
+                exception=RuntimeError("Timed out waiting for subport %s to "
+                                       "transition to DOWN." % s['port_id']))
+        self.client.add_subports(trunk2_id, subports)
+        # wait for both trunks to go back to ACTIVE
+        utils.wait_until_true(
+            lambda: self._is_trunk_active(trunk1_id),
+            exception=RuntimeError("Timed out waiting for trunk %s to "
+                                   "transition to ACTIVE." % trunk1_id))
+        utils.wait_until_true(
+            lambda: self._is_trunk_active(trunk2_id),
+            exception=RuntimeError("Timed out waiting for trunk %s to "
+                                   "transition to ACTIVE." % trunk2_id))
+        # ensure subports come up on other trunk
+        for s in subports:
+            utils.wait_until_true(
+                lambda: self._is_port_active(s['port_id']),
+                exception=RuntimeError("Timed out waiting for subport %s to "
+                                       "transition to ACTIVE." % s['port_id']))
+        # final connectivity check
+        self.check_connectivity(server1['fip']['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+        self.check_connectivity(server2['fip']['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+
+    @testtools.skipUnless(
+          CONF.neutron_plugin_options.image_is_advanced,
+          "Advanced image is required to run this test.")
+    @decorators.idempotent_id('a8a02c9b-b453-49b5-89a2-cce7da66aafb')
+    def test_subport_connectivity(self):
+        vlan_tag = 10
+
+        vlan_network = self.create_network()
+        new_subnet_cidr = get_next_subnet(
+            config.safe_get_config_value('network', 'project_network_cidr'))
+        self.create_subnet(vlan_network, gateway=None, cidr=new_subnet_cidr)
+
+        servers = [
+            self._create_server_with_port_and_subport(vlan_network, vlan_tag)
+            for i in range(2)]
+
+        for server in servers:
+            self._wait_for_server(server)
+            # Configure VLAN interfaces on server
+            command = CONFIGURE_VLAN_INTERFACE_COMMANDS % {'tag': vlan_tag}
+            server['ssh_client'].exec_command(command)
+            out = server['ssh_client'].exec_command(
+                'PATH=$PATH:/usr/sbin;ip addr list')
+            LOG.debug("Interfaces on server %s: %s", server, out)
+
+        # Ping from server1 to server2 via VLAN interface should fail because
+        # we haven't allowed ICMP
+        self.check_remote_connectivity(
+            servers[0]['ssh_client'],
+            servers[1]['subport']['fixed_ips'][0]['ip_address'],
+            should_succeed=False
+        )
+        # allow intra-securitygroup traffic
+        self.client.create_security_group_rule(
+            security_group_id=self.secgroup['security_group']['id'],
+            direction='ingress', ethertype='IPv4', protocol='icmp',
+            remote_group_id=self.secgroup['security_group']['id'])
+        self.check_remote_connectivity(
+            servers[0]['ssh_client'],
+            servers[1]['subport']['fixed_ips'][0]['ip_address'],
+            should_succeed=True
+        )