Merge "Keep stable branch jobs on Ubuntu Xenial"
diff --git a/.zuul.yaml b/.zuul.yaml
index 5c3477a..185166a 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -15,8 +15,9 @@
       tempest_concurrency: 4
       tox_envlist: all
       devstack_localrc:
+        USE_PYTHON3: true
         TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-bw-minimum-ingress,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details
+        NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,floatingip-pools,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-bw-minimum-ingress,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details,uplink-status-propagation
       devstack_plugins:
         neutron: git://git.openstack.org/openstack/neutron.git
         neutron-tempest-plugin: git://git.openstack.org/openstack/neutron-tempest-plugin.git
@@ -27,6 +28,7 @@
         neutron-qos: true
         neutron-segments: true
         neutron-trunk: true
+        neutron-uplink-status-propagation: true
       devstack_local_conf:
         post-config:
           $NEUTRON_CONF:
@@ -102,6 +104,7 @@
     vars:
       branch_override: stable/queens
       devstack_localrc:
+        USE_PYTHON3: false
         # TODO(slaweq): find a way to put this list of extensions in
         # neutron repository and keep it different per branch,
         # then it could be removed from here
@@ -115,6 +118,7 @@
     vars:
       branch_override: stable/rocky
       devstack_localrc:
+        USE_PYTHON3: false
         # TODO(slaweq): find a way to put this list of extensions in
         # neutron repository and keep it different per branch,
         # then it could be removed from here
@@ -147,7 +151,7 @@
     vars:
       devstack_localrc:
         Q_AGENT: linuxbridge
-        NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-bw-minimum-ingress,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
+        NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,floatingip-pools,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-bw-minimum-ingress,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
       devstack_local_conf:
         post-config:
           $NEUTRON_CONF:
@@ -165,6 +169,7 @@
           $TEMPEST_CONFIG:
             neutron_plugin_options:
               available_type_drivers: flat,vlan,local,vxlan
+              q_agent: linuxbridge
 
 - job:
     name: neutron-tempest-plugin-scenario-linuxbridge-queens
@@ -174,7 +179,16 @@
     vars:
       branch_override: stable/queens
       devstack_localrc:
+        USE_PYTHON3: false
         NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
+      devstack_local_conf:
+        test-config:
+          # NOTE: ignores the linux bridge trunk delete on bound port test
+          # for the queens branch, as the fix from
+          # https://review.openstack.org/#/c/605589/ will not be applied there
+          $TEMPEST_CONFIG:
+            neutron_plugin_options:
+              q_agent: None
 
 - job:
     name: neutron-tempest-plugin-scenario-linuxbridge-rocky
@@ -184,7 +198,16 @@
     vars:
       branch_override: stable/rocky
       devstack_localrc:
+        USE_PYTHON3: false
         NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
+      devstack_local_conf:
+        test-config:
+          # NOTE: ignores the linux bridge trunk delete on bound port test
+          # for the rocky branch, as the fix from
+          # https://review.openstack.org/#/c/605589/ will not be applied there
+          $TEMPEST_CONFIG:
+            neutron_plugin_options:
+              q_agent: None
 
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario
@@ -205,8 +228,9 @@
       tox_envlist: all
       tempest_test_regex: ^neutron_tempest_plugin\.scenario
       devstack_localrc:
+        USE_PYTHON3: true
         TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-bw-minimum-ingress,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details"
+        NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,floatingip-pools,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-bw-minimum-ingress,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details"
         PHYSICAL_NETWORK: default
         DOWNLOAD_DEFAULT_IMAGES: false
         IMAGE_URLS: "http://download.cirros-cloud.net/0.3.4/cirros-0.3.4-i386-disk.img,http://cloud-images.ubuntu.com/releases/16.04/release-20180622/ubuntu-16.04-server-cloudimg-amd64-disk1.img"
@@ -318,6 +342,8 @@
     override-checkout: stable/queens
     vars:
       branch_override: stable/queens
+      devstack_localrc:
+        USE_PYTHON3: false
 
 - job:
     name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
@@ -326,6 +352,8 @@
     override-checkout: stable/rocky
     vars:
       branch_override: stable/rocky
+      devstack_localrc:
+        USE_PYTHON3: false
 
 - job:
     name: neutron-tempest-plugin-designate-scenario
@@ -360,6 +388,8 @@
     override-checkout: stable/queens
     vars:
       branch_override: stable/queens
+      devstack_localrc:
+        USE_PYTHON3: false
 
 - job:
     name: neutron-tempest-plugin-designate-scenario-rocky
@@ -368,6 +398,8 @@
     override-checkout: stable/rocky
     vars:
       branch_override: stable/rocky
+      devstack_localrc:
+        USE_PYTHON3: false
 
 - project-template:
     name: neutron-tempest-plugin-jobs
diff --git a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
index d449ead..048a1e5 100644
--- a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
+++ b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
@@ -17,11 +17,9 @@
 from tempest.lib import exceptions as lib_exc
 
 from neutron_tempest_plugin.api import base
-from neutron_tempest_plugin.api import base_security_groups as base_security
 
 
-class PortSecurityAdminTests(base_security.BaseSecGroupTest,
-                             base.BaseAdminNetworkTest):
+class PortSecurityAdminTests(base.BaseAdminNetworkTest):
 
     required_extensions = ['port-security']
 
diff --git a/neutron_tempest_plugin/api/admin/test_quotas_negative.py b/neutron_tempest_plugin/api/admin/test_quotas_negative.py
index cd64e5c..9c37d92 100644
--- a/neutron_tempest_plugin/api/admin/test_quotas_negative.py
+++ b/neutron_tempest_plugin/api/admin/test_quotas_negative.py
@@ -10,6 +10,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from neutron_lib import constants
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -110,54 +111,38 @@
     @decorators.idempotent_id('5c924ff7-b7a9-474f-92a3-dbe0f976ec13')
     @utils.requires_ext(extension="security-group", service="network")
     def test_create_security_group_when_quotas_is_full(self):
-        tenant_id = self.create_project()['id']
-        sg_args = {'tenant_id': tenant_id}
-        # avoid a number that is made by default
-        sg_list = self.admin_client.list_security_groups(
-            tenant_id=tenant_id)['security_groups']
-        num = len(sg_list) + 1
+        project = self.create_project()
 
-        new_quotas = {'security_group': num}
-        self._setup_quotas(tenant_id, **new_quotas)
+        # Set quotas to allow to create only one more security group
+        security_groups = self.admin_client.list_security_groups(
+            tenant_id=project['id'])['security_groups']
+        self._setup_quotas(project['id'],
+                           security_group=len(security_groups) + 1)
 
-        sg = self.admin_client.create_security_group(
-            **sg_args)['security_group']
-        self.addCleanup(self.admin_client.delete_security_group, sg['id'])
-
-        self.assertRaises(lib_exc.Conflict,
-                          self.admin_client.create_security_group, **sg_args)
+        self.create_security_group(project=project)
+        self.assertRaises(lib_exc.Conflict, self.create_security_group,
+                          project=project)
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('b7143480-6118-4ed4-be38-1b6f15f30d05')
     @utils.requires_ext(extension="security-group", service="network")
     def test_create_security_group_rule_when_quotas_is_full(self):
-        tenant_id = self.create_project()['id']
-        sg_args = {'tenant_id': tenant_id}
+        project = self.create_project()
+        security_group = self.create_security_group(project=project)
 
-        sg = self.admin_client.create_security_group(
-            **sg_args)['security_group']
-        self.addCleanup(self.admin_client.delete_security_group, sg['id'])
+        # Set quotas to allow to create only one more security group rule
+        security_group_rules = self.admin_client.list_security_group_rules(
+            tenant_id=project['id'])['security_group_rules']
+        self._setup_quotas(project['id'],
+                           security_group_rule=len(security_group_rules) + 1)
 
-        # avoid a number that is made by default
-        sg_rule_list = self.admin_client.list_security_group_rules(
-            tenant_id=tenant_id)['security_group_rules']
-        num = len(sg_rule_list) + 1
-
-        new_quotas = {'security_group_rule': num}
-        self._setup_quotas(tenant_id, **new_quotas)
-
-        sg_rule_args = {'tenant_id': tenant_id,
-                        'security_group_id': sg['id'],
-                        'direction': 'ingress'}
-        sg_rule = self.admin_client.create_security_group_rule(
-            **sg_rule_args)['security_group_rule']
-        self.addCleanup(
-            self.admin_client.delete_security_group_rule, sg_rule['id'])
-
-        sg_rule_args['direction'] = 'egress'
+        self.create_security_group_rule(
+            project=project, security_group=security_group,
+            direction=constants.INGRESS_DIRECTION)
         self.assertRaises(lib_exc.Conflict,
-                          self.admin_client.create_security_group_rule,
-                          **sg_rule_args)
+                          self.create_security_group_rule,
+                          project=project, security_group=security_group,
+                          direction=constants.EGRESS_DIRECTION)
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('d00fe5bb-9db8-4e1a-9c31-490f52897e6f')
diff --git a/neutron_tempest_plugin/api/admin/test_security_groups.py b/neutron_tempest_plugin/api/admin/test_security_groups.py
index de7e7d2..d79b0ee 100644
--- a/neutron_tempest_plugin/api/admin/test_security_groups.py
+++ b/neutron_tempest_plugin/api/admin/test_security_groups.py
@@ -14,10 +14,10 @@
 
 from tempest.lib import decorators
 
-from neutron_tempest_plugin.api import base_security_groups as base
+from neutron_tempest_plugin.api import base
 
 
-class SecGroupAdminTest(base.BaseSecGroupTest):
+class SecGroupAdminTest(base.BaseNetworkTest):
     required_extensions = ['security-group']
     credentials = ['primary', 'admin']
 
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index c4bc71d..3101af8 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -189,15 +189,15 @@
                                          network['id'])
 
             # Clean up security groups
-            for secgroup in cls.security_groups:
-                cls._try_delete_resource(cls.client.delete_security_group,
-                                         secgroup['id'])
+            for security_group in cls.security_groups:
+                cls._try_delete_resource(cls.delete_security_group,
+                                         security_group)
 
             # Clean up admin security groups
-            for secgroup in cls.admin_security_groups:
-                cls._try_delete_resource(
-                    cls.admin_client.delete_security_group,
-                    secgroup['id'])
+            for security_group in cls.admin_security_groups:
+                cls._try_delete_resource(cls.delete_security_group,
+                                         security_group,
+                                         client=cls.admin_client)
 
             for subnetpool in cls.subnetpools:
                 cls._try_delete_resource(cls.client.delete_subnetpool,
@@ -431,6 +431,8 @@
                 ip_version=ip_version, cidr=cidr, mask_bits=mask_bits):
             if gateway is not None:
                 kwargs['gateway_ip'] = str(gateway or (subnet_cidr.ip + 1))
+            else:
+                kwargs['gateway_ip'] = None
             try:
                 body = client.create_subnet(
                     network_id=network['id'],
@@ -611,7 +613,11 @@
                                cls.external_network_id)
 
         if port:
-            kwargs['port_id'] = port['id']
+            port_id = kwargs.setdefault('port_id', port['id'])
+            if port_id != port['id']:
+                message = "Port ID specified twice: {!s} != {!s}".format(
+                    port_id, port['id'])
+                raise ValueError(message)
 
         fip = client.create_floatingip(external_network_id,
                                        **kwargs)['floatingip']
@@ -670,6 +676,8 @@
     @classmethod
     def delete_router(cls, router, client=None):
         client = client or cls.client
+        if 'routes' in router:
+            client.remove_router_extra_routes(router['id'])
         body = client.list_router_interfaces(router['id'])
         interfaces = [port for port in body['ports']
                       if port['device_owner'] in const.ROUTER_INTERFACE_OWNERS]
@@ -710,18 +718,78 @@
             description=test_description)['project']
         cls.projects.append(project)
         # Creating a project will create a default security group.
-        # We make these security groups into admin_security_groups.
         sgs_list = cls.admin_client.list_security_groups(
             tenant_id=project['id'])['security_groups']
-        for sg in sgs_list:
-            cls.admin_security_groups.append(sg)
+        for security_group in sgs_list:
+            # Make sure the delete_security_group method will use
+            # the admin client for this group
+            security_group['client'] = cls.admin_client
+            cls.security_groups.append(security_group)
         return project
 
     @classmethod
-    def create_security_group(cls, name, **kwargs):
-        body = cls.client.create_security_group(name=name, **kwargs)
-        cls.security_groups.append(body['security_group'])
-        return body['security_group']
+    def create_security_group(cls, name=None, project=None, client=None,
+                              **kwargs):
+        if project:
+            client = client or cls.admin_client
+            project_id = kwargs.setdefault('project_id', project['id'])
+            tenant_id = kwargs.setdefault('tenant_id', project['id'])
+            if project_id != project['id'] or tenant_id != project['id']:
+                raise ValueError('Project ID specified multiple times')
+        else:
+            client = client or cls.client
+
+        name = name or data_utils.rand_name(cls.__name__)
+        security_group = client.create_security_group(name=name, **kwargs)[
+            'security_group']
+        security_group['client'] = client
+        cls.security_groups.append(security_group)
+        return security_group
+
+    @classmethod
+    def delete_security_group(cls, security_group, client=None):
+        client = client or security_group.get('client') or cls.client
+        client.delete_security_group(security_group['id'])
+
+    @classmethod
+    def create_security_group_rule(cls, security_group=None, project=None,
+                                   client=None, ip_version=None, **kwargs):
+        if project:
+            client = client or cls.admin_client
+            project_id = kwargs.setdefault('project_id', project['id'])
+            tenant_id = kwargs.setdefault('tenant_id', project['id'])
+            if project_id != project['id'] or tenant_id != project['id']:
+                raise ValueError('Project ID specified multiple times')
+
+        if 'security_group_id' not in kwargs:
+            security_group = (security_group or
+                              cls.get_security_group(client=client))
+
+        if security_group:
+            client = client or security_group.get('client')
+            security_group_id = kwargs.setdefault('security_group_id',
+                                                  security_group['id'])
+            if security_group_id != security_group['id']:
+                raise ValueError('Security group ID specified multiple times.')
+
+        ip_version = ip_version or cls._ip_version
+        default_params = (
+            constants.DEFAULT_SECURITY_GROUP_RULE_PARAMS[ip_version])
+        for key, value in default_params.items():
+            kwargs.setdefault(key, value)
+
+        client = client or cls.client
+        return client.create_security_group_rule(**kwargs)[
+            'security_group_rule']
+
+    @classmethod
+    def get_security_group(cls, name='default', client=None):
+        client = client or cls.client
+        security_groups = client.list_security_groups()['security_groups']
+        for security_group in security_groups:
+            if security_group['name'] == name:
+                return security_group
+        raise ValueError("No such security group named {!r}".format(name))
 
     @classmethod
     def create_keypair(cls, client=None, name=None, **kwargs):
@@ -762,7 +830,7 @@
         return trunk
 
     @classmethod
-    def delete_trunk(cls, trunk, client=None):
+    def delete_trunk(cls, trunk, client=None, detach_parent_port=True):
         """Delete network trunk
 
         :param trunk: dictionary containing trunk ID (trunk['id'])
@@ -788,7 +856,7 @@
             parent_port.update(client.show_port(parent_port['id'])['port'])
             return not parent_port['device_id']
 
-        if not is_parent_port_detached():
+        if detach_parent_port and not is_parent_port_detached():
             # this could probably happen when trunk is deleted and parent port
             # has been assigned to a VM that is still running. Here we are
             # assuming that device_id points to such VM.
diff --git a/neutron_tempest_plugin/api/base_security_groups.py b/neutron_tempest_plugin/api/base_security_groups.py
index 127bbd9..ca2c17a 100644
--- a/neutron_tempest_plugin/api/base_security_groups.py
+++ b/neutron_tempest_plugin/api/base_security_groups.py
@@ -14,9 +14,6 @@
 #    under the License.
 
 from neutron_lib import constants
-from tempest.lib.common.utils import data_utils
-
-from neutron_tempest_plugin.api import base
 
 
 # NOTE(yamamoto): The list of protocols here is what we had in Ocata.
@@ -45,9 +42,13 @@
     'udplite',
     'vrrp',
 }
-V4_PROTOCOL_INTS = set(v for k, v in constants.IP_PROTOCOL_MAP.items() if
-                       k in V4_PROTOCOL_NAMES)
-V6_PROTOCOL_LEGACY = set([constants.PROTO_NAME_IPV6_ICMP_LEGACY])
+
+V4_PROTOCOL_INTS = {v
+                    for k, v in constants.IP_PROTOCOL_MAP.items()
+                    if k in V4_PROTOCOL_NAMES}
+
+V6_PROTOCOL_LEGACY = {constants.PROTO_NAME_IPV6_ICMP_LEGACY}
+
 V6_PROTOCOL_NAMES = {
     'ipv6-encap',
     'ipv6-frag',
@@ -56,66 +57,7 @@
     'ipv6-opts',
     'ipv6-route',
 }
-V6_PROTOCOL_INTS = set(v for k, v in constants.IP_PROTOCOL_MAP.items() if
-                       k in (V6_PROTOCOL_NAMES | V6_PROTOCOL_LEGACY))
 
-
-class BaseSecGroupTest(base.BaseNetworkTest):
-
-    def _create_security_group(self, **kwargs):
-        # Create a security group
-        name = data_utils.rand_name('secgroup-')
-        group_create_body = self.client.create_security_group(name=name,
-                                                              **kwargs)
-        self.addCleanup(self._delete_security_group,
-                        group_create_body['security_group']['id'])
-        self.assertEqual(group_create_body['security_group']['name'], name)
-        return group_create_body, name
-
-    def _delete_security_group(self, secgroup_id):
-        self.client.delete_security_group(secgroup_id)
-        # Asserting that the security group is not found in the list
-        # after deletion
-        list_body = self.client.list_security_groups()
-        secgroup_list = list()
-        for secgroup in list_body['security_groups']:
-            secgroup_list.append(secgroup['id'])
-        self.assertNotIn(secgroup_id, secgroup_list)
-
-    def _create_security_group_rule(self, **kwargs):
-        rule_create_body = self.client.create_security_group_rule(**kwargs)
-        # List rules and verify created rule is in response
-        rule_list_body = (
-            self.client.list_security_group_rules())
-        rule_list = [rule['id']
-                     for rule in rule_list_body['security_group_rules']]
-        self.assertIn(rule_create_body['security_group_rule']['id'],
-                      rule_list)
-        self.addCleanup(self._delete_security_group_rule,
-                        rule_create_body['security_group_rule']['id'])
-        return rule_create_body
-
-    def _show_security_group_rule(self, **kwargs):
-        show_rule_body = self.client.show_security_group_rule(kwargs['id'])
-        for key, value in kwargs.items():
-            self.assertEqual(value,
-                             show_rule_body['security_group_rule'][key],
-                             "%s does not match." % key)
-
-    def _delete_security_group_rule(self, secgroup_rule_id):
-        self.client.delete_security_group_rule(secgroup_rule_id)
-        rule_list_body = self.client.list_security_group_rules()
-        rule_list = [rule['id']
-                     for rule in rule_list_body['security_group_rules']]
-        self.assertNotIn(secgroup_rule_id, rule_list)
-
-    def _test_create_show_delete_security_group_rule(self, **kwargs):
-        # The security group rule is deleted by the cleanup call in
-        # _create_security_group_rule.
-        rule_create_body = (
-            self._create_security_group_rule(**kwargs)['security_group_rule'])
-        self._show_security_group_rule(
-            id=rule_create_body['id'],
-            protocol=rule_create_body['protocol'],
-            direction=rule_create_body['direction'],
-            ethertype=rule_create_body['ethertype'])
+V6_PROTOCOL_INTS = {v
+                    for k, v in constants.IP_PROTOCOL_MAP.items()
+                    if k in (V6_PROTOCOL_NAMES | V6_PROTOCOL_LEGACY)}
diff --git a/neutron_tempest_plugin/api/test_availability_zones.py b/neutron_tempest_plugin/api/test_availability_zones.py
new file mode 100644
index 0000000..9d75c28
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_availability_zones.py
@@ -0,0 +1,30 @@
+# Copyright 2018 AT&T Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.api import base
+
+
+class ListAvailableZonesTest(base.BaseNetworkTest):
+
+    @decorators.idempotent_id('5a8a8a1a-c265-11e8-a611-080027758b73')
+    @utils.requires_ext(extension="availability_zone",
+                        service="network")
+    def test_list_available_zones(self):
+        body = self.client.list_availability_zones()
+        self.assertIsNotNone(body)
+        self.assertIsInstance(body['availability_zones'], list)
diff --git a/neutron_tempest_plugin/api/test_extension_driver_port_security.py b/neutron_tempest_plugin/api/test_extension_driver_port_security.py
index 8a8c4f2..6b05557 100644
--- a/neutron_tempest_plugin/api/test_extension_driver_port_security.py
+++ b/neutron_tempest_plugin/api/test_extension_driver_port_security.py
@@ -19,15 +19,13 @@
 from tempest.lib import exceptions as lib_exc
 
 from neutron_tempest_plugin.api import base
-from neutron_tempest_plugin.api import base_security_groups as base_security
 
 FAKE_IP = '10.0.0.1'
 FAKE_MAC = '00:25:64:e8:19:dd'
 
 
 @ddt.ddt
-class PortSecTest(base_security.BaseSecGroupTest,
-                  base.BaseNetworkTest):
+class PortSecTest(base.BaseNetworkTest):
 
     @decorators.idempotent_id('7c338ddf-e64e-4118-bd33-e49a1f2f1495')
     @utils.requires_ext(extension='port-security', service='network')
@@ -76,7 +74,7 @@
         network = self.create_network()
         self.create_subnet(network)
 
-        sec_group_body, _ = self._create_security_group()
+        security_group = self.create_security_group()
         port = self.create_port(network)
 
         # Exception when set port-sec to False with sec-group defined
@@ -88,7 +86,7 @@
         self.assertEmpty(port['security_groups'])
         self.assertFalse(port['port_security_enabled'])
         port = self.update_port(
-            port, security_groups=[sec_group_body['security_group']['id']],
+            port, security_groups=[security_group['id']],
             port_security_enabled=True)
 
         self.assertNotEmpty(port['security_groups'])
@@ -102,11 +100,11 @@
     def test_port_sec_update_pass(self):
         network = self.create_network()
         self.create_subnet(network)
-        sec_group, _ = self._create_security_group()
-        sec_group_id = sec_group['security_group']['id']
-        port = self.create_port(network, security_groups=[sec_group_id],
-                                port_security_enabled=True)
+        security_group = self.create_security_group()
 
+        port = self.create_port(network,
+                                security_groups=[security_group['id']],
+                                port_security_enabled=True)
         self.assertNotEmpty(port['security_groups'])
         self.assertTrue(port['port_security_enabled'])
 
@@ -114,7 +112,7 @@
         self.assertEmpty(port['security_groups'])
         self.assertTrue(port['port_security_enabled'])
 
-        port = self.update_port(port, security_groups=[sec_group_id])
+        port = self.update_port(port, security_groups=[security_group['id']])
         self.assertNotEmpty(port['security_groups'])
         port = self.update_port(port, security_groups=[],
                                 port_security_enabled=False)
diff --git a/neutron_tempest_plugin/api/test_floating_ips.py b/neutron_tempest_plugin/api/test_floating_ips.py
index ea3d22e..9c1af14 100644
--- a/neutron_tempest_plugin/api/test_floating_ips.py
+++ b/neutron_tempest_plugin/api/test_floating_ips.py
@@ -13,6 +13,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import netaddr
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -121,3 +122,39 @@
         self.assertEqual(port['status'], port_details['status'])
         self.assertEqual(port['device_id'], port_details['device_id'])
         self.assertEqual(port['device_owner'], port_details['device_owner'])
+
+
+class FloatingIPPoolTestJSON(base.BaseAdminNetworkTest):
+
+    required_extensions = ['router']
+
+    @decorators.idempotent_id('6c438332-4554-461c-9668-512ae09bf952')
+    @utils.requires_ext(extension="floatingip-pools", service="network")
+    def test_create_floatingip_from_specific_pool(self):
+        network = self.create_network(client=self.admin_client, external=True)
+        subnet1 = self.create_subnet(network, client=self.admin_client)
+        subnet2 = self.create_subnet(network, client=self.admin_client)
+        pools = self.client.list_floatingip_pools()["floatingip_pools"]
+
+        def test_create_floatingip_from_subnet(pools, subnet):
+            pool = None
+            for p in pools:
+                if (p['network_id'] == subnet['network_id'] and
+                        p['subnet_id'] == subnet['id']):
+                    pool = p
+                    break
+
+            self.assertTrue(pool)
+            new_floatingip = self.create_floatingip(
+                pool['network_id'], subnet_id=pool['subnet_id'])
+            cidr = netaddr.IPNetwork(pool['cidr'])
+            ip_address = netaddr.IPAddress(
+                new_floatingip['floating_ip_address'])
+            self.assertIn(ip_address, cidr)
+            fip_id = new_floatingip['id']
+            floatingip = self.client.get_floatingip(fip_id)['floatingip']
+            self.assertEqual(new_floatingip['floating_ip_address'],
+                             floatingip['floating_ip_address'])
+
+        test_create_floatingip_from_subnet(pools, subnet1)
+        test_create_floatingip_from_subnet(pools, subnet2)
diff --git a/neutron_tempest_plugin/api/test_network_ip_availability.py b/neutron_tempest_plugin/api/test_network_ip_availability.py
index 1cdfc7e..e798680 100644
--- a/neutron_tempest_plugin/api/test_network_ip_availability.py
+++ b/neutron_tempest_plugin/api/test_network_ip_availability.py
@@ -19,7 +19,7 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
-from tempest.lib import exceptions as lib_exc
+from tempest.lib import exceptions
 
 from neutron_tempest_plugin.api import base
 
@@ -53,27 +53,26 @@
     def skip_checks(cls):
         super(NetworksIpAvailabilityTest, cls).skip_checks()
 
-    def _get_used_ips(self, network, net_availability):
-        if network:
+    @staticmethod
+    def _get_availability(network, net_availability):
+        if 'network_ip_availabilities' in net_availability:
             for availability in net_availability['network_ip_availabilities']:
                 if availability['network_id'] == network['id']:
-                    return availability['used_ips']
+                    return availability
+            raise exceptions.TempestException('Network IP Availability not '
+                                              'found')
+        else:
+            return net_availability['network_ip_availability']
 
-    def _cleanUp_port(self, port_id):
-        # delete port, any way to avoid race
-        try:
-            self.client.delete_port(port_id)
-        # if port is not found, this means it was deleted in the test
-        except lib_exc.NotFound:
-            pass
+    def _get_used_ips(self, network, net_availability):
+        availability = self._get_availability(network, net_availability)
+        return availability and availability['used_ips']
 
     def _assert_total_and_used_ips(self, expected_used, expected_total,
                                    network, net_availability):
-        if network:
-            for availability in net_availability['network_ip_availabilities']:
-                if availability['network_id'] == network['id']:
-                    self.assertEqual(expected_total, availability['total_ips'])
-                    self.assertEqual(expected_used, availability['used_ips'])
+        availability = self._get_availability(network, net_availability)
+        self.assertEqual(expected_total, availability['total_ips'])
+        self.assertEqual(expected_used, availability['used_ips'])
 
 
 def calc_total_ips(prefix, ip_version):
@@ -89,56 +88,87 @@
 
 class NetworksIpAvailabilityIPv4Test(NetworksIpAvailabilityTest):
 
-    @decorators.idempotent_id('0f33cc8c-1bf6-47d1-9ce1-010618240599')
-    def test_admin_network_availability_before_subnet(self):
+    def setUp(self):
+        super(NetworksIpAvailabilityIPv4Test, self).setUp()
         net_name = data_utils.rand_name('network')
-        network = self.create_network(network_name=net_name)
-        self.addCleanup(self.client.delete_network, network['id'])
+        self.network = self.create_network(network_name=net_name)
+
+    @decorators.idempotent_id('0f33cc8c-1bf6-47d1-9ce1-010618240599')
+    def test_list_ip_availability_before_subnet(self):
         net_availability = self.admin_client.list_network_ip_availabilities()
-        self._assert_total_and_used_ips(0, 0, network, net_availability)
+        self._assert_total_and_used_ips(0, 0, self.network, net_availability)
 
     @decorators.idempotent_id('3aecd3b2-16ed-4b87-a54a-91d7b3c2986b')
-    def test_net_ip_availability_after_subnet_and_ports(self):
-        net_name = data_utils.rand_name('network')
-        network = self.create_network(network_name=net_name)
-        self.addCleanup(self.client.delete_network, network['id'])
-        subnet = self.create_subnet(network, enable_dhcp=False)
+    def test_list_ip_availability_after_subnet_and_ports(self):
+        subnet = self.create_subnet(self.network, enable_dhcp=False)
         prefix = netaddr.IPNetwork(subnet['cidr']).prefixlen
-        self.addCleanup(self.client.delete_subnet, subnet['id'])
         body = self.admin_client.list_network_ip_availabilities()
-        used_ip = self._get_used_ips(network, body)
-        port1 = self.client.create_port(network_id=network['id'])
-        self.addCleanup(self.client.delete_port, port1['port']['id'])
-        port2 = self.client.create_port(network_id=network['id'])
-        self.addCleanup(self.client.delete_port, port2['port']['id'])
+        used_ips_before_port_create = self._get_used_ips(self.network, body)
+        self.create_port(self.network)
         net_availability = self.admin_client.list_network_ip_availabilities()
         self._assert_total_and_used_ips(
-            used_ip + 2,
+            used_ips_before_port_create + 1,
             calc_total_ips(prefix, self._ip_version),
-            network, net_availability)
+            self.network, net_availability)
 
     @decorators.idempotent_id('9f11254d-757b-492e-b14b-f52144e4ee7b')
-    def test_net_ip_availability_after_port_delete(self):
-        net_name = data_utils.rand_name('network')
-        network = self.create_network(network_name=net_name)
-        self.addCleanup(self.client.delete_network, network['id'])
-        subnet = self.create_subnet(network, enable_dhcp=False)
-        self.addCleanup(self.client.delete_subnet, subnet['id'])
-        port = self.client.create_port(network_id=network['id'])
-        self.addCleanup(self._cleanUp_port, port['port']['id'])
+    def test_list_ip_availability_after_port_delete(self):
+        self.create_subnet(self.network, enable_dhcp=False)
+        port = self.create_port(self.network)
         net_availability = self.admin_client.list_network_ip_availabilities()
-        used_ip = self._get_used_ips(network, net_availability)
-        self.client.delete_port(port['port']['id'])
+        used_ips = self._get_used_ips(self.network, net_availability)
+        self.client.delete_port(port['id'])
 
-        def get_net_availability():
+        def is_count_ip_availability_valid():
             availabilities = self.admin_client.list_network_ip_availabilities()
-            used_ip_after_port_delete = self._get_used_ips(network,
-                                                           availabilities)
-            return used_ip - 1 == used_ip_after_port_delete
+            used_ips_after_port_delete = self._get_used_ips(self.network,
+                                                            availabilities)
+            return used_ips - 1 == used_ips_after_port_delete
 
         self.assertTrue(
             test_utils.call_until_true(
-                get_net_availability, DELETE_TIMEOUT, DELETE_SLEEP),
+                is_count_ip_availability_valid, DELETE_TIMEOUT, DELETE_SLEEP),
+            msg="IP address did not become available after port delete")
+
+    @decorators.idempotent_id('da1fbed5-b4a9-45b3-bdcb-b1660710d565')
+    def test_show_ip_availability_after_subnet_and_ports_create(self):
+        net_availability = self.admin_client.show_network_ip_availability(
+            self.network['id'])
+        self._assert_total_and_used_ips(0, 0, self.network, net_availability)
+        subnet = self.create_subnet(self.network, enable_dhcp=False)
+        prefix = netaddr.IPNetwork(subnet['cidr']).prefixlen
+        net_availability = self.admin_client.show_network_ip_availability(
+            self.network['id'])
+        used_ips_before_port_create = self._get_used_ips(self.network,
+                                                         net_availability)
+        self.create_port(self.network)
+        net_availability = self.admin_client.show_network_ip_availability(
+            self.network['id'])
+        self._assert_total_and_used_ips(
+            used_ips_before_port_create + 1,
+            calc_total_ips(prefix, self._ip_version),
+            self.network,
+            net_availability)
+
+    @decorators.idempotent_id('a4d1e291-c152-4d62-9316-8c9bf1c6aee2')
+    def test_show_ip_availability_after_port_delete(self):
+        self.create_subnet(self.network, enable_dhcp=False)
+        port = self.create_port(self.network)
+        net_availability = self.admin_client.show_network_ip_availability(
+            self.network['id'])
+        used_ips = self._get_used_ips(self.network, net_availability)
+        self.client.delete_port(port['id'])
+
+        def is_count_ip_availability_valid():
+            availabilities = self.admin_client.show_network_ip_availability(
+                self.network['id'])
+            used_ips_after_port_delete = self._get_used_ips(self.network,
+                                                            availabilities)
+            return used_ips - 1 == used_ips_after_port_delete
+
+        self.assertTrue(
+            test_utils.call_until_true(
+                is_count_ip_availability_valid, DELETE_TIMEOUT, DELETE_SLEEP),
             msg="IP address did not become available after port delete")
 
 
diff --git a/neutron_tempest_plugin/api/test_ports.py b/neutron_tempest_plugin/api/test_ports.py
index 3b877c2..52783b9 100644
--- a/neutron_tempest_plugin/api/test_ports.py
+++ b/neutron_tempest_plugin/api/test_ports.py
@@ -134,6 +134,28 @@
         expected = [s['id'], s['id']]
         self.assertEqual(expected, subnets)
 
+    @decorators.idempotent_id('9700828d-86eb-4f21-9fa3-da487a2d77f2')
+    @utils.requires_ext(extension="uplink-status-propagation",
+                        service="network")
+    def test_create_port_with_propagate_uplink_status(self):
+        body = self.create_port(self.network, propagate_uplink_status=True)
+        self.assertTrue(body['propagate_uplink_status'])
+        body = self.client.list_ports(id=body['id'])['ports'][0]
+        self.assertTrue(body['propagate_uplink_status'])
+        body = self.client.show_port(body['id'])['port']
+        self.assertTrue(body['propagate_uplink_status'])
+
+    @decorators.idempotent_id('c396a880-0c7b-409d-a80b-800a3d09bdc4')
+    @utils.requires_ext(extension="uplink-status-propagation",
+                        service="network")
+    def test_create_port_without_propagate_uplink_status(self):
+        body = self.create_port(self.network)
+        self.assertFalse(body['propagate_uplink_status'])
+        body = self.client.list_ports(id=body['id'])['ports'][0]
+        self.assertFalse(body['propagate_uplink_status'])
+        body = self.client.show_port(body['id'])['port']
+        self.assertFalse(body['propagate_uplink_status'])
+
 
 class PortsSearchCriteriaTest(base.BaseSearchCriteriaTest):
 
diff --git a/neutron_tempest_plugin/api/test_revisions.py b/neutron_tempest_plugin/api/test_revisions.py
index b03285d..0d590f6 100644
--- a/neutron_tempest_plugin/api/test_revisions.py
+++ b/neutron_tempest_plugin/api/test_revisions.py
@@ -12,16 +12,16 @@
 
 import netaddr
 
+from neutron_lib import constants
 from tempest.common import utils
 from tempest.lib import decorators
 from tempest.lib import exceptions
 
 from neutron_tempest_plugin.api import base
-from neutron_tempest_plugin.api import base_security_groups as bsg
 from neutron_tempest_plugin import config
 
 
-class TestRevisions(base.BaseAdminNetworkTest, bsg.BaseSecGroupTest):
+class TestRevisions(base.BaseAdminNetworkTest):
 
     required_extensions = ['standard-attr-revisions']
 
@@ -111,46 +111,51 @@
     @decorators.idempotent_id('6c256f71-c929-4200-b3dc-4e1843506be5')
     @utils.requires_ext(extension="security-group", service="network")
     def test_update_sg_group_bumps_revision(self):
-        sg, name = self._create_security_group()
-        self.assertIn('revision_number', sg['security_group'])
-        update_body = self.client.update_security_group(
-            sg['security_group']['id'], name='new_sg_name')
-        self.assertGreater(update_body['security_group']['revision_number'],
-                           sg['security_group']['revision_number'])
+        security_group = self.create_security_group()
+        self.assertIn('revision_number', security_group)
+        updated_security_group = self.client.update_security_group(
+            security_group['id'], name='new_sg_name')['security_group']
+        self.assertGreater(updated_security_group['revision_number'],
+                           security_group['revision_number'])
 
     @decorators.idempotent_id('6489632f-8550-4453-a674-c98849742967')
     @utils.requires_ext(extension="security-group", service="network")
     def test_update_port_sg_binding_bumps_revision(self):
-        net = self.create_network()
-        self.addCleanup(self.client.delete_network, net['id'])
-        port = self.create_port(net)
-        self.addCleanup(self.client.delete_port, port['id'])
-        sg = self._create_security_group()[0]
-        self.client.update_port(
-            port['id'], security_groups=[sg['security_group']['id']])
-        updated = self.client.show_port(port['id'])
-        updated2 = self.client.update_port(port['id'], security_groups=[])
-        self.assertGreater(updated['port']['revision_number'],
+        network = self.create_network()
+        port = self.create_port(network)
+
+        security_group = self.create_security_group()
+        updated_port = self.client.update_port(
+            port['id'], security_groups=[security_group['id']])['port']
+        self.assertGreater(updated_port['revision_number'],
                            port['revision_number'])
-        self.assertGreater(updated2['port']['revision_number'],
-                           updated['port']['revision_number'])
+
+        updated_port2 = self.client.update_port(
+            port['id'], security_groups=[])['port']
+        self.assertGreater(updated_port2['revision_number'],
+                           updated_port['revision_number'])
 
     @decorators.idempotent_id('29c7ab2b-d1d8-425d-8cec-fcf632960f22')
     @utils.requires_ext(extension="security-group", service="network")
     def test_update_sg_rule_bumps_sg_revision(self):
-        sg, name = self._create_security_group()
-        rule = self.client.create_security_group_rule(
-            security_group_id=sg['security_group']['id'],
-            protocol='tcp', direction='ingress', ethertype=self.ethertype,
-            port_range_min=60, port_range_max=70)
-        updated = self.client.show_security_group(sg['security_group']['id'])
-        self.assertGreater(updated['security_group']['revision_number'],
-                           sg['security_group']['revision_number'])
-        self.client.delete_security_group_rule(
-            rule['security_group_rule']['id'])
-        updated2 = self.client.show_security_group(sg['security_group']['id'])
-        self.assertGreater(updated2['security_group']['revision_number'],
-                           updated['security_group']['revision_number'])
+        security_group = self.create_security_group()
+
+        security_group_rule = self.create_security_group_rule(
+            security_group=security_group,
+            protocol=constants.PROTO_NAME_TCP,
+            direction=constants.INGRESS_DIRECTION,
+            port_range_min=60,
+            port_range_max=70)
+        updated_security_group = self.client.show_security_group(
+            security_group['id'])['security_group']
+        self.assertGreater(updated_security_group['revision_number'],
+                           security_group['revision_number'])
+
+        self.client.delete_security_group_rule(security_group_rule['id'])
+        updated_security_group2 = self.client.show_security_group(
+            security_group['id'])['security_group']
+        self.assertGreater(updated_security_group2['revision_number'],
+                           updated_security_group['revision_number'])
 
     @decorators.idempotent_id('db70c285-0365-4fac-9f55-2a0ad8cf55a8')
     @utils.requires_ext(extension="allowed-address-pairs", service="network")
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index 299a62e..b6d344d 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -17,39 +17,40 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
-from neutron_tempest_plugin.api import base_security_groups as base
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin.api import base_security_groups
 
 
-class SecGroupTest(base.BaseSecGroupTest):
+class SecGroupTest(base.BaseNetworkTest):
 
     required_extensions = ['security-group']
 
     @decorators.idempotent_id('bfd128e5-3c92-44b6-9d66-7fe29d22c802')
     def test_create_list_update_show_delete_security_group(self):
-        group_create_body, name = self._create_security_group()
+        security_group = self.create_security_group()
 
         # List security groups and verify the created group is in the response
-        list_body = self.client.list_security_groups()
-        secgroup_list = list()
-        for secgroup in list_body['security_groups']:
-            secgroup_list.append(secgroup['id'])
-        self.assertIn(group_create_body['security_group']['id'], secgroup_list)
+        security_groups = self.client.list_security_groups()['security_groups']
+        self.assertIn(security_group['id'],
+                      {sg['id'] for sg in security_groups})
+
         # Update the security group
         new_name = data_utils.rand_name('security')
         new_description = data_utils.rand_name('security-description')
-        update_body = self.client.update_security_group(
-            group_create_body['security_group']['id'],
-            name=new_name,
-            description=new_description)
+        updated_security_group = self.client.update_security_group(
+            security_group['id'], name=new_name,
+            description=new_description)['security_group']
+
         # Verify if security group is updated
-        self.assertEqual(update_body['security_group']['name'], new_name)
-        self.assertEqual(update_body['security_group']['description'],
+        self.assertEqual(updated_security_group['name'], new_name)
+        self.assertEqual(updated_security_group['description'],
                          new_description)
+
         # Show details of the updated security group
-        show_body = self.client.show_security_group(
-            group_create_body['security_group']['id'])
-        self.assertEqual(show_body['security_group']['name'], new_name)
-        self.assertEqual(show_body['security_group']['description'],
+        observed_security_group = self.client.show_security_group(
+            security_group['id'])['security_group']
+        self.assertEqual(observed_security_group['name'], new_name)
+        self.assertEqual(observed_security_group['description'],
                          new_description)
 
     @decorators.idempotent_id('7c0ecb10-b2db-11e6-9b14-000c29248b0d')
@@ -67,58 +68,48 @@
             self.assertIsNotNone(secgrp['id'])
 
 
-class SecGroupProtocolTest(base.BaseSecGroupTest):
+class SecGroupProtocolTest(base.BaseNetworkTest):
+
+    protocol_names = base_security_groups.V4_PROTOCOL_NAMES
+    protocol_ints = base_security_groups.V4_PROTOCOL_INTS
 
     @decorators.idempotent_id('282e3681-aa6e-42a7-b05c-c341aa1e3cdf')
-    def test_create_show_delete_security_group_rule_names(self):
-        group_create_body, _ = self._create_security_group()
-        for protocol in base.V4_PROTOCOL_NAMES:
-            self._test_create_show_delete_security_group_rule(
-                security_group_id=group_create_body['security_group']['id'],
-                protocol=protocol,
+    def test_security_group_rule_protocol_names(self):
+        self._test_security_group_rule_protocols(protocols=self.protocol_names)
+
+    @decorators.idempotent_id('66e47f1f-20b6-4417-8839-3cc671c7afa3')
+    def test_security_group_rule_protocol_ints(self):
+        self._test_security_group_rule_protocols(protocols=self.protocol_ints)
+
+    def _test_security_group_rule_protocols(self, protocols):
+        security_group = self.create_security_group()
+        for protocol in protocols:
+            self._test_security_group_rule(
+                security_group=security_group,
+                protocol=str(protocol),
                 direction=constants.INGRESS_DIRECTION,
                 ethertype=self.ethertype)
 
-    @decorators.idempotent_id('66e47f1f-20b6-4417-8839-3cc671c7afa3')
-    def test_create_show_delete_security_group_rule_integers(self):
-        group_create_body, _ = self._create_security_group()
-        for protocol in base.V4_PROTOCOL_INTS:
-            self._test_create_show_delete_security_group_rule(
-                security_group_id=group_create_body['security_group']['id'],
-                protocol=protocol,
-                direction=constants.INGRESS_DIRECTION,
-                ethertype=self.ethertype)
+    def _test_security_group_rule(self, security_group, **kwargs):
+        security_group_rule = self.create_security_group_rule(
+            security_group=security_group, **kwargs)
+        observed_security_group_rule = self.client.show_security_group_rule(
+            security_group_rule['id'])['security_group_rule']
+        for key, value in kwargs.items():
+            self.assertEqual(value, security_group_rule[key],
+                             "{!r} does not match.".format(key))
+            self.assertEqual(value, observed_security_group_rule[key],
+                             "{!r} does not match.".format(key))
 
 
 class SecGroupProtocolIPv6Test(SecGroupProtocolTest):
-    _ip_version = constants.IP_VERSION_6
 
-    @decorators.idempotent_id('1f7cc9f5-e0d5-487c-8384-3d74060ab530')
-    def test_create_security_group_rule_with_ipv6_protocol_names(self):
-        group_create_body, _ = self._create_security_group()
-        for protocol in base.V6_PROTOCOL_NAMES:
-            self._test_create_show_delete_security_group_rule(
-                security_group_id=group_create_body['security_group']['id'],
-                protocol=protocol,
-                direction=constants.INGRESS_DIRECTION,
-                ethertype=self.ethertype)
+    _ip_version = constants.IP_VERSION_6
+    protocol_names = base_security_groups.V6_PROTOCOL_NAMES
+    protocol_ints = base_security_groups.V6_PROTOCOL_INTS
+    protocol_legacy_names = base_security_groups.V6_PROTOCOL_LEGACY
 
     @decorators.idempotent_id('c7d17b41-3b4e-4add-bb3b-6af59baaaffa')
-    def test_create_security_group_rule_with_ipv6_protocol_legacy_names(self):
-        group_create_body, _ = self._create_security_group()
-        for protocol in base.V6_PROTOCOL_LEGACY:
-            self._test_create_show_delete_security_group_rule(
-                security_group_id=group_create_body['security_group']['id'],
-                protocol=protocol,
-                direction=constants.INGRESS_DIRECTION,
-                ethertype=self.ethertype)
-
-    @decorators.idempotent_id('bcfce0b7-bc96-40ae-9b08-3f6774ee0260')
-    def test_create_security_group_rule_with_ipv6_protocol_integers(self):
-        group_create_body, _ = self._create_security_group()
-        for protocol in base.V6_PROTOCOL_INTS:
-            self._test_create_show_delete_security_group_rule(
-                security_group_id=group_create_body['security_group']['id'],
-                protocol=protocol,
-                direction=constants.INGRESS_DIRECTION,
-                ethertype=self.ethertype)
+    def test_security_group_rule_protocol_legacy_names(self):
+        self._test_security_group_rule_protocols(
+            protocols=self.protocol_legacy_names)
diff --git a/neutron_tempest_plugin/api/test_security_groups_negative.py b/neutron_tempest_plugin/api/test_security_groups_negative.py
index c427691..1fcbd18 100644
--- a/neutron_tempest_plugin/api/test_security_groups_negative.py
+++ b/neutron_tempest_plugin/api/test_security_groups_negative.py
@@ -18,12 +18,14 @@
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
-from neutron_tempest_plugin.api import base_security_groups as base
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin.api import base_security_groups
+
 
 LONG_NAME_NG = 'x' * (db_const.NAME_FIELD_SIZE + 1)
 
 
-class NegativeSecGroupTest(base.BaseSecGroupTest):
+class NegativeSecGroupTest(base.BaseNetworkTest):
 
     required_extensions = ['security-group']
 
@@ -36,72 +38,68 @@
     @decorators.idempotent_id('594edfa8-9a5b-438e-9344-49aece337d49')
     def test_create_security_group_with_too_long_name(self):
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group,
+                          self.create_security_group,
                           name=LONG_NAME_NG)
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('b6b79838-7430-4d3f-8e07-51dfb61802c2')
     def test_create_security_group_with_boolean_type_name(self):
         self.assertRaises(lib_exc.BadRequest,
-                          self.client.create_security_group,
+                          self.create_security_group,
                           name=True)
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('55100aa8-b24f-333c-0bef-64eefd85f15c')
     def test_update_default_security_group_name(self):
-        sg_list = self.client.list_security_groups(name='default')
-        sg = sg_list['security_groups'][0]
+        security_group = self.client.list_security_groups(name='default')[
+            'security_groups'][0]
         self.assertRaises(lib_exc.Conflict, self.client.update_security_group,
-                          sg['id'], name='test')
+                          security_group['id'], name='test')
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('c8510dd8-c3a8-4df9-ae44-24354db50960')
     def test_update_security_group_with_too_long_name(self):
-        sg_list = self.client.list_security_groups(name='default')
-        sg = sg_list['security_groups'][0]
+        security_group = self.client.list_security_groups(name='default')[
+            'security_groups'][0]
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_security_group,
-                          sg['id'], name=LONG_NAME_NG)
+                          security_group['id'], name=LONG_NAME_NG)
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('d9a14917-f66f-4eca-ab72-018563917f1b')
     def test_update_security_group_with_boolean_type_name(self):
-        sg_list = self.client.list_security_groups(name='default')
-        sg = sg_list['security_groups'][0]
+        security_group = self.client.list_security_groups(name='default')[
+            'security_groups'][0]
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_security_group,
-                          sg['id'], name=True)
+                          security_group['id'], name=True)
 
     @decorators.attr(type='negative')
     @decorators.idempotent_id('3200b1a8-d73b-48e9-b03f-e891a4abe2d3')
     def test_delete_in_use_sec_group(self):
-        sgroup = self.os_primary.network_client.create_security_group(
-            name='sgroup')
-        self.security_groups.append(sgroup['security_group'])
-        port = self.client.create_port(
-            network_id=self.network['id'],
-            security_groups=[sgroup['security_group']['id']])
-        self.ports.append(port['port'])
+        security_group = self.create_security_group()
+        self.create_port(network=self.network,
+                         security_groups=[security_group['id']])
         self.assertRaises(lib_exc.Conflict,
                           self.os_primary.network_client.delete_security_group,
-                          security_group_id=sgroup['security_group']['id'])
+                          security_group_id=security_group['id'])
 
 
 class NegativeSecGroupIPv6Test(NegativeSecGroupTest):
     _ip_version = constants.IP_VERSION_6
 
 
-class NegativeSecGroupProtocolTest(base.BaseSecGroupTest):
+class NegativeSecGroupProtocolTest(base.BaseNetworkTest):
 
     def _test_create_security_group_rule_with_bad_protocols(self, protocols):
-        group_create_body, _ = self._create_security_group()
+        security_group = self.create_security_group()
 
         # bad protocols can include v6 protocols because self.ethertype is v4
         for protocol in protocols:
             self.assertRaises(
                 lib_exc.BadRequest,
                 self.client.create_security_group_rule,
-                security_group_id=group_create_body['security_group']['id'],
+                security_group_id=security_group['id'],
                 protocol=protocol, direction=constants.INGRESS_DIRECTION,
                 ethertype=self.ethertype)
 
@@ -109,10 +107,10 @@
     @decorators.idempotent_id('cccbb0f3-c273-43ed-b3fc-1efc48833810')
     def test_create_security_group_rule_with_ipv6_protocol_names(self):
         self._test_create_security_group_rule_with_bad_protocols(
-            base.V6_PROTOCOL_NAMES)
+            base_security_groups.V6_PROTOCOL_NAMES)
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('8aa636bd-7060-4fdf-b722-cdae28e2f1ef')
     def test_create_security_group_rule_with_ipv6_protocol_integers(self):
         self._test_create_security_group_rule_with_bad_protocols(
-            base.V6_PROTOCOL_INTS)
+            base_security_groups.V6_PROTOCOL_INTS)
diff --git a/neutron_tempest_plugin/api/test_timestamp.py b/neutron_tempest_plugin/api/test_timestamp.py
index f5888f9..9ec982d 100644
--- a/neutron_tempest_plugin/api/test_timestamp.py
+++ b/neutron_tempest_plugin/api/test_timestamp.py
@@ -11,14 +11,15 @@
 #    under the License.
 
 import copy
+import time
 
+from neutron_lib import constants
 from tempest.common import utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
 from neutron_tempest_plugin.api import base
 from neutron_tempest_plugin.api import base_routers
-from neutron_tempest_plugin.api import base_security_groups
 from neutron_tempest_plugin import config
 
 CONF = config.CONF
@@ -276,7 +277,7 @@
                          show_fip['updated_at'])
 
 
-class TestTimeStampWithSecurityGroup(base_security_groups.BaseSecGroupTest):
+class TestTimeStampWithSecurityGroup(base.BaseNetworkTest):
 
     required_extensions = ['standard-attr-timestamp']
 
@@ -287,66 +288,66 @@
 
     @decorators.idempotent_id('a3150a7b-d31a-423a-abf3-45e71c97cbac')
     def test_create_sg_with_timestamp(self):
-        sg, _ = self._create_security_group()
+        security_group = self.create_security_group()
         # Verifies body contains timestamp fields
-        self.assertIsNotNone(sg['security_group']['created_at'])
-        self.assertIsNotNone(sg['security_group']['updated_at'])
+        self.assertIsNotNone(security_group['created_at'])
+        self.assertIsNotNone(security_group['updated_at'])
 
     @decorators.idempotent_id('432ae0d3-32b4-413e-a9b3-091ac76da31b')
     def test_update_sg_with_timestamp(self):
-        sgc, _ = self._create_security_group()
-        sg = sgc['security_group']
-        origin_updated_at = sg['updated_at']
-        update_body = {'name': sg['name'] + 'new'}
-        body = self.client.update_security_group(sg['id'], **update_body)
-        updated_sg = body['security_group']
-        new_updated_at = updated_sg['updated_at']
-        self.assertEqual(sg['created_at'], updated_sg['created_at'])
-        # Verify that origin_updated_at is not same with new_updated_at
-        self.assertIsNot(origin_updated_at, new_updated_at)
+        security_group = self.create_security_group()
+
+        # Make sure update time will be different
+        time.sleep(2.)
+        updated_security_group = self.client.update_security_group(
+            security_group['id'], name=security_group['name'] + 'new')[
+                'security_group']
+
+        # Verify that created_at hasn't changed
+        self.assertEqual(security_group['created_at'],
+                         updated_security_group['created_at'])
+        # Verify that updated_at has changed
+        self.assertNotEqual(security_group['updated_at'],
+                            updated_security_group['updated_at'])
 
     @decorators.idempotent_id('521e6723-43d6-12a6-8c3d-f5042ad9fc32')
     def test_show_sg_attribute_with_timestamp(self):
-        sg, _ = self._create_security_group()
-        body = self.client.show_security_group(sg['security_group']['id'])
-        show_sg = body['security_group']
-        # verify the timestamp from creation and showed is same
-        self.assertEqual(sg['security_group']['created_at'],
-                         show_sg['created_at'])
-        self.assertEqual(sg['security_group']['updated_at'],
-                         show_sg['updated_at'])
+        security_group = self.create_security_group()
+        observed_security_group = self.client.show_security_group(
+            security_group['id'])['security_group']
 
-    def _prepare_sgrule_test(self):
-        sg, _ = self._create_security_group()
-        sg_id = sg['security_group']['id']
-        direction = 'ingress'
-        protocol = 'tcp'
-        port_range_min = 77
-        port_range_max = 77
-        rule_create_body = self.client.create_security_group_rule(
-            security_group_id=sg_id,
-            direction=direction,
-            ethertype=self.ethertype,
-            protocol=protocol,
-            port_range_min=port_range_min,
-            port_range_max=port_range_max,
-            remote_group_id=None,
-            remote_ip_prefix=None
-        )
-        return rule_create_body['security_group_rule']
+        # Verify that created_at hasn't changed
+        self.assertEqual(security_group['created_at'],
+                         observed_security_group['created_at'])
+        # Verify that updated_at hasn't changed
+        self.assertEqual(security_group['updated_at'],
+                         observed_security_group['updated_at'])
+
+    def _create_security_group_rule(self):
+        security_group = self.create_security_group()
+        return self.create_security_group_rule(
+            security_group=security_group,
+            direction=constants.INGRESS_DIRECTION,
+            protocol=constants.PROTO_NAME_TCP,
+            port_range_min=77,
+            port_range_max=77)
 
     @decorators.idempotent_id('83e8bd32-43e0-a3f0-1af3-12a5733c653e')
     def test_create_sgrule_with_timestamp(self):
-        sgrule = self._prepare_sgrule_test()
+        security_group_rule = self._create_security_group_rule()
         # Verifies body contains timestamp fields
-        self.assertIsNotNone(sgrule['created_at'])
-        self.assertIsNotNone(sgrule['updated_at'])
+        self.assertIn('created_at', security_group_rule)
+        self.assertIn('updated_at', security_group_rule)
 
     @decorators.idempotent_id('143da0e6-ba17-43ad-b3d7-03aa759c3cb4')
     def test_show_sgrule_attribute_with_timestamp(self):
-        sgrule = self._prepare_sgrule_test()
-        body = self.client.show_security_group_rule(sgrule['id'])
-        show_sgrule = body['security_group_rule']
-        # verify the timestamp from creation and showed is same
-        self.assertEqual(sgrule['created_at'], show_sgrule['created_at'])
-        self.assertEqual(sgrule['updated_at'], show_sgrule['updated_at'])
+        security_group_rule = self._create_security_group_rule()
+
+        observed_security_group_rule = self.client.show_security_group_rule(
+            security_group_rule['id'])['security_group_rule']
+
+        # Verify the timestamps from the create and show responses are equal
+        self.assertEqual(security_group_rule['created_at'],
+                         observed_security_group_rule['created_at'])
+        self.assertEqual(security_group_rule['updated_at'],
+                         observed_security_group_rule['updated_at'])
diff --git a/neutron_tempest_plugin/common/constants.py b/neutron_tempest_plugin/common/constants.py
index 4dc7844..f695f6c 100644
--- a/neutron_tempest_plugin/common/constants.py
+++ b/neutron_tempest_plugin/common/constants.py
@@ -171,3 +171,11 @@
 # Possible types of values (e.g. in QoS rule types)
 VALUES_TYPE_CHOICES = "choices"
 VALUES_TYPE_RANGE = "range"
+
+# Security group parameters values mapped by IP version
+DEFAULT_SECURITY_GROUP_RULE_PARAMS = {
+    lib_constants.IP_VERSION_4: {'ethertype': lib_constants.IPv4,
+                                 'remote_ip_prefix': lib_constants.IPv4_ANY},
+    lib_constants.IP_VERSION_6: {'ethertype': lib_constants.IPv6,
+                                 'remote_ip_prefix': lib_constants.IPv6_ANY},
+}
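
A minimal sketch (illustrative only, not part of the patch) of how the new mapping can be consumed when building a security group rule for a given IP version; the surrounding variables are hypothetical:

    # Default ethertype / remote_ip_prefix pair for the requested IP version.
    rule_params = dict(DEFAULT_SECURITY_GROUP_RULE_PARAMS[
        lib_constants.IP_VERSION_6])
    # rule_params == {'ethertype': 'IPv6', 'remote_ip_prefix': '::/0'}
    rule_params.update(protocol='tcp', direction='ingress')
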
diff --git a/neutron_tempest_plugin/common/shell.py b/neutron_tempest_plugin/common/shell.py
new file mode 100644
index 0000000..bd4a7a3
--- /dev/null
+++ b/neutron_tempest_plugin/common/shell.py
@@ -0,0 +1,180 @@
+# Copyright (c) 2018 Red Hat, Inc.
+#
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import collections
+import subprocess
+import sys
+
+from oslo_log import log
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions
+
+
+LOG = log.getLogger(__name__)
+
+CONF = config.CONF
+
+if ssh.Client.proxy_jump_host:
+    # Perform all SSH connections passing through configured SSH server
+    SSH_PROXY_CLIENT = ssh.Client.create_proxy_client()
+else:
+    SSH_PROXY_CLIENT = None
+
+
+def execute(command, ssh_client=None, timeout=None, check=True):
+    """Execute command inside a remote or local shell
+
+    :param command: command string to be executed
+
+    :param ssh_client: SSH client instance used for remote shell execution
+
+    :param timeout: command execution timeout in seconds
+
+    :param check: when False it doesn't raise ShellCommandFailed when the
+    exit status is not zero. True by default.
+
+    :returns: STDOUT text when command execution terminates with zero exit
+    status.
+
+    :raises ShellTimeoutExpired: when the timeout expires before command
+    execution terminates. In such a case it kills the process, then it
+    eventually tries to read the STDOUT and STDERR buffers (not fully
+    implemented) before raising the exception.
+
+    :raises ShellCommandFailed: when command execution terminates with a
+    non-zero exit status.
+    """
+    ssh_client = ssh_client or SSH_PROXY_CLIENT
+    if timeout:
+        timeout = float(timeout)
+
+    if ssh_client:
+        result = execute_remote_command(command=command, timeout=timeout,
+                                        ssh_client=ssh_client)
+    else:
+        result = execute_local_command(command=command, timeout=timeout)
+
+    if result.exit_status == 0:
+        LOG.debug("Command %r succeeded:\n"
+                  "stderr:\n%s\n"
+                  "stdout:\n%s\n",
+                  command, result.stderr, result.stdout)
+    elif result.exit_status is None:
+        LOG.debug("Command %r timeout expired (timeout=%s):\n"
+                  "stderr:\n%s\n"
+                  "stdout:\n%s\n",
+                  command, timeout, result.stderr, result.stdout)
+    else:
+        LOG.debug("Command %r failed (exit_status=%s):\n"
+                  "stderr:\n%s\n"
+                  "stdout:\n%s\n",
+                  command, result.exit_status, result.stderr, result.stdout)
+    if check:
+        result.check()
+
+    return result
+
+
+def execute_remote_command(command, ssh_client, timeout=None):
+    """Execute command on a remote host using SSH client"""
+    LOG.debug("Executing command %r on remote host %r (timeout=%r)...",
+              command, ssh_client.host, timeout)
+
+    stdout = stderr = exit_status = None
+
+    try:
+        # TODO(fressi): re-implement to capture stderr
+        stdout = ssh_client.exec_command(command, timeout=timeout)
+        exit_status = 0
+
+    except lib_exc.TimeoutException:
+        # TODO(fressi): re-implement to capture STDOUT and STDERR and make
+        # sure process is killed
+        pass
+
+    except lib_exc.SSHExecCommandFailed as ex:
+        # Please note that the SSHExecCommandFailed class has been re-based
+        # on top of ShellCommandFailed
+        stdout = ex.stdout
+        stderr = ex.stderr
+        exit_status = ex.exit_status
+
+    return ShellExecuteResult(command=command, timeout=timeout,
+                              exit_status=exit_status,
+                              stdout=stdout, stderr=stderr)
+
+
+def execute_local_command(command, timeout=None):
+    """Execute command on local host using local shell"""
+
+    LOG.debug("Executing command %r on local host (timeout=%r)...",
+              command, timeout)
+
+    process = subprocess.Popen(command, shell=True,
+                               universal_newlines=True,
+                               stdout=subprocess.PIPE,
+                               stderr=subprocess.PIPE)
+
+    if timeout and sys.version_info < (3, 3):
+        # TODO(fressi): re-implement to support timeout on older Pythons
+        LOG.warning("Popen.communicate method doesn't support timeout "
+                    "on Python %r", sys.version)
+        timeout = None
+
+    # Wait for process execution while reading STDERR and STDOUT streams
+    if timeout:
+        try:
+            stdout, stderr = process.communicate(timeout=timeout)
+        except subprocess.TimeoutExpired:
+            # At this point the process is expected to still be running,
+            # therefore it has to be killed later after calling poll()
+            LOG.exception("Command %r timeout expired.", command)
+            stdout = stderr = None
+    else:
+        stdout, stderr = process.communicate()
+
+    # Check process termination status
+    exit_status = process.poll()
+    if exit_status is None:
+        # The process is still running after calling communicate():
+        # let's kill it and then read the buffers again
+        process.kill()
+        stdout, stderr = process.communicate()
+
+    return ShellExecuteResult(command=command, timeout=timeout,
+                              stdout=stdout, stderr=stderr,
+                              exit_status=exit_status)
+
+
+class ShellExecuteResult(collections.namedtuple(
+        'ShellExecuteResult', ['command', 'timeout', 'exit_status', 'stdout',
+                               'stderr'])):
+
+    def check(self):
+        if self.exit_status is None:
+            raise exceptions.ShellTimeoutExpired(command=self.command,
+                                                 timeout=self.timeout,
+                                                 stderr=self.stderr,
+                                                 stdout=self.stdout)
+
+        elif self.exit_status != 0:
+            raise exceptions.ShellCommandFailed(command=self.command,
+                                                exit_status=self.exit_status,
+                                                stderr=self.stderr,
+                                                stdout=self.stdout)
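
A minimal usage sketch of the execute() helper added above (illustrative only, not part of the patch); the command string is arbitrary:

    from neutron_tempest_plugin.common import shell

    # Runs locally, or through the configured SSH proxy jump host when one is
    # set; check=False leaves error handling to the caller.
    result = shell.execute('uname -a', timeout=10, check=False)
    if result.exit_status == 0:
        print(result.stdout)
    else:
        result.check()  # raises ShellCommandFailed or ShellTimeoutExpired
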
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index 4829db2..ea30a28 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -12,6 +12,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import locale
 import os
 import time
 
@@ -21,6 +22,7 @@
 from tempest.lib import exceptions
 
 from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions as exc
 
 
 CONF = config.CONF
@@ -41,13 +43,13 @@
 
     def __init__(self, host, username, password=None, timeout=None, pkey=None,
                  channel_timeout=10, look_for_keys=False, key_filename=None,
-                 port=22, proxy_client=None):
+                 port=22, proxy_client=None, create_proxy_client=True):
 
         timeout = timeout or self.timeout
 
-        if self.proxy_jump_host:
+        if not proxy_client and create_proxy_client and self.proxy_jump_host:
             # Perform all SSH connections passing through configured SSH server
-            proxy_client = proxy_client or self.create_proxy_client(
+            proxy_client = self.create_proxy_client(
                 timeout=timeout, channel_timeout=channel_timeout)
 
         super(Client, self).__init__(
@@ -113,10 +115,10 @@
                         "set 'proxy_jump_keyfile' to provide a valid SSH key "
                         "file.", login)
 
-        return ssh.Client(
+        return Client(
             host=host, username=username, password=password,
             look_for_keys=look_for_keys, key_filename=key_file,
-            port=port, proxy_client=None, **kwargs)
+            port=port, create_proxy_client=False, **kwargs)
 
     # attribute used to keep reference to opened client connection
     _client = None
@@ -145,6 +147,10 @@
     # more times
     _get_ssh_connection = connect
 
+    # This overrides the superclass test_connection_auth method, preventing
+    # it from closing the connection
+    test_connection_auth = connect
+
     def close(self):
         """Closes connection to SSH server and cleanup resources."""
         client = self._client
@@ -152,6 +158,9 @@
             client.close()
             self._client = None
 
+    def __exit__(self, _exception_type, _exception_value, _traceback):
+        self.close()
+
     def open_session(self):
         """Gets connection to SSH server and open a new paramiko.Channel
 
@@ -170,8 +179,18 @@
                                         user=self.username,
                                         password=self.password)
 
-    def execute_script(self, script, become_root=False,
-                       combine_stderr=True, shell='sh -eux'):
+    def exec_command(self, cmd, encoding="utf-8", timeout=None):
+        if timeout:
+            original_timeout = self.timeout
+            self.timeout = timeout
+        try:
+            return super(Client, self).exec_command(cmd=cmd, encoding=encoding)
+        finally:
+            if timeout:
+                self.timeout = original_timeout
+
+    def execute_script(self, script, become_root=False, combine_stderr=False,
+                       shell='sh -eux', timeout=None, **params):
         """Connect to remote machine and executes script.
 
         Implementation note: it passes script lines to shell interpreter via
@@ -191,67 +210,99 @@
         variable would interrupt script execution with an error and every
         command executed by the script is going to be traced to STDERR.
 
+        :param timeout: time in seconds to wait before forcibly aborting
+        script execution.
+
+        :param **params: script parameter values to be assigned at the
+        beginning of the script.
+
         :returns output written by script to STDOUT.
 
         :raises tempest.lib.exceptions.SSHTimeout: in case it fails to connect
         to remote server or it fails to open a channel.
 
         :raises tempest.lib.exceptions.SSHExecCommandFailed: in case command
-        script exits with non zero exit status.
+        script exits with non zero exit status or times out.
         """
 
+        if params:
+            # Append script parameters at the beginning of the script
+            header = ''.join(sorted(["{!s}={!s}\n".format(k, v)
+                                     for k, v in params.items()]))
+            script = header + '\n' + script
+
+        timeout = timeout or self.timeout
+        end_of_time = time.time() + timeout
+        output_data = b''
+        error_data = b''
+        exit_status = None
+
         channel = self.open_session()
         with channel:
 
             # Combine STDOUT and STDERR to have only one stream to handle
             channel.set_combine_stderr(combine_stderr)
 
-            # Set default environment
-            channel.update_environment({
-                # Language and encoding
-                'LC_ALL': os.environ.get('LC_ALL') or self.default_ssh_lang,
-                'LANG': os.environ.get('LANG') or self.default_ssh_lang
-            })
+            # Propagate the local locale to the remote shell environment
+            lang, encoding = locale.getlocale()
+            if not lang:
+                lang, encoding = locale.getdefaultlocale()
+            _locale = '.'.join([lang, encoding])
+            channel.update_environment({'LC_ALL': _locale,
+                                        'LANG': _locale})
 
             if become_root:
                 shell = 'sudo ' + shell
             # Spawn a Bash
             channel.exec_command(shell)
 
+            end_of_script = False
             lines_iterator = iter(script.splitlines())
-            output_data = b''
-            error_data = b''
-
-            while not channel.exit_status_ready():
+            while (not channel.exit_status_ready() and
+                   time.time() < end_of_time):
                 # Drain incoming data buffers
                 while channel.recv_ready():
                     output_data += channel.recv(self.buf_size)
                 while channel.recv_stderr_ready():
                     error_data += channel.recv_stderr(self.buf_size)
 
-                if channel.send_ready():
+                if not end_of_script and channel.send_ready():
                     try:
                         line = next(lines_iterator)
                     except StopIteration:
                         # Finalize Bash script execution
                         channel.shutdown_write()
+                        end_of_script = True
                     else:
                         # Send script to Bash STDIN line by line
-                        channel.send((line + '\n').encode('utf-8'))
-                else:
-                    time.sleep(.1)
+                        channel.send((line + '\n').encode(encoding))
+                        continue
+
+                time.sleep(.1)
 
             # Get exit status and drain incoming data buffers
-            exit_status = channel.recv_exit_status()
+            if channel.exit_status_ready():
+                exit_status = channel.recv_exit_status()
             while channel.recv_ready():
                 output_data += channel.recv(self.buf_size)
             while channel.recv_stderr_ready():
                 error_data += channel.recv_stderr(self.buf_size)
 
-        if exit_status != 0:
-            raise exceptions.SSHExecCommandFailed(
-                command='bash', exit_status=exit_status,
-                stderr=error_data.decode('utf-8'),
-                stdout=output_data.decode('utf-8'))
+        stdout = _buffer_to_string(output_data, encoding)
+        if exit_status == 0:
+            return stdout
 
-        return output_data.decode('utf-8')
+        stderr = _buffer_to_string(error_data, encoding)
+        if exit_status is None:
+            raise exc.SSHScriptTimeoutExpired(
+                command=shell, host=self.host, script=script, stderr=stderr,
+                stdout=stdout, timeout=timeout)
+        else:
+            raise exc.SSHScriptFailed(
+                command=shell, host=self.host, script=script, stderr=stderr,
+                stdout=stdout, exit_status=exit_status)
+
+
+def _buffer_to_string(data_buffer, encoding):
+    return data_buffer.decode(encoding).replace("\r\n", "\n").replace(
+        "\r", "\n")
diff --git a/neutron_tempest_plugin/common/utils.py b/neutron_tempest_plugin/common/utils.py
index fa7bb8b..3649cb6 100644
--- a/neutron_tempest_plugin/common/utils.py
+++ b/neutron_tempest_plugin/common/utils.py
@@ -88,3 +88,17 @@
                 raise self.skipTest(msg)
         return inner
     return decor
+
+
+def override_class(overriden_class, overrider_class):
+    """Override class definition with a MixIn class
+
+    If overriden_class is not a subclass of overrider_class then it creates
+    a new class that has as bases overrider_class and overriden_class.
+    """
+
+    if not issubclass(overriden_class, overrider_class):
+        name = overriden_class.__name__
+        bases = (overrider_class, overriden_class)
+        overriden_class = type(name, bases, {})
+    return overriden_class
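
A short sketch of the override_class() behaviour relied upon below in exceptions.py (illustrative class names, not part of the patch):

    class Base(Exception):
        pass

    class Mixin(object):
        pass

    # Returns a new class named 'Base' whose bases are (Mixin, Base); calling
    # it again with the result is a no-op because the subclass check passes.
    Patched = override_class(Base, Mixin)
    assert issubclass(Patched, Mixin) and issubclass(Patched, Base)
    assert override_class(Patched, Mixin) is Patched
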
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index 030a126..1bc9617 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -53,6 +53,10 @@
                     '"mtu":<MTU> - integer '
                     '"cidr"<SUBNET/MASK> - string '
                     '"provider:segmentation_id":<VLAN_ID> - integer'),
+    cfg.StrOpt('q_agent',
+               default=None,
+               choices=['None', 'linuxbridge', 'ovs', 'sriov'],
+               help='Agent used for devstack@q-agt.service'),
 
     # Option for feature to connect via SSH to VMs using an intermediate SSH
     # server
diff --git a/neutron_tempest_plugin/exceptions.py b/neutron_tempest_plugin/exceptions.py
index c9264ca..895cb40 100644
--- a/neutron_tempest_plugin/exceptions.py
+++ b/neutron_tempest_plugin/exceptions.py
@@ -15,16 +15,86 @@
 
 from tempest.lib import exceptions
 
-TempestException = exceptions.TempestException
+from neutron_tempest_plugin.common import utils
 
 
-class InvalidConfiguration(TempestException):
+class NeutronTempestPluginException(exceptions.TempestException):
+
+    def __init__(self, **kwargs):
+        super(NeutronTempestPluginException, self).__init__(**kwargs)
+        self._properties = kwargs
+
+    def __getattr__(self, name):
+        try:
+            return self._properties[name]
+        except KeyError:
+            pass
+
+        msg = ("AttributeError: {!r} object has no attribute {!r}").format(
+            self, name)
+        raise AttributeError(msg)
+
+
+class InvalidConfiguration(NeutronTempestPluginException):
     message = "Invalid Configuration"
 
 
-class InvalidCredentials(TempestException):
+class InvalidCredentials(NeutronTempestPluginException):
     message = "Invalid Credentials"
 
 
-class InvalidServiceTag(TempestException):
+class InvalidServiceTag(NeutronTempestPluginException):
     message = "Invalid service tag"
+
+
+class SSHScriptException(exceptions.TempestException):
+    """Base class for SSH client execute_script() exceptions"""
+
+
+class ShellError(NeutronTempestPluginException):
+    pass
+
+
+class ShellCommandFailed(ShellError):
+    """Raised when shell command exited with non-zero status
+
+    """
+    message = ("Command %(command)r failed, exit status: %(exit_status)d, "
+               "stderr:\n%(stderr)s\n"
+               "stdout:\n%(stdout)s")
+
+
+class SSHScriptFailed(ShellCommandFailed):
+    message = ("Command %(command)r failed, exit status: %(exit_status)d, "
+               "host: %(host)r\n"
+               "script:\n%(script)s\n"
+               "stderr:\n%(stderr)s\n"
+               "stdout:\n%(stdout)s")
+
+
+class ShellTimeoutExpired(ShellError):
+    """Raised when shell command timeouts and has been killed before exiting
+
+    """
+    message = ("Command '%(command)s' timed out: %(timeout)d, "
+               "stderr:\n%(stderr)s\n"
+               "stdout:\n%(stdout)s")
+
+
+class SSHScriptTimeoutExpired(ShellTimeoutExpired):
+    message = ("Command '%(command)s', timed out: %(timeout)d "
+               "host: %(host)r\n"
+               "script:\n%(script)s\n"
+               "stderr:\n%(stderr)s\n"
+               "stdout:\n%(stdout)s")
+
+
+# Patch SSHExecCommandFailed exception to make sure we can access the fields
+# command, exit_status, STDOUT and STDERR when the SSH client reports a
+# command failure
+exceptions.SSHExecCommandFailed = utils.override_class(
+    exceptions.SSHExecCommandFailed, ShellCommandFailed)
+
+# The above code created a new SSHExecCommandFailed class based on top
+# of ShellCommandFailed
+assert issubclass(exceptions.SSHExecCommandFailed, ShellCommandFailed)
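
A brief sketch of what re-basing SSHExecCommandFailed enables (illustrative only, not part of the patch), assuming `ssh_client` is a connected tempest SSH client:

    from tempest.lib import exceptions as lib_exc

    try:
        ssh_client.exec_command('false')
    except lib_exc.SSHExecCommandFailed as ex:
        # The ShellCommandFailed base class exposes the failure details
        # passed as keyword arguments when the exception was raised.
        print(ex.command, ex.exit_status, ex.stdout, ex.stderr)
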
diff --git a/neutron_tempest_plugin/scenario/admin/test_floatingip.py b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
index 1af5502..511452c 100644
--- a/neutron_tempest_plugin/scenario/admin/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
@@ -14,7 +14,6 @@
 #    under the License.
 from tempest.common import utils
 from tempest.common import waiters
-from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
 from neutron_tempest_plugin.common import ssh
@@ -38,22 +37,17 @@
         cls.create_router_interface(router['id'], cls.subnets[0]['id'])
         # Create keypair with admin privileges
         cls.keypair = cls.create_keypair(client=cls.os_admin.keypairs_client)
-        # Create security group with admin privileges
-        cls.secgroup = cls.os_admin.network_client.create_security_group(
-            name=data_utils.rand_name('secgroup'))['security_group']
-        # Execute funcs to achieve ssh and ICMP capabilities
-        funcs = [cls.create_loginable_secgroup_rule,
-                 cls.create_pingable_secgroup_rule]
-        for func in funcs:
-            func(secgroup_id=cls.secgroup['id'],
-                 client=cls.os_admin.network_client)
 
-    @classmethod
-    def resource_cleanup(cls):
-        # Cleanup for security group
-        cls.os_admin.network_client.delete_security_group(
-            security_group_id=cls.secgroup['id'])
-        super(FloatingIpTestCasesAdmin, cls).resource_cleanup()
+        # Create security group with admin privileges
+        network_client = cls.os_admin.network_client
+        cls.secgroup = cls.create_security_group(
+            client=cls.os_admin.network_client)
+        cls.create_loginable_secgroup_rule(
+            secgroup_id=cls.secgroup['id'],
+            client=network_client)
+        cls.create_pingable_secgroup_rule(
+            secgroup_id=cls.secgroup['id'],
+            client=network_client)
 
     def _list_hypervisors(self):
         # List of hypervisors
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index a2c5c72..cc1ca4c 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -122,29 +122,24 @@
         Setting a group_id would only permit traffic from ports
         belonging to the same security group.
         """
-
-        rule_list = [{'protocol': 'tcp',
-                      'direction': 'ingress',
-                      'port_range_min': 22,
-                      'port_range_max': 22,
-                      'remote_ip_prefix': '0.0.0.0/0'}]
-        client = client or cls.os_primary.network_client
-        cls.create_secgroup_rules(rule_list, client=client,
-                                  secgroup_id=secgroup_id)
+        return cls.create_security_group_rule(
+            security_group_id=secgroup_id,
+            client=client,
+            protocol=neutron_lib_constants.PROTO_NAME_TCP,
+            direction=neutron_lib_constants.INGRESS_DIRECTION,
+            port_range_min=22,
+            port_range_max=22)
 
     @classmethod
     def create_pingable_secgroup_rule(cls, secgroup_id=None,
                                       client=None):
-        """This rule is intended to permit inbound ping"""
+        """This rule is intended to permit inbound ping
 
-        rule_list = [{'protocol': 'icmp',
-                      'direction': 'ingress',
-                      'port_range_min': 8,  # type
-                      'port_range_max': 0,  # code
-                      'remote_ip_prefix': '0.0.0.0/0'}]
-        client = client or cls.os_primary.network_client
-        cls.create_secgroup_rules(rule_list, client=client,
-                                  secgroup_id=secgroup_id)
+        """
+        return cls.create_security_group_rule(
+            security_group_id=secgroup_id, client=client,
+            protocol=neutron_lib_constants.PROTO_NAME_ICMP,
+            direction=neutron_lib_constants.INGRESS_DIRECTION)
 
     @classmethod
     def create_router_by_client(cls, is_admin=False, **kwargs):
@@ -176,13 +171,13 @@
         client.delete_interface(server_id, port_id=port_id)
 
     def setup_network_and_server(
-        self, router=None, server_name=None, **kwargs):
+        self, router=None, server_name=None, network=None, **kwargs):
         """Create network resources and a server.
 
         Creating a network, subnet, router, keypair, security group
         and a server.
         """
-        self.network = self.create_network()
+        self.network = network or self.create_network()
         LOG.debug("Created network %s", self.network['name'])
         self.subnet = self.create_subnet(self.network)
         LOG.debug("Created subnet %s", self.subnet['id'])
diff --git a/neutron_tempest_plugin/scenario/test_connectivity.py b/neutron_tempest_plugin/scenario/test_connectivity.py
new file mode 100644
index 0000000..3385a04
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_connectivity.py
@@ -0,0 +1,111 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import compute
+from tempest.common import utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+
+class NetworkConnectivityTest(base.BaseTempestTestCase):
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    @utils.requires_ext(extension="router", service="network")
+    def resource_setup(cls):
+        super(NetworkConnectivityTest, cls).resource_setup()
+        # Create keypair with admin privileges
+        cls.keypair = cls.create_keypair()
+        # Create security group with admin privileges
+        cls.secgroup = cls.create_security_group(
+            name=data_utils.rand_name('secgroup'))
+        # Execute funcs to achieve ssh and ICMP capabilities
+        cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+        cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+
+    def _create_servers(self, port_1, port_2):
+        params = {
+            'flavor_ref': CONF.compute.flavor_ref,
+            'image_ref': CONF.compute.image_ref,
+            'key_name': self.keypair['name']
+        }
+        vm1 = self.create_server(networks=[{'port': port_1['id']}], **params)
+
+        if (CONF.compute.min_compute_nodes > 1 and
+                compute.is_scheduler_filter_enabled("DifferentHostFilter")):
+            params['scheduler_hints'] = {
+                'different_host': [vm1['server']['id']]}
+
+        self.create_server(networks=[{'port': port_2['id']}], **params)
+
+    @decorators.idempotent_id('8944b90d-1766-4669-bd8a-672b5d106bb7')
+    def test_connectivity_through_2_routers(self):
+        ap1_net = self.create_network()
+        ap2_net = self.create_network()
+        wan_net = self.create_network()
+        ap1_subnet = self.create_subnet(
+            ap1_net, cidr="10.10.210.0/24", gateway="10.10.210.254")
+        ap2_subnet = self.create_subnet(
+            ap2_net, cidr="10.10.220.0/24", gateway="10.10.220.254")
+        self.create_subnet(
+            wan_net, cidr="10.10.200.0/24", gateway="10.10.200.254")
+
+        ap1_rt = self.create_router(
+            router_name=data_utils.rand_name("ap1_rt"),
+            admin_state_up=True,
+            external_network_id=CONF.network.public_network_id)
+        ap2_rt = self.create_router(
+            router_name=data_utils.rand_name("ap2_rt"),
+            admin_state_up=True)
+
+        ap1_internal_port = self.create_port(
+            ap1_net, security_groups=[self.secgroup['id']])
+        ap2_internal_port = self.create_port(
+            ap2_net, security_groups=[self.secgroup['id']])
+        ap1_wan_port = self.create_port(wan_net)
+        ap2_wan_port = self.create_port(wan_net)
+
+        self._create_servers(ap1_internal_port, ap2_internal_port)
+
+        self.client.add_router_interface_with_port_id(
+            ap1_rt['id'], ap1_wan_port['id'])
+        self.client.add_router_interface_with_port_id(
+            ap2_rt['id'], ap2_wan_port['id'])
+        self.create_router_interface(ap1_rt['id'], ap1_subnet['id'])
+        self.create_router_interface(ap2_rt['id'], ap2_subnet['id'])
+
+        self.client.update_router(
+            ap1_rt['id'],
+            routes=[{"destination": ap2_subnet['cidr'],
+                     "nexthop": ap2_wan_port['fixed_ips'][0]['ip_address']}])
+        self.client.update_router(
+            ap2_rt['id'],
+            routes=[{"destination": ap1_subnet['cidr'],
+                     "nexthop": ap1_wan_port['fixed_ips'][0]['ip_address']}])
+
+        ap1_fip = self.create_and_associate_floatingip(
+            ap1_internal_port['id'])
+        ap1_sshclient = ssh.Client(
+            ap1_fip['floating_ip_address'], CONF.validation.image_ssh_user,
+            pkey=self.keypair['private_key'])
+
+        self.check_remote_connectivity(
+            ap1_sshclient, ap2_internal_port['fixed_ips'][0]['ip_address'])
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
index dd89727..fadabb0 100644
--- a/neutron_tempest_plugin/scenario/test_internal_dns.py
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -27,16 +27,17 @@
 
     @utils.requires_ext(extension="dns-integration", service="network")
     @decorators.idempotent_id('988347de-07af-471a-abfa-65aea9f452a6')
-    def test_dns_name(self):
+    def test_dns_domain_and_name(self):
         """Test the ability to ping a VM's hostname from another VM.
 
         1) Create two VMs on the same network, giving each a name
         2) SSH in to the first VM:
           2.1) ping the other VM's internal IP
-          2.2) ping the otheR VM's hostname
+          2.2) ping the other VM's hostname
         """
 
-        self.setup_network_and_server(server_name='luke')
+        network = self.create_network(dns_domain='starwars.')
+        self.setup_network_and_server(network=network, server_name='luke')
         self.create_pingable_secgroup_rule(
             secgroup_id=self.security_groups[-1]['id'])
         self.check_connectivity(self.fip['floating_ip_address'],
@@ -70,4 +71,8 @@
         self.check_remote_connectivity(
             ssh_client, leia_port['fixed_ips'][0]['ip_address'],
             timeout=CONF.validation.ping_timeout * 10)
+        self.assertIn(
+            'starwars', ssh_client.exec_command('cat /etc/resolv.conf'))
+
         self.check_remote_connectivity(ssh_client, 'leia')
+        self.check_remote_connectivity(ssh_client, 'leia.starwars')
diff --git a/neutron_tempest_plugin/scenario/test_security_groups.py b/neutron_tempest_plugin/scenario/test_security_groups.py
index ebdcf93..7b43a7e 100644
--- a/neutron_tempest_plugin/scenario/test_security_groups.py
+++ b/neutron_tempest_plugin/scenario/test_security_groups.py
@@ -137,16 +137,18 @@
     @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d864')
     def test_protocol_number_rule(self):
         # protocol number is added instead of str in security rule creation
-        server_ssh_clients, fips, _ = self.create_vm_testing_sec_grp(
-            num_servers=1)
+        name = data_utils.rand_name("test_protocol_number_rule")
+        security_group = self.create_security_group(name=name)
+        port = self.create_port(network=self.network, name=name,
+                                security_groups=[security_group['id']])
+        _, fips, _ = self.create_vm_testing_sec_grp(num_servers=1,
+                                                    ports=[port])
         self.ping_ip_address(fips[0]['floating_ip_address'],
                              should_succeed=False)
         rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
                       'direction': constants.INGRESS_DIRECTION,
                       'remote_ip_prefix': '0.0.0.0/0'}]
-        secgroup_id = self.os_primary.network_client.list_security_groups()[
-            'security_groups'][0]['id']
-        self.create_secgroup_rules(rule_list, secgroup_id=secgroup_id)
+        self.create_secgroup_rules(rule_list, secgroup_id=security_group['id'])
         self.ping_ip_address(fips[0]['floating_ip_address'])
 
     @decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a283d9d964')
diff --git a/neutron_tempest_plugin/scenario/test_trunk.py b/neutron_tempest_plugin/scenario/test_trunk.py
index 1903180..85b16cb 100644
--- a/neutron_tempest_plugin/scenario/test_trunk.py
+++ b/neutron_tempest_plugin/scenario/test_trunk.py
@@ -47,8 +47,8 @@
         # setup basic topology for servers we can log into
         cls.network = cls.create_network()
         cls.subnet = cls.create_subnet(cls.network)
-        router = cls.create_router_by_client()
-        cls.create_router_interface(router['id'], cls.subnet['id'])
+        cls.router = cls.create_router_by_client()
+        cls.create_router_interface(cls.router['id'], cls.subnet['id'])
         cls.keypair = cls.create_keypair()
         cls.secgroup = cls.os_primary.network_client.create_security_group(
             name=data_utils.rand_name('secgroup'))
@@ -95,6 +95,27 @@
         t = self.client.show_trunk(trunk_id)['trunk']
         return t['status'] == 'ACTIVE'
 
+    def _create_server_with_network(self, network, use_advanced_image=False):
+        port = self.create_port(network, security_groups=[
+            self.secgroup['security_group']['id']])
+        server, fip = self._create_server_with_fip(
+            port['id'], use_advanced_image=use_advanced_image)
+        ssh_user = CONF.validation.image_ssh_user
+        if use_advanced_image:
+            ssh_user = CONF.neutron_plugin_options.advanced_image_ssh_user
+
+        server_ssh_client = ssh.Client(
+            fip['floating_ip_address'],
+            ssh_user,
+            pkey=self.keypair['private_key'])
+
+        return {
+            'server': server,
+            'fip': fip,
+            'ssh_client': server_ssh_client,
+            'port': port,
+        }
+
     def _create_server_with_port_and_subport(self, vlan_network, vlan_tag,
                                              use_advanced_image=False):
         parent_port = self.create_port(self.network, security_groups=[
@@ -107,7 +128,7 @@
             'port_id': port_for_subport['id'],
             'segmentation_type': 'vlan',
             'segmentation_id': vlan_tag}
-        self.create_trunk(parent_port, [subport])
+        trunk = self.create_trunk(parent_port, [subport])
 
         server, fip = self._create_server_with_fip(
             parent_port['id'], use_advanced_image=use_advanced_image)
@@ -126,6 +147,8 @@
             'fip': fip,
             'ssh_client': server_ssh_client,
             'subport': port_for_subport,
+            'parentport': parent_port,
+            'trunk': trunk,
         }
 
     def _wait_for_server(self, server, advanced_image=False):
@@ -260,3 +283,78 @@
             servers[1]['subport']['fixed_ips'][0]['ip_address'],
             should_succeed=True
         )
+
+    @testtools.skipUnless(
+          CONF.neutron_plugin_options.advanced_image_ref,
+          "Advanced image is required to run this test.")
+    @testtools.skipUnless(
+          CONF.neutron_plugin_options.q_agent == "linuxbridge",
+          "Linux bridge agent is required to run this test.")
+    @decorators.idempotent_id('d61cbdf6-1896-491c-b4b4-871caf7fbffe')
+    def test_parent_port_connectivity_after_trunk_deleted_lb(self):
+        vlan_tag = 10
+
+        vlan_network = self.create_network()
+        vlan_subnet = self.create_subnet(vlan_network)
+        self.create_router_interface(self.router['id'], vlan_subnet['id'])
+
+        trunk_network_server = self._create_server_with_port_and_subport(
+            vlan_network, vlan_tag, use_advanced_image=True)
+        normal_network_server = self._create_server_with_network(self.network)
+        vlan_network_server = self._create_server_with_network(vlan_network)
+
+        self._wait_for_server(trunk_network_server, advanced_image=True)
+        # Configure VLAN interfaces on server
+        command = CONFIGURE_VLAN_INTERFACE_COMMANDS % {'tag': vlan_tag}
+        trunk_network_server['ssh_client'].exec_command(command)
+        out = trunk_network_server['ssh_client'].exec_command(
+            'PATH=$PATH:/usr/sbin;ip addr list')
+        LOG.debug("Interfaces on server %s: %s", trunk_network_server, out)
+
+        self._wait_for_server(normal_network_server)
+        self._wait_for_server(vlan_network_server)
+
+        # allow intra-securitygroup traffic
+        rule = self.client.create_security_group_rule(
+            security_group_id=self.secgroup['security_group']['id'],
+            direction='ingress', ethertype='IPv4', protocol='icmp',
+            remote_group_id=self.secgroup['security_group']['id'])
+        self.addCleanup(self.client.delete_security_group_rule,
+                        rule['security_group_rule']['id'])
+
+        # Ping from trunk_network_server to normal_network_server
+        # via parent port
+        self.check_remote_connectivity(
+            trunk_network_server['ssh_client'],
+            normal_network_server['port']['fixed_ips'][0]['ip_address'],
+            should_succeed=True
+        )
+
+        # Ping from trunk_network_server to vlan_network_server via VLAN
+        # interface should succeed
+        self.check_remote_connectivity(
+            trunk_network_server['ssh_client'],
+            vlan_network_server['port']['fixed_ips'][0]['ip_address'],
+            should_succeed=True
+        )
+
+        # Delete the trunk
+        self.delete_trunk(trunk_network_server['trunk'],
+            detach_parent_port=False)
+        LOG.debug("Trunk %s is deleted.", trunk_network_server['trunk']['id'])
+
+        # Ping from trunk_network_server to normal_network_server
+        # via parent port should succeed after the trunk is deleted
+        self.check_remote_connectivity(
+            trunk_network_server['ssh_client'],
+            normal_network_server['port']['fixed_ips'][0]['ip_address'],
+            should_succeed=True
+        )
+
+        # Ping from trunk_network_server to vlan_network_server via VLAN
+        # interface should fail after the trunk is deleted
+        self.check_remote_connectivity(
+            trunk_network_server['ssh_client'],
+            vlan_network_server['port']['fixed_ips'][0]['ip_address'],
+            should_succeed=False
+        )
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 58dfbf4..d590c25 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -41,7 +41,7 @@
 
         # The following list represents resource names that do not require
         # changing underscore to a hyphen
-        hyphen_exceptions = ["service_profiles"]
+        hyphen_exceptions = ["service_profiles", "availability_zones"]
         # the following map is used to construct proper URI
         # for the given neutron resource
         service_resource_prefix_map = {
@@ -375,6 +375,8 @@
             update_body['distributed'] = kwargs['distributed']
         if 'ha' in kwargs:
             update_body['ha'] = kwargs['ha']
+        if 'routes' in kwargs:
+            update_body['routes'] = kwargs['routes']
         update_body = dict(router=update_body)
         update_body = jsonutils.dumps(update_body)
         resp, body = self.put(uri, update_body)
@@ -446,6 +448,9 @@
         body = jsonutils.loads(body)
         return service_client.ResponseBody(resp, body)
 
+    def remove_router_extra_routes(self, router_id):
+        self.update_router(router_id, routes=None)
+
     def update_agent(self, agent_id, agent_info):
         """Update an agent
 
@@ -871,6 +876,13 @@
         body = jsonutils.loads(body)
         return service_client.ResponseBody(resp, body)
 
+    def delete_security_group_rule(self, security_group_rule_id):
+        uri = '%s/security-group-rules/%s' % (self.uri_prefix,
+                                              security_group_rule_id)
+        resp, body = self.delete(uri)
+        self.expected_success(204, resp.status)
+        return service_client.ResponseBody(resp, body)
+
     def list_security_groups(self, **kwargs):
         post_body = {'security_groups': kwargs}
         body = jsonutils.dumps(post_body)
diff --git a/setup.cfg b/setup.cfg
index c6a1fad..d7790d6 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -4,7 +4,7 @@
 description-file =
     README.rst
 author = OpenStack
-author-email = openstack-dev@lists.openstack.org
+author-email = openstack-discuss@lists.openstack.org
 home-page = https://git.openstack.org/cgit/openstack/neutron-tempest-plugin
 classifier =
     Environment :: OpenStack