Merge "Use latest Ubuntu 16.04 LTS guest image."
diff --git a/.zuul.yaml b/.zuul.yaml
index 39cfe7c..8430f99 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -16,7 +16,7 @@
       tox_envlist: all
       devstack_localrc:
         TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
-        NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details"
+        NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details"
       devstack_plugins:
         neutron: git://git.openstack.org/openstack/neutron.git
         neutron-tempest-plugin: git://git.openstack.org/openstack/neutron-tempest-plugin.git
diff --git a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
index 60af89e..d449ead 100644
--- a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
+++ b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
@@ -28,7 +28,7 @@
     @decorators.attr(type='negative')
     @decorators.idempotent_id('d39a96e2-2dea-4feb-8093-e7ac991ce6f8')
     def test_create_port_security_false_on_shared_network(self):
-        network = self.create_shared_network()
+        network = self.create_network(shared=True)
         self.assertTrue(network['shared'])
         self.create_subnet(network, client=self.admin_client)
         self.assertRaises(lib_exc.Forbidden, self.create_port,
diff --git a/neutron_tempest_plugin/api/admin/test_networks.py b/neutron_tempest_plugin/api/admin/test_networks.py
index bb7ac24..74e72ef 100644
--- a/neutron_tempest_plugin/api/admin/test_networks.py
+++ b/neutron_tempest_plugin/api/admin/test_networks.py
@@ -24,50 +24,51 @@
 
     @decorators.idempotent_id('d3c76044-d067-4cb0-ae47-8cdd875c7f67')
     @utils.requires_ext(extension="project-id", service="network")
-    def test_admin_create_network_keystone_v3(self):
+    def test_create_network_with_project(self):
         project_id = self.client.tenant_id  # non-admin
 
         name = 'admin-created-with-project_id'
-        new_net = self.create_network_keystone_v3(name, project_id,
-            client=self.admin_client)
-        self.assertEqual(name, new_net['name'])
-        self.assertEqual(project_id, new_net['project_id'])
-        self.assertEqual(project_id, new_net['tenant_id'])
+        network = self.create_network(name, project_id=project_id,
+                                      client=self.admin_client)
+        self.assertEqual(name, network['name'])
+        self.assertEqual(project_id, network['project_id'])
+        self.assertEqual(project_id, network['tenant_id'])
 
-        body = self.client.list_networks(id=new_net['id'])
-        lookup_net = body['networks'][0]
-        self.assertEqual(name, lookup_net['name'])
-        self.assertEqual(project_id, lookup_net['project_id'])
-        self.assertEqual(project_id, lookup_net['tenant_id'])
+        observed_network = self.client.list_networks(
+            id=network['id'])['networks'][0]
+        self.assertEqual(name, observed_network['name'])
+        self.assertEqual(project_id, observed_network['project_id'])
+        self.assertEqual(project_id, observed_network['tenant_id'])
 
     @decorators.idempotent_id('8d21aaca-4364-4eb9-8b79-44b4fff6373b')
     @utils.requires_ext(extension="project-id", service="network")
-    def test_admin_create_network_keystone_v3_and_tenant(self):
+    def test_create_network_with_project_and_tenant(self):
         project_id = self.client.tenant_id  # non-admin
 
         name = 'created-with-project-and-tenant'
-        new_net = self.create_network_keystone_v3(
-            name, project_id, tenant_id=project_id, client=self.admin_client)
-        self.assertEqual(name, new_net['name'])
-        self.assertEqual(project_id, new_net['project_id'])
-        self.assertEqual(project_id, new_net['tenant_id'])
+        network = self.create_network(name, project_id=project_id,
+                                      tenant_id=project_id,
+                                      client=self.admin_client)
+        self.assertEqual(name, network['name'])
+        self.assertEqual(project_id, network['project_id'])
+        self.assertEqual(project_id, network['tenant_id'])
 
-        body = self.client.list_networks(id=new_net['id'])
-        lookup_net = body['networks'][0]
-        self.assertEqual(name, lookup_net['name'])
-        self.assertEqual(project_id, lookup_net['project_id'])
-        self.assertEqual(project_id, lookup_net['tenant_id'])
+        observed_network = self.client.list_networks(
+            id=network['id'])['networks'][0]
+        self.assertEqual(name, observed_network['name'])
+        self.assertEqual(project_id, observed_network['project_id'])
+        self.assertEqual(project_id, observed_network['tenant_id'])
 
     @decorators.idempotent_id('08b92179-669d-45ee-8233-ef6611190809')
     @utils.requires_ext(extension="project-id", service="network")
-    def test_admin_create_network_keystone_v3_and_other_tenant(self):
+    def test_create_network_with_project_and_other_tenant(self):
         project_id = self.client.tenant_id  # non-admin
         other_tenant = uuidutils.generate_uuid()
 
         name = 'created-with-project-and-other-tenant'
         e = self.assertRaises(lib_exc.BadRequest,
-                              self.create_network_keystone_v3, name,
-                              project_id, tenant_id=other_tenant,
+                              self.create_network, name,
+                              project_id=project_id, tenant_id=other_tenant,
                               client=self.admin_client)
         expected_message = "'project_id' and 'tenant_id' do not match"
         self.assertEqual(expected_message, e.resp_body['message'])
diff --git a/neutron_tempest_plugin/api/admin/test_ports.py b/neutron_tempest_plugin/api/admin/test_ports.py
new file mode 100644
index 0000000..cbcd933
--- /dev/null
+++ b/neutron_tempest_plugin/api/admin/test_ports.py
@@ -0,0 +1,60 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import netaddr
+
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.api import base
+
+
+class PortTestCasesAdmin(base.BaseAdminNetworkTest):
+
+    @classmethod
+    def resource_setup(cls):
+        super(PortTestCasesAdmin, cls).resource_setup()
+        cls.network = cls.create_network()
+        cls.create_subnet(cls.network)
+
+    @decorators.idempotent_id('dfe8cc79-18d9-4ae8-acef-3ec6bb719bb1')
+    def test_update_mac_address(self):
+        body = self.create_port(self.network)
+        current_mac = body['mac_address']
+
+        # Verify mac_address can be successfully updated.
+        body = self.admin_client.update_port(body['id'],
+                                             mac_address='12:34:56:78:be:6d')
+        new_mac = body['port']['mac_address']
+        self.assertNotEqual(current_mac, new_mac)
+        self.assertEqual('12:34:56:78:be:6d', new_mac)
+
+        # Verify that port update without specifying mac_address does not
+        # change the mac address.
+        body = self.admin_client.update_port(body['port']['id'],
+                                             description='Port Description')
+        self.assertEqual(new_mac, body['port']['mac_address'])
+
+    @decorators.idempotent_id('dfe8cc79-18d9-4ae8-acef-3ec6bb719cc2')
+    @utils.requires_ext(extension="port-mac-address-regenerate",
+                        service="network")
+    def test_regenerate_mac_address(self):
+        body = self.create_port(self.network)
+        current_mac = body['mac_address']
+        body = self.admin_client.update_port(body['id'],
+                                             mac_address=None)
+        new_mac = body['port']['mac_address']
+        self.assertNotEqual(current_mac, new_mac)
+        self.assertTrue(netaddr.valid_mac(new_mac))
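Note: the test above relies on the port-mac-address-regenerate extension, where updating a port with mac_address=None asks Neutron to assign a fresh, valid MAC rather than clearing the field. A minimal sketch of the same call outside the test class (admin_client and port_id are illustrative names, not part of this change):

    # Hedged sketch: regenerate a port's MAC address by updating the port
    # with mac_address=None (requires admin credentials and the
    # port-mac-address-regenerate extension).
    import netaddr

    port = admin_client.update_port(port_id, mac_address=None)['port']
    assert netaddr.valid_mac(port['mac_address'])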
diff --git a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
index 16375ec..cef0ffc 100644
--- a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
@@ -29,7 +29,7 @@
     @classmethod
     def resource_setup(cls):
         super(SharedNetworksTest, cls).resource_setup()
-        cls.shared_network = cls.create_shared_network()
+        cls.shared_network = cls.create_network(shared=True)
 
     @decorators.idempotent_id('6661d219-b96d-4597-ad10-55766123421a')
     def test_filtering_shared_networks(self):
@@ -84,7 +84,7 @@
 
     @decorators.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
     def test_create_update_shared_network(self):
-        shared_network = self.create_shared_network()
+        shared_network = self.create_network(shared=True)
         net_id = shared_network['id']
         self.assertEqual('ACTIVE', shared_network['status'])
         self.assertIsNotNone(shared_network['id'])
@@ -156,7 +156,7 @@
     @classmethod
     def resource_setup(cls):
         super(AllowedAddressPairSharedNetworkTest, cls).resource_setup()
-        cls.network = cls.create_shared_network()
+        cls.network = cls.create_network(shared=True)
         cls.create_subnet(cls.network, client=cls.admin_client)
 
     @decorators.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff')
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index 6246eb7..ae01d56 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -60,6 +60,9 @@
     # Default to ipv4.
     _ip_version = const.IP_VERSION_4
 
+    # Derive from BaseAdminNetworkTest class to have this initialized
+    admin_client = None
+
     @classmethod
     def get_client_manager(cls, credential_type=None, roles=None,
                            force_new=None):
@@ -124,6 +127,7 @@
         cls.projects = []
         cls.log_objects = []
         cls.reserved_subnet_cidrs = set()
+        cls.keypairs = []
 
     @classmethod
     def resource_cleanup(cls):
@@ -170,8 +174,7 @@
                                          subnet['id'])
             # Clean up networks
             for network in cls.networks:
-                cls._try_delete_resource(cls.client.delete_network,
-                                         network['id'])
+                cls._try_delete_resource(cls.delete_network, network)
 
             # Clean up admin networks
             for network in cls.admin_networks:
@@ -221,6 +224,9 @@
                 cls._try_delete_resource(cls.admin_client.delete_log,
                                          log_object['id'])
 
+            for keypair in cls.keypairs:
+                cls._try_delete_resource(cls.delete_keypair, keypair)
+
         super(BaseNetworkTest, cls).resource_cleanup()
 
     @classmethod
@@ -244,44 +250,108 @@
             pass
 
     @classmethod
-    def create_network(cls, network_name=None, client=None, **kwargs):
-        """Wrapper utility that returns a test network."""
-        network_name = network_name or data_utils.rand_name('test-network-')
+    def create_network(cls, network_name=None, client=None, external=None,
+                       shared=None, provider_network_type=None,
+                       provider_physical_network=None,
+                       provider_segmentation_id=None, **kwargs):
+        """Create a network.
 
-        client = client or cls.client
-        body = client.create_network(name=network_name, **kwargs)
-        network = body['network']
-        if client is cls.client:
-            cls.networks.append(network)
-        else:
-            cls.admin_networks.append(network)
+        When client is not provided, the admin_client attribute is not None
+        (for example when using the BaseAdminNetworkTest base class) and any
+        of the convenience parameters (external, shared, provider_network_type,
+        provider_physical_network and provider_segmentation_id) is used, it
+        silently uses admin_client. If the network is not shared then it uses
+        the same project_id as the regular client.
+
+        :param network_name: Human-readable name of the network
+
+        :param client: client to be used for connecting to network service
+
+        :param external: indicates whether the network has an external routing
+        facility that's not managed by the networking service.
+
+        :param shared: indicates whether this resource is shared across all
+        projects. By default, only administrative users can change this value.
+        If True and the admin_client attribute is not None, then the network
+        is created under the administrative project.
+
+        :param provider_network_type: the type of physical network that this
+        network should be mapped to. For example, 'flat', 'vlan', 'vxlan', or
+        'gre'. Valid values depend on a networking back-end.
+
+        :param provider_physical_network: the physical network where this
+        network should be implemented. The Networking API v2.0 does not provide
+        a way to list available physical networks. For example, the Open
+        vSwitch plug-in configuration file defines a symbolic name that maps to
+        specific bridges on each compute host.
+
+        :param provider_segmentation_id: The ID of the isolated segment on the
+        physical network. The network_type attribute defines the segmentation
+        model. For example, if the network_type value is 'vlan', this ID is a
+        vlan identifier. If the network_type value is 'gre', this ID is a gre
+        key.
+
+        :param **kwargs: extra parameters to be forwarded to network service
+        """
+
+        name = (network_name or kwargs.pop('name', None) or
+                data_utils.rand_name('test-network-'))
+
+        # translate convenience parameters
+        admin_client_required = False
+        if provider_network_type:
+            admin_client_required = True
+            kwargs['provider:network_type'] = provider_network_type
+        if provider_physical_network:
+            admin_client_required = True
+            kwargs['provider:physical_network'] = provider_physical_network
+        if provider_segmentation_id:
+            admin_client_required = True
+            kwargs['provider:segmentation_id'] = provider_segmentation_id
+        if external is not None:
+            admin_client_required = True
+            kwargs['router:external'] = bool(external)
+        if shared is not None:
+            admin_client_required = True
+            kwargs['shared'] = bool(shared)
+
+        if not client:
+            if admin_client_required and cls.admin_client:
+                # For convenience silently switch to admin client
+                client = cls.admin_client
+                if not shared:
+                    # Keep this network visible from current project
+                    project_id = (kwargs.get('project_id') or
+                                  kwargs.get('tenant_id') or
+                                  cls.client.tenant_id)
+                    kwargs.update(project_id=project_id, tenant_id=project_id)
+            else:
+                # Use default client
+                client = cls.client
+
+        network = client.create_network(name=name, **kwargs)['network']
+        network['client'] = client
+        cls.networks.append(network)
         return network
 
     @classmethod
-    def create_shared_network(cls, network_name=None, **post_body):
-        network_name = network_name or data_utils.rand_name('sharednetwork-')
-        post_body.update({'name': network_name, 'shared': True})
-        body = cls.admin_client.create_network(**post_body)
-        network = body['network']
-        cls.admin_networks.append(network)
-        return network
+    def delete_network(cls, network, client=None):
+        client = client or network.get('client') or cls.client
+        client.delete_network(network['id'])
+
+    @classmethod
+    def create_shared_network(cls, network_name=None, **kwargs):
+        return cls.create_network(name=network_name, shared=True, **kwargs)
 
     @classmethod
     def create_network_keystone_v3(cls, network_name=None, project_id=None,
                                    tenant_id=None, client=None):
-        """Wrapper utility that creates a test network with project_id."""
-        client = client or cls.client
-        network_name = network_name or data_utils.rand_name(
-            'test-network-with-project_id')
-        project_id = cls.client.tenant_id
-        body = client.create_network_keystone_v3(network_name, project_id,
-            tenant_id)
-        network = body['network']
-        if client is cls.client:
-            cls.networks.append(network)
-        else:
-            cls.admin_networks.append(network)
-        return network
+        params = {}
+        if project_id:
+            params['project_id'] = project_id
+        if tenant_id:
+            params['tenant_id'] = tenant_id
+        return cls.create_network(name=network_name, client=client, **params)
 
     @classmethod
     def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
@@ -593,6 +663,23 @@
         cls.security_groups.append(body['security_group'])
         return body['security_group']
 
+    @classmethod
+    def create_keypair(cls, client=None, name=None, **kwargs):
+        client = client or cls.os_primary.keypairs_client
+        name = name or data_utils.rand_name('keypair-test')
+        keypair = client.create_keypair(name=name, **kwargs)['keypair']
+
+        # save client for later cleanup
+        keypair['client'] = client
+        cls.keypairs.append(keypair)
+        return keypair
+
+    @classmethod
+    def delete_keypair(cls, keypair, client=None):
+        client = (client or keypair.get('client') or
+                  cls.os_primary.keypairs_client)
+        client.delete_keypair(keypair_name=keypair['name'])
+
 
 class BaseAdminNetworkTest(BaseNetworkTest):
 
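For reference, a minimal usage sketch of the reworked create_network() helper shown above; the physical network name and VLAN ID are placeholders rather than values taken from this change:

    # Hedged sketch: convenience parameters silently switch to the admin
    # client when one is available (e.g. in BaseAdminNetworkTest subclasses).
    shared_net = cls.create_network(shared=True)
    vlan_net = cls.create_network(provider_network_type='vlan',
                                  provider_physical_network='physnet1',
                                  provider_segmentation_id=100)

    # Keypairs created through the base class are now tracked in cls.keypairs
    # and removed automatically in resource_cleanup().
    keypair = cls.create_keypair()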
diff --git a/neutron_tempest_plugin/api/test_auto_allocated_topology.py b/neutron_tempest_plugin/api/test_auto_allocated_topology.py
index 37f9ad1..0baa2a8 100644
--- a/neutron_tempest_plugin/api/test_auto_allocated_topology.py
+++ b/neutron_tempest_plugin/api/test_auto_allocated_topology.py
@@ -63,7 +63,7 @@
 
         up = {'admin_state_up': True}
         networks = _count(self.client.list_networks(**up)['networks'])
-        subnets = _count(self.client.list_subnets(**up)['subnets'])
+        subnets = _count(self.client.list_subnets()['subnets'])
         routers = _count(self.client.list_routers(**up)['routers'])
         return networks, subnets, routers
 
diff --git a/neutron_tempest_plugin/api/test_networks.py b/neutron_tempest_plugin/api/test_networks.py
index 19f4fcb..7e9943d 100644
--- a/neutron_tempest_plugin/api/test_networks.py
+++ b/neutron_tempest_plugin/api/test_networks.py
@@ -75,28 +75,29 @@
 
     @decorators.idempotent_id('0cc0552f-afaf-4231-b7a7-c2a1774616da')
     @utils.requires_ext(extension="project-id", service="network")
-    def test_create_network_keystone_v3(self):
+    def test_create_network_with_project(self):
         project_id = self.client.tenant_id
 
         name = 'created-with-project_id'
-        new_net = self.create_network_keystone_v3(name, project_id)
-        self.assertEqual(name, new_net['name'])
-        self.assertEqual(project_id, new_net['project_id'])
-        self.assertEqual(project_id, new_net['tenant_id'])
+        network = self.create_network(name, project_id=project_id)
+        self.assertEqual(name, network['name'])
+        self.assertEqual(project_id, network['project_id'])
+        self.assertEqual(project_id, network['tenant_id'])
 
-        body = self.client.list_networks(id=new_net['id'])['networks'][0]
-        self.assertEqual(name, body['name'])
+        observed_network = self.client.list_networks(
+            id=network['id'])['networks'][0]
+        self.assertEqual(name, observed_network['name'])
 
         new_name = 'create-with-project_id-2'
-        body = self.client.update_network(new_net['id'], name=new_name)
-        new_net = body['network']
-        self.assertEqual(new_name, new_net['name'])
-        self.assertEqual(project_id, new_net['project_id'])
-        self.assertEqual(project_id, new_net['tenant_id'])
+        updated_network = self.client.update_network(
+            network['id'], name=new_name)['network']
+        self.assertEqual(new_name, updated_network['name'])
+        self.assertEqual(project_id, updated_network['project_id'])
+        self.assertEqual(project_id, updated_network['tenant_id'])
 
     @decorators.idempotent_id('94e2a44c-3367-4253-8c2a-22deaf59e96c')
     @utils.requires_ext(extension="dns-integration",
-                       service="network")
+                        service="network")
     def test_create_update_network_dns_domain(self):
         domain1 = 'test.org.'
         body = self.create_network(dns_domain=domain1)
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index d31eab8..2bf99bf 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -209,8 +209,8 @@
         policy = self.create_qos_policy(name='test-policy',
                                         description='test policy',
                                         shared=False)
-        network = self.create_shared_network('test network',
-                                             qos_policy_id=policy['id'])
+        network = self.create_network('test network', shared=True,
+                                      qos_policy_id=policy['id'])
 
         retrieved_network = self.admin_client.show_network(network['id'])
         self.assertEqual(
@@ -251,7 +251,7 @@
         policy = self.create_qos_policy(name='test-policy',
                                         description='test policy',
                                         shared=False)
-        network = self.create_shared_network('test network')
+        network = self.create_network('test network', shared=True)
         retrieved_network = self.admin_client.show_network(network['id'])
         self.assertIsNone(retrieved_network['network']['qos_policy_id'])
 
@@ -266,7 +266,7 @@
         policy = self.create_qos_policy(name='test-policy',
                                         description='test policy',
                                         shared=True)
-        network = self.create_shared_network('test network')
+        network = self.create_network('test network', shared=True)
         port = self.create_port(network, qos_policy_id=policy['id'])
 
         retrieved_port = self.admin_client.show_port(port['id'])
@@ -275,7 +275,7 @@
 
     @decorators.idempotent_id('49e02f5a-e1dd-41d5-9855-cfa37f2d195e')
     def test_policy_association_with_port_nonexistent_policy(self):
-        network = self.create_shared_network('test network')
+        network = self.create_network('test network', shared=True)
         self.assertRaises(
             exceptions.NotFound,
             self.create_port,
@@ -287,7 +287,7 @@
         policy = self.create_qos_policy(name='test-policy',
                                         description='test policy',
                                         shared=False)
-        network = self.create_shared_network('test network')
+        network = self.create_network('test network', shared=True)
         self.assertRaises(
             exceptions.NotFound,
             self.create_port,
@@ -298,7 +298,7 @@
         policy = self.create_qos_policy(name='test-policy',
                                         description='test policy',
                                         shared=True)
-        network = self.create_shared_network('test network')
+        network = self.create_network('test network', shared=True)
         port = self.create_port(network)
         retrieved_port = self.admin_client.show_port(port['id'])
         self.assertIsNone(retrieved_port['port']['qos_policy_id'])
@@ -313,7 +313,8 @@
         policy = self.create_qos_policy(name='test-policy',
                                         description='test policy',
                                         shared=True)
-        self.create_shared_network('test network', qos_policy_id=policy['id'])
+        self.create_network('test network', qos_policy_id=policy['id'],
+                            shared=True)
         self.assertRaises(
             exceptions.Conflict,
             self.admin_client.delete_qos_policy, policy['id'])
@@ -323,7 +324,7 @@
         policy = self.create_qos_policy(name='test-policy',
                                         description='test policy',
                                         shared=True)
-        network = self.create_shared_network('test network')
+        network = self.create_network('test network', shared=True)
         self.create_port(network, qos_policy_id=policy['id'])
         self.assertRaises(
             exceptions.Conflict,
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index e02cf92..1a000fd 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -240,10 +240,9 @@
 
     def create_provider_network(self):
         foo_net = config.CONF.neutron_plugin_options.provider_vlans[0]
-        post_body = {'network_name': data_utils.rand_name('vlan-net'),
-                     'provider:network_type': 'vlan',
-                     'provider:physical_network': foo_net}
-        return self.create_shared_network(**post_body)
+        return self.create_network(name=data_utils.rand_name('vlan-net'),
+                                   provider_network_type='vlan',
+                                   provider_physical_network=foo_net)
 
     @decorators.idempotent_id('0f05d98e-41f5-4629-dada-9aee269c9602')
     def test_add_subport(self):
@@ -286,13 +285,13 @@
         super(TrunkTestMtusJSONBase, self).setUp()
 
         # VXLAN autocomputed MTU (1450) is smaller than that of GRE (1458)
-        vxlan_kwargs = {'network_name': data_utils.rand_name('vxlan-net'),
-                        'provider:network_type': 'vxlan'}
-        self.smaller_mtu_net = self.create_shared_network(**vxlan_kwargs)
+        self.smaller_mtu_net = self.create_network(
+            name=data_utils.rand_name('vxlan-net'),
+            provider_network_type='vxlan')
 
-        gre_kwargs = {'network_name': data_utils.rand_name('gre-net'),
-                      'provider:network_type': 'gre'}
-        self.larger_mtu_net = self.create_shared_network(**gre_kwargs)
+        self.larger_mtu_net = self.create_network(
+            name=data_utils.rand_name('gre-net'),
+            provider_network_type='gre')
 
         self.smaller_mtu_port = self.create_port(self.smaller_mtu_net)
         self.smaller_mtu_port_2 = self.create_port(self.smaller_mtu_net)
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index b919b65..99f731c 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -12,13 +12,103 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os
+
+from oslo_log import log
 from tempest.lib.common import ssh
 
 from neutron_tempest_plugin import config
 
 
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
 class Client(ssh.Client):
-    def __init__(self, *args, **kwargs):
-        if 'timeout' not in kwargs:
-            kwargs['timeout'] = config.CONF.validation.ssh_timeout
-        super(Client, self).__init__(*args, **kwargs)
+
+    timeout = CONF.validation.ssh_timeout
+
+    proxy_jump_host = CONF.neutron_plugin_options.ssh_proxy_jump_host
+    proxy_jump_username = CONF.neutron_plugin_options.ssh_proxy_jump_username
+    proxy_jump_password = CONF.neutron_plugin_options.ssh_proxy_jump_password
+    proxy_jump_keyfile = CONF.neutron_plugin_options.ssh_proxy_jump_keyfile
+    proxy_jump_port = CONF.neutron_plugin_options.ssh_proxy_jump_port
+
+    def __init__(self, host, username, password=None, timeout=None, pkey=None,
+                 channel_timeout=10, look_for_keys=False, key_filename=None,
+                 port=22, proxy_client=None):
+
+        timeout = timeout or self.timeout
+
+        if self.proxy_jump_host:
+            # Perform all SSH connections passing through configured SSH server
+            proxy_client = proxy_client or self.create_proxy_client(
+                timeout=timeout, channel_timeout=channel_timeout)
+
+        super(Client, self).__init__(
+            host=host, username=username, password=password, timeout=timeout,
+            pkey=pkey, channel_timeout=channel_timeout,
+            look_for_keys=look_for_keys, key_filename=key_filename, port=port,
+            proxy_client=proxy_client)
+
+    @classmethod
+    def create_proxy_client(cls, look_for_keys=True, **kwargs):
+        host = cls.proxy_jump_host
+        if not host:
+            # proxy_jump_host string cannot be empty or None
+            raise ValueError(
+                "'proxy_jump_host' configuration option is empty.")
+
+        # Accept an empty string as a synonym for the default value in the
+        # options below
+        password = cls.proxy_jump_password or None
+        key_file = cls.proxy_jump_keyfile or None
+        username = cls.proxy_jump_username
+
+        # Port must be a valid port number (1-65535)
+        port = cls.proxy_jump_port
+        if port <= 0 or port > 65535:
+            raise ValueError(
+                "Invalid value for 'proxy_jump_port' configuration option: "
+                "{!r}".format(port))
+
+        login = "{username}@{host}:{port}".format(username=username, host=host,
+                                                  port=port)
+
+        if key_file:
+            # expand ~ character with user HOME directory
+            key_file = os.path.expanduser(key_file)
+            if os.path.isfile(key_file):
+                LOG.debug("Going to create SSH connection to %r using key "
+                          "file: %s", login, key_file)
+
+            else:
+                # This message could help the user to identify a
+                # mis-configuration in tempest.conf
+                raise ValueError(
+                    "Cannot find file specified as 'proxy_jump_keyfile' "
+                    "option: {!r}".format(key_file))
+
+        elif password:
+            LOG.debug("Going to create SSH connection to %r using password.",
+                      login)
+
+        elif look_for_keys:
+            # This message could help the user to identify a mis-configuration
+            # in tempest.conf
+            LOG.info("Both 'proxy_jump_password' and 'proxy_jump_keyfile' "
+                     "options are empty. Going to create SSH connection to %r "
+                     "looking for key file location into %r directory.",
+                     login, os.path.expanduser('~/.ssh'))
+        else:
+            # A user that forces look_for_keys=False should really know what
+            # they want
+            LOG.warning("No authentication method provided to create an SSH "
+                        "connection to %r. If it fails, then please "
+                        "set 'proxy_jump_keyfile' to provide a valid SSH key "
+                        "file.", login)
+
+        return ssh.Client(
+            host=host, username=username, password=password,
+            look_for_keys=look_for_keys, key_filename=key_file,
+            port=port, proxy_client=None, **kwargs)
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index fc07e81..e15748d 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -56,7 +56,25 @@
                     '"provider:network_type":<TYPE> - string '
                     '"mtu":<MTU> - integer '
                     '"cidr"<SUBNET/MASK> - string '
-                    '"provider:segmentation_id":<VLAN_ID> - integer')
+                    '"provider:segmentation_id":<VLAN_ID> - integer'),
+
+    # Options for the feature that connects via SSH to VMs using an
+    # intermediate SSH server
+    cfg.StrOpt('ssh_proxy_jump_host',
+               default=None,
+               help='Proxy jump host used to connect via SSH to VMs.'),
+    cfg.StrOpt('ssh_proxy_jump_username',
+               default='root',
+               help='User name used to connect to "ssh_proxy_jump_host".'),
+    cfg.StrOpt('ssh_proxy_jump_password',
+               default=None,
+               help='Password used to connect to "ssh_proxy_jump_host".'),
+    cfg.StrOpt('ssh_proxy_jump_keyfile',
+               default=None,
+               help='Keyfile used to connect to "ssh_proxy_jump_host".'),
+    cfg.IntOpt('ssh_proxy_jump_port',
+               default=22,
+               help='Port used to connect to "ssh_proxy_jump_host".'),
 ]
 
 # TODO(amuller): Redo configuration options registration as part of the planned
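With these options set in tempest.conf, the Client class from common/ssh.py transparently opens every test SSH session through the jump host. A brief hedged sketch (the address and credentials are illustrative):

    # Hedged sketch: when ssh_proxy_jump_host is configured, this connection
    # is tunnelled through the proxy; otherwise it connects directly.
    from neutron_tempest_plugin.common import ssh

    client = ssh.Client('192.0.2.10', 'cirros', pkey=keypair['private_key'])
    print(client.exec_command('hostname'))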
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index b76a81a..10cdaf1 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -15,6 +15,8 @@
 import subprocess
 
 import netaddr
+from neutron_lib.api import validators
+from neutron_lib import constants as neutron_lib_constants
 from oslo_log import log
 from tempest.common.utils import net_utils
 from tempest.common import waiters
@@ -33,20 +35,6 @@
 
 
 class BaseTempestTestCase(base_api.BaseNetworkTest):
-    @classmethod
-    def resource_setup(cls):
-        super(BaseTempestTestCase, cls).resource_setup()
-
-        cls.keypairs = []
-
-    @classmethod
-    def resource_cleanup(cls):
-        for keypair in cls.keypairs:
-            client = keypair['client']
-            client.delete_keypair(
-                keypair_name=keypair['keypair']['name'])
-
-        super(BaseTempestTestCase, cls).resource_cleanup()
 
     def create_server(self, flavor_ref, image_ref, key_name, networks,
                       **kwargs):
@@ -105,17 +93,6 @@
         return server
 
     @classmethod
-    def create_keypair(cls, client=None):
-        client = client or cls.os_primary.keypairs_client
-        name = data_utils.rand_name('keypair-test')
-        body = client.create_keypair(name=name)
-        body.update(client=client)
-        if client is cls.os_primary.keypairs_client:
-            cls.keypairs.append(body)
-
-        return body['keypair']
-
-    @classmethod
     def create_secgroup_rules(cls, rule_list, secgroup_id=None,
                               client=None):
         client = client or cls.os_primary.network_client
@@ -190,7 +167,8 @@
             self.floating_ips.append(fip)
         return fip
 
-    def setup_network_and_server(self, router=None, **kwargs):
+    def setup_network_and_server(
+        self, router=None, server_name=None, **kwargs):
         """Create network resources and a server.
 
         Creating a network, subnet, router, keypair, security group
@@ -212,12 +190,18 @@
         self.keypair = self.create_keypair()
         self.create_loginable_secgroup_rule(
             secgroup_id=secgroup['security_group']['id'])
-        self.server = self.create_server(
-            flavor_ref=CONF.compute.flavor_ref,
-            image_ref=CONF.compute.image_ref,
-            key_name=self.keypair['name'],
-            networks=[{'uuid': self.network['id']}],
-            security_groups=[{'name': secgroup['security_group']['name']}])
+
+        server_kwargs = {
+            'flavor_ref': CONF.compute.flavor_ref,
+            'image_ref': CONF.compute.image_ref,
+            'key_name': self.keypair['name'],
+            'networks': [{'uuid': self.network['id']}],
+            'security_groups': [{'name': secgroup['security_group']['name']}],
+        }
+        if server_name is not None:
+            server_kwargs['name'] = server_name
+
+        self.server = self.create_server(**server_kwargs)
         self.wait_for_server_active(self.server['server'])
         self.port = self.client.list_ports(network_id=self.network['id'],
                                            device_id=self.server[
@@ -252,7 +236,8 @@
                           "for the console log", server['id'])
 
     def _check_remote_connectivity(self, source, dest, should_succeed=True,
-                                   nic=None, mtu=None, fragmentation=True):
+                                   nic=None, mtu=None, fragmentation=True,
+                                   timeout=None):
         """check ping server via source ssh connection
 
         :param source: RemoteClient: an ssh connection from which to ping
@@ -267,15 +252,21 @@
         def ping_host(source, host, count=CONF.validation.ping_count,
                       size=CONF.validation.ping_size, nic=None, mtu=None,
                       fragmentation=True):
-            addr = netaddr.IPAddress(host)
-            cmd = 'ping6' if addr.version == 6 else 'ping'
+            IP_VERSION_4 = neutron_lib_constants.IP_VERSION_4
+            IP_VERSION_6 = neutron_lib_constants.IP_VERSION_6
+
+            # Use 'ping6' for IPv6 addresses, 'ping' for IPv4 and hostnames
+            ip_version = (
+                IP_VERSION_6 if netaddr.valid_ipv6(host) else IP_VERSION_4)
+            cmd = (
+                'ping6' if ip_version == IP_VERSION_6 else 'ping')
             if nic:
                 cmd = 'sudo {cmd} -I {nic}'.format(cmd=cmd, nic=nic)
             if mtu:
                 if not fragmentation:
                     cmd += ' -M do'
                 size = str(net_utils.get_ping_payload_size(
-                    mtu=mtu, ip_version=addr.version))
+                    mtu=mtu, ip_version=ip_version))
             cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
             return source.exec_command(cmd)
 
@@ -289,22 +280,24 @@
                             'from: %s.', dest, source.host)
                 return not should_succeed
             LOG.debug('ping result: %s', result)
-            # Assert that the return traffic was from the correct
-            # source address.
-            from_source = 'from %s' % dest
-            self.assertIn(from_source, result)
+
+            if validators.validate_ip_address(dest) is None:
+                # Assert that the return traffic was from the correct
+                # source address.
+                from_source = 'from %s' % dest
+                self.assertIn(from_source, result)
             return should_succeed
 
-        return test_utils.call_until_true(ping_remote,
-                                          CONF.validation.ping_timeout,
-                                          1)
+        return test_utils.call_until_true(
+            ping_remote, timeout or CONF.validation.ping_timeout, 1)
 
     def check_remote_connectivity(self, source, dest, should_succeed=True,
                                   nic=None, mtu=None, fragmentation=True,
-                                  servers=None):
+                                  servers=None, timeout=None):
         try:
             self.assertTrue(self._check_remote_connectivity(
-                source, dest, should_succeed, nic, mtu, fragmentation))
+                source, dest, should_succeed, nic, mtu, fragmentation,
+                timeout=timeout))
         except lib_exc.SSHTimeout as ssh_e:
             LOG.debug(ssh_e)
             self._log_console_output(servers)
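The reworked helper now accepts hostnames as the ping destination (the 'from <dest>' assertion is only applied when the destination is an IP address) and takes an optional timeout. A short hedged sketch (addresses and names are illustrative):

    # Hedged sketch: ping by IP with a longer timeout, then by hostname.
    self.check_remote_connectivity(ssh_client, '10.1.0.5',
                                   timeout=CONF.validation.ping_timeout * 2)
    self.check_remote_connectivity(ssh_client, 'leia')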
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
new file mode 100644
index 0000000..dd89727
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -0,0 +1,73 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+
+class InternalDNSTest(base.BaseTempestTestCase):
+
+    @utils.requires_ext(extension="dns-integration", service="network")
+    @decorators.idempotent_id('988347de-07af-471a-abfa-65aea9f452a6')
+    def test_dns_name(self):
+        """Test the ability to ping a VM's hostname from another VM.
+
+        1) Create two VMs on the same network, giving each a name
+        2) SSH in to the first VM:
+          2.1) ping the other VM's internal IP
+          2.2) ping the other VM's hostname
+        """
+
+        self.setup_network_and_server(server_name='luke')
+        self.create_pingable_secgroup_rule(
+            secgroup_id=self.security_groups[-1]['id'])
+        self.check_connectivity(self.fip['floating_ip_address'],
+                                CONF.validation.image_ssh_user,
+                                self.keypair['private_key'])
+
+        leia = self.create_server(
+            flavor_ref=CONF.compute.flavor_ref,
+            image_ref=CONF.compute.image_ref,
+            key_name=self.keypair['name'],
+            networks=[{'uuid': self.network['id']}],
+            security_groups=[
+                {'name': self.security_groups[-1]['name']}],
+            name='leia')
+        self.wait_for_server_active(leia['server'])
+
+        ssh_client = ssh.Client(
+            self.fip['floating_ip_address'],
+            CONF.validation.image_ssh_user,
+            pkey=self.keypair['private_key'])
+
+        self.assertIn('luke', ssh_client.exec_command('hostname'))
+
+        leia_port = self.client.list_ports(
+            network_id=self.network['id'],
+            device_id=leia['server']['id'])['ports'][0]
+
+        # Ping with a higher timeout because spawning 2 VMs in some
+        # environments can put significant load on the deployment, resulting
+        # in very long boot times.
+        self.check_remote_connectivity(
+            ssh_client, leia_port['fixed_ips'][0]['ip_address'],
+            timeout=CONF.validation.ping_timeout * 10)
+        self.check_remote_connectivity(ssh_client, 'leia')
diff --git a/tools/customize_ubuntu_image b/tools/customize_ubuntu_image
new file mode 100755
index 0000000..9c3fd07
--- /dev/null
+++ b/tools/customize_ubuntu_image
@@ -0,0 +1,172 @@
+#!/bin/bash
+
+# IMPLEMENTATION NOTE: It was not possible to implement this script using
+# virt-customize because of the Ubuntu bugs below:
+#  - https://bugs.launchpad.net/ubuntu/+source/libguestfs/+bug/1632405
+#  - https://bugs.launchpad.net/ubuntu/+source/isc-dhcp/+bug/1650740
+#
+# A lower-level strategy has therefore been adopted, performing the following
+# steps:
+#  - mount guest image to a temporary folder
+#  - set up an environment suitable for executing chroot
+#  - execute customize_image function inside chroot environment
+#  - cleanup chroot environment
+
+# Array of packages to be installed on the guest image
+INSTALL_GUEST_PACKAGES=(
+   socat  # used to replace nc for testing advanced network features like
+          # multicast
+)
+
+# Function to be executed once inside the chroot on the guest image
+# Add more customization steps here
+function customize_image {
+    # dhclient-script needs to read /etc/fstab when setting up the network
+    touch /etc/fstab
+    chmod ugo+r /etc/fstab
+
+    # The Ubuntu guest image's _apt user may require access to the folders below
+    local apt_user_folders=( /var/lib/apt/lists/partial )
+    mkdir -p "${apt_user_folders[@]}"
+    chown _apt.root -fR "${apt_user_folders[@]}"
+
+    # Install the desired packages into the Ubuntu guest image
+    apt-get update -y
+    apt-get install -y "${INSTALL_GUEST_PACKAGES[@]}"
+}
+
+function main {
+    set -eux
+    trap cleanup EXIT
+    "${ENTRY_POINT:-chroot_image}" "$@"
+}
+
+# Chroot into the guest image, then execute customize_image inside it
+function chroot_image {
+    local image_file=$1
+    local temp_dir=${TEMP_DIR:-$(make_temp -d)}
+
+    # Mount guest image into a temporary directory
+    local mount_dir=${temp_dir}/mount
+    mkdir -p "${mount_dir}"
+    mount_image "${mount_dir}" "${temp_dir}/pid"
+
+    # Mount system directories
+    bind_dir "/dev" "${mount_dir}/dev"
+    bind_dir "/dev/pts" "${mount_dir}/dev/pts"
+    bind_dir "/proc" "${mount_dir}/proc"
+    bind_dir "/sys" "${mount_dir}/sys"
+
+    # Mount to keep temporary files out of guest image
+    mkdir -p "${temp_dir}/apt" "${temp_dir}/cache" "${temp_dir}/tmp"
+    bind_dir "${temp_dir}/cache" "${mount_dir}/var/cache"
+    bind_dir "${temp_dir}/tmp" "${mount_dir}/tmp"
+    bind_dir "${temp_dir}/tmp" "${mount_dir}/var/tmp"
+    bind_dir "${temp_dir}/apt" "${mount_dir}/var/lib/apt"
+
+    # Replace /etc/resolv.conf symlink to use the same DNS as this host
+    sudo rm -f "${mount_dir}/etc/resolv.conf"
+    sudo cp /etc/resolv.conf "${mount_dir}/etc/resolv.conf"
+
+    # Make sure /etc/fstab exists and is readable because it is required
+    # by /sbin/dhclient-script
+    sudo touch /etc/fstab
+    sudo chmod 644 /etc/fstab
+
+    # Copy this script to mount dir
+    local script_name=$(basename "$0")
+    local script_file=${mount_dir}/${script_name}
+    sudo cp "$0" "${script_file}"
+    sudo chmod 500 "${script_file}"
+    add_cleanup sudo rm -f "'${script_file}'"
+
+    # Execute customize_image inside chroot environment
+    local command_line=( ${CHROOT_COMMAND:-customize_image} )
+    local entry_point=${command_line[0]}
+    unset command_line[0]
+    sudo -E "ENTRY_POINT=${entry_point}" \
+        chroot "${mount_dir}" "/${script_name}" "${command_line[@]:-}"
+}
+
+# Mounts the guest image onto the $1 directory, writing the PID to the $2 file,
+# then registers an unmount of that directory for final cleanup
+function mount_image {
+    local mount_dir=$1
+    local pid_file=$2
+
+    # export libguest settings
+    export LIBGUESTFS_BACKEND=${LIBGUESTFS_BACKEND:-direct}
+    export LIBGUESTFS_BACKEND_SETTINGS=${LIBGUESTFS_BACKEND_SETTINGS:-force_tcg}
+
+    # Mount guest image
+    sudo -E guestmount -i \
+        --add "${image_file}" \
+        --pid-file "${pid_file}" \
+        "${mount_dir}"
+
+    add_cleanup \
+        'ENTRY_POINT=umount_image' \
+        "'$0'" "'${mount_dir}'" "'${pid_file}'"
+}
+
+# Unmounts guest image directory
+function umount_image {
+    local mount_dir=$1
+    local pid_file=$2
+    local timeout=10
+
+    # Take PID just before unmounting
+    local pid=$(cat ${pid_file} || true)
+    sudo -E guestunmount "${mount_dir}"
+
+    if [ "${pid:-}" != "" ]; then
+        # Make sure guestmount process is not running before using image
+        # file again
+        local count=${timeout}
+        while sudo kill -0 "${pid}" 2> /dev/null && (( count-- > 0 )); do
+            sleep 1
+        done
+        if [ ${count} -lt 0 ]; then
+            # It is not safe to use image file at this point
+            echo "Wait for guestmount to exit failed after ${timeout} seconds"
+        fi
+    fi
+}
+
+# Creates a temporary file or directory and registers its removal for final cleanup
+function make_temp {
+    local temporary=$(mktemp "$@")
+    add_cleanup sudo rm -fR "'${temporary}'"
+    echo "${temporary}"
+}
+
+# Bind-mount directory $1 onto directory $2 and register its unmount for final cleanup
+function bind_dir {
+    local source_dir=$1
+    local target_dir=$2
+    sudo mount --bind "${source_dir}" "${target_dir}"
+    add_cleanup sudo umount "'${target_dir}'"
+}
+
+# Registers a command line to be executed for final cleanup
+function add_cleanup {
+    CLEANUP_FILE=${CLEANUP_FILE:-$(mktemp)}
+
+    echo -e "$*" >> ${CLEANUP_FILE}
+}
+
+# Execute command lines for final cleanup in reversed order
+function cleanup {
+    error=$?
+
+    local cleanup_file=${CLEANUP_FILE:-}
+    if [ -r "${cleanup_file}" ]; then
+        tac "${cleanup_file}" | bash +e -x
+        CLEANUP_FILE=
+        rm -fR "${cleanup_file}"
+    fi
+
+    exit ${error}
+}
+
+main "$@"
diff --git a/tox.ini b/tox.ini
index bba0a64..5eb8b10 100644
--- a/tox.ini
+++ b/tox.ini
@@ -16,6 +16,7 @@
 commands = stestr run --slowest {posargs}
 
 [testenv:pep8]
+basepython = python3
 commands =
   sh ./tools/misc-sanity-checks.sh
   flake8
@@ -23,9 +24,11 @@
   sh
 
 [testenv:venv]
+basepython = python3
 commands = {posargs}
 
 [testenv:cover]
+basepython = python3
 setenv =
     {[testenv]setenv}
     PYTHON=coverage run --source neutron_tempest_plugin --parallel-mode
@@ -36,13 +39,16 @@
     coverage xml -o cover/coverage.xml
 
 [testenv:docs]
+basepython = python3
 commands = python setup.py build_sphinx
 
 [testenv:releasenotes]
+basepython = python3
 commands =
   sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
 
 [testenv:debug]
+basepython = python3
 commands = oslo_debug_helper -t neutron_tempest_plugin/ {posargs}
 
 [flake8]