Merge "fix tox python3 overrides"
diff --git a/.zuul.yaml b/.zuul.yaml
index adcb433..cc1b61f 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -1,9 +1,9 @@
- job:
- name: neutron-tempest-plugin-scenario
+ name: neutron-tempest-plugin
parent: devstack-tempest
abstract: true
description: |
- Perform setup common to all tempest scenario test jobs.
+ Perform setup common to all Neutron tempest tests
roles:
- zuul: openstack-dev/devstack
required-projects:
@@ -12,22 +12,16 @@
- openstack/neutron-tempest-plugin
- openstack/tempest
vars:
- tempest_test_regex: ^neutron_tempest_plugin\.scenario
tempest_concurrency: 4
tox_envlist: all
devstack_localrc:
- TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- PHYSICAL_NETWORK: default
- DOWNLOAD_DEFAULT_IMAGES: false
- IMAGE_URLS: "http://cloud-images.ubuntu.com/releases/16.04/release-20170113/ubuntu-16.04-server-cloudimg-amd64-disk1.img,"
- DEFAULT_INSTANCE_TYPE: ds512M
- DEFAULT_INSTANCE_USER: ubuntu
- BUILD_TIMEOUT: 784
+ TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details"
devstack_plugins:
neutron: git://git.openstack.org/openstack/neutron.git
neutron-tempest-plugin: git://git.openstack.org/openstack/neutron-tempest-plugin.git
devstack_services:
- cinder: true
+ tls-proxy: false
tempest: true
neutron-dns: true
neutron-qos: true
@@ -46,11 +40,13 @@
# lib/neutron-legacy
"/$NEUTRON_CORE_PLUGIN_CONF":
ml2:
- type_drivers: flat,vlan,local,vxlan
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
ml2_type_vlan:
network_vlan_ranges: foo:1:10
ml2_type_vxlan:
vni_ranges: 1:2000
+ ml2_type_gre:
+ tunnel_id_ranges: 1:1000
$NEUTRON_L3_CONF:
agent:
availability_zone: nova
@@ -68,7 +64,7 @@
provider_vlans: foo,
agent_availability_zone: nova
image_is_advanced: true
- available_type_drivers: flat,vlan,local,vxlan
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
irrelevant-files:
- ^(test-|)requirements.txt$
- ^releasenotes/.*$
@@ -82,25 +78,21 @@
- job:
name: neutron-tempest-plugin-api
- parent: legacy-dsvm-base
- run: playbooks/neutron-tempest-plugin-api/run.yaml
- post-run: playbooks/neutron-tempest-plugin-api/post.yaml
- timeout: 10000
- required-projects:
- - openstack-infra/devstack-gate
- - openstack/neutron
- - openstack/neutron-tempest-plugin
- - openstack/tempest
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^releasenotes/.*$
- - ^doc/.*$
- - ^setup.cfg$
- - ^.*\.rst$
- - ^neutron/locale/.*$
- - ^neutron/tests/unit/.*$
- - ^tools/.*$
- - ^tox.ini$
+ parent: neutron-tempest-plugin
+ vars:
+ tempest_test_regex: ^neutron_tempest_plugin\.api
+ devstack_services:
+ neutron-log: true
+ devstack_local_conf:
+ post-config:
+      # NOTE(slaweq): We can get rid of this hardcoded absolute path once
+      # the devstack-tempest job is switched to use lib/neutron instead of
+      # lib/neutron-legacy
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ AGENT:
+ tunnel_types: gre,vxlan
+ network_log:
+ local_output_log_base: /tmp/test_log.log
- job:
name: neutron-tempest-plugin-api-queens
@@ -108,6 +100,30 @@
override-checkout: stable/queens
vars:
branch_override: stable/queens
+ devstack_localrc:
+      # TODO(slaweq): find a way to put this list of extensions in the
+      # neutron repository and keep it different per branch;
+      # then it could be removed from here
+ NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details"
+
+- job:
+ name: neutron-tempest-plugin-scenario
+ parent: neutron-tempest-plugin
+ abstract: true
+ description: |
+ Perform setup common to all tempest scenario test jobs.
+ vars:
+ tempest_test_regex: ^neutron_tempest_plugin\.scenario
+ devstack_localrc:
+ PHYSICAL_NETWORK: default
+ DOWNLOAD_DEFAULT_IMAGES: false
+ IMAGE_URLS: "http://cloud-images.ubuntu.com/releases/16.04/release-20180622/ubuntu-16.04-server-cloudimg-amd64-disk1.img,"
+ DEFAULT_INSTANCE_TYPE: ds512M
+ DEFAULT_INSTANCE_USER: ubuntu
+ BUILD_TIMEOUT: 784
+ LIBVIRT_TYPE: kvm
+ devstack_services:
+ cinder: true
- job:
name: neutron-tempest-plugin-scenario-linuxbridge
@@ -115,8 +131,8 @@
timeout: 10000
vars:
devstack_localrc:
- NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details"
Q_AGENT: linuxbridge
+ NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details"
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -124,6 +140,16 @@
enable_dvr: false
AGENT:
debug_iptables_rules: true
+            # NOTE(slaweq): We can get rid of this hardcoded absolute path
+            # once the devstack-tempest job is switched to use lib/neutron
+            # instead of lib/neutron-legacy
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ml2:
+ type_drivers: flat,vlan,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-queens
@@ -131,6 +157,8 @@
override-checkout: stable/queens
vars:
branch_override: stable/queens
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details"
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario
@@ -211,7 +239,7 @@
- build-openstack-sphinx-docs
- project-template:
- name: neutron-tempest-plugin-jobs-stable
+ name: neutron-tempest-plugin-jobs-queens
check:
jobs:
- neutron-tempest-plugin-api-queens
@@ -225,4 +253,4 @@
- project:
templates:
- neutron-tempest-plugin-jobs
- - neutron-tempest-plugin-jobs-stable
+ - neutron-tempest-plugin-jobs-queens
diff --git a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
index 60af89e..d449ead 100644
--- a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
+++ b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
@@ -28,7 +28,7 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('d39a96e2-2dea-4feb-8093-e7ac991ce6f8')
def test_create_port_security_false_on_shared_network(self):
- network = self.create_shared_network()
+ network = self.create_network(shared=True)
self.assertTrue(network['shared'])
self.create_subnet(network, client=self.admin_client)
self.assertRaises(lib_exc.Forbidden, self.create_port,
diff --git a/neutron_tempest_plugin/api/admin/test_networks.py b/neutron_tempest_plugin/api/admin/test_networks.py
index bb7ac24..74e72ef 100644
--- a/neutron_tempest_plugin/api/admin/test_networks.py
+++ b/neutron_tempest_plugin/api/admin/test_networks.py
@@ -24,50 +24,51 @@
@decorators.idempotent_id('d3c76044-d067-4cb0-ae47-8cdd875c7f67')
@utils.requires_ext(extension="project-id", service="network")
- def test_admin_create_network_keystone_v3(self):
+ def test_create_network_with_project(self):
project_id = self.client.tenant_id # non-admin
name = 'admin-created-with-project_id'
- new_net = self.create_network_keystone_v3(name, project_id,
- client=self.admin_client)
- self.assertEqual(name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ network = self.create_network(name, project_id=project_id,
+ client=self.admin_client)
+ self.assertEqual(name, network['name'])
+ self.assertEqual(project_id, network['project_id'])
+ self.assertEqual(project_id, network['tenant_id'])
- body = self.client.list_networks(id=new_net['id'])
- lookup_net = body['networks'][0]
- self.assertEqual(name, lookup_net['name'])
- self.assertEqual(project_id, lookup_net['project_id'])
- self.assertEqual(project_id, lookup_net['tenant_id'])
+ observed_network = self.client.list_networks(
+ id=network['id'])['networks'][0]
+ self.assertEqual(name, observed_network['name'])
+ self.assertEqual(project_id, observed_network['project_id'])
+ self.assertEqual(project_id, observed_network['tenant_id'])
@decorators.idempotent_id('8d21aaca-4364-4eb9-8b79-44b4fff6373b')
@utils.requires_ext(extension="project-id", service="network")
- def test_admin_create_network_keystone_v3_and_tenant(self):
+ def test_create_network_with_project_and_tenant(self):
project_id = self.client.tenant_id # non-admin
name = 'created-with-project-and-tenant'
- new_net = self.create_network_keystone_v3(
- name, project_id, tenant_id=project_id, client=self.admin_client)
- self.assertEqual(name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ network = self.create_network(name, project_id=project_id,
+ tenant_id=project_id,
+ client=self.admin_client)
+ self.assertEqual(name, network['name'])
+ self.assertEqual(project_id, network['project_id'])
+ self.assertEqual(project_id, network['tenant_id'])
- body = self.client.list_networks(id=new_net['id'])
- lookup_net = body['networks'][0]
- self.assertEqual(name, lookup_net['name'])
- self.assertEqual(project_id, lookup_net['project_id'])
- self.assertEqual(project_id, lookup_net['tenant_id'])
+ observed_network = self.client.list_networks(
+ id=network['id'])['networks'][0]
+ self.assertEqual(name, observed_network['name'])
+ self.assertEqual(project_id, observed_network['project_id'])
+ self.assertEqual(project_id, observed_network['tenant_id'])
@decorators.idempotent_id('08b92179-669d-45ee-8233-ef6611190809')
@utils.requires_ext(extension="project-id", service="network")
- def test_admin_create_network_keystone_v3_and_other_tenant(self):
+ def test_create_network_with_project_and_other_tenant(self):
project_id = self.client.tenant_id # non-admin
other_tenant = uuidutils.generate_uuid()
name = 'created-with-project-and-other-tenant'
e = self.assertRaises(lib_exc.BadRequest,
- self.create_network_keystone_v3, name,
- project_id, tenant_id=other_tenant,
+ self.create_network, name,
+ project_id=project_id, tenant_id=other_tenant,
client=self.admin_client)
expected_message = "'project_id' and 'tenant_id' do not match"
self.assertEqual(expected_message, e.resp_body['message'])
diff --git a/neutron_tempest_plugin/api/admin/test_ports.py b/neutron_tempest_plugin/api/admin/test_ports.py
new file mode 100644
index 0000000..cbcd933
--- /dev/null
+++ b/neutron_tempest_plugin/api/admin/test_ports.py
@@ -0,0 +1,60 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.api import base
+
+
+class PortTestCasesAdmin(base.BaseAdminNetworkTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(PortTestCasesAdmin, cls).resource_setup()
+ cls.network = cls.create_network()
+ cls.create_subnet(cls.network)
+
+ @decorators.idempotent_id('dfe8cc79-18d9-4ae8-acef-3ec6bb719bb1')
+ def test_update_mac_address(self):
+ body = self.create_port(self.network)
+ current_mac = body['mac_address']
+
+ # Verify mac_address can be successfully updated.
+ body = self.admin_client.update_port(body['id'],
+ mac_address='12:34:56:78:be:6d')
+ new_mac = body['port']['mac_address']
+ self.assertNotEqual(current_mac, new_mac)
+ self.assertEqual('12:34:56:78:be:6d', new_mac)
+
+ # Verify that port update without specifying mac_address does not
+ # change the mac address.
+ body = self.admin_client.update_port(body['port']['id'],
+ description='Port Description')
+ self.assertEqual(new_mac, body['port']['mac_address'])
+
+ @decorators.idempotent_id('dfe8cc79-18d9-4ae8-acef-3ec6bb719cc2')
+ @utils.requires_ext(extension="port-mac-address-regenerate",
+ service="network")
+ def test_regenerate_mac_address(self):
+ body = self.create_port(self.network)
+ current_mac = body['mac_address']
+ body = self.admin_client.update_port(body['id'],
+ mac_address=None)
+ new_mac = body['port']['mac_address']
+ self.assertNotEqual(current_mac, new_mac)
+ self.assertTrue(netaddr.valid_mac(new_mac))
diff --git a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
index 16375ec..cef0ffc 100644
--- a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
@@ -29,7 +29,7 @@
@classmethod
def resource_setup(cls):
super(SharedNetworksTest, cls).resource_setup()
- cls.shared_network = cls.create_shared_network()
+ cls.shared_network = cls.create_network(shared=True)
@decorators.idempotent_id('6661d219-b96d-4597-ad10-55766123421a')
def test_filtering_shared_networks(self):
@@ -84,7 +84,7 @@
@decorators.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
def test_create_update_shared_network(self):
- shared_network = self.create_shared_network()
+ shared_network = self.create_network(shared=True)
net_id = shared_network['id']
self.assertEqual('ACTIVE', shared_network['status'])
self.assertIsNotNone(shared_network['id'])
@@ -156,7 +156,7 @@
@classmethod
def resource_setup(cls):
super(AllowedAddressPairSharedNetworkTest, cls).resource_setup()
- cls.network = cls.create_shared_network()
+ cls.network = cls.create_network(shared=True)
cls.create_subnet(cls.network, client=cls.admin_client)
@decorators.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff')
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index 6246eb7..966b30d 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -60,6 +60,9 @@
# Default to ipv4.
_ip_version = const.IP_VERSION_4
+ # Derive from BaseAdminNetworkTest class to have this initialized
+ admin_client = None
+
@classmethod
def get_client_manager(cls, credential_type=None, roles=None,
force_new=None):
@@ -124,10 +127,16 @@
cls.projects = []
cls.log_objects = []
cls.reserved_subnet_cidrs = set()
+ cls.keypairs = []
+ cls.trunks = []
@classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
+ # Clean up trunks
+ for trunk in cls.trunks:
+ cls._try_delete_resource(cls.delete_trunk, trunk)
+
# Clean up floating IPs
for floating_ip in cls.floating_ips:
cls._try_delete_resource(cls.client.delete_floatingip,
@@ -170,8 +179,7 @@
subnet['id'])
# Clean up networks
for network in cls.networks:
- cls._try_delete_resource(cls.client.delete_network,
- network['id'])
+ cls._try_delete_resource(cls.delete_network, network)
# Clean up admin networks
for network in cls.admin_networks:
@@ -221,6 +229,9 @@
cls._try_delete_resource(cls.admin_client.delete_log,
log_object['id'])
+ for keypair in cls.keypairs:
+ cls._try_delete_resource(cls.delete_keypair, keypair)
+
super(BaseNetworkTest, cls).resource_cleanup()
@classmethod
@@ -244,44 +255,108 @@
pass
@classmethod
- def create_network(cls, network_name=None, client=None, **kwargs):
- """Wrapper utility that returns a test network."""
- network_name = network_name or data_utils.rand_name('test-network-')
+ def create_network(cls, network_name=None, client=None, external=None,
+ shared=None, provider_network_type=None,
+ provider_physical_network=None,
+ provider_segmentation_id=None, **kwargs):
+ """Create a network.
- client = client or cls.client
- body = client.create_network(name=network_name, **kwargs)
- network = body['network']
- if client is cls.client:
- cls.networks.append(network)
- else:
- cls.admin_networks.append(network)
+        When client is not provided and the admin_client attribute is not
+        None (for example when using the BaseAdminNetworkTest base class),
+        using any of the convenience parameters (external, shared,
+        provider_network_type, provider_physical_network and
+        provider_segmentation_id) makes it silently use admin_client. If
+        the network is not shared then it uses the same project_id as the
+        regular client.
+
+ :param network_name: Human-readable name of the network
+
+ :param client: client to be used for connecting to network service
+
+ :param external: indicates whether the network has an external routing
+ facility that's not managed by the networking service.
+
+ :param shared: indicates whether this resource is shared across all
+ projects. By default, only administrative users can change this value.
+ If True and admin_client attribute is not None, then the network is
+ created under administrative project.
+
+ :param provider_network_type: the type of physical network that this
+ network should be mapped to. For example, 'flat', 'vlan', 'vxlan', or
+ 'gre'. Valid values depend on a networking back-end.
+
+ :param provider_physical_network: the physical network where this
+ network should be implemented. The Networking API v2.0 does not provide
+ a way to list available physical networks. For example, the Open
+ vSwitch plug-in configuration file defines a symbolic name that maps to
+ specific bridges on each compute host.
+
+ :param provider_segmentation_id: The ID of the isolated segment on the
+ physical network. The network_type attribute defines the segmentation
+ model. For example, if the network_type value is 'vlan', this ID is a
+ vlan identifier. If the network_type value is 'gre', this ID is a gre
+ key.
+
+ :param **kwargs: extra parameters to be forwarded to network service
+ """
+
+ name = (network_name or kwargs.pop('name', None) or
+ data_utils.rand_name('test-network-'))
+
+ # translate convenience parameters
+ admin_client_required = False
+ if provider_network_type:
+ admin_client_required = True
+ kwargs['provider:network_type'] = provider_network_type
+ if provider_physical_network:
+ admin_client_required = True
+ kwargs['provider:physical_network'] = provider_physical_network
+ if provider_segmentation_id:
+ admin_client_required = True
+ kwargs['provider:segmentation_id'] = provider_segmentation_id
+ if external is not None:
+ admin_client_required = True
+ kwargs['router:external'] = bool(external)
+ if shared is not None:
+ admin_client_required = True
+ kwargs['shared'] = bool(shared)
+
+ if not client:
+ if admin_client_required and cls.admin_client:
+ # For convenience silently switch to admin client
+ client = cls.admin_client
+ if not shared:
+ # Keep this network visible from current project
+ project_id = (kwargs.get('project_id') or
+ kwargs.get('tenant_id') or
+ cls.client.tenant_id)
+ kwargs.update(project_id=project_id, tenant_id=project_id)
+ else:
+ # Use default client
+ client = cls.client
+
+ network = client.create_network(name=name, **kwargs)['network']
+ network['client'] = client
+ cls.networks.append(network)
return network
@classmethod
- def create_shared_network(cls, network_name=None, **post_body):
- network_name = network_name or data_utils.rand_name('sharednetwork-')
- post_body.update({'name': network_name, 'shared': True})
- body = cls.admin_client.create_network(**post_body)
- network = body['network']
- cls.admin_networks.append(network)
- return network
+ def delete_network(cls, network, client=None):
+ client = client or network.get('client') or cls.client
+ client.delete_network(network['id'])
+
+ @classmethod
+ def create_shared_network(cls, network_name=None, **kwargs):
+ return cls.create_network(name=network_name, shared=True, **kwargs)
@classmethod
def create_network_keystone_v3(cls, network_name=None, project_id=None,
tenant_id=None, client=None):
- """Wrapper utility that creates a test network with project_id."""
- client = client or cls.client
- network_name = network_name or data_utils.rand_name(
- 'test-network-with-project_id')
- project_id = cls.client.tenant_id
- body = client.create_network_keystone_v3(network_name, project_id,
- tenant_id)
- network = body['network']
- if client is cls.client:
- cls.networks.append(network)
- else:
- cls.admin_networks.append(network)
- return network
+ params = {}
+ if project_id:
+ params['project_id'] = project_id
+ if tenant_id:
+ params['tenant_id'] = tenant_id
+ return cls.create_network(name=network_name, client=client, **params)
@classmethod
def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
@@ -593,6 +668,81 @@
cls.security_groups.append(body['security_group'])
return body['security_group']
+ @classmethod
+ def create_keypair(cls, client=None, name=None, **kwargs):
+ client = client or cls.os_primary.keypairs_client
+ name = name or data_utils.rand_name('keypair-test')
+ keypair = client.create_keypair(name=name, **kwargs)['keypair']
+
+ # save client for later cleanup
+ keypair['client'] = client
+ cls.keypairs.append(keypair)
+ return keypair
+
+ @classmethod
+ def delete_keypair(cls, keypair, client=None):
+ client = (client or keypair.get('client') or
+ cls.os_primary.keypairs_client)
+ client.delete_keypair(keypair_name=keypair['name'])
+
+ @classmethod
+ def create_trunk(cls, port=None, subports=None, client=None, **kwargs):
+ """Create network trunk
+
+ :param port: dictionary containing parent port ID (port['id'])
+ :param client: client to be used for connecting to networking service
+ :param **kwargs: extra parameters to be forwarded to network service
+
+ :returns: dictionary containing created trunk details
+ """
+ client = client or cls.client
+
+ if port:
+ kwargs['port_id'] = port['id']
+
+ trunk = client.create_trunk(subports=subports, **kwargs)['trunk']
+ # Save client reference for later deletion
+ trunk['client'] = client
+ cls.trunks.append(trunk)
+ return trunk
+
+ @classmethod
+ def delete_trunk(cls, trunk, client=None):
+ """Delete network trunk
+
+ :param trunk: dictionary containing trunk ID (trunk['id'])
+
+ :param client: client to be used for connecting to networking service
+ """
+ client = client or trunk.get('client') or cls.client
+ trunk.update(client.show_trunk(trunk['id'])['trunk'])
+
+ if not trunk['admin_state_up']:
+ # Cannot touch trunk before admin_state_up is True
+ client.update_trunk(trunk['id'], admin_state_up=True)
+ if trunk['sub_ports']:
+            # Remove subports before deleting the trunk
+ cls._try_delete_resource(client.remove_subports, trunk['id'],
+ trunk['sub_ports'])
+
+ # we have to detach the interface from the server before
+ # the trunk can be deleted.
+ parent_port = {'id': trunk['port_id']}
+
+ def is_parent_port_detached():
+ parent_port.update(client.show_port(parent_port['id'])['port'])
+ return not parent_port['device_id']
+
+ if not is_parent_port_detached():
+            # This can happen when the trunk is deleted while its parent
+            # port is still attached to a running VM. Here we are assuming
+            # that device_id points to such a VM.
+ cls.os_primary.compute.InterfacesClient().delete_interface(
+ parent_port['device_id'], parent_port['id'])
+ utils.wait_until_true(is_parent_port_detached)
+
+ client.delete_trunk(trunk['id'])
+
class BaseAdminNetworkTest(BaseNetworkTest):
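
For orientation, a minimal sketch (not part of this change) of how a test could
combine the create_network() convenience parameters with the new trunk helpers
added to base.py above; the class name, the 'foo' physical network label and
the test body are hypothetical.

    # Illustrative sketch only; it relies on the helpers defined in base.py.
    class VlanTrunkExampleTest(base.BaseAdminNetworkTest):

        @classmethod
        def resource_setup(cls):
            super(VlanTrunkExampleTest, cls).resource_setup()
            # provider_network_type/shared silently switch to the admin
            # client, as documented in create_network() above.
            cls.network = cls.create_network(provider_network_type='vlan',
                                             provider_physical_network='foo',
                                             shared=True)
            cls.create_subnet(cls.network)

        def test_trunk_round_trip(self):
            parent_port = self.create_port(self.network)
            trunk = self.create_trunk(port=parent_port)
            # Trunks registered in cls.trunks are also removed automatically
            # in resource_cleanup(), so this explicit call is optional.
            self.delete_trunk(trunk)
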
diff --git a/neutron_tempest_plugin/api/clients.py b/neutron_tempest_plugin/api/clients.py
index 14f6714..ee0289c 100644
--- a/neutron_tempest_plugin/api/clients.py
+++ b/neutron_tempest_plugin/api/clients.py
@@ -15,6 +15,7 @@
from tempest.lib.services.compute import availability_zone_client
from tempest.lib.services.compute import hypervisor_client
+from tempest.lib.services.compute import interfaces_client
from tempest.lib.services.compute import keypairs_client
from tempest.lib.services.compute import servers_client
from tempest.lib.services.identity.v2 import tenants_client
@@ -75,6 +76,8 @@
enable_instance_password=CONF.compute_feature_enabled
.enable_instance_password,
**params)
+ self.interfaces_client = interfaces_client.InterfacesClient(
+ self.auth_provider, **params)
self.keypairs_client = keypairs_client.KeyPairsClient(
self.auth_provider, **params)
self.hv_client = hypervisor_client.HypervisorClient(
diff --git a/neutron_tempest_plugin/api/test_auto_allocated_topology.py b/neutron_tempest_plugin/api/test_auto_allocated_topology.py
index 37f9ad1..0baa2a8 100644
--- a/neutron_tempest_plugin/api/test_auto_allocated_topology.py
+++ b/neutron_tempest_plugin/api/test_auto_allocated_topology.py
@@ -63,7 +63,7 @@
up = {'admin_state_up': True}
networks = _count(self.client.list_networks(**up)['networks'])
- subnets = _count(self.client.list_subnets(**up)['subnets'])
+ subnets = _count(self.client.list_subnets()['subnets'])
routers = _count(self.client.list_routers(**up)['routers'])
return networks, subnets, routers
diff --git a/neutron_tempest_plugin/api/test_extensions.py b/neutron_tempest_plugin/api/test_extensions.py
index 1462ae1..5b7fe67 100644
--- a/neutron_tempest_plugin/api/test_extensions.py
+++ b/neutron_tempest_plugin/api/test_extensions.py
@@ -11,31 +11,46 @@
# under the License.
from tempest.common import utils
+from tempest import config
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
+CONF = config.CONF
+
+
class ExtensionsTest(base.BaseNetworkTest):
- def _test_list_extensions_includes(self, ext):
+ def _test_list_extensions_includes(self, exts):
body = self.client.list_extensions()
extensions = {ext_['alias'] for ext_ in body['extensions']}
self.assertNotEmpty(extensions, "Extension list returned is empty")
- ext_enabled = utils.is_extension_enabled(ext, "network")
- if ext_enabled:
- self.assertIn(ext, extensions)
- else:
- self.assertNotIn(ext, extensions)
+ for ext in exts:
+ ext_enabled = utils.is_extension_enabled(ext, "network")
+ if ext_enabled:
+ self.assertIn(ext, extensions)
+ else:
+ self.assertNotIn(ext, extensions)
@decorators.idempotent_id('262420b7-a4bb-4a3e-b4b5-e73bad18df8c')
def test_list_extensions_sorting(self):
- self._test_list_extensions_includes('sorting')
+ self._test_list_extensions_includes(['sorting'])
@decorators.idempotent_id('19db409e-a23f-445d-8bc8-ca3d64c84706')
def test_list_extensions_pagination(self):
- self._test_list_extensions_includes('pagination')
+ self._test_list_extensions_includes(['pagination'])
@decorators.idempotent_id('155b7bc2-e358-4dd8-bf3e-1774c084567f')
def test_list_extensions_project_id(self):
- self._test_list_extensions_includes('project-id')
+ self._test_list_extensions_includes(['project-id'])
+
+ @decorators.idempotent_id('c7597fac-2404-45b1-beb4-523c8b1d4604')
+ def test_list_extensions_includes_all(self):
+ extensions = CONF.network_feature_enabled.api_extensions
+ if not extensions:
+ raise self.skipException("Extension list is empty")
+ if extensions[0] == 'all':
+ raise self.skipException("No lists of enabled extensions provided")
+
+ self._test_list_extensions_includes(extensions)
diff --git a/neutron_tempest_plugin/api/test_networks.py b/neutron_tempest_plugin/api/test_networks.py
index 19f4fcb..c4b3596 100644
--- a/neutron_tempest_plugin/api/test_networks.py
+++ b/neutron_tempest_plugin/api/test_networks.py
@@ -75,28 +75,29 @@
@decorators.idempotent_id('0cc0552f-afaf-4231-b7a7-c2a1774616da')
@utils.requires_ext(extension="project-id", service="network")
- def test_create_network_keystone_v3(self):
+ def test_create_network_with_project(self):
project_id = self.client.tenant_id
name = 'created-with-project_id'
- new_net = self.create_network_keystone_v3(name, project_id)
- self.assertEqual(name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ network = self.create_network(name, project_id=project_id)
+ self.assertEqual(name, network['name'])
+ self.assertEqual(project_id, network['project_id'])
+ self.assertEqual(project_id, network['tenant_id'])
- body = self.client.list_networks(id=new_net['id'])['networks'][0]
- self.assertEqual(name, body['name'])
+ observed_network = self.client.list_networks(
+ id=network['id'])['networks'][0]
+ self.assertEqual(name, observed_network['name'])
new_name = 'create-with-project_id-2'
- body = self.client.update_network(new_net['id'], name=new_name)
- new_net = body['network']
- self.assertEqual(new_name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ updated_network = self.client.update_network(
+ network['id'], name=new_name)['network']
+ self.assertEqual(new_name, updated_network['name'])
+ self.assertEqual(project_id, updated_network['project_id'])
+ self.assertEqual(project_id, updated_network['tenant_id'])
@decorators.idempotent_id('94e2a44c-3367-4253-8c2a-22deaf59e96c')
@utils.requires_ext(extension="dns-integration",
- service="network")
+ service="network")
def test_create_update_network_dns_domain(self):
domain1 = 'test.org.'
body = self.create_network(dns_domain=domain1)
@@ -208,6 +209,7 @@
def test_list_no_pagination_limit_0(self):
self._test_list_no_pagination_limit_0()
+ @decorators.skip_because(bug="1749820")
@decorators.idempotent_id('3574ec9b-a8b8-43e3-9c11-98f5875df6a9')
def test_list_validation_filters(self):
self._test_list_validation_filters()
diff --git a/neutron_tempest_plugin/api/test_ports.py b/neutron_tempest_plugin/api/test_ports.py
index 5a01798..3b877c2 100644
--- a/neutron_tempest_plugin/api/test_ports.py
+++ b/neutron_tempest_plugin/api/test_ports.py
@@ -108,7 +108,7 @@
body = self.client.update_port(body['id'],
dns_name='d2', dns_domain='d.org.')
self.assertEqual('d2', body['port']['dns_name'])
- self.assertEqual('d.org.', body['dns_domain'])
+ self.assertEqual('d.org.', body['port']['dns_domain'])
self._confirm_dns_assignment(body['port'])
body = self.client.show_port(body['port']['id'])['port']
self.assertEqual('d2', body['dns_name'])
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index d31eab8..2bf99bf 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -209,8 +209,8 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False)
- network = self.create_shared_network('test network',
- qos_policy_id=policy['id'])
+ network = self.create_network('test network', shared=True,
+ qos_policy_id=policy['id'])
retrieved_network = self.admin_client.show_network(network['id'])
self.assertEqual(
@@ -251,7 +251,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
retrieved_network = self.admin_client.show_network(network['id'])
self.assertIsNone(retrieved_network['network']['qos_policy_id'])
@@ -266,7 +266,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
port = self.create_port(network, qos_policy_id=policy['id'])
retrieved_port = self.admin_client.show_port(port['id'])
@@ -275,7 +275,7 @@
@decorators.idempotent_id('49e02f5a-e1dd-41d5-9855-cfa37f2d195e')
def test_policy_association_with_port_nonexistent_policy(self):
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
self.assertRaises(
exceptions.NotFound,
self.create_port,
@@ -287,7 +287,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
self.assertRaises(
exceptions.NotFound,
self.create_port,
@@ -298,7 +298,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
port = self.create_port(network)
retrieved_port = self.admin_client.show_port(port['id'])
self.assertIsNone(retrieved_port['port']['qos_policy_id'])
@@ -313,7 +313,8 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- self.create_shared_network('test network', qos_policy_id=policy['id'])
+ self.create_network('test network', qos_policy_id=policy['id'],
+ shared=True)
self.assertRaises(
exceptions.Conflict,
self.admin_client.delete_qos_policy, policy['id'])
@@ -323,7 +324,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
self.create_port(network, qos_policy_id=policy['id'])
self.assertRaises(
exceptions.Conflict,
diff --git a/neutron_tempest_plugin/api/test_subnetpools.py b/neutron_tempest_plugin/api/test_subnetpools.py
index ec3753a..8adbc4c 100644
--- a/neutron_tempest_plugin/api/test_subnetpools.py
+++ b/neutron_tempest_plugin/api/test_subnetpools.py
@@ -414,6 +414,7 @@
def test_list_no_pagination_limit_0(self):
self._test_list_no_pagination_limit_0()
+ @decorators.skip_because(bug="1749820")
@decorators.idempotent_id('27feb3f8-40f4-4e50-8cd2-7d0096a98682')
def test_list_validation_filters(self):
self._test_list_validation_filters()
diff --git a/neutron_tempest_plugin/api/test_subnets.py b/neutron_tempest_plugin/api/test_subnets.py
index fb2f4d6..b7a1b21 100644
--- a/neutron_tempest_plugin/api/test_subnets.py
+++ b/neutron_tempest_plugin/api/test_subnets.py
@@ -64,6 +64,7 @@
def test_list_no_pagination_limit_0(self):
self._test_list_no_pagination_limit_0()
+ @decorators.skip_because(bug="1749820")
@decorators.idempotent_id('c0f9280b-9d81-4728-a967-6be22659d4c8')
def test_list_validation_filters(self):
self._test_list_validation_filters()
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index e02cf92..1a000fd 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -240,10 +240,9 @@
def create_provider_network(self):
foo_net = config.CONF.neutron_plugin_options.provider_vlans[0]
- post_body = {'network_name': data_utils.rand_name('vlan-net'),
- 'provider:network_type': 'vlan',
- 'provider:physical_network': foo_net}
- return self.create_shared_network(**post_body)
+ return self.create_network(name=data_utils.rand_name('vlan-net'),
+ provider_network_type='vlan',
+ provider_physical_network=foo_net)
@decorators.idempotent_id('0f05d98e-41f5-4629-dada-9aee269c9602')
def test_add_subport(self):
@@ -286,13 +285,13 @@
super(TrunkTestMtusJSONBase, self).setUp()
# VXLAN autocomputed MTU (1450) is smaller than that of GRE (1458)
- vxlan_kwargs = {'network_name': data_utils.rand_name('vxlan-net'),
- 'provider:network_type': 'vxlan'}
- self.smaller_mtu_net = self.create_shared_network(**vxlan_kwargs)
+ self.smaller_mtu_net = self.create_network(
+ name=data_utils.rand_name('vxlan-net'),
+ provider_network_type='vxlan')
- gre_kwargs = {'network_name': data_utils.rand_name('gre-net'),
- 'provider:network_type': 'gre'}
- self.larger_mtu_net = self.create_shared_network(**gre_kwargs)
+ self.larger_mtu_net = self.create_network(
+ name=data_utils.rand_name('gre-net'),
+ provider_network_type='gre')
self.smaller_mtu_port = self.create_port(self.smaller_mtu_net)
self.smaller_mtu_port_2 = self.create_port(self.smaller_mtu_net)
diff --git a/neutron_tempest_plugin/common/socat.py b/neutron_tempest_plugin/common/socat.py
new file mode 100644
index 0000000..6bd1fdc
--- /dev/null
+++ b/neutron_tempest_plugin/common/socat.py
@@ -0,0 +1,105 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+COMMAND = 'socat'
+
+
+class SocatAddress(object):
+
+ def __init__(self, address, args=None, options=None):
+ self.address = address
+ self.args = args
+ self.options = options
+
+ @classmethod
+ def udp_datagram(cls, host, port, options=None, ip_version=None):
+ address = 'UDP{}-DATAGRAM'.format(ip_version or '')
+ return cls(address, (host, int(port)), options)
+
+ @classmethod
+ def udp_recvfrom(cls, port, options=None, ip_version=None):
+ address = 'UDP{}-RECVFROM'.format(ip_version or '')
+ return cls(address, (int(port),), options)
+
+ @classmethod
+ def stdio(cls):
+ return cls('STDIO')
+
+ def __str__(self):
+ address = self.address
+ if self.args:
+ address += ':' + ':'.join(str(a) for a in self.args)
+ if self.options:
+ address += ',' + ','.join(str(o) for o in self.options)
+ return address
+
+ def format(self, *args, **kwargs):
+ return str(self).format(*args, **kwargs)
+
+
+STDIO = SocatAddress.stdio()
+
+
+class SocatOption(object):
+
+ def __init__(self, name, *args):
+ self.name = name
+ self.args = args
+
+ @classmethod
+ def bind(cls, host):
+ return cls('bind', host)
+
+ @classmethod
+ def fork(cls):
+ return cls('fork')
+
+ @classmethod
+ def ip_multicast_ttl(cls, ttl):
+ return cls('ip-multicast-ttl', int(ttl))
+
+ @classmethod
+ def ip_multicast_if(cls, interface_address):
+ return cls('ip-multicast-if', interface_address)
+
+ @classmethod
+ def ip_add_membership(cls, multicast_address, interface_address):
+ return cls('ip-add-membership', multicast_address, interface_address)
+
+ def __str__(self):
+ result = self.name
+ args = self.args
+ if args:
+ result += '=' + ':'.join(str(a) for a in args)
+ return result
+
+
+class SocatCommand(object):
+
+ def __init__(self, source=STDIO, destination=STDIO, command=COMMAND):
+ self.source = source
+ self.destination = destination
+ self.command = command
+
+ def __str__(self):
+ words = [self.command, self.source, self.destination]
+ return ' '.join(str(obj) for obj in words)
+
+
+def socat_command(source=STDIO, destination=STDIO, command=COMMAND):
+ command = SocatCommand(source=source, destination=destination,
+ command=command)
+ return str(command)
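
For reference, a short sketch of how the socat helpers above compose into
command strings; the port and multicast address are arbitrary example values.

    # Illustrative usage of the helpers defined in common/socat.py above.
    from neutron_tempest_plugin.common import socat

    # 'socat UDP-RECVFROM:5060,fork STDIO'
    listener = socat.socat_command(
        source=socat.SocatAddress.udp_recvfrom(
            5060, options=[socat.SocatOption.fork()]),
        destination=socat.STDIO)

    # 'socat STDIO UDP-DATAGRAM:239.1.1.1:5060,ip-multicast-ttl=8'
    sender = socat.socat_command(
        source=socat.STDIO,
        destination=socat.SocatAddress.udp_datagram(
            '239.1.1.1', 5060,
            options=[socat.SocatOption.ip_multicast_ttl(8)]))
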
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index b919b65..99f731c 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -12,13 +12,103 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+
+from oslo_log import log
from tempest.lib.common import ssh
from neutron_tempest_plugin import config
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
class Client(ssh.Client):
- def __init__(self, *args, **kwargs):
- if 'timeout' not in kwargs:
- kwargs['timeout'] = config.CONF.validation.ssh_timeout
- super(Client, self).__init__(*args, **kwargs)
+
+ timeout = CONF.validation.ssh_timeout
+
+ proxy_jump_host = CONF.neutron_plugin_options.ssh_proxy_jump_host
+ proxy_jump_username = CONF.neutron_plugin_options.ssh_proxy_jump_username
+ proxy_jump_password = CONF.neutron_plugin_options.ssh_proxy_jump_password
+ proxy_jump_keyfile = CONF.neutron_plugin_options.ssh_proxy_jump_keyfile
+ proxy_jump_port = CONF.neutron_plugin_options.ssh_proxy_jump_port
+
+ def __init__(self, host, username, password=None, timeout=None, pkey=None,
+ channel_timeout=10, look_for_keys=False, key_filename=None,
+ port=22, proxy_client=None):
+
+ timeout = timeout or self.timeout
+
+ if self.proxy_jump_host:
+            # Perform all SSH connections through the configured proxy server
+ proxy_client = proxy_client or self.create_proxy_client(
+ timeout=timeout, channel_timeout=channel_timeout)
+
+ super(Client, self).__init__(
+ host=host, username=username, password=password, timeout=timeout,
+ pkey=pkey, channel_timeout=channel_timeout,
+ look_for_keys=look_for_keys, key_filename=key_filename, port=port,
+ proxy_client=proxy_client)
+
+ @classmethod
+ def create_proxy_client(cls, look_for_keys=True, **kwargs):
+ host = cls.proxy_jump_host
+ if not host:
+ # proxy_jump_host string cannot be empty or None
+ raise ValueError(
+ "'proxy_jump_host' configuration option is empty.")
+
+        # Accept an empty string as a synonym for the default value of the
+        # options below
+ password = cls.proxy_jump_password or None
+ key_file = cls.proxy_jump_keyfile or None
+ username = cls.proxy_jump_username
+
+        # Port must be a valid TCP port number (1-65535)
+ port = cls.proxy_jump_port
+ if port <= 0 or port > 65535:
+ raise ValueError(
+ "Invalid value for 'proxy_jump_port' configuration option: "
+ "{!r}".format(port))
+
+ login = "{username}@{host}:{port}".format(username=username, host=host,
+ port=port)
+
+ if key_file:
+            # expand '~' to the user's HOME directory
+ key_file = os.path.expanduser(key_file)
+ if os.path.isfile(key_file):
+ LOG.debug("Going to create SSH connection to %r using key "
+ "file: %s", login, key_file)
+
+ else:
+ # This message could help the user to identify a
+ # mis-configuration in tempest.conf
+ raise ValueError(
+ "Cannot find file specified as 'proxy_jump_keyfile' "
+ "option: {!r}".format(key_file))
+
+ elif password:
+ LOG.debug("Going to create SSH connection to %r using password.",
+ login)
+
+ elif look_for_keys:
+ # This message could help the user to identify a mis-configuration
+ # in tempest.conf
+ LOG.info("Both 'proxy_jump_password' and 'proxy_jump_keyfile' "
+ "options are empty. Going to create SSH connection to %r "
+ "looking for key file location into %r directory.",
+ login, os.path.expanduser('~/.ssh'))
+ else:
+            # A user that forces look_for_keys=False should really know
+            # what they want
+ LOG.warning("No authentication method provided to create an SSH "
+ "connection to %r. If it fails, then please "
+ "set 'proxy_jump_keyfile' to provide a valid SSH key "
+ "file.", login)
+
+ return ssh.Client(
+ host=host, username=username, password=password,
+ look_for_keys=look_for_keys, key_filename=key_file,
+ port=port, proxy_client=None, **kwargs)
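
A minimal sketch of how the proxied Client above is meant to be used once the
ssh_proxy_jump_* options (added to config.py below) are set in tempest.conf;
the host, user and key path are placeholder values.

    # Illustrative only: every connection created this way is tunnelled
    # through neutron_plugin_options.ssh_proxy_jump_host when that option
    # is set, with no change needed in the calling test code.
    from neutron_tempest_plugin.common import ssh

    ssh_client = ssh.Client(
        host='203.0.113.10',                # e.g. a server's floating IP
        username='ubuntu',                  # guest image login user
        key_filename='/path/to/guest_key')  # private key for the guest
    output = ssh_client.exec_command('hostname')
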
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index fc07e81..e15748d 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -56,7 +56,25 @@
'"provider:network_type":<TYPE> - string '
'"mtu":<MTU> - integer '
'"cidr"<SUBNET/MASK> - string '
- '"provider:segmentation_id":<VLAN_ID> - integer')
+ '"provider:segmentation_id":<VLAN_ID> - integer'),
+
+    # Options for connecting via SSH to VMs through an intermediate SSH
+    # server (proxy jump host)
+ cfg.StrOpt('ssh_proxy_jump_host',
+ default=None,
+               help='Proxy jump host used to connect via SSH to VMs.'),
+ cfg.StrOpt('ssh_proxy_jump_username',
+ default='root',
+ help='User name used to connect to "ssh_proxy_jump_host".'),
+ cfg.StrOpt('ssh_proxy_jump_password',
+ default=None,
+ help='Password used to connect to "ssh_proxy_jump_host".'),
+ cfg.StrOpt('ssh_proxy_jump_keyfile',
+ default=None,
+ help='Keyfile used to connect to "ssh_proxy_jump_host".'),
+ cfg.IntOpt('ssh_proxy_jump_port',
+ default=22,
+ help='Port used to connect to "ssh_proxy_jump_host".'),
]
# TODO(amuller): Redo configuration options registration as part of the planned
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index b76a81a..3adaa1e 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -15,6 +15,8 @@
import subprocess
import netaddr
+from neutron_lib.api import validators
+from neutron_lib import constants as neutron_lib_constants
from oslo_log import log
from tempest.common.utils import net_utils
from tempest.common import waiters
@@ -33,20 +35,6 @@
class BaseTempestTestCase(base_api.BaseNetworkTest):
- @classmethod
- def resource_setup(cls):
- super(BaseTempestTestCase, cls).resource_setup()
-
- cls.keypairs = []
-
- @classmethod
- def resource_cleanup(cls):
- for keypair in cls.keypairs:
- client = keypair['client']
- client.delete_keypair(
- keypair_name=keypair['keypair']['name'])
-
- super(BaseTempestTestCase, cls).resource_cleanup()
def create_server(self, flavor_ref, image_ref, key_name, networks,
**kwargs):
@@ -105,17 +93,6 @@
return server
@classmethod
- def create_keypair(cls, client=None):
- client = client or cls.os_primary.keypairs_client
- name = data_utils.rand_name('keypair-test')
- body = client.create_keypair(name=name)
- body.update(client=client)
- if client is cls.os_primary.keypairs_client:
- cls.keypairs.append(body)
-
- return body['keypair']
-
- @classmethod
def create_secgroup_rules(cls, rule_list, secgroup_id=None,
client=None):
client = client or cls.os_primary.network_client
@@ -190,7 +167,17 @@
self.floating_ips.append(fip)
return fip
- def setup_network_and_server(self, router=None, **kwargs):
+ def create_interface(cls, server_id, port_id, client=None):
+ client = client or cls.os_primary.interfaces_client
+ body = client.create_interface(server_id, port_id=port_id)
+ return body['interfaceAttachment']
+
+ def delete_interface(cls, server_id, port_id, client=None):
+ client = client or cls.os_primary.interfaces_client
+ client.delete_interface(server_id, port_id=port_id)
+
+ def setup_network_and_server(
+ self, router=None, server_name=None, **kwargs):
"""Create network resources and a server.
Creating a network, subnet, router, keypair, security group
@@ -212,12 +199,18 @@
self.keypair = self.create_keypair()
self.create_loginable_secgroup_rule(
secgroup_id=secgroup['security_group']['id'])
- self.server = self.create_server(
- flavor_ref=CONF.compute.flavor_ref,
- image_ref=CONF.compute.image_ref,
- key_name=self.keypair['name'],
- networks=[{'uuid': self.network['id']}],
- security_groups=[{'name': secgroup['security_group']['name']}])
+
+ server_kwargs = {
+ 'flavor_ref': CONF.compute.flavor_ref,
+ 'image_ref': CONF.compute.image_ref,
+ 'key_name': self.keypair['name'],
+ 'networks': [{'uuid': self.network['id']}],
+ 'security_groups': [{'name': secgroup['security_group']['name']}],
+ }
+ if server_name is not None:
+ server_kwargs['name'] = server_name
+
+ self.server = self.create_server(**server_kwargs)
self.wait_for_server_active(self.server['server'])
self.port = self.client.list_ports(network_id=self.network['id'],
device_id=self.server[
@@ -252,7 +245,8 @@
"for the console log", server['id'])
def _check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None, mtu=None, fragmentation=True):
+ nic=None, mtu=None, fragmentation=True,
+ timeout=None):
"""check ping server via source ssh connection
:param source: RemoteClient: an ssh connection from which to ping
@@ -267,15 +261,21 @@
def ping_host(source, host, count=CONF.validation.ping_count,
size=CONF.validation.ping_size, nic=None, mtu=None,
fragmentation=True):
- addr = netaddr.IPAddress(host)
- cmd = 'ping6' if addr.version == 6 else 'ping'
+ IP_VERSION_4 = neutron_lib_constants.IP_VERSION_4
+ IP_VERSION_6 = neutron_lib_constants.IP_VERSION_6
+
+ # Use 'ping6' for IPv6 addresses, 'ping' for IPv4 and hostnames
+ ip_version = (
+ IP_VERSION_6 if netaddr.valid_ipv6(host) else IP_VERSION_4)
+ cmd = (
+ 'ping6' if ip_version == IP_VERSION_6 else 'ping')
if nic:
cmd = 'sudo {cmd} -I {nic}'.format(cmd=cmd, nic=nic)
if mtu:
if not fragmentation:
cmd += ' -M do'
size = str(net_utils.get_ping_payload_size(
- mtu=mtu, ip_version=addr.version))
+ mtu=mtu, ip_version=ip_version))
cmd += ' -c{0} -w{0} -s{1} {2}'.format(count, size, host)
return source.exec_command(cmd)
@@ -289,22 +289,24 @@
'from: %s.', dest, source.host)
return not should_succeed
LOG.debug('ping result: %s', result)
- # Assert that the return traffic was from the correct
- # source address.
- from_source = 'from %s' % dest
- self.assertIn(from_source, result)
+
+ if validators.validate_ip_address(dest) is None:
+ # Assert that the return traffic was from the correct
+ # source address.
+ from_source = 'from %s' % dest
+ self.assertIn(from_source, result)
return should_succeed
- return test_utils.call_until_true(ping_remote,
- CONF.validation.ping_timeout,
- 1)
+ return test_utils.call_until_true(
+ ping_remote, timeout or CONF.validation.ping_timeout, 1)
def check_remote_connectivity(self, source, dest, should_succeed=True,
nic=None, mtu=None, fragmentation=True,
- servers=None):
+ servers=None, timeout=None):
try:
self.assertTrue(self._check_remote_connectivity(
- source, dest, should_succeed, nic, mtu, fragmentation))
+ source, dest, should_succeed, nic, mtu, fragmentation,
+ timeout=timeout))
except lib_exc.SSHTimeout as ssh_e:
LOG.debug(ssh_e)
self._log_console_output(servers)
diff --git a/neutron_tempest_plugin/scenario/test_floatingip.py b/neutron_tempest_plugin/scenario/test_floatingip.py
index bc40176..504af12 100644
--- a/neutron_tempest_plugin/scenario/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/test_floatingip.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from neutron_lib import constants as lib_constants
from neutron_lib.services.qos import constants as qos_consts
from tempest.common import utils
@@ -26,6 +28,7 @@
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils as common_utils
from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants
from neutron_tempest_plugin.scenario import test_qos
@@ -199,8 +202,95 @@
gateway_external_ip)
+class FloatingIPPortDetailsTest(FloatingIpTestCasesMixin,
+ base.BaseTempestTestCase):
+ same_network = True
+
+ @classmethod
+ @utils.requires_ext(extension="router", service="network")
+ @utils.requires_ext(extension="fip-port-details", service="network")
+ def resource_setup(cls):
+ super(FloatingIPPortDetailsTest, cls).resource_setup()
+
+ @decorators.idempotent_id('a663aeee-dd81-492b-a207-354fd6284dbe')
+ def test_floatingip_port_details(self):
+ """Tests the following:
+
+ 1. Create a port with floating ip in Neutron.
+ 2. Create two servers in Nova.
+ 3. Attach the port to the server.
+ 4. Detach the port from the server.
+ 5. Attach the port to the second server.
+ 6. Detach the port from the second server.
+ """
+ port = self.create_port(self.network)
+ fip = self.create_and_associate_floatingip(port['id'])
+ server1 = self._create_server(create_floating_ip=False)
+ server2 = self._create_server(create_floating_ip=False)
+
+ for server in [server1, server2]:
+ # attach the port to the server
+ self.create_interface(
+ server['server']['id'], port_id=port['id'])
+ waiters.wait_for_interface_status(
+ self.os_primary.interfaces_client, server['server']['id'],
+ port['id'], 'ACTIVE')
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self._check_port_details(
+ fip, port, status='ACTIVE',
+ device_id=server['server']['id'], device_owner='compute:nova')
+
+ # detach the port from the server; this is a cast in the compute
+ # API so we have to poll the port until the device_id is unset.
+ self.delete_interface(server['server']['id'], port['id'])
+ self._wait_for_port_detach(port['id'])
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self._check_port_details(
+ fip, port, status='DOWN', device_id='', device_owner='')
+
+ def _check_port_details(self, fip, port, status, device_id, device_owner):
+ self.assertIn('port_details', fip)
+ port_details = fip['port_details']
+ self.assertEqual(port['name'], port_details['name'])
+ self.assertEqual(port['network_id'], port_details['network_id'])
+ self.assertEqual(port['mac_address'], port_details['mac_address'])
+ self.assertEqual(port['admin_state_up'],
+ port_details['admin_state_up'])
+ self.assertEqual(status, port_details['status'])
+ self.assertEqual(device_id, port_details['device_id'])
+ self.assertEqual(device_owner, port_details['device_owner'])
+
+ def _wait_for_port_detach(self, port_id, timeout=120, interval=10):
+ """Waits for the port's device_id to be unset.
+
+ :param port_id: The id of the port being detached.
+ :returns: The final port dict from the show_port response.
+ """
+ port = self.client.show_port(port_id)['port']
+ device_id = port['device_id']
+ start = int(time.time())
+
+ # NOTE(mriedem): Nova updates the port's device_id to '' rather than
+ # None, but it's not contractual so handle Falsey either way.
+ while device_id:
+ time.sleep(interval)
+ port = self.client.show_port(port_id)['port']
+ device_id = port['device_id']
+
+ timed_out = int(time.time()) - start >= timeout
+
+ if device_id and timed_out:
+ message = ('Port %s failed to detach (device_id %s) within '
+ 'the required time (%s s).' %
+ (port_id, device_id, timeout))
+ raise exceptions.TimeoutException(message)
+
+ return port
+
+
class FloatingIPQosTest(FloatingIpTestCasesMixin,
- test_qos.QoSTest):
+ test_qos.QoSTestMixin,
+ base.BaseTempestTestCase):
same_network = True
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
new file mode 100644
index 0000000..dd89727
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -0,0 +1,73 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+
+
+class InternalDNSTest(base.BaseTempestTestCase):
+
+ @utils.requires_ext(extension="dns-integration", service="network")
+ @decorators.idempotent_id('988347de-07af-471a-abfa-65aea9f452a6')
+ def test_dns_name(self):
+ """Test the ability to ping a VM's hostname from another VM.
+
+ 1) Create two VMs on the same network, giving each a name
+ 2) SSH into the first VM:
+ 2.1) ping the other VM's internal IP
+ 2.2) ping the other VM's hostname
+ """
+
+ self.setup_network_and_server(server_name='luke')
+ self.create_pingable_secgroup_rule(
+ secgroup_id=self.security_groups[-1]['id'])
+ self.check_connectivity(self.fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ self.keypair['private_key'])
+
+ leia = self.create_server(
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ key_name=self.keypair['name'],
+ networks=[{'uuid': self.network['id']}],
+ security_groups=[
+ {'name': self.security_groups[-1]['name']}],
+ name='leia')
+ self.wait_for_server_active(leia['server'])
+
+ ssh_client = ssh.Client(
+ self.fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+
+ self.assertIn('luke', ssh_client.exec_command('hostname'))
+
+ leia_port = self.client.list_ports(
+ network_id=self.network['id'],
+ device_id=leia['server']['id'])['ports'][0]
+
+ # Ping with a higher timeout because spawning 2 VMs in some
+ # environments can put significant load on the deployment, resulting
+ # in very long boot times.
+ self.check_remote_connectivity(
+ ssh_client, leia_port['fixed_ips'][0]['ip_address'],
+ timeout=CONF.validation.ping_timeout * 10)
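+ # Pinging 'leia' by name relies on Neutron's internal DNS resolution
+ # (the dns-integration extension required by this test).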
+ self.check_remote_connectivity(ssh_client, 'leia')
diff --git a/neutron_tempest_plugin/scenario/test_migration.py b/neutron_tempest_plugin/scenario/test_migration.py
index 5e081f1..f4b918c 100644
--- a/neutron_tempest_plugin/scenario/test_migration.py
+++ b/neutron_tempest_plugin/scenario/test_migration.py
@@ -67,6 +67,19 @@
device_owner),
timeout=300, sleep=5)
+ def _wait_until_router_ports_down(self, router_id):
+
+ def _is_port_down(port_id):
+ port = self.os_admin.network_client.show_port(port_id).get('port')
+ return port['status'] == const.DOWN
+
+ ports = self.os_admin.network_client.list_ports(
+ device_id=router_id).get('ports')
+ for port in ports:
+ common_utils.wait_until_true(
+ functools.partial(_is_port_down, port['id']),
+ timeout=300, sleep=5)
+
def _is_port_active(self, router_id, device_owner):
ports = self.os_admin.network_client.list_ports(
device_id=router_id,
@@ -120,6 +133,8 @@
self.os_admin.network_client.update_router(
router_id=router['id'], admin_state_up=False)
+ self._wait_until_router_ports_down(router['id'])
+
self.os_admin.network_client.update_router(
router_id=router['id'], distributed=after_dvr, ha=after_ha)
self._check_update(router, after_dvr, after_ha)
diff --git a/neutron_tempest_plugin/scenario/test_mtu.py b/neutron_tempest_plugin/scenario/test_mtu.py
index 0e3afe9..dbfde9b 100644
--- a/neutron_tempest_plugin/scenario/test_mtu.py
+++ b/neutron_tempest_plugin/scenario/test_mtu.py
@@ -19,6 +19,7 @@
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+import testtools
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin import config
@@ -118,6 +119,9 @@
self.keypair['private_key'])
return server_ssh_client1, fip1, server_ssh_client2, fip2
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.image_is_advanced,
+ "Advanced image is required to run this test.")
@decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a273d9d344')
def test_connectivity_min_max_mtu(self):
server_ssh_client, _, _, fip2 = self._create_setup()
@@ -207,6 +211,9 @@
self.keypair['private_key'])
return server_ssh_client1, fip1, server_ssh_client2, fip2
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.image_is_advanced,
+ "Advanced image is required to run this test.")
@decorators.idempotent_id('bc470200-d8f4-4f07-b294-1b4cbaaa35b9')
def test_connectivity_min_max_mtu(self):
server_ssh_client, _, _, fip2 = self._create_setup()
diff --git a/neutron_tempest_plugin/scenario/test_qos.py b/neutron_tempest_plugin/scenario/test_qos.py
index 0611160..702bbaa 100644
--- a/neutron_tempest_plugin/scenario/test_qos.py
+++ b/neutron_tempest_plugin/scenario/test_qos.py
@@ -66,7 +66,7 @@
port=port)
-class QoSTest(base.BaseTempestTestCase):
+class QoSTestMixin(object):
credentials = ['primary', 'admin']
force_tenant_isolation = False
@@ -81,22 +81,16 @@
NC_PORT = 1234
- @classmethod
- @tutils.requires_ext(extension="qos", service="network")
- @base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
- def resource_setup(cls):
- super(QoSTest, cls).resource_setup()
-
def _create_file_for_bw_tests(self, ssh_client):
cmd = ("(dd if=/dev/zero bs=%(bs)d count=%(count)d of=%(file_path)s) "
- % {'bs': QoSTest.BS, 'count': QoSTest.COUNT,
- 'file_path': QoSTest.FILE_PATH})
+ % {'bs': QoSTestMixin.BS, 'count': QoSTestMixin.COUNT,
+ 'file_path': QoSTestMixin.FILE_PATH})
ssh_client.exec_command(cmd)
- cmd = "stat -c %%s %s" % QoSTest.FILE_PATH
+ cmd = "stat -c %%s %s" % QoSTestMixin.FILE_PATH
filesize = ssh_client.exec_command(cmd)
- if int(filesize.strip()) != QoSTest.FILE_SIZE:
+ if int(filesize.strip()) != QoSTestMixin.FILE_SIZE:
raise sc_exceptions.FileCreationFailedException(
- file=QoSTest.FILE_PATH)
+ file=QoSTestMixin.FILE_PATH)
def _check_bw(self, ssh_client, host, port):
cmd = "killall -q nc"
@@ -105,15 +99,15 @@
except exceptions.SSHExecCommandFailed:
pass
cmd = ("(nc -ll -p %(port)d < %(file_path)s > /dev/null &)" % {
- 'port': port, 'file_path': QoSTest.FILE_PATH})
+ 'port': port, 'file_path': QoSTestMixin.FILE_PATH})
ssh_client.exec_command(cmd)
start_time = time.time()
client_socket = _connect_socket(host, port)
total_bytes_read = 0
- while total_bytes_read < QoSTest.FILE_SIZE:
- data = client_socket.recv(QoSTest.BUFFER_SIZE)
+ while total_bytes_read < QoSTestMixin.FILE_SIZE:
+ data = client_socket.recv(QoSTestMixin.BUFFER_SIZE)
total_bytes_read += len(data)
time_elapsed = time.time() - start_time
@@ -126,7 +120,7 @@
'total_bytes_read': total_bytes_read,
'bytes_per_second': bytes_per_second})
- return bytes_per_second <= QoSTest.LIMIT_BYTES_SEC
+ return bytes_per_second <= QoSTestMixin.LIMIT_BYTES_SEC
def _create_ssh_client(self):
return ssh.Client(self.fip['floating_ip_address'],
@@ -153,6 +147,14 @@
shared=True)
return policy['policy']['id']
+
+class QoSTest(QoSTestMixin, base.BaseTempestTestCase):
+ @classmethod
+ @tutils.requires_ext(extension="qos", service="network")
+ @base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
+ def resource_setup(cls):
+ super(QoSTest, cls).resource_setup()
+
@decorators.idempotent_id('1f7ed39b-428f-410a-bd2b-db9f465680df')
def test_qos(self):
"""This is a basic test that check that a QoS policy with
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 930cbfd..b316ce4 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -745,26 +745,23 @@
body = jsonutils.loads(body)
return service_client.ResponseBody(resp, body)
- def create_trunk(self, parent_port_id, subports,
+ def create_trunk(self, parent_port_id=None, subports=None,
tenant_id=None, name=None, admin_state_up=None,
- description=None):
+ description=None, **kwargs):
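+ # Illustrative usage (the subport values below are only an example):
+ # create_trunk(port_id, subports=[{'port_id': subport_id,
+ # 'segmentation_type': 'vlan', 'segmentation_id': 101}])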
uri = '%s/trunks' % self.uri_prefix
- post_data = {
- 'trunk': {
- 'port_id': parent_port_id,
- }
- }
+ if parent_port_id:
+ kwargs['port_id'] = parent_port_id
if subports is not None:
- post_data['trunk']['sub_ports'] = subports
+ kwargs['sub_ports'] = subports
if tenant_id is not None:
- post_data['trunk']['tenant_id'] = tenant_id
+ kwargs['tenant_id'] = tenant_id
if name is not None:
- post_data['trunk']['name'] = name
+ kwargs['name'] = name
if description is not None:
- post_data['trunk']['description'] = description
+ kwargs['description'] = description
if admin_state_up is not None:
- post_data['trunk']['admin_state_up'] = admin_state_up
- resp, body = self.post(uri, self.serialize(post_data))
+ kwargs['admin_state_up'] = admin_state_up
+ resp, body = self.post(uri, self.serialize({'trunk': kwargs}))
body = self.deserialize_single(body)
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
diff --git a/playbooks/neutron-tempest-plugin-api/post.yaml b/playbooks/neutron-tempest-plugin-api/post.yaml
deleted file mode 100644
index dac8753..0000000
--- a/playbooks/neutron-tempest-plugin-api/post.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
-- hosts: primary
- tasks:
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*nose_results.html
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testr_results.html.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.testrepository/tmp*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=**/*testrepository.subunit.gz
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}/tox'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/.tox/*/log/*
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
-
- - name: Copy files from {{ ansible_user_dir }}/workspace/ on node
- synchronize:
- src: '{{ ansible_user_dir }}/workspace/'
- dest: '{{ zuul.executor.log_root }}'
- mode: pull
- copy_links: true
- verify_host: true
- rsync_opts:
- - --include=/logs/**
- - --include=*/
- - --exclude=*
- - --prune-empty-dirs
diff --git a/playbooks/neutron-tempest-plugin-api/run.yaml b/playbooks/neutron-tempest-plugin-api/run.yaml
deleted file mode 100644
index 230ac10..0000000
--- a/playbooks/neutron-tempest-plugin-api/run.yaml
+++ /dev/null
@@ -1,58 +0,0 @@
-- hosts: all
- name: neutron-tempest-plugin-api
- tasks:
-
- - name: Ensure legacy workspace directory
- file:
- path: '{{ ansible_user_dir }}/workspace'
- state: directory
-
- - shell:
- cmd: |
- set -e
- set -x
- cat > clonemap.yaml << EOF
- clonemap:
- - name: openstack-infra/devstack-gate
- dest: devstack-gate
- EOF
- /usr/zuul-env/bin/zuul-cloner -m clonemap.yaml --cache-dir /opt/git \
- git://git.openstack.org \
- openstack-infra/devstack-gate
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
-
- - shell:
- cmd: |
- set -e
- set -x
- export PYTHONUNBUFFERED=true
- export DEVSTACK_GATE_TEMPEST=1
- export DEVSTACK_GATE_TEMPEST_ALL_PLUGINS=1
- export DEVSTACK_GATE_NEUTRON=1
- export DEVSTACK_GATE_EXERCISES=0
- export DEVSTACK_GATE_TEMPEST_REGEX="neutron_tempest_plugin.api"
- export DEVSTACK_LOCAL_CONFIG="enable_plugin neutron-tempest-plugin git://git.openstack.org/openstack/neutron-tempest-plugin"
- export BRANCH_OVERRIDE="{{ branch_override | default('default') }}"
- if [ "$BRANCH_OVERRIDE" != "default" ] ; then
- export OVERRIDE_ZUUL_BRANCH=$BRANCH_OVERRIDE
- fi
-
- export PROJECTS="openstack/neutron-tempest-plugin $PROJECTS"
-
- function gate_hook {
- bash -xe $BASE/new/neutron/neutron/tests/contrib/gate_hook.sh api
- }
- export -f gate_hook
-
- function post_test_hook {
- bash -xe $BASE/new/neutron/neutron/tests/contrib/post_test_hook.sh api
- }
- export -f post_test_hook
-
- cp devstack-gate/devstack-vm-gate-wrap.sh ./safe-devstack-vm-gate-wrap.sh
- ./safe-devstack-vm-gate-wrap.sh
- executable: /bin/bash
- chdir: '{{ ansible_user_dir }}/workspace'
- environment: '{{ zuul | zuul_legacy_vars }}'
diff --git a/requirements.txt b/requirements.txt
index 2ecce4e..5660c68 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,7 +3,7 @@
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
-neutron-lib>=1.13.0 # Apache-2.0
+neutron-lib>=1.18.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
ipaddress>=1.0.17;python_version<'3.3' # PSF
netaddr>=0.7.18 # BSD
diff --git a/tools/customize_ubuntu_image b/tools/customize_ubuntu_image
new file mode 100755
index 0000000..9c3fd07
--- /dev/null
+++ b/tools/customize_ubuntu_image
@@ -0,0 +1,172 @@
+#!/bin/bash
+
+# IMPLEMENTATION NOTE: It was not possible to implement this script using
+# virt-customize because of the following Ubuntu bugs:
+# - https://bugs.launchpad.net/ubuntu/+source/libguestfs/+bug/1632405
+# - https://bugs.launchpad.net/ubuntu/+source/isc-dhcp/+bug/1650740
+#
+# A lower-level strategy has therefore been adopted, performing the
+# following steps:
+# - mount the guest image to a temporary folder
+# - set up an environment suitable for executing chroot
+# - execute the customize_image function inside the chroot environment
+# - clean up the chroot environment
+
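+# Example invocation (the image path below is illustrative only):
+#
+#   tools/customize_ubuntu_image ./ubuntu-cloud-image.img
+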
+# Array of packages to be installed on the guest image
+INSTALL_GUEST_PACKAGES=(
+ socat # used to replace nc for testing advanced network features like
+ # multicast
+)
+
+# Function to be executed once inside the chroot on the guest image
+# Add more customization steps here
+function customize_image {
+ # dhclient-script needs to read /etc/fstab when setting up the network
+ touch /etc/fstab
+ chmod ugo+r /etc/fstab
+
+ # The Ubuntu guest image _apt user may require access to the folders below
+ local apt_user_folders=( /var/lib/apt/lists/partial )
+ mkdir -p "${apt_user_folders[@]}"
+ chown _apt.root -fR "${apt_user_folders[@]}"
+
+ # Install the desired packages on the Ubuntu guest image
+ apt-get update -y
+ apt-get install -y "${INSTALL_GUEST_PACKAGES[@]}"
+}
+
+function main {
+ set -eux
+ trap cleanup EXIT
+ "${ENTRY_POINT:-chroot_image}" "$@"
+}
+
+# Chroots into the guest image, then executes the customize_image function inside it
+function chroot_image {
+ local image_file=$1
+ local temp_dir=${TEMP_DIR:-$(make_temp -d)}
+
+ # Mount guest image into a temporary directory
+ local mount_dir=${temp_dir}/mount
+ mkdir -p "${mount_dir}"
+ mount_image "${mount_dir}" "${temp_dir}/pid"
+
+ # Mount system directories
+ bind_dir "/dev" "${mount_dir}/dev"
+ bind_dir "/dev/pts" "${mount_dir}/dev/pts"
+ bind_dir "/proc" "${mount_dir}/proc"
+ bind_dir "/sys" "${mount_dir}/sys"
+
+ # Bind-mount host directories to keep temporary files out of the guest image
+ mkdir -p "${temp_dir}/apt" "${temp_dir}/cache" "${temp_dir}/tmp"
+ bind_dir "${temp_dir}/cache" "${mount_dir}/var/cache"
+ bind_dir "${temp_dir}/tmp" "${mount_dir}/tmp"
+ bind_dir "${temp_dir}/tmp" "${mount_dir}/var/tmp"
+ bind_dir "${temp_dir}/apt" "${mount_dir}/var/lib/apt"
+
+ # Replace /etc/resolv.conf symlink to use the same DNS as this host
+ sudo rm -f "${mount_dir}/etc/resolv.conf"
+ sudo cp /etc/resolv.conf "${mount_dir}/etc/resolv.conf"
+
+ # Make sure /etc/fstab exists and is readable because it is required by
+ # /sbin/dhclient-script
+ sudo touch /etc/fstab
+ sudo chmod 644 /etc/fstab
+
+ # Copy this script to mount dir
+ local script_name=$(basename "$0")
+ local script_file=${mount_dir}/${script_name}
+ sudo cp "$0" "${script_file}"
+ sudo chmod 500 "${script_file}"
+ add_cleanup sudo rm -f "'${script_file}'"
+
+ # Execute customize_image inside chroot environment
+ local command_line=( ${CHROOT_COMMAND:-customize_image} )
+ local entry_point=${command_line[0]}
+ unset command_line[0]
+ sudo -E "ENTRY_POINT=${entry_point}" \
+ chroot "${mount_dir}" "/${script_name}" "${command_line[@]:-}"
+}
+
+# Mounts the guest image to the $1 directory, writing the guestmount PID to
+# the $2 pid file, then registers an umount of that directory for final cleanup
+function mount_image {
+ local mount_dir=$1
+ local pid_file=$2
+
+ # Export libguestfs settings
+ export LIBGUESTFS_BACKEND=${LIBGUESTFS_BACKEND:-direct}
+ export LIBGUESTFS_BACKEND_SETTINGS=${LIBGUESTFS_BACKEND_SETTINGS:-force_tcg}
+
+ # Mount guest image
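+ # Note: ${image_file} is not a parameter of this function; it is inherited
+ # from the calling chroot_image function through bash dynamic scoping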
+ sudo -E guestmount -i \
+ --add "${image_file}" \
+ --pid-file "${pid_file}" \
+ "${mount_dir}"
+
+ add_cleanup \
+ 'ENTRY_POINT=umount_image' \
+ "'$0'" "'${mount_dir}'" "'${pid_file}'"
+}
+
+# Unmounts guest image directory
+function umount_image {
+ local mount_dir=$1
+ local pid_file=$2
+ local timeout=10
+
+ # Take PID just before unmounting
+ local pid=$(cat ${pid_file} || true)
+ sudo -E guestunmount "${mount_dir}"
+
+ if [ "${pid:-}" != "" ]; then
+ # Make sure guestmount process is not running before using image
+ # file again
+ local count=${timeout}
+ while sudo kill -0 "${pid}" 2> /dev/null && (( count-- > 0 )); do
+ sleep 1
+ done
+ if [ ${count} -lt 0 ]; then
+ # It is not safe to use the image file at this point
+ echo "Wait for guestmount to exit failed after ${timeout} seconds"
+ fi
+ fi
+}
+
+# Creates a temporary file or directory and registers its removal for final cleanup
+function make_temp {
+ local temporary=$(mktemp "$@")
+ add_cleanup sudo rm -fR "'${temporary}'"
+ echo "${temporary}"
+}
+
+# Binds directory $1 to directory $2 and registers an umount for final cleanup
+function bind_dir {
+ local source_dir=$1
+ local target_dir=$2
+ sudo mount --bind "${source_dir}" "${target_dir}"
+ add_cleanup sudo umount "'${target_dir}'"
+}
+
+# Registers a command line to be executed for final cleanup
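+# (e.g. bind_dir registers "sudo umount '<target dir>'" so each bind mount is
+# undone on exit; cleanup later replays these lines in reverse order via tac)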
+function add_cleanup {
+ CLEANUP_FILE=${CLEANUP_FILE:-$(mktemp)}
+
+ echo -e "$*" >> ${CLEANUP_FILE}
+}
+
+# Executes the registered cleanup command lines in reverse order
+function cleanup {
+ error=$?
+
+ local cleanup_file=${CLEANUP_FILE:-}
+ if [ -r "${cleanup_file}" ]; then
+ tac "${cleanup_file}" | bash +e -x
+ CLEANUP_FILE=
+ rm -fR "${cleanup_file}"
+ fi
+
+ exit ${error}
+}
+
+main "$@"