Merge "Add delete_agent to NetworkClientJSON"
diff --git a/.zuul.yaml b/.zuul.yaml
index 4862822..a2506c7 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -3,7 +3,7 @@
parent: devstack-tempest
abstract: true
description: |
- Perform setup common to all Neutron tempest tests
+ Perform setup common to all Neutron tempest tests
roles:
- zuul: openstack-dev/devstack
required-projects:
@@ -16,7 +16,7 @@
tox_envlist: all
devstack_localrc:
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details"
+ NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,empty-string-filtering,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-mac-address-regenerate,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-segment,standard-attr-timestamp,standard-attr-tag,subnet_allocation,trunk,trunk-details
devstack_plugins:
neutron: git://git.openstack.org/openstack/neutron.git
neutron-tempest-plugin: git://git.openstack.org/openstack/neutron-tempest-plugin.git
@@ -38,7 +38,7 @@
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
# devstack-tempest job will be switched to use lib/neutron instead of
# lib/neutron-legacy
- "/$NEUTRON_CORE_PLUGIN_CONF":
+ /$NEUTRON_CORE_PLUGIN_CONF:
ml2:
type_drivers: flat,geneve,vlan,gre,local,vxlan
ml2_type_vlan:
@@ -53,11 +53,11 @@
$NEUTRON_DHCP_CONF:
agent:
availability_zone: nova
- "/etc/neutron/api-paste.ini":
- "composite:neutronapi_v2_0":
- use: "call:neutron.auth:pipeline_factory"
- noauth: "cors request_id catch_errors osprofiler extensions neutronapiapp_v2_0"
- keystone: "cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0"
+ /etc/neutron/api-paste.ini:
+ composite:neutronapi_v2_0:
+ use: call:neutron.auth:pipeline_factory
+ noauth: cors request_id catch_errors osprofiler extensions neutronapiapp_v2_0
+ keystone: cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0
test-config:
$TEMPEST_CONFIG:
neutron_plugin_options:
@@ -65,7 +65,7 @@
agent_availability_zone: nova
image_is_advanced: true
available_type_drivers: flat,geneve,vlan,gre,local,vxlan
- irrelevant-files:
+ irrelevant-files: &tempest-irrelevant-files
- ^(test-|)requirements.txt$
- ^releasenotes/.*$
- ^doc/.*$
@@ -88,7 +88,7 @@
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
# devstack-tempest job will be switched to use lib/neutron instead of
# lib/neutron-legacy
- "/$NEUTRON_CORE_PLUGIN_CONF":
+ /$NEUTRON_CORE_PLUGIN_CONF:
AGENT:
tunnel_types: gre,vxlan
network_log:
@@ -104,23 +104,23 @@
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
# then it could be removed from here
- NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details"
+ NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,dvr,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
- job:
name: neutron-tempest-plugin-scenario
parent: neutron-tempest-plugin
abstract: true
description: |
- Perform setup common to all tempest scenario test jobs.
+ Perform setup common to all tempest scenario test jobs.
vars:
tempest_test_regex: ^neutron_tempest_plugin\.scenario
devstack_localrc:
- PHYSICAL_NETWORK: default
- DOWNLOAD_DEFAULT_IMAGES: false
- IMAGE_URLS: "http://cloud-images.ubuntu.com/releases/16.04/release-20170113/ubuntu-16.04-server-cloudimg-amd64-disk1.img,"
- DEFAULT_INSTANCE_TYPE: ds512M
- DEFAULT_INSTANCE_USER: ubuntu
- BUILD_TIMEOUT: 784
+ PHYSICAL_NETWORK: default
+ DOWNLOAD_DEFAULT_IMAGES: false
+ IMAGE_URLS: http://cloud-images.ubuntu.com/releases/16.04/release-20180622/ubuntu-16.04-server-cloudimg-amd64-disk1.img,
+ DEFAULT_INSTANCE_TYPE: ds512M
+ DEFAULT_INSTANCE_USER: ubuntu
+ BUILD_TIMEOUT: 784
devstack_services:
cinder: true
@@ -130,8 +130,8 @@
timeout: 10000
vars:
devstack_localrc:
- Q_AGENT: linuxbridge
- NETWORK_API_EXTENSIONS: "address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details"
+ Q_AGENT: linuxbridge
+ NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-domain-ports,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,filter-validation,fip-port-details,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -142,7 +142,7 @@
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
# devstack-tempest job will be switched to use lib/neutron instead of
# lib/neutron-legacy
- "/$NEUTRON_CORE_PLUGIN_CONF":
+ /$NEUTRON_CORE_PLUGIN_CONF:
ml2:
type_drivers: flat,vlan,local,vxlan
test-config:
@@ -156,6 +156,8 @@
override-checkout: stable/queens
vars:
branch_override: stable/queens
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: address-scope,agent,allowed-address-pairs,auto-allocated-topology,availability_zone,binding,default-subnetpools,dhcp_agent_scheduler,dns-integration,ext-gw-mode,external-net,extra_dhcp_opt,extraroute,flavors,ip-substring-filtering,l3-flavors,l3-ha,l3_agent_scheduler,logging,metering,multi-provider,net-mtu,net-mtu-writable,network-ip-availability,network_availability_zone,pagination,port-security,project-id,provider,qos,qos-fip,quotas,quota_details,rbac-policies,router,router_availability_zone,security-group,port-security-groups-filtering,segment,service-type,sorting,standard-attr-description,standard-attr-revisions,standard-attr-timestamp,standard-attr-tag,subnet_allocation,tag,tag-ext,trunk,trunk-details
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario
@@ -169,16 +171,7 @@
- openstack/neutron
- openstack/neutron-tempest-plugin
- openstack/tempest
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^releasenotes/.*$
- - ^doc/.*$
- - ^setup.cfg$
- - ^.*\.rst$
- - ^neutron/locale/.*$
- - ^neutron/tests/unit/.*$
- - ^tools/.*$
- - ^tox.ini$
+ irrelevant-files: *tempest-irrelevant-files
voting: false
- job:
@@ -199,8 +192,7 @@
timeout: 3600
vars:
devstack_localrc:
- TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin
- /opt/stack/neutron-tempest-plugin"'
+ TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin /opt/stack/neutron-tempest-plugin"'
DESIGNATE_BACKEND_DRIVER: bind9
devstack_plugins:
designate: git://git.openstack.org/openstack/designate.git
@@ -208,10 +200,7 @@
cinder: false
designate: true
tempest_test_regex: ^neutron_tempest_plugin\.scenario\.test_dns_integration
- irrelevant-files:
- - ^(test-|)requirements.txt$
- - ^releasenotes/.*$
- - ^setup.cfg$
+ irrelevant-files: *tempest-irrelevant-files
- job:
name: neutron-tempest-plugin-designate-scenario-queens
@@ -251,3 +240,5 @@
templates:
- neutron-tempest-plugin-jobs
- neutron-tempest-plugin-jobs-queens
+ - check-requirements
+ - tempest-plugin-jobs
diff --git a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
index 60af89e..d449ead 100644
--- a/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
+++ b/neutron_tempest_plugin/api/admin/test_extension_driver_port_security_admin.py
@@ -28,7 +28,7 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('d39a96e2-2dea-4feb-8093-e7ac991ce6f8')
def test_create_port_security_false_on_shared_network(self):
- network = self.create_shared_network()
+ network = self.create_network(shared=True)
self.assertTrue(network['shared'])
self.create_subnet(network, client=self.admin_client)
self.assertRaises(lib_exc.Forbidden, self.create_port,
diff --git a/neutron_tempest_plugin/api/admin/test_networks.py b/neutron_tempest_plugin/api/admin/test_networks.py
index bb7ac24..74e72ef 100644
--- a/neutron_tempest_plugin/api/admin/test_networks.py
+++ b/neutron_tempest_plugin/api/admin/test_networks.py
@@ -24,50 +24,51 @@
@decorators.idempotent_id('d3c76044-d067-4cb0-ae47-8cdd875c7f67')
@utils.requires_ext(extension="project-id", service="network")
- def test_admin_create_network_keystone_v3(self):
+ def test_create_network_with_project(self):
project_id = self.client.tenant_id # non-admin
name = 'admin-created-with-project_id'
- new_net = self.create_network_keystone_v3(name, project_id,
- client=self.admin_client)
- self.assertEqual(name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ network = self.create_network(name, project_id=project_id,
+ client=self.admin_client)
+ self.assertEqual(name, network['name'])
+ self.assertEqual(project_id, network['project_id'])
+ self.assertEqual(project_id, network['tenant_id'])
- body = self.client.list_networks(id=new_net['id'])
- lookup_net = body['networks'][0]
- self.assertEqual(name, lookup_net['name'])
- self.assertEqual(project_id, lookup_net['project_id'])
- self.assertEqual(project_id, lookup_net['tenant_id'])
+ observed_network = self.client.list_networks(
+ id=network['id'])['networks'][0]
+ self.assertEqual(name, observed_network['name'])
+ self.assertEqual(project_id, observed_network['project_id'])
+ self.assertEqual(project_id, observed_network['tenant_id'])
@decorators.idempotent_id('8d21aaca-4364-4eb9-8b79-44b4fff6373b')
@utils.requires_ext(extension="project-id", service="network")
- def test_admin_create_network_keystone_v3_and_tenant(self):
+ def test_create_network_with_project_and_tenant(self):
project_id = self.client.tenant_id # non-admin
name = 'created-with-project-and-tenant'
- new_net = self.create_network_keystone_v3(
- name, project_id, tenant_id=project_id, client=self.admin_client)
- self.assertEqual(name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ network = self.create_network(name, project_id=project_id,
+ tenant_id=project_id,
+ client=self.admin_client)
+ self.assertEqual(name, network['name'])
+ self.assertEqual(project_id, network['project_id'])
+ self.assertEqual(project_id, network['tenant_id'])
- body = self.client.list_networks(id=new_net['id'])
- lookup_net = body['networks'][0]
- self.assertEqual(name, lookup_net['name'])
- self.assertEqual(project_id, lookup_net['project_id'])
- self.assertEqual(project_id, lookup_net['tenant_id'])
+ observed_network = self.client.list_networks(
+ id=network['id'])['networks'][0]
+ self.assertEqual(name, observed_network['name'])
+ self.assertEqual(project_id, observed_network['project_id'])
+ self.assertEqual(project_id, observed_network['tenant_id'])
@decorators.idempotent_id('08b92179-669d-45ee-8233-ef6611190809')
@utils.requires_ext(extension="project-id", service="network")
- def test_admin_create_network_keystone_v3_and_other_tenant(self):
+ def test_create_network_with_project_and_other_tenant(self):
project_id = self.client.tenant_id # non-admin
other_tenant = uuidutils.generate_uuid()
name = 'created-with-project-and-other-tenant'
e = self.assertRaises(lib_exc.BadRequest,
- self.create_network_keystone_v3, name,
- project_id, tenant_id=other_tenant,
+ self.create_network, name,
+ project_id=project_id, tenant_id=other_tenant,
client=self.admin_client)
expected_message = "'project_id' and 'tenant_id' do not match"
self.assertEqual(expected_message, e.resp_body['message'])
diff --git a/neutron_tempest_plugin/api/admin/test_routers_ha.py b/neutron_tempest_plugin/api/admin/test_routers_ha.py
index b8227bd..d9aafe9 100644
--- a/neutron_tempest_plugin/api/admin/test_routers_ha.py
+++ b/neutron_tempest_plugin/api/admin/test_routers_ha.py
@@ -63,7 +63,7 @@
as opposed to a "High Availability Router"
"""
name = data_utils.rand_name('router')
- router = self.create_admin_router(name, ha=False)
+ router = self._create_admin_router(name, ha=False)
self.assertFalse(router['ha'])
@decorators.idempotent_id('5a6bfe82-5b23-45a4-b027-5160997d4753')
diff --git a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
index 16375ec..cef0ffc 100644
--- a/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
+++ b/neutron_tempest_plugin/api/admin/test_shared_network_extension.py
@@ -29,7 +29,7 @@
@classmethod
def resource_setup(cls):
super(SharedNetworksTest, cls).resource_setup()
- cls.shared_network = cls.create_shared_network()
+ cls.shared_network = cls.create_network(shared=True)
@decorators.idempotent_id('6661d219-b96d-4597-ad10-55766123421a')
def test_filtering_shared_networks(self):
@@ -84,7 +84,7 @@
@decorators.idempotent_id('6661d219-b96d-4597-ad10-55766ce4abf7')
def test_create_update_shared_network(self):
- shared_network = self.create_shared_network()
+ shared_network = self.create_network(shared=True)
net_id = shared_network['id']
self.assertEqual('ACTIVE', shared_network['status'])
self.assertIsNotNone(shared_network['id'])
@@ -156,7 +156,7 @@
@classmethod
def resource_setup(cls):
super(AllowedAddressPairSharedNetworkTest, cls).resource_setup()
- cls.network = cls.create_shared_network()
+ cls.network = cls.create_network(shared=True)
cls.create_subnet(cls.network, client=cls.admin_client)
@decorators.idempotent_id('86c3529b-1231-40de-803c-ffffffff1fff')
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index 2f5446c..df0f4fa 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -60,6 +60,11 @@
# Default to ipv4.
_ip_version = const.IP_VERSION_4
+ # Derive from BaseAdminNetworkTest class to have this initialized
+ admin_client = None
+
+ external_network_id = CONF.network.public_network_id
+
@classmethod
def get_client_manager(cls, credential_type=None, roles=None,
force_new=None):
@@ -121,18 +126,24 @@
cls.subnetpools = []
cls.admin_subnetpools = []
cls.security_groups = []
+ cls.admin_security_groups = []
cls.projects = []
cls.log_objects = []
cls.reserved_subnet_cidrs = set()
cls.keypairs = []
+ cls.trunks = []
@classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
+ # Clean up trunks
+ for trunk in cls.trunks:
+ cls._try_delete_resource(cls.delete_trunk, trunk)
+
# Clean up floating IPs
for floating_ip in cls.floating_ips:
- cls._try_delete_resource(cls.client.delete_floatingip,
- floating_ip['id'])
+ cls._try_delete_resource(cls.delete_floatingip, floating_ip)
+
# Clean up routers
for router in cls.routers:
cls._try_delete_resource(cls.delete_router,
@@ -171,8 +182,7 @@
subnet['id'])
# Clean up networks
for network in cls.networks:
- cls._try_delete_resource(cls.client.delete_network,
- network['id'])
+ cls._try_delete_resource(cls.delete_network, network)
# Clean up admin networks
for network in cls.admin_networks:
@@ -184,6 +194,12 @@
cls._try_delete_resource(cls.client.delete_security_group,
secgroup['id'])
+ # Clean up admin security groups
+ for secgroup in cls.admin_security_groups:
+ cls._try_delete_resource(
+ cls.admin_client.delete_security_group,
+ secgroup['id'])
+
for subnetpool in cls.subnetpools:
cls._try_delete_resource(cls.client.delete_subnetpool,
subnetpool['id'])
@@ -248,44 +264,108 @@
pass
@classmethod
- def create_network(cls, network_name=None, client=None, **kwargs):
- """Wrapper utility that returns a test network."""
- network_name = network_name or data_utils.rand_name('test-network-')
+ def create_network(cls, network_name=None, client=None, external=None,
+ shared=None, provider_network_type=None,
+ provider_physical_network=None,
+ provider_segmentation_id=None, **kwargs):
+ """Create a network.
- client = client or cls.client
- body = client.create_network(name=network_name, **kwargs)
- network = body['network']
- if client is cls.client:
- cls.networks.append(network)
- else:
- cls.admin_networks.append(network)
+ When client is not provided and the admin_client attribute is not None
+ (for example when using the BaseAdminNetworkTest base class) and any of
+ the convenience parameters (external, shared, provider_network_type,
+ provider_physical_network and provider_segmentation_id) is used, it
+ silently uses admin_client. If the network is not shared then it uses the
+ same project_id as the regular client.
+
+ :param network_name: Human-readable name of the network
+
+ :param client: client to be used for connecting to network service
+
+ :param external: indicates whether the network has an external routing
+ facility that's not managed by the networking service.
+
+ :param shared: indicates whether this resource is shared across all
+ projects. By default, only administrative users can change this value.
+ If True and admin_client attribute is not None, then the network is
+ created under administrative project.
+
+ :param provider_network_type: the type of physical network that this
+ network should be mapped to. For example, 'flat', 'vlan', 'vxlan', or
+ 'gre'. Valid values depend on a networking back-end.
+
+ :param provider_physical_network: the physical network where this
+ network should be implemented. The Networking API v2.0 does not provide
+ a way to list available physical networks. For example, the Open
+ vSwitch plug-in configuration file defines a symbolic name that maps to
+ specific bridges on each compute host.
+
+ :param provider_segmentation_id: The ID of the isolated segment on the
+ physical network. The network_type attribute defines the segmentation
+ model. For example, if the network_type value is 'vlan', this ID is a
+ vlan identifier. If the network_type value is 'gre', this ID is a gre
+ key.
+
+ :param **kwargs: extra parameters to be forwarded to network service
+ """
+
+ name = (network_name or kwargs.pop('name', None) or
+ data_utils.rand_name('test-network-'))
+
+ # translate convenience parameters
+ admin_client_required = False
+ if provider_network_type:
+ admin_client_required = True
+ kwargs['provider:network_type'] = provider_network_type
+ if provider_physical_network:
+ admin_client_required = True
+ kwargs['provider:physical_network'] = provider_physical_network
+ if provider_segmentation_id:
+ admin_client_required = True
+ kwargs['provider:segmentation_id'] = provider_segmentation_id
+ if external is not None:
+ admin_client_required = True
+ kwargs['router:external'] = bool(external)
+ if shared is not None:
+ admin_client_required = True
+ kwargs['shared'] = bool(shared)
+
+ if not client:
+ if admin_client_required and cls.admin_client:
+ # For convenience silently switch to admin client
+ client = cls.admin_client
+ if not shared:
+ # Keep this network visible from current project
+ project_id = (kwargs.get('project_id') or
+ kwargs.get('tenant_id') or
+ cls.client.tenant_id)
+ kwargs.update(project_id=project_id, tenant_id=project_id)
+ else:
+ # Use default client
+ client = cls.client
+
+ network = client.create_network(name=name, **kwargs)['network']
+ network['client'] = client
+ cls.networks.append(network)
return network
@classmethod
- def create_shared_network(cls, network_name=None, **post_body):
- network_name = network_name or data_utils.rand_name('sharednetwork-')
- post_body.update({'name': network_name, 'shared': True})
- body = cls.admin_client.create_network(**post_body)
- network = body['network']
- cls.admin_networks.append(network)
- return network
+ def delete_network(cls, network, client=None):
+ client = client or network.get('client') or cls.client
+ client.delete_network(network['id'])
+
+ @classmethod
+ def create_shared_network(cls, network_name=None, **kwargs):
+ return cls.create_network(name=network_name, shared=True, **kwargs)
@classmethod
def create_network_keystone_v3(cls, network_name=None, project_id=None,
tenant_id=None, client=None):
- """Wrapper utility that creates a test network with project_id."""
- client = client or cls.client
- network_name = network_name or data_utils.rand_name(
- 'test-network-with-project_id')
- project_id = cls.client.tenant_id
- body = client.create_network_keystone_v3(network_name, project_id,
- tenant_id)
- network = body['network']
- if client is cls.client:
- cls.networks.append(network)
- else:
- cls.admin_networks.append(network)
- return network
+ params = {}
+ if project_id:
+ params['project_id'] = project_id
+ if tenant_id:
+ params['tenant_id'] = tenant_id
+ return cls.create_network(name=network_name, client=client, **params)
@classmethod
def create_subnet(cls, network, gateway='', cidr=None, mask_bits=None,
@@ -506,15 +586,56 @@
*args, **kwargs)
@classmethod
- def create_floatingip(cls, external_network_id):
- """Wrapper utility that returns a test floating IP."""
- body = cls.client.create_floatingip(
- floating_network_id=external_network_id)
- fip = body['floatingip']
+ def create_floatingip(cls, external_network_id=None, port=None,
+ client=None, **kwargs):
+ """Creates a floating IP.
+
+ Create a floating IP and schedule it for later deletion.
+ If a client is passed, then it is used for deleting the IP too.
+
+ :param external_network_id: ID of the network where the floating IP is
+ created. By default this is 'CONF.network.public_network_id'.
+
+ :param port: port to bind the floating IP to.
+ This is translated to 'port_id=port['id']'.
+ By default it is None.
+
+ :param client: network client to be used for creating and cleaning up
+ the floating IP.
+
+ :param **kwargs: additional creation parameters to be forwarded to
+ networking server.
+ """
+
+ client = client or cls.client
+ external_network_id = (external_network_id or
+ cls.external_network_id)
+
+ if port:
+ kwargs['port_id'] = port['id']
+
+ fip = client.create_floatingip(external_network_id,
+ **kwargs)['floatingip']
+
+ # save client to be used later in cls.delete_floatingip
+ # for final cleanup
+ fip['client'] = client
cls.floating_ips.append(fip)
return fip
@classmethod
+ def delete_floatingip(cls, floating_ip, client=None):
+ """Delete floating IP
+
+ :param client: Client to be used
+ If client is not given it will use the client used to create
+ the floating IP, or cls.client if unknown.
+ """
+
+ client = client or floating_ip.get('client') or cls.client
+ client.delete_floatingip(floating_ip['id'])
+
+ @classmethod
def create_router_interface(cls, router_id, subnet_id):
"""Wrapper utility that returns a router interface."""
interface = cls.client.add_router_interface_with_subnet_id(
@@ -589,6 +710,12 @@
name=test_project,
description=test_description)['project']
cls.projects.append(project)
+ # Creating a project also creates a default security group.
+ # Register these security groups as admin_security_groups for cleanup.
+ sgs_list = cls.admin_client.list_security_groups(
+ tenant_id=project['id'])['security_groups']
+ for sg in sgs_list:
+ cls.admin_security_groups.append(sg)
return project
@classmethod
@@ -614,6 +741,64 @@
cls.os_primary.keypairs_client)
client.delete_keypair(keypair_name=keypair['name'])
+ @classmethod
+ def create_trunk(cls, port=None, subports=None, client=None, **kwargs):
+ """Create network trunk
+
+ :param port: dictionary containing parent port ID (port['id'])
+ :param client: client to be used for connecting to networking service
+ :param **kwargs: extra parameters to be forwarded to network service
+
+ :returns: dictionary containing created trunk details
+ """
+ client = client or cls.client
+
+ if port:
+ kwargs['port_id'] = port['id']
+
+ trunk = client.create_trunk(subports=subports, **kwargs)['trunk']
+ # Save client reference for later deletion
+ trunk['client'] = client
+ cls.trunks.append(trunk)
+ return trunk
+
+ @classmethod
+ def delete_trunk(cls, trunk, client=None):
+ """Delete network trunk
+
+ :param trunk: dictionary containing trunk ID (trunk['id'])
+
+ :param client: client to be used for connecting to networking service
+ """
+ client = client or trunk.get('client') or cls.client
+ trunk.update(client.show_trunk(trunk['id'])['trunk'])
+
+ if not trunk['admin_state_up']:
+ # Cannot touch trunk before admin_state_up is True
+ client.update_trunk(trunk['id'], admin_state_up=True)
+ if trunk['sub_ports']:
+ # Remove subports before deleting the trunk
+ cls._try_delete_resource(client.remove_subports, trunk['id'],
+ trunk['sub_ports'])
+
+ # we have to detach the interface from the server before
+ # the trunk can be deleted.
+ parent_port = {'id': trunk['port_id']}
+
+ def is_parent_port_detached():
+ parent_port.update(client.show_port(parent_port['id'])['port'])
+ return not parent_port['device_id']
+
+ if not is_parent_port_detached():
+ # This can happen when the trunk is being deleted while its parent
+ # port is still attached to a running VM. Here we assume that
+ # device_id points to that VM.
+ cls.os_primary.compute.InterfacesClient().delete_interface(
+ parent_port['device_id'], parent_port['id'])
+ utils.wait_until_true(is_parent_port_detached)
+
+ client.delete_trunk(trunk['id'])
+
class BaseAdminNetworkTest(BaseNetworkTest):
@@ -995,11 +1180,14 @@
expected_resources[:-1],
self._extract_resources(body))
- def _test_list_validation_filters(self):
- validation_args = {
- 'unknown_filter': 'value',
- }
- body = self.list_method(**validation_args)
- resources = self._extract_resources(body)
- for resource in resources:
- self.assertIn(resource['name'], self.resource_names)
+ @tutils.requires_ext(extension="filter-validation", service="network")
+ def _test_list_validation_filters(
+ self, validation_args, filter_is_valid=True):
+ if not filter_is_valid:
+ self.assertRaises(lib_exc.BadRequest, self.list_method,
+ **validation_args)
+ else:
+ body = self.list_method(**validation_args)
+ resources = self._extract_resources(body)
+ for resource in resources:
+ self.assertIn(resource['name'], self.resource_names)
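The new base-class helpers above are intended as drop-in replacements for the removed wrappers. A minimal, hypothetical usage sketch (names and values are illustrative only, not part of this change):

    # In a test class derived from base.BaseAdminNetworkTest:
    network = self.create_network(shared=True)    # silently switches to admin_client
    port = self.create_port(network)
    fip = self.create_floatingip(port=port)       # cleaned up later via delete_floatingip
    trunk = self.create_trunk(port=port)          # cleaned up later via delete_trunk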
diff --git a/neutron_tempest_plugin/api/clients.py b/neutron_tempest_plugin/api/clients.py
index 14f6714..ee0289c 100644
--- a/neutron_tempest_plugin/api/clients.py
+++ b/neutron_tempest_plugin/api/clients.py
@@ -15,6 +15,7 @@
from tempest.lib.services.compute import availability_zone_client
from tempest.lib.services.compute import hypervisor_client
+from tempest.lib.services.compute import interfaces_client
from tempest.lib.services.compute import keypairs_client
from tempest.lib.services.compute import servers_client
from tempest.lib.services.identity.v2 import tenants_client
@@ -75,6 +76,8 @@
enable_instance_password=CONF.compute_feature_enabled
.enable_instance_password,
**params)
+ self.interfaces_client = interfaces_client.InterfacesClient(
+ self.auth_provider, **params)
self.keypairs_client = keypairs_client.KeyPairsClient(
self.auth_provider, **params)
self.hv_client = hypervisor_client.HypervisorClient(
diff --git a/neutron_tempest_plugin/api/test_extensions.py b/neutron_tempest_plugin/api/test_extensions.py
index 1462ae1..5b7fe67 100644
--- a/neutron_tempest_plugin/api/test_extensions.py
+++ b/neutron_tempest_plugin/api/test_extensions.py
@@ -11,31 +11,46 @@
# under the License.
from tempest.common import utils
+from tempest import config
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
+CONF = config.CONF
+
+
class ExtensionsTest(base.BaseNetworkTest):
- def _test_list_extensions_includes(self, ext):
+ def _test_list_extensions_includes(self, exts):
body = self.client.list_extensions()
extensions = {ext_['alias'] for ext_ in body['extensions']}
self.assertNotEmpty(extensions, "Extension list returned is empty")
- ext_enabled = utils.is_extension_enabled(ext, "network")
- if ext_enabled:
- self.assertIn(ext, extensions)
- else:
- self.assertNotIn(ext, extensions)
+ for ext in exts:
+ ext_enabled = utils.is_extension_enabled(ext, "network")
+ if ext_enabled:
+ self.assertIn(ext, extensions)
+ else:
+ self.assertNotIn(ext, extensions)
@decorators.idempotent_id('262420b7-a4bb-4a3e-b4b5-e73bad18df8c')
def test_list_extensions_sorting(self):
- self._test_list_extensions_includes('sorting')
+ self._test_list_extensions_includes(['sorting'])
@decorators.idempotent_id('19db409e-a23f-445d-8bc8-ca3d64c84706')
def test_list_extensions_pagination(self):
- self._test_list_extensions_includes('pagination')
+ self._test_list_extensions_includes(['pagination'])
@decorators.idempotent_id('155b7bc2-e358-4dd8-bf3e-1774c084567f')
def test_list_extensions_project_id(self):
- self._test_list_extensions_includes('project-id')
+ self._test_list_extensions_includes(['project-id'])
+
+ @decorators.idempotent_id('c7597fac-2404-45b1-beb4-523c8b1d4604')
+ def test_list_extensions_includes_all(self):
+ extensions = CONF.network_feature_enabled.api_extensions
+ if not extensions:
+ raise self.skipException("Extension list is empty")
+ if extensions[0] == 'all':
+ raise self.skipException("No lists of enabled extensions provided")
+
+ self._test_list_extensions_includes(extensions)
diff --git a/neutron_tempest_plugin/api/test_networks.py b/neutron_tempest_plugin/api/test_networks.py
index 19f4fcb..63e8ae5 100644
--- a/neutron_tempest_plugin/api/test_networks.py
+++ b/neutron_tempest_plugin/api/test_networks.py
@@ -75,28 +75,29 @@
@decorators.idempotent_id('0cc0552f-afaf-4231-b7a7-c2a1774616da')
@utils.requires_ext(extension="project-id", service="network")
- def test_create_network_keystone_v3(self):
+ def test_create_network_with_project(self):
project_id = self.client.tenant_id
name = 'created-with-project_id'
- new_net = self.create_network_keystone_v3(name, project_id)
- self.assertEqual(name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ network = self.create_network(name, project_id=project_id)
+ self.assertEqual(name, network['name'])
+ self.assertEqual(project_id, network['project_id'])
+ self.assertEqual(project_id, network['tenant_id'])
- body = self.client.list_networks(id=new_net['id'])['networks'][0]
- self.assertEqual(name, body['name'])
+ observed_network = self.client.list_networks(
+ id=network['id'])['networks'][0]
+ self.assertEqual(name, observed_network['name'])
new_name = 'create-with-project_id-2'
- body = self.client.update_network(new_net['id'], name=new_name)
- new_net = body['network']
- self.assertEqual(new_name, new_net['name'])
- self.assertEqual(project_id, new_net['project_id'])
- self.assertEqual(project_id, new_net['tenant_id'])
+ updated_network = self.client.update_network(
+ network['id'], name=new_name)['network']
+ self.assertEqual(new_name, updated_network['name'])
+ self.assertEqual(project_id, updated_network['project_id'])
+ self.assertEqual(project_id, updated_network['tenant_id'])
@decorators.idempotent_id('94e2a44c-3367-4253-8c2a-22deaf59e96c')
@utils.requires_ext(extension="dns-integration",
- service="network")
+ service="network")
def test_create_update_network_dns_domain(self):
domain1 = 'test.org.'
body = self.create_network(dns_domain=domain1)
@@ -210,4 +211,6 @@
@decorators.idempotent_id('3574ec9b-a8b8-43e3-9c11-98f5875df6a9')
def test_list_validation_filters(self):
- self._test_list_validation_filters()
+ self._test_list_validation_filters(self.list_kwargs)
+ self._test_list_validation_filters({
+ 'unknown_filter': 'value'}, filter_is_valid=False)
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index d31eab8..2bf99bf 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -209,8 +209,8 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False)
- network = self.create_shared_network('test network',
- qos_policy_id=policy['id'])
+ network = self.create_network('test network', shared=True,
+ qos_policy_id=policy['id'])
retrieved_network = self.admin_client.show_network(network['id'])
self.assertEqual(
@@ -251,7 +251,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
retrieved_network = self.admin_client.show_network(network['id'])
self.assertIsNone(retrieved_network['network']['qos_policy_id'])
@@ -266,7 +266,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
port = self.create_port(network, qos_policy_id=policy['id'])
retrieved_port = self.admin_client.show_port(port['id'])
@@ -275,7 +275,7 @@
@decorators.idempotent_id('49e02f5a-e1dd-41d5-9855-cfa37f2d195e')
def test_policy_association_with_port_nonexistent_policy(self):
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
self.assertRaises(
exceptions.NotFound,
self.create_port,
@@ -287,7 +287,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
self.assertRaises(
exceptions.NotFound,
self.create_port,
@@ -298,7 +298,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
port = self.create_port(network)
retrieved_port = self.admin_client.show_port(port['id'])
self.assertIsNone(retrieved_port['port']['qos_policy_id'])
@@ -313,7 +313,8 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- self.create_shared_network('test network', qos_policy_id=policy['id'])
+ self.create_network('test network', qos_policy_id=policy['id'],
+ shared=True)
self.assertRaises(
exceptions.Conflict,
self.admin_client.delete_qos_policy, policy['id'])
@@ -323,7 +324,7 @@
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=True)
- network = self.create_shared_network('test network')
+ network = self.create_network('test network', shared=True)
self.create_port(network, qos_policy_id=policy['id'])
self.assertRaises(
exceptions.Conflict,
diff --git a/neutron_tempest_plugin/api/test_subnetpools.py b/neutron_tempest_plugin/api/test_subnetpools.py
index ec3753a..25d9780 100644
--- a/neutron_tempest_plugin/api/test_subnetpools.py
+++ b/neutron_tempest_plugin/api/test_subnetpools.py
@@ -416,4 +416,6 @@
@decorators.idempotent_id('27feb3f8-40f4-4e50-8cd2-7d0096a98682')
def test_list_validation_filters(self):
- self._test_list_validation_filters()
+ self._test_list_validation_filters(self.list_kwargs)
+ self._test_list_validation_filters({
+ 'unknown_filter': 'value'}, filter_is_valid=False)
diff --git a/neutron_tempest_plugin/api/test_subnets.py b/neutron_tempest_plugin/api/test_subnets.py
index fb2f4d6..b8842ab 100644
--- a/neutron_tempest_plugin/api/test_subnets.py
+++ b/neutron_tempest_plugin/api/test_subnets.py
@@ -66,4 +66,6 @@
@decorators.idempotent_id('c0f9280b-9d81-4728-a967-6be22659d4c8')
def test_list_validation_filters(self):
- self._test_list_validation_filters()
+ self._test_list_validation_filters(self.list_kwargs)
+ self._test_list_validation_filters({
+ 'unknown_filter': 'value'}, filter_is_valid=False)
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index e02cf92..1a000fd 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -240,10 +240,9 @@
def create_provider_network(self):
foo_net = config.CONF.neutron_plugin_options.provider_vlans[0]
- post_body = {'network_name': data_utils.rand_name('vlan-net'),
- 'provider:network_type': 'vlan',
- 'provider:physical_network': foo_net}
- return self.create_shared_network(**post_body)
+ return self.create_network(name=data_utils.rand_name('vlan-net'),
+ provider_network_type='vlan',
+ provider_physical_network=foo_net)
@decorators.idempotent_id('0f05d98e-41f5-4629-dada-9aee269c9602')
def test_add_subport(self):
@@ -286,13 +285,13 @@
super(TrunkTestMtusJSONBase, self).setUp()
# VXLAN autocomputed MTU (1450) is smaller than that of GRE (1458)
- vxlan_kwargs = {'network_name': data_utils.rand_name('vxlan-net'),
- 'provider:network_type': 'vxlan'}
- self.smaller_mtu_net = self.create_shared_network(**vxlan_kwargs)
+ self.smaller_mtu_net = self.create_network(
+ name=data_utils.rand_name('vxlan-net'),
+ provider_network_type='vxlan')
- gre_kwargs = {'network_name': data_utils.rand_name('gre-net'),
- 'provider:network_type': 'gre'}
- self.larger_mtu_net = self.create_shared_network(**gre_kwargs)
+ self.larger_mtu_net = self.create_network(
+ name=data_utils.rand_name('gre-net'),
+ provider_network_type='gre')
self.smaller_mtu_port = self.create_port(self.smaller_mtu_net)
self.smaller_mtu_port_2 = self.create_port(self.smaller_mtu_net)
diff --git a/neutron_tempest_plugin/common/socat.py b/neutron_tempest_plugin/common/socat.py
new file mode 100644
index 0000000..6bd1fdc
--- /dev/null
+++ b/neutron_tempest_plugin/common/socat.py
@@ -0,0 +1,105 @@
+# Copyright 2018 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+COMMAND = 'socat'
+
+
+class SocatAddress(object):
+
+ def __init__(self, address, args=None, options=None):
+ self.address = address
+ self.args = args
+ self.options = options
+
+ @classmethod
+ def udp_datagram(cls, host, port, options=None, ip_version=None):
+ address = 'UDP{}-DATAGRAM'.format(ip_version or '')
+ return cls(address, (host, int(port)), options)
+
+ @classmethod
+ def udp_recvfrom(cls, port, options=None, ip_version=None):
+ address = 'UDP{}-RECVFROM'.format(ip_version or '')
+ return cls(address, (int(port),), options)
+
+ @classmethod
+ def stdio(cls):
+ return cls('STDIO')
+
+ def __str__(self):
+ address = self.address
+ if self.args:
+ address += ':' + ':'.join(str(a) for a in self.args)
+ if self.options:
+ address += ',' + ','.join(str(o) for o in self.options)
+ return address
+
+ def format(self, *args, **kwargs):
+ return str(self).format(*args, **kwargs)
+
+
+STDIO = SocatAddress.stdio()
+
+
+class SocatOption(object):
+
+ def __init__(self, name, *args):
+ self.name = name
+ self.args = args
+
+ @classmethod
+ def bind(cls, host):
+ return cls('bind', host)
+
+ @classmethod
+ def fork(cls):
+ return cls('fork')
+
+ @classmethod
+ def ip_multicast_ttl(cls, ttl):
+ return cls('ip-multicast-ttl', int(ttl))
+
+ @classmethod
+ def ip_multicast_if(cls, interface_address):
+ return cls('ip-multicast-if', interface_address)
+
+ @classmethod
+ def ip_add_membership(cls, multicast_address, interface_address):
+ return cls('ip-add-membership', multicast_address, interface_address)
+
+ def __str__(self):
+ result = self.name
+ args = self.args
+ if args:
+ result += '=' + ':'.join(str(a) for a in args)
+ return result
+
+
+class SocatCommand(object):
+
+ def __init__(self, source=STDIO, destination=STDIO, command=COMMAND):
+ self.source = source
+ self.destination = destination
+ self.command = command
+
+ def __str__(self):
+ words = [self.command, self.source, self.destination]
+ return ' '.join(str(obj) for obj in words)
+
+
+def socat_command(source=STDIO, destination=STDIO, command=COMMAND):
+ command = SocatCommand(source=source, destination=destination,
+ command=command)
+ return str(command)
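As a rough illustration (assumed usage, not taken from this change) of how the helpers in this new module compose a socat command line:

    from neutron_tempest_plugin.common import socat

    # Listen for UDP datagrams on port 8888, forking per datagram, and relay to STDIO
    source = socat.SocatAddress.udp_recvfrom(
        8888, options=[socat.SocatOption.fork()])
    command = socat.socat_command(source=source, destination=socat.STDIO)
    # command == 'socat UDP-RECVFROM:8888,fork STDIO'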
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index b919b65..9812f4c 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -12,13 +12,247 @@
# License for the specific language governing permissions and limitations
# under the License.
+import os
+import time
+
+from oslo_log import log
+import paramiko
from tempest.lib.common import ssh
+from tempest.lib import exceptions
from neutron_tempest_plugin import config
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
class Client(ssh.Client):
- def __init__(self, *args, **kwargs):
- if 'timeout' not in kwargs:
- kwargs['timeout'] = config.CONF.validation.ssh_timeout
- super(Client, self).__init__(*args, **kwargs)
+
+ default_ssh_lang = 'en_US.UTF-8'
+
+ timeout = CONF.validation.ssh_timeout
+
+ proxy_jump_host = CONF.neutron_plugin_options.ssh_proxy_jump_host
+ proxy_jump_username = CONF.neutron_plugin_options.ssh_proxy_jump_username
+ proxy_jump_password = CONF.neutron_plugin_options.ssh_proxy_jump_password
+ proxy_jump_keyfile = CONF.neutron_plugin_options.ssh_proxy_jump_keyfile
+ proxy_jump_port = CONF.neutron_plugin_options.ssh_proxy_jump_port
+
+ def __init__(self, host, username, password=None, timeout=None, pkey=None,
+ channel_timeout=10, look_for_keys=False, key_filename=None,
+ port=22, proxy_client=None):
+
+ timeout = timeout or self.timeout
+
+ if self.proxy_jump_host:
+ # Route all SSH connections through the configured SSH proxy server
+ proxy_client = proxy_client or self.create_proxy_client(
+ timeout=timeout, channel_timeout=channel_timeout)
+
+ super(Client, self).__init__(
+ host=host, username=username, password=password, timeout=timeout,
+ pkey=pkey, channel_timeout=channel_timeout,
+ look_for_keys=look_for_keys, key_filename=key_filename, port=port,
+ proxy_client=proxy_client)
+
+ @classmethod
+ def create_proxy_client(cls, look_for_keys=True, **kwargs):
+ host = cls.proxy_jump_host
+ if not host:
+ # proxy_jump_host string cannot be empty or None
+ raise ValueError(
+ "'proxy_jump_host' configuration option is empty.")
+
+ # Accept an empty string as a synonym for the default value of the
+ # options below
+ password = cls.proxy_jump_password or None
+ key_file = cls.proxy_jump_keyfile or None
+ username = cls.proxy_jump_username
+
+ # Port must be a valid TCP port number (1-65535)
+ port = cls.proxy_jump_port
+ if port <= 0 or port > 65535:
+ raise ValueError(
+ "Invalid value for 'proxy_jump_port' configuration option: "
+ "{!r}".format(port))
+
+ login = "{username}@{host}:{port}".format(username=username, host=host,
+ port=port)
+
+ if key_file:
+ # expand ~ character with user HOME directory
+ key_file = os.path.expanduser(key_file)
+ if os.path.isfile(key_file):
+ LOG.debug("Going to create SSH connection to %r using key "
+ "file: %s", login, key_file)
+
+ else:
+ # This message could help the user to identify a
+ # mis-configuration in tempest.conf
+ raise ValueError(
+ "Cannot find file specified as 'proxy_jump_keyfile' "
+ "option: {!r}".format(key_file))
+
+ elif password:
+ LOG.debug("Going to create SSH connection to %r using password.",
+ login)
+
+ elif look_for_keys:
+ # This message could help the user to identify a mis-configuration
+ # in tempest.conf
+ LOG.info("Both 'proxy_jump_password' and 'proxy_jump_keyfile' "
+ "options are empty. Going to create SSH connection to %r "
+ "looking for key file location into %r directory.",
+ login, os.path.expanduser('~/.ssh'))
+ else:
+ # A user who forces look_for_keys=False should really know what
+ # they want
+ LOG.warning("No authentication method provided to create an SSH "
+ "connection to %r. If it fails, then please "
+ "set 'proxy_jump_keyfile' to provide a valid SSH key "
+ "file.", login)
+
+ return ssh.Client(
+ host=host, username=username, password=password,
+ look_for_keys=look_for_keys, key_filename=key_file,
+ port=port, proxy_client=None, **kwargs)
+
+ # attribute used to keep reference to opened client connection
+ _client = None
+
+ def connect(self, *args, **kwargs):
+ """Creates paramiko.SSHClient and connect it to remote SSH server
+
+ In case this method is called more times it returns the same client
+ and no new SSH connection is created until close method is called.
+
+ :returns: paramiko.Client connected to remote server.
+
+ :raises tempest.lib.exceptions.SSHTimeout: in case it fails to connect
+ to remote server.
+ """
+ client = self._client
+ if client is None:
+ client = super(Client, self)._get_ssh_connection(
+ *args, **kwargs)
+ self._client = client
+
+ return client
+
+ # This overrides the superclass protected method so that exec_command
+ # reuses the same SSH client and connection when called more than once
+ _get_ssh_connection = connect
+
+ def close(self):
+ """Closes connection to SSH server and cleanup resources.
+ """
+ client = self._client
+ if client is not None:
+ client.close()
+ self._client = None
+
+ def open_session(self):
+ """Gets connection to SSH server and open a new paramiko.Channel
+
+ :returns: new paramiko.Channel
+ """
+
+ client = self.connect()
+
+ try:
+ return client.get_transport().open_session()
+ except paramiko.SSHException:
+ # the request is rejected, the session ends prematurely or
+ # there is a timeout opening a channel
+ LOG.exception("Unable to open SSH session")
+ raise exceptions.SSHTimeout(host=self.host,
+ user=self.username,
+ password=self.password)
+
+ def execute_script(self, script, become_root=False,
+ combine_stderr=True, shell='sh -eux'):
+ """Connect to remote machine and executes script.
+
+ Implementation note: it passes script lines to shell interpreter via
+ STDIN. Therefore script line number could be not available to some
+ script interpreters for debugging porposes.
+
+ :param script: script lines to be executed.
+
+ :param become_root: executes interpreter as root with sudo.
+
+ :param combine_stderr (bool): whether to redirect STDERR to STDOUT so
+ that output from both streams is returned together. True by default.
+
+ :param shell: command line used to launch the script interpreter. By
+ default it executes 'sh' with the -eux options enabled. This means that
+ any command returning a non-zero exit status or any undefined variable
+ interrupts script execution with an error, and every command executed
+ by the script is traced to STDERR.
+
+ :returns: output written by the script to STDOUT.
+
+ :raises tempest.lib.exceptions.SSHTimeout: in case it fails to connect
+ to remote server or it fails to open a channel.
+
+ :raises tempest.lib.exceptions.SSHExecCommandFailed: in case command
+ script exits with non zero exit status.
+ """
+
+ channel = self.open_session()
+ with channel:
+
+ # Combine STDOUT and STDERR so there is only one stream to handle
+ channel.set_combine_stderr(combine_stderr)
+
+ # Set default environment
+ channel.update_environment({
+ # Language and encoding
+ 'LC_ALL': os.environ.get('LC_ALL') or self.default_ssh_lang,
+ 'LANG': os.environ.get('LANG') or self.default_ssh_lang
+ })
+
+ if become_root:
+ shell = 'sudo ' + shell
+ # Spawn the shell interpreter
+ channel.exec_command(shell)
+
+ lines_iterator = iter(script.splitlines())
+ output_data = b''
+ error_data = b''
+
+ while not channel.exit_status_ready():
+ # Drain incoming data buffers
+ while channel.recv_ready():
+ output_data += channel.recv(self.buf_size)
+ while channel.recv_stderr_ready():
+ error_data += channel.recv_stderr(self.buf_size)
+
+ if channel.send_ready():
+ try:
+ line = next(lines_iterator)
+ except StopIteration:
+ # Close STDIN to finalize script execution
+ channel.shutdown_write()
+ else:
+ # Send script to Bash STDIN line by line
+ channel.send((line + '\n').encode('utf-8'))
+ else:
+ time.sleep(.1)
+
+ # Get exit status and drain incoming data buffers
+ exit_status = channel.recv_exit_status()
+ while channel.recv_ready():
+ output_data += channel.recv(self.buf_size)
+ while channel.recv_stderr_ready():
+ error_data += channel.recv_stderr(self.buf_size)
+
+ if exit_status != 0:
+ raise exceptions.SSHExecCommandFailed(
+ command='bash', exit_status=exit_status,
+ stderr=error_data.decode('utf-8'),
+ stdout=output_data.decode('utf-8'))
+
+ return output_data.decode('utf-8')
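A hedged sketch of how the extended Client could be used from a scenario test (host, user and key values below are placeholders, not part of this change):

    from neutron_tempest_plugin.common import ssh

    client = ssh.Client('203.0.113.10', 'ubuntu', pkey=private_key)
    # Runs through 'sudo sh -eux' on the remote host, reusing a single SSH connection
    output = client.execute_script('ip address show', become_root=True)
    client.close()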
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index fc07e81..e15748d 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -56,7 +56,25 @@
'"provider:network_type":<TYPE> - string '
'"mtu":<MTU> - integer '
'"cidr"<SUBNET/MASK> - string '
- '"provider:segmentation_id":<VLAN_ID> - integer')
+ '"provider:segmentation_id":<VLAN_ID> - integer'),
+
+ # Options for the feature that connects via SSH to VMs through an
+ # intermediate SSH server (proxy jump host)
+ cfg.StrOpt('ssh_proxy_jump_host',
+ default=None,
+ help='Proxy jump host used to connect via SSH to VMs.'),
+ cfg.StrOpt('ssh_proxy_jump_username',
+ default='root',
+ help='User name used to connect to "ssh_proxy_jump_host".'),
+ cfg.StrOpt('ssh_proxy_jump_password',
+ default=None,
+ help='Password used to connect to "ssh_proxy_jump_host".'),
+ cfg.StrOpt('ssh_proxy_jump_keyfile',
+ default=None,
+ help='Keyfile used to connect to "ssh_proxy_jump_host".'),
+ cfg.IntOpt('ssh_proxy_jump_port',
+ default=22,
+ help='Port used to connect to "ssh_proxy_jump_host".'),
]
# TODO(amuller): Redo configuration options registration as part of the planned
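For reference, a hedged sketch of reading or overriding the new options through oslo.config (the group name follows the existing CONF.neutron_plugin_options usage; the host value is a placeholder):

    from neutron_tempest_plugin import config

    CONF = config.CONF
    CONF.set_override('ssh_proxy_jump_host', '203.0.113.5',
                      group='neutron_plugin_options')
    proxy_port = CONF.neutron_plugin_options.ssh_proxy_jump_port  # defaults to 22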
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index 10cdaf1..1aaf8ce 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -14,6 +14,7 @@
# under the License.
import subprocess
+from debtcollector import removals
import netaddr
from neutron_lib.api import validators
from neutron_lib import constants as neutron_lib_constants
@@ -158,14 +159,21 @@
cls.routers.append(router)
return router
+ @removals.remove(version='Stein',
+ message="Please use create_floatingip method instead of "
+ "create_and_associate_floatingip.")
def create_and_associate_floatingip(self, port_id, client=None):
client = client or self.os_primary.network_client
- fip = client.create_floatingip(
- CONF.network.public_network_id,
- port_id=port_id)['floatingip']
- if client is self.os_primary.network_client:
- self.floating_ips.append(fip)
- return fip
+ return self.create_floatingip(port_id=port_id, client=client)
+
+ def create_interface(cls, server_id, port_id, client=None):
+ client = client or cls.os_primary.interfaces_client
+ body = client.create_interface(server_id, port_id=port_id)
+ return body['interfaceAttachment']
+
+ def delete_interface(cls, server_id, port_id, client=None):
+ client = client or cls.os_primary.interfaces_client
+ client.delete_interface(server_id, port_id=port_id)
def setup_network_and_server(
self, router=None, server_name=None, **kwargs):
@@ -206,7 +214,7 @@
self.port = self.client.list_ports(network_id=self.network['id'],
device_id=self.server[
'server']['id'])['ports'][0]
- self.fip = self.create_and_associate_floatingip(self.port['id'])
+ self.fip = self.create_floatingip(port=self.port)
def check_connectivity(self, host, ssh_user, ssh_key, servers=None):
ssh_client = ssh.Client(host, ssh_user, pkey=ssh_key)
diff --git a/neutron_tempest_plugin/scenario/test_floatingip.py b/neutron_tempest_plugin/scenario/test_floatingip.py
index bc40176..504af12 100644
--- a/neutron_tempest_plugin/scenario/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/test_floatingip.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import time
+
from neutron_lib import constants as lib_constants
from neutron_lib.services.qos import constants as qos_consts
from tempest.common import utils
@@ -26,6 +28,7 @@
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils as common_utils
from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants
from neutron_tempest_plugin.scenario import test_qos
@@ -199,8 +202,95 @@
gateway_external_ip)
+class FloatingIPPortDetailsTest(FloatingIpTestCasesMixin,
+ base.BaseTempestTestCase):
+ same_network = True
+
+ @classmethod
+ @utils.requires_ext(extension="router", service="network")
+ @utils.requires_ext(extension="fip-port-details", service="network")
+ def resource_setup(cls):
+ super(FloatingIPPortDetailsTest, cls).resource_setup()
+
+ @decorators.idempotent_id('a663aeee-dd81-492b-a207-354fd6284dbe')
+ def test_floatingip_port_details(self):
+ """Tests the following:
+
+ 1. Create a port with floating ip in Neutron.
+ 2. Create two servers in Nova.
+ 3. Attach the port to the server.
+ 4. Detach the port from the server.
+ 5. Attach the port to the second server.
+ 6. Detach the port from the second server.
+ """
+ port = self.create_port(self.network)
+ fip = self.create_and_associate_floatingip(port['id'])
+ server1 = self._create_server(create_floating_ip=False)
+ server2 = self._create_server(create_floating_ip=False)
+
+ for server in [server1, server2]:
+ # attach the port to the server
+ self.create_interface(
+ server['server']['id'], port_id=port['id'])
+ waiters.wait_for_interface_status(
+ self.os_primary.interfaces_client, server['server']['id'],
+ port['id'], 'ACTIVE')
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self._check_port_details(
+ fip, port, status='ACTIVE',
+ device_id=server['server']['id'], device_owner='compute:nova')
+
+ # detach the port from the server; this is a cast in the compute
+ # API so we have to poll the port until the device_id is unset.
+ self.delete_interface(server['server']['id'], port['id'])
+ self._wait_for_port_detach(port['id'])
+ fip = self.client.show_floatingip(fip['id'])['floatingip']
+ self._check_port_details(
+ fip, port, status='DOWN', device_id='', device_owner='')
+
+ def _check_port_details(self, fip, port, status, device_id, device_owner):
+ self.assertIn('port_details', fip)
+ port_details = fip['port_details']
+ self.assertEqual(port['name'], port_details['name'])
+ self.assertEqual(port['network_id'], port_details['network_id'])
+ self.assertEqual(port['mac_address'], port_details['mac_address'])
+ self.assertEqual(port['admin_state_up'],
+ port_details['admin_state_up'])
+ self.assertEqual(status, port_details['status'])
+ self.assertEqual(device_id, port_details['device_id'])
+ self.assertEqual(device_owner, port_details['device_owner'])
+
+ def _wait_for_port_detach(self, port_id, timeout=120, interval=10):
+ """Waits for the port's device_id to be unset.
+
+ :param port_id: The id of the port being detached.
+ :returns: The final port dict from the show_port response.
+ """
+ port = self.client.show_port(port_id)['port']
+ device_id = port['device_id']
+ start = int(time.time())
+
+ # NOTE(mriedem): Nova updates the port's device_id to '' rather than
+ # None, but it's not contractual so handle Falsey either way.
+ while device_id:
+ time.sleep(interval)
+ port = self.client.show_port(port_id)['port']
+ device_id = port['device_id']
+
+ timed_out = int(time.time()) - start >= timeout
+
+ if device_id and timed_out:
+ message = ('Port %s failed to detach (device_id %s) within '
+ 'the required time (%s s).' %
+ (port_id, device_id, timeout))
+ raise exceptions.TimeoutException(message)
+
+ return port
+
+
class FloatingIPQosTest(FloatingIpTestCasesMixin,
- test_qos.QoSTest):
+ test_qos.QoSTestMixin,
+ base.BaseTempestTestCase):
same_network = True
diff --git a/neutron_tempest_plugin/scenario/test_migration.py b/neutron_tempest_plugin/scenario/test_migration.py
index 5e081f1..f4b918c 100644
--- a/neutron_tempest_plugin/scenario/test_migration.py
+++ b/neutron_tempest_plugin/scenario/test_migration.py
@@ -67,6 +67,19 @@
device_owner),
timeout=300, sleep=5)
+ def _wait_until_router_ports_down(self, router_id):
+
+ def _is_port_down(port_id):
+ port = self.os_admin.network_client.show_port(port_id).get('port')
+ return port['status'] == const.DOWN
+
+ ports = self.os_admin.network_client.list_ports(
+ device_id=router_id).get('ports')
+ for port in ports:
+ common_utils.wait_until_true(
+ functools.partial(_is_port_down, port['id']),
+ timeout=300, sleep=5)
+
def _is_port_active(self, router_id, device_owner):
ports = self.os_admin.network_client.list_ports(
device_id=router_id,
@@ -120,6 +133,8 @@
self.os_admin.network_client.update_router(
router_id=router['id'], admin_state_up=False)
+ self._wait_until_router_ports_down(router['id'])
+
self.os_admin.network_client.update_router(
router_id=router['id'], distributed=after_dvr, ha=after_ha)
self._check_update(router, after_dvr, after_ha)
diff --git a/neutron_tempest_plugin/scenario/test_mtu.py b/neutron_tempest_plugin/scenario/test_mtu.py
index 0e3afe9..dbfde9b 100644
--- a/neutron_tempest_plugin/scenario/test_mtu.py
+++ b/neutron_tempest_plugin/scenario/test_mtu.py
@@ -19,6 +19,7 @@
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+import testtools
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin import config
@@ -118,6 +119,9 @@
self.keypair['private_key'])
return server_ssh_client1, fip1, server_ssh_client2, fip2
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.image_is_advanced,
+ "Advanced image is required to run this test.")
@decorators.idempotent_id('3d73ec1a-2ec6-45a9-b0f8-04a273d9d344')
def test_connectivity_min_max_mtu(self):
server_ssh_client, _, _, fip2 = self._create_setup()
@@ -207,6 +211,9 @@
self.keypair['private_key'])
return server_ssh_client1, fip1, server_ssh_client2, fip2
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.image_is_advanced,
+ "Advanced image is required to run this test.")
@decorators.idempotent_id('bc470200-d8f4-4f07-b294-1b4cbaaa35b9')
def test_connectivity_min_max_mtu(self):
server_ssh_client, _, _, fip2 = self._create_setup()
diff --git a/neutron_tempest_plugin/scenario/test_qos.py b/neutron_tempest_plugin/scenario/test_qos.py
index 0611160..702bbaa 100644
--- a/neutron_tempest_plugin/scenario/test_qos.py
+++ b/neutron_tempest_plugin/scenario/test_qos.py
@@ -66,7 +66,7 @@
port=port)
-class QoSTest(base.BaseTempestTestCase):
+class QoSTestMixin(object):
credentials = ['primary', 'admin']
force_tenant_isolation = False
@@ -81,22 +81,16 @@
NC_PORT = 1234
- @classmethod
- @tutils.requires_ext(extension="qos", service="network")
- @base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
- def resource_setup(cls):
- super(QoSTest, cls).resource_setup()
-
def _create_file_for_bw_tests(self, ssh_client):
cmd = ("(dd if=/dev/zero bs=%(bs)d count=%(count)d of=%(file_path)s) "
- % {'bs': QoSTest.BS, 'count': QoSTest.COUNT,
- 'file_path': QoSTest.FILE_PATH})
+ % {'bs': QoSTestMixin.BS, 'count': QoSTestMixin.COUNT,
+ 'file_path': QoSTestMixin.FILE_PATH})
ssh_client.exec_command(cmd)
- cmd = "stat -c %%s %s" % QoSTest.FILE_PATH
+ cmd = "stat -c %%s %s" % QoSTestMixin.FILE_PATH
filesize = ssh_client.exec_command(cmd)
- if int(filesize.strip()) != QoSTest.FILE_SIZE:
+ if int(filesize.strip()) != QoSTestMixin.FILE_SIZE:
raise sc_exceptions.FileCreationFailedException(
- file=QoSTest.FILE_PATH)
+ file=QoSTestMixin.FILE_PATH)
def _check_bw(self, ssh_client, host, port):
cmd = "killall -q nc"
@@ -105,15 +99,15 @@
except exceptions.SSHExecCommandFailed:
pass
cmd = ("(nc -ll -p %(port)d < %(file_path)s > /dev/null &)" % {
- 'port': port, 'file_path': QoSTest.FILE_PATH})
+ 'port': port, 'file_path': QoSTestMixin.FILE_PATH})
ssh_client.exec_command(cmd)
start_time = time.time()
client_socket = _connect_socket(host, port)
total_bytes_read = 0
- while total_bytes_read < QoSTest.FILE_SIZE:
- data = client_socket.recv(QoSTest.BUFFER_SIZE)
+ while total_bytes_read < QoSTestMixin.FILE_SIZE:
+ data = client_socket.recv(QoSTestMixin.BUFFER_SIZE)
total_bytes_read += len(data)
time_elapsed = time.time() - start_time
@@ -126,7 +120,7 @@
'total_bytes_read': total_bytes_read,
'bytes_per_second': bytes_per_second})
- return bytes_per_second <= QoSTest.LIMIT_BYTES_SEC
+ return bytes_per_second <= QoSTestMixin.LIMIT_BYTES_SEC
def _create_ssh_client(self):
return ssh.Client(self.fip['floating_ip_address'],
@@ -153,6 +147,14 @@
shared=True)
return policy['policy']['id']
+
+class QoSTest(QoSTestMixin, base.BaseTempestTestCase):
+ @classmethod
+ @tutils.requires_ext(extension="qos", service="network")
+ @base_api.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
+ def resource_setup(cls):
+ super(QoSTest, cls).resource_setup()
+
@decorators.idempotent_id('1f7ed39b-428f-410a-bd2b-db9f465680df')
def test_qos(self):
"""This is a basic test that check that a QoS policy with
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 2fd548a..a28d668 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -751,26 +751,23 @@
body = jsonutils.loads(body)
return service_client.ResponseBody(resp, body)
- def create_trunk(self, parent_port_id, subports,
+ def create_trunk(self, parent_port_id=None, subports=None,
tenant_id=None, name=None, admin_state_up=None,
- description=None):
+ description=None, **kwargs):
uri = '%s/trunks' % self.uri_prefix
- post_data = {
- 'trunk': {
- 'port_id': parent_port_id,
- }
- }
+ if parent_port_id:
+ kwargs['port_id'] = parent_port_id
if subports is not None:
- post_data['trunk']['sub_ports'] = subports
+ kwargs['sub_ports'] = subports
if tenant_id is not None:
- post_data['trunk']['tenant_id'] = tenant_id
+ kwargs['tenant_id'] = tenant_id
if name is not None:
- post_data['trunk']['name'] = name
+ kwargs['name'] = name
if description is not None:
- post_data['trunk']['description'] = description
+ kwargs['description'] = description
if admin_state_up is not None:
- post_data['trunk']['admin_state_up'] = admin_state_up
- resp, body = self.post(uri, self.serialize(post_data))
+ kwargs['admin_state_up'] = admin_state_up
+ resp, body = self.post(uri, self.serialize({'trunk': kwargs}))
body = self.deserialize_single(body)
self.expected_success(201, resp.status)
return service_client.ResponseBody(resp, body)
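
For reference, a hedged usage sketch of the reworked create_trunk: any keyword argument without a dedicated parameter is forwarded straight into the serialized {'trunk': ...} body. The 'client', 'parent_port' and 'project_id' names below are assumptions for illustration, not part of this change.

    # Assumed: 'client' is an authenticated instance of this network client
    # and 'parent_port' is an existing Neutron port.
    trunk = client.create_trunk(
        parent_port_id=parent_port['id'],
        subports=None,
        name='test-trunk',
        # Attributes without an explicit parameter (illustrated here with
        # project_id) now pass through **kwargs into the trunk body.
        project_id=project_id)['trunk']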
diff --git a/releasenotes/notes/mark-methods-removals-f8b230171c045a3e.yaml b/releasenotes/notes/mark-methods-removals-f8b230171c045a3e.yaml
new file mode 100644
index 0000000..ab9f37a
--- /dev/null
+++ b/releasenotes/notes/mark-methods-removals-f8b230171c045a3e.yaml
@@ -0,0 +1,11 @@
+---
+
+features:
+ - |
+ Add a new 'debtcollector' dependency, used to deprecate methods
+ that are scheduled for removal.
+
+deprecations:
+ - |
+ Deprecate the BaseTempestTestCase.create_and_associate_floatingip method;
+ it is replaced by BaseNetworkTest.create_floatingip.
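
For context, a minimal sketch of how the 'debtcollector' dependency can express such a deprecation; the decorator arguments and the stand-in class below are illustrative assumptions, not part of this change.

    from debtcollector import removals


    class ExampleTestCase(object):

        def create_floatingip(self, port_id=None, client=None):
            # Stand-in for the replacement helper named in the release note.
            return {'port_id': port_id, 'client': client}

        # removals.remove emits a DeprecationWarning whenever the wrapped
        # method is called; the version strings and message are assumptions.
        @removals.remove(message="Use create_floatingip instead",
                         version='Queens', removal_version='Rocky')
        def create_and_associate_floatingip(self, port_id, client=None):
            return self.create_floatingip(port_id=port_id, client=client)

Calling create_and_associate_floatingip then warns before delegating to the replacement helper.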
diff --git a/requirements.txt b/requirements.txt
index 2ecce4e..dc77e63 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -3,16 +3,18 @@
# process, which may cause wedges in the gate later.
pbr!=2.1.0,>=2.0.0 # Apache-2.0
-neutron-lib>=1.13.0 # Apache-2.0
+neutron-lib>=1.18.0 # Apache-2.0
oslo.config>=5.2.0 # Apache-2.0
ipaddress>=1.0.17;python_version<'3.3' # PSF
netaddr>=0.7.18 # BSD
oslo.log>=3.36.0 # Apache-2.0
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
+paramiko>=2.0.0 # LGPLv2.1+
six>=1.10.0 # MIT
tempest>=17.1.0 # Apache-2.0
ddt>=1.0.1 # MIT
testtools>=2.2.0 # MIT
testscenarios>=0.4 # Apache-2.0/BSD
eventlet!=0.18.3,!=0.20.1,>=0.18.2 # MIT
+debtcollector>=1.2.0 # Apache-2.0