Merge "[OVS] Add "openflow_processed_per_port=True" to OVS scenario"
diff --git a/devstack/customize_image.sh b/devstack/customize_image.sh
index 77ff699..0b9d8ab 100644
--- a/devstack/customize_image.sh
+++ b/devstack/customize_image.sh
@@ -149,6 +149,7 @@
function customize_image {
local image_file=$1
local top_dir=$(dirname "${NEUTRON_TEMPEST_PLUGIN_DIR}")
+ install_customize_image_tools
(
export TEMP_DIR DISK_FORMAT RC_DIR
if [[ "$(basename ${image_file})" == ubuntu-* ]]; then
diff --git a/devstack/functions.sh b/devstack/functions.sh
index 8d8a4bf..f758ff6 100644
--- a/devstack/functions.sh
+++ b/devstack/functions.sh
@@ -85,3 +85,15 @@
fi
iniset $TEMPEST_CONFIG neutron_plugin_options advanced_image_flavor_ref $flavor_ref
}
+
+
+function create_flavor_for_advance_image {
+ local name=$1
+ local ram=$2
+ local disk=$3
+ local vcpus=$4
+
+    if [[ -z $(openstack flavor list | grep -- "$name") ]]; then
+        openstack flavor create --ram "$ram" --disk "$disk" --vcpus "$vcpus" "$name"
+ fi
+}
diff --git a/devstack/plugin.sh b/devstack/plugin.sh
index 7a46014..42c31cd 100644
--- a/devstack/plugin.sh
+++ b/devstack/plugin.sh
@@ -6,7 +6,6 @@
# install_neutron_tempest_plugin
function install_neutron_tempest_plugin {
setup_dev_lib "neutron-tempest-plugin"
- install_customize_image_tools
}
if [[ "$1" == "stack" ]]; then
@@ -20,6 +19,7 @@
test-config)
echo_summary "Configuring neutron-tempest-plugin tempest options"
configure_advanced_image
+            create_flavor_for_advance_image "$ADVANCED_INSTANCE_TYPE" 256 4 1
configure_flavor_for_advanced_image
esac
fi
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..6fe7c34
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,4 @@
+reno>=3.1.0 # Apache-2.0
+sphinx>=2.0.0,!=2.1.0 # BSD
+openstackdocstheme>=2.2.1 # Apache-2.0
+
diff --git a/neutron_tempest_plugin/api/admin/test_agent_management.py b/neutron_tempest_plugin/api/admin/test_agent_management.py
index f63e81b..399e428 100644
--- a/neutron_tempest_plugin/api/admin/test_agent_management.py
+++ b/neutron_tempest_plugin/api/admin/test_agent_management.py
@@ -38,9 +38,13 @@
# Heartbeats must be excluded from comparison
self.agent.pop('heartbeat_timestamp', None)
self.agent.pop('configurations', None)
+ # Exclude alive as it can happen that when testclass'
+ # resource_setup executed the selected agent is not up
+ self.agent.pop('alive', None)
for agent in agents:
agent.pop('heartbeat_timestamp', None)
agent.pop('configurations', None)
+ agent.pop('alive', None)
self.assertIn(self.agent, agents)
@decorators.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
diff --git a/neutron_tempest_plugin/api/admin/test_dhcp_agent_scheduler.py b/neutron_tempest_plugin/api/admin/test_dhcp_agent_scheduler.py
index d0adcb8..9dc4438 100644
--- a/neutron_tempest_plugin/api/admin/test_dhcp_agent_scheduler.py
+++ b/neutron_tempest_plugin/api/admin/test_dhcp_agent_scheduler.py
@@ -13,6 +13,7 @@
# under the License.
from neutron_lib import constants
+from neutron_lib.utils import test
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
@@ -33,6 +34,7 @@
cls.cidr = cls.subnet['cidr']
cls.port = cls.create_port(cls.network)
+ @test.unstable_test("bug 1906654")
@decorators.idempotent_id('f164801e-1dd8-4b8b-b5d3-cc3ac77cfaa5')
def test_dhcp_port_status_active(self):
diff --git a/neutron_tempest_plugin/api/admin/test_logging.py b/neutron_tempest_plugin/api/admin/test_logging.py
index f4cbe29..b76377d 100644
--- a/neutron_tempest_plugin/api/admin/test_logging.py
+++ b/neutron_tempest_plugin/api/admin/test_logging.py
@@ -28,9 +28,11 @@
@decorators.idempotent_id('8d2e1ba5-455b-4519-a88e-e587002faba6')
def test_log_lifecycle(self):
+ security_group = self.create_security_group()
name = data_utils.rand_name('test-log')
description = data_utils.rand_name('test-log-desc')
log = self.create_log(name=name, description=description,
+ resource_id=security_group['id'],
resource_type='security_group', enabled=True)
# Test 'show log'
@@ -72,3 +74,27 @@
# Verify that only required fields present in logging types
for log_type in actual_list_log_types:
self.assertEqual(tuple(expected_log_keys), tuple(log_type.keys()))
+
+ @decorators.idempotent_id('1ab4eb2a-76f5-45b9-816b-1aa497a71eea')
+ def test_log_deleted_with_corresponding_security_group(self):
+ security_group = self.create_security_group()
+ name = data_utils.rand_name('test-log')
+ log = self.create_log(
+ name=name,
+ resource_type='security_group',
+ resource_id=security_group['id'],
+ enabled=True)
+
+ # Ensure log was created
+ retrieved_log = self.admin_client.show_log(log['id'])['log']
+ self.assertEqual(name, retrieved_log['name'])
+ self.assertEqual(security_group['id'], retrieved_log['resource_id'])
+ self.assertEqual('security_group', retrieved_log['resource_type'])
+ self.assertTrue(retrieved_log['enabled'])
+
+ # Delete SG
+ self.delete_security_group(security_group)
+
+ # Ensure log is also deleted
+ self.assertRaises(exceptions.NotFound,
+ self.admin_client.show_log, log['id'])
diff --git a/neutron_tempest_plugin/api/admin/test_ports.py b/neutron_tempest_plugin/api/admin/test_ports.py
index 9c94322..e9a1bdb 100644
--- a/neutron_tempest_plugin/api/admin/test_ports.py
+++ b/neutron_tempest_plugin/api/admin/test_ports.py
@@ -14,7 +14,6 @@
# under the License.
import netaddr
-import six
from neutron_lib import constants as const
from tempest.common import utils
@@ -75,6 +74,7 @@
EGRESS_KBPS = 1000
INGRESS_KBPS = 2000
+ ANY_KPPS = 500
@classmethod
def skip_checks(cls):
@@ -95,16 +95,36 @@
cls.prov_network = cls.create_provider_network(
physnet_name=cls.physnet_name, start_segmentation_id=base_segm)
+ @classmethod
+ def setup_clients(cls):
+ super(PortTestCasesResourceRequest, cls).setup_clients()
+ cls.qos_minimum_bandwidth_rules_client = \
+ cls.os_admin.qos_minimum_bandwidth_rules_client
+ cls.qos_bw_limit_rule_client = \
+ cls.os_admin.qos_limit_bandwidth_rules_client
+ cls.qos_minimum_packet_rate_rules_client = \
+ cls.os_admin.qos_minimum_packet_rate_rules_client
+
def _create_qos_policy_and_port(self, network, vnic_type,
- network_policy=False):
+ network_policy=False, min_kpps=False):
qos_policy = self.create_qos_policy(
name=data_utils.rand_name('test_policy'), shared=True)
- self.create_qos_minimum_bandwidth_rule(qos_policy['id'],
- self.EGRESS_KBPS,
- const.EGRESS_DIRECTION)
- self.create_qos_minimum_bandwidth_rule(qos_policy['id'],
- self.INGRESS_KBPS,
- const.INGRESS_DIRECTION)
+ self.qos_minimum_bandwidth_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=qos_policy['id'],
+ **{'direction': const.EGRESS_DIRECTION,
+ 'min_kbps': self.EGRESS_KBPS})
+
+ self.qos_minimum_bandwidth_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=qos_policy['id'],
+ **{'direction': const.INGRESS_DIRECTION,
+ 'min_kbps': self.INGRESS_KBPS})
+
+ if min_kpps:
+ self.qos_minimum_packet_rate_rules_client.\
+ create_minimum_packet_rate_rule(
+ qos_policy_id=qos_policy['id'],
+ **{'direction': const.ANY_DIRECTION,
+ 'min_kpps': min_kpps})
port_policy_id = qos_policy['id'] if not network_policy else None
port_kwargs = {
@@ -119,18 +139,56 @@
port_id = self.create_port(network, **port_kwargs)['id']
return self.admin_client.show_port(port_id)['port']
- def _assert_resource_request(self, port, vnic_type):
+ def _assert_resource_request(self, port, vnic_type, min_kpps=None):
self.assertIn('resource_request', port)
vnic_trait = 'CUSTOM_VNIC_TYPE_%s' % vnic_type.upper()
physnet_trait = 'CUSTOM_PHYSNET_%s' % self.physnet_name.upper()
- six.assertCountEqual(self, [physnet_trait, vnic_trait],
- port['resource_request']['required'])
+ if utils.is_extension_enabled('port-resource-request-groups',
+ 'network'):
+ min_bw_group_found = False
+ min_pps_group_found = False if min_kpps else True
+ for rg in port['resource_request']['request_groups']:
+ self.assertIn(rg['id'],
+ port['resource_request']['same_subtree'])
+ if (('NET_BW_EGR_KILOBIT_PER_SEC' in rg['resources'] or
+ 'NET_BW_IGR_KILOBIT_PER_SEC' in rg['resources']) and
+ not min_bw_group_found):
+ self.assertCountEqual([physnet_trait, vnic_trait],
+ rg['required'])
- self.assertEqual(
- {'NET_BW_EGR_KILOBIT_PER_SEC': self.EGRESS_KBPS,
- 'NET_BW_IGR_KILOBIT_PER_SEC': self.INGRESS_KBPS},
- port['resource_request']['resources']
- )
+ self.assertEqual(
+ {'NET_BW_EGR_KILOBIT_PER_SEC': self.EGRESS_KBPS,
+ 'NET_BW_IGR_KILOBIT_PER_SEC': self.INGRESS_KBPS},
+ rg['resources']
+ )
+ min_bw_group_found = True
+ elif (('NET_PACKET_RATE_KILOPACKET_PER_SEC' in
+ rg['resources'] and min_kpps) and
+ not min_pps_group_found):
+ self.assertCountEqual([vnic_trait], rg['required'])
+
+ self.assertEqual(
+ {'NET_PACKET_RATE_KILOPACKET_PER_SEC': min_kpps},
+ rg['resources']
+ )
+ min_pps_group_found = True
+ else:
+                    self.fail('"resource_request" contains unexpected request '
+                              'group: %s' % rg)
+
+ if not min_bw_group_found or not min_pps_group_found:
+                self.fail('Did not find expected request groups in '
+                          '"resource_request": %s' %
+                          port['resource_request']['request_groups'])
+ else:
+ self.assertCountEqual([physnet_trait, vnic_trait],
+ port['resource_request']['required'])
+
+ self.assertEqual(
+ {'NET_BW_EGR_KILOBIT_PER_SEC': self.EGRESS_KBPS,
+ 'NET_BW_IGR_KILOBIT_PER_SEC': self.INGRESS_KBPS},
+ port['resource_request']['resources']
+ )
@decorators.idempotent_id('ebb86dc4-716c-4558-8516-6dfc4a67601f')
def test_port_resource_request(self):
@@ -149,6 +207,27 @@
port = self.admin_client.show_port(port_id)['port']
self.assertIsNone(port['resource_request'])
+ @decorators.idempotent_id('5ae93aa0-408a-11ec-bbca-17b1a60f3438')
+ @utils.requires_ext(service='network',
+ extension='port-resource-request-groups')
+ def test_port_resource_request_min_bw_and_min_pps(self):
+ port = self._create_qos_policy_and_port(
+ network=self.prov_network, vnic_type=self.vnic_type,
+ network_policy=False, min_kpps=self.ANY_KPPS)
+ port_id = port['id']
+
+ self._assert_resource_request(port, self.vnic_type,
+ min_kpps=self.ANY_KPPS)
+
+ # Note(lajoskatona): port-resource-request is an admin only feature,
+ # so test if non-admin user can't see the new field.
+ port = self.client.show_port(port_id)['port']
+ self.assertNotIn('resource_request', port)
+
+ self.update_port(port, **{'qos_policy_id': None})
+ port = self.admin_client.show_port(port_id)['port']
+ self.assertIsNone(port['resource_request'])
+
@decorators.idempotent_id('7261391f-64cc-45a6-a1e3-435694c54bf5')
def test_port_resource_request_no_provider_net_conflict(self):
conflict = self.assertRaises(
@@ -164,9 +243,11 @@
# Note(lajoskatona): Add a non-minimum-bandwidth-rule to the policy
# to make sure that the resource request is not filled with it.
- self.create_qos_bandwidth_limit_rule(qos_policy['id'],
- self.EGRESS_KBPS, 800,
- const.EGRESS_DIRECTION)
+ self.qos_bw_limit_rule_client.create_limit_bandwidth_rule(
+ qos_policy['id'],
+ **{'max_kbps': self.EGRESS_KBPS,
+ 'max_burst_kbps': 800,
+ 'direction': const.EGRESS_DIRECTION})
port_kwargs = {
'qos_policy_id': qos_policy['id'],
@@ -181,8 +262,12 @@
@decorators.idempotent_id('b6c34ae4-44c8-47f0-86de-7ef9866fa000')
def test_port_resource_request_inherited_policy(self):
+ base_segm = CONF.neutron_plugin_options.provider_net_base_segm_id
+ prov_network = self.create_provider_network(
+ physnet_name=self.physnet_name,
+ start_segmentation_id=base_segm)
port = self._create_qos_policy_and_port(
- network=self.prov_network, vnic_type=self.vnic_type,
+ network=prov_network, vnic_type=self.vnic_type,
network_policy=True)
self._assert_resource_request(port, self.vnic_type)
diff --git a/neutron_tempest_plugin/api/admin/test_quotas.py b/neutron_tempest_plugin/api/admin/test_quotas.py
index ae773c8..0cf474e 100644
--- a/neutron_tempest_plugin/api/admin/test_quotas.py
+++ b/neutron_tempest_plugin/api/admin/test_quotas.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from tempest.common import utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -145,7 +144,7 @@
# as requested for tenant
quota_set = self.admin_client.show_details_quota(tenant_id)
quota_set = quota_set['quota']
- for key, value in six.iteritems(new_quotas):
+ for key, value in new_quotas.items():
self.assertEqual(new_quotas[key]['limit'],
quota_set[key]['limit'])
self.assertEqual(new_quotas[key]['reserved'],
@@ -155,5 +154,5 @@
# validate 'default' action for old extension
quota_limit = self.admin_client.show_quotas(tenant_id)['quota']
- for key, value in six.iteritems(new_quotas):
+ for key, value in new_quotas.items():
self.assertEqual(new_quotas[key]['limit'], quota_limit[key])
diff --git a/neutron_tempest_plugin/api/admin/test_tag.py b/neutron_tempest_plugin/api/admin/test_tag.py
index 7879b4c..eae7977 100644
--- a/neutron_tempest_plugin/api/admin/test_tag.py
+++ b/neutron_tempest_plugin/api/admin/test_tag.py
@@ -28,13 +28,13 @@
def _get_and_compare_tags(self, tags):
res_body = self.client.get_tags(self.resource, self.res_id)
- self.assertItemsEqual(tags, res_body['tags'])
+ self.assertCountEqual(tags, res_body['tags'])
def _test_tag_operations(self):
# create and get tags
tags = ['red', 'blue']
res_body = self.client.update_tags(self.resource, self.res_id, tags)
- self.assertItemsEqual(tags, res_body['tags'])
+ self.assertCountEqual(tags, res_body['tags'])
self._get_and_compare_tags(tags)
# add a tag
@@ -52,7 +52,7 @@
# replace tags
tags = ['red', 'yellow', 'purple']
res_body = self.client.update_tags(self.resource, self.res_id, tags)
- self.assertItemsEqual(tags, res_body['tags'])
+ self.assertCountEqual(tags, res_body['tags'])
self._get_and_compare_tags(tags)
# get tag
@@ -477,7 +477,7 @@
# nothing specific about networks here, just a resource that is
# available in all setups
res_body = self.client.get_tags('networks', res_id)
- self.assertItemsEqual(tags, res_body['tags'])
+ self.assertCountEqual(tags, res_body['tags'])
@decorators.attr(type='smoke')
@decorators.idempotent_id('74c56fb1-a3b1-4a62-a8d2-d04dca6bd4cd')
diff --git a/neutron_tempest_plugin/api/base.py b/neutron_tempest_plugin/api/base.py
index d63dec8..07fcb0b 100644
--- a/neutron_tempest_plugin/api/base.py
+++ b/neutron_tempest_plugin/api/base.py
@@ -118,6 +118,8 @@
cls.routers = []
cls.floating_ips = []
cls.port_forwardings = []
+ cls.local_ips = []
+ cls.local_ip_associations = []
cls.metering_labels = []
cls.service_profiles = []
cls.flavors = []
@@ -125,6 +127,8 @@
cls.qos_rules = []
cls.qos_policies = []
cls.ethertype = "IPv" + str(cls._ip_version)
+ cls.address_groups = []
+ cls.admin_address_groups = []
cls.address_scopes = []
cls.admin_address_scopes = []
cls.subnetpools = []
@@ -140,6 +144,17 @@
cls.conntrack_helpers = []
@classmethod
+ def reserve_external_subnet_cidrs(cls):
+ client = cls.os_admin.network_client
+ ext_nets = client.list_networks(
+ **{"router:external": True})['networks']
+ for ext_net in ext_nets:
+ ext_subnets = client.list_subnets(
+ network_id=ext_net['id'])['subnets']
+ for ext_subnet in ext_subnets:
+ cls.reserve_subnet_cidr(ext_subnet['cidr'])
+
+ @classmethod
def resource_cleanup(cls):
if CONF.service_available.neutron:
# Clean up trunks
@@ -154,6 +169,15 @@
for floating_ip in cls.floating_ips:
cls._try_delete_resource(cls.delete_floatingip, floating_ip)
+ # Clean up Local IP Associations
+ for association in cls.local_ip_associations:
+ cls._try_delete_resource(cls.delete_local_ip_association,
+ association)
+ # Clean up Local IPs
+ for local_ip in cls.local_ips:
+ cls._try_delete_resource(cls.delete_local_ip,
+ local_ip)
+
# Clean up conntrack helpers
for cth in cls.conntrack_helpers:
cls._try_delete_resource(cls.delete_conntrack_helper, cth)
@@ -719,6 +743,98 @@
client = client or pf.get('client') or cls.client
client.delete_port_forwarding(pf['floatingip_id'], pf['id'])
+    @classmethod
+    def create_local_ip(cls, network_id=None,
+ client=None, **kwargs):
+ """Creates a Local IP.
+
+ Create a Local IP and schedule it for later deletion.
+ If a client is passed, then it is used for deleting the IP too.
+
+ :param network_id: network ID where to create
+ By default this is 'CONF.network.public_network_id'.
+
+ :param client: network client to be used for creating and cleaning up
+ the Local IP.
+
+ :param **kwargs: additional creation parameters to be forwarded to
+ networking server.
+ """
+
+ client = client or cls.client
+ network_id = (network_id or
+ cls.external_network_id)
+
+ local_ip = client.create_local_ip(network_id,
+ **kwargs)['local_ip']
+
+ # save client to be used later in cls.delete_local_ip
+ # for final cleanup
+ local_ip['client'] = client
+ cls.local_ips.append(local_ip)
+ return local_ip
+
+ @classmethod
+ def delete_local_ip(cls, local_ip, client=None):
+ """Delete Local IP
+
+ :param client: Client to be used
+ If client is not given it will use the client used to create
+ the Local IP, or cls.client if unknown.
+ """
+
+ client = client or local_ip.get('client') or cls.client
+ client.delete_local_ip(local_ip['id'])
+
+ @classmethod
+ def create_local_ip_association(cls, local_ip_id, fixed_port_id,
+ fixed_ip_address=None, client=None):
+ """Creates a Local IP association.
+
+ Create a Local IP Association and schedule it for later deletion.
+ If a client is passed, then it is used for deleting the association
+ too.
+
+ :param local_ip_id: The ID of the Local IP.
+
+ :param fixed_port_id: The ID of the Neutron port
+ to be associated with the Local IP
+
+ :param fixed_ip_address: The fixed IPv4 address of the Neutron
+ port to be associated with the Local IP
+
+ :param client: network client to be used for creating and cleaning up
+ the Local IP Association.
+ """
+
+ client = client or cls.client
+
+ association = client.create_local_ip_association(
+ local_ip_id, fixed_port_id,
+ fixed_ip_address)['port_association']
+
+ # save ID of Local IP for final cleanup
+ association['local_ip_id'] = local_ip_id
+
+ # save client to be used later in
+ # cls.delete_local_ip_association for final cleanup
+ association['client'] = client
+ cls.local_ip_associations.append(association)
+ return association
+
+ @classmethod
+ def delete_local_ip_association(cls, association, client=None):
+
+ """Delete Local IP Association
+
+ :param client: Client to be used
+ If client is not given it will use the client used to create
+ the local IP association, or cls.client if unknown.
+ """
+
+ client = client or association.get('client') or cls.client
+ client.delete_local_ip_association(association['local_ip_id'],
+ association['fixed_port_id'])
+
@classmethod
def create_router_interface(cls, router_id, subnet_id):
"""Wrapper utility that returns a router interface."""
@@ -750,23 +866,11 @@
return qos_policy
@classmethod
- def create_qos_bandwidth_limit_rule(cls, policy_id, max_kbps,
- max_burst_kbps,
- direction=const.EGRESS_DIRECTION):
- """Wrapper utility that returns a test QoS bandwidth limit rule."""
- body = cls.admin_client.create_bandwidth_limit_rule(
- policy_id, max_kbps, max_burst_kbps, direction)
- qos_rule = body['bandwidth_limit_rule']
- cls.qos_rules.append(qos_rule)
- return qos_rule
-
- @classmethod
- def create_qos_minimum_bandwidth_rule(cls, policy_id, min_kbps,
- direction=const.EGRESS_DIRECTION):
- """Wrapper utility that creates and returns a QoS min bw rule."""
- body = cls.admin_client.create_minimum_bandwidth_rule(
- policy_id, direction, min_kbps)
- qos_rule = body['minimum_bandwidth_rule']
+ def create_qos_dscp_marking_rule(cls, policy_id, dscp_mark):
+ """Wrapper utility that creates and returns a QoS dscp rule."""
+ body = cls.admin_client.create_dscp_marking_rule(
+ policy_id, dscp_mark)
+ qos_rule = body['dscp_marking_rule']
cls.qos_rules.append(qos_rule)
return qos_rule
@@ -810,6 +914,16 @@
return body['subnetpool']
@classmethod
+ def create_address_group(cls, name, is_admin=False, **kwargs):
+ if is_admin:
+ body = cls.admin_client.create_address_group(name=name, **kwargs)
+ cls.admin_address_groups.append(body['address_group'])
+ else:
+ body = cls.client.create_address_group(name=name, **kwargs)
+ cls.address_groups.append(body['address_group'])
+ return body['address_group']
+
+ @classmethod
def create_project(cls, name=None, description=None):
test_project = name or data_utils.rand_name('test_project_')
test_description = description or data_utils.rand_name('desc_')
@@ -875,6 +989,9 @@
ip_version = ip_version or cls._ip_version
default_params = (
constants.DEFAULT_SECURITY_GROUP_RULE_PARAMS[ip_version])
+        if 'remote_address_group_id' in kwargs:
+            default_params = dict(default_params)
+            default_params.pop('remote_ip_prefix', None)
for key, value in default_params.items():
kwargs.setdefault(key, value)
@@ -1091,12 +1208,13 @@
target_id=None, event='ALL', enabled=True):
"""Wrapper utility that returns a test log object."""
log_args = {'name': name,
- 'description': description,
'resource_type': resource_type,
'resource_id': resource_id,
'target_id': target_id,
'event': event,
'enabled': enabled}
+ if description:
+ log_args['description'] = description
body = cls.admin_client.create_log(**log_args)
log_object = body['log']
cls.log_objects.append(log_object)
diff --git a/neutron_tempest_plugin/api/clients.py b/neutron_tempest_plugin/api/clients.py
index 407e694..053e5ea 100644
--- a/neutron_tempest_plugin/api/clients.py
+++ b/neutron_tempest_plugin/api/clients.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+from tempest import clients as tempest_clients
+from tempest.lib.services import clients
from tempest.lib.services.compute import availability_zone_client
from tempest.lib.services.compute import hypervisor_client
from tempest.lib.services.compute import interfaces_client
@@ -20,7 +22,9 @@
from tempest.lib.services.compute import servers_client
from tempest.lib.services.identity.v2 import tenants_client
from tempest.lib.services.identity.v3 import projects_client
-from tempest import manager
+from tempest.lib.services.network import qos_limit_bandwidth_rules_client
+from tempest.lib.services.network import qos_minimum_bandwidth_rules_client
+from tempest.lib.services.network import qos_minimum_packet_rate_rules_client
from neutron_tempest_plugin import config
from neutron_tempest_plugin.services.network.json import network_client
@@ -28,7 +32,7 @@
CONF = config.CONF
-class Manager(manager.Manager):
+class Manager(clients.ServiceClients):
"""Top level manager for OpenStack tempest clients"""
default_params = {
'disable_ssl_certificate_validation':
@@ -47,7 +51,15 @@
default_params_with_timeout_values.update(default_params)
def __init__(self, credentials=None, service=None):
- super(Manager, self).__init__(credentials=credentials)
+ dscv = CONF.identity.disable_ssl_certificate_validation
+ _, uri = tempest_clients.get_auth_provider_class(credentials)
+ super(Manager, self).__init__(
+ credentials=credentials,
+ identity_uri=uri,
+ scope='project',
+ disable_ssl_certificate_validation=dscv,
+ ca_certs=CONF.identity.ca_certificates_file,
+ trace_requests=CONF.debug.trace_requests)
self._set_identity_clients()
@@ -77,12 +89,44 @@
self.interfaces_client = interfaces_client.InterfacesClient(
self.auth_provider, **params)
self.keypairs_client = keypairs_client.KeyPairsClient(
- self.auth_provider, **params)
+ self.auth_provider, ssh_key_type=CONF.validation.ssh_key_type,
+ **params)
self.hv_client = hypervisor_client.HypervisorClient(
self.auth_provider, **params)
self.az_client = availability_zone_client.AvailabilityZoneClient(
self.auth_provider, **params)
+ self.qos_limit_bandwidth_rules_client = \
+ qos_limit_bandwidth_rules_client.QosLimitBandwidthRulesClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
+
+ self.qos_minimum_bandwidth_rules_client = \
+ qos_minimum_bandwidth_rules_client.QosMinimumBandwidthRulesClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
+
+ self.qos_minimum_packet_rate_rules_client = \
+ qos_minimum_packet_rate_rules_client.\
+ QosMinimumPacketRateRulesClient(
+ self.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **self.default_params)
+
def _set_identity_clients(self):
params = {
'service': CONF.identity.catalog_type,
diff --git a/neutron_tempest_plugin/api/test_address_groups.py b/neutron_tempest_plugin/api/test_address_groups.py
new file mode 100644
index 0000000..69f22d0
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_address_groups.py
@@ -0,0 +1,298 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import random
+
+from neutron_lib import constants
+from oslo_log import log
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+import testtools
+
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin.api import base_security_groups
+
+LOG = log.getLogger(__name__)
+
+
+ADDRESS_GROUP_NAME = 'test-address-group'
+
+
+class AddressGroupTest(base.BaseAdminNetworkTest):
+
+ credentials = ['primary', 'admin']
+ required_extensions = ['address-group']
+
+ @decorators.idempotent_id('496fef1b-22ce-483b-ab93-d28bf46954b0')
+ def test_address_group_lifecycle(self):
+ ag_description = "Test AG description"
+ ag_name = data_utils.rand_name(ADDRESS_GROUP_NAME)
+ addresses = ['10.10.10.3/32', '192.168.0.10/24', '2001:db8::f00/64']
+ expected_addresses = [
+ '10.10.10.3/32', '192.168.0.0/24', '2001:db8::/64']
+ ag = self.create_address_group(
+ description=ag_description,
+ name=ag_name,
+ addresses=addresses)
+ self.assertEqual(ag_description, ag['description'])
+ self.assertEqual(ag_name, ag['name'])
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(ag['addresses']))
+
+ new_description = 'New AG description'
+ updated_ag = self.client.update_address_group(
+ ag['id'], description=new_description)['address_group']
+ self.assertEqual(new_description, updated_ag['description'])
+ self.assertEqual(ag_name, updated_ag['name'])
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(updated_ag['addresses']))
+
+ self.client.delete_address_group(ag['id'])
+ with testtools.ExpectedException(exceptions.NotFound):
+ self.client.show_address_group(ag['id'])
+
+ @decorators.idempotent_id('8a42029a-40eb-4b44-a7cf-38500046f9b8')
+ def test_address_group_create_with_wrong_address(self):
+ with testtools.ExpectedException(exceptions.BadRequest):
+ self.create_address_group(
+ name=data_utils.rand_name(ADDRESS_GROUP_NAME),
+ addresses=['10.20.30.40'])
+
+ with testtools.ExpectedException(exceptions.BadRequest):
+ self.create_address_group(
+ name=data_utils.rand_name(ADDRESS_GROUP_NAME),
+ addresses=['this is bad IP address'])
+
+ @decorators.idempotent_id('27c03921-bb12-4b9a-b32e-7083bc90ff1f')
+ def test_edit_addresses_in_address_group(self):
+ addresses = ['10.10.10.3/32', '192.168.0.10/24', '2001:db8::f00/64']
+ expected_addresses = [
+ '10.10.10.3/32', '192.168.0.0/24', '2001:db8::/64']
+ ag = self.create_address_group(
+ name=data_utils.rand_name(ADDRESS_GROUP_NAME),
+ addresses=addresses)
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(ag['addresses']))
+
+ added_addresses = ['10.20.30.40/32']
+ self.client.add_addresses_to_address_group(
+ ag['id'], addresses=added_addresses)
+ updated_ag = self.client.show_address_group(ag['id'])['address_group']
+ expected_addresses += added_addresses
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(updated_ag['addresses']))
+
+ removed_addresses = [expected_addresses.pop(0)]
+ self.client.remove_addresses_from_address_group(
+ ag['id'], addresses=removed_addresses)
+ updated_ag = self.client.show_address_group(ag['id'])['address_group']
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(updated_ag['addresses']))
+
+ @decorators.idempotent_id('feec6747-b4b8-49e3-8cff-817d3f097f2c')
+ def test_add_wrong_address_to_address_group(self):
+ addresses = ['10.10.10.3/32', '192.168.0.10/24', '2001:db8::f00/64']
+ expected_addresses = [
+ '10.10.10.3/32', '192.168.0.0/24', '2001:db8::/64']
+ ag = self.create_address_group(
+ name=data_utils.rand_name(ADDRESS_GROUP_NAME),
+ addresses=addresses)
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(ag['addresses']))
+ with testtools.ExpectedException(exceptions.BadRequest):
+ self.client.add_addresses_to_address_group(
+ ag['id'], addresses=['this is bad IP address'])
+ updated_ag = self.client.show_address_group(ag['id'])['address_group']
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(updated_ag['addresses']))
+
+ @decorators.idempotent_id('74f6fd4c-257b-4725-887b-470e96960e24')
+ def test_remove_wrong_address_from_address_group(self):
+ addresses = ['10.10.10.3/32', '192.168.0.10/24', '2001:db8::f00/64']
+ expected_addresses = [
+ '10.10.10.3/32', '192.168.0.0/24', '2001:db8::/64']
+ ag = self.create_address_group(
+ name=data_utils.rand_name(ADDRESS_GROUP_NAME),
+ addresses=addresses)
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(ag['addresses']))
+ with testtools.ExpectedException(exceptions.NotFound):
+ self.client.remove_addresses_from_address_group(
+ ag['id'], addresses=['10.200.200.200'])
+ updated_ag = self.client.show_address_group(ag['id'])['address_group']
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(updated_ag['addresses']))
+
+ with testtools.ExpectedException(exceptions.BadRequest):
+ self.client.remove_addresses_from_address_group(
+ ag['id'], addresses=['this is bad IP address'])
+ updated_ag = self.client.show_address_group(ag['id'])['address_group']
+ self.assertListEqual(
+ sorted(expected_addresses), sorted(updated_ag['addresses']))
+
+
+class RbacSharedAddressGroupTest(base.BaseAdminNetworkTest):
+
+ force_tenant_isolation = True
+ credentials = ['primary', 'alt', 'admin']
+ required_extensions = ['security-group', 'address-group',
+ 'rbac-address-group']
+
+ @classmethod
+ def resource_setup(cls):
+ super(RbacSharedAddressGroupTest, cls).resource_setup()
+ cls.client2 = cls.os_alt.network_client
+
+ def _create_address_group(self, is_admin=False, **kwargs):
+ name = data_utils.rand_name(ADDRESS_GROUP_NAME)
+ return self.create_address_group(name=name, is_admin=is_admin,
+ **kwargs)
+
+ def _make_admin_ag_shared_to_project_id(self, project_id):
+ ag = self._create_address_group(is_admin=True)
+ rbac_policy = self.admin_client.create_rbac_policy(
+ object_type='address_group',
+ object_id=ag['id'],
+ action='access_as_shared',
+ target_tenant=project_id,
+ )['rbac_policy']
+ return {'address_group': ag, 'rbac_policy': rbac_policy}
+
+ @decorators.idempotent_id('95f59a88-c47e-4dd9-a231-85f1782753a7')
+ def test_policy_target_update(self):
+ res = self._make_admin_ag_shared_to_project_id(
+ self.client.tenant_id)
+ # change to client2
+ update_res = self.admin_client.update_rbac_policy(
+ res['rbac_policy']['id'], target_tenant=self.client2.tenant_id)
+ self.assertEqual(self.client2.tenant_id,
+ update_res['rbac_policy']['target_tenant'])
+ # make sure everything else stayed the same
+ res['rbac_policy'].pop('target_tenant')
+ update_res['rbac_policy'].pop('target_tenant')
+ self.assertEqual(res['rbac_policy'], update_res['rbac_policy'])
+
+ @decorators.idempotent_id('35a214c9-5c99-468f-9242-34d0529cabfa')
+ def test_secgrprule_presence_prevents_policy_rbac_policy_deletion(self):
+ res = self._make_admin_ag_shared_to_project_id(
+ self.client2.tenant_id)
+ ag_id = res['address_group']['id']
+ security_group = self.create_security_group(client=self.client2)
+ protocol = random.choice(list(base_security_groups.V4_PROTOCOL_NAMES))
+ sec_grp_rule = self.create_security_group_rule(
+ security_group=security_group,
+ client=self.client2, protocol=protocol,
+ direction=constants.INGRESS_DIRECTION,
+ remote_address_group_id=ag_id)
+
+ # a port with shared sg should prevent the deletion of an
+ # rbac-policy required for it to be shared
+ with testtools.ExpectedException(exceptions.Conflict):
+ self.admin_client.delete_rbac_policy(res['rbac_policy']['id'])
+
+ # cleanup
+ self.client2.delete_security_group_rule(sec_grp_rule['id'])
+ self.admin_client.delete_rbac_policy(res['rbac_policy']['id'])
+
+ @decorators.idempotent_id('c89db8d4-0b52-4072-ac7e-672860491843')
+ def test_regular_client_shares_to_another_regular_client(self):
+ # owned by self.admin_client
+ ag = self._create_address_group(is_admin=True)
+ with testtools.ExpectedException(exceptions.NotFound):
+ self.client.show_address_group(ag['id'])
+ rbac_policy = self.admin_client.create_rbac_policy(
+ object_type='address_group', object_id=ag['id'],
+ action='access_as_shared',
+ target_tenant=self.client.tenant_id)['rbac_policy']
+ self.client.show_address_group(ag['id'])
+
+ self.assertIn(rbac_policy,
+ self.admin_client.list_rbac_policies()['rbac_policies'])
+ # ensure that 'client2' can't see the rbac-policy sharing the
+ # ag to it because the rbac-policy belongs to 'client'
+ self.assertNotIn(rbac_policy['id'], [p['id'] for p in
+ self.client2.list_rbac_policies()['rbac_policies']])
+
+ @decorators.idempotent_id('55a9fbb6-3333-48e8-90e4-11ab2a49567b')
+ def test_filter_fields(self):
+ ag = self._create_address_group()
+ self.admin_client.create_rbac_policy(
+ object_type='address_group', object_id=ag['id'],
+ action='access_as_shared', target_tenant=self.client2.tenant_id)
+ field_args = (('id',), ('id', 'action'), ('object_type', 'object_id'),
+ ('project_id', 'target_tenant'))
+ for fields in field_args:
+ res = self.admin_client.list_rbac_policies(fields=fields)
+ self.assertEqual(set(fields), set(res['rbac_policies'][0].keys()))
+
+ @decorators.idempotent_id('20b2706b-1cea-4724-ab72-d7452ecb1fc4')
+ def test_rbac_policy_show(self):
+ res = self._make_admin_ag_shared_to_project_id(
+ self.client.tenant_id)
+ p1 = res['rbac_policy']
+ p2 = self.admin_client.create_rbac_policy(
+ object_type='address_group',
+ object_id=res['address_group']['id'],
+ action='access_as_shared',
+ target_tenant='*')['rbac_policy']
+
+ self.assertEqual(
+ p1, self.admin_client.show_rbac_policy(p1['id'])['rbac_policy'])
+ self.assertEqual(
+ p2, self.admin_client.show_rbac_policy(p2['id'])['rbac_policy'])
+
+ @decorators.idempotent_id('774fc038-486c-4507-ab04-c5aac0fca5ab')
+ def test_filter_rbac_policies(self):
+ ag = self._create_address_group()
+ rbac_pol1 = self.admin_client.create_rbac_policy(
+ object_type='address_group', object_id=ag['id'],
+ action='access_as_shared',
+ target_tenant=self.client2.tenant_id)['rbac_policy']
+ rbac_pol2 = self.admin_client.create_rbac_policy(
+ object_type='address_group', object_id=ag['id'],
+ action='access_as_shared',
+ target_tenant=self.admin_client.tenant_id)['rbac_policy']
+ res1 = self.admin_client.list_rbac_policies(id=rbac_pol1['id'])[
+ 'rbac_policies']
+ res2 = self.admin_client.list_rbac_policies(id=rbac_pol2['id'])[
+ 'rbac_policies']
+ self.assertEqual(1, len(res1))
+ self.assertEqual(1, len(res2))
+ self.assertEqual(rbac_pol1['id'], res1[0]['id'])
+ self.assertEqual(rbac_pol2['id'], res2[0]['id'])
+
+ @decorators.idempotent_id('a0f3a01a-e2c7-47d6-9385-0cd7a7f0c996')
+ def test_regular_client_blocked_from_sharing_anothers_policy(self):
+ ag = self._make_admin_ag_shared_to_project_id(
+ self.client.tenant_id)['address_group']
+ with testtools.ExpectedException(exceptions.BadRequest):
+ self.client.create_rbac_policy(
+ object_type='address_group', object_id=ag['id'],
+ action='access_as_shared',
+ target_tenant=self.client2.tenant_id)
+
+ # make sure the rbac-policy is invisible to the tenant for which it's
+ # being shared
+ self.assertFalse(self.client.list_rbac_policies()['rbac_policies'])
+
+ @decorators.idempotent_id('f39e32d9-4733-48ec-b550-07f0ec4998a9')
+ def test_regular_client_blocked_from_updating_shared_address_group(self):
+ # owned by self.admin_client
+ ag = self._create_address_group(is_admin=True)
+ self.admin_client.create_rbac_policy(
+ object_type='address_group', object_id=ag['id'],
+ action='access_as_shared',
+ target_tenant=self.client.tenant_id)['rbac_policy']
+ self.client.show_address_group(ag['id'])
+ with testtools.ExpectedException(exceptions.NotFound):
+ self.client.update_address_group(ag['id'], name='new_name')
diff --git a/neutron_tempest_plugin/api/test_allowed_address_pair.py b/neutron_tempest_plugin/api/test_allowed_address_pair.py
deleted file mode 100644
index dd48382..0000000
--- a/neutron_tempest_plugin/api/test_allowed_address_pair.py
+++ /dev/null
@@ -1,121 +0,0 @@
-# Copyright 2014 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.lib import decorators
-
-from neutron_tempest_plugin.api import base
-
-
-class AllowedAddressPairTestJSON(base.BaseNetworkTest):
-
- """AllowedAddressPairTestJSON class
-
- Tests the Neutron Allowed Address Pair API extension using the Tempest
- REST client. The following API operations are tested with this extension:
-
- create port
- list ports
- update port
- show port
-
- v2.0 of the Neutron API is assumed. It is also assumed that the following
- options are defined in the [network-feature-enabled] section of
- etc/tempest.conf
-
- api_extensions
- """
-
- required_extensions = ['allowed-address-pairs']
-
- @classmethod
- def resource_setup(cls):
- super(AllowedAddressPairTestJSON, cls).resource_setup()
- cls.network = cls.create_network()
- cls.create_subnet(cls.network)
- port = cls.create_port(cls.network)
- cls.ip_address = port['fixed_ips'][0]['ip_address']
- cls.mac_address = port['mac_address']
-
- @decorators.idempotent_id('86c3529b-1231-40de-803c-00e40882f043')
- def test_create_list_port_with_address_pair(self):
- # Create port with allowed address pair attribute
- allowed_address_pairs = [{'ip_address': self.ip_address,
- 'mac_address': self.mac_address}]
- body = self.create_port(
- self.network,
- allowed_address_pairs=allowed_address_pairs)
- port_id = body['id']
-
- # Confirm port was created with allowed address pair attribute
- body = self.client.list_ports()
- ports = body['ports']
- port = [p for p in ports if p['id'] == port_id]
- msg = 'Created port not found in list of ports returned by Neutron'
- self.assertTrue(port, msg)
- self._confirm_allowed_address_pair(port[0], self.ip_address)
-
- def _update_port_with_address(self, address, mac_address=None, **kwargs):
- # Create a port without allowed address pair
- body = self.create_port(self.network)
- port_id = body['id']
- if mac_address is None:
- mac_address = self.mac_address
-
- # Update allowed address pair attribute of port
- allowed_address_pairs = [{'ip_address': address,
- 'mac_address': mac_address}]
- if kwargs:
- allowed_address_pairs.append(kwargs['allowed_address_pairs'])
- body = self.client.update_port(
- port_id, allowed_address_pairs=allowed_address_pairs)
- allowed_address_pair = body['port']['allowed_address_pairs']
- self.assertItemsEqual(allowed_address_pair, allowed_address_pairs)
-
- @decorators.idempotent_id('9599b337-272c-47fd-b3cf-509414414ac4')
- def test_update_port_with_address_pair(self):
- # Update port with allowed address pair
- self._update_port_with_address(self.ip_address)
-
- @decorators.idempotent_id('4d6d178f-34f6-4bff-a01c-0a2f8fe909e4')
- def test_update_port_with_cidr_address_pair(self):
- # Update allowed address pair with cidr
- cidr = str(next(self.get_subnet_cidrs()))
- self._update_port_with_address(cidr)
-
- @decorators.idempotent_id('b3f20091-6cd5-472b-8487-3516137df933')
- def test_update_port_with_multiple_ip_mac_address_pair(self):
- # Create an ip _address and mac_address through port create
- resp = self.create_port(self.network)
- ipaddress = resp['fixed_ips'][0]['ip_address']
- macaddress = resp['mac_address']
-
- # Update allowed address pair port with multiple ip and mac
- allowed_address_pairs = {'ip_address': ipaddress,
- 'mac_address': macaddress}
- self._update_port_with_address(
- self.ip_address, self.mac_address,
- allowed_address_pairs=allowed_address_pairs)
-
- def _confirm_allowed_address_pair(self, port, ip):
- msg = 'Port allowed address pairs should not be empty'
- self.assertTrue(port['allowed_address_pairs'], msg)
- ip_address = port['allowed_address_pairs'][0]['ip_address']
- mac_address = port['allowed_address_pairs'][0]['mac_address']
- self.assertEqual(ip_address, ip)
- self.assertEqual(mac_address, self.mac_address)
-
-
-class AllowedAddressPairIpV6TestJSON(AllowedAddressPairTestJSON):
- _ip_version = 6
diff --git a/neutron_tempest_plugin/api/test_extra_dhcp_options.py b/neutron_tempest_plugin/api/test_extra_dhcp_options.py
index 844666a..91c270d 100644
--- a/neutron_tempest_plugin/api/test_extra_dhcp_options.py
+++ b/neutron_tempest_plugin/api/test_extra_dhcp_options.py
@@ -13,6 +13,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+from neutron_lib import constants
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -48,9 +49,14 @@
cls.ip_server = ('123.123.123.45' if cls._ip_version == 4
else '2015::badd')
cls.extra_dhcp_opts = [
- {'opt_value': 'pxelinux.0', 'opt_name': 'bootfile-name'},
- {'opt_value': cls.ip_tftp, 'opt_name': 'tftp-server'},
- {'opt_value': cls.ip_server, 'opt_name': 'server-ip-address'}
+ {'opt_value': 'pxelinux.0',
+ 'opt_name': 'bootfile-name'}, # default ip_version is 4
+ {'opt_value': cls.ip_tftp,
+ 'opt_name': 'tftp-server',
+ 'ip_version': cls._ip_version},
+ {'opt_value': cls.ip_server,
+ 'opt_name': 'server-ip-address',
+ 'ip_version': cls._ip_version}
]
@decorators.idempotent_id('d2c17063-3767-4a24-be4f-a23dbfa133c9')
@@ -85,8 +91,11 @@
self.assertEqual(len(retrieved), len(extra_dhcp_opts))
for retrieved_option in retrieved:
for option in extra_dhcp_opts:
+ # default ip_version is 4
+ ip_version = option.get('ip_version', constants.IP_VERSION_4)
if (retrieved_option['opt_value'] == option['opt_value'] and
- retrieved_option['opt_name'] == option['opt_name']):
+ retrieved_option['opt_name'] == option['opt_name'] and
+ retrieved_option['ip_version'] == ip_version):
break
else:
self.fail('Extra DHCP option not found in port %s' %
diff --git a/neutron_tempest_plugin/api/test_local_ip.py b/neutron_tempest_plugin/api/test_local_ip.py
new file mode 100644
index 0000000..3895f4f
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_local_ip.py
@@ -0,0 +1,142 @@
+# Copyright 2021 Huawei, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin import config
+
+CONF = config.CONF
+
+
+class LocalIPTestJSON(base.BaseNetworkTest):
+
+ credentials = ['primary', 'admin']
+ required_extensions = ['local_ip']
+
+ @classmethod
+ def resource_setup(cls):
+ super(LocalIPTestJSON, cls).resource_setup()
+ cls.ext_net_id = CONF.network.public_network_id
+
+ # Create network and subnet
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+
+ @decorators.idempotent_id('369257b0-521d-43f5-9482-50e18e87a472')
+ def test_local_ip_lifecycle(self):
+ port = self.create_port(self.network)
+ lip_description = 'Test Local IP description'
+ lip_name = 'test-local-ip'
+ created_local_ip = self.create_local_ip(
+ name=lip_name,
+ description=lip_description,
+ local_port_id=port['id'],
+ local_ip_address=port['fixed_ips'][0]['ip_address'])
+ self.assertEqual(self.network['id'], created_local_ip['network_id'])
+ self.assertEqual(lip_description, created_local_ip['description'])
+ self.assertEqual(lip_name, created_local_ip['name'])
+ self.assertEqual(port['id'], created_local_ip['local_port_id'])
+ self.assertEqual(port['fixed_ips'][0]['ip_address'],
+ created_local_ip['local_ip_address'])
+
+ # Show created local_ip
+ body = self.client.get_local_ip(created_local_ip['id'])
+ local_ip = body['local_ip']
+
+ self.assertEqual(lip_description, local_ip['description'])
+ self.assertEqual(lip_name, local_ip['name'])
+
+ # List local_ips
+ body = self.client.list_local_ips()
+
+ local_ip_ids = [lip['id'] for lip in body['local_ips']]
+ self.assertIn(created_local_ip['id'], local_ip_ids)
+
+ # Update local_ip
+ updated_local_ip = self.client.update_local_ip(
+ created_local_ip['id'],
+ name='updated_local_ip')
+ self.assertEqual('updated_local_ip',
+ updated_local_ip['local_ip']['name'])
+
+ self.delete_local_ip(created_local_ip)
+ self.assertRaises(exceptions.NotFound,
+ self.client.get_local_ip, created_local_ip['id'])
+
+ @decorators.idempotent_id('e32df8ac-4e29-4adf-8057-46ae8684eff2')
+ def test_create_local_ip_with_network(self):
+ local_ip = self.create_local_ip(self.network['id'])
+ self.assertEqual(self.network['id'], local_ip['network_id'])
+
+
+class LocalIPAssociationTestJSON(base.BaseNetworkTest):
+
+ required_extensions = ['local_ip']
+
+ @classmethod
+ def resource_setup(cls):
+ super(LocalIPAssociationTestJSON, cls).resource_setup()
+ cls.ext_net_id = CONF.network.public_network_id
+ # Create network
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+
+ @decorators.idempotent_id('602d2874-49be-4c72-8799-b20c95853b6b')
+ def test_local_ip_association_lifecycle(self):
+ local_ip = self.create_local_ip(self.network['id'])
+ port = self.create_port(self.network)
+ local_ip_association = self.create_local_ip_association(
+ local_ip['id'],
+ fixed_port_id=port['id'])
+ self.assertEqual(local_ip['id'], local_ip_association['local_ip_id'])
+ self.assertEqual(port['id'], local_ip_association['fixed_port_id'])
+
+ # Test List Local IP Associations
+ body = self.client.list_local_ip_associations(local_ip['id'])
+ associations = body['port_associations']
+ self.assertEqual(local_ip['id'], associations[0]['local_ip_id'])
+ self.assertEqual(port['id'], associations[0]['fixed_port_id'])
+
+ # Show
+ body = self.client.get_local_ip_association(
+ local_ip['id'], port['id'])
+ association = body['port_association']
+ self.assertEqual(local_ip['id'], association['local_ip_id'])
+ self.assertEqual(port['id'], association['fixed_port_id'])
+
+ # Delete
+ self.client.delete_local_ip_association(local_ip['id'], port['id'])
+ self.assertRaises(exceptions.NotFound,
+ self.client.get_local_ip_association,
+ local_ip['id'], port['id'])
+
+ @decorators.idempotent_id('5d26edab-78d2-4cbd-9d0b-3c0b19f0f52d')
+ def test_local_ip_association_with_two_ips_on_port(self):
+ local_ip = self.create_local_ip(self.network['id'])
+ s = self.subnet
+ port = self.create_port(self.network)
+ # request another IP on the same subnet
+ port['fixed_ips'].append({'subnet_id': s['id']})
+ updated = self.client.update_port(port['id'],
+ fixed_ips=port['fixed_ips'])
+ port = updated['port']
+ local_ip_association = self.create_local_ip_association(
+ local_ip['id'],
+ fixed_port_id=port['id'],
+ fixed_ip_address=port['fixed_ips'][0]['ip_address'])
+ self.assertEqual(port['fixed_ips'][0]['ip_address'],
+ local_ip_association['fixed_ip'])
diff --git a/neutron_tempest_plugin/api/test_network_ip_availability.py b/neutron_tempest_plugin/api/test_network_ip_availability.py
index e798680..22d2fc6 100644
--- a/neutron_tempest_plugin/api/test_network_ip_availability.py
+++ b/neutron_tempest_plugin/api/test_network_ip_availability.py
@@ -175,3 +175,22 @@
class NetworksIpAvailabilityIPv6Test(NetworksIpAvailabilityIPv4Test):
_ip_version = lib_constants.IP_VERSION_6
+
+ def setUp(self):
+ super(NetworksIpAvailabilityIPv6Test, self).setUp()
+ net_name = data_utils.rand_name('network')
+ self.network = self.create_network(network_name=net_name)
+
+ @decorators.idempotent_id('0d5a03f2-fdb7-4ec3-b746-734c51d74b69')
+ def test_list_ipv6_ip_availability_after_subnet_and_ports(self):
+ subnet = self.create_subnet(self.network, ip_version=self._ip_version,
+ enable_dhcp=False)
+ prefix = netaddr.IPNetwork(subnet['cidr']).prefixlen
+ body = self.admin_client.list_network_ip_availabilities()
+ used_ips_before_port_create = self._get_used_ips(self.network, body)
+ self.create_port(self.network)
+ net_availability = self.admin_client.list_network_ip_availabilities()
+ self._assert_total_and_used_ips(
+ used_ips_before_port_create + 1,
+ calc_total_ips(prefix, self._ip_version),
+ self.network, net_availability)
diff --git a/neutron_tempest_plugin/api/test_ports.py b/neutron_tempest_plugin/api/test_ports.py
index c59ee83..f1dfe5c 100644
--- a/neutron_tempest_plugin/api/test_ports.py
+++ b/neutron_tempest_plugin/api/test_ports.py
@@ -15,6 +15,7 @@
import copy
+from neutron_lib import constants as lib_constants
from tempest.common import utils
from tempest.lib import decorators
@@ -60,6 +61,20 @@
body = self.client.list_ports(id=body['port']['id'])['ports'][0]
self.assertEqual('d2', body['description'])
+ @decorators.idempotent_id('3ae162e8-ff00-490c-a423-6a88e48f1ed6')
+ def test_create_update_port_security(self):
+ body = self.create_port(self.network,
+ port_security_enabled=True)
+ self.assertTrue(body['port_security_enabled'])
+ body = self.client.list_ports(id=body['id'])['ports'][0]
+ self.assertTrue(body['port_security_enabled'])
+ body = self.client.update_port(body['id'],
+ port_security_enabled=False,
+ security_groups=[])
+ self.assertFalse(body['port']['port_security_enabled'])
+ body = self.client.list_ports(id=body['port']['id'])['ports'][0]
+ self.assertFalse(body['port_security_enabled'])
+
@decorators.idempotent_id('539fbefe-fb36-48aa-9a53-8c5fbd44e492')
@utils.requires_ext(extension="dns-integration",
service="network")
@@ -137,6 +152,28 @@
self.assertEqual(expected, subnets)
+class PortsIpv6TestJSON(base.BaseNetworkTest):
+
+ _ip_version = lib_constants.IP_VERSION_6
+
+ @classmethod
+ def resource_setup(cls):
+ super(PortsIpv6TestJSON, cls).resource_setup()
+ cls.network = cls.create_network()
+
+ @decorators.idempotent_id('b85879fb-4852-4b99-aa32-3f8a7a6a3f01')
+ def test_add_ipv6_ips_to_port(self):
+ s = self.create_subnet(self.network, ip_version=self._ip_version)
+ port = self.create_port(self.network)
+ # request another IP on the same subnet
+ port['fixed_ips'].append({'subnet_id': s['id']})
+ updated = self.client.update_port(port['id'],
+ fixed_ips=port['fixed_ips'])
+ subnets = [ip['subnet_id'] for ip in updated['port']['fixed_ips']]
+ expected = [s['id'], s['id']]
+ self.assertEqual(expected, subnets)
+
+
class PortsSearchCriteriaTest(base.BaseSearchCriteriaTest):
resource = 'port'
diff --git a/neutron_tempest_plugin/api/test_ports_negative.py b/neutron_tempest_plugin/api/test_ports_negative.py
new file mode 100644
index 0000000..004feb9
--- /dev/null
+++ b/neutron_tempest_plugin/api/test_ports_negative.py
@@ -0,0 +1,79 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from neutron_lib.db import constants as db_const
+from oslo_utils import uuidutils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.api import base
+
+LONG_NAME_NG = 'z' * (db_const.NAME_FIELD_SIZE + 1)
+LONG_DESCRIPTION_NG = 'z' * (db_const.LONG_DESCRIPTION_FIELD_SIZE + 1)
+
+
+class PortsNegativeTestJSON(base.BaseNetworkTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(PortsNegativeTestJSON, cls).resource_setup()
+ cls.network = cls.create_network()
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('0cbd256a-a6d4-4afa-a039-44cc13704bab')
+ def test_add_port_with_too_long_name(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_port,
+ self.network, name=LONG_NAME_NG)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('e10da38c-1071-49c9-95c2-0c451e18ae31')
+ def test_add_port_with_too_long_description(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_port,
+ self.network, description=LONG_DESCRIPTION_NG)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('5b69a905-3a84-43a4-807a-1a67ab85caeb')
+ def test_add_port_with_nonexist_tenant_id(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_port,
+ self.network,
+ project_id=uuidutils.generate_uuid())
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('7cf473ae-7ec8-4834-ae17-9ef6ec6b8a32')
+ def test_add_port_with_nonexist_network_id(self):
+ network = self.network
+ # Copy and restore net ID so the cleanup will delete correct net
+ original_network_id = network['id']
+ network['id'] = uuidutils.generate_uuid()
+ self.assertRaises(lib_exc.NotFound,
+ self.create_port,
+ network)
+ network['id'] = original_network_id
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('cad2d349-25fa-490e-9675-cd2ea24164bc')
+ def test_add_port_with_nonexist_security_groups_id(self):
+ self.assertRaises(lib_exc.NotFound,
+ self.create_port,
+ self.network,
+ security_groups=[uuidutils.generate_uuid()])
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('9b0a4152-9aa4-4169-9b2c-579609e2fb4a')
+ def test_add_port_with_illegal_ip(self):
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_port,
+ self.network,
+                          allowed_address_pairs=[{"ip_address": "12.12.12.a"}])
diff --git a/neutron_tempest_plugin/api/test_qos.py b/neutron_tempest_plugin/api/test_qos.py
index 5fb0511..2929542 100644
--- a/neutron_tempest_plugin/api/test_qos.py
+++ b/neutron_tempest_plugin/api/test_qos.py
@@ -17,6 +17,7 @@
from neutron_lib.services.qos import constants as qos_consts
from tempest.common import utils
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -34,12 +35,32 @@
required_extensions = [qos_apidef.ALIAS]
+ @classmethod
+ def setup_clients(cls):
+ super(QosTestJSON, cls).setup_clients()
+ cls.qos_bw_limit_rule_client = \
+ cls.os_admin.qos_limit_bandwidth_rules_client
+
+ def setUp(self):
+ super(QosTestJSON, self).setUp()
+ self.policy_name = data_utils.rand_name(name='test', prefix='policy')
+
@staticmethod
def _get_driver_details(rule_type_details, driver_name):
for driver in rule_type_details['drivers']:
if driver['name'] == driver_name:
return driver
+ def _create_qos_bw_limit_rule(self, policy_id, rule_data):
+ rule = self.qos_bw_limit_rule_client.create_limit_bandwidth_rule(
+ qos_policy_id=policy_id,
+ **rule_data)['bandwidth_limit_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_bw_limit_rule_client.delete_limit_bandwidth_rule,
+ policy_id, rule['id'])
+ return rule
+
@decorators.idempotent_id('108fbdf7-3463-4e47-9871-d07f3dcf5bbb')
def test_create_policy(self):
policy = self.create_qos_policy(name='test-policy',
@@ -61,7 +82,7 @@
@decorators.idempotent_id('606a48e2-5403-4052-b40f-4d54b855af76')
@utils.requires_ext(extension="project-id", service="network")
def test_show_policy_has_project_id(self):
- policy = self.create_qos_policy(name='test-policy', shared=False)
+ policy = self.create_qos_policy(name=self.policy_name, shared=False)
body = self.admin_client.show_qos_policy(policy['id'])
show_policy = body['policy']
self.assertIn('project_id', show_policy)
@@ -70,17 +91,19 @@
@decorators.idempotent_id('f8d20e92-f06d-4805-b54f-230f77715815')
def test_list_policy_filter_by_name(self):
- self.create_qos_policy(name='test', description='test policy',
+ policy1 = 'test' + data_utils.rand_name("policy")
+ policy2 = 'test' + data_utils.rand_name("policy")
+ self.create_qos_policy(name=policy1, description='test policy',
shared=False)
- self.create_qos_policy(name='test2', description='test policy',
+ self.create_qos_policy(name=policy2, description='test policy',
shared=False)
policies = (self.admin_client.
- list_qos_policies(name='test')['policies'])
+ list_qos_policies(name=policy1)['policies'])
self.assertEqual(1, len(policies))
retrieved_policy = policies[0]
- self.assertEqual('test', retrieved_policy['name'])
+ self.assertEqual(policy1, retrieved_policy['name'])
@decorators.idempotent_id('dde0b449-a400-4a87-b5a5-4d1c413c917b')
def test_list_policy_sort_by_name(self):
@@ -111,7 +134,7 @@
@decorators.idempotent_id('8e88a54b-f0b2-4b7d-b061-a15d93c2c7d6')
def test_policy_update(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='',
shared=False,
project_id=self.admin_client.tenant_id)
@@ -127,7 +150,7 @@
@decorators.idempotent_id('6e880e0f-bbfc-4e54-87c6-680f90e1b618')
def test_policy_update_forbidden_for_regular_tenants_own_policy(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='',
shared=False,
project_id=self.client.tenant_id)
@@ -138,7 +161,7 @@
@decorators.idempotent_id('4ecfd7e7-47b6-4702-be38-be9235901a87')
def test_policy_update_forbidden_for_regular_tenants_foreign_policy(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='',
shared=False,
project_id=self.admin_client.tenant_id)
@@ -149,7 +172,7 @@
@decorators.idempotent_id('ee263db4-009a-4641-83e5-d0e83506ba4c')
def test_shared_policy_update(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='',
shared=True,
project_id=self.admin_client.tenant_id)
@@ -232,7 +255,7 @@
@decorators.idempotent_id('65b9ef75-1911-406a-bbdb-ca1d68d528b0')
def test_policy_association_with_admin_network(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
network = self.create_network('test network', shared=True,
@@ -244,7 +267,7 @@
@decorators.idempotent_id('1738de5d-0476-4163-9022-5e1b548c208e')
def test_policy_association_with_tenant_network(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=True)
network = self.create_network('test network',
@@ -264,7 +287,7 @@
@decorators.idempotent_id('1aa55a79-324f-47d9-a076-894a8fc2448b')
def test_policy_association_with_network_non_shared_policy(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
self.assertRaises(
@@ -274,7 +297,7 @@
@decorators.idempotent_id('09a9392c-1359-4cbb-989f-fb768e5834a8')
def test_policy_update_association_with_admin_network(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
network = self.create_network('test network', shared=True)
@@ -289,7 +312,7 @@
@decorators.idempotent_id('98fcd95e-84cf-4746-860e-44692e674f2e')
def test_policy_association_with_port_shared_policy(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=True)
network = self.create_network('test network', shared=True)
@@ -310,7 +333,7 @@
@decorators.idempotent_id('f53d961c-9fe5-4422-8b66-7add972c6031')
def test_policy_association_with_port_non_shared_policy(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
network = self.create_network('test network', shared=True)
@@ -321,7 +344,7 @@
@decorators.idempotent_id('f8163237-fba9-4db5-9526-bad6d2343c76')
def test_policy_update_association_with_port_shared_policy(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=True)
network = self.create_network('test network', shared=True)
@@ -336,7 +359,7 @@
@decorators.idempotent_id('18163237-8ba9-4db5-9525-bad6d2343c75')
def test_delete_not_allowed_if_policy_in_use_by_network(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=True)
self.create_network('test network', qos_policy_id=policy['id'],
@@ -347,7 +370,7 @@
@decorators.idempotent_id('24153230-84a9-4dd5-9525-bad6d2343c75')
def test_delete_not_allowed_if_policy_in_use_by_port(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=True)
network = self.create_network('test network', shared=True)
@@ -358,14 +381,12 @@
@decorators.idempotent_id('a2a5849b-dd06-4b18-9664-0b6828a1fc27')
def test_qos_policy_delete_with_rules(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- self.admin_client.create_bandwidth_limit_rule(
- policy['id'], 200, 1337)['bandwidth_limit_rule']
-
+ self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 200, 'max_burst_kbps': 1337})
self.admin_client.delete_qos_policy(policy['id'])
-
with testtools.ExpectedException(exceptions.NotFound):
self.admin_client.show_qos_policy(policy['id'])
@@ -389,7 +410,7 @@
@decorators.idempotent_id('18d94f22-b9d5-4390-af12-d30a0cfc4cd3')
def test_default_policy_creating_network_without_policy(self):
project_id = self.create_project()['id']
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
project_id=project_id,
is_default=True)
network = self.create_network('test network', client=self.admin_client,
@@ -416,7 +437,7 @@
@decorators.idempotent_id('06060880-2956-4c16-9a63-f284c3879229')
def test_user_create_port_with_admin_qos_policy(self):
qos_policy = self.create_qos_policy(
- name='test-policy',
+ name=self.policy_name,
project_id=self.admin_client.tenant_id,
shared=False)
network = self.create_network(
@@ -429,14 +450,37 @@
class QosBandwidthLimitRuleTestJSON(base.BaseAdminNetworkTest):
+ credentials = ['primary', 'admin']
direction = None
required_extensions = [qos_apidef.ALIAS]
@classmethod
+ def setup_clients(cls):
+ super(QosBandwidthLimitRuleTestJSON, cls).setup_clients()
+ cls.qos_bw_limit_rule_client = \
+ cls.os_admin.qos_limit_bandwidth_rules_client
+ cls.qos_bw_limit_rule_client_primary = \
+ cls.os_primary.qos_limit_bandwidth_rules_client
+
+ @classmethod
@base.require_qos_rule_type(qos_consts.RULE_TYPE_BANDWIDTH_LIMIT)
def resource_setup(cls):
super(QosBandwidthLimitRuleTestJSON, cls).resource_setup()
+ def setUp(self):
+ super(QosBandwidthLimitRuleTestJSON, self).setUp()
+ self.policy_name = data_utils.rand_name(name='test', prefix='policy')
+
+ def _create_qos_bw_limit_rule(self, policy_id, rule_data):
+ rule = self.qos_bw_limit_rule_client.create_limit_bandwidth_rule(
+ qos_policy_id=policy_id,
+ **rule_data)['bandwidth_limit_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_bw_limit_rule_client.delete_limit_bandwidth_rule,
+ policy_id, rule['id'])
+ return rule
+
@property
def opposite_direction(self):
if self.direction == "ingress":
@@ -448,27 +492,27 @@
@decorators.idempotent_id('8a59b00b-3e9c-4787-92f8-93a5cdf5e378')
def test_rule_create(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- rule = self.create_qos_bandwidth_limit_rule(
- policy_id=policy['id'],
- max_kbps=200,
- max_burst_kbps=1337,
- direction=self.direction)
+ rule = self._create_qos_bw_limit_rule(
+ policy['id'],
+ {'max_kbps': 200, 'max_burst_kbps': 1337, 'direction': 'ingress'})
# Test 'show rule'
- retrieved_rule = self.admin_client.show_bandwidth_limit_rule(
- policy['id'], rule['id'])
+ retrieved_rule = \
+ self.qos_bw_limit_rule_client.show_limit_bandwidth_rule(
+ policy['id'], rule['id'])
+
retrieved_rule = retrieved_rule['bandwidth_limit_rule']
self.assertEqual(rule['id'], retrieved_rule['id'])
self.assertEqual(200, retrieved_rule['max_kbps'])
self.assertEqual(1337, retrieved_rule['max_burst_kbps'])
- if self.direction:
- self.assertEqual(self.direction, retrieved_rule['direction'])
+ self.assertEqual('ingress', retrieved_rule['direction'])
# Test 'list rules'
- rules = self.admin_client.list_bandwidth_limit_rules(policy['id'])
+ rules = self.qos_bw_limit_rule_client.list_limit_bandwidth_rules(
+ policy['id'])
rules = rules['bandwidth_limit_rules']
rules_ids = [r['id'] for r in rules]
self.assertIn(rule['id'], rules_ids)
@@ -483,39 +527,37 @@
@decorators.idempotent_id('8a59b00b-ab01-4787-92f8-93a5cdf5e378')
def test_rule_create_fail_for_the_same_type(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
- max_kbps=200,
- max_burst_kbps=1337,
- direction=self.direction)
+ self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 200, 'max_burst_kbps': 1337})
- self.assertRaises(exceptions.Conflict,
- self.create_qos_bandwidth_limit_rule,
- policy_id=policy['id'],
- max_kbps=201, max_burst_kbps=1338,
- direction=self.direction)
+ self.assertRaises(
+ exceptions.Conflict,
+ self._create_qos_bw_limit_rule,
+ policy['id'],
+ {'max_kbps': 201, 'max_burst_kbps': 1338})
@decorators.idempotent_id('149a6988-2568-47d2-931e-2dbc858943b3')
def test_rule_update(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
- max_kbps=1,
- max_burst_kbps=1,
- direction=self.direction)
+ rule = self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 1, 'max_burst_kbps': 1})
- self.admin_client.update_bandwidth_limit_rule(
- policy['id'],
- rule['id'],
- max_kbps=200,
- max_burst_kbps=1337,
- direction=self.opposite_direction)
-
- retrieved_policy = self.admin_client.show_bandwidth_limit_rule(
- policy['id'], rule['id'])
+ if self.opposite_direction:
+ self.qos_bw_limit_rule_client.update_limit_bandwidth_rule(
+ policy['id'], rule['id'],
+ **{'max_kbps': 200, 'max_burst_kbps': 1337,
+ 'direction': self.opposite_direction})
+ else:
+ self.qos_bw_limit_rule_client.update_limit_bandwidth_rule(
+ policy['id'], rule['id'],
+ **{'max_kbps': 200, 'max_burst_kbps': 1337})
+ retrieved_policy = self.qos_bw_limit_rule_client.\
+ show_limit_bandwidth_rule(policy['id'], rule['id'])
retrieved_policy = retrieved_policy['bandwidth_limit_rule']
self.assertEqual(200, retrieved_policy['max_kbps'])
self.assertEqual(1337, retrieved_policy['max_burst_kbps'])
@@ -525,86 +567,84 @@
@decorators.idempotent_id('67ee6efd-7b33-4a68-927d-275b4f8ba958')
def test_rule_delete(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- rule = self.admin_client.create_bandwidth_limit_rule(
- policy['id'], 200, 1337, self.direction)['bandwidth_limit_rule']
-
- retrieved_policy = self.admin_client.show_bandwidth_limit_rule(
- policy['id'], rule['id'])
+ rule = self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 200, 'max_burst_kbps': 1337})
+ retrieved_policy = \
+ self.qos_bw_limit_rule_client.show_limit_bandwidth_rule(
+ policy['id'], rule['id'])
retrieved_policy = retrieved_policy['bandwidth_limit_rule']
self.assertEqual(rule['id'], retrieved_policy['id'])
-
- self.admin_client.delete_bandwidth_limit_rule(policy['id'], rule['id'])
- self.assertRaises(exceptions.NotFound,
- self.admin_client.show_bandwidth_limit_rule,
- policy['id'], rule['id'])
+ self.qos_bw_limit_rule_client.delete_limit_bandwidth_rule(
+ policy['id'], rule['id'])
+ self.assertRaises(
+ exceptions.NotFound,
+ self.qos_bw_limit_rule_client.show_limit_bandwidth_rule,
+ policy['id'], rule['id'])
@decorators.idempotent_id('f211222c-5808-46cb-a961-983bbab6b852')
def test_rule_create_rule_nonexistent_policy(self):
self.assertRaises(
exceptions.NotFound,
- self.create_qos_bandwidth_limit_rule,
- 'policy', 200, 1337, self.direction)
+ self._create_qos_bw_limit_rule,
+ 'policy', {'max_kbps': 200, 'max_burst_kbps': 1337})
@decorators.idempotent_id('a4a2e7ad-786f-4927-a85a-e545a93bd274')
def test_rule_create_forbidden_for_regular_tenants(self):
self.assertRaises(
exceptions.Forbidden,
- self.client.create_bandwidth_limit_rule,
- 'policy', 1, 2, self.direction)
+ self.qos_bw_limit_rule_client_primary.create_limit_bandwidth_rule,
+ 'policy', **{'max_kbps': 1, 'max_burst_kbps': 2})
@decorators.idempotent_id('1bfc55d9-6fd8-4293-ab3a-b1d69bf7cd2e')
def test_rule_update_forbidden_for_regular_tenants_own_policy(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False,
project_id=self.client.tenant_id)
- rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
- max_kbps=1,
- max_burst_kbps=1,
- direction=self.direction)
+ rule = self._create_qos_bw_limit_rule(
+ policy['id'],
+ {'max_kbps': 1, 'max_burst_kbps': 1})
self.assertRaises(
exceptions.Forbidden,
- self.client.update_bandwidth_limit_rule,
- policy['id'], rule['id'], max_kbps=2, max_burst_kbps=4)
+ self.qos_bw_limit_rule_client_primary.update_limit_bandwidth_rule,
+ policy['id'], rule['id'], **{'max_kbps': 2, 'max_burst_kbps': 4})
@decorators.idempotent_id('9a607936-4b6f-4c2f-ad21-bd5b3d4fc91f')
def test_rule_update_forbidden_for_regular_tenants_foreign_policy(self):
- policy = self.create_qos_policy(name='test-policy',
- description='test policy',
- shared=False,
- project_id=self.admin_client.tenant_id)
- rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
- max_kbps=1,
- max_burst_kbps=1,
- direction=self.direction)
+ policy = self.create_qos_policy(
+ name=self.policy_name,
+ description='test policy',
+ shared=False,
+ project_id=self.admin_client.tenant_id)
+ rule = self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 1, 'max_burst_kbps': 1})
self.assertRaises(
exceptions.NotFound,
- self.client.update_bandwidth_limit_rule,
- policy['id'], rule['id'], max_kbps=2, max_burst_kbps=4)
+ self.qos_bw_limit_rule_client_primary.update_limit_bandwidth_rule,
+ policy['id'], rule['id'], **{'max_kbps': 2, 'max_burst_kbps': 4})
@decorators.idempotent_id('ce0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2')
def test_get_rules_by_policy(self):
- policy1 = self.create_qos_policy(name='test-policy1',
- description='test policy1',
- shared=False)
- rule1 = self.create_qos_bandwidth_limit_rule(policy_id=policy1['id'],
- max_kbps=200,
- max_burst_kbps=1337,
- direction=self.direction)
+ policy1 = self.create_qos_policy(
+ name='test-policy1',
+ description='test policy1',
+ shared=False)
+ rule1 = self._create_qos_bw_limit_rule(
+ policy1['id'], {'max_kbps': 200, 'max_burst_kbps': 1337})
- policy2 = self.create_qos_policy(name='test-policy2',
- description='test policy2',
- shared=False)
- rule2 = self.create_qos_bandwidth_limit_rule(policy_id=policy2['id'],
- max_kbps=5000,
- max_burst_kbps=2523,
- direction=self.direction)
+ policy2 = self.create_qos_policy(
+ name='test-policy2',
+ description='test policy2',
+ shared=False)
+ rule2 = self._create_qos_bw_limit_rule(
+ policy2['id'], {'max_kbps': 5000, 'max_burst_kbps': 2523})
# Test 'list rules'
- rules = self.admin_client.list_bandwidth_limit_rules(policy1['id'])
+ rules = self.qos_bw_limit_rule_client.list_limit_bandwidth_rules(
+ policy1['id'])
rules = rules['bandwidth_limit_rules']
rules_ids = [r['id'] for r in rules]
self.assertIn(rule1['id'], rules_ids)
@@ -619,12 +659,11 @@
# As an admin create an non shared QoS policy,add a rule
# and associate it with a network
self.network = self.create_network()
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy for attach',
shared=False)
-
- self.admin_client.create_bandwidth_limit_rule(
- policy['id'], 1024, 1024)
+ self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 1024, 'max_burst_kbps': 1024})
self.admin_client.update_network(
self.network['id'], qos_policy_id=policy['id'])
@@ -671,44 +710,45 @@
def resource_setup(cls):
super(QosBandwidthLimitRuleWithDirectionTestJSON, cls).resource_setup()
+ def setUp(self):
+ super(QosBandwidthLimitRuleWithDirectionTestJSON, self).setUp()
+ self.policy_name = data_utils.rand_name(name='test', prefix='policy')
+
@decorators.idempotent_id('c8cbe502-0f7e-11ea-8d71-362b9e155667')
def test_create_policy_with_multiple_rules(self):
# Create a policy with multiple rules
- policy = self.create_qos_policy(name='test-policy1',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy1',
shared=False)
- rule1 = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
- max_kbps=1024,
- max_burst_kbps=1024,
- direction=n_constants.
- EGRESS_DIRECTION)
- rule2 = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
- max_kbps=1024,
- max_burst_kbps=1024,
- direction=n_constants.
- INGRESS_DIRECTION)
+ rule1 = self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 1024, 'max_burst_kbps': 1024,
+ 'direction': n_constants.EGRESS_DIRECTION})
+ rule2 = self._create_qos_bw_limit_rule(
+ policy['id'], {'max_kbps': 1024, 'max_burst_kbps': 1024,
+ 'direction': n_constants.INGRESS_DIRECTION})
# Check that the rules were added to the policy
- rules = self.admin_client.list_bandwidth_limit_rules(
+ rules = self.qos_bw_limit_rule_client.list_limit_bandwidth_rules(
policy['id'])['bandwidth_limit_rules']
+
rules_ids = [rule['id'] for rule in rules]
self.assertIn(rule1['id'], rules_ids)
self.assertIn(rule2['id'], rules_ids)
# Check that the rules creation fails for the same rule types
- self.assertRaises(exceptions.Conflict,
- self.create_qos_bandwidth_limit_rule,
- policy_id=policy['id'],
- max_kbps=1025,
- max_burst_kbps=1025,
- direction=n_constants.EGRESS_DIRECTION)
+ self.assertRaises(
+ exceptions.Conflict,
+ self._create_qos_bw_limit_rule,
+ policy['id'],
+ {'max_kbps': 1025, 'max_burst_kbps': 1025,
+ 'direction': n_constants.EGRESS_DIRECTION})
- self.assertRaises(exceptions.Conflict,
- self.create_qos_bandwidth_limit_rule,
- policy_id=policy['id'],
- max_kbps=1025,
- max_burst_kbps=1025,
- direction=n_constants.INGRESS_DIRECTION)
+ self.assertRaises(
+ exceptions.Conflict,
+ self._create_qos_bw_limit_rule,
+ policy['id'],
+ {'max_kbps': 1025, 'max_burst_kbps': 1025,
+ 'direction': n_constants.INGRESS_DIRECTION})
class RbacSharedQosPoliciesTest(base.BaseAdminNetworkTest):
@@ -976,9 +1016,13 @@
def resource_setup(cls):
super(QosDscpMarkingRuleTestJSON, cls).resource_setup()
+ def setUp(self):
+ super(QosDscpMarkingRuleTestJSON, self).setUp()
+ self.policy_name = data_utils.rand_name(name='test', prefix='policy')
+
@decorators.idempotent_id('f5cbaceb-5829-497c-9c60-ad70969e9a08')
def test_rule_create(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
rule = self.admin_client.create_dscp_marking_rule(
@@ -1007,7 +1051,7 @@
@decorators.idempotent_id('08553ffe-030f-4037-b486-7e0b8fb9385a')
def test_rule_create_fail_for_the_same_type(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
self.admin_client.create_dscp_marking_rule(
@@ -1020,7 +1064,7 @@
@decorators.idempotent_id('76f632e5-3175-4408-9a32-3625e599c8a2')
def test_rule_update(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
rule = self.admin_client.create_dscp_marking_rule(
@@ -1036,7 +1080,7 @@
@decorators.idempotent_id('74f81904-c35f-48a3-adae-1f5424cb3c18')
def test_rule_delete(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
rule = self.admin_client.create_dscp_marking_rule(
@@ -1068,7 +1112,7 @@
@decorators.idempotent_id('33646b08-4f05-4493-a48a-bde768a18533')
def test_invalid_rule_create(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
self.assertRaises(
@@ -1135,7 +1179,7 @@
# Create QoS policy
dscp_policy_id = self.create_qos_policy(
- name='test-policy',
+ name=self.policy_name,
description='test-qos-policy',
shared=True)['id']
@@ -1175,26 +1219,40 @@
def resource_setup(cls):
super(QosMinimumBandwidthRuleTestJSON, cls).resource_setup()
+ @classmethod
+ def setup_clients(cls):
+ super(QosMinimumBandwidthRuleTestJSON, cls).setup_clients()
+ cls.qos_min_bw_rules_client = \
+ cls.os_admin.qos_minimum_bandwidth_rules_client
+ cls.qos_min_bw_rules_client_primary = \
+ cls.os_primary.qos_minimum_bandwidth_rules_client
+
+ def setUp(self):
+ super(QosMinimumBandwidthRuleTestJSON, self).setUp()
+ self.policy_name = data_utils.rand_name(name='test', prefix='policy')
+
@decorators.idempotent_id('aa59b00b-3e9c-4787-92f8-93a5cdf5e378')
def test_rule_create(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- rule = self.admin_client.create_minimum_bandwidth_rule(
- policy_id=policy['id'],
- direction=self.DIRECTION_EGRESS,
- min_kbps=1138)[self.RULE_NAME]
+ rule = self.qos_min_bw_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=policy['id'],
+ **{'direction': self.DIRECTION_EGRESS,
+ 'min_kbps': 1138})[self.RULE_NAME]
# Test 'show rule'
- retrieved_rule = self.admin_client.show_minimum_bandwidth_rule(
- policy['id'], rule['id'])
+ retrieved_rule = \
+ self.qos_min_bw_rules_client.show_minimum_bandwidth_rule(
+ policy['id'], rule['id'])
retrieved_rule = retrieved_rule[self.RULE_NAME]
self.assertEqual(rule['id'], retrieved_rule['id'])
self.assertEqual(1138, retrieved_rule['min_kbps'])
self.assertEqual(self.DIRECTION_EGRESS, retrieved_rule['direction'])
# Test 'list rules'
- rules = self.admin_client.list_minimum_bandwidth_rules(policy['id'])
+ rules = self.qos_min_bw_rules_client.list_minimum_bandwidth_rules(
+ policy['id'])
rules = rules[self.RULES_NAME]
rules_ids = [r['id'] for r in rules]
self.assertIn(rule['id'], rules_ids)
@@ -1209,39 +1267,43 @@
@decorators.idempotent_id('266d9b87-e51c-48bd-9aa7-8269573621be')
def test_rule_create_fail_for_missing_min_kbps(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- self.assertRaises(exceptions.BadRequest,
- self.admin_client.create_minimum_bandwidth_rule,
- policy_id=policy['id'],
- direction=self.DIRECTION_EGRESS)
+ self.assertRaises(
+ exceptions.BadRequest,
+ self.qos_min_bw_rules_client.create_minimum_bandwidth_rule,
+ qos_policy_id=policy['id'],
+ **{'direction': self.DIRECTION_EGRESS})
@decorators.idempotent_id('aa59b00b-ab01-4787-92f8-93a5cdf5e378')
def test_rule_create_fail_for_the_same_type(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- self.admin_client.create_minimum_bandwidth_rule(
- policy_id=policy['id'],
- direction=self.DIRECTION_EGRESS, min_kbps=200)
+ self.qos_min_bw_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=policy['id'],
+ **{'direction': self.DIRECTION_EGRESS,
+ 'min_kbps': 200})
- self.assertRaises(exceptions.Conflict,
- self.admin_client.create_minimum_bandwidth_rule,
- policy_id=policy['id'],
- direction=self.DIRECTION_EGRESS, min_kbps=201)
+ self.assertRaises(
+ exceptions.Conflict,
+ self.qos_min_bw_rules_client.create_minimum_bandwidth_rule,
+ qos_policy_id=policy['id'],
+ **{'direction': self.DIRECTION_EGRESS,
+ 'min_kbps': 201})
@decorators.idempotent_id('35baf998-ae65-495c-9902-35a0d11e8936')
@utils.requires_ext(extension="qos-bw-minimum-ingress",
service="network")
def test_rule_create_pass_for_direction_ingress(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- self.admin_client.create_minimum_bandwidth_rule(
- policy_id=policy['id'],
- direction=self.DIRECTION_INGRESS,
- min_kbps=201)
+ self.qos_min_bw_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=policy['id'],
+ **{'direction': self.DIRECTION_INGRESS,
+ 'min_kbps': 201})
retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
policy_rules = retrieved_policy['policy']['rules']
@@ -1252,76 +1314,308 @@
@decorators.idempotent_id('a49a6988-2568-47d2-931e-2dbc858943b3')
def test_rule_update(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- rule = self.admin_client.create_minimum_bandwidth_rule(
- policy_id=policy['id'],
- direction=self.DIRECTION_EGRESS,
- min_kbps=300)[self.RULE_NAME]
+ rule = self.qos_min_bw_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=policy['id'],
+ **{'direction': self.DIRECTION_EGRESS,
+ 'min_kbps': 300})[self.RULE_NAME]
- self.admin_client.update_minimum_bandwidth_rule(policy['id'],
- rule['id'], min_kbps=350, direction=self.DIRECTION_EGRESS)
+ self.qos_min_bw_rules_client.update_minimum_bandwidth_rule(
+ policy['id'], rule['id'],
+ **{'min_kbps': 350, 'direction': self.DIRECTION_EGRESS})
- retrieved_policy = self.admin_client.show_minimum_bandwidth_rule(
- policy['id'], rule['id'])
+ retrieved_policy = \
+ self.qos_min_bw_rules_client.show_minimum_bandwidth_rule(
+ policy['id'], rule['id'])
retrieved_policy = retrieved_policy[self.RULE_NAME]
self.assertEqual(350, retrieved_policy['min_kbps'])
self.assertEqual(self.DIRECTION_EGRESS, retrieved_policy['direction'])
@decorators.idempotent_id('a7ee6efd-7b33-4a68-927d-275b4f8ba958')
def test_rule_delete(self):
- policy = self.create_qos_policy(name='test-policy',
+ policy = self.create_qos_policy(name=self.policy_name,
description='test policy',
shared=False)
- rule = self.admin_client.create_minimum_bandwidth_rule(
- policy['id'], self.DIRECTION_EGRESS, min_kbps=200)[self.RULE_NAME]
+ rule = self.qos_min_bw_rules_client.create_minimum_bandwidth_rule(
+ policy['id'],
+ **{'direction': self.DIRECTION_EGRESS,
+ 'min_kbps': 200})[self.RULE_NAME]
- retrieved_policy = self.admin_client.show_minimum_bandwidth_rule(
- policy['id'], rule['id'])
+ retrieved_policy = \
+ self.qos_min_bw_rules_client.show_minimum_bandwidth_rule(
+ policy['id'], rule['id'])
retrieved_policy = retrieved_policy[self.RULE_NAME]
self.assertEqual(rule['id'], retrieved_policy['id'])
- self.admin_client.delete_minimum_bandwidth_rule(policy['id'],
- rule['id'])
- self.assertRaises(exceptions.NotFound,
- self.admin_client.show_minimum_bandwidth_rule,
- policy['id'], rule['id'])
+ self.qos_min_bw_rules_client.delete_minimum_bandwidth_rule(
+ policy['id'], rule['id'])
+ self.assertRaises(
+ exceptions.NotFound,
+ self.qos_min_bw_rules_client.show_minimum_bandwidth_rule,
+ policy['id'], rule['id'])
@decorators.idempotent_id('a211222c-5808-46cb-a961-983bbab6b852')
def test_rule_create_rule_nonexistent_policy(self):
self.assertRaises(
exceptions.NotFound,
- self.admin_client.create_minimum_bandwidth_rule,
- 'policy', self.DIRECTION_EGRESS, min_kbps=200)
+ self.qos_min_bw_rules_client.create_minimum_bandwidth_rule,
+ 'policy',
+ **{'direction': self.DIRECTION_EGRESS, 'min_kbps': 200})
@decorators.idempotent_id('b4a2e7ad-786f-4927-a85a-e545a93bd274')
def test_rule_create_forbidden_for_regular_tenants(self):
self.assertRaises(
exceptions.Forbidden,
- self.client.create_minimum_bandwidth_rule,
- 'policy', self.DIRECTION_EGRESS, min_kbps=300)
+ self.qos_min_bw_rules_client_primary.create_minimum_bandwidth_rule,
+ 'policy', **{'direction': self.DIRECTION_EGRESS, 'min_kbps': 300})
@decorators.idempotent_id('de0bd0c2-54d9-4e29-85f1-cfb36ac3ebe2')
def test_get_rules_by_policy(self):
policy1 = self.create_qos_policy(name='test-policy1',
description='test policy1',
shared=False)
- rule1 = self.admin_client.create_minimum_bandwidth_rule(
- policy_id=policy1['id'],
- direction=self.DIRECTION_EGRESS,
- min_kbps=200)[self.RULE_NAME]
+ rule1 = self.qos_min_bw_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=policy1['id'],
+ **{'direction': self.DIRECTION_EGRESS,
+ 'min_kbps': 200})[self.RULE_NAME]
policy2 = self.create_qos_policy(name='test-policy2',
description='test policy2',
shared=False)
- rule2 = self.admin_client.create_minimum_bandwidth_rule(
- policy_id=policy2['id'],
- direction=self.DIRECTION_EGRESS,
- min_kbps=5000)[self.RULE_NAME]
+ rule2 = self.qos_min_bw_rules_client.create_minimum_bandwidth_rule(
+ qos_policy_id=policy2['id'],
+ **{'direction': self.DIRECTION_EGRESS,
+ 'min_kbps': 5000})[self.RULE_NAME]
# Test 'list rules'
- rules = self.admin_client.list_minimum_bandwidth_rules(policy1['id'])
+ rules = self.qos_min_bw_rules_client.list_minimum_bandwidth_rules(
+ policy1['id'])
+ rules = rules[self.RULES_NAME]
+ rules_ids = [r['id'] for r in rules]
+ self.assertIn(rule1['id'], rules_ids)
+ self.assertNotIn(rule2['id'], rules_ids)
+
+
+class QosMinimumPpsRuleTestJSON(base.BaseAdminNetworkTest):
+ required_extensions = [qos_apidef.ALIAS]
+
+ @classmethod
+ @utils.requires_ext(service='network',
+ extension='port-resource-request-groups')
+ def resource_setup(cls):
+ super(QosMinimumPpsRuleTestJSON, cls).resource_setup()
+
+ @classmethod
+ def setup_clients(cls):
+ super(QosMinimumPpsRuleTestJSON, cls).setup_clients()
+ cls.min_pps_client = cls.os_admin.qos_minimum_packet_rate_rules_client
+ cls.min_pps_client_primary = \
+ cls.os_primary.qos_minimum_packet_rate_rules_client
+
+ def setUp(self):
+ super(QosMinimumPpsRuleTestJSON, self).setUp()
+ self.policy_name = data_utils.rand_name(name='test', prefix='policy')
+ self.RULE_NAME = qos_consts.RULE_TYPE_MINIMUM_PACKET_RATE + "_rule"
+ self.RULES_NAME = self.RULE_NAME + "s"
+
+ def _create_qos_min_pps_rule(self, policy_id, rule_data):
+ rule = self.min_pps_client.create_minimum_packet_rate_rule(
+ policy_id, **rule_data)['minimum_packet_rate_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.min_pps_client.delete_minimum_packet_rate_rule,
+ policy_id, rule['id'])
+ return rule
+
+ @decorators.idempotent_id('66a5b9b4-d4f9-4af8-b238-9e1881b78487')
+ def test_rule_create(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ rule = self._create_qos_min_pps_rule(
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 1138})
+
+ # Test 'show rule'
+ retrieved_rule = self.min_pps_client.show_minimum_packet_rate_rule(
+ policy['id'], rule['id'])[self.RULE_NAME]
+ self.assertEqual(rule['id'], retrieved_rule['id'])
+ self.assertEqual(1138, retrieved_rule[qos_consts.MIN_KPPS])
+ self.assertEqual(n_constants.EGRESS_DIRECTION,
+ retrieved_rule[qos_consts.DIRECTION])
+
+ # Test 'list rules'
+ rules = self.min_pps_client.list_minimum_packet_rate_rules(
+ policy['id'])
+ rules = rules[self.RULES_NAME]
+ rules_ids = [r['id'] for r in rules]
+ self.assertIn(rule['id'], rules_ids)
+
+ # Test 'show policy'
+ retrieved_policy = self.admin_client.show_qos_policy(policy['id'])
+ policy_rules = retrieved_policy['policy']['rules']
+ self.assertEqual(1, len(policy_rules))
+ self.assertEqual(rule['id'], policy_rules[0]['id'])
+ self.assertEqual('minimum_packet_rate',
+ policy_rules[0]['type'])
+
+ @decorators.idempotent_id('6b656b57-d2bf-47f9-89a9-1baad1bd5418')
+ def test_rule_create_fail_for_missing_min_kpps(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ self.assertRaises(exceptions.BadRequest,
+ self._create_qos_min_pps_rule,
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION})
+
+ @decorators.idempotent_id('f41213e5-2ab8-4916-b106-38d2cac5e18c')
+ def test_rule_create_fail_for_the_same_type(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ self._create_qos_min_pps_rule(policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 200})
+
+ self.assertRaises(exceptions.Conflict,
+ self._create_qos_min_pps_rule,
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 201})
+
+ @decorators.idempotent_id('ceb8e41e-3d72-11ec-a446-d7faae6daec2')
+ def test_rule_create_any_direction_when_egress_direction_exists(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ self._create_qos_min_pps_rule(policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 200})
+
+ self.assertRaises(exceptions.Conflict,
+ self._create_qos_min_pps_rule,
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.ANY_DIRECTION,
+ qos_consts.MIN_KPPS: 201})
+
+ @decorators.idempotent_id('a147a71e-3d7b-11ec-8097-278b1afd5fa2')
+ def test_rule_create_egress_direction_when_any_direction_exists(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ self._create_qos_min_pps_rule(policy['id'],
+ {qos_consts.DIRECTION: n_constants.ANY_DIRECTION,
+ qos_consts.MIN_KPPS: 200})
+
+ self.assertRaises(exceptions.Conflict,
+ self._create_qos_min_pps_rule,
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 201})
+
+ @decorators.idempotent_id('522ed09a-1d7f-4c1b-9195-61f19caf916f')
+ def test_rule_update(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ rule = self._create_qos_min_pps_rule(
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 300})
+
+ self.min_pps_client.update_minimum_packet_rate_rule(
+ policy['id'], rule['id'],
+ **{qos_consts.MIN_KPPS: 350,
+ qos_consts.DIRECTION: n_constants.ANY_DIRECTION})
+
+ retrieved_rule = self.min_pps_client.show_minimum_packet_rate_rule(
+ policy['id'], rule['id'])[self.RULE_NAME]
+ self.assertEqual(350, retrieved_rule[qos_consts.MIN_KPPS])
+ self.assertEqual(n_constants.ANY_DIRECTION,
+ retrieved_rule[qos_consts.DIRECTION])
+
+ @decorators.idempotent_id('a020e186-3d60-11ec-88ca-d7f5eec22764')
+ def test_rule_update_direction_conflict(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ rule1 = self._create_qos_min_pps_rule(
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 300})
+
+ rule2 = self._create_qos_min_pps_rule(
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.INGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 300})
+
+ retrieved_rule1 = self.min_pps_client.show_minimum_packet_rate_rule(
+ policy['id'], rule1['id'])[self.RULE_NAME]
+ self.assertEqual(n_constants.EGRESS_DIRECTION,
+ retrieved_rule1[qos_consts.DIRECTION])
+ retrieved_rule2 = self.min_pps_client.show_minimum_packet_rate_rule(
+ policy['id'], rule2['id'])[self.RULE_NAME]
+ self.assertEqual(n_constants.INGRESS_DIRECTION,
+ retrieved_rule2[qos_consts.DIRECTION])
+
+ self.assertRaises(exceptions.Conflict,
+ self.min_pps_client.update_minimum_packet_rate_rule,
+ policy['id'], rule2['id'],
+ **{qos_consts.DIRECTION: n_constants.ANY_DIRECTION})
+
+ @decorators.idempotent_id('c49018b6-d358-49a1-a94b-d53224165045')
+ def test_rule_delete(self):
+ policy = self.create_qos_policy(name=self.policy_name,
+ description='test policy',
+ shared=False)
+ rule = self._create_qos_min_pps_rule(
+ policy['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 200})
+
+ retrieved_rule = self.min_pps_client.show_minimum_packet_rate_rule(
+ policy['id'], rule['id'])[self.RULE_NAME]
+ self.assertEqual(rule['id'], retrieved_rule['id'])
+
+ self.min_pps_client.delete_minimum_packet_rate_rule(policy['id'],
+ rule['id'])
+ self.assertRaises(exceptions.NotFound,
+ self.min_pps_client.show_minimum_packet_rate_rule,
+ policy['id'], rule['id'])
+
+ @decorators.idempotent_id('1a6b6128-3d3e-11ec-bf49-57b326d417c0')
+ def test_rule_create_forbidden_for_regular_tenants(self):
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.min_pps_client_primary.create_minimum_packet_rate_rule,
+ 'policy', **{qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 300})
+
+ @decorators.idempotent_id('1b94f4e2-3d3e-11ec-bb21-6f98e4044b8b')
+ def test_get_rules_by_policy(self):
+ policy1 = self.create_qos_policy(name='test-policy1',
+ description='test policy1',
+ shared=False)
+ rule1 = self._create_qos_min_pps_rule(
+ policy1['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 200})
+
+ policy2 = self.create_qos_policy(name='test-policy2',
+ description='test policy2',
+ shared=False)
+ rule2 = self._create_qos_min_pps_rule(
+ policy2['id'],
+ {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 5000})
+
+ # Test 'list rules'
+ rules = self.min_pps_client.list_minimum_packet_rate_rules(
+ policy1['id'])
rules = rules[self.RULES_NAME]
rules_ids = [r['id'] for r in rules]
self.assertIn(rule1['id'], rules_ids)
diff --git a/neutron_tempest_plugin/api/test_qos_negative.py b/neutron_tempest_plugin/api/test_qos_negative.py
index 8432c6a..1f3ff05 100644
--- a/neutron_tempest_plugin/api/test_qos_negative.py
+++ b/neutron_tempest_plugin/api/test_qos_negative.py
@@ -11,7 +11,10 @@
# under the License.
from neutron_lib.api.definitions import qos as qos_apidef
+from neutron_lib import constants as n_constants
from neutron_lib.db import constants as db_const
+from neutron_lib.services.qos import constants as qos_consts
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -52,8 +55,10 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('0e85f3e4-7a93-4187-b847-8f4e835aae1b')
def test_update_policy_with_too_long_name(self):
- policy = self.create_qos_policy(name='test', description='test policy',
- shared=False)
+ policy = self.create_qos_policy(
+ name=data_utils.rand_name('test', 'policy'),
+ description='test policy',
+ shared=False)
self.assertRaises(lib_exc.BadRequest,
self.client.update_qos_policy, policy['id'],
name=LONG_NAME_NG)
@@ -61,40 +66,207 @@
@decorators.attr(type='negative')
@decorators.idempotent_id('925c7eaf-474b-4a02-a4ba-76a9f82bc45a')
def test_update_policy_with_too_long_description(self):
- policy = self.create_qos_policy(name='test', description='test policy',
- shared=False)
+ policy = self.create_qos_policy(
+ name=data_utils.rand_name('test', 'policy'),
+ description='test policy',
+ shared=False)
self.assertRaises(lib_exc.BadRequest,
self.client.update_qos_policy, policy['id'],
description=LONG_DESCRIPTION_NG)
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('88b54ab0-804b-446c-bc19-8e54222d70ef')
+ def test_get_non_existent_qos_policy(self):
+ non_exist_id = data_utils.rand_name('qos_policy')
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_client.show_qos_policy, non_exist_id)
-class QosBandwidthLimitRuleNegativeTestJSON(base.BaseAdminNetworkTest):
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('21050859-1284-4bf5-b05a-13846f83988f')
+ def test_update_non_existent_qos_policy(self):
+ non_exist_id = data_utils.rand_name('qos_policy')
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_client.update_qos_policy, non_exist_id,
+ shared=False)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('09e435b7-44d3-4f9d-8aa8-c295d46b5866')
+ def test_delete_non_existent_qos_policy(self):
+ non_exist_id = data_utils.rand_name('qos_policy')
+ self.assertRaises(lib_exc.NotFound,
+ self.admin_client.delete_qos_policy, non_exist_id)
+
+
+class QosRuleNegativeBaseTestJSON(base.BaseAdminNetworkTest):
required_extensions = [qos_apidef.ALIAS]
- @decorators.attr(type='negative')
- @decorators.idempotent_id('e9ce8042-c828-4cb9-b1f1-85bd35e6553a')
- def test_rule_update_rule_nonexistent_policy(self):
+ def _test_rule_update_rule_nonexistent_policy(self, create_params,
+ update_params):
non_exist_id = data_utils.rand_name('qos_policy')
- policy = self.create_qos_policy(name='test-policy',
- description='test policy',
- shared=False)
- rule = self.create_qos_bandwidth_limit_rule(policy_id=policy['id'],
- max_kbps=1,
- max_burst_kbps=1)
+ policy = self.create_qos_policy(
+ name=data_utils.rand_name('test', 'policy'),
+ description='test policy',
+ shared=False)
+ rule = self.rule_create_m(policy['id'], **create_params)
+ if "minimum_bandwidth_rule" in rule.keys():
+ rule_id = rule['minimum_bandwidth_rule']['id']
+ if "minimum_packet_rate_rule" in rule.keys():
+ rule_id = rule['minimum_packet_rate_rule']['id']
+ if "bandwidth_limit_rule" in rule.keys():
+ rule_id = rule['bandwidth_limit_rule']['id']
+ if "dscp_mark" in rule.keys():
+ rule_id = rule['id']
self.assertRaises(
lib_exc.NotFound,
- self.admin_client.update_bandwidth_limit_rule,
- non_exist_id, rule['id'], max_kbps=200, max_burst_kbps=1337)
+ self.rule_update_m,
+ non_exist_id, rule_id, **update_params)
- @decorators.attr(type='negative')
- @decorators.idempotent_id('a2c72066-0c32-4f28-be7f-78fa721588b6')
- def test_rule_update_rule_nonexistent_rule(self):
+ def _test_rule_create_rule_non_existent_policy(self, create_params):
+ non_exist_id = data_utils.rand_name('qos_policy')
+ self.assertRaises(
+ lib_exc.NotFound,
+ self.rule_create_m,
+ non_exist_id, **create_params)
+
+ def _test_rule_update_rule_nonexistent_rule(self, update_params):
non_exist_id = data_utils.rand_name('qos_rule')
policy = self.create_qos_policy(name='test-policy',
description='test policy',
shared=False)
self.assertRaises(
lib_exc.NotFound,
- self.admin_client.update_bandwidth_limit_rule,
- policy['id'], non_exist_id, max_kbps=200, max_burst_kbps=1337)
+ self.rule_update_m,
+ policy['id'], non_exist_id, **update_params)
+
+
+class QosBandwidthLimitRuleNegativeTestJSON(QosRuleNegativeBaseTestJSON):
+
+ @classmethod
+ def setup_clients(cls):
+ super(QosBandwidthLimitRuleNegativeTestJSON, cls).setup_clients()
+ cls.qos_bw_limit_rule_client = \
+ cls.os_admin.qos_limit_bandwidth_rules_client
+
+ @classmethod
+ def resource_setup(cls):
+ cls.rule_create_m = \
+ cls.qos_bw_limit_rule_client.create_limit_bandwidth_rule
+ cls.rule_update_m = \
+ cls.qos_bw_limit_rule_client.update_limit_bandwidth_rule
+ super(QosBandwidthLimitRuleNegativeTestJSON, cls).resource_setup()
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('e9ce8042-c828-4cb9-b1f1-85bd35e6553a')
+ def test_rule_update_rule_nonexistent_policy(self):
+ create_params = {'max_kbps': 1, 'max_burst_kbps': 1}
+ update_params = {'max_kbps': 200, 'max_burst_kbps': 1337}
+ self._test_rule_update_rule_nonexistent_policy(
+ create_params, update_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('1b592566-745f-4e15-a439-073afe341244')
+ def test_rule_create_rule_non_existent_policy(self):
+ create_params = {'max_kbps': 200, 'max_burst_kbps': 300}
+ self._test_rule_create_rule_non_existent_policy(create_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('a2c72066-0c32-4f28-be7f-78fa721588b6')
+ def test_rule_update_rule_nonexistent_rule(self):
+ update_params = {'max_kbps': 200, 'max_burst_kbps': 1337}
+ self._test_rule_update_rule_nonexistent_rule(update_params)
+
+
+class QosMinimumBandwidthRuleNegativeTestJSON(QosRuleNegativeBaseTestJSON):
+
+ @classmethod
+ def resource_setup(cls):
+ cls.rule_create_m = cls.os_admin.qos_minimum_bandwidth_rules_client.\
+ create_minimum_bandwidth_rule
+ cls.rule_update_m = cls.os_admin.qos_minimum_bandwidth_rules_client.\
+ update_minimum_bandwidth_rule
+ super(QosMinimumBandwidthRuleNegativeTestJSON, cls).resource_setup()
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('08b8455b-4d4f-4119-bad3-9357085c3a80')
+ def test_rule_update_rule_nonexistent_policy(self):
+ create_params = {'min_kbps': 1}
+ update_params = {'min_kbps': 200}
+ self._test_rule_update_rule_nonexistent_policy(
+ create_params, update_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('5a714a4a-bfbc-4cf9-b0c0-13fd185204f7')
+ def test_rule_create_rule_non_existent_policy(self):
+ create_params = {'min_kbps': 200}
+ self._test_rule_create_rule_non_existent_policy(create_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('8470cbe0-8ca5-46ab-9c66-7cf69301b121')
+ def test_rule_update_rule_nonexistent_rule(self):
+ update_params = {'min_kbps': 200}
+ self._test_rule_update_rule_nonexistent_rule(update_params)
+
+
+class QosMinimumPpsRuleNegativeTestJSON(QosRuleNegativeBaseTestJSON):
+
+ @classmethod
+ @utils.requires_ext(service='network',
+ extension='port-resource-request-groups')
+ def resource_setup(cls):
+ cls.rule_create_m = cls.os_admin.qos_minimum_packet_rate_rules_client.\
+ create_minimum_packet_rate_rule
+ cls.rule_update_m = cls.os_admin.qos_minimum_packet_rate_rules_client.\
+ update_minimum_packet_rate_rule
+ super(QosMinimumPpsRuleNegativeTestJSON, cls).resource_setup()
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('ddd16824-3e10-11ec-928d-5b1ef3fb9f43')
+ def test_rule_update_rule_nonexistent_policy(self):
+ create_params = {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 1}
+ update_params = {qos_consts.MIN_KPPS: 200}
+ self._test_rule_update_rule_nonexistent_policy(
+ create_params, update_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('de4f5540-3e10-11ec-9700-4bf3629b843e')
+ def test_rule_create_rule_non_existent_policy(self):
+ create_params = {qos_consts.DIRECTION: n_constants.EGRESS_DIRECTION,
+ qos_consts.MIN_KPPS: 200}
+ self._test_rule_create_rule_non_existent_policy(create_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('deb914ee-3e10-11ec-b3dc-03e52f9269c9')
+ def test_rule_update_rule_nonexistent_rule(self):
+ update_params = {qos_consts.MIN_KPPS: 200}
+ self._test_rule_update_rule_nonexistent_rule(update_params)
+
+
+class QosDscpRuleNegativeTestJSON(QosRuleNegativeBaseTestJSON):
+
+ @classmethod
+ def resource_setup(cls):
+ cls.rule_create_m = cls.create_qos_dscp_marking_rule
+ cls.rule_update_m = cls.admin_client.update_dscp_marking_rule
+ super(QosDscpRuleNegativeTestJSON, cls).resource_setup()
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('d47d5fbe-3e98-476f-b2fd-97818175dea5')
+ def test_rule_update_rule_nonexistent_policy(self):
+ create_params = {'dscp_mark': 26}
+ update_params = {'dscp_mark': 16}
+ self._test_rule_update_rule_nonexistent_policy(
+ create_params, update_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('07d17f09-3dc4-4c24-9bb1-49081a153c5a')
+ def test_rule_create_rule_non_existent_policy(self):
+ create_params = {'dscp_mark': 16}
+ self._test_rule_create_rule_non_existent_policy(create_params)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('9c0bd085-5a7a-496f-a984-50dc631a64f2')
+ def test_rule_update_rule_nonexistent_rule(self):
+ update_params = {'dscp_mark': 16}
+ self._test_rule_update_rule_nonexistent_rule(update_params)
diff --git a/neutron_tempest_plugin/api/test_routers.py b/neutron_tempest_plugin/api/test_routers.py
index d866dbc..5e916f5 100644
--- a/neutron_tempest_plugin/api/test_routers.py
+++ b/neutron_tempest_plugin/api/test_routers.py
@@ -15,9 +15,12 @@
import netaddr
+from neutron_lib import constants as const
+
from tempest.common import utils as tutils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
from neutron_tempest_plugin.api import base
from neutron_tempest_plugin.api import base_routers
@@ -453,3 +456,57 @@
@decorators.idempotent_id('fb102124-20f8-4cb3-8c81-f16f5e41d192')
def test_list_no_pagination_limit_0(self):
self._test_list_no_pagination_limit_0()
+
+
+class RoutersDeleteTest(base_routers.BaseRouterTest):
+ """The only test in this class is a test that removes the router!
+
+ * We cannot delete common and mandatory resources (a router in this
+ * case) using the existing classes, as doing so would cause failures
+ * in other tests running in parallel.
+ """
+ @classmethod
+ def resource_setup(cls):
+ super(RoutersDeleteTest, cls).resource_setup()
+ cls.secgroup = cls.create_security_group(
+ name=data_utils.rand_name("test_port_secgroup"))
+ router_kwargs = {
+ 'router_name': data_utils.rand_name('router_to_delete'),
+ 'external_network_id': CONF.network.public_network_id}
+ cls.router = cls.create_router(**router_kwargs)
+
+ @decorators.idempotent_id('dbbc5c74-63c8-11eb-8881-74e5f9e2a801')
+ def test_delete_router(self):
+ # Create a port on a tenant network and associate it with the router.
+ # Try to delete router. Expected result: "Conflict Error" is raised.
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ self.create_router_interface(self.router['id'], subnet['id'])
+ port = self.create_port(
+ network, name=data_utils.rand_name("port"),
+ security_groups=[self.secgroup['id']])
+ self.create_floatingip(port=port)
+ self.assertRaises(
+ lib_exc.Conflict, self.client.delete_router, self.router['id'])
+ # Delete the associated port
+ # Try to delete router. Expected result: "Conflict Error" is raised.
+ # Note: there are still interfaces in use.
+ self.client.delete_port(port['id'])
+ self.assertRaises(
+ lib_exc.Conflict, self.client.delete_router, self.router['id'])
+ # Delete the rest of the router's ports
+ # Try to delete router. Expected result: "PASS"
+ interfaces = [
+ port for port in self.client.list_router_interfaces(
+ self.router['id'])['ports']
+ if port['device_owner'] in const.ROUTER_INTERFACE_OWNERS]
+ for i in interfaces:
+ try:
+ self.assertRaises(
+ lib_exc.Conflict, self.client.delete_router,
+ self.router['id'])
+ self.client.remove_router_interface_with_subnet_id(
+ self.router['id'], i['fixed_ips'][0]['subnet_id'])
+ except lib_exc.NotFound:
+ pass
+ self.client.delete_router(self.router['id'])
diff --git a/neutron_tempest_plugin/api/test_routers_negative.py b/neutron_tempest_plugin/api/test_routers_negative.py
index 8700761..9c83fc7 100644
--- a/neutron_tempest_plugin/api/test_routers_negative.py
+++ b/neutron_tempest_plugin/api/test_routers_negative.py
@@ -13,12 +13,17 @@
# License for the specific language governing permissions and limitations
# under the License.
+from neutron_lib import constants
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
import testtools
from neutron_tempest_plugin.api import base_routers as base
+from neutron_tempest_plugin import config
+
+
+CONF = config.CONF
class RoutersNegativeTestBase(base.BaseRouterTest):
@@ -33,19 +38,6 @@
cls.subnet = cls.create_subnet(cls.network)
-class RoutersNegativeTest(RoutersNegativeTestBase):
-
- @decorators.attr(type='negative')
- @decorators.idempotent_id('e3e751af-15a2-49cc-b214-a7154579e94f')
- def test_delete_router_in_use(self):
- # This port is deleted after a test by remove_router_interface.
- port = self.create_port(self.network)
- self.client.add_router_interface_with_port_id(
- self.router['id'], port['id'])
- with testtools.ExpectedException(lib_exc.Conflict):
- self.client.delete_router(self.router['id'])
-
-
class RoutersNegativePolicyTest(RoutersNegativeTestBase):
credentials = ['admin', 'primary', 'alt']
@@ -88,6 +80,29 @@
self.client.add_router_interface_with_port_id,
self.router['id'], invalid_id)
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('dad7a8ba-2726-11eb-82dd-74e5f9e2a801')
+ def test_remove_associated_ports(self):
+ self.client.update_router(
+ self.router['id'],
+ external_gateway_info={
+ 'network_id': CONF.network.public_network_id})
+ network = self.create_network()
+ subnet = self.create_subnet(network)
+ self.create_router_interface(self.router['id'], subnet['id'])
+ port_ids = [
+ item['id'] for item in self.admin_client.list_ports(
+ device_id=self.router['id'])['ports']
+ if item['device_owner'] not in [
+ constants.DEVICE_OWNER_ROUTER_HA_INTF,
+ constants.DEVICE_OWNER_HA_REPLICATED_INT]]
+ if not port_ids:
+ self.fail("No ports other than HA ports found for the router %s" %
+ self.router['id'])
+ for port_id in port_ids:
+ with testtools.ExpectedException(lib_exc.Conflict):
+ self.admin_client.delete_port(port_id)
+
class DvrRoutersNegativeTest(RoutersNegativeTestBase):
diff --git a/neutron_tempest_plugin/api/test_security_groups.py b/neutron_tempest_plugin/api/test_security_groups.py
index afce414..d251f8c 100644
--- a/neutron_tempest_plugin/api/test_security_groups.py
+++ b/neutron_tempest_plugin/api/test_security_groups.py
@@ -127,6 +127,62 @@
self.assertIn(secgrp['name'], sec_nm)
self.assertIsNotNone(secgrp['id'])
+ @decorators.idempotent_id('e93f33d8-57ea-11eb-b69b-74e5f9e2a801')
+ def test_create_sec_groups_with_the_same_name(self):
+ same_name_sg_number = 5
+ sg_name = 'sg_zahlabut'
+ sg_names = [sg_name] * same_name_sg_number
+ for name in sg_names:
+ self.create_security_group(name=name)
+ sec_groups = [item['id'] for item in
+ self.client.list_security_groups(
+ name=sg_name)['security_groups']]
+ self.assertEqual(
+ same_name_sg_number, len(set(sec_groups)),
+ 'Failed - expected number of groups with the same name'
+ ' is: {}'.format(same_name_sg_number))
+
+
+class StatelessSecGroupTest(base.BaseAdminNetworkTest):
+
+ required_extensions = ['security-group', 'stateful-security-group']
+
+ @decorators.idempotent_id('0a6c1476-3d1a-11ec-b0ec-0800277ac3d9')
+ def test_stateless_security_group_update(self):
+ security_group = self.create_security_group(stateful=True)
+
+ # List security groups and verify that the created group is in the response
+ security_groups = self.client.list_security_groups()['security_groups']
+ found = False
+ for sg in security_groups:
+ if sg['id'] == security_group['id']:
+ found = True
+ break
+ self.assertTrue(found)
+ self.assertTrue(sg['stateful'])
+
+ # Switch to stateless
+ updated_security_group = self.client.update_security_group(
+ security_group['id'], stateful=False)['security_group']
+
+ # Verify if security group is updated
+ self.assertFalse(updated_security_group['stateful'])
+
+ observed_security_group = self.client.show_security_group(
+ security_group['id'])['security_group']
+ self.assertFalse(observed_security_group['stateful'])
+
+ # Switch back to stateful
+ updated_security_group = self.client.update_security_group(
+ security_group['id'], stateful=True)['security_group']
+
+ # Verify if security group is stateful again
+ self.assertTrue(updated_security_group['stateful'])
+
+ observed_security_group = self.client.show_security_group(
+ security_group['id'])['security_group']
+ self.assertTrue(observed_security_group['stateful'])
+
class BaseSecGroupQuota(base.BaseAdminNetworkTest):
@@ -225,12 +281,14 @@
def _create_security_group_rules(self, amount, port_index=1):
for i in range(amount):
- self.create_security_group_rule(**{
+ ingress_rule = self.create_security_group_rule(**{
'project_id': self.client.tenant_id,
'direction': 'ingress',
'port_range_max': port_index + i,
'port_range_min': port_index + i,
'protocol': 'tcp'})
+ self.addCleanup(
+ self.client.delete_security_group_rule, ingress_rule['id'])
def _increase_sg_rules_quota(self):
sg_rules_quota = self._get_sg_rules_quota()
diff --git a/neutron_tempest_plugin/api/test_subnets.py b/neutron_tempest_plugin/api/test_subnets.py
index b8842ab..3b075d5 100644
--- a/neutron_tempest_plugin/api/test_subnets.py
+++ b/neutron_tempest_plugin/api/test_subnets.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import netaddr
from tempest.lib import decorators
from neutron_tempest_plugin.api import base
@@ -69,3 +70,44 @@
self._test_list_validation_filters(self.list_kwargs)
self._test_list_validation_filters({
'unknown_filter': 'value'}, filter_is_valid=False)
+
+
+class SubnetServiceTypeTestJSON(base.BaseNetworkTest):
+
+ required_extensions = ['subnet-service-types']
+
+ @classmethod
+ def resource_setup(cls):
+ super(SubnetServiceTypeTestJSON, cls).resource_setup()
+ cls.network = cls.create_network()
+
+ @decorators.idempotent_id('7e0edb66-1bb2-4473-ab83-d039cddced0d')
+ def test_allocate_ips_are_from_correct_subnet(self):
+ cidr_1 = netaddr.IPNetwork('192.168.1.0/24')
+ cidr_2 = netaddr.IPNetwork('192.168.2.0/24')
+
+ # NOTE(slaweq): service_type "network:distributed" is needed for
+ # ML2/OVN backend. It's needed because OVN driver creates additional
+ # port for metadata service in each subnet with enabled dhcp and such
+ # port needs to have allocated IP address from the subnet also.
+ self.create_subnet(
+ self.network,
+ service_types=['test:type_1', 'network:distributed'],
+ cidr=str(cidr_1))
+ self.create_subnet(
+ self.network,
+ service_types=['test:type_2', 'network:distributed'],
+ cidr=str(cidr_2))
+ port_type_1 = self.create_port(self.network,
+ device_owner="test:type_1")
+ port_type_2 = self.create_port(self.network,
+ device_owner="test:type_2")
+
+ self.assertEqual(1, len(port_type_1['fixed_ips']))
+ self.assertEqual(1, len(port_type_2['fixed_ips']))
+ self.assertIn(
+ netaddr.IPAddress(port_type_1['fixed_ips'][0]['ip_address']),
+ cidr_1)
+ self.assertIn(
+ netaddr.IPAddress(port_type_2['fixed_ips'][0]['ip_address']),
+ cidr_2)
diff --git a/neutron_tempest_plugin/api/test_trunk.py b/neutron_tempest_plugin/api/test_trunk.py
index 1f83bd8..26f8de8 100644
--- a/neutron_tempest_plugin/api/test_trunk.py
+++ b/neutron_tempest_plugin/api/test_trunk.py
@@ -247,21 +247,23 @@
@classmethod
def skip_checks(cls):
super(TrunkTestMtusJSONBase, cls).skip_checks()
- if not all(cls.is_type_driver_enabled(t) for t in ['gre', 'vxlan']):
- msg = "Either vxlan or gre type driver not enabled."
+ if not all(cls.is_type_driver_enabled(t) for t in ['vlan', 'vxlan']):
+ msg = "Either vxlan or vlan type driver not enabled."
raise cls.skipException(msg)
def setUp(self):
super(TrunkTestMtusJSONBase, self).setUp()
+ physnet_name = CONF.neutron_plugin_options.provider_vlans[0]
- # VXLAN autocomputed MTU (1450) is smaller than that of GRE (1458)
+ # VXLAN autocomputed MTU (1450) is smaller than that of VLAN (1480)
self.smaller_mtu_net = self.create_network(
name=data_utils.rand_name('vxlan-net'),
provider_network_type='vxlan')
self.larger_mtu_net = self.create_network(
- name=data_utils.rand_name('gre-net'),
- provider_network_type='gre')
+ name=data_utils.rand_name('vlan-net'),
+ provider_network_type='vlan',
+ provider_physical_network=physnet_name)
self.smaller_mtu_port = self.create_port(self.smaller_mtu_net)
self.smaller_mtu_port_2 = self.create_port(self.smaller_mtu_net)
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/manager.py b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
index 4ff1c0d..90c2bb1 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/manager.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/manager.py
@@ -14,325 +14,25 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
-import netaddr
from oslo_log import log
-from oslo_utils import netutils
-from tempest.common import compute
from tempest.common import utils
-from tempest.common.utils.linux import remote_client
-from tempest.common.utils import net_utils
-from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
-import tempest.test
+from tempest.scenario import manager
CONF = config.CONF
LOG = log.getLogger(__name__)
-class ScenarioTest(tempest.test.BaseTestCase):
+class ScenarioTest(manager.NetworkScenarioTest):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
- @classmethod
- def setup_clients(cls):
- super(ScenarioTest, cls).setup_clients()
- # Clients (in alphabetical order)
- cls.keypairs_client = cls.os_primary.keypairs_client
- cls.servers_client = cls.os_primary.servers_client
- # Neutron network client
- cls.networks_client = cls.os_primary.networks_client
- cls.ports_client = cls.os_primary.ports_client
- cls.routers_client = cls.os_primary.routers_client
- cls.subnets_client = cls.os_primary.subnets_client
- cls.floating_ips_client = cls.os_primary.floating_ips_client
- cls.security_groups_client = cls.os_primary.security_groups_client
- cls.security_group_rules_client = (
- cls.os_primary.security_group_rules_client)
-
- # ## Test functions library
- #
- # The create_[resource] functions only return body and discard the
- # resp part which is not used in scenario tests
-
- def _create_port(self, network_id, client=None, namestart='port-quotatest',
- **kwargs):
- if not client:
- client = self.ports_client
- name = data_utils.rand_name(namestart)
- result = client.create_port(
- name=name,
- network_id=network_id,
- **kwargs)
- self.assertIsNotNone(result, 'Unable to allocate port')
- port = result['port']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_port, port['id'])
- return port
-
- def create_keypair(self, client=None):
- if not client:
- client = self.keypairs_client
- name = data_utils.rand_name(self.__class__.__name__)
- # We don't need to create a keypair by pubkey in scenario
- body = client.create_keypair(name=name)
- self.addCleanup(client.delete_keypair, name)
- return body['keypair']
-
- def create_server(self, name=None, image_id=None, flavor=None,
- validatable=False, wait_until='ACTIVE',
- clients=None, **kwargs):
- """Wrapper utility that returns a test server.
-
- This wrapper utility calls the common create test server and
- returns a test server. The purpose of this wrapper is to minimize
- the impact on the code of the tests already using this
- function.
- """
-
- # NOTE(jlanoux): As a first step, ssh checks in the scenario
- # tests need to be run regardless of the run_validation and
- # validatable parameters and thus until the ssh validation job
- # becomes voting in CI. The test resources management and IP
- # association are taken care of in the scenario tests.
- # Therefore, the validatable parameter is set to false in all
- # those tests. In this way create_server just return a standard
- # server and the scenario tests always perform ssh checks.
-
- # Needed for the cross_tenant_traffic test:
- if clients is None:
- clients = self.os_primary
-
- if name is None:
- name = data_utils.rand_name(self.__class__.__name__ + "-server")
-
- vnic_type = CONF.network.port_vnic_type
-
- # If vnic_type is configured create port for
- # every network
- if vnic_type:
- ports = []
-
- create_port_body = {'binding:vnic_type': vnic_type,
- 'namestart': 'port-smoke'}
- if kwargs:
- # Convert security group names to security group ids
- # to pass to create_port
- if 'security_groups' in kwargs:
- security_groups = \
- clients.security_groups_client.list_security_groups(
- ).get('security_groups')
- sec_dict = dict([(s['name'], s['id'])
- for s in security_groups])
-
- sec_groups_names = [s['name'] for s in kwargs.pop(
- 'security_groups')]
- security_groups_ids = [sec_dict[s]
- for s in sec_groups_names]
-
- if security_groups_ids:
- create_port_body[
- 'security_groups'] = security_groups_ids
- networks = kwargs.pop('networks', [])
- else:
- networks = []
-
- # If there are no networks passed to us we look up
- # for the project's private networks and create a port.
- # The same behaviour as we would expect when passing
- # the call to the clients with no networks
- if not networks:
- networks = clients.networks_client.list_networks(
- **{'router:external': False, 'fields': 'id'})['networks']
-
- # It's net['uuid'] if networks come from kwargs
- # and net['id'] if they come from
- # clients.networks_client.list_networks
- for net in networks:
- net_id = net.get('uuid', net.get('id'))
- if 'port' not in net:
- port = self._create_port(network_id=net_id,
- client=clients.ports_client,
- **create_port_body)
- ports.append({'port': port['id']})
- else:
- ports.append({'port': net['port']})
- if ports:
- kwargs['networks'] = ports
- self.ports = ports
-
- tenant_network = self.get_tenant_network()
-
- body, servers = compute.create_test_server(
- clients,
- tenant_network=tenant_network,
- wait_until=wait_until,
- name=name, flavor=flavor,
- image_id=image_id, **kwargs)
-
- self.addCleanup(waiters.wait_for_server_termination,
- clients.servers_client, body['id'])
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- clients.servers_client.delete_server, body['id'])
- server = clients.servers_client.show_server(body['id'])['server']
- return server
-
- def get_remote_client(self, ip_address, username=None, private_key=None):
- """Get a SSH client to a remote server
-
- @param ip_address the server floating or fixed IP address to use
- for ssh validation
- @param username name of the Linux account on the remote server
- @param private_key the SSH private key to use
- @return a RemoteClient object
- """
-
- if username is None:
- username = CONF.validation.image_ssh_user
- # Set this with 'keypair' or others to log in with keypair or
- # username/password.
- if CONF.validation.auth_method == 'keypair':
- password = None
- if private_key is None:
- private_key = self.keypair['private_key']
- else:
- password = CONF.validation.image_ssh_password
- private_key = None
- linux_client = remote_client.RemoteClient(ip_address, username,
- pkey=private_key,
- password=password)
- try:
- linux_client.validate_authentication()
- except Exception as e:
- message = ('Initializing SSH connection to %(ip)s failed. '
- 'Error: %(error)s' % {'ip': ip_address,
- 'error': e})
- caller = test_utils.find_test_caller()
- if caller:
- message = '(%s) %s' % (caller, message)
- LOG.exception(message)
- self._log_console_output()
- raise
-
- return linux_client
-
- def _log_console_output(self, servers=None):
- if not CONF.compute_feature_enabled.console_output:
- LOG.debug('Console output not supported, cannot log')
- return
- if not servers:
- servers = self.servers_client.list_servers()
- servers = servers['servers']
- for server in servers:
- try:
- console_output = self.servers_client.get_console_output(
- server['id'])['output']
- LOG.debug('Console output for %s\nbody=\n%s',
- server['id'], console_output)
- except lib_exc.NotFound:
- LOG.debug("Server %s disappeared(deleted) while looking "
- "for the console log", server['id'])
-
- def _log_net_info(self, exc):
- # network debug is called as part of ssh init
- if not isinstance(exc, lib_exc.SSHTimeout):
- LOG.debug('Network information on a devstack host')
-
- def ping_ip_address(self, ip_address, should_succeed=True,
- ping_timeout=None, mtu=None):
- timeout = ping_timeout or CONF.validation.ping_timeout
- cmd = ['ping', '-c1', '-w1']
-
- if mtu:
- cmd += [
- # don't fragment
- '-M', 'do',
- # ping receives just the size of ICMP payload
- '-s', str(net_utils.get_ping_payload_size(mtu, 4))
- ]
- cmd.append(ip_address)
-
- def ping():
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc.communicate()
-
- return (proc.returncode == 0) == should_succeed
-
- caller = test_utils.find_test_caller()
- LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
- ' expected result is %(should_succeed)s', {
- 'caller': caller, 'ip': ip_address, 'timeout': timeout,
- 'should_succeed':
- 'reachable' if should_succeed else 'unreachable'
- })
- result = test_utils.call_until_true(ping, timeout, 1)
- LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
- 'ping result is %(result)s', {
- 'caller': caller, 'ip': ip_address, 'timeout': timeout,
- 'result': 'expected' if result else 'unexpected'
- })
- return result
-
- def check_vm_connectivity(self, ip_address,
- username=None,
- private_key=None,
- should_connect=True,
- mtu=None):
- """Check server connectivity
-
- :param ip_address: server to test against
- :param username: server's ssh username
- :param private_key: server's ssh private key to be used
- :param should_connect: True/False indicates positive/negative test
- positive - attempt ping and ssh
- negative - attempt ping and fail if succeed
- :param mtu: network MTU to use for connectivity validation
-
- :raises: AssertError if the result of the connectivity check does
- not match the value of the should_connect param
- """
- if should_connect:
- msg = "Timed out waiting for %s to become reachable" % ip_address
- else:
- msg = "ip address %s is reachable" % ip_address
- self.assertTrue(self.ping_ip_address(ip_address,
- should_succeed=should_connect,
- mtu=mtu),
- msg=msg)
- if should_connect:
- # no need to check ssh for negative connectivity
- self.get_remote_client(ip_address, username, private_key)
-
- def check_public_network_connectivity(self, ip_address, username,
- private_key, should_connect=True,
- msg=None, servers=None, mtu=None):
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- LOG.debug('checking network connections to IP %s with user: %s',
- ip_address, username)
- try:
- self.check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect,
- mtu=mtu)
- except Exception:
- ex_msg = 'Public network connectivity check failed'
- if msg:
- ex_msg += ": " + msg
- LOG.exception(ex_msg)
- self._log_console_output(servers)
- raise
-
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
@@ -357,222 +57,6 @@
msg = "Bgpvpn extension not enabled."
raise cls.skipException(msg)
- def _create_network(self, networks_client=None,
- tenant_id=None,
- namestart='network-smoke-',
- port_security_enabled=True):
- if not networks_client:
- networks_client = self.networks_client
- if not tenant_id:
- tenant_id = networks_client.tenant_id
- name = data_utils.rand_name(namestart)
- network_kwargs = dict(name=name, tenant_id=tenant_id)
- # Neutron disables port security by default so we have to check the
- # config before trying to create the network with port_security_enabled
- if CONF.network_feature_enabled.port_security:
- network_kwargs['port_security_enabled'] = port_security_enabled
- result = networks_client.create_network(**network_kwargs)
- network = result['network']
-
- self.assertEqual(network['name'], name)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- networks_client.delete_network,
- network['id'])
- return network
-
- def _create_subnet(self, network, subnets_client=None,
- routers_client=None, namestart='subnet-smoke',
- **kwargs):
- """Create a subnet for the given network
-
- within the cidr block configured for tenant networks.
- """
- if not subnets_client:
- subnets_client = self.subnets_client
- if not routers_client:
- routers_client = self.routers_client
-
- def cidr_in_use(cidr, tenant_id):
- """Check cidr existence
-
- :returns: True if subnet with cidr already exist in tenant
- False else
- """
- cidr_in_use = self.os_admin.subnets_client.list_subnets(
- tenant_id=tenant_id, cidr=cidr)['subnets']
- return len(cidr_in_use) != 0
-
- ip_version = kwargs.pop('ip_version', 4)
-
- if ip_version == 6:
- tenant_cidr = netaddr.IPNetwork(
- CONF.network.project_network_v6_cidr)
- num_bits = CONF.network.project_network_v6_mask_bits
- else:
- tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- num_bits = CONF.network.project_network_mask_bits
-
- result = None
- str_cidr = None
- # Repeatedly attempt subnet creation with sequential cidr
- # blocks until an unallocated block is found.
- for subnet_cidr in tenant_cidr.subnet(num_bits):
- str_cidr = str(subnet_cidr)
- if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
- continue
-
- subnet = dict(
- name=data_utils.rand_name(namestart),
- network_id=network['id'],
- tenant_id=network['tenant_id'],
- cidr=str_cidr,
- ip_version=ip_version,
- **kwargs
- )
- try:
- result = subnets_client.create_subnet(**subnet)
- break
- except lib_exc.Conflict as e:
- is_overlapping_cidr = 'overlaps with another subnet' in str(e)
- if not is_overlapping_cidr:
- raise
- self.assertIsNotNone(result, 'Unable to allocate tenant network')
-
- subnet = result['subnet']
- self.assertEqual(subnet['cidr'], str_cidr)
-
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- subnets_client.delete_subnet, subnet['id'])
-
- return subnet
-
- def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'], fixed_ip=ip_addr)['ports']
- # A port can have more than one IP address in some cases.
- # If the network is dual-stack (IPv4 + IPv6), this port is associated
- # with 2 subnets
- p_status = ['ACTIVE']
- # NOTE(vsaienko) With Ironic, instances live on separate hardware
- # servers. Neutron does not bind ports for Ironic instances, as a
- # result the port remains in the DOWN state.
- # TODO(vsaienko) remove once bug: #1599836 is resolved.
- if getattr(CONF.service_available, 'ironic', False):
- p_status.append('DOWN')
- port_map = [(p["id"], fxip["ip_address"])
- for p in ports
- for fxip in p["fixed_ips"]
- if netutils.is_valid_ipv4(fxip["ip_address"]) and
- p['status'] in p_status]
- inactive = [p for p in ports if p['status'] != 'ACTIVE']
- if inactive:
- LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
- self.assertNotEqual(0, len(port_map),
- "No IPv4 addresses found in: %s" % ports)
- self.assertEqual(len(port_map), 1,
- "Found multiple IPv4 addresses: %s. "
- "Unable to determine which port to target."
- % port_map)
- return port_map[0]
-
- def _get_network_by_name(self, network_name):
- net = self.os_admin.networks_client.list_networks(
- name=network_name)['networks']
- self.assertNotEqual(len(net), 0,
- "Unable to get network by name: %s" % network_name)
- return net[0]
-
- def create_floating_ip(self, thing, external_network_id=None,
- port_id=None, client=None):
- """Create a floating IP and associates to a resource/port on Neutron"""
- if not external_network_id:
- external_network_id = CONF.network.public_network_id
- if not client:
- client = self.floating_ips_client
- if not port_id:
- port_id, ip4 = self._get_server_port_id_and_ip4(thing)
- else:
- ip4 = None
- result = client.create_floatingip(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=thing['tenant_id'],
- fixed_ip_address=ip4
- )
- floating_ip = result['floatingip']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_floatingip,
- floating_ip['id'])
- return floating_ip
-
- def _associate_floating_ip(self, floating_ip, server):
- port_id, _ = self._get_server_port_id_and_ip4(server)
- kwargs = dict(port_id=port_id)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertEqual(port_id, floating_ip['port_id'])
- return floating_ip
-
- def _disassociate_floating_ip(self, floating_ip):
- """:param floating_ip: floating_ips_client.create_floatingip"""
- kwargs = dict(port_id=None)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertIsNone(floating_ip['port_id'])
- return floating_ip
-
- def check_floating_ip_status(self, floating_ip, status):
- """Verifies floatingip reaches the given status
-
- :param dict floating_ip: floating IP dict to check status
- :param status: target status
- :raises: AssertionError if status doesn't match
- """
- floatingip_id = floating_ip['id']
-
- def refresh():
- result = (self.floating_ips_client.
- show_floatingip(floatingip_id)['floatingip'])
- return status == result['status']
-
- test_utils.call_until_true(refresh,
- CONF.network.build_timeout,
- CONF.network.build_interval)
- floating_ip = self.floating_ips_client.show_floatingip(
- floatingip_id)['floatingip']
- self.assertEqual(status, floating_ip['status'],
- message="FloatingIP: {fp} is at status: {cst}. "
- "failed to reach status: {st}"
- .format(fp=floating_ip, cst=floating_ip['status'],
- st=status))
- LOG.info("FloatingIP: {fp} is at status: {st}"
- .format(fp=floating_ip, st=status))
-
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True,
- servers_for_debug=None):
- if not CONF.network.project_networks_reachable:
- msg = 'Tenant networks not configured to be reachable.'
- LOG.info(msg)
- return
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- try:
- for net_name, ip_addresses in server['addresses'].items():
- for ip_address in ip_addresses:
- self.check_vm_connectivity(ip_address['addr'],
- username,
- private_key,
- should_connect=should_connect)
- except Exception as e:
- LOG.exception('Tenant network connectivity check failed')
- self._log_console_output(servers_for_debug)
- self._log_net_info(e)
- raise
-
def _check_remote_connectivity(self, source, dest, should_succeed=True,
nic=None):
"""check ping server via source ssh connection
@@ -597,124 +81,9 @@
CONF.validation.ping_timeout,
1)
- def _create_security_group(self, security_group_rules_client=None,
- tenant_id=None,
- namestart='secgroup-smoke',
- security_groups_client=None):
- if security_group_rules_client is None:
- security_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- if tenant_id is None:
- tenant_id = security_groups_client.tenant_id
- secgroup = self._create_empty_security_group(
- namestart=namestart, client=security_groups_client,
- tenant_id=tenant_id)
-
- # Add rules to the security group
- rules = self._create_loginable_secgroup_rule(
- security_group_rules_client=security_group_rules_client,
- secgroup=secgroup,
- security_groups_client=security_groups_client)
- for rule in rules:
- self.assertEqual(tenant_id, rule['tenant_id'])
- self.assertEqual(secgroup['id'], rule['security_group_id'])
- return secgroup
-
- def _create_empty_security_group(self, client=None, tenant_id=None,
- namestart='secgroup-smoke'):
- """Create a security group without rules.
-
- Default rules will be created:
- - IPv4 egress to any
- - IPv6 egress to any
-
- :param tenant_id: secgroup will be created in this tenant
- :returns: the created security group
- """
- if client is None:
- client = self.security_groups_client
- if not tenant_id:
- tenant_id = client.tenant_id
- sg_name = data_utils.rand_name(namestart)
- sg_desc = sg_name + " description"
- sg_dict = dict(name=sg_name,
- description=sg_desc)
- sg_dict['tenant_id'] = tenant_id
- result = client.create_security_group(**sg_dict)
-
- secgroup = result['security_group']
- self.assertEqual(secgroup['name'], sg_name)
- self.assertEqual(tenant_id, secgroup['tenant_id'])
- self.assertEqual(secgroup['description'], sg_desc)
-
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_security_group, secgroup['id'])
- return secgroup
-
- def _default_security_group(self, client=None, tenant_id=None):
- """Get default secgroup for given tenant_id.
-
- :returns: default secgroup for given tenant
- """
- if client is None:
- client = self.security_groups_client
- if not tenant_id:
- tenant_id = client.tenant_id
- sgs = [
- sg for sg in list(client.list_security_groups().values())[0]
- if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
- ]
- msg = "No default security group for tenant %s." % (tenant_id)
- self.assertGreater(len(sgs), 0, msg)
- return sgs[0]
-
- def _create_security_group_rule(self, secgroup=None,
- sec_group_rules_client=None,
- tenant_id=None,
- security_groups_client=None, **kwargs):
- """Create a rule from a dictionary of rule parameters.
-
- Create a rule in a secgroup. if secgroup not defined will search for
- default secgroup in tenant_id.
-
- :param secgroup: the security group.
- :param tenant_id: if secgroup not passed -- the tenant in which to
- search for default secgroup
- :param kwargs: a dictionary containing rule parameters:
- for example, to allow incoming ssh:
- rule = {
- direction: 'ingress'
- protocol:'tcp',
- port_range_min: 22,
- port_range_max: 22
- }
- """
- if sec_group_rules_client is None:
- sec_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- if not tenant_id:
- tenant_id = security_groups_client.tenant_id
- if secgroup is None:
- secgroup = self._default_security_group(
- client=security_groups_client, tenant_id=tenant_id)
-
- ruleset = dict(security_group_id=secgroup['id'],
- tenant_id=secgroup['tenant_id'])
- ruleset.update(kwargs)
-
- sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
- sg_rule = sg_rule['security_group_rule']
-
- self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
- self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
-
- return sg_rule
-
- def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
- secgroup=None,
- security_groups_client=None):
+ def create_loginable_secgroup_rule(self, security_group_rules_client=None,
+ secgroup=None,
+ security_groups_client=None):
"""Create loginable security group rule
This function will create:
@@ -759,7 +128,7 @@
for r_direction in ['ingress', 'egress']:
ruleset['direction'] = r_direction
try:
- sg_rule = self._create_security_group_rule(
+ sg_rule = self.create_security_group_rule(
sec_group_rules_client=sec_group_rules_client,
secgroup=secgroup,
security_groups_client=security_groups_client,
@@ -775,33 +144,6 @@
return rules
- def _get_router(self, client=None, tenant_id=None):
- """Retrieve a router for the given tenant id.
-
- If a public router has been configured, it will be returned.
-
- If a public router has not been configured, but a public
- network has, a tenant router will be created and returned that
- routes traffic to the public network.
- """
- if not client:
- client = self.routers_client
- if not tenant_id:
- tenant_id = client.tenant_id
- router_id = CONF.network.public_router_id
- network_id = CONF.network.public_network_id
- if router_id:
- body = client.show_router(router_id)
- return body['router']
- elif network_id:
- router = self._create_router(client, tenant_id)
- kwargs = {'external_gateway_info': dict(network_id=network_id)}
- router = client.update_router(router['id'], **kwargs)['router']
- return router
- else:
- raise Exception("Neither of 'public_router_id' or "
- "'public_network_id' has been defined.")
-
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
@@ -818,62 +160,3 @@
client.delete_router,
router['id'])
return router
-
- def _update_router_admin_state(self, router, admin_state_up):
- kwargs = dict(admin_state_up=admin_state_up)
- router = self.routers_client.update_router(
- router['id'], **kwargs)['router']
- self.assertEqual(admin_state_up, router['admin_state_up'])
-
- def create_networks(self, networks_client=None,
- routers_client=None, subnets_client=None,
- tenant_id=None, dns_nameservers=None,
- port_security_enabled=True):
- """Create a network with a subnet connected to a router.
-
- The baremetal driver is a special case since all nodes are
- on the same shared network.
-
- :param tenant_id: id of tenant to create resources in.
- :param dns_nameservers: list of dns servers to send to subnet.
- :returns: network, subnet, router
- """
- if CONF.network.shared_physical_network:
- # NOTE(Shrews): This exception is for environments where tenant
- # credential isolation is available, but network separation is
- # not (the current baremetal case). Likely can be removed when
- # test account mgmt is reworked:
- # https://blueprints.launchpad.net/tempest/+spec/test-accounts
- if not CONF.compute.fixed_network_name:
- m = 'fixed_network_name must be specified in config'
- raise lib_exc.InvalidConfiguration(m)
- network = self._get_network_by_name(
- CONF.compute.fixed_network_name)
- router = None
- subnet = None
- else:
- network = self._create_network(
- networks_client=networks_client,
- tenant_id=tenant_id,
- port_security_enabled=port_security_enabled)
- router = self._get_router(client=routers_client,
- tenant_id=tenant_id)
- subnet_kwargs = dict(network=network,
- subnets_client=subnets_client,
- routers_client=routers_client)
- # use explicit check because empty list is a valid option
- if dns_nameservers is not None:
- subnet_kwargs['dns_nameservers'] = dns_nameservers
- subnet = self._create_subnet(**subnet_kwargs)
- if not routers_client:
- routers_client = self.routers_client
- router_id = router['id']
- routers_client.add_router_interface(router_id,
- subnet_id=subnet['id'])
-
- # save a cleanup job to remove this association between
- # router and subnet
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- routers_client.remove_router_interface, router_id,
- subnet_id=subnet['id'])
- return network, subnet, router
diff --git a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
index 937b0dc..0142045 100644
--- a/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
+++ b/neutron_tempest_plugin/bgpvpn/scenario/test_bgpvpn_basic.py
@@ -353,6 +353,7 @@
self._update_l3_bgpvpn(rts=[self.RT1], import_rts=[], export_rts=[])
self._check_l3_bgpvpn()
+ @test.unstable_test("bug 1897408")
@decorators.idempotent_id('c8bfd695-f731-47a6-86e3-3dfa492e08e0')
@utils.services('compute', 'network')
def test_bgpvpn_update_rt_and_keep_local_connectivity_variant1(self):
@@ -404,6 +405,7 @@
self._check_l3_bgpvpn(self.servers[0], self.servers[2])
self._check_l3_bgpvpn(self.servers[1], self.servers[3])
+ @test.unstable_test("bug 1897408")
@decorators.idempotent_id('758a8731-5070-4b1e-9a66-d6ff05bb5be1')
@utils.services('compute', 'network')
def test_bgpvpn_update_rt_and_keep_local_connectivity_variant2(self):
@@ -544,6 +546,7 @@
to_server_ip=IP_C_S1_1,
validate_server=destination_srv_2)
+ @test.unstable_test("bug 1897408")
@decorators.idempotent_id('f762e6ac-920e-4d0f-aa67-02bdd4ab8433')
@utils.services('compute', 'network')
def test_bgpvpn_tenant_separation_and_local_connectivity(self):
@@ -1100,8 +1103,8 @@
self._check_l3_bgpvpn(should_succeed=False)
def _create_security_group_for_test(self):
- self.security_group = self._create_security_group(
- tenant_id=self.bgpvpn_client.tenant_id)
+ self.security_group = self.create_security_group(
+ project_id=self.bgpvpn_client.project_id)
def _create_networks_and_subnets(self, names=None, subnet_cidrs=None,
port_security=True):
@@ -1110,8 +1113,9 @@
if not subnet_cidrs:
subnet_cidrs = [[NET_A_S1], [NET_B_S1], [NET_C_S1]]
for (name, subnet_cidrs) in zip(names, subnet_cidrs):
- network = self._create_network(
- namestart=name, port_security_enabled=port_security)
+ network = super(manager.NetworkScenarioTest,
+ self).create_network(namestart=name,
+ port_security_enabled=port_security)
self.networks[name] = network
self.subnets[name] = []
for (j, cidr) in enumerate(subnet_cidrs):
@@ -1178,9 +1182,12 @@
create_port_body = {'fixed_ips': [{'ip_address': ip_address}],
'namestart': 'port-smoke',
'security_groups': security_groups}
- port = self._create_port(network_id=network['id'],
- client=clients.ports_client,
- **create_port_body)
+
+ port = super(manager.NetworkScenarioTest,
+ self).create_port(network_id=network['id'],
+ client=clients.ports_client,
+ **create_port_body)
+
create_server_kwargs = {
'key_name': keypair['name'],
'networks': [{'uuid': network['id'], 'port': port['id']}]
@@ -1252,7 +1259,8 @@
private_key = self.servers_keypairs[server['id']][
'private_key']
ssh_client = self.get_remote_client(server_fip,
- private_key=private_key)
+ private_key=private_key,
+ server=server)
return ssh_client
def _setup_http_server(self, server_index):
diff --git a/neutron_tempest_plugin/common/ip.py b/neutron_tempest_plugin/common/ip.py
index 83cd3d9..9fe49db 100644
--- a/neutron_tempest_plugin/common/ip.py
+++ b/neutron_tempest_plugin/common/ip.py
@@ -36,13 +36,19 @@
sudo = 'sudo'
ip_path = '/sbin/ip'
- def __init__(self, ssh_client=None, timeout=None):
+ def __init__(self, ssh_client=None, timeout=None, namespace=None):
self.ssh_client = ssh_client
self.timeout = timeout
+ self.namespace = namespace
def get_command(self, obj, *command):
- command_line = '{sudo!s} {ip_path!r} {object!s} {command!s}'.format(
- sudo=self.sudo, ip_path=self.ip_path, object=obj,
+ command_line = '{sudo!s} {ip_path!r} '.format(sudo=self.sudo,
+ ip_path=self.ip_path)
+ if self.namespace:
+ command_line += 'netns exec {ns_name!s} {ip_path!r} '.format(
+ ns_name=self.namespace, ip_path=self.ip_path)
+ command_line += '{object!s} {command!s}'.format(
+ object=obj,
command=subprocess.list2cmdline([str(c) for c in command]))
return command_line
@@ -51,6 +57,20 @@
return shell.execute(command_line, ssh_client=self.ssh_client,
timeout=self.timeout).stdout
+ def configure_vlan(self, addresses, port, vlan_tag, subport_ips):
+ port_device = get_port_device_name(addresses=addresses, port=port)
+ subport_device = '{!s}.{!s}'.format(port_device, vlan_tag)
+ LOG.debug('Configuring VLAN subport interface %r on top of interface '
+ '%r with IPs: %s', subport_device, port_device,
+ ', '.join(subport_ips))
+
+ self.add_link(link=port_device, name=subport_device, link_type='vlan',
+ segmentation_id=vlan_tag)
+ self.set_link(device=subport_device, state='up')
+ for subport_ip in subport_ips:
+ self.add_address(address=subport_ip, device=subport_device)
+ return subport_device
+
def configure_vlan_subport(self, port, subport, vlan_tag, subnets):
addresses = self.list_addresses()
try:
@@ -71,18 +91,26 @@
"Unable to get IP address and subnet prefix lengths for "
"subport")
- port_device = get_port_device_name(addresses=addresses, port=port)
- subport_device = '{!s}.{!s}'.format(port_device, vlan_tag)
- LOG.debug('Configuring VLAN subport interface %r on top of interface '
- '%r with IPs: %s', subport_device, port_device,
- ', '.join(subport_ips))
+ return self.configure_vlan(addresses, port, vlan_tag, subport_ips)
- self.add_link(link=port_device, name=subport_device, link_type='vlan',
- segmentation_id=vlan_tag)
- self.set_link(device=subport_device, state='up')
- for subport_ip in subport_ips:
- self.add_address(address=subport_ip, device=subport_device)
- return subport_device
+ def configure_vlan_transparent(self, port, vlan_tag, ip_addresses):
+ addresses = self.list_addresses()
+ try:
+ subport_device = get_vlan_device_name(addresses, ip_addresses)
+ except ValueError:
+ pass
+ else:
+ LOG.debug('Interface %r already configured.', subport_device)
+ return subport_device
+
+ return self.configure_vlan(addresses, port, vlan_tag, ip_addresses)
+
+ def list_namespaces(self):
+ namespaces_output = self.execute("netns")
+ ns_list = []
+ for ns_line in namespaces_output.split("\n"):
+ ns_list.append(ns_line.split(" ", 1)[0])
+ return ns_list
def list_addresses(self, device=None, ip_addresses=None, port=None,
subnets=None):
@@ -115,6 +143,23 @@
# ip addr add 192.168.1.1/24 dev em1
return self.execute('address', 'add', address, 'dev', device)
+ def delete_address(self, address, device):
+ # ip addr del 192.168.1.1/24 dev em1
+ return self.execute('address', 'del', address, 'dev', device)
+
+ def add_route(self, address, device, gateway=None):
+ if gateway:
+ # ip route add 192.168.1.0/24 via 192.168.22.1 dev em1
+ return self.execute(
+ 'route', 'add', address, 'via', gateway, 'dev', device)
+ else:
+ # ip route add 192.168.1.0/24 dev em1
+ return self.execute('route', 'add', address, 'dev', device)
+
+ def delete_route(self, address, device):
+ # ip route del 192.168.1.0/24 dev em1
+ return self.execute('route', 'del', address, 'dev', device)
+
def list_routes(self, *args):
output = self.execute('route', 'show', *args)
return list(parse_routes(output))
@@ -299,6 +344,15 @@
raise ValueError(msg)
+def get_vlan_device_name(addresses, ip_addresses):
+ for address in list_ip_addresses(addresses=addresses,
+ ip_addresses=ip_addresses):
+ return address.device.name
+
+ msg = "Fixed IPs {0!r} not found on server.".format(' '.join(ip_addresses))
+ raise ValueError(msg)
+
+
def _get_ip_address_prefix_len_pairs(port, subnets):
subnets = {subnet['id']: subnet for subnet in subnets}
for fixed_ip in port['fixed_ips']:
@@ -308,23 +362,44 @@
netaddr.IPNetwork(subnet['cidr']).prefixlen)
-def arp_table():
+def arp_table(namespace=None):
# 192.168.0.16 0x1 0x2 dc:a6:32:06:56:51 * enp0s31f6
regex_str = (r"([^ ]+)\s+(0x\d+)\s+(0x\d+)\s+(\w{2}\:\w{2}\:\w{2}\:\w{2}\:"
r"\w{2}\:\w{2})\s+([\w+\*]+)\s+([\-\w]+)")
regex = re.compile(regex_str)
arp_table = []
- with open('/proc/net/arp', 'r') as proc_file:
- for line in proc_file.readlines():
- m = regex.match(line)
- if m:
- arp_table.append(ARPregister(
- ip_address=m.group(1), hw_type=m.group(2),
- flags=m.group(3), mac_address=m.group(4),
- mask=m.group(5), device=m.group(6)))
+ cmd = ""
+ if namespace:
+ cmd = "sudo ip netns exec %s " % namespace
+ cmd += "cat /proc/net/arp"
+ arp_entries = shell.execute(cmd).stdout.split("\n")
+ for line in arp_entries:
+ m = regex.match(line)
+ if m:
+ arp_table.append(ARPregister(
+ ip_address=m.group(1), hw_type=m.group(2),
+ flags=m.group(3), mac_address=m.group(4),
+ mask=m.group(5), device=m.group(6)))
return arp_table
+def list_iptables(version=constants.IP_VERSION_4, namespace=None):
+ cmd = ''
+ if namespace:
+ cmd = 'sudo ip netns exec %s ' % namespace
+ cmd += ('iptables-save' if version == constants.IP_VERSION_4 else
+ 'ip6tables-save')
+ return shell.execute(cmd).stdout
+
+
+def list_listening_sockets(namespace=None):
+ cmd = ''
+ if namespace:
+ cmd = 'sudo ip netns exec %s ' % namespace
+ cmd += 'netstat -nlp'
+ return shell.execute(cmd).stdout
+
+
class Route(HasProperties,
collections.namedtuple('Route',
['dest', 'properties'])):
diff --git a/neutron_tempest_plugin/common/ssh.py b/neutron_tempest_plugin/common/ssh.py
index fa731d8..4cb1474 100644
--- a/neutron_tempest_plugin/common/ssh.py
+++ b/neutron_tempest_plugin/common/ssh.py
@@ -19,7 +19,6 @@
from oslo_log import log
import paramiko
-import six
from tempest.lib.common import ssh
from tempest.lib import exceptions
import tenacity
@@ -33,13 +32,7 @@
RETRY_EXCEPTIONS = (exceptions.TimeoutException, paramiko.SSHException,
- socket.error)
-if six.PY2:
- # NOTE(ralonsoh): TimeoutError was added in 3.3 and corresponds to
- # OSError(errno.ETIMEDOUT)
- RETRY_EXCEPTIONS += (OSError, )
-else:
- RETRY_EXCEPTIONS += (TimeoutError, )
+ socket.error, TimeoutError)
class Client(ssh.Client):
@@ -69,7 +62,8 @@
host=host, username=username, password=password, timeout=timeout,
pkey=pkey, channel_timeout=channel_timeout,
look_for_keys=look_for_keys, key_filename=key_filename, port=port,
- proxy_client=proxy_client)
+ proxy_client=proxy_client,
+ ssh_key_type=CONF.validation.ssh_key_type)
@classmethod
def create_proxy_client(cls, look_for_keys=True, **kwargs):
@@ -293,6 +287,13 @@
command=shell, host=self.host, script=script, stderr=stderr,
stdout=stdout, exit_status=exit_status)
+ def get_hostname(self):
+ """Retrieve the remote machine hostname"""
+ try:
+ return self.exec_command('hostname')
+ except exceptions.SSHExecCommandFailed:
+ return self.exec_command('cat /etc/hostname')
+
def _buffer_to_string(data_buffer, encoding):
return data_buffer.decode(encoding).replace("\r\n", "\n").replace(
diff --git a/neutron_tempest_plugin/common/utils.py b/neutron_tempest_plugin/common/utils.py
index f03762c..4ccec72 100644
--- a/neutron_tempest_plugin/common/utils.py
+++ b/neutron_tempest_plugin/common/utils.py
@@ -29,11 +29,14 @@
from tempest.lib import exceptions
+from neutron_tempest_plugin import config
+
SCHEMA_PORT_MAPPING = {
"http": 80,
"https": 443,
}
+CONF = config.CONF
class classproperty(object):
@@ -136,3 +139,85 @@
def call_url_remote(ssh_client, url):
cmd = "curl %s --retry 3 --connect-timeout 2" % url
return ssh_client.exec_command(cmd)
+
+
+class StatefulConnection:
+ """Class to test connection that should remain opened
+
+ Can be used to perform some actions while the initiated connection
+ remain opened
+ """
+
+ def __init__(self, client_ssh, server_ssh, target_ip, target_port):
+ self.client_ssh = client_ssh
+ self.server_ssh = server_ssh
+ self.ip = target_ip
+ self.port = target_port
+ self.connection_started = False
+ self.test_attempt = 0
+
+ def __enter__(self):
+ return self
+
+ @property
+ def test_str(self):
+ return 'attempt_{}'.format(str(self.test_attempt).zfill(3))
+
+ def _start_connection(self):
+ if CONF.neutron_plugin_options.default_image_is_advanced:
+ server_exec_method = self.server_ssh.execute_script
+ client_exec_method = self.client_ssh.execute_script
+ else:
+ server_exec_method = self.server_ssh.exec_command
+ client_exec_method = self.client_ssh.exec_command
+
+ self.server_ssh.exec_command(
+ 'echo "{}" > input.txt'.format(self.test_str))
+ server_exec_method('tail -f input.txt | nc -lp '
+ '{} &> output.txt &'.format(self.port))
+ self.client_ssh.exec_command(
+ 'echo "{}" > input.txt'.format(self.test_str))
+ client_exec_method('tail -f input.txt | nc {} {} &>'
+ 'output.txt &'.format(self.ip, self.port))
+
+ def _test_connection(self):
+ if not self.connection_started:
+ self._start_connection()
+ else:
+ self.server_ssh.exec_command(
+ 'echo "{}" >> input.txt'.format(self.test_str))
+ self.client_ssh.exec_command(
+ 'echo "{}" >> input.txt & sleep 1'.format(self.test_str))
+ try:
+ self.server_ssh.exec_command(
+ 'grep {} output.txt'.format(self.test_str))
+ self.client_ssh.exec_command(
+ 'grep {} output.txt'.format(self.test_str))
+ if not self.should_pass:
+ return False
+ else:
+ if not self.connection_started:
+ self.connection_started = True
+ return True
+ except exceptions.SSHExecCommandFailed:
+ if self.should_pass:
+ return False
+ else:
+ return True
+ finally:
+ self.test_attempt += 1
+
+ def test_connection(self, should_pass=True, timeout=10, sleep_timer=1):
+ self.should_pass = should_pass
+ wait_until_true(
+ self._test_connection, timeout=timeout, sleep=sleep_timer)
+
+ def __exit__(self, type, value, traceback):
+ self.server_ssh.exec_command('sudo killall nc || killall nc || '
+ 'echo "True"')
+ self.server_ssh.exec_command(
+ 'sudo killall tail || killall tail || echo "True"')
+ self.client_ssh.exec_command('sudo killall nc || killall nc || '
+ 'echo "True"')
+ self.client_ssh.exec_command(
+ 'sudo killall tail || killall tail || echo "True"')
diff --git a/neutron_tempest_plugin/config.py b/neutron_tempest_plugin/config.py
index c0e21c1..aea79ad 100644
--- a/neutron_tempest_plugin/config.py
+++ b/neutron_tempest_plugin/config.py
@@ -68,6 +68,11 @@
default=None,
choices=['None', 'linuxbridge', 'ovs', 'sriov'],
help='Agent used for devstack@q-agt.service'),
+ cfg.StrOpt('firewall_driver',
+ default=None,
+ choices=['None', 'openvswitch', 'ovn',
+ 'iptables_hybrid', 'iptables'],
+ help='Driver for security groups firewall in the L2 agent'),
# Multicast tests settings
cfg.StrOpt('multicast_group_range',
@@ -92,6 +97,9 @@
cfg.IntOpt('ssh_proxy_jump_port',
default=22,
help='Port used to connect to "ssh_proxy_jump_host".'),
+ cfg.IntOpt('reboots_in_test',
+ default=1,
+ help='Number of reboots to apply if tests requires reboots'),
# Options for special, "advanced" image like e.g. Ubuntu. Such image can be
# used in tests which require some more advanced tool than available in
@@ -120,18 +128,6 @@
'This is required if advanced image has to be used in '
'tests.'),
- # Enable/disable metadata over IPv6 tests. This feature naturally
- # does not have an API extension, but at the time of first implementation
- # it works only on victoria+ deployments with dhcp- and/or l3-agents
- # (which in the gate is the same as non-ovn jobs).
- cfg.BoolOpt('ipv6_metadata',
- default=True,
- help='Enable metadata over IPv6 tests where the feature is '
- 'implemented, disable where it is not. Use this instead '
- 'of network-feature-enabled.api_extensions, since API '
- 'extensions do not make sense for a feature not '
- 'exposed on the API.'),
-
# Option for creating QoS policies configures as "shared".
# The default is false in order to prevent undesired usage
# while testing in parallel.
@@ -145,6 +141,12 @@
'If True, multicast test(s) will assert that multicast '
'traffic is not being flooded to all ports. Defaults '
'to False.'),
+ # Option for scheduling BGP speakers to agents explicitly
+ # The default is false with automatic scheduling on creation
+ # happening with the default scheduler
+ cfg.BoolOpt('bgp_schedule_speakers_to_agents',
+ default=False,
+ help='Schedule BGP speakers to agents explicitly.'),
]
# TODO(amuller): Redo configuration options registration as part of the planned
@@ -218,6 +220,25 @@
CONF.register_group(sfc_group)
CONF.register_opts(SfcGroup, group="sfc")
+
+TaasGroup = [
+ cfg.StrOpt('provider_physical_network',
+ default='',
+ help='Physical network to be used for creating SRIOV network.'),
+ cfg.StrOpt('provider_segmentation_id',
+ default='',
+ help='Segmentation-id to be used for creating SRIOV network.'),
+ cfg.StrOpt('vlan_filter',
+ default='',
+ help='Comma separated list of VLANs to be mirrored '
+ 'for a Tap-Flow.'),
+]
+taas_group = cfg.OptGroup(name='taas',
+ title='TaaS Tempest Options')
+CONF.register_group(taas_group)
+CONF.register_opts(TaasGroup, group="taas")
+
+
config_opts_translator = {
'project_network_cidr': 'tenant_network_cidr',
'project_network_v6_cidr': 'tenant_network_v6_cidr',
diff --git a/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
index f085e6d..0dd18f1 100644
--- a/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
+++ b/neutron_tempest_plugin/fwaas/api/test_fwaasv2_extensions.py
@@ -13,7 +13,6 @@
# under the License.
import netaddr
-import six
from tempest.common import utils
from tempest import config
@@ -212,7 +211,7 @@
# show a created firewall rule
fw_rule = self.firewall_rules_client.show_firewall_rule(
self.fw_rule_1['id'])
- for key, value in six.iteritems(fw_rule['firewall_rule']):
+ for key, value in fw_rule['firewall_rule'].items():
if key != 'firewall_policy_id':
self.assertEqual(self.fw_rule_1[key], value)
# This check is placed because we cannot modify policy during
@@ -258,7 +257,7 @@
fw_policy = self.firewall_policies_client.show_firewall_policy(
self.fw_policy_1['id'])
fw_policy = fw_policy['firewall_policy']
- for key, value in six.iteritems(fw_policy):
+ for key, value in fw_policy.items():
self.assertEqual(self.fw_policy_1[key], value)
@decorators.idempotent_id('48dfcd75-3924-479d-bb65-b3ed33397663')
@@ -283,7 +282,7 @@
fwg_id)
fwg = firewall_group['firewall_group']
- for key, value in six.iteritems(fwg):
+ for key, value in fwg.items():
if key == 'status':
continue
self.assertEqual(created_firewall_group[key], value)
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
index 00cdf2c..f8eb44c 100644
--- a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_base.py
@@ -46,6 +46,7 @@
try:
client = ssh.Client(ip_address, username, pkey=private_key,
channel_timeout=connect_timeout,
+ ssh_key_type=CONF.validation.ssh_key_type,
**kwargs)
client.test_connection_auth()
self.assertTrue(should_connect, "Unexpectedly reachable")
diff --git a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
index 5ead2a7..517c96e 100644
--- a/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
+++ b/neutron_tempest_plugin/fwaas/scenario/fwaas_v2_manager.py
@@ -14,28 +14,19 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
-import netaddr
from oslo_log import log
-from oslo_utils import netutils
-from tempest.common import compute
-from tempest.common.utils.linux import remote_client
-from tempest.common.utils import net_utils
-from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
-from tempest.lib import exceptions as lib_exc
-import tempest.test
+from tempest.scenario import manager
CONF = config.CONF
LOG = log.getLogger(__name__)
-class ScenarioTest(tempest.test.BaseTestCase):
+class ScenarioTest(manager.NetworkScenarioTest):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
@@ -50,298 +41,6 @@
if msg:
raise cls.skipException(msg)
- @classmethod
- def setup_clients(cls):
- super(ScenarioTest, cls).setup_clients()
- # Clients (in alphabetical order)
- cls.keypairs_client = cls.os_primary.keypairs_client
- cls.servers_client = cls.os_primary.servers_client
- # Neutron network client
- cls.networks_client = cls.os_primary.networks_client
- cls.ports_client = cls.os_primary.ports_client
- cls.routers_client = cls.os_primary.routers_client
- cls.subnets_client = cls.os_primary.subnets_client
- cls.floating_ips_client = cls.os_primary.floating_ips_client
- cls.security_groups_client = cls.os_primary.security_groups_client
- cls.security_group_rules_client = (
- cls.os_primary.security_group_rules_client)
-
- # Test functions library
- #
- # The create_[resource] functions only return body and discard the
- # resp part which is not used in scenario tests
-
- def _create_port(self, network_id, client=None, namestart='port-quotatest',
- **kwargs):
- if not client:
- client = self.ports_client
- name = data_utils.rand_name(namestart)
- result = client.create_port(
- name=name,
- network_id=network_id,
- **kwargs)
- self.assertIsNotNone(result, 'Unable to allocate port')
- port = result['port']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_port, port['id'])
- return port
-
- def create_keypair(self, client=None):
- if not client:
- client = self.keypairs_client
- name = data_utils.rand_name(self.__class__.__name__)
- # We don't need to create a keypair by pubkey in scenario
- body = client.create_keypair(name=name)
- self.addCleanup(client.delete_keypair, name)
- return body['keypair']
-
- def create_server(self, name=None, image_id=None, flavor=None,
- validatable=False, wait_until='ACTIVE',
- clients=None, **kwargs):
- """Wrapper utility that returns a test server.
-
- This wrapper utility calls the common create test server and
- returns a test server. The purpose of this wrapper is to minimize
- the impact on the code of the tests already using this
- function.
- """
-
- # NOTE(jlanoux): As a first step, ssh checks in the scenario
- # tests need to be run regardless of the run_validation and
- # validatable parameters and thus until the ssh validation job
- # becomes voting in CI. The test resources management and IP
- # association are taken care of in the scenario tests.
- # Therefore, the validatable parameter is set to false in all
- # those tests. In this way create_server just return a standard
- # server and the scenario tests always perform ssh checks.
-
- # Needed for the cross_tenant_traffic test:
- if clients is None:
- clients = self.os_primary
-
- if name is None:
- name = data_utils.rand_name(self.__class__.__name__ + "-server")
-
- vnic_type = CONF.network.port_vnic_type
-
- # If vnic_type is configured create port for
- # every network
- if vnic_type:
- ports = []
-
- create_port_body = {'binding:vnic_type': vnic_type,
- 'namestart': 'port-smoke'}
- if kwargs:
- # Convert security group names to security group ids
- # to pass to create_port
- if 'security_groups' in kwargs:
- security_groups = \
- clients.security_groups_client.list_security_groups(
- ).get('security_groups')
- sec_dict = dict([(s['name'], s['id'])
- for s in security_groups])
-
- sec_groups_names = [s['name'] for s in kwargs.pop(
- 'security_groups')]
- security_groups_ids = [sec_dict[s]
- for s in sec_groups_names]
-
- if security_groups_ids:
- create_port_body[
- 'security_groups'] = security_groups_ids
- networks = kwargs.pop('networks', [])
- else:
- networks = []
-
- # If there are no networks passed to us we look up
- # for the project's private networks and create a port.
- # The same behaviour as we would expect when passing
- # the call to the clients with no networks
- if not networks:
- networks = clients.networks_client.list_networks(
- **{'router:external': False, 'fields': 'id'})['networks']
-
- # It's net['uuid'] if networks come from kwargs
- # and net['id'] if they come from
- # clients.networks_client.list_networks
- for net in networks:
- net_id = net.get('uuid', net.get('id'))
- if 'port' not in net:
- port = self._create_port(network_id=net_id,
- client=clients.ports_client,
- **create_port_body)
- ports.append({'port': port['id']})
- else:
- ports.append({'port': net['port']})
- if ports:
- kwargs['networks'] = ports
- self.ports = ports
-
- tenant_network = self.get_tenant_network()
-
- body, servers = compute.create_test_server(
- clients,
- tenant_network=tenant_network,
- wait_until=wait_until,
- name=name, flavor=flavor,
- image_id=image_id, **kwargs)
-
- self.addCleanup(waiters.wait_for_server_termination,
- clients.servers_client, body['id'])
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- clients.servers_client.delete_server, body['id'])
- server = clients.servers_client.show_server(body['id'])['server']
- return server
-
- def get_remote_client(self, ip_address, username=None, private_key=None):
- """Get a SSH client to a remote server
-
- @param ip_address the server floating or fixed IP address to use
- for ssh validation
- @param username name of the Linux account on the remote server
- @param private_key the SSH private key to use
- @return a RemoteClient object
- """
-
- if username is None:
- username = CONF.validation.image_ssh_user
- # Set this with 'keypair' or others to log in with keypair or
- # username/password.
- if CONF.validation.auth_method == 'keypair':
- password = None
- if private_key is None:
- private_key = self.keypair['private_key']
- else:
- password = CONF.validation.image_ssh_password
- private_key = None
- linux_client = remote_client.RemoteClient(ip_address, username,
- pkey=private_key,
- password=password)
- try:
- linux_client.validate_authentication()
- except Exception as e:
- message = ('Initializing SSH connection to %(ip)s failed. '
- 'Error: %(error)s' % {'ip': ip_address,
- 'error': e})
- caller = test_utils.find_test_caller()
- if caller:
- message = '(%s) %s' % (caller, message)
- LOG.exception(message)
- self._log_console_output()
- raise
-
- return linux_client
-
- def _log_console_output(self, servers=None):
- if not CONF.compute_feature_enabled.console_output:
- LOG.debug('Console output not supported, cannot log')
- return
- if not servers:
- servers = self.servers_client.list_servers()
- servers = servers['servers']
- for server in servers:
- try:
- console_output = self.servers_client.get_console_output(
- server['id'])['output']
- LOG.debug('Console output for %s\nbody=\n%s',
- server['id'], console_output)
- except lib_exc.NotFound:
- LOG.debug("Server %s disappeared(deleted) while looking "
- "for the console log", server['id'])
-
- def _log_net_info(self, exc):
- # network debug is called as part of ssh init
- if not isinstance(exc, lib_exc.SSHTimeout):
- LOG.debug('Network information on a devstack host')
-
- def ping_ip_address(self, ip_address, should_succeed=True,
- ping_timeout=None, mtu=None):
- timeout = ping_timeout or CONF.validation.ping_timeout
- cmd = ['ping', '-c1', '-w1']
-
- if mtu:
- cmd += [
- # don't fragment
- '-M', 'do',
- # ping receives just the size of ICMP payload
- '-s', str(net_utils.get_ping_payload_size(mtu, 4))
- ]
- cmd.append(ip_address)
-
- def ping():
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc.communicate()
-
- return (proc.returncode == 0) == should_succeed
-
- caller = test_utils.find_test_caller()
- LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
- ' expected result is %(should_succeed)s', {
- 'caller': caller, 'ip': ip_address, 'timeout': timeout,
- 'should_succeed':
- 'reachable' if should_succeed else 'unreachable'
- })
- result = test_utils.call_until_true(ping, timeout, 1)
- LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
- 'ping result is %(result)s', {
- 'caller': caller, 'ip': ip_address, 'timeout': timeout,
- 'result': 'expected' if result else 'unexpected'
- })
- return result
-
- def check_vm_connectivity(self, ip_address,
- username=None,
- private_key=None,
- should_connect=True,
- mtu=None):
- """Check server connectivity
-
- :param ip_address: server to test against
- :param username: server's ssh username
- :param private_key: server's ssh private key to be used
- :param should_connect: True/False indicates positive/negative test
- positive - attempt ping and ssh
- negative - attempt ping and fail if succeed
- :param mtu: network MTU to use for connectivity validation
-
- :raises: AssertError if the result of the connectivity check does
- not match the value of the should_connect param
- """
- if should_connect:
- msg = "Timed out waiting for %s to become reachable" % ip_address
- else:
- msg = "ip address %s is reachable" % ip_address
- self.assertTrue(self.ping_ip_address(ip_address,
- should_succeed=should_connect,
- mtu=mtu),
- msg=msg)
- if should_connect:
- # no need to check ssh for negative connectivity
- self.get_remote_client(ip_address, username, private_key)
-
- def check_public_network_connectivity(self, ip_address, username,
- private_key, should_connect=True,
- msg=None, servers=None, mtu=None):
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- LOG.debug('checking network connections to IP %s with user: %s',
- ip_address, username)
- try:
- self.check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect,
- mtu=mtu)
- except Exception:
- ex_msg = 'Public network connectivity check failed'
- if msg:
- ex_msg += ": " + msg
- LOG.exception(ex_msg)
- self._log_console_output(servers)
- raise
-
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
@@ -363,443 +62,6 @@
if not CONF.service_available.neutron:
raise cls.skipException('Neutron not available')
- def _create_network(self, networks_client=None,
- tenant_id=None,
- namestart='network-smoke-',
- port_security_enabled=True):
- if not networks_client:
- networks_client = self.networks_client
- if not tenant_id:
- tenant_id = networks_client.tenant_id
- name = data_utils.rand_name(namestart)
- network_kwargs = dict(name=name, tenant_id=tenant_id)
- # Neutron disables port security by default so we have to check the
- # config before trying to create the network with port_security_enabled
- if CONF.network_feature_enabled.port_security:
- network_kwargs['port_security_enabled'] = port_security_enabled
- result = networks_client.create_network(**network_kwargs)
- network = result['network']
-
- self.assertEqual(network['name'], name)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- networks_client.delete_network,
- network['id'])
- return network
-
- def _create_subnet(self, network, subnets_client=None,
- routers_client=None, namestart='subnet-smoke',
- **kwargs):
- """Create a subnet for the given network
-
- within the cidr block configured for tenant networks.
- """
- if not subnets_client:
- subnets_client = self.subnets_client
- if not routers_client:
- routers_client = self.routers_client
-
- def cidr_in_use(cidr, tenant_id):
- """Check cidr existence
-
- :returns: True if subnet with cidr already exist in tenant
- False else
- """
- cidr_in_use = self.os_admin.subnets_client.list_subnets(
- tenant_id=tenant_id, cidr=cidr)['subnets']
- return len(cidr_in_use) != 0
-
- ip_version = kwargs.pop('ip_version', 4)
-
- if ip_version == 6:
- tenant_cidr = netaddr.IPNetwork(
- CONF.network.project_network_v6_cidr)
- num_bits = CONF.network.project_network_v6_mask_bits
- else:
- tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- num_bits = CONF.network.project_network_mask_bits
-
- result = None
- str_cidr = None
- # Repeatedly attempt subnet creation with sequential cidr
- # blocks until an unallocated block is found.
- for subnet_cidr in tenant_cidr.subnet(num_bits):
- str_cidr = str(subnet_cidr)
- if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
- continue
-
- subnet = dict(
- name=data_utils.rand_name(namestart),
- network_id=network['id'],
- tenant_id=network['tenant_id'],
- cidr=str_cidr,
- ip_version=ip_version,
- **kwargs
- )
- try:
- result = subnets_client.create_subnet(**subnet)
- break
- except lib_exc.Conflict as e:
- is_overlapping_cidr = 'overlaps with another subnet' in str(e)
- if not is_overlapping_cidr:
- raise
- self.assertIsNotNone(result, 'Unable to allocate tenant network')
-
- subnet = result['subnet']
- self.assertEqual(subnet['cidr'], str_cidr)
-
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- subnets_client.delete_subnet, subnet['id'])
-
- return subnet
-
- def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'], fixed_ip=ip_addr)['ports']
- # A port can have more than one IP address in some cases.
- # If the network is dual-stack (IPv4 + IPv6), this port is associated
- # with 2 subnets
- p_status = ['ACTIVE']
- # NOTE(vsaienko) With Ironic, instances live on separate hardware
- # servers. Neutron does not bind ports for Ironic instances, as a
- # result the port remains in the DOWN state.
- # TODO(vsaienko) remove once bug: #1599836 is resolved.
- if getattr(CONF.service_available, 'ironic', False):
- p_status.append('DOWN')
- port_map = [(p["id"], fxip["ip_address"])
- for p in ports
- for fxip in p["fixed_ips"]
- if (netutils.is_valid_ipv4(fxip["ip_address"]) and
- p['status'] in p_status)]
- inactive = [p for p in ports if p['status'] != 'ACTIVE']
- if inactive:
- LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
- self.assertNotEqual(0, len(port_map),
- "No IPv4 addresses found in: %s" % ports)
- self.assertEqual(len(port_map), 1,
- "Found multiple IPv4 addresses: %s. "
- "Unable to determine which port to target."
- % port_map)
- return port_map[0]
-
- def _get_network_by_name(self, network_name):
- net = self.os_admin.networks_client.list_networks(
- name=network_name)['networks']
- self.assertNotEqual(len(net), 0,
- "Unable to get network by name: %s" % network_name)
- return net[0]
-
- def create_floating_ip(self, thing, external_network_id=None,
- port_id=None, client=None):
- """Create a floating IP and associates to a resource/port on Neutron"""
- if not external_network_id:
- external_network_id = CONF.network.public_network_id
- if not client:
- client = self.floating_ips_client
- if not port_id:
- port_id, ip4 = self._get_server_port_id_and_ip4(thing)
- else:
- ip4 = None
- result = client.create_floatingip(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=thing['tenant_id'],
- fixed_ip_address=ip4
- )
- floating_ip = result['floatingip']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_floatingip,
- floating_ip['id'])
- return floating_ip
-
- def _associate_floating_ip(self, floating_ip, server):
- port_id, _ = self._get_server_port_id_and_ip4(server)
- kwargs = dict(port_id=port_id)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertEqual(port_id, floating_ip['port_id'])
- return floating_ip
-
- def _disassociate_floating_ip(self, floating_ip):
- """:param floating_ip: floating_ips_client.create_floatingip"""
- kwargs = dict(port_id=None)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertIsNone(floating_ip['port_id'])
- return floating_ip
-
- def check_floating_ip_status(self, floating_ip, status):
- """Verifies floatingip reaches the given status
-
- :param dict floating_ip: floating IP dict to check status
- :param status: target status
- :raises: AssertionError if status doesn't match
- """
- floatingip_id = floating_ip['id']
-
- def refresh():
- result = (self.floating_ips_client.
- show_floatingip(floatingip_id)['floatingip'])
- return status == result['status']
-
- test_utils.call_until_true(refresh,
- CONF.network.build_timeout,
- CONF.network.build_interval)
- floating_ip = self.floating_ips_client.show_floatingip(
- floatingip_id)['floatingip']
- self.assertEqual(status, floating_ip['status'],
- message="FloatingIP: {fp} is at status: {cst}. "
- "failed to reach status: {st}"
- .format(fp=floating_ip, cst=floating_ip['status'],
- st=status))
- LOG.info("FloatingIP: {fp} is at status: {st}"
- .format(fp=floating_ip, st=status))
-
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True,
- servers_for_debug=None):
- if not CONF.network.project_networks_reachable:
- msg = 'Tenant networks not configured to be reachable.'
- LOG.info(msg)
- return
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- try:
- for net_name, ip_addresses in server['addresses'].items():
- for ip_address in ip_addresses:
- self.check_vm_connectivity(ip_address['addr'],
- username,
- private_key,
- should_connect=should_connect)
- except Exception as e:
- LOG.exception('Tenant network connectivity check failed')
- self._log_console_output(servers_for_debug)
- self._log_net_info(e)
- raise
-
- def _check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
- """check ping server via source ssh connection
-
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
- :param nic: specific network interface to ping from
- :returns: boolean -- should_succeed == ping
- :returns: ping is false if ping failed
- """
- def ping_remote():
- try:
- source.ping_host(dest, nic=nic)
- except lib_exc.SSHExecCommandFailed:
- LOG.warning('Failed to ping IP: %s via a ssh connection '
- 'from: %s.', dest, source.ssh_client.host)
- return not should_succeed
- return should_succeed
-
- return test_utils.call_until_true(ping_remote,
- CONF.validation.ping_timeout,
- 1)
-
- def _create_security_group(self, security_group_rules_client=None,
- tenant_id=None,
- namestart='secgroup-smoke',
- security_groups_client=None):
- if security_group_rules_client is None:
- security_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- if tenant_id is None:
- tenant_id = security_groups_client.tenant_id
- secgroup = self._create_empty_security_group(
- namestart=namestart, client=security_groups_client,
- tenant_id=tenant_id)
-
- # Add rules to the security group
- rules = self._create_loginable_secgroup_rule(
- security_group_rules_client=security_group_rules_client,
- secgroup=secgroup,
- security_groups_client=security_groups_client)
- for rule in rules:
- self.assertEqual(tenant_id, rule['tenant_id'])
- self.assertEqual(secgroup['id'], rule['security_group_id'])
- return secgroup
-
- def _create_empty_security_group(self, client=None, tenant_id=None,
- namestart='secgroup-smoke'):
- """Create a security group without rules.
-
- Default rules will be created:
- - IPv4 egress to any
- - IPv6 egress to any
-
- :param tenant_id: secgroup will be created in this tenant
- :returns: the created security group
- """
- if client is None:
- client = self.security_groups_client
- if not tenant_id:
- tenant_id = client.tenant_id
- sg_name = data_utils.rand_name(namestart)
- sg_desc = sg_name + " description"
- sg_dict = dict(name=sg_name,
- description=sg_desc)
- sg_dict['tenant_id'] = tenant_id
- result = client.create_security_group(**sg_dict)
-
- secgroup = result['security_group']
- self.assertEqual(secgroup['name'], sg_name)
- self.assertEqual(tenant_id, secgroup['tenant_id'])
- self.assertEqual(secgroup['description'], sg_desc)
-
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_security_group, secgroup['id'])
- return secgroup
-
- def _default_security_group(self, client=None, tenant_id=None):
- """Get default secgroup for given tenant_id.
-
- :returns: default secgroup for given tenant
- """
- if client is None:
- client = self.security_groups_client
- if not tenant_id:
- tenant_id = client.tenant_id
- sgs = [
- sg for sg in list(client.list_security_groups().values())[0]
- if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
- ]
- msg = "No default security group for tenant %s." % (tenant_id)
- self.assertGreater(len(sgs), 0, msg)
- return sgs[0]
-
- def _create_security_group_rule(self, secgroup=None,
- sec_group_rules_client=None,
- tenant_id=None,
- security_groups_client=None, **kwargs):
- """Create a rule from a dictionary of rule parameters.
-
- Create a rule in a secgroup. if secgroup not defined will search for
- default secgroup in tenant_id.
-
- :param secgroup: the security group.
- :param tenant_id: if secgroup not passed -- the tenant in which to
- search for default secgroup
- :param kwargs: a dictionary containing rule parameters:
- for example, to allow incoming ssh:
- rule = {
- direction: 'ingress'
- protocol:'tcp',
- port_range_min: 22,
- port_range_max: 22
- }
- """
- if sec_group_rules_client is None:
- sec_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- if not tenant_id:
- tenant_id = security_groups_client.tenant_id
- if secgroup is None:
- secgroup = self._default_security_group(
- client=security_groups_client, tenant_id=tenant_id)
-
- ruleset = dict(security_group_id=secgroup['id'],
- tenant_id=secgroup['tenant_id'])
- ruleset.update(kwargs)
-
- sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
- sg_rule = sg_rule['security_group_rule']
-
- self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
- self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
-
- return sg_rule
-
- def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
- secgroup=None,
- security_groups_client=None):
- """Create loginable security group rule
-
- This function will create:
- 1. egress and ingress tcp port 22 allow rule in order to allow ssh
- access for ipv4.
- 2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
- 3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
- """
-
- if security_group_rules_client is None:
- security_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- rules = []
- rulesets = [
- dict(
- # ssh
- protocol='tcp',
- port_range_min=22,
- port_range_max=22,
- ),
- dict(
- # ping
- protocol='icmp',
- ),
- dict(
- # ipv6-icmp for ping6
- protocol='icmp',
- ethertype='IPv6',
- )
- ]
- sec_group_rules_client = security_group_rules_client
- for ruleset in rulesets:
- for r_direction in ['ingress', 'egress']:
- ruleset['direction'] = r_direction
- try:
- sg_rule = self._create_security_group_rule(
- sec_group_rules_client=sec_group_rules_client,
- secgroup=secgroup,
- security_groups_client=security_groups_client,
- **ruleset)
- except lib_exc.Conflict as ex:
- # if rule already exist - skip rule and continue
- msg = 'Security group rule already exists'
- if msg not in ex._error_string:
- raise ex
- else:
- self.assertEqual(r_direction, sg_rule['direction'])
- rules.append(sg_rule)
-
- return rules
-
- def _get_router(self, client=None, tenant_id=None):
- """Retrieve a router for the given tenant id.
-
- If a public router has been configured, it will be returned.
-
- If a public router has not been configured, but a public
- network has, a tenant router will be created and returned that
- routes traffic to the public network.
- """
- if not client:
- client = self.routers_client
- if not tenant_id:
- tenant_id = client.tenant_id
- router_id = CONF.network.public_router_id
- network_id = CONF.network.public_network_id
- if router_id:
- body = client.show_router(router_id)
- return body['router']
- elif network_id:
- router = self._create_router(client, tenant_id)
- kwargs = {'external_gateway_info': dict(network_id=network_id)}
- router = client.update_router(router['id'], **kwargs)['router']
- return router
- else:
- raise Exception("Neither of 'public_router_id' or "
- "'public_network_id' has been defined.")
-
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
@@ -816,62 +78,3 @@
client.delete_router,
router['id'])
return router
-
- def _update_router_admin_state(self, router, admin_state_up):
- kwargs = dict(admin_state_up=admin_state_up)
- router = self.routers_client.update_router(
- router['id'], **kwargs)['router']
- self.assertEqual(admin_state_up, router['admin_state_up'])
-
- def create_networks(self, networks_client=None,
- routers_client=None, subnets_client=None,
- tenant_id=None, dns_nameservers=None,
- port_security_enabled=True):
- """Create a network with a subnet connected to a router.
-
- The baremetal driver is a special case since all nodes are
- on the same shared network.
-
- :param tenant_id: id of tenant to create resources in.
- :param dns_nameservers: list of dns servers to send to subnet.
- :returns: network, subnet, router
- """
- if CONF.network.shared_physical_network:
- # NOTE(Shrews): This exception is for environments where tenant
- # credential isolation is available, but network separation is
- # not (the current baremetal case). Likely can be removed when
- # test account mgmt is reworked:
- # https://blueprints.launchpad.net/tempest/+spec/test-accounts
- if not CONF.compute.fixed_network_name:
- m = 'fixed_network_name must be specified in config'
- raise lib_exc.InvalidConfiguration(m)
- network = self._get_network_by_name(
- CONF.compute.fixed_network_name)
- router = None
- subnet = None
- else:
- network = self._create_network(
- networks_client=networks_client,
- tenant_id=tenant_id,
- port_security_enabled=port_security_enabled)
- router = self._get_router(client=routers_client,
- tenant_id=tenant_id)
- subnet_kwargs = dict(network=network,
- subnets_client=subnets_client,
- routers_client=routers_client)
- # use explicit check because empty list is a valid option
- if dns_nameservers is not None:
- subnet_kwargs['dns_nameservers'] = dns_nameservers
- subnet = self._create_subnet(**subnet_kwargs)
- if not routers_client:
- routers_client = self.routers_client
- router_id = router['id']
- routers_client.add_router_interface(router_id,
- subnet_id=subnet['id'])
-
- # save a cleanup job to remove this association between
- # router and subnet
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- routers_client.remove_router_interface, router_id,
- subnet_id=subnet['id'])
- return network, subnet, router
diff --git a/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py b/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
index 4681b88..4d5fdac 100644
--- a/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
+++ b/neutron_tempest_plugin/fwaas/scenario/test_fwaas_v2.py
@@ -130,9 +130,9 @@
return resp
def _create_network_subnet(self):
- network = self._create_network()
+ network = self.create_network()
subnet_kwargs = dict(network=network)
- subnet = self._create_subnet(**subnet_kwargs)
+ subnet = self.create_subnet(**subnet_kwargs)
return network, subnet
def _create_test_server(self, network, security_group):
@@ -196,7 +196,7 @@
resp['router_portid_2'] = router_portid_2
# Create a VM on each of the network and assign it a floating IP.
- security_group = self._create_security_group()
+ security_group = self.create_security_group()
server1, private_key1, server_fixed_ip_1, server_floating_ip_1 = (
self._create_test_server(network1, security_group))
server2, private_key2, server_fixed_ip_2, server_floating_ip_2 = (
diff --git a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base.py b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base.py
index f752436..c7023af 100644
--- a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base.py
+++ b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base.py
@@ -19,16 +19,15 @@
import time
import netaddr
-import six
from os_ken.tests.integrated.common import docker_base as ctn_base
from tempest.common import utils
from tempest import config
from neutron_tempest_plugin.api import base
+from neutron_tempest_plugin import config as nconfig
from neutron_tempest_plugin.services.bgp import bgp_client
-
CONF = config.CONF
Scope = collections.namedtuple('Scope', 'name')
@@ -125,10 +124,10 @@
while True:
with self.lock:
try:
- yield (i, str(six.next(subnet_gen)))
+ yield (i, str(next(subnet_gen)))
except StopIteration:
subnet_gen = netaddr.iter_iprange(start, end, step=step)
- yield (i, str(six.next(subnet_gen)))
+ yield (i, str(next(subnet_gen)))
i += 1
def net_resource_cleanup(self):
@@ -150,6 +149,9 @@
if auto_delete:
self.addCleanup(self.bgp_adm_client.delete_bgp_speaker,
bgp_speaker_id)
+ if nconfig.CONF.neutron_plugin_options.bgp_schedule_speakers_to_agents:
+ self.add_bgp_speaker_to_dragent(self.get_dragent_id(),
+ bgp_speaker_id)
return bgp_speaker['bgp_speaker']
def delete_bgp_speaker(self, id):
diff --git a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base_test_proto.py b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base_test_proto.py
index 1bcf5b1..643620f 100644
--- a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base_test_proto.py
+++ b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/base_test_proto.py
@@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-import six
from tempest import config
from neutron_tempest_plugin.neutron_dynamic_routing.scenario import base
@@ -28,7 +27,7 @@
def _test_check_neighbor_established(self, ip_version):
self.bgp_peer_args[0]['peer_ip'] = self.r_as_ip[0].split('/')[0]
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
@@ -47,7 +46,7 @@
def _test_check_advertised_tenant_network(self, ip_version):
self.bgp_peer_args[0]['peer_ip'] = self.r_as_ip[0].split('/')[0]
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
@@ -73,7 +72,7 @@
tnets = []
tnets_cidr = []
for i in range(0, 3):
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
@@ -102,7 +101,7 @@
for (bgp_peer_args, r_as_ip) in zip(self.bgp_peer_args,
self.r_as_ip):
bgp_peer_args['peer_ip'] = r_as_ip.split('/')[0]
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
@@ -124,7 +123,7 @@
for (bgp_peer_args, r_as_ip) in zip(self.bgp_peer_args,
self.r_as_ip):
bgp_peer_args['peer_ip'] = r_as_ip.split('/')[0]
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
diff --git a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/basic/test_basic.py b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/basic/test_basic.py
index 1c680f9..90a6815 100644
--- a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/basic/test_basic.py
+++ b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/basic/test_basic.py
@@ -15,7 +15,6 @@
# under the License.
from os_ken.tests.integrated.common import docker_base as ctn_base
-import six
from tempest import config
from tempest.lib import decorators
@@ -31,7 +30,7 @@
@decorators.idempotent_id('cc615252-c6cb-4d75-a70e-608fb2c3736a')
def test_schedule_added_speaker(self):
self.bgp_peer_args[0]['peer_ip'] = self.r_as_ip[0].split('/')[0]
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = s_base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
@@ -51,7 +50,7 @@
@decorators.idempotent_id('ce98c33c-0ffa-49ae-b365-da836406793b')
def test_unschedule_deleted_speaker(self):
self.bgp_peer_args[0]['peer_ip'] = self.r_as_ip[0].split('/')[0]
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = s_base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
@@ -77,7 +76,7 @@
@decorators.idempotent_id('aa6c565c-ded3-413b-8dc9-3928b3b0e38f')
def test_remove_add_speaker_agent(self):
self.bgp_peer_args[0]['peer_ip'] = self.r_as_ip[0].split('/')[0]
- num, subnet = six.next(self.tnet_gen)
+ num, subnet = next(self.tnet_gen)
mask = '/' + str(self.TPool.prefixlen)
TNet = s_base.Net(name='', net=subnet, mask=self.TPool.prefixlen,
cidr=subnet + mask, router=None)
diff --git a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
index 85cc810..3ec231e 100644
--- a/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
+++ b/neutron_tempest_plugin/neutron_dynamic_routing/scenario/test_simple_bgp.py
@@ -214,7 +214,8 @@
left_server = self._create_server()
ssh_client = ssh.Client(left_server['fip']['floating_ip_address'],
CONF.validation.image_ssh_user,
- pkey=self.keypair['private_key'])
+ pkey=self.keypair['private_key'],
+ ssh_key_type=CONF.validation.ssh_key_type)
# check LEFT -> RIGHT connectivity via BGP advertised routes
self.check_remote_connectivity(
diff --git a/neutron_tempest_plugin/scenario/admin/test_floatingip.py b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
index a08acc3..d9abaf5 100644
--- a/neutron_tempest_plugin/scenario/admin/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/admin/test_floatingip.py
@@ -28,6 +28,14 @@
credentials = ['primary', 'admin']
@classmethod
+ def setup_clients(cls):
+ super(FloatingIpTestCasesAdmin, cls).setup_clients()
+ # admin_client set in BaseAdminNetworkTest but here we inherit from
+ # BaseNetworkTest
+ if not cls.admin_client:
+ cls.admin_client = cls.os_admin.network_client
+
+ @classmethod
@utils.requires_ext(extension="router", service="network")
def resource_setup(cls):
super(FloatingIpTestCasesAdmin, cls).resource_setup()
@@ -75,7 +83,7 @@
waiters.wait_for_server_status(
self.os_admin.servers_client, server['server']['id'],
const.SERVER_STATUS_ACTIVE)
- port = self.client.list_ports(
+ port = self.admin_client.list_ports(
network_id=self.network['id'],
device_id=server['server']['id']
)['ports'][0]
diff --git a/neutron_tempest_plugin/scenario/base.py b/neutron_tempest_plugin/scenario/base.py
index 402a901..cf68224 100644
--- a/neutron_tempest_plugin/scenario/base.py
+++ b/neutron_tempest_plugin/scenario/base.py
@@ -21,6 +21,7 @@
from neutron_lib.api import validators
from neutron_lib import constants as neutron_lib_constants
from oslo_log import log
+from paramiko import ssh_exception as ssh_exc
from tempest.common.utils import net_utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
@@ -37,8 +38,11 @@
from neutron_tempest_plugin.scenario import constants
CONF = config.CONF
-
LOG = log.getLogger(__name__)
+SSH_EXC_TUPLE = (lib_exc.SSHTimeout,
+ ssh_exc.AuthenticationException,
+ ssh_exc.NoValidConnectionsError,
+ ConnectionResetError)
def get_ncat_version(ssh_client=None):
@@ -121,9 +125,11 @@
if not kwargs.get('security_groups'):
kwargs['security_groups'] = [{'name': 'default'}]
- client = self.os_primary.servers_client
- if kwargs.get('availability_zone'):
- client = self.os_admin.servers_client
+ client = kwargs.pop('client', None)
+ if client is None:
+ client = self.os_primary.servers_client
+ if kwargs.get('availability_zone'):
+ client = self.os_admin.servers_client
server = client.create_server(
flavorRef=flavor_ref,
@@ -139,6 +145,10 @@
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
client.delete_server,
server['server']['id'])
+
+ self.wait_for_server_active(server['server'], client=client)
+ self.wait_for_guest_os_ready(server['server'], client=client)
+
return server
@classmethod
@@ -151,13 +161,14 @@
if sg['name'] == constants.DEFAULT_SECURITY_GROUP:
secgroup_id = sg['id']
break
-
+ resp = []
for rule in rule_list:
direction = rule.pop('direction')
- client.create_security_group_rule(
- direction=direction,
- security_group_id=secgroup_id,
- **rule)
+ resp.append(client.create_security_group_rule(
+ direction=direction,
+ security_group_id=secgroup_id,
+ **rule)['security_group_rule'])
+ return resp
@classmethod
def create_loginable_secgroup_rule(cls, secgroup_id=None,
@@ -198,9 +209,44 @@
else:
router = cls.create_admin_router(**kwargs)
LOG.debug("Created router %s", router['name'])
- cls.routers.append(router)
+ cls._wait_for_router_ha_active(router['id'])
return router
+ @classmethod
+ def _wait_for_router_ha_active(cls, router_id):
+ router = cls.os_admin.network_client.show_router(router_id)['router']
+ if not router.get('ha'):
+ return
+
+ def _router_active_on_l3_agent():
+ agents = cls.os_admin.network_client.list_l3_agents_hosting_router(
+ router_id)['agents']
+ return "active" in [agent['ha_state'] for agent in agents]
+
+ error_msg = (
+ "Router %s is not active on any of the L3 agents" % router_id)
+ # NOTE(slaweq): timeout here should be lower for sure, but due to
+ # the bug https://launchpad.net/bugs/1923633 let's wait even 10
+ # minutes until router will be active on some of the L3 agents
+ utils.wait_until_true(_router_active_on_l3_agent,
+ timeout=600, sleep=5,
+ exception=lib_exc.TimeoutException(error_msg))
+
+ @classmethod
+ def skip_if_no_extension_enabled_in_l3_agents(cls, extension):
+ l3_agents = cls.os_admin.network_client.list_agents(
+ binary='neutron-l3-agent')['agents']
+ if not l3_agents:
+ # the tests should not be skipped when neutron-l3-agent does not
+ # exist (this validation doesn't apply to the setups like
+ # e.g. ML2/OVN)
+ return
+ for agent in l3_agents:
+ if extension in agent['configurations'].get('extensions', []):
+ return
+ raise cls.skipException("No L3 agent with '%s' extension enabled found." %
+ extension)
+
@removals.remove(version='Stein',
message="Please use create_floatingip method instead of "
"create_and_associate_floatingip.")
@@ -252,19 +298,20 @@
server_kwargs['name'] = server_name
self.server = self.create_server(**server_kwargs)
- self.wait_for_server_active(self.server['server'])
self.port = self.client.list_ports(network_id=self.network['id'],
device_id=self.server[
'server']['id'])['ports'][0]
self.fip = self.create_floatingip(port=self.port)
- def check_connectivity(self, host, ssh_user, ssh_key,
- servers=None, ssh_timeout=None):
- ssh_client = ssh.Client(host, ssh_user,
- pkey=ssh_key, timeout=ssh_timeout)
+ def check_connectivity(self, host, ssh_user=None, ssh_key=None,
+ servers=None, ssh_timeout=None, ssh_client=None):
+ # Either ssh_client or ssh_user+ssh_key is mandatory.
+ if ssh_client is None:
+ ssh_client = ssh.Client(host, ssh_user,
+ pkey=ssh_key, timeout=ssh_timeout)
try:
ssh_client.test_connection_auth()
- except lib_exc.SSHTimeout as ssh_e:
+ except SSH_EXC_TUPLE as ssh_e:
LOG.debug(ssh_e)
self._log_console_output(servers)
self._log_local_network_status()
@@ -294,16 +341,37 @@
"for the console log", server['id'])
def _log_local_network_status(self):
- local_routes = ip_utils.IPCommand().list_routes()
- LOG.debug('Local routes:\n%s', '\n'.join(str(r) for r in local_routes))
- arp_table = ip_utils.arp_table()
- LOG.debug('Local ARP table:\n%s', '\n'.join(str(r) for r in arp_table))
+ self._log_ns_network_status()
+ for ns_name in ip_utils.IPCommand().list_namespaces():
+ self._log_ns_network_status(ns_name=ns_name)
+
+ def _log_ns_network_status(self, ns_name=None):
+ try:
+ local_ips = ip_utils.IPCommand(namespace=ns_name).list_addresses()
+ local_routes = ip_utils.IPCommand(namespace=ns_name).list_routes()
+ arp_table = ip_utils.arp_table(namespace=ns_name)
+ iptables = ip_utils.list_iptables(namespace=ns_name)
+ lsockets = ip_utils.list_listening_sockets(namespace=ns_name)
+ except exceptions.ShellCommandFailed:
+ LOG.debug('Namespace %s has been deleted synchronously during the '
+ 'host network collection process', ns_name)
+ return
+
+ LOG.debug('Namespace %s; IP Addresses:\n%s',
+ ns_name, '\n'.join(str(r) for r in local_ips))
+ LOG.debug('Namespace %s; Local routes:\n%s',
+ ns_name, '\n'.join(str(r) for r in local_routes))
+ LOG.debug('Namespace %s; Local ARP table:\n%s',
+ ns_name, '\n'.join(str(r) for r in arp_table))
+ LOG.debug('Namespace %s; Local iptables:\n%s', ns_name, iptables)
+ LOG.debug('Namespace %s; Listening sockets:\n%s', ns_name, lsockets)
def _check_remote_connectivity(self, source, dest, count,
should_succeed=True,
nic=None, mtu=None, fragmentation=True,
timeout=None, pattern=None,
- forbid_packet_loss=False):
+ forbid_packet_loss=False,
+ check_response_ip=True):
"""check ping server via source ssh connection
:param source: RemoteClient: an ssh connection from which to ping
@@ -316,6 +384,7 @@
:param timeout: Timeout for all ping packet(s) to succeed
:param pattern: hex digits included in ICMP messages
:param forbid_packet_loss: forbid or allow some lost packets
+ :param check_response_ip: check response ip
:returns: boolean -- should_succeed == ping
:returns: ping is false if ping failed
"""
@@ -358,7 +427,8 @@
LOG.debug('Packet loss detected')
return not should_succeed
- if validators.validate_ip_address(dest) is None:
+ if (check_response_ip and
+ validators.validate_ip_address(dest) is None):
# Assert that the return traffic was from the correct
# source address.
from_source = 'from %s' % dest
@@ -372,19 +442,23 @@
nic=None, mtu=None, fragmentation=True,
servers=None, timeout=None,
ping_count=CONF.validation.ping_count,
- pattern=None, forbid_packet_loss=False):
+ pattern=None, forbid_packet_loss=False,
+ check_response_ip=True):
try:
self.assertTrue(self._check_remote_connectivity(
source, dest, ping_count, should_succeed, nic, mtu,
fragmentation,
timeout=timeout, pattern=pattern,
- forbid_packet_loss=forbid_packet_loss))
- except lib_exc.SSHTimeout as ssh_e:
+ forbid_packet_loss=forbid_packet_loss,
+ check_response_ip=check_response_ip))
+ except SSH_EXC_TUPLE as ssh_e:
LOG.debug(ssh_e)
self._log_console_output(servers)
+ self._log_local_network_status()
raise
except AssertionError:
self._log_console_output(servers)
+ self._log_local_network_status()
raise
def ping_ip_address(self, ip_address, should_succeed=True,
@@ -452,6 +526,28 @@
self.wait_for_server_status(
server, constants.SERVER_STATUS_ACTIVE, client)
+ def wait_for_guest_os_ready(self, server, client=None):
+ if not CONF.compute_feature_enabled.console_output:
+ LOG.debug('Console output not supported, cannot check if server '
+ '%s is ready.', server['id'])
+ return
+
+ client = client or self.os_primary.servers_client
+
+ def system_booted():
+ console_output = client.get_console_output(server['id'])['output']
+ for line in console_output.split('\n'):
+ if 'login:' in line.lower():
+ return True
+ return False
+
+ try:
+ utils.wait_until_true(system_booted, timeout=90, sleep=5)
+ except utils.WaitTimeout:
+ LOG.debug("No correct output in console of server %s found. "
+ "Guest operating system status can't be checked.",
+ server['id'])
+
def check_servers_hostnames(self, servers, timeout=None, log_errors=True,
external_port=None):
"""Compare hostnames of given servers with their names."""
@@ -471,16 +567,18 @@
pkey=self.keypair['private_key'],
**kwargs)
self.assertIn(server['name'],
- ssh_client.exec_command('hostname'))
- except lib_exc.SSHTimeout as ssh_e:
+ ssh_client.get_hostname())
+ except SSH_EXC_TUPLE as ssh_e:
LOG.debug(ssh_e)
if log_errors:
self._log_console_output(servers)
+ self._log_local_network_status()
raise
except AssertionError as assert_e:
LOG.debug(assert_e)
if log_errors:
self._log_console_output(servers)
+ self._log_local_network_status()
raise
def ensure_nc_listen(self, ssh_client, port, protocol, echo_msg=None,
@@ -505,9 +603,10 @@
return ssh_client.execute_script(
get_ncat_server_cmd(port, protocol, echo_msg),
become_root=True, combine_stderr=True)
- except lib_exc.SSHTimeout as ssh_e:
+ except SSH_EXC_TUPLE as ssh_e:
LOG.debug(ssh_e)
self._log_console_output(servers)
+ self._log_local_network_status()
raise
def nc_client(self, ip_address, port, protocol):
@@ -519,3 +618,38 @@
result = shell.execute_local_command(cmd)
self.assertEqual(0, result.exit_status)
return result.stdout
+
+ def _ensure_public_router(self, client=None, tenant_id=None):
+ """Retrieve a router for the given tenant id.
+
+ If a public router has been configured, it will be returned.
+
+ If a public router has not been configured, but a public
+ network has, a tenant router will be created and returned that
+ routes traffic to the public network.
+ """
+ if not client:
+ client = self.client
+ if not tenant_id:
+ tenant_id = client.tenant_id
+ router_id = CONF.network.public_router_id
+ network_id = CONF.network.public_network_id
+ if router_id:
+ body = client.show_router(router_id)
+ return body['router']
+ elif network_id:
+ router = self.create_router_by_client()
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ client.delete_router, router['id'])
+ kwargs = {'external_gateway_info': dict(network_id=network_id)}
+ router = client.update_router(router['id'], **kwargs)['router']
+ return router
+ else:
+ raise Exception("Neither of 'public_router_id' or "
+ "'public_network_id' has been defined.")
+
+ def _update_router_admin_state(self, router, admin_state_up):
+ kwargs = dict(admin_state_up=admin_state_up)
+ router = self.client.update_router(
+ router['id'], **kwargs)['router']
+ self.assertEqual(admin_state_up, router['admin_state_up'])
diff --git a/neutron_tempest_plugin/scenario/test_basic.py b/neutron_tempest_plugin/scenario/test_basic.py
index 38bc40b..7583f88 100644
--- a/neutron_tempest_plugin/scenario/test_basic.py
+++ b/neutron_tempest_plugin/scenario/test_basic.py
@@ -24,7 +24,7 @@
class NetworkBasicTest(base.BaseTempestTestCase):
- credentials = ['primary']
+ credentials = ['primary', 'admin']
force_tenant_isolation = False
# Default to ipv4.
diff --git a/neutron_tempest_plugin/scenario/test_connectivity.py b/neutron_tempest_plugin/scenario/test_connectivity.py
index 1a7468a..ca7d755 100644
--- a/neutron_tempest_plugin/scenario/test_connectivity.py
+++ b/neutron_tempest_plugin/scenario/test_connectivity.py
@@ -65,6 +65,7 @@
for vm in vms:
self.wait_for_server_active(vm['server'])
+ self.wait_for_guest_os_ready(vm['server'])
return vms
@@ -87,6 +88,8 @@
ap2_rt = self.create_router(
router_name=data_utils.rand_name("ap2_rt"),
admin_state_up=True)
+ self._wait_for_router_ha_active(ap1_rt['id'])
+ self._wait_for_router_ha_active(ap2_rt['id'])
ap1_internal_port = self.create_port(
ap1_net, security_groups=[self.secgroup['id']])
@@ -139,6 +142,7 @@
router_name=data_utils.rand_name("east_west_traffic_router"),
admin_state_up=True,
external_network_id=CONF.network.public_network_id)
+ self._wait_for_router_ha_active(router['id'])
internal_port_1 = self.create_port(
net_1, security_groups=[self.secgroup['id']])
@@ -231,6 +235,7 @@
networks=[{'uuid': network['id']}],
security_groups=[{'name': self.secgroup['name']}])
self.wait_for_server_active(vm['server'])
+ self.wait_for_guest_os_ready(vm['server'])
vm_port = self.client.list_ports(
network_id=network['id'], device_id=vm['server']['id'])['ports'][0]
diff --git a/neutron_tempest_plugin/scenario/test_dhcp.py b/neutron_tempest_plugin/scenario/test_dhcp.py
new file mode 100644
index 0000000..d0545e2
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_dhcp.py
@@ -0,0 +1,191 @@
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import netaddr
+from neutron_lib import constants
+from oslo_log import log
+from paramiko import ssh_exception as ssh_exc
+from tempest.common import utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+import testtools
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils as neutron_utils
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+class DHCPTest(base.BaseTempestTestCase):
+
+ credentials = ['primary', 'admin']
+ force_tenant_isolation = False
+
+ @classmethod
+ def resource_setup(cls):
+ super(DHCPTest, cls).resource_setup()
+ cls.rand_name = data_utils.rand_name(
+ cls.__name__.rsplit('.', 1)[-1])
+ cls.network = cls.create_network(name=cls.rand_name)
+ cls.subnet = cls.create_subnet(
+ network=cls.network, name=cls.rand_name)
+ cls.router = cls.create_router_by_client()
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+ cls.keypair = cls.create_keypair(name=cls.rand_name)
+ cls.security_group = cls.create_security_group(name=cls.rand_name)
+ cls.create_loginable_secgroup_rule(cls.security_group['id'])
+
+ @utils.requires_ext(extension='extra_dhcp_opt', service='network')
+ @decorators.idempotent_id('58f7c094-1980-4e03-b0d3-6c4dd27217b1')
+ def test_extra_dhcp_opts(self):
+ """This test case tests DHCP extra options configured for Neutron port.
+
+ Test is checking just extra option "15" which is domain-name
+ according to the RFC 2132:
+ https://tools.ietf.org/html/rfc2132#section-5.3
+
+ To test that option, there is spawned VM connected to the port with
+ configured extra_dhcp_opts and test asserts that search domain name is
+ configured inside VM in /etc/resolv.conf file
+ """
+
+ test_domain = "test.domain"
+ extra_dhcp_opts = [
+ {'opt_name': 'domain-name',
+ 'opt_value': '"%s"' % test_domain}]
+ port = self.create_port(
+ network=self.network, name=self.rand_name,
+ security_groups=[self.security_group['id']],
+ extra_dhcp_opts=extra_dhcp_opts)
+ floating_ip = self.create_floatingip(port=port)
+
+ server = self.create_server(
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ key_name=self.keypair['name'],
+ networks=[{'port': port['id']}])
+ self.wait_for_server_active(server['server'])
+ self.wait_for_guest_os_ready(server['server'])
+
+ try:
+ ssh_client = ssh.Client(
+ floating_ip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+ vm_resolv_conf = ssh_client.exec_command(
+ "cat /etc/resolv.conf")
+ self.assertIn(test_domain, vm_resolv_conf)
+ except (lib_exc.SSHTimeout,
+ ssh_exc.AuthenticationException,
+ AssertionError) as error:
+ LOG.debug(error)
+ self._log_console_output([server])
+ self._log_local_network_status()
+ raise
+
+
+class DHCPPortUpdateTest(base.BaseTempestTestCase):
+
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def resource_setup(cls):
+ super(DHCPPortUpdateTest, cls).resource_setup()
+ cls.rand_name = data_utils.rand_name(
+ cls.__name__.rsplit('.', 1)[-1])
+ cls.network = cls.create_network(name=cls.rand_name)
+ cls.router = cls.create_router_by_client()
+ cls.keypair = cls.create_keypair(name=cls.rand_name)
+ cls.security_group = cls.create_security_group(name=cls.rand_name)
+ cls.create_loginable_secgroup_rule(cls.security_group['id'])
+ cls.create_pingable_secgroup_rule(cls.security_group['id'])
+
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.firewall_driver == 'ovn',
+ "OVN driver is required to run this test - "
+ "LP#1942794 solution only applied to OVN")
+ @decorators.idempotent_id('8171cc68-9dbb-46ca-b065-17b5b2e26094')
+ def test_modify_dhcp_port_ip_address(self):
+ """Test Scenario
+
+ 1) Create a network and a subnet with DHCP enabled
+ 2) Modify the default IP address from the subnet DHCP port
+ 3) Create a server in this network and check ssh connectivity
+
+ For the step 3), the server needs to obtain ssh keys from the metadata
+
+ Related bug: LP#1942794
+ """
+ # create subnet (dhcp is enabled by default)
+ subnet = self.create_subnet(network=self.network, name=self.rand_name)
+
+ def _get_dhcp_ports():
+ # in some cases, like ML2/OVS, the subnet port associated to DHCP
+ # is created with device_owner='network:dhcp'
+ dhcp_ports = self.client.list_ports(
+ network_id=self.network['id'],
+ device_owner=constants.DEVICE_OWNER_DHCP)['ports']
+ # in other cases, like ML2/OVN, the subnet port used for metadata
+ # is created with device_owner='network:distributed'
+ distributed_ports = self.client.list_ports(
+ network_id=self.network['id'],
+ device_owner=constants.DEVICE_OWNER_DISTRIBUTED)['ports']
+ self.dhcp_ports = dhcp_ports + distributed_ports
+ self.assertLessEqual(
+ len(self.dhcp_ports), 1, msg='Only one port was expected')
+ return len(self.dhcp_ports) == 1
+
+ # obtain the dhcp port
+ # in some cases this port is not created together with the subnet, but
+ # immediately after it, so some delay may be needed and that is the
+ # reason why a waiter function is used here
+ self.dhcp_ports = []
+ neutron_utils.wait_until_true(
+ lambda: _get_dhcp_ports(),
+ timeout=10)
+ dhcp_port = self.dhcp_ports[0]
+
+ # modify DHCP port IP address
+ old_dhcp_port_ip = netaddr.IPAddress(
+ dhcp_port['fixed_ips'][0]['ip_address'])
+ if str(old_dhcp_port_ip) != subnet['allocation_pools'][0]['end']:
+ new_dhcp_port_ip = str(old_dhcp_port_ip + 1)
+ else:
+ new_dhcp_port_ip = str(old_dhcp_port_ip - 1)
+ self.update_port(port=dhcp_port,
+ fixed_ips=[{'subnet_id': subnet['id'],
+ 'ip_address': new_dhcp_port_ip}])
+
+ # create server
+ server = self.create_server(
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ key_name=self.keypair['name'],
+ security_groups=[{'name': self.security_group['name']}],
+ networks=[{'uuid': self.network['id']}])
+
+ # attach fip to the server
+ self.create_router_interface(self.router['id'], subnet['id'])
+ server_port = self.client.list_ports(
+ network_id=self.network['id'],
+ device_id=server['server']['id'])['ports'][0]
+ fip = self.create_floatingip(port_id=server_port['id'])
+
+ # check connectivity
+ self.check_connectivity(fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ self.keypair['private_key'])
diff --git a/neutron_tempest_plugin/scenario/test_dns_integration.py b/neutron_tempest_plugin/scenario/test_dns_integration.py
index e5995c0..6f2756c 100644
--- a/neutron_tempest_plugin/scenario/test_dns_integration.py
+++ b/neutron_tempest_plugin/scenario/test_dns_integration.py
@@ -42,13 +42,13 @@
class BaseDNSIntegrationTests(base.BaseTempestTestCase, DNSMixin):
- credentials = ['primary']
+ credentials = ['primary', 'admin']
@classmethod
def setup_clients(cls):
super(BaseDNSIntegrationTests, cls).setup_clients()
- cls.dns_client = cls.os_tempest.zones_client
- cls.query_client = cls.os_tempest.query_client
+ cls.dns_client = cls.os_tempest.dns_v2.ZonesClient()
+ cls.query_client = cls.os_tempest.dns_v2.QueryClient()
cls.query_client.build_timeout = 30
@classmethod
@@ -148,11 +147,16 @@
provider_segmentation_id=12345)
cls.subnet2 = cls.create_subnet(cls.network2)
+ def _verify_dns_assignment(self, port):
+ expected_fqdn = '%s.%s' % (port['dns_name'], self.zone['name'])
+ self.assertEqual(expected_fqdn, port['dns_assignment'][0]['fqdn'])
+
@decorators.idempotent_id('fa6477ce-a12b-41da-b671-5a3bbdafab07')
def test_port_on_special_network(self):
name = data_utils.rand_name('port-test')
port = self.create_port(self.network2,
dns_name=name)
+ self._verify_dns_assignment(port)
addr = port['fixed_ips'][0]['ip_address']
self._verify_dns_records(addr, name)
self.client.delete_port(port['id'])
@@ -199,3 +203,56 @@
self.client.delete_port(port['id'])
self._verify_dns_records(addr_v6, name, record_type='AAAA',
found=False)
+
+
+class DNSIntegrationDomainPerProjectTests(BaseDNSIntegrationTests):
+
+ credentials = ['primary', 'admin']
+
+ required_extensions = ['subnet-dns-publish-fixed-ip',
+ 'dns-integration-domain-keywords']
+
+ @classmethod
+ def resource_setup(cls):
+ super(BaseDNSIntegrationTests, cls).resource_setup()
+
+ name = data_utils.rand_name('test-domain')
+ zone_name = "%s.%s.%s.zone." % (cls.client.user_id,
+ cls.client.tenant_id,
+ name)
+ dns_domain_template = "<user_id>.<project_id>.%s.zone." % name
+
+ _, cls.zone = cls.dns_client.create_zone(name=zone_name)
+ cls.addClassResourceCleanup(cls.dns_client.delete_zone,
+ cls.zone['id'], ignore_errors=lib_exc.NotFound)
+ dns_waiters.wait_for_zone_status(
+ cls.dns_client, cls.zone['id'], 'ACTIVE')
+
+ cls.network = cls.create_network(dns_domain=dns_domain_template)
+ cls.subnet = cls.create_subnet(cls.network,
+ dns_publish_fixed_ip=True)
+ cls.subnet_v6 = cls.create_subnet(cls.network,
+ ip_version=6,
+ dns_publish_fixed_ip=True)
+ cls.router = cls.create_router_by_client()
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+ cls.keypair = cls.create_keypair()
+
+ @decorators.idempotent_id('43a67509-3161-4125-8f2c-0d4a67599721')
+ def test_port_with_dns_name(self):
+ name = data_utils.rand_name('port-test')
+ port = self.create_port(self.network,
+ dns_name=name)
+ addr = port['fixed_ips'][0]['ip_address']
+ self._verify_dns_records(addr, name)
+ self.client.delete_port(port['id'])
+ self._verify_dns_records(addr, name, found=False)
+
+ @decorators.idempotent_id('ac89db9b-5ca4-43bd-85ba-40fbeb47e208')
+ def test_fip_admin_delete(self):
+ name = data_utils.rand_name('fip-test')
+ fip = self._create_floatingip_with_dns(name)
+ addr = fip['floating_ip_address']
+ self._verify_dns_records(addr, name)
+ self.delete_floatingip(fip, client=self.admin_client)
+ self._verify_dns_records(addr, name, found=False)
diff --git a/neutron_tempest_plugin/scenario/test_floatingip.py b/neutron_tempest_plugin/scenario/test_floatingip.py
index 6d4d830..a5f6486 100644
--- a/neutron_tempest_plugin/scenario/test_floatingip.py
+++ b/neutron_tempest_plugin/scenario/test_floatingip.py
@@ -18,6 +18,7 @@
from neutron_lib import constants as lib_constants
from neutron_lib.services.qos import constants as qos_consts
from neutron_lib.utils import test
+from oslo_log import log
from tempest.common import utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
@@ -37,6 +38,7 @@
CONF = config.CONF
+LOG = log.getLogger(__name__)
load_tests = testscenarios.load_tests_apply_scenarios
@@ -217,7 +219,6 @@
def resource_setup(cls):
super(FloatingIPPortDetailsTest, cls).resource_setup()
- @test.unstable_test("bug 1815585")
@decorators.idempotent_id('a663aeee-dd81-492b-a207-354fd6284dbe')
def test_floatingip_port_details(self):
"""Tests the following:
@@ -245,11 +246,17 @@
self._check_port_details(
fip, port, status=lib_constants.PORT_STATUS_ACTIVE,
device_id=server['server']['id'], device_owner='compute:nova')
+ LOG.debug('Port check for server %s and FIP %s finished, '
+ 'lets detach port %s from server!',
+ server['server']['id'], fip['id'], port['id'])
# detach the port from the server; this is a cast in the compute
# API so we have to poll the port until the device_id is unset.
self.delete_interface(server['server']['id'], port['id'])
port = self._wait_for_port_detach(port['id'])
+ LOG.debug('Port %s has been detached from server %s, lets check '
+ 'the status of port in FIP %s details!',
+ port['id'], server['server']['id'], fip['id'])
fip = self._wait_for_fip_port_down(fip['id'])
self._check_port_details(
fip, port, status=lib_constants.PORT_STATUS_DOWN,
@@ -324,6 +331,8 @@
(fip_id, status, timeout, port))
raise exceptions.TimeoutException(message)
+ LOG.debug('Port %s attached to FIP %s is down after %s!',
+ fip.get("port_id"), fip_id, int(time.time()) - start)
return fip
@@ -341,6 +350,13 @@
def resource_setup(cls):
super(FloatingIPQosTest, cls).resource_setup()
+ @classmethod
+ def setup_clients(cls):
+ super(FloatingIPQosTest, cls).setup_clients()
+ cls.admin_client = cls.os_admin.network_client
+ cls.qos_bw_limit_rule_client = \
+ cls.os_admin.qos_limit_bandwidth_rules_client
+
@decorators.idempotent_id('5eb48aea-eaba-4c20-8a6f-7740070a0aa3')
def test_qos(self):
"""Test floating IP is binding to a QoS policy with
@@ -351,19 +367,28 @@
received / elapsed time.
"""
+ self.skip_if_no_extension_enabled_in_l3_agents("fip_qos")
+
self._test_basic_resources()
+
+ # Create a new QoS policy
policy_id = self._create_qos_policy()
ssh_client = self._create_ssh_client()
- self.os_admin.network_client.create_bandwidth_limit_rule(
- policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
- max_burst_kbps=constants.LIMIT_KILO_BYTES,
- direction=lib_constants.INGRESS_DIRECTION)
- self.os_admin.network_client.create_bandwidth_limit_rule(
- policy_id, max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
- max_burst_kbps=constants.LIMIT_KILO_BYTES,
- direction=lib_constants.EGRESS_DIRECTION)
- rules = self.os_admin.network_client.list_bandwidth_limit_rules(
+ # As admin user create a new QoS rules
+ rule_data = {'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND,
+ 'max_burst_kbps': constants.LIMIT_KILO_BYTES,
+ 'direction': lib_constants.INGRESS_DIRECTION}
+ self.qos_bw_limit_rule_client.create_limit_bandwidth_rule(
+ qos_policy_id=policy_id, **rule_data)
+
+ rule_data = {'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND,
+ 'max_burst_kbps': constants.LIMIT_KILO_BYTES,
+ 'direction': lib_constants.EGRESS_DIRECTION}
+ self.qos_bw_limit_rule_client.create_limit_bandwidth_rule(
+ qos_policy_id=policy_id, **rule_data)
+
+ rules = self.qos_bw_limit_rule_client.list_limit_bandwidth_rules(
policy_id)
self.assertEqual(2, len(rules['bandwidth_limit_rules']))
@@ -371,6 +396,7 @@
self.fip['id'])['floatingip']
self.assertEqual(self.port['id'], fip['port_id'])
+ # Associate QoS to the FIP
self.os_admin.network_client.update_floatingip(
self.fip['id'],
qos_policy_id=policy_id)
@@ -379,12 +405,37 @@
self.fip['id'])['floatingip']
self.assertEqual(policy_id, fip['qos_policy_id'])
+ # Basic test, check that actual BW while downloading file
+ # is as expected (Original BW)
common_utils.wait_until_true(lambda: self._check_bw(
ssh_client,
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=120,
- sleep=1)
+ sleep=1,
+ exception=RuntimeError(
+ 'Failed scenario: "Create a QoS policy associated with FIP" '
+ 'Actual BW is not as expected!'))
+
+ # As admin user update QoS rules
+ for rule in rules['bandwidth_limit_rules']:
+ self.qos_bw_limit_rule_client.update_limit_bandwidth_rule(
+ policy_id, rule['id'],
+ **{'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND * 2,
+ 'max_burst_kbps': constants.LIMIT_KILO_BITS_PER_SECOND * 2})
+
+ # Check that actual BW while downloading file
+ # is as expected (Updated BW)
+ common_utils.wait_until_true(lambda: self._check_bw(
+ ssh_client,
+ self.fip['floating_ip_address'],
+ port=self.NC_PORT,
+ expected_bw=test_qos.QoSTestMixin.LIMIT_BYTES_SEC * 2),
+ timeout=120,
+ sleep=1,
+ exception=RuntimeError(
+ 'Failed scenario: "Update QoS policy associated with FIP" '
+ 'Actual BW is not as expected!'))
class TestFloatingIPUpdate(FloatingIpTestCasesMixin,
@@ -417,6 +468,7 @@
servers.append(server)
for server in servers:
self.wait_for_server_active(server)
+ self.wait_for_guest_os_ready(server)
self.fip = self.create_floatingip(port=ports[0])
self.check_connectivity(self.fip['floating_ip_address'],
diff --git a/neutron_tempest_plugin/scenario/test_internal_dns.py b/neutron_tempest_plugin/scenario/test_internal_dns.py
index d19286c..692bb70 100644
--- a/neutron_tempest_plugin/scenario/test_internal_dns.py
+++ b/neutron_tempest_plugin/scenario/test_internal_dns.py
@@ -13,7 +13,9 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import log
from tempest.common import utils
+from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from neutron_tempest_plugin.common import ssh
@@ -21,9 +23,64 @@
from neutron_tempest_plugin.scenario import base
CONF = config.CONF
+LOG = log.getLogger(__name__)
-class InternalDNSTest(base.BaseTempestTestCase):
+class InternalDNSBase(base.BaseTempestTestCase):
+ """Base class of useful resources and functionalities for test class."""
+
+ port_error_msg = ('Openstack command returned incorrect '
+ 'hostname value in port.')
+ ssh_error_msg = ('Remote shell command returned incorrect hostname value '
+ "(command: 'hostname' OR 'cat /etc/hostname').")
+
+ @staticmethod
+ def _rand_name(name):
+ """'data_utils.rand_name' wrapper, show name related to test suite."""
+ return data_utils.rand_name(f'internal-dns-test-{name}')
+
+ @classmethod
+ def resource_setup(cls):
+ super(InternalDNSBase, cls).resource_setup()
+ cls.router = cls.create_router_by_client()
+ cls.keypair = cls.create_keypair(
+ name=cls._rand_name('shared-keypair'))
+ cls.secgroup = cls.create_security_group(
+ name=cls._rand_name('shared-secgroup'))
+ cls.security_groups.append(cls.secgroup)
+ cls.create_loginable_secgroup_rule(
+ secgroup_id=cls.secgroup['id'])
+ cls.vm_kwargs = {
+ 'flavor_ref': CONF.compute.flavor_ref,
+ 'image_ref': CONF.compute.image_ref,
+ 'key_name': cls.keypair['name'],
+ 'security_groups': [{'name': cls.secgroup['name']}]
+ }
+
+ def _create_ssh_client(self, ip_addr):
+ return ssh.Client(ip_addr,
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+
+ def _validate_port_dns_details(self, checked_hostname, checked_port):
+ """Validates reused objects for correct dns values in tests."""
+ dns_details = checked_port['dns_assignment'][0]
+ self.assertEqual(checked_hostname, checked_port['dns_name'],
+ self.port_error_msg)
+ self.assertEqual(checked_hostname, dns_details['hostname'],
+ self.port_error_msg)
+ self.assertIn(checked_hostname, dns_details['fqdn'],
+ self.port_error_msg)
+
+ def _validate_ssh_dns_details(self, checked_hostname, ssh_client):
+ """Validates correct dns values returned from ssh command in tests."""
+ ssh_output = ssh_client.get_hostname()
+ self.assertIn(checked_hostname, ssh_output, self.ssh_error_msg)
+
+
+class InternalDNSTest(InternalDNSBase):
+ """Tests internal DNS capabilities."""
+ credentials = ['primary', 'admin']
@utils.requires_ext(extension="dns-integration", service="network")
@decorators.idempotent_id('988347de-07af-471a-abfa-65aea9f452a6')
@@ -51,14 +108,13 @@
security_groups=[
{'name': self.security_groups[-1]['name']}],
name='leia')
- self.wait_for_server_active(leia['server'])
ssh_client = ssh.Client(
self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
pkey=self.keypair['private_key'])
- self.assertIn('luke', ssh_client.exec_command('hostname'))
+ self.assertIn('luke', ssh_client.get_hostname())
leia_port = self.client.list_ports(
network_id=self.network['id'],
@@ -80,3 +136,49 @@
servers=[self.server, leia])
self.check_remote_connectivity(ssh_client, 'leia.openstackgate.local',
servers=[self.server, leia])
+
+ @utils.requires_ext(extension="dns-integration", service="network")
+ @decorators.idempotent_id('db5e612f-f17f-4974-b5f1-9fe89f4a6fc9')
+ def test_create_and_update_port_with_dns_name(self):
+ """Test creation of port with correct internal dns-name (hostname)."""
+
+ # 1) Create resources: network, subnet, etc.
+ # 2) Create a port with wrong dns-name (not as VM name).
+ # 3) Verify that the wrong initial port dns-name
+ # was queried from openstack API.
+ # 4) Update the port with correct dns-name (as VM name).
+ # 5) Boot a VM with corrected predefined port.
+ # 6) Verify that correct port dns-name
+ # was queried from openstack API.
+ # 7) Validate hostname configured on VM is same as VM's name.
+
+ # NOTE: a VM's hostname has to be the same as the VM's name
+ # when the VM is created; this is a known limitation.
+ # Therefore the VM's dns-name/hostname is checked against the VM's name.
+
+ vm_correct_name = self._rand_name('vm')
+ vm_wrong_name = self._rand_name('bazinga')
+ # create resources
+ network = self.create_network(name=self._rand_name('network'))
+ subnet = self.create_subnet(network, name=self._rand_name('subnet'))
+ self.create_router_interface(self.router['id'], subnet['id'])
+ # create port with wrong dns-name (not as VM name)
+ dns_port = self.create_port(network,
+ dns_name=vm_wrong_name,
+ security_groups=[self.secgroup['id']],
+ name=self._rand_name('port'))
+ # validate dns port with wrong initial hostname from API
+ self._validate_port_dns_details(vm_wrong_name, dns_port)
+ # update port with correct dns-name (as VM name)
+ dns_port = self.update_port(dns_port, dns_name=vm_correct_name)
+ # create VM with correct predefined dns-name on port
+ vm_1 = self.create_server(name=vm_correct_name,
+ networks=[{'port': dns_port['id']}],
+ **self.vm_kwargs)
+ # validate dns port with correct changed hostname using API
+ self._validate_port_dns_details(vm_correct_name, dns_port)
+ # validate hostname configured on VM is same as VM's name.
+ vm_1['fip'] = self.create_floatingip(port=dns_port)
+ vm_1['ssh_client'] = self._create_ssh_client(
+ vm_1['fip']['floating_ip_address'])
+ self._validate_ssh_dns_details(vm_correct_name, vm_1['ssh_client'])
diff --git a/neutron_tempest_plugin/scenario/test_ipv6.py b/neutron_tempest_plugin/scenario/test_ipv6.py
index 02e2846..41ac2e6 100644
--- a/neutron_tempest_plugin/scenario/test_ipv6.py
+++ b/neutron_tempest_plugin/scenario/test_ipv6.py
@@ -15,10 +15,12 @@
from neutron_lib import constants as lib_constants
from oslo_log import log
+from paramiko import ssh_exception as ssh_exc
from tempest.common import utils as tempest_utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
+import testtools
from neutron_tempest_plugin.common import ip
from neutron_tempest_plugin.common import ssh
@@ -31,11 +33,15 @@
LOG = log.getLogger(__name__)
-def turn_nic6_on(ssh, ipv6_port):
+def turn_nic6_on(ssh, ipv6_port, config_nic=True):
"""Turns the IPv6 vNIC on
Required because guest images usually set only the first vNIC on boot.
Searches for the IPv6 vNIC's MAC and brings it up.
+ # NOTE(slaweq): on RHEL based OS ifcfg file for new interface is
+ # needed to make IPv6 working on it, so if
+ # /etc/sysconfig/network-scripts directory exists ifcfg-%(nic)s file
+ # should be added in it
@param ssh: RemoteClient ssh instance to server
@param ipv6_port: port from IPv6 network attached to the server
@@ -43,36 +49,73 @@
ip_command = ip.IPCommand(ssh)
nic = ip_command.get_nic_name_by_mac(ipv6_port['mac_address'])
- # NOTE(slaweq): on RHEL based OS ifcfg file for new interface is
- # needed to make IPv6 working on it, so if
- # /etc/sysconfig/network-scripts directory exists ifcfg-%(nic)s file
- # should be added in it
- if sysconfig_network_scripts_dir_exists(ssh):
+ if config_nic:
try:
- ssh.execute_script(
- 'echo -e "DEVICE=%(nic)s\\nNAME=%(nic)s\\nIPV6INIT=yes" | '
- 'tee /etc/sysconfig/network-scripts/ifcfg-%(nic)s; '
- 'nmcli connection reload' % {'nic': nic},
- become_root=True)
- ssh.execute_script('nmcli connection up %s' % nic,
- become_root=True)
+ if sysconfig_network_scripts_dir_exists(ssh):
+ ssh.execute_script(
+ 'echo -e "DEVICE=%(nic)s\\nNAME=%(nic)s\\nIPV6INIT=yes" | '
+ 'tee /etc/sysconfig/network-scripts/ifcfg-%(nic)s; '
+ % {'nic': nic}, become_root=True)
+ if nmcli_command_exists(ssh):
+ ssh.execute_script('nmcli connection reload %s' % nic,
+ become_root=True)
+ ssh.execute_script('nmcli con mod %s ipv6.addr-gen-mode eui64'
+ % nic, become_root=True)
+ ssh.execute_script('nmcli connection up %s' % nic,
+ become_root=True)
+
except lib_exc.SSHExecCommandFailed as e:
# NOTE(slaweq): Sometimes it can happen that this SSH command
# will fail because of some error from network manager in
# guest os.
# But even then doing ip link set up below is fine and
# IP address should be configured properly.
- LOG.debug("Error during restarting %(nic)s interface on "
- "instance. Error message: %(error)s",
- {'nic': nic, 'error': e})
+ LOG.debug("Error creating NetworkManager profile. "
+ "Error message: %(error)s",
+ {'error': e})
+
ip_command.set_link(nic, "up")
+def configure_eth_connection_profile_NM(ssh):
+ """Prepare a Network manager profile for ipv6 port
+
+ By default NetworkManager uses the IPv6 privacy address
+ format, which is not supported by neutron, so we create
+ an ethernet profile with the eui64 address format.
+
+ @param ssh: RemoteClient ssh instance to server
+ """
+ # NOTE(ccamposr): on RHEL based OS we need an ethernet profile with
+ # eui64 format
+ if nmcli_command_exists(ssh):
+ try:
+ ssh.execute_script('nmcli connection add type ethernet con-name '
+ 'ether ifname "*"', become_root=True)
+ ssh.execute_script('nmcli con mod ether ipv6.addr-gen-mode eui64',
+ become_root=True)
+
+ except lib_exc.SSHExecCommandFailed as e:
+ # NOTE(slaweq): Sometimes it can happen that this SSH command
+ # will fail because of some error from network manager in
+ # guest os.
+ # But even then doing ip link set up below is fine and
+ # IP address should be configured properly.
+ LOG.debug("Error creating NetworkManager profile. "
+ "Error message: %(error)s",
+ {'error': e})
+
+
def sysconfig_network_scripts_dir_exists(ssh):
return "False" not in ssh.execute_script(
'test -d /etc/sysconfig/network-scripts/ || echo "False"')
+def nmcli_command_exists(ssh):
+ return "False" not in ssh.execute_script(
+ 'if ! type nmcli > /dev/null ; then echo "False"; fi')
+
+
class IPv6Test(base.BaseTempestTestCase):
credentials = ['primary', 'admin']
@@ -80,9 +123,16 @@
ipv6_address_mode = 'slaac'
@classmethod
+ def skip_checks(cls):
+ super(IPv6Test, cls).skip_checks()
+ if not CONF.network_feature_enabled.ipv6:
+ raise cls.skipException("IPv6 is not enabled")
+
+ @classmethod
@tempest_utils.requires_ext(extension="router", service="network")
def resource_setup(cls):
super(IPv6Test, cls).resource_setup()
+ cls.reserve_external_subnet_cidrs()
cls._setup_basic_resources()
@classmethod
@@ -107,17 +157,44 @@
if expected_address in ip_address:
return True
return False
+ # Set NIC with IPv6 to be UP and wait until IPv6 address
+ # will be configured on this NIC
+ turn_nic6_on(ssh_client, ipv6_port, False)
+ # And check if IPv6 address will be properly configured
+ # on this NIC
+ try:
+ utils.wait_until_true(
+ lambda: guest_has_address(ipv6_address),
+ timeout=60)
+ except utils.WaitTimeout:
+ LOG.debug('Timeout without NM configuration')
+ except (lib_exc.SSHTimeout,
+ ssh_exc.AuthenticationException) as ssh_e:
+ LOG.debug(ssh_e)
+ self._log_console_output([vm])
+ self._log_local_network_status()
+ raise
- # Set NIC with IPv6 to be UP and wait until IPv6 address will be
- # configured on this NIC
- turn_nic6_on(ssh_client, ipv6_port)
- # And check if IPv6 address will be properly configured on this NIC
- utils.wait_until_true(
- lambda: guest_has_address(ipv6_address),
- timeout=120,
- exception=RuntimeError(
- "Timed out waiting for IP address {!r} to be configured in "
- "the VM {!r}.".format(ipv6_address, vm['id'])))
+ if not guest_has_address(ipv6_address):
+ try:
+ # Set NIC with IPv6 to be UP and wait until IPv6 address
+ # will be configured on this NIC
+ turn_nic6_on(ssh_client, ipv6_port)
+ # And check if IPv6 address will be properly configured
+ # on this NIC
+ utils.wait_until_true(
+ lambda: guest_has_address(ipv6_address),
+ timeout=90,
+ exception=RuntimeError(
+ "Timed out waiting for IP address {!r} to be "
+ "configured in the VM {!r}.".format(ipv6_address,
+ vm['id'])))
+ except (lib_exc.SSHTimeout,
+ ssh_exc.AuthenticationException) as ssh_e:
+ LOG.debug(ssh_e)
+ self._log_console_output([vm])
+ self._log_local_network_status()
+ raise
def _test_ipv6_hotplug(self, ra_mode, address_mode):
ipv6_networks = [self.create_network() for _ in range(2)]
@@ -138,6 +215,7 @@
}
vm = self.create_server(**server_kwargs)['server']
self.wait_for_server_active(vm)
+ self.wait_for_guest_os_ready(vm)
ipv4_port = self.client.list_ports(
network_id=self.network['id'],
device_id=vm['id'])['ports'][0]
@@ -156,6 +234,8 @@
# And plug VM to the second IPv6 network
ipv6_port = self.create_port(ipv6_networks[1])
+ # Add NetworkManager profile with ipv6 eui64 format to guest OS
+ configure_eth_connection_profile_NM(ssh_client)
self.create_interface(vm['id'], ipv6_port['id'])
ip.wait_for_interface_status(
self.os_primary.interfaces_client, vm['id'],
@@ -163,10 +243,14 @@
ssh_client=ssh_client, mac_address=ipv6_port['mac_address'])
self._test_ipv6_address_configured(ssh_client, vm, ipv6_port)
+ @testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
+ "DHCPv6 attributes are not enabled.")
@decorators.idempotent_id('b13e5408-5250-4a42-8e46-6996ce613e91')
def test_ipv6_hotplug_slaac(self):
self._test_ipv6_hotplug("slaac", "slaac")
+ @testtools.skipUnless(CONF.network_feature_enabled.ipv6_subnet_attributes,
+ "DHCPv6 attributes are not enabled.")
@decorators.idempotent_id('9aaedbc4-986d-42d5-9177-3e721728e7e0')
def test_ipv6_hotplug_dhcpv6stateless(self):
self._test_ipv6_hotplug("dhcpv6-stateless", "dhcpv6-stateless")
diff --git a/neutron_tempest_plugin/scenario/test_local_ip.py b/neutron_tempest_plugin/scenario/test_local_ip.py
new file mode 100644
index 0000000..0cc9de1
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_local_ip.py
@@ -0,0 +1,103 @@
+# Copyright 2021 Huawei, Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+
+from oslo_log import log as logging
+from tempest.common import utils
+from tempest.common import waiters
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.scenario import constants as const
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class LocalIPTest(base.BaseTempestTestCase):
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ @utils.requires_ext(extension="local_ip", service="network")
+ def resource_setup(cls):
+ super(LocalIPTest, cls).resource_setup()
+ cls.network = cls.create_network()
+ cls.subnet = cls.create_subnet(cls.network)
+ cls.keypair = cls.create_keypair()
+
+ # Create security group with admin privileges
+ cls.secgroup = cls.create_security_group(
+ name=data_utils.rand_name('secgroup'))
+
+ # Execute funcs to achieve ssh and ICMP capabilities
+ cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+ cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+
+ # Create router
+ cls.router = cls.create_router(
+ router_name=data_utils.rand_name("router-test"),
+ admin_state_up=True,
+ external_network_id=CONF.network.public_network_id)
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+
+ def _create_server(self, name=None):
+ port = self.create_port(
+ self.network, security_groups=[self.secgroup['id']])
+ server = self.create_server(
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ key_name=self.keypair['name'], name=name,
+ networks=[{'port': port['id']}])['server']
+ waiters.wait_for_server_status(self.os_primary.servers_client,
+ server['id'],
+ const.SERVER_STATUS_ACTIVE)
+
+ return {'port': port, 'server': server}
+
+ @decorators.idempotent_id('3aa4b288-011a-4aa2-9024-19ad2ce40bfd')
+ def test_local_ip_connectivity(self):
+ server1 = self._create_server(name='local_ip_vm1')
+ server2 = self._create_server(name='local_ip_vm2')
+
+ fip = self.create_and_associate_floatingip(server1['port']['id'])
+ ssh_client = ssh.Client(
+ fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+
+ servers = [server1['server'], server2['server']]
+
+ # first check basic connectivity
+ self.check_remote_connectivity(
+ ssh_client,
+ server2['port']['fixed_ips'][0]['ip_address'],
+ servers=servers)
+
+ local_ip = self.create_local_ip(network_id=self.network['id'])
+ self.create_local_ip_association(local_ip['id'],
+ fixed_port_id=server2['port']['id'])
+ # check connectivity with local ip address
+ self.check_remote_connectivity(
+ ssh_client, local_ip['local_ip_address'],
+ servers=servers, check_response_ip=False)
+
+ # check basic connectivity after local ip association
+ self.check_remote_connectivity(
+ ssh_client,
+ server2['port']['fixed_ips'][0]['ip_address'],
+ servers=servers,
+ check_response_ip=False)
diff --git a/neutron_tempest_plugin/scenario/test_mac_learning.py b/neutron_tempest_plugin/scenario/test_mac_learning.py
new file mode 100644
index 0000000..409a6d8
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_mac_learning.py
@@ -0,0 +1,215 @@
+# Copyright 2021 Red Hat, Inc
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin.common import utils
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin import exceptions
+from neutron_tempest_plugin.scenario import base
+
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+# -s0 -l -c5 &> /tmp/tcpdump_out &
+def get_receiver_script(result_file, packets_expected):
+ """Script that listens for ICMP echoes and writes the output to result_file."""
+ return """#!/bin/bash
+export LC_ALL=en_US.UTF-8
+tcpdump -i any -n -v 'icmp[icmptype] = icmp-echoreply or icmp[icmptype] = \
+icmp-echo' -s0 -l -c%(packets_expected)d &> %(result_file)s &
+ """ % {'result_file': result_file,
+ 'packets_expected': packets_expected}
+
+
+def get_sender_script(result_file, receiver_address, completed_message):
+ """Script that sends packets to the receiver server."""
+ return """#!/bin/bash
+export LC_ALL=en_US.UTF-8
+ping -c 5 %(address)s
+echo '%(completed_message)s' > %(result_file)s &
+ """ % {'result_file': result_file,
+ 'address': receiver_address,
+ 'completed_message': completed_message}
+
+
+class MacLearningTest(base.BaseTempestTestCase):
+
+ credentials = ['primary', 'admin']
+ force_tenant_isolation = False
+
+ # Import configuration options
+ available_type_drivers = (
+ CONF.neutron_plugin_options.available_type_drivers)
+
+ completed_message = "Done!"
+ output_file = "/tmp/tcpdump_out"
+ sender_output_file = "/tmp/sender_out"
+ sender_script_file = "/tmp/ping.sh"
+ receiver_script_file = "/tmp/traffic.sh"
+
+ @classmethod
+ def skip_checks(cls):
+ super(MacLearningTest, cls).skip_checks()
+ advanced_image_available = (
+ CONF.neutron_plugin_options.advanced_image_ref or
+ CONF.neutron_plugin_options.default_image_is_advanced)
+ if not advanced_image_available:
+ skip_reason = "This test requires advanced tools to be executed"
+ raise cls.skipException(skip_reason)
+
+ @classmethod
+ def resource_setup(cls):
+ super(MacLearningTest, cls).resource_setup()
+
+ if CONF.neutron_plugin_options.default_image_is_advanced:
+ cls.flavor_ref = CONF.compute.flavor_ref
+ cls.image_ref = CONF.compute.image_ref
+ cls.username = CONF.validation.image_ssh_user
+ else:
+ cls.flavor_ref = (
+ CONF.neutron_plugin_options.advanced_image_flavor_ref)
+ cls.image_ref = CONF.neutron_plugin_options.advanced_image_ref
+ cls.username = CONF.neutron_plugin_options.advanced_image_ssh_user
+
+ # Setup basic topology for servers so that we can log into them
+ # It's important to keep port security and DHCP disabled for this test
+ cls.network = cls.create_network(port_security_enabled=False)
+ cls.subnet = cls.create_subnet(cls.network, enable_dhcp=False)
+ cls.router = cls.create_router_by_client()
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+
+ cls.keypair = cls.create_keypair()
+
+ def _create_server(self):
+ name = data_utils.rand_name("maclearning-server")
+ server = self.create_server(
+ flavor_ref=self.flavor_ref,
+ image_ref=self.image_ref,
+ key_name=self.keypair['name'], name=name,
+ networks=[{'uuid': self.network['id']}],
+ config_drive='True')['server']
+ self.wait_for_server_active(server)
+ self.wait_for_guest_os_ready(server)
+ server['port'] = self.client.list_ports(
+ network_id=self.network['id'], device_id=server['id'])['ports'][0]
+ server['fip'] = self.create_floatingip(port=server['port'])
+ server['ssh_client'] = ssh.Client(server['fip']['floating_ip_address'],
+ self.username,
+ pkey=self.keypair['private_key'])
+ return server
+
+ def _check_cmd_installed_on_server(self, ssh_client, server, cmd):
+ try:
+ ssh_client.execute_script('which %s' % cmd)
+ except base.SSH_EXC_TUPLE as ssh_e:
+ LOG.debug(ssh_e)
+ self._log_console_output([server])
+ self._log_local_network_status()
+ raise
+ except exceptions.SSHScriptFailed:
+ raise self.skipException(
+ "%s is not available on server %s" % (cmd, server['id']))
+
+ def _prepare_sender(self, server, address):
+ check_script = get_sender_script(self.sender_output_file, address,
+ self.completed_message)
+ self._check_cmd_installed_on_server(server['ssh_client'], server,
+ 'tcpdump')
+ server['ssh_client'].execute_script(
+ 'echo "%s" > %s' % (check_script, self.sender_script_file))
+
+ def _prepare_listener(self, server, n_packets):
+ check_script = get_receiver_script(
+ result_file=self.output_file,
+ packets_expected=n_packets)
+ self._check_cmd_installed_on_server(server['ssh_client'], server,
+ 'tcpdump')
+ server['ssh_client'].execute_script(
+ 'echo "%s" > %s' % (check_script, self.receiver_script_file))
+
+ @decorators.idempotent_id('013686ac-23b1-23e4-8361-10b1c98a2861')
+ def test_mac_learning_vms_on_same_network(self):
+ """Test mac learning works in a network.
+
+ The receiver server will receive all the sent packets.
+ The non receiver should not receive any.
+
+ """
+ sender = self._create_server()
+ receiver = self._create_server()
+ non_receiver = self._create_server()
+
+ def check_server_result(server, expected_result, output_file):
+ result = server['ssh_client'].execute_script(
+ "cat {path} || echo '{path} not exists yet'".format(
+ path=output_file))
+ LOG.debug("VM result: %s", result)
+ return expected_result in result
+
+ # Prepare the server that is intended to receive the packets
+ self._prepare_listener(receiver, 5)
+
+ # Prepare the server that is not intended to receive the packets.
+ self._prepare_listener(non_receiver, 2)
+
+ # Run the scripts
+ for server in [receiver, non_receiver]:
+ server['ssh_client'].execute_script(
+ "bash %s" % self.receiver_script_file, become_root=True)
+
+ # Prepare the server that will make the ping.
+ target_ip = receiver['port']['fixed_ips'][0]['ip_address']
+ self._prepare_sender(sender, address=target_ip)
+
+ LOG.debug("The receiver IP is: %s", target_ip)
+ # Run the sender node script
+ sender['ssh_client'].execute_script(
+ "bash %s" % self.sender_script_file, become_root=True)
+
+ # Check if the message was sent.
+ utils.wait_until_true(
+ lambda: check_server_result(
+ sender, self.completed_message,
+ self.sender_output_file),
+ exception=RuntimeError(
+ "Sender script wasn't executed properly"))
+
+ # Check receiver server
+ receiver_expected_result = '5 packets captured'
+ utils.wait_until_true(
+ lambda: check_server_result(receiver,
+ receiver_expected_result, self.output_file),
+ exception=RuntimeError(
+ 'Receiver server did not receive expected packet'))
+
+ # Check the non_receiver server
+ non_receiver_expected_result = '0 packets captured'
+ try:
+ LOG.debug("Try killing non-receiver tcpdump")
+ non_receiver['ssh_client'].execute_script(
+ "killall tcpdump && sleep 2", become_root=True)
+ except exceptions.SSHScriptFailed:
+ LOG.debug("Killing tcpdump failed")
+ self.assertTrue(check_server_result(non_receiver,
+ non_receiver_expected_result,
+ self.output_file),
+ 'Non targeted server received unexpected packets')
+ return
diff --git a/neutron_tempest_plugin/scenario/test_metadata.py b/neutron_tempest_plugin/scenario/test_metadata.py
index c78ff69..af6bd09 100644
--- a/neutron_tempest_plugin/scenario/test_metadata.py
+++ b/neutron_tempest_plugin/scenario/test_metadata.py
@@ -16,8 +16,10 @@
from neutron_lib import constants as nlib_const
from oslo_log import log as logging
+from tempest.common import utils
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from tempest.lib import exceptions
import testtools
from neutron_tempest_plugin.common import ssh
@@ -45,10 +47,17 @@
force_tenant_isolation = False
@classmethod
+ def skip_checks(cls):
+ super(MetadataTest, cls).skip_checks()
+ if not utils.is_network_feature_enabled('ipv6_metadata'):
+ raise cls.skipException("Metadata over IPv6 is not enabled")
+
+ @classmethod
def resource_setup(cls):
super(MetadataTest, cls).resource_setup()
cls.rand_name = data_utils.rand_name(
cls.__name__.rsplit('.', 1)[-1])
+ cls.reserve_external_subnet_cidrs()
cls.network = cls.create_network(name=cls.rand_name)
cls.subnet_v4 = cls.create_subnet(
network=cls.network, name=cls.rand_name)
@@ -113,11 +122,9 @@
return interface
@testtools.skipUnless(
- (CONF.neutron_plugin_options.ipv6_metadata and
- (CONF.neutron_plugin_options.advanced_image_ref or
- CONF.neutron_plugin_options.default_image_is_advanced)),
- 'Advanced image and neutron_plugin_options.ipv6_metadata=True '
- 'is required to run this test.')
+ CONF.neutron_plugin_options.advanced_image_ref or
+ CONF.neutron_plugin_options.default_image_is_advanced,
+ 'Advanced image is required to run this test.')
@decorators.idempotent_id('e680949a-f1cc-11ea-b49a-cba39bbbe5ad')
def test_metadata_routed(self):
use_advanced_image = (
@@ -126,17 +133,23 @@
vm = self._create_server_with_network(
self.network, use_advanced_image=use_advanced_image)
self.wait_for_server_active(server=vm.server)
- self._assert_has_ssh_connectivity(vm.ssh_client)
+ self.wait_for_guest_os_ready(vm.server)
+ self.check_connectivity(host=vm.floating_ip['floating_ip_address'],
+ ssh_client=vm.ssh_client)
interface = self._get_primary_interface(vm.ssh_client)
- out = vm.ssh_client.exec_command(
- 'curl http://[%(address)s%%25%(interface)s]/' % {
- 'address': nlib_const.METADATA_V6_IP,
- 'interface': interface})
- self.assertIn('latest', out)
+ try:
+ out = vm.ssh_client.exec_command(
+ 'curl http://[%(address)s%%25%(interface)s]/' % {
+ 'address': nlib_const.METADATA_V6_IP,
+ 'interface': interface})
+ self.assertIn('latest', out)
- out = vm.ssh_client.exec_command(
- 'curl http://[%(address)s%%25%(interface)s]/openstack/' % {
- 'address': nlib_const.METADATA_V6_IP,
- 'interface': interface})
- self.assertIn('latest', out)
+ out = vm.ssh_client.exec_command(
+ 'curl http://[%(address)s%%25%(interface)s]/openstack/' % {
+ 'address': nlib_const.METADATA_V6_IP,
+ 'interface': interface})
+ self.assertIn('latest', out)
+ except exceptions.SSHExecCommandFailed:
+ self._log_console_output()
+ self._log_local_network_status()
diff --git a/neutron_tempest_plugin/scenario/test_multicast.py b/neutron_tempest_plugin/scenario/test_multicast.py
index b2ea8ae..4fd41cf 100644
--- a/neutron_tempest_plugin/scenario/test_multicast.py
+++ b/neutron_tempest_plugin/scenario/test_multicast.py
@@ -15,11 +15,11 @@
import netaddr
from neutron_lib import constants
-from neutron_lib.utils import test
from oslo_log import log
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
+from neutron_tempest_plugin.common import ip
from neutron_tempest_plugin.common import ssh
from neutron_tempest_plugin.common import utils
from neutron_tempest_plugin import config
@@ -111,17 +111,18 @@
'result_file': result_file}
-def get_unregistered_script(group, result_file):
+def get_unregistered_script(interface, group, result_file):
return """#!/bin/bash
export LC_ALL=en_US.UTF-8
-tcpdump -i any -s0 -vv host %(group)s -vvneA -s0 -l &> %(result_file)s &
- """ % {'group': group,
+tcpdump -i %(interface)s host %(group)s -vvneA -s0 -l -c1 &> %(result_file)s &
+ """ % {'interface': interface,
+ 'group': group,
'result_file': result_file}
class BaseMulticastTest(object):
- credentials = ['primary']
+ credentials = ['primary', 'admin']
force_tenant_isolation = False
# Import configuration options
@@ -201,22 +202,28 @@
security_groups=[{'name': self.secgroup['security_group']['name']}]
)['server']
self.wait_for_server_active(server)
- port = self.client.list_ports(
+ self.wait_for_guest_os_ready(server)
+ server['port'] = self.client.list_ports(
network_id=self.network['id'], device_id=server['id'])['ports'][0]
- server['fip'] = self.create_floatingip(port=port)
+ server['fip'] = self.create_floatingip(port=server['port'])
server['ssh_client'] = ssh.Client(server['fip']['floating_ip_address'],
self.username,
pkey=self.keypair['private_key'])
self._check_cmd_installed_on_server(server['ssh_client'],
- server['id'], PYTHON3_BIN)
+ server, PYTHON3_BIN)
return server
- def _check_cmd_installed_on_server(self, ssh_client, server_id, cmd):
+ def _check_cmd_installed_on_server(self, ssh_client, server, cmd):
try:
ssh_client.execute_script('which %s' % cmd)
+ except base.SSH_EXC_TUPLE as ssh_e:
+ LOG.debug(ssh_e)
+ self._log_console_output([server])
+ self._log_local_network_status()
+ raise
except exceptions.SSHScriptFailed:
raise self.skipException(
- "%s is not available on server %s" % (cmd, server_id))
+ "%s is not available on server %s" % (cmd, server['id']))
def _prepare_sender(self, server, mcast_address):
check_script = get_sender_script(
@@ -235,24 +242,27 @@
server['fip']['floating_ip_address'],
self.username,
pkey=self.keypair['private_key'])
- self._check_cmd_installed_on_server(ssh_client, server['id'],
+ self._check_cmd_installed_on_server(ssh_client, server,
PYTHON3_BIN)
server['ssh_client'].execute_script(
'echo "%s" > /tmp/multicast_traffic_receiver.py' % check_script)
def _prepare_unregistered(self, server, mcast_address):
- check_script = get_unregistered_script(
- group=mcast_address, result_file=self.unregistered_output_file)
ssh_client = ssh.Client(
server['fip']['floating_ip_address'],
self.username,
pkey=self.keypair['private_key'])
- self._check_cmd_installed_on_server(ssh_client, server['id'],
+ ip_command = ip.IPCommand(ssh_client=ssh_client)
+ addresses = ip_command.list_addresses(port=server['port'])
+ port_iface = ip.get_port_device_name(addresses, server['port'])
+ check_script = get_unregistered_script(
+ interface=port_iface, group=mcast_address,
+ result_file=self.unregistered_output_file)
+ self._check_cmd_installed_on_server(ssh_client, server,
'tcpdump')
server['ssh_client'].execute_script(
'echo "%s" > /tmp/unregistered_traffic_receiver.sh' % check_script)
- @test.unstable_test("bug 1850288")
@decorators.idempotent_id('113486fc-24c9-4be4-8361-03b1c9892867')
def test_multicast_between_vms_on_same_network(self):
"""Test multicast messaging between two servers on the same network
@@ -342,19 +352,39 @@
for receiver_id in receiver_ids:
self.assertIn(receiver_id, replies_result)
- # Kill the tcpdump command running on the unregistered node so
- # tcpdump flushes its output to the output file
- unregistered['ssh_client'].execute_script(
- "killall tcpdump && sleep 2", become_root=True)
+ def check_unregistered_host():
+ unregistered_result = unregistered['ssh_client'].execute_script(
+ "cat {path} || echo '{path} not exists yet'".format(
+ path=self.unregistered_output_file))
+ LOG.debug("Unregistered VM result: %s", unregistered_result)
+ return expected_result in unregistered_result
- unregistered_result = unregistered['ssh_client'].execute_script(
- "cat {path} || echo '{path} not exists yet'".format(
- path=self.unregistered_output_file))
- LOG.debug("Unregistered VM result: %s", unregistered_result)
- expected_result = '0 packets captured'
- if self._is_multicast_traffic_expected(mcast_address):
- expected_result = '1 packet captured'
- self.assertIn(expected_result, unregistered_result)
+ expected_result = '1 packet captured'
+ unregistered_error_message = (
+ 'Unregistered server did not received expected packet.')
+ if not self._is_multicast_traffic_expected(mcast_address):
+ # Kill the tcpdump command runs on the unregistered node with "-c"
+ # option so it will be stopped automatically if it will receive
+ # packet matching filters,
+ # We don't expect any packets to be captured really in this case
+ # so let's kill tcpdump so it flushes its output to the output
+ # file.
+ expected_result = ('0 packets captured')
+ unregistered_error_message = (
+ 'Unregistered server received unexpected packet(s).')
+ try:
+ unregistered['ssh_client'].execute_script(
+ "killall tcpdump && sleep 2", become_root=True)
+ except exceptions.SSHScriptFailed:
+ # Probably some packet was captured by tcpdump and due to that
+ # it is already stopped
+ self.assertTrue(check_unregistered_host(),
+ unregistered_error_message)
+ return
+
+ utils.wait_until_true(
+ check_unregistered_host,
+ exception=RuntimeError(unregistered_error_message))
class MulticastTestIPv4(BaseMulticastTest, base.BaseTempestTestCase):
diff --git a/neutron_tempest_plugin/scenario/test_port_forwardings.py b/neutron_tempest_plugin/scenario/test_port_forwardings.py
index 4080bca..a3adc03 100644
--- a/neutron_tempest_plugin/scenario/test_port_forwardings.py
+++ b/neutron_tempest_plugin/scenario/test_port_forwardings.py
@@ -14,7 +14,6 @@
# under the License.
from neutron_lib import constants
-from neutron_lib.utils import test
from oslo_log import log
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -32,11 +31,13 @@
class PortForwardingTestJSON(base.BaseTempestTestCase):
+ credentials = ['primary', 'admin']
required_extensions = ['router', 'floating-ip-port-forwarding']
@classmethod
def resource_setup(cls):
super(PortForwardingTestJSON, cls).resource_setup()
+ cls.skip_if_no_extension_enabled_in_l3_agents("port_forwarding")
cls.network = cls.create_network()
cls.subnet = cls.create_subnet(cls.network)
cls.router = cls.create_router_by_client()
@@ -70,6 +71,7 @@
networks=[{'port': port['id']}])['server']
server['name'] = name
self.wait_for_server_active(server)
+ self.wait_for_guest_os_ready(server)
server['port_forwarding_tcp'] = self.create_port_forwarding(
self.fip['id'],
internal_port_id=port['id'],
@@ -119,7 +121,6 @@
lambda: _message_received(server, ssh_client, expected_msg),
**wait_params)
- @test.unstable_test("bug 1896735")
@decorators.idempotent_id('ab40fc48-ca8d-41a0-b2a3-f6679c847bfe')
def test_port_forwarding_to_2_servers(self):
servers = self._prepare_resources(num_servers=2,
@@ -129,7 +130,6 @@
# And now test UDP port forwarding using nc
self._test_udp_port_forwarding(servers)
- @test.unstable_test("bug 1896735")
@decorators.idempotent_id('aa19d46c-a4a6-11ea-bb37-0242ac130002')
def test_port_forwarding_editing_and_deleting_tcp_rule(self):
test_ext_port = 3333
@@ -187,7 +187,6 @@
server[0]['id'],
server[0]['port_forwarding_tcp']['external_port'])))
- @test.unstable_test("bug 1896735")
@decorators.idempotent_id('6d05b1b2-6109-4c30-b402-1503f4634acb')
def test_port_forwarding_editing_and_deleting_udp_rule(self):
test_ext_port = 3344
@@ -244,7 +243,6 @@
server[0]['id'],
server[0]['port_forwarding_udp']['external_port'])))
- @test.unstable_test("bug 1896735")
@decorators.idempotent_id('5971881d-06a0-459e-b636-ce5d1929e2d4')
def test_port_forwarding_to_2_fixed_ips(self):
port = self.create_port(self.network,
@@ -255,6 +253,7 @@
name=name, networks=[{'port': port['id']}])['server']
server['name'] = name
self.wait_for_server_active(server)
+ self.wait_for_guest_os_ready(server)
# Add a second fixed_ip address to port (same subnet)
internal_subnet_id = port['fixed_ips'][0]['subnet_id']
diff --git a/neutron_tempest_plugin/scenario/test_ports.py b/neutron_tempest_plugin/scenario/test_ports.py
index 3b0408a..c661d39 100644
--- a/neutron_tempest_plugin/scenario/test_ports.py
+++ b/neutron_tempest_plugin/scenario/test_ports.py
@@ -12,6 +12,9 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import ipaddress
+
+from oslo_log import log as logging
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
@@ -20,11 +23,12 @@
from neutron_tempest_plugin.scenario import base
from neutron_tempest_plugin.scenario import constants as const
+LOG = logging.getLogger(__name__)
CONF = config.CONF
class PortsTest(base.BaseTempestTestCase):
- credentials = ['primary']
+ credentials = ['primary', 'admin']
@classmethod
def resource_setup(cls):
@@ -78,3 +82,34 @@
self.os_primary.servers_client,
servers[0]['server']['id'])
self._try_delete_resource(self.delete_floatingip, fips[0])
+
+ @decorators.idempotent_id('62e32802-1d21-11eb-b322-74e5f9e2a801')
+ def test_port_with_fixed_ip(self):
+ """Test scenario:
+
+ 1) Get the last IP from the range of Subnet "Allocation pool"
+ 2) Create Port with fixed_ip resolved in #1
+ 3) Create a VM using updated Port in #2 and add Floating IP
+ 4) Check SSH access to VM
+ """
+ ip_range = [str(ip) for ip in ipaddress.IPv4Network(
+ self.subnet['cidr'])]
+ # Because of the tests executed in Parallel the IP may already
+ # be in use, so we'll try using IPs from Allocation pool
+ # (in reverse order) up until Port is successfully created.
+ for ip in reversed(ip_range):
+ try:
+ port = self.create_port(
+ self.network,
+ name=data_utils.rand_name("fixed_ip_port"),
+ security_groups=[self.secgroup['id']],
+ fixed_ips=[{'ip_address': ip}])
+ if port is not None:
+ break
+ except Exception as e:
+ LOG.warning('Failed to create Port, using Fixed_IP:{}, '
+ 'the Error was:{}'.format(ip, e))
+ fip, server = self._create_instance_with_port(port)
+ self.check_connectivity(fip[0]['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ self.keypair['private_key'])
diff --git a/neutron_tempest_plugin/scenario/test_portsecurity.py b/neutron_tempest_plugin/scenario/test_portsecurity.py
index 257627c..c90db08 100644
--- a/neutron_tempest_plugin/scenario/test_portsecurity.py
+++ b/neutron_tempest_plugin/scenario/test_portsecurity.py
@@ -21,7 +21,7 @@
class PortSecurityTest(base.BaseTempestTestCase):
- credentials = ['primary']
+ credentials = ['primary', 'admin']
required_extensions = ['port-security']
@decorators.idempotent_id('61ab176e-d48b-42b7-b38a-1ba571ecc033')
diff --git a/neutron_tempest_plugin/scenario/test_qos.py b/neutron_tempest_plugin/scenario/test_qos.py
index 938d2b0..74be216 100644
--- a/neutron_tempest_plugin/scenario/test_qos.py
+++ b/neutron_tempest_plugin/scenario/test_qos.py
@@ -20,6 +20,7 @@
from oslo_log import log as logging
from tempest.common import utils as tutils
from tempest.common import waiters
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from neutron_tempest_plugin.api import base as base_api
@@ -137,8 +138,21 @@
name='test-policy',
description='test-qos-policy',
shared=True)
+ self.qos_policies.append(policy['policy'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.os_admin.network_client.delete_qos_policy, policy)
return policy['policy']['id']
+ def _create_qos_bw_limit_rule(self, policy_id, rule_data):
+ rule = self.qos_bw_limit_rule_client.create_limit_bandwidth_rule(
+ qos_policy_id=policy_id,
+ **rule_data)['bandwidth_limit_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_bw_limit_rule_client.delete_limit_bandwidth_rule,
+ policy_id, rule['id'])
+ return rule
+
def _create_server_by_port(self, port=None):
"""Launch an instance using a port interface;
@@ -175,6 +189,7 @@
server = self.create_server(**server_kwargs)
self.wait_for_server_active(server['server'])
+ self.wait_for_guest_os_ready(server['server'])
self.check_connectivity(self.fip['floating_ip_address'],
CONF.validation.image_ssh_user,
keypair['private_key'])
@@ -188,34 +203,35 @@
def resource_setup(cls):
super(QoSTest, cls).resource_setup()
+ @classmethod
+ def setup_clients(cls):
+ super(QoSTest, cls).setup_clients()
+ cls.admin_client = cls.os_admin.network_client
+ cls.qos_bw_limit_rule_client = \
+ cls.os_admin.qos_limit_bandwidth_rules_client
+
@decorators.idempotent_id('00682a0c-b72e-11e8-b81e-8c16450ea513')
def test_qos_basic_and_update(self):
- """This test covers both:
+ """This test covers following scenarios:
- 1) Basic QoS functionality
- This is a basic test that check that a QoS policy with
- a bandwidth limit rule is applied correctly by sending
- a file from the instance to the test node.
- Then calculating the bandwidth every ~1 sec by the number of bits
- received / elapsed time.
+ 1) Create a QoS policy associated with the network.
+ Expected result: BW is limited according the values set in
+ QoS policy rule.
- 2) Update QoS policy
- Administrator has the ability to update existing QoS policy,
- this test is planned to verify that:
- - actual BW is affected as expected after updating QoS policy.
- Test scenario:
- 1) Associating QoS Policy with "Original_bandwidth"
- to the test node
- 2) BW validation - by downloading file on test node.
- ("Original_bandwidth" is expected)
- 3) Updating existing QoS Policy to a new BW value
- "Updated_bandwidth"
- 4) BW validation - by downloading file on test node.
- ("Updated_bandwidth" is expected)
- Note:
- There are two options to associate QoS policy to VM:
- "Neutron Port" or "Network", in this test
- both options are covered.
+ 2) Update QoS policy associated with the network.
+ Expected result: BW is limited according the new values
+ set in QoS policy rule.
+
+ 3) Create a new QoS policy associated with the VM port.
+ Expected result: BW is limited according the values set in
+ new QoS policy rule.
+ Note: Neutron port is prioritized higher than Network, means
+ that: "Neutron Port Priority" is also covered.
+
+ 4) Update QoS policy associated with the VM port.
+ Expected result: BW is limited according the new values set
+ in QoS policy rule.
+
"""
# Setup resources
@@ -226,11 +242,11 @@
bw_limit_policy_id = self._create_qos_policy()
# As admin user create QoS rule
- rule_id = self.os_admin.network_client.create_bandwidth_limit_rule(
- policy_id=bw_limit_policy_id,
- max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
- max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
- 'bandwidth_limit_rule']['id']
+ rule_data = {
+ 'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND,
+ 'max_burst_kbps': constants.LIMIT_KILO_BITS_PER_SECOND}
+ rule_id = self._create_qos_bw_limit_rule(
+ bw_limit_policy_id, rule_data)['id']
# Associate QoS to the network
self.os_admin.network_client.update_network(
@@ -243,14 +259,18 @@
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=self.CHECK_TIMEOUT,
- sleep=1)
+ sleep=1,
+ exception=RuntimeError(
+ 'Failed scenario: "Create a QoS policy associated with'
+ ' the network" Actual BW is not as expected!'))
# As admin user update QoS rule
- self.os_admin.network_client.update_bandwidth_limit_rule(
- bw_limit_policy_id,
- rule_id,
- max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 2,
- max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 2)
+ rule_update_data = {
+ 'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND * 2,
+ 'max_burst_kbps': constants.LIMIT_KILO_BITS_PER_SECOND * 2}
+ self.qos_bw_limit_rule_client.update_limit_bandwidth_rule(
+ qos_policy_id=bw_limit_policy_id, rule_id=rule_id,
+ **rule_update_data)
# Check that actual BW while downloading file
# is as expected (Update BW)
@@ -260,17 +280,20 @@
port=self.NC_PORT,
expected_bw=QoSTest.LIMIT_BYTES_SEC * 2),
timeout=self.CHECK_TIMEOUT,
- sleep=1)
+ sleep=1,
+ exception=RuntimeError(
+ 'Failed scenario: "Update QoS policy associated with'
+ ' the network" Actual BW is not as expected!'))
# Create a new QoS policy
bw_limit_policy_id_new = self._create_qos_policy()
# As admin user create a new QoS rule
- rule_id_new = self.os_admin.network_client.create_bandwidth_limit_rule(
- policy_id=bw_limit_policy_id_new,
- max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
- max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
- 'bandwidth_limit_rule']['id']
+ rule_data_new = {
+ 'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND,
+ 'max_burst_kbps': constants.LIMIT_KILO_BITS_PER_SECOND}
+ rule_id_new = self._create_qos_bw_limit_rule(
+ bw_limit_policy_id_new, rule_data_new)['id']
# Associate a new QoS policy to Neutron port
self.os_admin.network_client.update_port(
@@ -283,14 +306,18 @@
self.fip['floating_ip_address'],
port=self.NC_PORT),
timeout=self.CHECK_TIMEOUT,
- sleep=1)
+ sleep=1,
+ exception=RuntimeError(
+ 'Failed scenario: "Create a new QoS policy associated with'
+ ' the VM port" Actual BW is not as expected!'))
# As admin user update QoS rule
- self.os_admin.network_client.update_bandwidth_limit_rule(
- bw_limit_policy_id_new,
- rule_id_new,
- max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 3,
- max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND * 3)
+ rule_update_data = {
+ 'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND * 3,
+ 'max_burst_kbps': constants.LIMIT_KILO_BITS_PER_SECOND * 3}
+ self.qos_bw_limit_rule_client.update_limit_bandwidth_rule(
+ qos_policy_id=bw_limit_policy_id_new, rule_id=rule_id_new,
+ **rule_update_data)
# Check that actual BW while downloading file
# is as expected (Update BW)
@@ -300,7 +327,10 @@
port=self.NC_PORT,
expected_bw=QoSTest.LIMIT_BYTES_SEC * 3),
timeout=self.CHECK_TIMEOUT,
- sleep=1)
+ sleep=1,
+ exception=RuntimeError(
+ 'Failed scenario: "Update QoS policy associated with'
+ ' the VM port" Actual BW is not as expected!'))
@decorators.idempotent_id('66e5673e-0522-11ea-8d71-362b9e155667')
def test_attach_previously_used_port_to_new_instance(self):
@@ -321,11 +351,10 @@
description='policy for attach',
shared=False)['policy']
- rule = self.os_admin.network_client.create_bandwidth_limit_rule(
- policy_id=port_policy['id'],
- max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
- max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)[
- 'bandwidth_limit_rule']
+ rule_data = {
+ 'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND,
+ 'max_burst_kbps': constants.LIMIT_KILO_BITS_PER_SECOND}
+ rule = self._create_qos_bw_limit_rule(port_policy['id'], rule_data)
self.os_admin.network_client.update_port(
vm_port['id'], qos_policy_id=port_policy['id'])
@@ -365,10 +394,10 @@
name='network-policy',
shared=False)['policy']
- rule = self.os_admin.network_client.create_bandwidth_limit_rule(
- policy_id=qos_policy['id'],
- max_kbps=constants.LIMIT_KILO_BITS_PER_SECOND,
- max_burst_kbps=constants.LIMIT_KILO_BITS_PER_SECOND)
+ rule_data = {
+ 'max_kbps': constants.LIMIT_KILO_BITS_PER_SECOND,
+ 'max_burst_kbps': constants.LIMIT_KILO_BITS_PER_SECOND}
+ rule = self._create_qos_bw_limit_rule(qos_policy['id'], rule_data)
network = self.os_admin.network_client.update_network(
network['id'],
@@ -386,9 +415,9 @@
retrieved_net['network']['qos_policy_id'])
retrieved_rule_id = retrieved_policy['policy']['rules'][0]['id']
- self.assertEqual(rule['bandwidth_limit_rule']['id'],
+ self.assertEqual(rule['id'],
retrieved_rule_id,
"""The expected rule ID is {0},
the actual value is {1}""".
- format(rule['bandwidth_limit_rule']['id'],
+ format(rule['id'],
retrieved_rule_id))
diff --git a/neutron_tempest_plugin/scenario/test_security_groups.py b/neutron_tempest_plugin/scenario/test_security_groups.py
index 23a5224..f47ce44 100644
--- a/neutron_tempest_plugin/scenario/test_security_groups.py
+++ b/neutron_tempest_plugin/scenario/test_security_groups.py
@@ -12,8 +12,12 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
-from neutron_lib import constants
+import netaddr
+from neutron_lib import constants
+import testtools
+
+from tempest.common import utils as tempest_utils
from tempest.common import waiters
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -33,13 +37,15 @@
required_extensions = ['router', 'security-group']
def _verify_http_connection(self, ssh_client, ssh_server,
- test_ip, test_port, should_pass=True):
+ test_ip, test_port, servers, should_pass=True):
"""Verify if HTTP connection works using remote hosts.
:param ssh.Client ssh_client: The client host active SSH client.
:param ssh.Client ssh_server: The HTTP server host active SSH client.
:param string test_ip: IP address of HTTP server
:param string test_port: Port of HTTP server
+ :param list servers: List of servers for which console output will be
+ logged in case when test case
:param bool should_pass: Wheter test should pass or not.
:return: if passed or not
@@ -48,6 +54,7 @@
utils.kill_nc_process(ssh_server)
url = 'http://%s:%d' % (test_ip, test_port)
utils.spawn_http_server(ssh_server, port=test_port, message='foo_ok')
+ utils.process_is_running(ssh_server, 'nc')
try:
ret = utils.call_url_remote(ssh_client, url)
if should_pass:
@@ -57,15 +64,21 @@
except Exception as e:
if not should_pass:
return
+ self._log_console_output(servers)
+ self._log_local_network_status()
raise e
@classmethod
def setup_credentials(cls):
super(NetworkSecGroupTest, cls).setup_credentials()
- cls.project_id = cls.os_primary.credentials.tenant_id
cls.network_client = cls.os_admin.network_client
@classmethod
+ def setup_clients(cls):
+ super(NetworkSecGroupTest, cls).setup_clients()
+ cls.project_id = cls.os_primary.credentials.tenant_id
+
+ @classmethod
def resource_setup(cls):
super(NetworkSecGroupTest, cls).resource_setup()
# setup basic topology for servers we can log into it
@@ -269,6 +282,50 @@
'remote_ip_prefix': cidr}]
self._test_ip_prefix(rule_list, should_succeed=False)
+ @decorators.idempotent_id('01f0ddca-b049-47eb-befd-82acb502c9ec')
+ def test_established_tcp_session_after_re_attachinging_sg(self):
+ """Test existing connection remain open after sg has been re-attached
+
+ Verifies that new packets can pass over the existing connection when
+ the security group has been removed from the server and then added
+ back
+ """
+
+ ssh_sg = self.create_security_group()
+ self.create_loginable_secgroup_rule(secgroup_id=ssh_sg['id'])
+ vm_ssh, fips, vms = self.create_vm_testing_sec_grp(
+ security_groups=[{'name': ssh_sg['name']}])
+ sg = self.create_security_group()
+ nc_rule = [{'protocol': constants.PROTO_NUM_TCP,
+ 'direction': constants.INGRESS_DIRECTION,
+ 'port_range_min': 6666,
+ 'port_range_max': 6666}]
+ self.create_secgroup_rules(nc_rule, secgroup_id=sg['id'])
+ srv_port = self.client.list_ports(network_id=self.network['id'],
+ device_id=vms[1]['server']['id'])['ports'][0]
+ srv_ip = srv_port['fixed_ips'][0]['ip_address']
+ with utils.StatefulConnection(
+ vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
+ self.client.update_port(srv_port['id'],
+ security_groups=[ssh_sg['id'], sg['id']])
+ con.test_connection()
+ with utils.StatefulConnection(
+ vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
+ self.client.update_port(
+ srv_port['id'], security_groups=[ssh_sg['id']])
+ con.test_connection(should_pass=False)
+ with utils.StatefulConnection(
+ vm_ssh[0], vm_ssh[1], srv_ip, 6666) as con:
+ self.client.update_port(srv_port['id'],
+ security_groups=[ssh_sg['id'], sg['id']])
+ con.test_connection()
+ self.client.update_port(srv_port['id'],
+ security_groups=[ssh_sg['id']])
+ con.test_connection(should_pass=False)
+ self.client.update_port(srv_port['id'],
+ security_groups=[ssh_sg['id'], sg['id']])
+ con.test_connection()
+
@decorators.idempotent_id('7ed39b86-006d-40fb-887a-ae46693dabc9')
def test_remote_group(self):
# create a new sec group
@@ -306,6 +363,95 @@
self.ping_ip_address(fips[0]['floating_ip_address'],
should_succeed=False)
+ @testtools.skipUnless(
+ CONF.neutron_plugin_options.firewall_driver == 'openvswitch',
+ "Openvswitch agent is required to run this test")
+ @decorators.idempotent_id('678dd4c0-2953-4626-b89c-8e7e4110ec4b')
+ @tempest_utils.requires_ext(extension="address-group", service="network")
+ @tempest_utils.requires_ext(
+ extension="security-groups-remote-address-group", service="network")
+ def test_remote_group_and_remote_address_group(self):
+ """Test SG rules with remote group and remote address group
+
+ This test checks the ICMP connection among two servers using a security
+ group rule with remote group and another rule with remote address
+ group. The connection should be granted when at least one of the rules
+ is applied. When both rules are applied (overlapped), removing one of
+ them should not disable the connection.
+ """
+ # create a new sec group
+ ssh_secgrp_name = data_utils.rand_name('ssh_secgrp')
+ ssh_secgrp = self.os_primary.network_client.create_security_group(
+ name=ssh_secgrp_name)
+ # add cleanup
+ self.security_groups.append(ssh_secgrp['security_group'])
+ # configure sec group to support SSH connectivity
+ self.create_loginable_secgroup_rule(
+ secgroup_id=ssh_secgrp['security_group']['id'])
+ # spawn two instances with the sec group created
+ server_ssh_clients, fips, servers = self.create_vm_testing_sec_grp(
+ security_groups=[{'name': ssh_secgrp_name}])
+ # verify SSH functionality
+ for i in range(2):
+ self.check_connectivity(fips[i]['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ self.keypair['private_key'])
+ # try to ping instances without ICMP permissions
+ self.check_remote_connectivity(
+ server_ssh_clients[0], fips[1]['fixed_ip_address'],
+ should_succeed=False)
+ # add ICMP support to the remote group
+ rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
+ 'direction': constants.INGRESS_DIRECTION,
+ 'remote_group_id': ssh_secgrp['security_group']['id']}]
+ remote_sg_rid = self.create_secgroup_rules(
+ rule_list, secgroup_id=ssh_secgrp['security_group']['id'])[0]['id']
+ # verify ICMP connectivity between instances works
+ self.check_remote_connectivity(
+ server_ssh_clients[0], fips[1]['fixed_ip_address'],
+ servers=servers)
+ # make sure ICMP connectivity doesn't work from framework
+ self.ping_ip_address(fips[0]['floating_ip_address'],
+ should_succeed=False)
+
+ # add ICMP rule with remote address group
+ test_ag = self.create_address_group(
+ name=data_utils.rand_name('test_ag'),
+ addresses=[str(netaddr.IPNetwork(fips[0]['fixed_ip_address']))])
+ rule_list = [{'protocol': constants.PROTO_NUM_ICMP,
+ 'direction': constants.INGRESS_DIRECTION,
+ 'remote_address_group_id': test_ag['id']}]
+ remote_ag_rid = self.create_secgroup_rules(
+ rule_list, secgroup_id=ssh_secgrp['security_group']['id'])[0]['id']
+ # verify ICMP connectivity between instances still works
+ self.check_remote_connectivity(
+ server_ssh_clients[0], fips[1]['fixed_ip_address'],
+ servers=servers)
+ # make sure ICMP connectivity doesn't work from framework
+ self.ping_ip_address(fips[0]['floating_ip_address'],
+ should_succeed=False)
+
+ # Remove the ICMP rule with remote group
+ self.client.delete_security_group_rule(remote_sg_rid)
+ # verify ICMP connectivity between instances still works as granted
+ # by the rule with remote address group
+ self.check_remote_connectivity(
+ server_ssh_clients[0], fips[1]['fixed_ip_address'],
+ servers=servers)
+ # make sure ICMP connectivity doesn't work from framework
+ self.ping_ip_address(fips[0]['floating_ip_address'],
+ should_succeed=False)
+
+ # Remove the ICMP rule with remote address group
+ self.client.delete_security_group_rule(remote_ag_rid)
+ # verify ICMP connectivity between instances doesn't work now
+ self.check_remote_connectivity(
+ server_ssh_clients[0], fips[1]['fixed_ip_address'],
+ should_succeed=False)
+ # make sure ICMP connectivity doesn't work from framework
+ self.ping_ip_address(fips[0]['floating_ip_address'],
+ should_succeed=False)
+
@decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad488')
def test_multiple_ports_secgroup_inheritance(self):
"""Test multiple port security group inheritance
@@ -378,6 +524,7 @@
ssh_clients[0],
ssh_clients[2],
test_ip, port,
+ servers,
should_pass=False)
# add two remote-group rules with port-ranges
@@ -399,7 +546,8 @@
self._verify_http_connection(
ssh_clients[0],
ssh_clients[2],
- test_ip, port)
+ test_ip, port,
+ servers)
@decorators.idempotent_id('f07d0159-8f9e-4faa-87f5-a869ab0ad490')
def test_intra_sg_isolation(self):
@@ -473,3 +621,45 @@
ssh_clients[0], fips[1]['fixed_ip_address'])
self.check_remote_connectivity(
ssh_clients[1], fips[0]['fixed_ip_address'])
+
+ @decorators.idempotent_id('cd66b826-d86c-4fb4-ab37-17c8391753cb')
+ def test_overlapping_sec_grp_rules(self):
+ """Test security group rules with overlapping port ranges"""
+ client_ssh, _, vms = self.create_vm_testing_sec_grp(num_servers=2)
+ tmp_ssh, _, tmp_vm = self.create_vm_testing_sec_grp(num_servers=1)
+ srv_ssh = tmp_ssh[0]
+ srv_vm = tmp_vm[0]
+ srv_port = self.client.list_ports(network_id=self.network['id'],
+ device_id=srv_vm['server']['id'])['ports'][0]
+ srv_ip = srv_port['fixed_ips'][0]['ip_address']
+ secgrps = []
+ for i, vm in enumerate(vms):
+ sg = self.create_security_group(name='secgrp-%d' % i)
+ self.create_loginable_secgroup_rule(secgroup_id=sg['id'])
+ port = self.client.list_ports(network_id=self.network['id'],
+ device_id=vm['server']['id'])['ports'][0]
+ self.client.update_port(port['id'], security_groups=[sg['id']])
+ secgrps.append(sg)
+ tcp_port = 3000
+ rule_list = [{'protocol': constants.PROTO_NUM_TCP,
+ 'direction': constants.INGRESS_DIRECTION,
+ 'port_range_min': tcp_port,
+ 'port_range_max': tcp_port,
+ 'remote_group_id': secgrps[0]['id']},
+ {'protocol': constants.PROTO_NUM_TCP,
+ 'direction': constants.INGRESS_DIRECTION,
+ 'port_range_min': tcp_port,
+ 'port_range_max': tcp_port + 2,
+ 'remote_group_id': secgrps[1]['id']}]
+ self.client.update_port(srv_port['id'],
+ security_groups=[secgrps[0]['id'], secgrps[1]['id']])
+ self.create_secgroup_rules(rule_list, secgroup_id=secgrps[0]['id'])
+ # The conntrack entries are ruled by the OF definitions but conntrack
+ # status can change the datapath. Let's check the rules in two
+ # attempts
+ for _ in range(2):
+ self._verify_http_connection(client_ssh[0], srv_ssh, srv_ip,
+ tcp_port, [])
+ for port in range(tcp_port, tcp_port + 3):
+ self._verify_http_connection(client_ssh[1], srv_ssh, srv_ip,
+ port, [])
diff --git a/neutron_tempest_plugin/scenario/test_trunk.py b/neutron_tempest_plugin/scenario/test_trunk.py
index 585af06..b994775 100644
--- a/neutron_tempest_plugin/scenario/test_trunk.py
+++ b/neutron_tempest_plugin/scenario/test_trunk.py
@@ -15,6 +15,7 @@
import collections
from neutron_lib import constants
+from neutron_lib.utils import test
from oslo_log import log as logging
from tempest.common import utils as tutils
from tempest.lib.common.utils import data_utils
@@ -96,6 +97,32 @@
floating_ip=floating_ip, server=server,
ssh_client=ssh_client)
+ def _create_advanced_servers_with_trunk_port(self, num_servers=1,
+ subport_network=None,
+ segmentation_id=None,
+ vlan_subnet=None,
+ use_advanced_image=False):
+ server_list = []
+ for _ in range(0, num_servers):
+ vm = self._create_server_with_trunk_port(
+ subport_network,
+ segmentation_id,
+ use_advanced_image)
+ server_list.append(vm)
+ self._configure_vlan_subport(
+ vm=vm,
+ vlan_tag=segmentation_id,
+ vlan_subnet=vlan_subnet)
+
+ return server_list
+
+ def _check_servers_remote_connectivity(self, vms=None,
+ should_succeed=True):
+ self.check_remote_connectivity(
+ vms[0].ssh_client,
+ vms[1].subport['fixed_ips'][0]['ip_address'],
+ should_succeed=should_succeed)
+
def _create_server_port(self, network=None, **params):
network = network or self.network
return self.create_port(network=network, name=self.rand_name,
@@ -161,9 +188,14 @@
def _configure_vlan_subport(self, vm, vlan_tag, vlan_subnet):
self.wait_for_server_active(server=vm.server)
+ self.wait_for_guest_os_ready(vm.server)
self._wait_for_trunk(trunk=vm.trunk)
self._wait_for_port(port=vm.port)
self._wait_for_port(port=vm.subport)
+ self.check_connectivity(
+ host=vm.floating_ip['floating_ip_address'],
+ ssh_client=vm.ssh_client,
+ servers=[vm.server])
ip_command = ip.IPCommand(ssh_client=vm.ssh_client)
for address in ip_command.list_addresses(port=vm.port):
@@ -199,6 +231,7 @@
vm2 = self._create_server_with_trunk_port()
for vm in (vm1, vm2):
self.wait_for_server_active(server=vm.server)
+ self.wait_for_guest_os_ready(vm.server)
self._wait_for_trunk(vm.trunk)
self._assert_has_ssh_connectivity(vm.ssh_client)
@@ -248,45 +281,69 @@
(CONF.neutron_plugin_options.advanced_image_ref or
CONF.neutron_plugin_options.default_image_is_advanced),
"Advanced image is required to run this test.")
- @decorators.idempotent_id('a8a02c9b-b453-49b5-89a2-cce7da66aafb')
+ @testtools.skipUnless(
+ (CONF.neutron_plugin_options.reboots_in_test > 0),
+ "Number of reboots > 0 is required for this test")
+ @decorators.idempotent_id('a8a02c9b-b453-49b5-89a2-cce7da6680fb')
+ def test_subport_connectivity_soft_reboot(self):
+ vlan_tag = 10
+ vlan_network = self.create_network()
+ vlan_subnet = self.create_subnet(network=vlan_network, gateway=None)
+ use_advanced_image = (
+ not CONF.neutron_plugin_options.default_image_is_advanced)
+
+ # allow intra-security-group traffic
+ sg_rule = self.create_pingable_secgroup_rule(self.security_group['id'])
+ self.addCleanup(
+ self.os_primary.network_client.delete_security_group_rule,
+ sg_rule['id'])
+
+ vms = self._create_advanced_servers_with_trunk_port(
+ num_servers=2,
+ subport_network=vlan_network,
+ segmentation_id=vlan_tag,
+ vlan_subnet=vlan_subnet,
+ use_advanced_image=use_advanced_image)
+ # check remote connectivity true before reboots
+ self._check_servers_remote_connectivity(vms=vms)
+ client = self.os_tempest.compute.ServersClient()
+ for _ in range(CONF.neutron_plugin_options.reboots_in_test):
+ client.reboot_server(vms[1].server['id'],
+ **{'type': 'SOFT'})
+ self.wait_for_server_active(vms[1].server)
+ self._configure_vlan_subport(vm=vms[1],
+ vlan_tag=vlan_tag,
+ vlan_subnet=vlan_subnet)
+ self._check_servers_remote_connectivity(vms=vms)
+
+ @test.unstable_test("bug 1897796")
+ @testtools.skipUnless(
+ (CONF.neutron_plugin_options.advanced_image_ref or
+ CONF.neutron_plugin_options.default_image_is_advanced),
+ "Advanced image is required to run this test.")
+ @decorators.idempotent_id('a8a02c9b-b453-49b5-89a2-cce7da66bbcb')
def test_subport_connectivity(self):
vlan_tag = 10
vlan_network = self.create_network()
vlan_subnet = self.create_subnet(network=vlan_network, gateway=None)
-
use_advanced_image = (
not CONF.neutron_plugin_options.default_image_is_advanced)
-
- vm1 = self._create_server_with_trunk_port(
+ vms = self._create_advanced_servers_with_trunk_port(
+ num_servers=2,
subport_network=vlan_network,
segmentation_id=vlan_tag,
+ vlan_subnet=vlan_subnet,
use_advanced_image=use_advanced_image)
- vm2 = self._create_server_with_trunk_port(
- subport_network=vlan_network,
- segmentation_id=vlan_tag,
- use_advanced_image=use_advanced_image)
-
- for vm in [vm1, vm2]:
- self._configure_vlan_subport(vm=vm,
- vlan_tag=vlan_tag,
- vlan_subnet=vlan_subnet)
-
# Ping from server1 to server2 via VLAN interface should fail because
# we haven't allowed ICMP
- self.check_remote_connectivity(
- vm1.ssh_client,
- vm2.subport['fixed_ips'][0]['ip_address'],
- should_succeed=False)
-
+ self._check_servers_remote_connectivity(vms=vms,
+ should_succeed=False)
# allow intra-security-group traffic
sg_rule = self.create_pingable_secgroup_rule(self.security_group['id'])
self.addCleanup(
self.os_primary.network_client.delete_security_group_rule,
sg_rule['id'])
- self.check_remote_connectivity(
- vm1.ssh_client,
- vm2.subport['fixed_ips'][0]['ip_address'],
- servers=[vm1, vm2])
+ self._check_servers_remote_connectivity(vms=vms)
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
'Cold migration is not available.')
@@ -325,6 +382,7 @@
use_advanced_image=use_advanced_image)
for role in ['migrate', 'connection_test']:
self.wait_for_server_active(servers[role].server)
+ self.wait_for_guest_os_ready(servers[role].server)
self._configure_vlan_subport(vm=servers[role],
vlan_tag=vlan_tag,
vlan_subnet=vlan_subnet)
@@ -377,6 +435,7 @@
vlan_subnet=vlan_subnet)
for vm in vms:
self.wait_for_server_active(vm.server)
+ self.wait_for_guest_os_ready(vm.server)
# allow ICMP traffic
sg_rule = self.create_pingable_secgroup_rule(self.security_group['id'])
diff --git a/neutron_tempest_plugin/scenario/test_vlan_transparency.py b/neutron_tempest_plugin/scenario/test_vlan_transparency.py
new file mode 100644
index 0000000..d9a529c
--- /dev/null
+++ b/neutron_tempest_plugin/scenario/test_vlan_transparency.py
@@ -0,0 +1,186 @@
+# Copyright (c) 2020 Red Hat, Inc.
+#
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.common import ip
+from neutron_tempest_plugin.common import ssh
+from neutron_tempest_plugin import config
+from neutron_tempest_plugin.scenario import base
+
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+MIN_VLAN_ID = 1
+MAX_VLAN_ID = 4094
+
+
+class VlanTransparencyTest(base.BaseTempestTestCase):
+ credentials = ['primary', 'admin']
+ force_tenant_isolation = False
+
+ required_extensions = ['vlan-transparent', 'allowed-address-pairs']
+
+ @classmethod
+ def resource_setup(cls):
+ super(VlanTransparencyTest, cls).resource_setup()
+ # setup basic topology for servers we can log into
+ cls.rand_name = data_utils.rand_name(
+ cls.__name__.rsplit('.', 1)[-1])
+ cls.network = cls.create_network(name=cls.rand_name,
+ vlan_transparent=True)
+ cls.subnet = cls.create_subnet(network=cls.network,
+ name=cls.rand_name)
+ cls.router = cls.create_router_by_client()
+ cls.create_router_interface(cls.router['id'], cls.subnet['id'])
+ cls.keypair = cls.create_keypair(name=cls.rand_name)
+ cls.vm_ports = []
+ cls.security_group = cls.create_security_group(name=cls.rand_name)
+ cls.create_loginable_secgroup_rule(cls.security_group['id'])
+
+ if CONF.neutron_plugin_options.default_image_is_advanced:
+ cls.flavor_ref = CONF.compute.flavor_ref
+ cls.image_ref = CONF.compute.image_ref
+ else:
+ cls.flavor_ref = \
+ CONF.neutron_plugin_options.advanced_image_flavor_ref
+ cls.image_ref = CONF.neutron_plugin_options.advanced_image_ref
+
+ @classmethod
+ def skip_checks(cls):
+ super(VlanTransparencyTest, cls).skip_checks()
+ if not (CONF.neutron_plugin_options.advanced_image_ref or
+ CONF.neutron_plugin_options.default_image_is_advanced):
+ raise cls.skipException(
+ 'Advanced image is required to run these tests.')
+
+ def _create_port_and_server(self, index,
+ port_security=True,
+ allowed_address_pairs=None):
+ server_name = 'server-%s-%d' % (self.rand_name, index)
+ port_name = 'port-%s-%d' % (self.rand_name, index)
+ if port_security:
+ sec_groups = [self.security_group['id']]
+ else:
+ sec_groups = None
+ self.vm_ports.append(
+ self.create_port(network=self.network, name=port_name,
+ security_groups=sec_groups,
+ port_security_enabled=port_security,
+ allowed_address_pairs=allowed_address_pairs))
+ return self.create_server(flavor_ref=self.flavor_ref,
+ image_ref=self.image_ref,
+ key_name=self.keypair['name'],
+ networks=[{'port': self.vm_ports[-1]['id']}],
+ name=server_name)['server']
+
+ def _configure_vlan_transparent(self, port, ssh_client,
+ vlan_tag, vlan_ip):
+ ip_command = ip.IPCommand(ssh_client=ssh_client)
+ addresses = ip_command.list_addresses(port=port)
+ port_iface = ip.get_port_device_name(addresses, port)
+ subport_iface = ip_command.configure_vlan_transparent(
+ port=port, vlan_tag=vlan_tag, ip_addresses=[vlan_ip])
+
+ for address in ip_command.list_addresses(ip_addresses=vlan_ip):
+ self.assertEqual(subport_iface, address.device.name)
+ self.assertEqual(port_iface, address.device.parent)
+ break
+ else:
+ self.fail("Sub-port fixed IP not found on server.")
+
+ def _create_ssh_client(self, floating_ip):
+ if CONF.neutron_plugin_options.default_image_is_advanced:
+ username = CONF.validation.image_ssh_user
+ else:
+ username = CONF.neutron_plugin_options.advanced_image_ssh_user
+ return ssh.Client(host=floating_ip['floating_ip_address'],
+ username=username,
+ pkey=self.keypair['private_key'])
+
+ def _test_basic_vlan_transparency_connectivity(
+ self, port_security=True, use_allowed_address_pairs=False):
+ vlan_tag = data_utils.rand_int_id(start=MIN_VLAN_ID, end=MAX_VLAN_ID)
+ vlan_ipmask_template = '192.168.%d.{ip_last_byte}/24' % (vlan_tag %
+ 256)
+ vms = []
+ vlan_ipmasks = []
+ floating_ips = []
+ ssh_clients = []
+
+ for i in range(2):
+ vlan_ipmasks.append(vlan_ipmask_template.format(
+ ip_last_byte=(i + 1) * 10))
+ if use_allowed_address_pairs:
+ allowed_address_pairs = [{'ip_address': vlan_ipmasks[i]}]
+ else:
+ allowed_address_pairs = None
+ vms.append(self._create_port_and_server(
+ index=i,
+ port_security=port_security,
+ allowed_address_pairs=allowed_address_pairs))
+ floating_ips.append(self.create_floatingip(port=self.vm_ports[-1]))
+ ssh_clients.append(
+ self._create_ssh_client(floating_ip=floating_ips[i]))
+
+ self.check_connectivity(
+ host=floating_ips[i]['floating_ip_address'],
+ ssh_client=ssh_clients[i])
+ self._configure_vlan_transparent(port=self.vm_ports[-1],
+ ssh_client=ssh_clients[i],
+ vlan_tag=vlan_tag,
+ vlan_ip=vlan_ipmasks[i])
+
+ if port_security:
+ # Ping from vm0 to vm1 via VLAN interface should fail because
+ # we haven't allowed ICMP
+ self.check_remote_connectivity(
+ ssh_clients[0],
+ vlan_ipmasks[1].split('/')[0],
+ servers=vms,
+ should_succeed=False)
+
+ # allow intra-security-group traffic
+ sg_rule = self.create_pingable_secgroup_rule(
+ self.security_group['id'])
+ self.addCleanup(
+ self.os_primary.network_client.delete_security_group_rule,
+ sg_rule['id'])
+
+ # Ping from vm0 to vm1 via VLAN interface should pass because
+ # either port security is disabled or the ICMP sec group rule has been
+ # added
+ self.check_remote_connectivity(
+ ssh_clients[0],
+ vlan_ipmasks[1].split('/')[0],
+ servers=vms)
+ # Ping from vm1 to vm0 and check untagged packets are not dropped
+ self.check_remote_connectivity(
+ ssh_clients[1],
+ self.vm_ports[-2]['fixed_ips'][0]['ip_address'],
+ servers=vms)
+
+ @decorators.idempotent_id('a2694e3a-6d4d-4a23-9fcc-c3ed3ef37b16')
+ def test_vlan_transparent_port_sec_disabled(self):
+ self._test_basic_vlan_transparency_connectivity(
+ port_security=False, use_allowed_address_pairs=False)
+
+ @decorators.idempotent_id('2dd03b4f-9c20-4cda-8c6a-40fa453ec69a')
+ def test_vlan_transparent_allowed_address_pairs(self):
+ self._test_basic_vlan_transparency_connectivity(
+ port_security=True, use_allowed_address_pairs=True)
diff --git a/neutron_tempest_plugin/services/network/json/network_client.py b/neutron_tempest_plugin/services/network/json/network_client.py
index 0b2544b..e177e10 100644
--- a/neutron_tempest_plugin/services/network/json/network_client.py
+++ b/neutron_tempest_plugin/services/network/json/network_client.py
@@ -11,9 +11,9 @@
# under the License.
import time
+from urllib import parse as urlparse
from oslo_serialization import jsonutils
-from six.moves.urllib import parse as urlparse
from tempest.lib.common import rest_client as service_client
from tempest.lib import exceptions as lib_exc
@@ -627,57 +627,6 @@
self.expected_success(200, resp.status)
return service_client.ResponseBody(resp, body)
- def create_bandwidth_limit_rule(self, policy_id, max_kbps,
- max_burst_kbps, direction=None):
- uri = '%s/qos/policies/%s/bandwidth_limit_rules' % (
- self.uri_prefix, policy_id)
- post_data = {
- 'bandwidth_limit_rule': {
- 'max_kbps': max_kbps,
- 'max_burst_kbps': max_burst_kbps
- }
- }
- if direction:
- post_data['bandwidth_limit_rule']['direction'] = direction
- resp, body = self.post(uri, self.serialize(post_data))
- self.expected_success(201, resp.status)
- body = jsonutils.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def list_bandwidth_limit_rules(self, policy_id):
- uri = '%s/qos/policies/%s/bandwidth_limit_rules' % (
- self.uri_prefix, policy_id)
- resp, body = self.get(uri)
- body = self.deserialize_single(body)
- self.expected_success(200, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def show_bandwidth_limit_rule(self, policy_id, rule_id):
- uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
- self.uri_prefix, policy_id, rule_id)
- resp, body = self.get(uri)
- body = self.deserialize_single(body)
- self.expected_success(200, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def update_bandwidth_limit_rule(self, policy_id, rule_id, **kwargs):
- uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
- self.uri_prefix, policy_id, rule_id)
- if "direction" in kwargs and kwargs['direction'] is None:
- kwargs.pop('direction')
- post_data = {'bandwidth_limit_rule': kwargs}
- resp, body = self.put(uri, jsonutils.dumps(post_data))
- body = self.deserialize_single(body)
- self.expected_success(200, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def delete_bandwidth_limit_rule(self, policy_id, rule_id):
- uri = '%s/qos/policies/%s/bandwidth_limit_rules/%s' % (
- self.uri_prefix, policy_id, rule_id)
- resp, body = self.delete(uri)
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
-
def create_dscp_marking_rule(self, policy_id, dscp_mark):
uri = '%s/qos/policies/%s/dscp_marking_rules' % (
self.uri_prefix, policy_id)
@@ -723,53 +672,6 @@
self.expected_success(204, resp.status)
return service_client.ResponseBody(resp, body)
- def create_minimum_bandwidth_rule(self, policy_id, direction,
- min_kbps=None):
- uri = '%s/qos/policies/%s/minimum_bandwidth_rules' % (
- self.uri_prefix, policy_id)
- data = {
- 'direction': direction,
- }
- if min_kbps is not None:
- data['min_kbps'] = min_kbps
- post_data = self.serialize({'minimum_bandwidth_rule': data})
- resp, body = self.post(uri, post_data)
- self.expected_success(201, resp.status)
- body = jsonutils.loads(body)
- return service_client.ResponseBody(resp, body)
-
- def list_minimum_bandwidth_rules(self, policy_id):
- uri = '%s/qos/policies/%s/minimum_bandwidth_rules' % (
- self.uri_prefix, policy_id)
- resp, body = self.get(uri)
- body = self.deserialize_single(body)
- self.expected_success(200, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def show_minimum_bandwidth_rule(self, policy_id, rule_id):
- uri = '%s/qos/policies/%s/minimum_bandwidth_rules/%s' % (
- self.uri_prefix, policy_id, rule_id)
- resp, body = self.get(uri)
- body = self.deserialize_single(body)
- self.expected_success(200, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def update_minimum_bandwidth_rule(self, policy_id, rule_id, **kwargs):
- uri = '%s/qos/policies/%s/minimum_bandwidth_rules/%s' % (
- self.uri_prefix, policy_id, rule_id)
- post_data = {'minimum_bandwidth_rule': kwargs}
- resp, body = self.put(uri, jsonutils.dumps(post_data))
- body = self.deserialize_single(body)
- self.expected_success(200, resp.status)
- return service_client.ResponseBody(resp, body)
-
- def delete_minimum_bandwidth_rule(self, policy_id, rule_id):
- uri = '%s/qos/policies/%s/minimum_bandwidth_rules/%s' % (
- self.uri_prefix, policy_id, rule_id)
- resp, body = self.delete(uri)
- self.expected_success(204, resp.status)
- return service_client.ResponseBody(resp, body)
-
def list_qos_rule_types(self):
uri = '%s/qos/rule-types' % self.uri_prefix
resp, body = self.get(uri)
@@ -923,8 +825,6 @@
return service_client.ResponseBody(resp, body)
def list_security_groups(self, **kwargs):
- post_body = {'security_groups': kwargs}
- body = jsonutils.dumps(post_body)
uri = '%s/security-groups' % self.uri_prefix
if kwargs:
uri += '?' + urlparse.urlencode(kwargs, doseq=1)
@@ -941,8 +841,6 @@
return service_client.ResponseBody(resp, body)
def list_ports(self, **kwargs):
- post_body = {'ports': kwargs}
- body = jsonutils.dumps(post_body)
uri = '%s/ports' % self.uri_prefix
if kwargs:
uri += '?' + urlparse.urlencode(kwargs, doseq=1)
@@ -1038,6 +936,92 @@
self.expected_success(204, resp.status)
service_client.ResponseBody(resp, body)
+ def create_local_ip(self, network_id, **kwargs):
+ post_body = {'local_ip': {
+ 'network_id': network_id}}
+ if kwargs:
+ post_body['local_ip'].update(kwargs)
+ body = jsonutils.dumps(post_body)
+ uri = '%s/local_ips' % self.uri_prefix
+ resp, body = self.post(uri, body)
+ self.expected_success(201, resp.status)
+ body = jsonutils.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def list_local_ips(self, **kwargs):
+ uri = '%s/local_ips' % self.uri_prefix
+ if kwargs:
+ uri += '?' + urlparse.urlencode(kwargs, doseq=1)
+ resp, body = self.get(uri)
+ self.expected_success(200, resp.status)
+ body = jsonutils.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def get_local_ip(self, local_ip_id):
+ uri = '%s/local_ips/%s' % (self.uri_prefix, local_ip_id)
+ get_resp, get_resp_body = self.get(uri)
+ self.expected_success(200, get_resp.status)
+ body = jsonutils.loads(get_resp_body)
+ return service_client.ResponseBody(get_resp, body)
+
+ def update_local_ip(self, local_ip_id, **kwargs):
+ uri = '%s/local_ips/%s' % (self.uri_prefix, local_ip_id)
+ get_resp, _ = self.get(uri)
+ self.expected_success(200, get_resp.status)
+ put_body = jsonutils.dumps({'local_ip': kwargs})
+ put_resp, resp_body = self.put(uri, put_body)
+ self.expected_success(200, put_resp.status)
+ body = jsonutils.loads(resp_body)
+ return service_client.ResponseBody(put_resp, body)
+
+ def delete_local_ip(self, local_ip_id):
+ uri = '%s/local_ips/%s' % (
+ self.uri_prefix, local_ip_id)
+ resp, body = self.delete(uri)
+ self.expected_success(204, resp.status)
+ return service_client.ResponseBody(resp, body)
+
+ def create_local_ip_association(self, local_ip_id, fixed_port_id,
+ fixed_ip=None):
+ post_body = {'port_association': {
+ 'fixed_port_id': fixed_port_id}}
+ if fixed_ip:
+ post_body['port_association']['fixed_ip'] = (
+ fixed_ip)
+ body = jsonutils.dumps(post_body)
+ uri = '%s/local_ips/%s/port_associations' % (self.uri_prefix,
+ local_ip_id)
+ resp, body = self.post(uri, body)
+ self.expected_success(201, resp.status)
+ body = jsonutils.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def get_local_ip_association(self, local_ip_id, fixed_port_id):
+ uri = '%s/local_ips/%s/port_associations/%s' % (self.uri_prefix,
+ local_ip_id,
+ fixed_port_id)
+ get_resp, get_resp_body = self.get(uri)
+ self.expected_success(200, get_resp.status)
+ body = jsonutils.loads(get_resp_body)
+ return service_client.ResponseBody(get_resp, body)
+
+ def list_local_ip_associations(self, local_ip_id):
+ uri = '%s/local_ips/%s/port_associations' % (self.uri_prefix,
+ local_ip_id)
+ resp, body = self.get(uri)
+ self.expected_success(200, resp.status)
+ body = jsonutils.loads(body)
+ return service_client.ResponseBody(resp, body)
+
+ def delete_local_ip_association(self, local_ip_id, fixed_port_id):
+
+ uri = '%s/local_ips/%s/port_associations/%s' % (self.uri_prefix,
+ local_ip_id,
+ fixed_port_id)
+ resp, body = self.delete(uri)
+ self.expected_success(204, resp.status)
+ service_client.ResponseBody(resp, body)
+
def create_conntrack_helper(self, router_id, helper, protocol, port):
post_body = {'conntrack_helper': {
'helper': helper,
@@ -1130,3 +1114,21 @@
self.uri_prefix, resource_type, resource_id, tag)
resp, body = self.delete(uri)
self.expected_success(204, resp.status)
+
+ def add_addresses_to_address_group(self, address_group_id, addresses):
+ uri = '%s/address-groups/%s/add_addresses' % (
+ self.uri_prefix, address_group_id)
+ request_body = {'addresses': addresses}
+ resp, response_body = self.put(uri, jsonutils.dumps(request_body))
+ self.expected_success(200, resp.status)
+ return service_client.ResponseBody(
+ resp, jsonutils.loads(response_body))
+
+ def remove_addresses_from_address_group(self, address_group_id, addresses):
+ uri = '%s/address-groups/%s/remove_addresses' % (
+ self.uri_prefix, address_group_id)
+ request_body = {'addresses': addresses}
+ resp, response_body = self.put(uri, jsonutils.dumps(request_body))
+ self.expected_success(200, resp.status)
+ return service_client.ResponseBody(
+ resp, jsonutils.loads(response_body))
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/manager.py b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
index e2571ab..394fb02 100644
--- a/neutron_tempest_plugin/sfc/tests/scenario/manager.py
+++ b/neutron_tempest_plugin/sfc/tests/scenario/manager.py
@@ -14,294 +14,34 @@
# License for the specific language governing permissions and limitations
# under the License.
-import subprocess
-
-import netaddr
from oslo_log import log
-from oslo_utils import netutils
-from tempest.common import compute
-from tempest.common.utils.linux import remote_client
-from tempest.common.utils import net_utils
-from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import exceptions as lib_exc
-import tempest.test
+from tempest.scenario import manager
CONF = config.CONF
LOG = log.getLogger(__name__)
-class ScenarioTest(tempest.test.BaseTestCase):
+class ScenarioTest(manager.NetworkScenarioTest):
"""Base class for scenario tests. Uses tempest own clients. """
credentials = ['primary']
- @classmethod
- def setup_clients(cls):
- super(ScenarioTest, cls).setup_clients()
- # Clients (in alphabetical order)
- cls.keypairs_client = cls.os_primary.keypairs_client
- cls.servers_client = cls.os_primary.servers_client
- # Neutron network client
- cls.networks_client = cls.os_primary.networks_client
- cls.ports_client = cls.os_primary.ports_client
- cls.routers_client = cls.os_primary.routers_client
- cls.subnets_client = cls.os_primary.subnets_client
- cls.floating_ips_client = cls.os_primary.floating_ips_client
- cls.security_groups_client = cls.os_primary.security_groups_client
- cls.security_group_rules_client = (
- cls.os_primary.security_group_rules_client)
-
# ## Test functions library
#
# The create_[resource] functions only return body and discard the
# resp part which is not used in scenario tests
- def _create_port(self, network_id, client=None, namestart='port-quotatest',
- **kwargs):
- if not client:
- client = self.ports_client
- name = data_utils.rand_name(namestart)
- result = client.create_port(
- name=name,
- network_id=network_id,
- **kwargs)
- self.assertIsNotNone(result, 'Unable to allocate port')
- port = result['port']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_port, port['id'])
- return port
-
- def create_keypair(self, client=None):
- if not client:
- client = self.keypairs_client
- name = data_utils.rand_name(self.__class__.__name__)
- # We don't need to create a keypair by pubkey in scenario
- body = client.create_keypair(name=name)
- self.addCleanup(client.delete_keypair, name)
- return body['keypair']
-
- def create_server(self, name=None, image_id=None, flavor=None,
- validatable=False, wait_until='ACTIVE',
- clients=None, **kwargs):
- """Wrapper utility that returns a test server.
-
- This wrapper utility calls the common create test server and
- returns a test server. The purpose of this wrapper is to minimize
- the impact on the code of the tests already using this
- function.
- """
-
- # NOTE(jlanoux): As a first step, ssh checks in the scenario
- # tests need to be run regardless of the run_validation and
- # validatable parameters and thus until the ssh validation job
- # becomes voting in CI. The test resources management and IP
- # association are taken care of in the scenario tests.
- # Therefore, the validatable parameter is set to false in all
- # those tests. In this way create_server just return a standard
- # server and the scenario tests always perform ssh checks.
-
- # Needed for the cross_tenant_traffic test:
- if clients is None:
- clients = self.os_primary
-
- if name is None:
- name = data_utils.rand_name(self.__class__.__name__ + "-server")
-
- vnic_type = CONF.network.port_vnic_type
-
- # If vnic_type is configured create port for
- # every network
- if vnic_type:
- ports = []
-
- create_port_body = {'binding:vnic_type': vnic_type,
- 'namestart': 'port-smoke'}
- if kwargs:
- # Convert security group names to security group ids
- # to pass to create_port
- if 'security_groups' in kwargs:
- security_groups = \
- clients.security_groups_client.list_security_groups(
- ).get('security_groups')
- sec_dict = dict([(s['name'], s['id'])
- for s in security_groups])
-
- sec_groups_names = [s['name'] for s in kwargs.pop(
- 'security_groups')]
- security_groups_ids = [sec_dict[s]
- for s in sec_groups_names]
-
- if security_groups_ids:
- create_port_body[
- 'security_groups'] = security_groups_ids
- networks = kwargs.pop('networks', [])
- else:
- networks = []
-
- # If there are no networks passed to us we look up
- # for the project's private networks and create a port.
- # The same behaviour as we would expect when passing
- # the call to the clients with no networks
- if not networks:
- networks = clients.networks_client.list_networks(
- **{'router:external': False, 'fields': 'id'})['networks']
-
- # It's net['uuid'] if networks come from kwargs
- # and net['id'] if they come from
- # clients.networks_client.list_networks
- for net in networks:
- net_id = net.get('uuid', net.get('id'))
- if 'port' not in net:
- port = self._create_port(network_id=net_id,
- client=clients.ports_client,
- **create_port_body)
- ports.append({'port': port['id']})
- else:
- ports.append({'port': net['port']})
- if ports:
- kwargs['networks'] = ports
- self.ports = ports
-
- tenant_network = self.get_tenant_network()
-
- body, _ = compute.create_test_server(
- clients,
- tenant_network=tenant_network,
- wait_until=wait_until,
- name=name, flavor=flavor,
- image_id=image_id, **kwargs)
-
- self.addCleanup(waiters.wait_for_server_termination,
- clients.servers_client, body['id'])
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- clients.servers_client.delete_server, body['id'])
- server = clients.servers_client.show_server(body['id'])['server']
- return server
-
- def get_remote_client(self, ip_address, username=None, private_key=None,
- server=None):
- """Get a SSH client to a remote server
-
- @param ip_address the server floating or fixed IP address to use
- for ssh validation
- @param username name of the Linux account on the remote server
- @param private_key the SSH private key to use
- @param server: server dict, used for debugging purposes
- @return a RemoteClient object
- """
-
- if username is None:
- username = CONF.validation.image_ssh_user
- # Set this with 'keypair' or others to log in with keypair or
- # username/password.
- if CONF.validation.auth_method == 'keypair':
- password = None
- if private_key is None:
- private_key = self.keypair['private_key']
- else:
- password = CONF.validation.image_ssh_password
- private_key = None
- linux_client = remote_client.RemoteClient(
- ip_address, username, pkey=private_key, password=password,
- server=server, servers_client=self.servers_client)
- linux_client.validate_authentication()
- return linux_client
-
- def _log_console_output(self, servers=None, client=None):
- if not CONF.compute_feature_enabled.console_output:
- LOG.debug('Console output not supported, cannot log')
- return
- client = client or self.servers_client
- if not servers:
- servers = client.list_servers()
- servers = servers['servers']
- for server in servers:
- try:
- console_output = client.get_console_output(
- server['id'])['output']
- LOG.debug('Console output for %s\nbody=\n%s',
- server['id'], console_output)
- except lib_exc.NotFound:
- LOG.debug("Server %s disappeared(deleted) while looking "
- "for the console log", server['id'])
-
def _log_net_info(self, exc):
# network debug is called as part of ssh init
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
- def ping_ip_address(self, ip_address, should_succeed=True,
- ping_timeout=None, mtu=None):
- timeout = ping_timeout or CONF.validation.ping_timeout
- cmd = ['ping', '-c1', '-w1']
-
- if mtu:
- cmd += [
- # don't fragment
- '-M', 'do',
- # ping receives just the size of ICMP payload
- '-s', str(net_utils.get_ping_payload_size(mtu, 4))
- ]
- cmd.append(ip_address)
-
- def ping():
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc.communicate()
-
- return (proc.returncode == 0) == should_succeed
-
- caller = test_utils.find_test_caller()
- LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
- ' expected result is %(should_succeed)s', {
- 'caller': caller, 'ip': ip_address, 'timeout': timeout,
- 'should_succeed':
- 'reachable' if should_succeed else 'unreachable'
- })
- result = test_utils.call_until_true(ping, timeout, 1)
- LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
- 'ping result is %(result)s', {
- 'caller': caller, 'ip': ip_address, 'timeout': timeout,
- 'result': 'expected' if result else 'unexpected'
- })
- return result
-
- def check_vm_connectivity(self, ip_address,
- username=None,
- private_key=None,
- should_connect=True,
- mtu=None):
- """Check server connectivity
-
- :param ip_address: server to test against
- :param username: server's ssh username
- :param private_key: server's ssh private key to be used
- :param should_connect: True/False indicates positive/negative test
- positive - attempt ping and ssh
- negative - attempt ping and fail if succeed
- :param mtu: network MTU to use for connectivity validation
-
- :raises: AssertError if the result of the connectivity check does
- not match the value of the should_connect param
- """
- if should_connect:
- msg = "Timed out waiting for %s to become reachable" % ip_address
- else:
- msg = "ip address %s is reachable" % ip_address
- self.assertTrue(self.ping_ip_address(ip_address,
- should_succeed=should_connect,
- mtu=mtu),
- msg=msg)
- if should_connect:
- # no need to check ssh for negative connectivity
- self.get_remote_client(ip_address, username, private_key)
-
def check_public_network_connectivity(self, ip_address, username,
private_key, should_connect=True,
msg=None, servers=None, mtu=None):
@@ -320,7 +60,7 @@
if msg:
ex_msg += ": " + msg
LOG.exception(ex_msg)
- self._log_console_output(servers)
+ self.log_console_output(servers)
raise
@@ -344,460 +84,6 @@
if not CONF.service_available.neutron:
raise cls.skipException('Neutron not available')
- def _create_network(self, networks_client=None,
- tenant_id=None,
- namestart='network-smoke-',
- port_security_enabled=True):
- if not networks_client:
- networks_client = self.networks_client
- if not tenant_id:
- tenant_id = networks_client.tenant_id
- name = data_utils.rand_name(namestart)
- network_kwargs = dict(name=name, tenant_id=tenant_id)
- network_kwargs['port_security_enabled'] = port_security_enabled
- result = networks_client.create_network(**network_kwargs)
- network = result['network']
-
- self.assertEqual(network['name'], name)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- networks_client.delete_network,
- network['id'])
- return network
-
- def _create_subnet(self, network, subnets_client=None,
- routers_client=None, namestart='subnet-smoke',
- **kwargs):
- """Create a subnet for the given network
-
- within the cidr block configured for tenant networks.
- """
- if not subnets_client:
- subnets_client = self.subnets_client
- if not routers_client:
- routers_client = self.routers_client
-
- def cidr_in_use(cidr, tenant_id):
- """Check cidr existence
-
- :returns: True if subnet with cidr already exist in tenant
- False else
- """
- cidr_in_use = self.os_admin.subnets_client.list_subnets(
- tenant_id=tenant_id, cidr=cidr)['subnets']
- return len(cidr_in_use) != 0
-
- ip_version = kwargs.pop('ip_version', 4)
-
- if ip_version == 6:
- tenant_cidr = netaddr.IPNetwork(
- CONF.network.project_network_v6_cidr)
- num_bits = CONF.network.project_network_v6_mask_bits
- else:
- tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
- num_bits = CONF.network.project_network_mask_bits
-
- result = None
- str_cidr = None
- # Repeatedly attempt subnet creation with sequential cidr
- # blocks until an unallocated block is found.
- for subnet_cidr in tenant_cidr.subnet(num_bits):
- str_cidr = str(subnet_cidr)
- if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
- continue
-
- subnet = dict(
- name=data_utils.rand_name(namestart),
- network_id=network['id'],
- tenant_id=network['tenant_id'],
- cidr=str_cidr,
- ip_version=ip_version,
- **kwargs
- )
- try:
- result = subnets_client.create_subnet(**subnet)
- break
- except lib_exc.Conflict as e:
- is_overlapping_cidr = 'overlaps with another subnet' in str(e)
- if not is_overlapping_cidr:
- raise
- self.assertIsNotNone(result, 'Unable to allocate tenant network')
-
- subnet = result['subnet']
- self.assertEqual(subnet['cidr'], str_cidr)
-
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- subnets_client.delete_subnet, subnet['id'])
-
- return subnet
-
- def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'], fixed_ip=ip_addr)['ports']
- # A port can have more than one IP address in some cases.
- # If the network is dual-stack (IPv4 + IPv6), this port is associated
- # with 2 subnets
- p_status = ['ACTIVE']
- # NOTE(vsaienko) With Ironic, instances live on separate hardware
- # servers. Neutron does not bind ports for Ironic instances, as a
- # result the port remains in the DOWN state.
- # TODO(vsaienko) remove once bug: #1599836 is resolved.
- if getattr(CONF.service_available, 'ironic', False):
- p_status.append('DOWN')
- port_map = [(p["id"], fxip["ip_address"])
- for p in ports
- for fxip in p["fixed_ips"]
- if netutils.is_valid_ipv4(fxip["ip_address"]) and
- p['status'] in p_status]
- inactive = [p for p in ports if p['status'] != 'ACTIVE']
- if inactive:
- LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
- self.assertNotEmpty(port_map,
- "No IPv4 addresses found in: %s" % ports)
- self.assertEqual(len(port_map), 1,
- "Found multiple IPv4 addresses: %s. "
- "Unable to determine which port to target."
- % port_map)
- return port_map[0]
-
- def _get_network_by_name(self, network_name):
- net = self.os_admin.networks_client.list_networks(
- name=network_name)['networks']
- self.assertNotEmpty(net,
- "Unable to get network by name: %s" % network_name)
- return net[0]
-
- def create_floating_ip(self, thing, external_network_id=None,
- port_id=None, client=None):
- """Create a floating IP and associates to a resource/port on Neutron"""
- if not external_network_id:
- external_network_id = CONF.network.public_network_id
- if not client:
- client = self.floating_ips_client
- if not port_id:
- port_id, ip4 = self._get_server_port_id_and_ip4(thing)
- else:
- ip4 = None
- result = client.create_floatingip(
- floating_network_id=external_network_id,
- port_id=port_id,
- tenant_id=thing['tenant_id'],
- fixed_ip_address=ip4
- )
- floating_ip = result['floatingip']
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_floatingip,
- floating_ip['id'])
- return floating_ip
-
- def _associate_floating_ip(self, floating_ip, server):
- port_id, _ = self._get_server_port_id_and_ip4(server)
- kwargs = dict(port_id=port_id)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertEqual(port_id, floating_ip['port_id'])
- return floating_ip
-
- def _disassociate_floating_ip(self, floating_ip):
- """:param floating_ip: floating_ips_client.create_floatingip"""
- kwargs = dict(port_id=None)
- floating_ip = self.floating_ips_client.update_floatingip(
- floating_ip['id'], **kwargs)['floatingip']
- self.assertIsNone(floating_ip['port_id'])
- return floating_ip
-
- def check_floating_ip_status(self, floating_ip, status):
- """Verifies floatingip reaches the given status
-
- :param dict floating_ip: floating IP dict to check status
- :param status: target status
- :raises: AssertionError if status doesn't match
- """
- floatingip_id = floating_ip['id']
-
- def refresh():
- result = (self.floating_ips_client.
- show_floatingip(floatingip_id)['floatingip'])
- return status == result['status']
-
- if not test_utils.call_until_true(refresh,
- CONF.network.build_timeout,
- CONF.network.build_interval):
- floating_ip = self.floating_ips_client.show_floatingip(
- floatingip_id)['floatingip']
- self.assertEqual(status, floating_ip['status'],
- message="FloatingIP: {fp} is at status: {cst}. "
- "failed to reach status: {st}"
- .format(fp=floating_ip, cst=floating_ip['status'],
- st=status))
- LOG.info("FloatingIP: {fp} is at status: {st}"
- .format(fp=floating_ip, st=status))
-
- def _check_tenant_network_connectivity(self, server,
- username,
- private_key,
- should_connect=True,
- servers_for_debug=None):
- if not CONF.network.project_networks_reachable:
- msg = 'Tenant networks not configured to be reachable.'
- LOG.info(msg)
- return
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- try:
- for ip_addresses in server['addresses'].values():
- for ip_address in ip_addresses:
- self.check_vm_connectivity(ip_address['addr'],
- username,
- private_key,
- should_connect=should_connect)
- except Exception as e:
- LOG.exception('Tenant network connectivity check failed')
- self._log_console_output(servers_for_debug)
- self._log_net_info(e)
- raise
-
- def _check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
- """assert ping server via source ssh connection
-
- Note: This is an internal method. Use check_remote_connectivity
- instead.
-
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
- :param nic: specific network interface to ping from
- """
- def ping_remote():
- try:
- source.ping_host(dest, nic=nic)
- except lib_exc.SSHExecCommandFailed:
- LOG.warning('Failed to ping IP: %s via a ssh connection '
- 'from: %s.', dest, source.ssh_client.host)
- return not should_succeed
- return should_succeed
-
- return test_utils.call_until_true(ping_remote,
- CONF.validation.ping_timeout,
- 1)
-
- def check_remote_connectivity(self, source, dest, should_succeed=True,
- nic=None):
- """assert ping server via source ssh connection
-
- :param source: RemoteClient: an ssh connection from which to ping
- :param dest: and IP to ping against
- :param should_succeed: boolean should ping succeed or not
- :param nic: specific network interface to ping from
- """
- result = self._check_remote_connectivity(source, dest, should_succeed,
- nic)
- source_host = source.ssh_client.host
- if should_succeed:
- msg = "Timed out waiting for %s to become reachable from %s" \
- % (dest, source_host)
- else:
- msg = "%s is reachable from %s" % (dest, source_host)
- self.assertTrue(result, msg)
-
- def _create_security_group(self, security_group_rules_client=None,
- tenant_id=None,
- namestart='secgroup-smoke',
- security_groups_client=None):
- if security_group_rules_client is None:
- security_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- if tenant_id is None:
- tenant_id = security_groups_client.tenant_id
- secgroup = self._create_empty_security_group(
- namestart=namestart, client=security_groups_client,
- tenant_id=tenant_id)
-
- # Add rules to the security group
- rules = self._create_loginable_secgroup_rule(
- security_group_rules_client=security_group_rules_client,
- secgroup=secgroup,
- security_groups_client=security_groups_client)
- for rule in rules:
- self.assertEqual(tenant_id, rule['tenant_id'])
- self.assertEqual(secgroup['id'], rule['security_group_id'])
- return secgroup
-
- def _create_empty_security_group(self, client=None, tenant_id=None,
- namestart='secgroup-smoke'):
- """Create a security group without rules.
-
- Default rules will be created:
- - IPv4 egress to any
- - IPv6 egress to any
-
- :param tenant_id: secgroup will be created in this tenant
- :returns: the created security group
- """
- if client is None:
- client = self.security_groups_client
- if not tenant_id:
- tenant_id = client.tenant_id
- sg_name = data_utils.rand_name(namestart)
- sg_desc = sg_name + " description"
- sg_dict = dict(name=sg_name,
- description=sg_desc)
- sg_dict['tenant_id'] = tenant_id
- result = client.create_security_group(**sg_dict)
-
- secgroup = result['security_group']
- self.assertEqual(secgroup['name'], sg_name)
- self.assertEqual(tenant_id, secgroup['tenant_id'])
- self.assertEqual(secgroup['description'], sg_desc)
-
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- client.delete_security_group, secgroup['id'])
- return secgroup
-
- def _default_security_group(self, client=None, tenant_id=None):
- """Get default secgroup for given tenant_id.
-
- :returns: default secgroup for given tenant
- """
- if client is None:
- client = self.security_groups_client
- if not tenant_id:
- tenant_id = client.tenant_id
- sgs = [
- sg for sg in list(client.list_security_groups().values())[0]
- if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
- ]
- msg = "No default security group for tenant %s." % (tenant_id)
- self.assertNotEmpty(sgs, msg)
- return sgs[0]
-
- def _create_security_group_rule(self, secgroup=None,
- sec_group_rules_client=None,
- tenant_id=None,
- security_groups_client=None, **kwargs):
- """Create a rule from a dictionary of rule parameters.
-
- Create a rule in a secgroup. if secgroup not defined will search for
- default secgroup in tenant_id.
-
- :param secgroup: the security group.
- :param tenant_id: if secgroup not passed -- the tenant in which to
- search for default secgroup
- :param kwargs: a dictionary containing rule parameters:
- for example, to allow incoming ssh:
- rule = {
- direction: 'ingress'
- protocol:'tcp',
- port_range_min: 22,
- port_range_max: 22
- }
- """
- if sec_group_rules_client is None:
- sec_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- if not tenant_id:
- tenant_id = security_groups_client.tenant_id
- if secgroup is None:
- secgroup = self._default_security_group(
- client=security_groups_client, tenant_id=tenant_id)
-
- ruleset = dict(security_group_id=secgroup['id'],
- tenant_id=secgroup['tenant_id'])
- ruleset.update(kwargs)
-
- sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
- sg_rule = sg_rule['security_group_rule']
-
- self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
- self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
-
- return sg_rule
-
- def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
- secgroup=None,
- security_groups_client=None):
- """Create loginable security group rule
-
- This function will create:
- 1. egress and ingress tcp port 22 allow rule in order to allow ssh
- access for ipv4.
- 2. egress and ingress ipv6 icmp allow rule, in order to allow icmpv6.
- 3. egress and ingress ipv4 icmp allow rule, in order to allow icmpv4.
- """
-
- if security_group_rules_client is None:
- security_group_rules_client = self.security_group_rules_client
- if security_groups_client is None:
- security_groups_client = self.security_groups_client
- rules = []
- rulesets = [
- dict(
- # ssh
- protocol='tcp',
- port_range_min=22,
- port_range_max=22,
- ),
- dict(
- # ping
- protocol='icmp',
- ),
- dict(
- # ipv6-icmp for ping6
- protocol='icmp',
- ethertype='IPv6',
- )
- ]
- sec_group_rules_client = security_group_rules_client
- for ruleset in rulesets:
- for r_direction in ['ingress', 'egress']:
- ruleset['direction'] = r_direction
- try:
- sg_rule = self._create_security_group_rule(
- sec_group_rules_client=sec_group_rules_client,
- secgroup=secgroup,
- security_groups_client=security_groups_client,
- **ruleset)
- except lib_exc.Conflict as ex:
- # if rule already exist - skip rule and continue
- msg = 'Security group rule already exists'
- if msg not in ex._error_string:
- raise ex
- else:
- self.assertEqual(r_direction, sg_rule['direction'])
- rules.append(sg_rule)
-
- return rules
-
- def _get_router(self, client=None, tenant_id=None):
- """Retrieve a router for the given tenant id.
-
- If a public router has been configured, it will be returned.
-
- If a public router has not been configured, but a public
- network has, a tenant router will be created and returned that
- routes traffic to the public network.
- """
- if not client:
- client = self.routers_client
- if not tenant_id:
- tenant_id = client.tenant_id
- router_id = CONF.network.public_router_id
- network_id = CONF.network.public_network_id
- if router_id:
- body = client.show_router(router_id)
- return body['router']
- elif network_id:
- router = self._create_router(client, tenant_id)
- kwargs = {'external_gateway_info': dict(network_id=network_id)}
- router = client.update_router(router['id'], **kwargs)['router']
- return router
- else:
- raise Exception("Neither of 'public_router_id' or "
- "'public_network_id' has been defined.")
-
def _create_router(self, client=None, tenant_id=None,
namestart='router-smoke'):
if not client:
@@ -814,62 +100,3 @@
client.delete_router,
router['id'])
return router
-
- def _update_router_admin_state(self, router, admin_state_up):
- kwargs = dict(admin_state_up=admin_state_up)
- router = self.routers_client.update_router(
- router['id'], **kwargs)['router']
- self.assertEqual(admin_state_up, router['admin_state_up'])
-
- def create_networks(self, networks_client=None,
- routers_client=None, subnets_client=None,
- tenant_id=None, dns_nameservers=None,
- port_security_enabled=True):
- """Create a network with a subnet connected to a router.
-
- The baremetal driver is a special case since all nodes are
- on the same shared network.
-
- :param tenant_id: id of tenant to create resources in.
- :param dns_nameservers: list of dns servers to send to subnet.
- :returns: network, subnet, router
- """
- if CONF.network.shared_physical_network:
- # NOTE(Shrews): This exception is for environments where tenant
- # credential isolation is available, but network separation is
- # not (the current baremetal case). Likely can be removed when
- # test account mgmt is reworked:
- # https://blueprints.launchpad.net/tempest/+spec/test-accounts
- if not CONF.compute.fixed_network_name:
- m = 'fixed_network_name must be specified in config'
- raise lib_exc.InvalidConfiguration(m)
- network = self._get_network_by_name(
- CONF.compute.fixed_network_name)
- router = None
- subnet = None
- else:
- network = self._create_network(
- networks_client=networks_client,
- tenant_id=tenant_id,
- port_security_enabled=port_security_enabled)
- router = self._get_router(client=routers_client,
- tenant_id=tenant_id)
- subnet_kwargs = dict(network=network,
- subnets_client=subnets_client,
- routers_client=routers_client)
- # use explicit check because empty list is a valid option
- if dns_nameservers is not None:
- subnet_kwargs['dns_nameservers'] = dns_nameservers
- subnet = self._create_subnet(**subnet_kwargs)
- if not routers_client:
- routers_client = self.routers_client
- router_id = router['id']
- routers_client.add_router_interface(router_id,
- subnet_id=subnet['id'])
-
- # save a cleanup job to remove this association between
- # router and subnet
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- routers_client.remove_router_interface, router_id,
- subnet_id=subnet['id'])
- return network, subnet, router
diff --git a/neutron_tempest_plugin/sfc/tests/scenario/test_sfc.py b/neutron_tempest_plugin/sfc/tests/scenario/test_sfc.py
index 2f091e0..2093006 100644
--- a/neutron_tempest_plugin/sfc/tests/scenario/test_sfc.py
+++ b/neutron_tempest_plugin/sfc/tests/scenario/test_sfc.py
@@ -61,14 +61,15 @@
self.ssh_user = CONF.validation.image_ssh_user
self.keypair = self.create_keypair()
- self.net1, self.subnet1, self.router1 = self.create_networks(
- port_security_enabled=False)
+ self.net1, self.subnet1, self.router1 = (
+ self.setup_network_subnet_with_router(port_security_enabled=False)
+ )
self.router2 = self._create_router()
self.router3 = self._create_router()
- self.router2_net1 = self._create_port(self.net1['id'])
+ self.router2_net1 = self.create_port(self.net1['id'])
self._add_router_interface(
self.router2['id'], self.router2_net1['id'])
- self.router3_net1 = self._create_port(self.net1['id'])
+ self.router3_net1 = self.create_port(self.net1['id'])
self._add_router_interface(
self.router3['id'], self.router3_net1['id'])
self.router2_net1_fixed_ip = self.router2_net1[
@@ -81,7 +82,7 @@
floating_ip = self._create_floating_ip(
server)
port_id, fixed_ip = (
- self._get_server_port_id_and_ip4(server))
+ self.get_server_port_id_and_ip4(server))
return floating_ip, port_id, fixed_ip
def _create_floating_ip(self, server, client=None):
@@ -1063,13 +1064,13 @@
self
):
self.router4 = self._create_router()
- self.router4_net1 = self._create_port(self.net1['id'])
+ self.router4_net1 = self.create_port(self.net1['id'])
self._add_router_interface(
self.router4['id'], self.router4_net1['id'])
self.router4_net1_fixed_ip = self.router4_net1[
'fixed_ips'][0]['ip_address']
self.router5 = self._create_router()
- self.router5_net1 = self._create_port(self.net1['id'])
+ self.router5_net1 = self.create_port(self.net1['id'])
self._add_router_interface(
self.router5['id'], self.router5_net1['id'])
self.router5_net1_fixed_ip = self.router5_net1[
@@ -1182,7 +1183,7 @@
adm_get_server = self.os_admin.servers_client.show_server
server = adm_get_server(inst['id'])['server']
- self._check_tenant_network_connectivity(
+ self.check_tenant_network_connectivity(
server, self.ssh_user, self.keypair['private_key'])
# Check server is on different node
diff --git a/neutron_tempest_plugin/tap_as_a_service/__init__.py b/neutron_tempest_plugin/tap_as_a_service/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/__init__.py
diff --git a/neutron_tempest_plugin/tap_as_a_service/api/__init__.py b/neutron_tempest_plugin/tap_as_a_service/api/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/api/__init__.py
diff --git a/neutron_tempest_plugin/tap_as_a_service/api/test_taas.py b/neutron_tempest_plugin/tap_as_a_service/api/test_taas.py
new file mode 100644
index 0000000..06dce53
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/api/test_taas.py
@@ -0,0 +1,129 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.tap_as_a_service import base
+
+CONF = config.CONF
+
+
+class TaaSExtensionTestJSON(base.BaseTaasTest):
+
+ @classmethod
+ @utils.requires_ext(extension='taas', service='network')
+ def skip_checks(cls):
+ super(TaaSExtensionTestJSON, cls).skip_checks()
+
+ @classmethod
+ def resource_setup(cls):
+ super(TaaSExtensionTestJSON, cls).resource_setup()
+ cls.network = cls.create_network()
+ cls.ts_port = cls.create_port(cls.network)
+ cls.tf_port = cls.create_port(cls.network)
+ cls.tf2_port = cls.create_port(cls.network)
+
+ @decorators.idempotent_id('b993c14e-797a-4c91-b4da-8cb1a450aa2f')
+ def test_create_tap_service_and_flow(self):
+        """create tap service and tap flow
+
+ Test create tap service and flow.
+ """
+ tap_service = self.create_tap_service(port_id=self.ts_port['id'])
+ self.create_tap_flow(tap_service_id=tap_service['id'],
+ direction='BOTH', source_port=self.tf_port['id'])
+
+ @decorators.idempotent_id('897a0aaf-1b55-4ea8-9d9f-1bc0fd09cb60')
+ @utils.requires_ext(extension='taas-vlan-filter', service='network')
+ def test_create_tap_service_and_flow_vlan_filter(self):
+ """create tap service with vlan_filter
+
+ Test create tap service with additional vlan_filter argument.
+ """
+ tap_service = self.create_tap_service(port_id=self.ts_port['id'])
+ tap_flow = self.create_tap_flow(tap_service_id=tap_service['id'],
+ direction='BOTH',
+ source_port=self.tf_port['id'],
+ vlan_filter='189,279,999-1008')
+ self.assertEqual(tap_flow['vlan_filter'], '189,279,999-1008')
+
+ @decorators.idempotent_id('d7a2115d-16b4-41cf-95a6-dcebc3682b24')
+ def test_delete_tap_resources_after_ts_port_delete(self):
+ """delete tap resources after ts port delete
+
+ Test delete tap resources after deletion of ts port.
+ """
+ tap_service = self.create_tap_service(port_id=self.ts_port['id'])
+ tap_flow = self.create_tap_flow(tap_service_id=tap_service['id'],
+ direction='BOTH',
+ source_port=self.tf2_port['id'])
+ # delete ts_port; it shall also delete the associated tap-service and
+ # subsequently the tap-flow as well
+ self.ports_client.delete_port(self.ts_port['id'])
+ # Attempt tap-service deletion; it should throw not found exception.
+ self.assertRaises(lib_exc.NotFound,
+ self.tap_services_client.delete_tap_service,
+ tap_service['id'])
+ # Attempt tap-flow deletion; it should throw not found exception.
+ self.assertRaises(lib_exc.NotFound,
+ self.tap_flows_client.delete_tap_flow,
+ tap_flow['id'])
+
+ @decorators.idempotent_id('9ba4edfd-4002-4c44-b02b-6c4f71b40a92')
+ def test_delete_tap_resources_after_tf_port_delete(self):
+ """delete tap resources after tf port delete
+
+        Test delete tap resources after deletion of tf port.
+ """
+ tap_service = self.create_tap_service(port_id=self.ts_port['id'])
+ tap_flow = self.create_tap_flow(tap_service_id=tap_service['id'],
+ direction='BOTH',
+ source_port=self.tf_port['id'])
+ # delete tf port; it shall also delete the associated tap-flow
+ self.ports_client.delete_port(self.tf_port['id'])
+ # Attempt tap-flow deletion; it should throw not found exception.
+ self.assertRaises(lib_exc.NotFound,
+ self.tap_flows_client.delete_tap_flow,
+ tap_flow['id'])
+ # delete tap service; it shall go fine
+ self.tap_services_client.delete_tap_service(tap_service['id'])
+
+ @decorators.idempotent_id('687089b8-b045-496d-86bf-030b380039d1')
+ def test_create_and_update_tap_service(self):
+ """create and update tap service
+
+ Test update tap service - update description.
+ """
+ tap_service = self.create_tap_service(port_id=self.ts_port['id'])
+
+ # Update description of the tap service
+ self.update_tap_service(
+ tap_service['id'],
+ description='Tap Service Description Updated')
+
+ @decorators.idempotent_id('bb4d5482-37fc-46b5-85a5-5867e9adbfae')
+ def test_create_and_update_tap_flow(self):
+ """create and update tap flow
+
+ Test update tap flow - update description.
+ """
+ tap_service = self.create_tap_service(port_id=self.ts_port['id'])
+ tap_flow = self.create_tap_flow(
+ tap_service_id=tap_service['id'],
+ direction='BOTH', source_port=self.tf_port['id'])
+ # Update description of the tap flow
+ self.update_tap_flow(
+ tap_flow['id'],
+ description='Tap Flow Description Updated')
diff --git a/neutron_tempest_plugin/tap_as_a_service/base.py b/neutron_tempest_plugin/tap_as_a_service/base.py
new file mode 100644
index 0000000..3ddc797
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/base.py
@@ -0,0 +1,82 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import tempest.api.network.base as test
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+
+from neutron_tempest_plugin.tap_as_a_service.services import taas_client
+
+CONF = config.CONF
+
+
+class BaseTaasTest(test.BaseAdminNetworkTest):
+
+ @classmethod
+ def resource_setup(cls):
+ super(BaseTaasTest, cls).resource_setup()
+ os_primary = cls.os_primary
+ cls.tap_services_client = taas_client.TapServicesClient(
+ os_primary.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **os_primary.default_params)
+ cls.tap_flows_client = taas_client.TapFlowsClient(
+ os_primary.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **os_primary.default_params)
+
+ def create_tap_service(self, **kwargs):
+ body = self.tap_services_client.create_tap_service(
+ name=data_utils.rand_name("tap_service"),
+ **kwargs)
+ tap_service = body['tap_service']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.tap_services_client.delete_tap_service,
+ tap_service['id'])
+ return tap_service
+
+ def create_tap_flow(self, **kwargs):
+ body = self.tap_flows_client.create_tap_flow(
+ name=data_utils.rand_name("tap_service"),
+ **kwargs)
+ tap_flow = body['tap_flow']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.tap_flows_client.delete_tap_flow,
+ tap_flow['id'])
+ return tap_flow
+
+ def update_tap_service(self, tap_service_id, **kwargs):
+ body = self.tap_services_client.update_tap_service(
+ tap_service_id,
+ **kwargs)
+ tap_service = body['tap_service']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.tap_services_client.delete_tap_service,
+ tap_service['id'])
+
+ def update_tap_flow(self, tap_flow_id, **kwargs):
+ body = self.tap_flows_client.update_tap_flow(
+ tap_flow_id,
+ **kwargs)
+ tap_flow = body['tap_flow']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.tap_flows_client.delete_tap_flow,
+ tap_flow['id'])
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/__init__.py b/neutron_tempest_plugin/tap_as_a_service/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/__init__.py
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/manager.py b/neutron_tempest_plugin/tap_as_a_service/scenario/manager.py
new file mode 100644
index 0000000..80389c1
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/manager.py
@@ -0,0 +1,293 @@
+# Copyright 2012 OpenStack Foundation
+# Copyright 2013 IBM Corp.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import netaddr
+from oslo_log import log
+from oslo_utils import netutils
+
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+
+from neutron_tempest_plugin.scenario import base
+from neutron_tempest_plugin.tap_as_a_service.services import taas_client
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class BaseTaasScenarioTests(base.BaseTempestTestCase):
+
+ credentials = ['primary', 'admin']
+
+ @classmethod
+ def setup_clients(cls):
+ super(BaseTaasScenarioTests, cls).setup_clients()
+
+ cls.client = cls.os_primary.network_client
+ cls.admin_network_client = cls.os_admin.network_client
+
+ # Setup taas clients
+ cls.tap_services_client = taas_client.TapServicesClient(
+ cls.os_primary.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **cls.os_primary.default_params)
+ cls.tap_flows_client = taas_client.TapFlowsClient(
+ cls.os_primary.auth_provider,
+ CONF.network.catalog_type,
+ CONF.network.region or CONF.identity.region,
+ endpoint_type=CONF.network.endpoint_type,
+ build_interval=CONF.network.build_interval,
+ build_timeout=CONF.network.build_timeout,
+ **cls.os_primary.default_params)
+
+ def _create_subnet(self, network, subnets_client=None,
+ namestart='subnet-smoke', **kwargs):
+ """Create a subnet for the given network
+
+ within the cidr block configured for tenant networks.
+ """
+ if not subnets_client:
+ subnets_client = self.client
+
+ def cidr_in_use(cidr, tenant_id):
+ """Check cidr existence
+
+ :returns: True if subnet with cidr already exist in tenant
+ False else
+ """
+ cidr_in_use = self.os_admin.network_client.list_subnets(
+ tenant_id=tenant_id, cidr=cidr)['subnets']
+ return len(cidr_in_use) != 0
+
+ ip_version = kwargs.pop('ip_version', 4)
+
+ if ip_version == 6:
+ tenant_cidr = netaddr.IPNetwork(
+ CONF.network.project_network_v6_cidr)
+ num_bits = CONF.network.project_network_v6_mask_bits
+ else:
+ tenant_cidr = netaddr.IPNetwork(CONF.network.project_network_cidr)
+ num_bits = CONF.network.project_network_mask_bits
+
+ result = None
+ str_cidr = None
+ # Repeatedly attempt subnet creation with sequential cidr
+ # blocks until an unallocated block is found.
+ for subnet_cidr in tenant_cidr.subnet(num_bits):
+ str_cidr = str(subnet_cidr)
+ if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
+ continue
+
+ subnet = dict(
+ name=data_utils.rand_name(namestart),
+ network_id=network['id'],
+ tenant_id=network['tenant_id'],
+ cidr=str_cidr,
+ ip_version=ip_version,
+ **kwargs
+ )
+ try:
+ result = subnets_client.create_subnet(**subnet)
+ break
+ except lib_exc.Conflict as e:
+ is_overlapping_cidr = 'overlaps with another subnet' in str(e)
+ if not is_overlapping_cidr:
+ raise
+ assert result is not None, 'Unable to allocate tenant network'
+
+ subnet = result['subnet']
+ assert subnet['cidr'] == str_cidr
+
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ subnets_client.delete_subnet, subnet['id'])
+
+ return subnet
+
+ def _get_server_port_id_and_ip4(self, server, ip_addr=None):
+ ports = self.os_admin.network_client.list_ports(
+ device_id=server['id'], fixed_ip=ip_addr)['ports']
+ # A port can have more than one IP address in some cases.
+ # If the network is dual-stack (IPv4 + IPv6), this port is associated
+ # with 2 subnets
+ p_status = ['ACTIVE']
+ # NOTE(vsaienko) With Ironic, instances live on separate hardware
+ # servers. Neutron does not bind ports for Ironic instances, as a
+ # result the port remains in the DOWN state.
+ # TODO(vsaienko) remove once bug: #1599836 is resolved.
+ if getattr(CONF.service_available, 'ironic', False):
+ p_status.append('DOWN')
+ port_map = [(p["id"], fxip["ip_address"])
+ for p in ports
+ for fxip in p["fixed_ips"]
+ if netutils.is_valid_ipv4(fxip["ip_address"]) and
+ p['status'] in p_status]
+ inactive = [p for p in ports if p['status'] != 'ACTIVE']
+ if inactive:
+ LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
+
+ self.assertNotEqual(0, len(port_map),
+ "No IPv4 addresses found in: %s" % ports)
+ self.assertEqual(len(port_map), 1,
+ "Found multiple IPv4 addresses: %s. "
+ "Unable to determine which port to target."
+ % port_map)
+ return port_map[0]
+
+ def _get_network_by_name(self, network_name):
+ net = self.os_admin.network_client.list_networks(
+ name=network_name)['networks']
+ self.assertNotEqual(len(net), 0,
+ "Unable to get network by name: %s" % network_name)
+ return net[0]
+
+ def _run_in_background(self, sshclient, cmd):
+ runInBg = "nohup %s 2>&1 &" % cmd
+ sshclient.exec_command(runInBg)
+
+ def create_networks(self, networks_client=None,
+ routers_client=None, subnets_client=None,
+ dns_nameservers=None, port_security_enabled=True):
+ """Create a network with a subnet connected to a router.
+
+ The baremetal driver is a special case since all nodes are
+ on the same shared network.
+
+ :param dns_nameservers: list of dns servers to send to subnet.
+ :returns: network, subnet, router
+ """
+ if CONF.network.shared_physical_network:
+ # NOTE(Shrews): This exception is for environments where tenant
+ # credential isolation is available, but network separation is
+ # not (the current baremetal case). Likely can be removed when
+ # test account mgmt is reworked:
+ # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+ if not CONF.compute.fixed_network_name:
+ m = 'fixed_network_name must be specified in config'
+ raise lib_exc.InvalidConfiguration(m)
+ network = self._get_network_by_name(
+ CONF.compute.fixed_network_name)
+ router = None
+ subnet = None
+ else:
+ network = self.create_network(
+ client=networks_client,
+ port_security_enabled=port_security_enabled)
+ router = self._ensure_public_router(client=routers_client)
+ subnet_kwargs = dict(network=network,
+ subnets_client=subnets_client)
+ # use explicit check because empty list is a valid option
+ if dns_nameservers is not None:
+ subnet_kwargs['dns_nameservers'] = dns_nameservers
+ subnet = self._create_subnet(**subnet_kwargs)
+ if not routers_client:
+ routers_client = self.client
+ router_id = router['id']
+ routers_client.add_router_interface_with_subnet_id(
+ router_id=router_id, subnet_id=subnet['id'])
+
+ # save a cleanup job to remove this association between
+ # router and subnet
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ routers_client.remove_router_interface_with_subnet_id,
+ router_id=router_id, subnet_id=subnet['id'])
+ return network, subnet, router
+
+ def _create_server_with_floatingip(self, use_taas_cloud_image=False,
+ provider_net=False, **kwargs):
+ network = self.network
+ if use_taas_cloud_image:
+ image = CONF.neutron_plugin_options.advanced_image_ref
+ flavor = CONF.neutron_plugin_options.advanced_image_flavor_ref
+ else:
+ flavor = CONF.compute.flavor_ref
+ image = CONF.compute.image_ref
+
+ if provider_net:
+ network = self.provider_network
+
+ port = self.create_port(
+ network=network, security_groups=[self.secgroup['id']], **kwargs)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_port, port['id'])
+
+ params = {
+ 'flavor_ref': flavor,
+ 'image_ref': image,
+ 'key_name': self.keypair['name']
+ }
+ vm = self.create_server(networks=[{'port': port['id']}], **params)
+ self.wait_for_server_active(vm['server'])
+ self.wait_for_guest_os_ready(vm['server'])
+
+ fip = self.create_and_associate_floatingip(
+ port_id=port['id'])
+
+ return port, fip
+
+ def _setup_provider_network(self):
+ net = self._create_provider_network()
+ self._create_provider_subnet(net["id"])
+ return net
+
+ def _create_provider_network(self):
+ network_kwargs = {
+ "admin_state_up": True,
+ "shared": True,
+ "provider:network_type": "vlan",
+ "provider:physical_network":
+ CONF.taas.provider_physical_network,
+ }
+
+ segmentation_id = CONF.taas.provider_segmentation_id
+ if segmentation_id and segmentation_id == "0":
+ network_kwargs['provider:network_type'] = 'flat'
+ elif segmentation_id:
+ network_kwargs['provider:segmentation_id'] = segmentation_id
+
+ network = self.admin_network_client.create_network(
+ **network_kwargs)['network']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.admin_network_client.delete_network,
+ network['id'])
+
+ return network
+
+ def _create_provider_subnet(self, net_id):
+ subnet = dict(
+ network_id=net_id,
+ cidr="172.25.100.0/24",
+ ip_version=4,
+ )
+ result = self.admin_network_client.create_subnet(**subnet)
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.admin_network_client.delete_subnet, result['subnet']['id'])
+
+ self.admin_network_client.add_router_interface_with_subnet_id(
+ self.router['id'], subnet_id=result['subnet']['id'])
+
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.admin_network_client.remove_router_interface_with_subnet_id,
+ self.router['id'], subnet_id=result['subnet']['id'])
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
new file mode 100644
index 0000000..5598fbe
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/test_taas.py
@@ -0,0 +1,249 @@
+# Copyright (c) 2015 Midokura SARL
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+import testtools
+
+from neutron_tempest_plugin.tap_as_a_service.scenario import manager
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+# pylint: disable=too-many-ancestors
+class TestTaaS(manager.BaseTaasScenarioTests):
+ """Config Requirement in tempest.conf:
+
+ - project_network_cidr_bits - specifies the subnet range for each network
+ - project_network_cidr
+ - public_network_id.
+ """
+
+ @classmethod
+ @utils.requires_ext(extension='taas', service='network')
+ @utils.requires_ext(extension='security-group', service='network')
+ @utils.requires_ext(extension='router', service='network')
+ def skip_checks(cls):
+ super(TestTaaS, cls).skip_checks()
+
+ @classmethod
+ def resource_setup(cls):
+ super(TestTaaS, cls).resource_setup()
+ cls.keypair = cls.create_keypair()
+ cls.secgroup = cls.create_security_group(
+ name=data_utils.rand_name('secgroup'))
+ cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+ LOG.debug("TaaSScenarioTest Setup done.")
+
+ def _create_server(self, network, security_group=None):
+ """Create a server
+
+ Creates a server having a port on given network and security group.
+ """
+ keys = self.create_keypair()
+ kwargs = {}
+ if security_group is not None:
+ kwargs['security_groups'] = [{'name': security_group['name']}]
+ server = self.create_server(
+ key_name=keys['name'],
+ networks=[{'uuid': network['id']}],
+ flavor_ref=CONF.compute.flavor_ref,
+ image_ref=CONF.compute.image_ref,
+ **kwargs)
+ self.wait_for_server_active(server['server'])
+ self.wait_for_guest_os_ready(server['server'])
+ return server, keys
+
+ @testtools.skipUnless(CONF.taas.provider_physical_network,
+ 'Provider physical network parameter not provided.')
+ @utils.requires_ext(extension="provider", service="network")
+ def _create_network_sriov(self, networks_client=None,
+ tenant_id=None,
+ namestart='network-smoke-sriov-',
+ port_security_enabled=True):
+ if not networks_client:
+ networks_client = self.networks_client
+ if not tenant_id:
+ tenant_id = networks_client.tenant_id
+ name = data_utils.rand_name(namestart)
+ network_kwargs = dict(name=name, tenant_id=tenant_id)
+ # Neutron disables port security by default so we have to check the
+ # config before trying to create the network with
+ # port_security_enabled
+ if CONF.network_feature_enabled.port_security:
+ network_kwargs['port_security_enabled'] = port_security_enabled
+
+ if CONF.network.port_vnic_type and \
+ CONF.network.port_vnic_type == 'direct':
+ network_kwargs['provider:network_type'] = 'vlan'
+ if CONF.taas_plugin_options.provider_segmentation_id:
+ if CONF.taas_plugin_options.provider_segmentation_id == '0':
+ network_kwargs['provider:network_type'] = 'flat'
+ else:
+ network_kwargs['provider:segmentation_id'] = \
+ CONF.taas_plugin_options.provider_segmentation_id
+
+ network_kwargs['provider:physical_network'] = \
+ CONF.taas_plugin_options.provider_physical_network
+
+ result = networks_client.create_network(**network_kwargs)
+ network = result['network']
+ self.assertEqual(network['name'], name)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ networks_client.delete_network,
+ network['id'])
+ return network
+
+ @testtools.skipUnless(CONF.taas.provider_physical_network,
+ 'Provider physical network parameter not provided.')
+ @utils.requires_ext(extension="provider", service="network")
+ def create_networks_sriov(self, networks_client=None,
+ routers_client=None, subnets_client=None,
+ tenant_id=None, dns_nameservers=None,
+ port_security_enabled=True):
+ """Create a network with a subnet connected to a router.
+
+ The baremetal driver is a special case since all nodes are
+ on the same shared network.
+
+ :param tenant_id: id of tenant to create resources in.
+ :param dns_nameservers: list of dns servers to send to subnet.
+ :returns: network, subnet, router
+ """
+ router = None
+ if CONF.network.shared_physical_network:
+ # NOTE(Shrews): This exception is for environments where tenant
+ # credential isolation is available, but network separation is
+ # not (the current baremetal case). Likely can be removed when
+ # test account mgmt is reworked:
+ # https://blueprints.launchpad.net/tempest/+spec/test-accounts
+ if not CONF.compute.fixed_network_name:
+ msg = 'fixed_network_name must be specified in config'
+ raise lib_exc.InvalidConfiguration(msg)
+ network = self._get_network_by_name(
+ CONF.compute.fixed_network_name)
+ subnet = None
+ else:
+ network = self._create_network_sriov(
+ networks_client=networks_client,
+ tenant_id=tenant_id,
+ port_security_enabled=port_security_enabled)
+ subnet_kwargs = dict(network=network,
+ subnets_client=subnets_client,
+ routers_client=routers_client)
+ # use explicit check because empty list is a valid option
+ if dns_nameservers is not None:
+ subnet_kwargs['dns_nameservers'] = dns_nameservers
+ subnet = self._create_subnet(**subnet_kwargs)
+ return network, subnet, router
+
+ def _create_topology(self):
+ """Topology
+
+ +----------+ +----------+
+ | "server" | | "server" |
+ | VM-1 | | VM-2 |
+ | | | |
+ +----+-----+ +----+-----+
+ | |
+ | |
+ +----+----+----+----+----+----+-----+
+ |
+ |
+ |
+ +------+------+
+ | "server" |
+ | tap-service |
+ +-------------+
+ """
+ LOG.debug('Starting Topology Creation')
+ resp = {}
+ # Create Network1 and Subnet1.
+ vnic_type = CONF.network.port_vnic_type
+ if vnic_type == 'direct':
+ self.network1, self.subnet1, self.router1 = \
+ self.create_networks_sriov()
+ else:
+ self.network1, self.subnet1, self.router1 = self.create_networks()
+ resp['network1'] = self.network1
+ resp['subnet1'] = self.subnet1
+ resp['router1'] = self.router1
+
+ # Create a security group allowing icmp and ssh traffic.
+ self.security_group = self.create_security_group(
+ name=data_utils.rand_name('secgroup'))
+ self.create_loginable_secgroup_rule(
+ secgroup_id=self.security_group['id'])
+
+ # Create 3 VMs and assign them a floating IP each.
+ port1, server_floating_ip_1 = self._create_server_with_floatingip()
+ port2, server_floating_ip_2 = self._create_server_with_floatingip()
+ port3, server_floating_ip_3 = self._create_server_with_floatingip()
+
+ # Store the received information to be used later
+ resp['port1'] = port1
+ resp['server_floating_ip_1'] = server_floating_ip_1
+
+ resp['port2'] = port2
+ resp['server_floating_ip_2'] = server_floating_ip_2
+
+ resp['port3'] = port3
+ resp['server_floating_ip_3'] = server_floating_ip_3
+
+ return resp
+
+ @utils.services('network')
+ @utils.requires_ext(extension="taas-vlan-filter", service="network")
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('40903cbd-0e3c-464d-b311-dc77d3894e65')
+ def test_tap_flow_data_mirroring(self):
+ """Create test topology and TaaS resources
+
+ Creates test topology consisting of 3 servers, one routable network,
+ ports and TaaS resources, i.e. tap-service and tap-flow using those
+ ports.
+ """
+ self.network, self.subnet, self.router = self.create_networks()
+ topology = self._create_topology()
+
+ # Create Tap-Service.
+ tap_service = self.tap_services_client.create_tap_service(
+ port_id=topology['port1']['id'])['tap_service']
+
+ LOG.debug('TaaS Config options: vlan-filter: %s',
+ CONF.taas.vlan_filter)
+
+ # Create Tap-Flow.
+ vnic_type = CONF.network.port_vnic_type
+ vlan_filter = None
+ if vnic_type == 'direct':
+ vlan_filter = '108-117,126,135-144'
+ if CONF.taas.vlan_filter:
+ vlan_filter = CONF.taas.vlan_filter
+ elif topology['network1']['provider:segmentation_id'] != '0':
+ vlan_filter = topology['network1']['provider:segmentation_id']
+
+ tap_flow = self.tap_flows_client.create_tap_flow(
+ tap_service_id=tap_service['id'], direction='BOTH',
+ source_port=topology['port3']['id'],
+ vlan_filter=vlan_filter)['tap_flow']
+
+ self.assertEqual(tap_flow['vlan_filter'], vlan_filter)
diff --git a/neutron_tempest_plugin/tap_as_a_service/scenario/test_traffic_impact.py b/neutron_tempest_plugin/tap_as_a_service/scenario/test_traffic_impact.py
new file mode 100644
index 0000000..e2b14c7
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/scenario/test_traffic_impact.py
@@ -0,0 +1,261 @@
+# Copyright (c) 2019 AT&T
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from contextlib import contextmanager
+from oslo_log import log
+import testtools
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils.linux import remote_client
+from tempest.lib.common.utils import test_utils
+
+from tempest.lib import decorators
+
+from neutron_tempest_plugin.tap_as_a_service.scenario import manager
+
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+
+class TestTaaSTrafficScenarios(manager.BaseTaasScenarioTests):
+
+ @classmethod
+ @utils.requires_ext(extension='taas', service='network')
+ @utils.requires_ext(extension='security-group', service='network')
+ @utils.requires_ext(extension='router', service='network')
+ def skip_checks(cls):
+ super(TestTaaSTrafficScenarios, cls).skip_checks()
+
+ @classmethod
+ def resource_setup(cls):
+ super(TestTaaSTrafficScenarios, cls).resource_setup()
+ cls.provider_network = None
+ cls.keypair = cls.create_keypair()
+ cls.secgroup = cls.create_security_group(
+ name=data_utils.rand_name('secgroup'))
+ cls.create_loginable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+ cls.create_pingable_secgroup_rule(secgroup_id=cls.secgroup['id'])
+
+ @contextmanager
+ def _setup_topology(self, taas=True, use_taas_cloud_image=False,
+ provider_net=False):
+ """Setup topology for the test
+
+ +------------+
+ | monitor vm |
+ +-----+------+
+ |
+ +-----v---+
+ +--+ network <--+
+ | +----^----+ |
+ | | |
+ | +----+-+ +---+--+
+ | | vm 1 | | vm 2 |
+ | +------+ +------+
+ |
+ | +--------+
+ +--> router |
+ +-----+--+
+ |
+ +-----v------+
+ | public net |
+ +------------+
+ """
+ self.network, self.subnet, self.router = self.create_networks()
+ LOG.debug('Setup topology subnet details: %s ', self.subnet)
+ if provider_net:
+ if CONF.taas.provider_physical_network:
+ self.provider_network = self._setup_provider_network()
+ else:
+ msg = "provider_physical_network not provided"
+ raise self.skipException(msg)
+
+ self.mon_port, mon_fip = self._create_server_with_floatingip(
+ use_taas_cloud_image=use_taas_cloud_image,
+ provider_net=provider_net)
+ LOG.debug('Setup topology monitor port: %s ### monitor FIP: %s ',
+ self.mon_port, mon_fip)
+ self.left_port, self.left_fip = self._create_server_with_floatingip(
+ provider_net=provider_net)
+ LOG.debug('Setup topology left port: %s ### left FIP: %s ',
+ self.left_port, self.left_fip)
+ self.right_port, self.right_fip = self._create_server_with_floatingip(
+ provider_net=provider_net)
+ LOG.debug('Setup topology right port: %s ### right FIP: %s ',
+ self.right_port, self.right_fip)
+
+ if taas:
+ LOG.debug("Create TAAS service")
+ tap_service = self.tap_services_client.create_tap_service(
+ port_id=self.mon_port['id'])['tap_service']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.tap_services_client.delete_tap_service,
+ tap_service['id'])
+ tap_flow = self.tap_flows_client.create_tap_flow(
+ tap_service_id=tap_service['id'], direction='BOTH',
+ source_port=self.left_port['id'])['tap_flow']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.tap_flows_client.delete_tap_flow,
+ tap_flow['id'])
+ tap_flow = self.tap_flows_client.create_tap_flow(
+ tap_service_id=tap_service['id'], direction='BOTH',
+ source_port=self.right_port['id'])['tap_flow']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.tap_flows_client.delete_tap_flow,
+ tap_flow['id'])
+
+ user = CONF.validation.image_ssh_user
+ if use_taas_cloud_image:
+ user = CONF.neutron_plugin_options.advanced_image_ssh_user
+
+ self.monitor_client = remote_client.RemoteClient(
+ mon_fip['floating_ip_address'], user,
+ pkey=self.keypair['private_key'])
+ self.monitor_client.validate_authentication()
+ self.left_client = remote_client.RemoteClient(
+ self.left_fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+ self.left_client.validate_authentication()
+ self.right_client = remote_client.RemoteClient(
+ self.right_fip['floating_ip_address'],
+ CONF.validation.image_ssh_user,
+ pkey=self.keypair['private_key'])
+ self.right_client.validate_authentication()
+ yield
+
+ def _check_icmp_traffic(self):
+ log_location = "/tmp/tcpdumplog"
+
+ right_ip = self.right_port['fixed_ips'][0]['ip_address']
+ left_ip = self.left_port['fixed_ips'][0]['ip_address']
+
+ # Run tcpdump in background
+ self._run_in_background(self.monitor_client,
+ "sudo tcpdump -n -nn > %s" % log_location)
+
+ # Ensure tcpdump is up and running
+ psax = self.monitor_client.exec_command("ps -ax")
+ self.assertTrue("tcpdump" in psax)
+
+ # Run traffic from left_vm to right_vm
+ LOG.debug('Check ICMP traffic: ping %s ', right_ip)
+ # self.left_client.exec_command(
+ # "ping -c 50 %s" % self.right_fip['floating_ip_address'])
+ self.check_remote_connectivity(self.left_client, right_ip,
+ ping_count=50)
+
+ # Collect tcpdump results
+ output = self.monitor_client.exec_command("cat %s" % log_location)
+ self.assertLess(0, len(output))
+
+ looking_for = ["IP %s > %s: ICMP echo request" % (left_ip, right_ip),
+ "IP %s > %s: ICMP echo reply" % (right_ip, left_ip)]
+
+ results = []
+ for tcpdump_line in looking_for:
+ results.append(tcpdump_line in output)
+
+ return all(results)
+
+ def _test_taas_connectivity(self, use_provider_net=False):
+ """Ensure TAAS doesn't break connectivity
+
+ This test creates TAAS service between two servers and checks that
+ it doesn't break basic connectivity between them.
+ """
+ # Check uninterrupted traffic between VMs
+ with self._setup_topology(provider_net=use_provider_net):
+ # Left to right
+ self.check_remote_connectivity(
+ self.left_client,
+ self.right_port['fixed_ips'][0]['ip_address'])
+
+ # Right to left
+ self.check_remote_connectivity(
+ self.right_client,
+ self.left_port['fixed_ips'][0]['ip_address'])
+
+ # TAAS vm to right
+ self.check_remote_connectivity(
+ self.monitor_client,
+ self.right_port['fixed_ips'][0]['ip_address'])
+
+ # TAAS vm to left
+ self.check_remote_connectivity(
+ self.monitor_client,
+ self.left_port['fixed_ips'][0]['ip_address'])
+
+ @decorators.idempotent_id('ff414b7d-e81c-47f2-b6c8-53bc2f1e9b00')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_taas_provider_network_connectivity(self):
+ self._test_taas_connectivity(use_provider_net=True)
+
+ @decorators.idempotent_id('e3c52e91-7abf-4dfd-8687-f7c071cdd333')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_taas_network_connectivity(self):
+ self._test_taas_connectivity(use_provider_net=False)
+
+ @decorators.idempotent_id('fcb15ca3-ef61-11e9-9792-f45c89c47e11')
+ @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+ 'Cloud image not found.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_taas_forwarded_traffic_positive(self):
+ """Check that TAAS forwards traffic as expected"""
+
+ with self._setup_topology(use_taas_cloud_image=True):
+ # Check that traffic was forwarded to TAAS service
+ self.assertTrue(self._check_icmp_traffic())
+
+ @decorators.idempotent_id('6c54d9c5-075a-4a1f-bbe6-12c3c9abf1e2')
+ @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+ 'Cloud image not found.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_taas_forwarded_traffic_negative(self):
+ """Check that TAAS doesn't forward traffic"""
+
+ with self._setup_topology(taas=False, use_taas_cloud_image=True):
+ # Check that traffic was NOT forwarded to TAAS service
+ self.assertFalse(self._check_icmp_traffic())
+
+ @decorators.idempotent_id('fcb15ca3-ef61-11e9-9792-f45c89c47e12')
+ @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+ 'Cloud image not found.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_taas_forwarded_traffic_provider_net_positive(self):
+ """Check that TAAS forwards traffic as expected in provider network"""
+
+ with self._setup_topology(use_taas_cloud_image=True,
+ provider_net=True):
+ # Check that traffic was forwarded to TAAS service
+ self.assertTrue(self._check_icmp_traffic())
+
+ @decorators.idempotent_id('6c54d9c5-075a-4a1f-bbe6-12c3c9abf1e3')
+ @testtools.skipUnless(CONF.neutron_plugin_options.advanced_image_ref,
+ 'Cloud image not found.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_taas_forwarded_traffic_provider_net_negative(self):
+ """Check that TAAS doesn't forward traffic in provider network"""
+
+ with self._setup_topology(taas=False, use_taas_cloud_image=True,
+ provider_net=True):
+ # Check that traffic was NOT forwarded to TAAS service
+ self.assertFalse(self._check_icmp_traffic())
diff --git a/neutron_tempest_plugin/tap_as_a_service/services/__init__.py b/neutron_tempest_plugin/tap_as_a_service/services/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/services/__init__.py
diff --git a/neutron_tempest_plugin/tap_as_a_service/services/taas_client.py b/neutron_tempest_plugin/tap_as_a_service/services/taas_client.py
new file mode 100644
index 0000000..7230cbb
--- /dev/null
+++ b/neutron_tempest_plugin/tap_as_a_service/services/taas_client.py
@@ -0,0 +1,63 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib.services.network import base
+
+
+class TapServicesClient(base.BaseNetworkClient):
+
+ def create_tap_service(self, **kwargs):
+ uri = '/taas/tap_services'
+ post_data = {'tap_service': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_tap_service(self, tap_service_id, **kwargs):
+ uri = '/taas/tap_services/%s' % tap_service_id
+ post_data = {'tap_service': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_tap_service(self, tap_service_id, **fields):
+ uri = '/taas/tap_services/%s' % tap_service_id
+ return self.show_resource(uri, **fields)
+
+ def delete_tap_service(self, tap_service_id):
+ uri = '/taas/tap_services/%s' % tap_service_id
+ return self.delete_resource(uri)
+
+ def list_tap_services(self, **filters):
+ uri = '/taas/tap_services'
+ return self.list_resources(uri, **filters)
+
+
+class TapFlowsClient(base.BaseNetworkClient):
+
+ def create_tap_flow(self, **kwargs):
+ uri = '/taas/tap_flows'
+ post_data = {'tap_flow': kwargs}
+ return self.create_resource(uri, post_data)
+
+ def update_tap_flow(self, tap_flow_id, **kwargs):
+ uri = '/taas/tap_flows/%s' % tap_flow_id
+ post_data = {'tap_flow': kwargs}
+ return self.update_resource(uri, post_data)
+
+ def show_tap_flow(self, tap_flow_id, **fields):
+ uri = '/taas/tap_flows/%s' % tap_flow_id
+ return self.show_resource(uri, **fields)
+
+ def delete_tap_flow(self, tap_flow_id):
+ uri = '/taas/tap_flows/%s' % tap_flow_id
+ return self.delete_resource(uri)
+
+ def list_tap_flows(self, **filters):
+ uri = '/taas/tap_flows'
+ return self.list_resources(uri, **filters)
diff --git a/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py b/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
index 1a51198..92eed9e 100644
--- a/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
+++ b/neutron_tempest_plugin/vpnaas/scenario/test_vpnaas.py
@@ -233,7 +233,8 @@
left_server = self._create_server()
ssh_client = ssh.Client(left_server['fip']['floating_ip_address'],
CONF.validation.image_ssh_user,
- pkey=self.keypair['private_key'])
+ pkey=self.keypair['private_key'],
+ ssh_key_type=CONF.validation.ssh_key_type)
# check LEFT -> RIGHT connectivity via VPN
self.check_remote_connectivity(ssh_client, right_ip,
diff --git a/requirements.txt b/requirements.txt
index 3edf7dc..21f14cc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -11,8 +11,7 @@
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
oslo.utils>=3.33.0 # Apache-2.0
paramiko>=2.0.0 # LGPLv2.1+
-six>=1.10.0 # MIT
-tempest>=17.1.0 # Apache-2.0
+tempest>=29.2.0 # Apache-2.0
tenacity>=3.2.1 # Apache-2.0
ddt>=1.0.1 # MIT
nose>=1.3.7 # LGPL
diff --git a/setup.cfg b/setup.cfg
index 144569f..3e352a0 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,12 +1,12 @@
[metadata]
name = neutron-tempest-plugin
summary = Tempest plugin for Neutron Project
-description-file =
+description_file =
README.rst
author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = https://opendev.org/openstack/neutron-tempest-plugin
-python-requires = >=3.6
+author_email = openstack-discuss@lists.openstack.org
+home_page = https://opendev.org/openstack/neutron-tempest-plugin
+python_requires = >=3.6
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -18,6 +18,7 @@
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
[files]
packages =
diff --git a/test-requirements.txt b/test-requirements.txt
index bf1c626..f5bac7c 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,10 +7,6 @@
coverage!=4.4,>=4.0 # Apache-2.0
flake8-import-order==0.12 # LGPLv3
python-subunit>=1.0.0 # Apache-2.0/BSD
-sphinx>=2.0.0,!=2.1.0 # BSD
oslotest>=3.2.0 # Apache-2.0
stestr>=1.0.0 # Apache-2.0
testtools>=2.2.0 # MIT
-openstackdocstheme>=2.2.1 # Apache-2.0
-# releasenotes
-reno>=3.1.0 # Apache-2.0
diff --git a/tools/customize_ubuntu_image b/tools/customize_ubuntu_image
index 3697265..fdd2d12 100755
--- a/tools/customize_ubuntu_image
+++ b/tools/customize_ubuntu_image
@@ -16,6 +16,13 @@
INSTALL_GUEST_PACKAGES=(
socat # used to replace nc for testing advanced network features like
# multicast
+ iperf3
+ iputils-ping
+ ncat
+ psmisc # provides killall command
+ python3
+ tcpdump
+ vlan
)
# Function to be executed once after chroot on guest image
@@ -33,8 +40,8 @@
# Install desired packages to Ubuntu guest image
(
DEBIAN_FRONTEND=noninteractive
- apt-get update -y
- apt-get install -y "${INSTALL_GUEST_PACKAGES[@]}"
+ sudo apt-get update -y
+ sudo apt-get install -y "${INSTALL_GUEST_PACKAGES[@]}"
)
}
diff --git a/tox.ini b/tox.ini
index eecd16e..ff50b9d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
[tox]
-minversion = 3.1
+minversion = 3.18.0
envlist = pep8
skipsdist = True
ignore_basepython_conflict = True
@@ -14,7 +14,7 @@
OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true}
OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true}
deps =
- -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
-r{toxinidir}/test-requirements.txt
commands = stestr run --slowest {posargs}
@@ -22,7 +22,7 @@
commands =
sh ./tools/misc-sanity-checks.sh
flake8
-whitelist_externals =
+allowlist_externals =
sh
[testenv:venv]
@@ -39,9 +39,12 @@
coverage xml -o cover/coverage.xml
[testenv:docs]
+deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+ -r{toxinidir}/doc/requirements.txt
commands = sphinx-build -W -b html doc/source doc/build/html
[testenv:releasenotes]
+deps = {[testenv:docs]deps}
commands =
sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
diff --git a/zuul.d/base-nested-switch.yaml b/zuul.d/base-nested-switch.yaml
new file mode 100644
index 0000000..69e841f
--- /dev/null
+++ b/zuul.d/base-nested-switch.yaml
@@ -0,0 +1,32 @@
+- nodeset:
+ name: neutron-nested-virt-ubuntu-focal
+ nodes:
+ - name: controller
+ label: nested-virt-ubuntu-focal
+ groups:
+ - name: tempest
+ nodes:
+ - controller
+
+# Base nested switch job for non-EM releases
+- job:
+ name: neutron-tempest-plugin-scenario-nested-switch
+ parent: neutron-tempest-plugin-scenario
+ abstract: true
+ branches: ^(?!stable/(queens|rocky|stein|train|ussuri)).*$
+ # Comment nodeset and vars to switch back to non nested nodes
+ nodeset: neutron-nested-virt-ubuntu-focal
+ vars:
+ devstack_localrc:
+ LIBVIRT_TYPE: kvm
+ LIBVIRT_CPU_MODE: host-passthrough
+ CIRROS_VERSION: 0.5.1
+ DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-disk
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-disk.img
+
+# Base nested switch job for EM releases
+- job:
+ name: neutron-tempest-plugin-scenario-nested-switch
+ parent: neutron-tempest-plugin-scenario
+ abstract: true
+ branches: ^(stable/(queens|rocky|stein|train|ussuri)).*$
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 77947fa..4efcd0c 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -7,7 +7,6 @@
roles:
- zuul: openstack/devstack
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- openstack/neutron-tempest-plugin
- openstack/tempest
@@ -18,6 +17,8 @@
USE_PYTHON3: true
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
CIRROS_VERSION: 0.5.1
+ DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
BUILD_TIMEOUT: 784
devstack_plugins:
neutron: https://opendev.org/openstack/neutron.git
@@ -25,7 +26,6 @@
tempest_plugins:
- neutron-tempest-plugin
devstack_services:
- tls-proxy: false
tempest: true
neutron-dns: true
neutron-qos: true
@@ -38,6 +38,17 @@
neutron-tag-ports-during-bulk-creation: true
br-ex-tcpdump: true
br-int-flows: true
+ # Cinder services
+ c-api: false
+ c-bak: false
+ c-sch: false
+ c-vol: false
+ cinder: false
+ # We don't need Swift to be run in the Neutron jobs
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -64,11 +75,6 @@
$NEUTRON_DHCP_CONF:
agent:
availability_zone: nova
- /etc/neutron/api-paste.ini:
- composite:neutronapi_v2_0:
- use: call:neutron.auth:pipeline_factory
- noauth: cors request_id catch_errors osprofiler extensions neutronapiapp_v2_0
- keystone: cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0
test-config:
$TEMPEST_CONFIG:
neutron_plugin_options:
@@ -77,16 +83,23 @@
image_is_advanced: true
available_type_drivers: flat,geneve,vlan,gre,local,vxlan
provider_net_base_segm_id: 1
- irrelevant-files: &tempest-irrelevant-files
+ irrelevant-files:
- ^(test-|)requirements.txt$
+ - lower-constraints.txt
- ^releasenotes/.*$
- ^doc/.*$
+ - ^.*\.conf\.sample$
- ^setup.cfg$
- ^.*\.rst$
- ^neutron/locale/.*$
- ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
- ^tools/.*$
- ^tox.ini$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-scenario
@@ -99,15 +112,20 @@
# default test timeout set to 1200 seconds may be not enough if job is
# run on slow node
tempest_test_timeout: 2400
- tempest_test_regex: ^neutron_tempest_plugin\.scenario
+ tempest_test_regex: "\
+ (^neutron_tempest_plugin.scenario)|\
+ (^tempest.api.compute.servers.test_attach_interfaces)|\
+ (^tempest.api.compute.servers.test_multiple_create)"
devstack_localrc:
PHYSICAL_NETWORK: default
- CIRROS_VERSION: 0.5.1
- IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
- ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
- ADVANCED_INSTANCE_TYPE: ds512M
+ IMAGE_URLS: https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-20.04-minimal-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ntp_image_256M
ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: true
BUILD_TIMEOUT: 784
- devstack_services:
- cinder: true
-
+ tempest_concurrency: 3 # out of 4
+ zuul_copy_output:
+ '/var/log/ovn': 'logs'
+ '/var/log/openvswitch': 'logs'
+ '/var/lib/ovn': 'logs'
diff --git a/zuul.d/master_jobs.yaml b/zuul.d/master_jobs.yaml
index 5cdf6eb..53e85f3 100644
--- a/zuul.d/master_jobs.yaml
+++ b/zuul.d/master_jobs.yaml
@@ -6,6 +6,7 @@
# neutron repository and keep it different per branch,
# then it could be removed from here
network_api_extensions_common: &api_extensions
+ - address-group
- address-scope
- agent
- allowed-address-pairs
@@ -16,6 +17,7 @@
- dhcp_agent_scheduler
- dns-domain-ports
- dns-integration
+ - dns-integration-domain-keywords
- empty-string-filtering
- expose-port-forwarding-in-fip
- expose-l3-conntrack-helper
@@ -43,7 +45,9 @@
- network_availability_zone
- network-segment-range
- pagination
+ - port-device-profile
- port-resource-request
+ - port-resource-request-groups
- port-mac-address-regenerate
- port-security
- port-security-groups-filtering
@@ -54,6 +58,7 @@
- qos-fip
- quotas
- quota_details
+ - rbac-address-group
- rbac-address-scope
- rbac-policies
- rbac-security-groups
@@ -62,6 +67,7 @@
- router-admin-state-down-before-update
- router_availability_zone
- security-group
+ - security-groups-remote-address-group
- segment
- service-type
- sorting
@@ -70,8 +76,10 @@
- standard-attr-segment
- standard-attr-tag
- standard-attr-timestamp
+ - stateful-security-group
- subnet_allocation
- subnet-dns-publish-fixed-ip
+ - subnet-service-types
- subnetpool-prefix-ops
- tag-ports-during-bulk-creation
- trunk
@@ -79,9 +87,21 @@
- uplink-status-propagation
network_api_extensions_tempest:
- dvr
+ network_available_features: &available_features
+ - ipv6_metadata
tempest_test_regex: ^neutron_tempest_plugin\.api
devstack_services:
neutron-log: true
+ devstack_localrc:
+ # TODO(lucasagomes): Re-enable MOD_WSGI after
+ # https://bugs.launchpad.net/neutron/+bug/1912359 is implemented
+ NEUTRON_DEPLOY_MOD_WSGI: false
+ # TODO(ralonsoh): remove OVN_BUILD_FROM_SOURCE once the OS packages
+ # include at least OVN v20.12.0.
+ OVN_BUILD_FROM_SOURCE: True
+ OVN_BRANCH: "v21.03.0"
+ OVS_BRANCH: "8dc1733eaea866dce033b3c44853e1b09bf59fc7"
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions_common | join(',') }}"
devstack_local_conf:
post-config:
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
@@ -92,21 +112,64 @@
tunnel_types: gre,vxlan
network_log:
local_output_log_base: /tmp/test_log.log
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^.*\.conf\.sample$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/.*$
+ - ^neutron/privileged/.*$
+ - ^neutron_lib/tests/unit/.*$
+ - ^neutron_tempest_plugin/scenario/.*$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-scenario-openvswitch
- parent: neutron-tempest-plugin-scenario
+ parent: neutron-tempest-plugin-scenario-nested-switch
timeout: 10000
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ neutron-local-ip-static: true
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
network_api_extensions: *api_extensions
+ network_api_extensions_openvswitch:
+ - local_ip
+ network_available_features: *available_features
devstack_localrc:
Q_AGENT: openvswitch
- NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_openvswitch) | join(',') }}"
devstack_local_conf:
post-config:
$NEUTRON_CONF:
DEFAULT:
enable_dvr: false
+ l3_ha: true
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
# devstack-tempest job will be switched to use lib/neutron instead of
# lib/neutron-legacy
@@ -119,26 +182,90 @@
openflow_processed_per_port: True
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
available_type_drivers: flat,vlan,local,vxlan
+ firewall_driver: openvswitch
+ irrelevant-files: &openvswitch-scenario-irrelevant-files
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^.*\.conf\.sample$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/ovn/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
+ - ^neutron/plugins/ml2/drivers/ovn/.*$
+ - ^neutron/services/ovn_l3/.*$
+ - ^neutron/services/logapi/drivers/ovn/.*$
+ - ^neutron/services/portforwarding/drivers/ovn/.*$
+ - ^neutron/services/qos/drivers/linuxbridge/.*$
+ - ^neutron/services/qos/drivers/ovn/.*$
+ - ^neutron/services/trunk/drivers/linuxbridge/.*$
+ - ^neutron/services/trunk/drivers/ovn/.*$
+ - ^neutron/cmd/ovn/.*$
+ - ^neutron/common/ovn/.*$
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
- parent: neutron-tempest-plugin-scenario
+ parent: neutron-tempest-plugin-scenario-nested-switch
timeout: 10000
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ neutron-local-ip: true
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
network_api_extensions: *api_extensions
+ network_api_extensions_openvswitch:
+ - local_ip
+ network_available_features: *available_features
+ # TODO(slaweq): remove trunks subport_connectivity test from the exclude list
# when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
- tempest_black_regex: "(^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)"
+ # TODO(akatz): remove established tcp session verification test when the
+ # bug https://bugzilla.redhat.com/show_bug.cgi?id=1965036 will be fixed
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)"
devstack_localrc:
Q_AGENT: openvswitch
- NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_openvswitch) | join(',') }}"
devstack_local_conf:
post-config:
$NEUTRON_CONF:
DEFAULT:
enable_dvr: false
+ l3_ha: true
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
# devstack-tempest job will be switched to use lib/neutron instead of
# lib/neutron-legacy
@@ -152,26 +279,128 @@
firewall_driver: iptables_hybrid
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
available_type_drivers: flat,vlan,local,vxlan
+ firewall_driver: iptables_hybrid
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^.*\.conf\.sample$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/linux/openvswitch_firewall/.*$
+ - ^neutron/agent/ovn/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
+ - ^neutron/plugins/ml2/drivers/ovn/.*$
+ - ^neutron/services/ovn_l3/.*$
+ - ^neutron/services/logapi/drivers/ovn/.*$
+ - ^neutron/services/portforwarding/drivers/ovn/.*$
+ - ^neutron/services/qos/drivers/linuxbridge/.*$
+ - ^neutron/services/qos/drivers/ovn/.*$
+ - ^neutron/services/trunk/drivers/linuxbridge/.*$
+ - ^neutron/services/trunk/drivers/ovn/.*$
+ - ^neutron/cmd/ovn/.*$
+ - ^neutron/common/ovn/.*$
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-distributed-dhcp
+ parent: neutron-tempest-plugin-scenario-openvswitch
+ timeout: 10000
+ vars:
+ # NOTE: DHCP extra options and dns services aren't supported with
+ # distributed DHCP L2 agent extension
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_dhcp.DHCPTest.test_extra_dhcp_opts)|\
+ (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
+ devstack_services:
+ q-dhcp: false
+ q-distributed-dhcp: true
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-distributed-dhcp
+ parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ timeout: 10000
+ vars:
+ # NOTE: DHCP extra options and dns services aren't supported with
+ # distributed DHCP L2 agent extension
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_dhcp.DHCPTest.test_extra_dhcp_opts)|\
+ (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
+ devstack_services:
+ q-dhcp: false
+ q-distributed-dhcp: true
- job:
name: neutron-tempest-plugin-scenario-linuxbridge
- parent: neutron-tempest-plugin-scenario
+ parent: neutron-tempest-plugin-scenario-nested-switch
timeout: 10000
roles:
- zuul: openstack/neutron
pre-run: playbooks/linuxbridge-scenario-pre-run.yaml
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
network_api_extensions: *api_extensions
+ network_api_extensions_linuxbridge:
+ - vlan-transparent
+ network_available_features: *available_features
+ # TODO(eolivare): remove VLAN Transparency tests from the exclude list
+ # when bug https://bugs.launchpad.net/neutron/+bug/1907548 will be fixed
+ # TODO(slaweq): remove
+ # test_established_tcp_session_after_re_attachinging_sg from the
+ # exclude regex when bug https://bugs.launchpad.net/neutron/+bug/1936911
+ # will be fixed
+ # TODO(slaweq) remove test_floatingip_port_details from the exclude
+ # regex when bug https://bugs.launchpad.net/neutron/+bug/1799790 will be
+ # fixed
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_established_tcp_session_after_re_attachinging_sg)|\
+ (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPPortDetailsTest.test_floatingip_port_details)"
devstack_localrc:
Q_AGENT: linuxbridge
- NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_linuxbridge) | join(',') }}"
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
devstack_local_conf:
post-config:
$NEUTRON_CONF:
DEFAULT:
enable_dvr: false
+ vlan_transparent: true
+ l3_ha: true
AGENT:
debug_iptables_rules: true
# NOTE(slaweq): We can get rid of this hardcoded absolute path when
@@ -180,34 +409,92 @@
/$NEUTRON_CORE_PLUGIN_CONF:
ml2:
type_drivers: flat,vlan,local,vxlan
+ mechanism_drivers: linuxbridge
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
available_type_drivers: flat,vlan,local,vxlan
q_agent: linuxbridge
+ firewall_driver: iptables
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^.*\.conf\.sample$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/linux/openvswitch_firewall/.*$
+ - ^neutron/agent/ovn/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/openvswitch/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
+ - ^neutron/plugins/ml2/drivers/ovn/.*$
+ - ^neutron/services/ovn_l3/.*$
+ - ^neutron/services/logapi/drivers/openvswitch/.*$
+ - ^neutron/services/logapi/drivers/ovn/.*$
+ - ^neutron/services/portforwarding/drivers/ovn/.*$
+ - ^neutron/services/qos/drivers/openvswitch/.*$
+ - ^neutron/services/qos/drivers/ovn/.*$
+ - ^neutron/services/trunk/drivers/openvswitch/.*$
+ - ^neutron/services/trunk/drivers/ovn/.*$
+ - ^neutron/cmd/ovn/.*$
+ - ^neutron/common/ovn/.*$
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-scenario-ovn
- parent: neutron-tempest-plugin-scenario
- timeout: 10000
+ parent: neutron-tempest-plugin-scenario-nested-switch
+ timeout: 10800
vars:
network_api_extensions: *api_extensions
- # TODO(haleyb): Remove this blacklist when
- # https://bugs.launchpad.net/neutron/+bug/1881558 is fixed.
- tempest_black_regex: "(?:neutron_tempest_plugin.scenario.test_ipv6.IPv6Test)"
+ network_api_extensions_ovn:
+ - vlan-transparent
+ # TODO(jlibosva): Remove the NetworkWritableMtuTest test from the list
+ # once east/west fragmentation is supported in core OVN
+ tempest_exclude_regex: "\
+ (^neutron_tempest_plugin.scenario.test_mtu.NetworkWritableMtuTest)"
devstack_localrc:
Q_AGENT: ovn
- NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_ovn) | join(',') }}"
Q_ML2_PLUGIN_MECHANISM_DRIVERS: ovn,logger
Q_ML2_PLUGIN_TYPE_DRIVERS: local,flat,vlan,geneve
Q_ML2_TENANT_NETWORK_TYPE: geneve
Q_USE_PROVIDERNET_FOR_PUBLIC: true
PHYSICAL_NETWORK: public
+ # NOTE(slaweq): In the job with OVN backend we can't use the Ubuntu minimal
+ image because the kernel in that image doesn't support MULTICAST traffic,
+ so the multicast scenario test with IGMP snooping enabled would fail
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ntp_image_384M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
ENABLE_CHASSIS_AS_GW: true
OVN_L3_CREATE_PUBLIC_NETWORK: true
OVN_DBS_LOG_LEVEL: dbg
ENABLE_TLS: True
OVN_IGMP_SNOOPING_ENABLE: True
+ # TODO(eolivare): Remove OVN_BUILD_FROM_SOURCE once vlan-transparency
+ # is included in an ovn released version
+ OVN_BUILD_FROM_SOURCE: True
+ OVN_BRANCH: "v21.06.0"
+ OVS_BRANCH: "a4b04276ab5934d087669ff2d191a23931335c87"
+ OVS_SYSCONFDIR: "/usr/local/etc/openvswitch"
devstack_services:
br-ex-tcpdump: true
br-int-flows: true
@@ -221,27 +508,80 @@
q-l3: false
q-meta: false
q-metering: false
+ q-qos: true
+ # Cinder services
+ c-api: false
+ c-bak: false
+ c-sch: false
+ c-vol: false
+ cinder: false
s-account: false
s-container-sync: false
s-container: false
s-object: false
s-proxy: false
- tls-proxy: true
- q-qos: true
devstack_local_conf:
post-config:
$NEUTRON_CONF:
DEFAULT:
enable_dvr: false
+ vlan_transparent: true
/$NEUTRON_CORE_PLUGIN_CONF:
ml2:
type_drivers: local,flat,vlan,geneve
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
available_type_drivers: local,flat,vlan,geneve
- ipv6_metadata: False
is_igmp_snooping_enabled: True
+ firewall_driver: ovn
+ zuul_copy_output:
+ '{{ devstack_base_dir }}/data/ovs': 'logs'
+ '{{ devstack_base_dir }}/data/ovn': 'logs'
+ '{{ devstack_log_dir }}/ovsdb-server-nb.log': 'logs'
+ '{{ devstack_log_dir }}/ovsdb-server-sb.log': 'logs'
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^.*\.conf\.sample$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/dhcp/.*$
+ - ^neutron/agent/l2/.*$
+ - ^neutron/agent/l3/.*$
+ - ^neutron/agent/metadata/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/agent/dhcp_agent.py
+ - ^neutron/agent/l3_agent.py
+ - ^neutron/agent/metadata_agent.py
+ - ^neutron/agent/resource_cache.py
+ - ^neutron/agent/rpc.py
+ - ^neutron/agent/securitygroup_rpc.py
+ - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
+ - ^neutron/plugins/ml2/drivers/openvswitch/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
+ - ^neutron/services/qos/drivers/linuxbridge/.*$
+ - ^neutron/services/qos/drivers/openvswitch/.*$
+ - ^neutron/services/trunk/drivers/linuxbridge/.*$
+ - ^neutron/services/trunk/drivers/openvswitch/.*$
+ - ^neutron/scheduler/.*$
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario
@@ -251,7 +591,6 @@
roles:
- zuul: openstack/devstack
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- openstack/neutron-tempest-plugin
- openstack/tempest
@@ -273,26 +612,56 @@
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
PHYSICAL_NETWORK: default
CIRROS_VERSION: 0.5.1
- IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
- ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
- ADVANCED_INSTANCE_TYPE: ds512M
+ DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
+ IMAGE_URLS: https://cloud-images.ubuntu.com/minimal/releases/focal/release/ubuntu-20.04-minimal-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-20.04-minimal-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ntp_image_256M
ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: true
BUILD_TIMEOUT: 784
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
devstack_plugins:
neutron: https://opendev.org/openstack/neutron.git
neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
tempest_plugins:
- neutron-tempest-plugin
devstack_services:
- tls-proxy: false
+ tls-proxy: true
tempest: true
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
neutron-dns: true
neutron-qos: true
neutron-segments: true
neutron-trunk: true
neutron-log: true
neutron-port-forwarding: true
- cinder: true
+ # Cinder services
+ c-api: false
+ c-bak: false
+ c-sch: false
+ c-vol: false
+ cinder: false
+ # We don't need Swift to be run in the Neutron jobs
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -338,16 +707,28 @@
keystone: "cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0"
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: *available_features
neutron_plugin_options:
provider_vlans: foo,
agent_availability_zone: nova
image_is_advanced: true
available_type_drivers: flat,geneve,vlan,gre,local,vxlan
l3_agent_mode: dvr_snat
+ firewall_driver: openvswitch
group-vars:
subnode:
devstack_services:
- tls-proxy: false
+ tls-proxy: true
+ br-ex-tcpdump: false
+ br-int-flows: false
+ # Disable OVN services
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
q-agt: true
q-l3: true
q-meta: true
@@ -355,8 +736,19 @@
neutron-trunk: true
neutron-log: true
neutron-port-forwarding: true
+ # Cinder services
+ c-bak: false
+ c-vol: false
+ # We don't need Swift to be run in the Neutron jobs
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
devstack_localrc:
USE_PYTHON3: true
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
devstack_local_conf:
post-config:
$NEUTRON_CONF:
@@ -378,20 +770,11 @@
agent_mode: dvr_snat
agent:
availability_zone: nova
- irrelevant-files: &tempest-irrelevant-files
- - ^(test-|)requirements.txt$
- - ^releasenotes/.*$
- - ^doc/.*$
- - ^setup.cfg$
- - ^.*\.rst$
- - ^neutron.*/locale/.*$
- - ^neutron.*/tests/unit/.*$
- - ^tools/.*$
- - ^tox.ini$
+ irrelevant-files: *openvswitch-scenario-irrelevant-files
- job:
name: neutron-tempest-plugin-designate-scenario
- parent: neutron-tempest-plugin-scenario
+ parent: neutron-tempest-plugin-scenario-nested-switch
description: Neutron designate integration scenario
required-projects:
- openstack/designate
@@ -402,25 +785,10 @@
network_api_extensions_common: *api_extensions
devstack_localrc:
DESIGNATE_BACKEND_DRIVER: bind9
- Q_AGENT: openvswitch
# In this job advanced image is not needed, so it's name should be
# empty
ADVANCED_IMAGE_NAME: ""
NETWORK_API_EXTENSIONS: "{{ network_api_extensions_common | join(',') }}"
- devstack_local_conf:
- post-config:
- $NEUTRON_CONF:
- DEFAULT:
- enable_dvr: false
- # NOTE(slaweq): We can get rid of this hardcoded absolute path when
- # devstack-tempest job will be switched to use lib/neutron instead of
- # lib/neutron-legacy
- /$NEUTRON_CORE_PLUGIN_CONF:
- agent:
- tunnel_types: vxlan,gre
- ovs:
- tunnel_bridge: br-tun
- bridge_mappings: public:br-ex
devstack_plugins:
designate: https://opendev.org/openstack/designate.git
devstack_services:
@@ -430,19 +798,58 @@
- designate-tempest-plugin
- neutron-tempest-plugin
tempest_test_regex: ^neutron_tempest_plugin\.scenario\.test_dns_integration
- irrelevant-files: *tempest-irrelevant-files
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^.*\.conf\.sample$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/.*$
+ - ^neutron/cmd/.*$
+ - ^neutron/privileged/.*$
+ - ^neutron/plugins/ml2/drivers/.*$
+ - ^neutron/scheduler/.*$
+ - ^neutron/services/(?!externaldns).*$
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-sfc
parent: neutron-tempest-plugin-base
timeout: 10800
required-projects:
- - openstack/devstack-gate
- openstack/networking-sfc
- openstack/neutron
- openstack/neutron-tempest-plugin
- openstack/tempest
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Enable Neutron services that are not used by OVN
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
network_api_extensions_common: *api_extensions
tempest_test_regex: ^neutron_tempest_plugin\.sfc
devstack_plugins:
@@ -452,11 +859,34 @@
- flow_classifier
- sfc
devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_sfc) | join(',') }}"
# TODO(bcafarel): tests still fail from time to time in parallel
# https://bugs.launchpad.net/neutron/+bug/1851500
# https://bugs.launchpad.net/networking-sfc/+bug/1660366
tempest_concurrency: 1
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^.*\.conf\.sample$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-bgpvpn-bagpipe
@@ -465,12 +895,30 @@
- openstack/networking-bagpipe
- openstack/networking-bgpvpn
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Enable Neutron services that are not used by OVN
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
tempest_test_regex: ^neutron_tempest_plugin\.bgpvpn
network_api_extensions: *api_extensions
network_api_extensions_bgpvpn:
- bgpvpn
- bgpvpn-routes-control
devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
NETWORKING_BGPVPN_DRIVER: "BGPVPN:BaGPipe:networking_bgpvpn.neutron.services.service_drivers.bagpipe.bagpipe_v2.BaGPipeBGPVPNDriver:default"
BAGPIPE_DATAPLANE_DRIVER_IPVPN: "ovs"
BAGPIPE_BGP_PEERS: "-"
@@ -478,6 +926,26 @@
devstack_plugins:
networking-bgpvpn: https://git.openstack.org/openstack/networking-bgpvpn
networking-bagpipe: https://git.openstack.org/openstack/networking-bagpipe
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^.*\.conf\.sample$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(fwaas|neutron_dynamic_routing|sfc|tap_as_a_service|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
- job:
name: neutron-tempest-plugin-dynamic-routing
@@ -501,19 +969,90 @@
- bgp_4byte_asn
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_bgp) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-meta: true
+ q-metering: true
+ q-l3: true
neutron-dr: true
neutron-dr-agent: true
- q-l3: true
tempest_concurrency: 1
tempest_test_regex: ^neutron_tempest_plugin\.neutron_dynamic_routing
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^.*\.conf\.sample$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|sfc|tap_as_a_service|vpnaas).*$
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
+
+- job:
+ name: neutron-tempest-plugin-fwaas
+ parent: neutron-tempest-plugin-base
+ timeout: 10800
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron-fwaas
+ - openstack/neutron
+ - openstack/neutron-tempest-plugin
+ - openstack/tempest
+ vars:
+ tempest_test_regex: ^neutron_tempest_plugin\.fwaas
+ devstack_plugins:
+ neutron-fwaas: https://opendev.org/openstack/neutron-fwaas.git
+ neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
+ network_api_extensions_common: *api_extensions
+ network_api_extensions_fwaas:
+ - fwaas_v2
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_fwaas) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-meta: true
+ q-metering: true
+ q-l3: true
- job:
name: neutron-tempest-plugin-vpnaas
parent: neutron-tempest-plugin-base
timeout: 3900
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- openstack/neutron-vpnaas
- openstack/neutron-tempest-plugin
@@ -529,4 +1068,148 @@
devstack_localrc:
IPSEC_PACKAGE: strongswan
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_vpnaas) | join(',') }}"
- irrelevant-files: *tempest-irrelevant-files
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-meta: true
+ q-metering: true
+ q-l3: true
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^.*\.conf\.sample$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|tap_as_a_service).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
+
+- job:
+ name: neutron-tempest-plugin-tap-as-a-service
+ parent: neutron-tempest-plugin-base
+ description: |
+ Perform setup common to all tap-as-a-service tempest tests
+ roles:
+ - zuul: openstack/devstack
+ required-projects:
+ - openstack/devstack-gate
+ - openstack/neutron
+ - openstack/neutron-tempest-plugin
+ - openstack/tap-as-a-service
+ - openstack/tempest
+ vars:
+ tempest_test_regex: ^neutron_tempest_plugin\.tap_as_a_service
+ tox_envlist: all
+ network_api_extensions_common: *api_extensions
+ network_api_extensions_tempest:
+ - taas
+ - taas-vlan-filter
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ntp_image_384M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
+ BUILD_TIMEOUT: 784
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan,vlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ devstack_local_conf:
+ post-config:
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ AGENT:
+ tunnel_types: vxlan
+ ml2_type_vlan:
+ network_vlan_ranges: public
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ image_is_advanced: true
+ advanced_image_flavor_ref: d1
+ taas:
+ provider_physical_network: public
+ provider_segmentation_id: 100
+ image_feature_enabled:
+ api_v2: true
+ devstack_plugins:
+ neutron: git://opendev.org/openstack/neutron.git
+ neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
+ tap-as-a-service: git://opendev.org/openstack/tap-as-a-service.git
+ devstack_services:
+ # Disable OVN services
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Enable Neutron services that are not used by OVN
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
+ br-ex-tcpdump: true
+ br-int-flows: true
+ base: false
+ key: true
+ mysql: true
+ rabbit: true
+ g-api: true
+ g-reg: true
+ n-api: true
+ n-cond: true
+ n-cpu: true
+ n-crt: true
+ n-sch: true
+ placement-api: true
+ n-api-meta: true
+ q-svc: true
+ quantum: true
+ taas: true
+ taas_openvswitch_agent: true
+ tempest: true
+ dstat: true
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - lower-constraints.txt
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^.*\.conf\.sample$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^neutron_tempest_plugin/api/test_.*$
+ - ^neutron_tempest_plugin/(bgpvpn|fwaas|neutron_dynamic_routing|sfc|vpnaas).*$
+ - ^neutron_tempest_plugin/services/bgp/.*$
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^rally-jobs/.*$
+ - ^vagrant/.*$
+ - ^zuul.d/(?!(project)).*\.yaml
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 0355f69..0584523 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -3,15 +3,11 @@
check:
jobs:
- neutron-tempest-plugin-api
- - neutron-tempest-plugin-designate-scenario:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1891309
- # will be fixed
- voting: false
- neutron-tempest-plugin-scenario-linuxbridge
- neutron-tempest-plugin-scenario-openvswitch
- neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
- neutron-tempest-plugin-scenario-ovn
+ - neutron-tempest-plugin-designate-scenario
gate:
jobs:
- neutron-tempest-plugin-api
@@ -24,6 +20,8 @@
experimental:
jobs:
- neutron-tempest-plugin-dvr-multinode-scenario
+ - neutron-tempest-plugin-scenario-openvswitch-distributed-dhcp
+ - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-distributed-dhcp
- project-template:
@@ -48,14 +46,10 @@
check:
jobs:
- neutron-tempest-plugin-api-rocky
- - neutron-tempest-plugin-designate-scenario-rocky:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1891309
- # will be fixed
- voting: false
- neutron-tempest-plugin-scenario-linuxbridge-rocky
- neutron-tempest-plugin-scenario-openvswitch-rocky
- neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
+ - neutron-tempest-plugin-designate-scenario-rocky
gate:
jobs:
- neutron-tempest-plugin-api-rocky
@@ -71,14 +65,10 @@
check:
jobs:
- neutron-tempest-plugin-api-stein
- - neutron-tempest-plugin-designate-scenario-stein:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1891309
- # will be fixed
- voting: false
- neutron-tempest-plugin-scenario-linuxbridge-stein
- neutron-tempest-plugin-scenario-openvswitch-stein
- neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-stein
+ - neutron-tempest-plugin-designate-scenario-stein
gate:
jobs:
- neutron-tempest-plugin-api-stein
@@ -94,14 +84,10 @@
check:
jobs:
- neutron-tempest-plugin-api-train
- - neutron-tempest-plugin-designate-scenario-train:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1891309
- # will be fixed
- voting: false
- neutron-tempest-plugin-scenario-linuxbridge-train
- neutron-tempest-plugin-scenario-openvswitch-train
- neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-train
+ - neutron-tempest-plugin-designate-scenario-train
gate:
jobs:
- neutron-tempest-plugin-api-train
@@ -117,15 +103,11 @@
check:
jobs:
- neutron-tempest-plugin-api-ussuri
- - neutron-tempest-plugin-designate-scenario-ussuri:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1891309
- # will be fixed
- voting: false
- neutron-tempest-plugin-scenario-linuxbridge-ussuri
- neutron-tempest-plugin-scenario-openvswitch-ussuri
- neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-ussuri
- neutron-tempest-plugin-scenario-ovn-ussuri
+ - neutron-tempest-plugin-designate-scenario-ussuri
gate:
jobs:
- neutron-tempest-plugin-api-ussuri
@@ -141,15 +123,11 @@
check:
jobs:
- neutron-tempest-plugin-api-victoria
- - neutron-tempest-plugin-designate-scenario-victoria:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1891309
- # will be fixed
- voting: false
- neutron-tempest-plugin-scenario-linuxbridge-victoria
- neutron-tempest-plugin-scenario-openvswitch-victoria
- neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-victoria
- neutron-tempest-plugin-scenario-ovn-victoria
+ - neutron-tempest-plugin-designate-scenario-victoria
gate:
jobs:
- neutron-tempest-plugin-api-victoria
@@ -160,59 +138,105 @@
- neutron-tempest-plugin-dvr-multinode-scenario-victoria
+- project-template:
+ name: neutron-tempest-plugin-jobs-wallaby
+ check:
+ jobs:
+ - neutron-tempest-plugin-api-wallaby
+ - neutron-tempest-plugin-scenario-linuxbridge-wallaby
+ - neutron-tempest-plugin-scenario-openvswitch-wallaby
+ - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-wallaby
+ - neutron-tempest-plugin-scenario-ovn-wallaby
+ - neutron-tempest-plugin-designate-scenario-wallaby
+ gate:
+ jobs:
+ - neutron-tempest-plugin-api-wallaby
+ #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
+ # the experimental queue when it becomes more stable
+ experimental:
+ jobs:
+ - neutron-tempest-plugin-dvr-multinode-scenario-wallaby
+
+
+- project-template:
+ name: neutron-tempest-plugin-jobs-xena
+ check:
+ jobs:
+ - neutron-tempest-plugin-api-xena
+ - neutron-tempest-plugin-scenario-linuxbridge-xena
+ - neutron-tempest-plugin-scenario-openvswitch-xena
+ - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-xena
+ - neutron-tempest-plugin-scenario-ovn-xena
+ - neutron-tempest-plugin-designate-scenario-xena
+ gate:
+ jobs:
+ - neutron-tempest-plugin-api-xena
+ #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
+ # the experimental queue when it becomes more stable
+ experimental:
+ jobs:
+ - neutron-tempest-plugin-dvr-multinode-scenario-xena
+
+- project-template:
+ name: neutron-tempest-plugin-jobs-yoga
+ check:
+ jobs:
+ - neutron-tempest-plugin-api-yoga
+ - neutron-tempest-plugin-scenario-linuxbridge-yoga
+ - neutron-tempest-plugin-scenario-openvswitch-yoga
+ - neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga
+ - neutron-tempest-plugin-scenario-ovn-yoga
+ - neutron-tempest-plugin-designate-scenario-yoga
+ gate:
+ jobs:
+ - neutron-tempest-plugin-api-yoga
+ #TODO(slaweq): Move neutron-tempest-plugin-dvr-multinode-scenario out of
+ # the experimental queue when it becomes more stable
+ experimental:
+ jobs:
+ - neutron-tempest-plugin-dvr-multinode-scenario-yoga
+
- project:
templates:
- build-openstack-docs-pti
- neutron-tempest-plugin-jobs
- - neutron-tempest-plugin-jobs-stein
- - neutron-tempest-plugin-jobs-train
- - neutron-tempest-plugin-jobs-ussuri
- neutron-tempest-plugin-jobs-victoria
+ - neutron-tempest-plugin-jobs-wallaby
+ - neutron-tempest-plugin-jobs-xena
+ - neutron-tempest-plugin-jobs-yoga
- check-requirements
- tempest-plugin-jobs
- release-notes-jobs-python3
check:
jobs:
- neutron-tempest-plugin-sfc
- - neutron-tempest-plugin-sfc-train
- - neutron-tempest-plugin-sfc-ussuri
- neutron-tempest-plugin-sfc-victoria
- - neutron-tempest-plugin-bgpvpn-bagpipe:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/networking-bagpipe/+bug/1897408
- # will be fixed
- voting: false
- - neutron-tempest-plugin-bgpvpn-bagpipe-train
- - neutron-tempest-plugin-bgpvpn-bagpipe-ussuri:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/networking-bagpipe/+bug/1897408
- # will be fixed
- voting: false
- - neutron-tempest-plugin-bgpvpn-bagpipe-victoria:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/networking-bagpipe/+bug/1897408
- # will be fixed
- voting: false
- - neutron-tempest-plugin-fwaas-train:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1858645 will be fixed
- voting: false
- - neutron-tempest-plugin-fwaas-ussuri:
- # TODO(slaweq): switch it to be voting when bug
- # https://bugs.launchpad.net/neutron/+bug/1858645 will be fixed
- voting: false
+ - neutron-tempest-plugin-sfc-wallaby
+ - neutron-tempest-plugin-sfc-xena
+ - neutron-tempest-plugin-sfc-yoga
+ - neutron-tempest-plugin-bgpvpn-bagpipe
+ - neutron-tempest-plugin-bgpvpn-bagpipe-victoria
+ - neutron-tempest-plugin-bgpvpn-bagpipe-wallaby
+ - neutron-tempest-plugin-bgpvpn-bagpipe-xena
+ - neutron-tempest-plugin-bgpvpn-bagpipe-yoga
- neutron-tempest-plugin-dynamic-routing
- - neutron-tempest-plugin-dynamic-routing-ussuri
- neutron-tempest-plugin-dynamic-routing-victoria
+ - neutron-tempest-plugin-dynamic-routing-wallaby
+ - neutron-tempest-plugin-dynamic-routing-xena
+ - neutron-tempest-plugin-dynamic-routing-yoga
+ - neutron-tempest-plugin-fwaas
- neutron-tempest-plugin-vpnaas
- - neutron-tempest-plugin-vpnaas-ussuri
- neutron-tempest-plugin-vpnaas-victoria
+ - neutron-tempest-plugin-vpnaas-wallaby
+ - neutron-tempest-plugin-vpnaas-xena
+ - neutron-tempest-plugin-vpnaas-yoga
+ - neutron-tempest-plugin-tap-as-a-service
+ - neutron-tempest-plugin-tap-as-a-service-xena
+ - neutron-tempest-plugin-tap-as-a-service-yoga
gate:
jobs:
- neutron-tempest-plugin-sfc
- # TODO(slaweq): make bgpvpn-bagpipe job gating again when
- # https://bugs.launchpad.net/networking-bagpipe/+bug/1897408
- # will be fixed
- #- neutron-tempest-plugin-bgpvpn-bagpipe
+ - neutron-tempest-plugin-bgpvpn-bagpipe
- neutron-tempest-plugin-dynamic-routing
+ - neutron-tempest-plugin-fwaas
diff --git a/zuul.d/queens_jobs.yaml b/zuul.d/queens_jobs.yaml
index 2b52978..214df60 100644
--- a/zuul.d/queens_jobs.yaml
+++ b/zuul.d/queens_jobs.yaml
@@ -4,12 +4,26 @@
parent: neutron-tempest-plugin-api
override-checkout: stable/queens
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- name: openstack/neutron-tempest-plugin
override-checkout: 0.3.0
- openstack/tempest
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
branch_override: stable/queens
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
@@ -61,14 +75,39 @@
- standard-attr-timestamp
- standard-attr-tag
- subnet_allocation
+ - subnet-service-types
- trunk
- trunk-details
network_api_extensions_tempest:
- dvr
+ network_available_features: &available_features
+ -
devstack_localrc:
+ NEUTRON_DEPLOY_MOD_WSGI: false
USE_PYTHON3: false
+ CIRROS_VERSION: 0.3.5
+ DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ ML2_L3_PLUGIN: router
+ devstack_local_conf:
+ post-config:
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ AGENT:
+ tunnel_types: gre,vxlan
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
@@ -78,7 +117,6 @@
nodeset: openstack-single-node-xenial
override-checkout: stable/queens
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- name: openstack/neutron-tempest-plugin
override-checkout: 0.3.0
@@ -86,6 +124,13 @@
vars:
branch_override: stable/queens
network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Xenial keepalived doesn't know this option yet
+ keepalived_use_no_track: False
# TODO(slaweq): remove trunks subport_connectivity test from blacklist
# when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
# NOTE(bcafarel): remove DNS test as queens pinned version does not have
@@ -95,16 +140,28 @@
(^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
+ CIRROS_VERSION: 0.3.5
+ DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
+ # Queens
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-queens
- parent: neutron-tempest-plugin-scenario-linuxbridge
+ parent: neutron-tempest-plugin-scenario
nodeset: openstack-single-node-xenial
+ timeout: 10000
+ roles:
+ - zuul: openstack/neutron
override-checkout: stable/queens
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- name: openstack/neutron-tempest-plugin
override-checkout: 0.3.0
@@ -112,21 +169,50 @@
vars:
branch_override: stable/queens
network_api_extensions: *api_extensions
+ network_available_features: *available_features
# NOTE(bcafarel): remove DNS test as queens pinned version does not have
# fix for https://bugs.launchpad.net/neutron/+bug/1826419
tempest_black_regex: "\
(^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
+ CIRROS_VERSION: 0.3.5
+ DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
+ Q_AGENT: linuxbridge
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
+ # Queens
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ enable_dvr: false
+ AGENT:
+ debug_iptables_rules: true
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ ml2:
+ type_drivers: flat,vlan,local,vxlan
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Xenial keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
# NOTE: ignores linux bridge's trunk delete on bound port test
- # for queens branch (as https://review.opendev.org/#/c/605589/
- # fix will not apply for queens branch)
+ # for queens branch (as https://review.opendev.org/#/c/605589/
+ # fix will not apply for queens branch)
$TEMPEST_CONFIG:
neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
q_agent: None
- job:
@@ -135,7 +221,6 @@
nodeset: openstack-two-node-xenial
override-checkout: stable/queens
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- name: openstack/neutron-tempest-plugin
override-checkout: 0.3.0
@@ -152,6 +237,9 @@
(^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
+ CIRROS_VERSION: 0.3.5
+ DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- job:
@@ -160,7 +248,6 @@
nodeset: openstack-single-node-xenial
override-checkout: stable/queens
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- name: openstack/neutron-tempest-plugin
override-checkout: 0.3.0
@@ -176,4 +263,8 @@
(^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)"
devstack_localrc:
USE_PYTHON3: false
+ CIRROS_VERSION: 0.3.5
+ DEFAULT_IMAGE_NAME: cirros-0.3.5-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.3.5-x86_64-uec.tar.gz
TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin /opt/stack/neutron-tempest-plugin"'
+ ADVANCED_INSTANCE_TYPE: ds512M
diff --git a/zuul.d/rocky_jobs.yaml b/zuul.d/rocky_jobs.yaml
index a45cae8..9915575 100644
--- a/zuul.d/rocky_jobs.yaml
+++ b/zuul.d/rocky_jobs.yaml
@@ -6,12 +6,26 @@
This job run on py2 for stable/rocky gate.
override-checkout: stable/rocky
required-projects: &required-projects-rocky
- - openstack/devstack-gate
- openstack/neutron
- name: openstack/neutron-tempest-plugin
override-checkout: 0.9.0
- openstack/tempest
vars: &api_vars_rocky
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
branch_override: stable/rocky
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
@@ -70,14 +84,34 @@
- standard-attr-timestamp
- standard-attr-tag
- subnet_allocation
+ - subnet-service-types
- trunk
- trunk-details
network_api_extensions_tempest:
- dvr
devstack_localrc:
+ NEUTRON_DEPLOY_MOD_WSGI: false
USE_PYTHON3: false
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ ML2_L3_PLUGIN: router
+ devstack_local_conf:
+ post-config:
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ AGENT:
+ tunnel_types: gre,vxlan
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
# NOTE(gmann): This job run on py2 for stable/rocky gate.
branches:
- stable/rocky
@@ -99,25 +133,106 @@
- job:
name: neutron-tempest-plugin-scenario-openvswitch-rocky
- parent: neutron-tempest-plugin-scenario-openvswitch
+ parent: neutron-tempest-plugin-scenario
description: |
This job run on py2 for stable/rocky gate.
nodeset: openstack-single-node-xenial
+ timeout: 10000
override-checkout: stable/rocky
required-projects: *required-projects-rocky
vars: &scenario_vars_rocky
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
branch_override: stable/rocky
network_api_extensions: *api_extensions
+ network_available_features: &available_features
+ -
devstack_localrc:
USE_PYTHON3: false
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- # NOTE(bcafarel): newer tests, unstable on rocky branch
- tempest_black_regex: "\
+ # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
+ # Rocky
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
+ devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ enable_dvr: false
+ l3_ha: true
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ agent:
+ tunnel_types: vxlan,gre
+ ovs:
+ tunnel_bridge: br-tun
+ bridge_mappings: public:br-ex
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Xenial keepalived doesn't know this option yet
+ keepalived_use_no_track: False
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+ neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
+ firewall_driver: openvswitch
+ # NOTE(bcafarel): filtering out unstable tests or tests with known
+ # issues in the used pinned version for this EM branch
+ tempest_black_regex: &rocky_tempest_exclude "\
+ (^neutron_tempest_plugin.scenario.admin.test_floatingip.FloatingIpTestCasesAdmin.test_two_vms_fips)|\
+ (^neutron_tempest_plugin.scenario.test_floatingip.FloatingIPQosTest.test_qos)|\
+ (^neutron_tempest_plugin.scenario.test_internal_dns.InternalDNSTest.test_dns_domain_and_name)|\
(^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
- (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ (^neutron_tempest_plugin.scenario.test_ports.PortsTest.test_previously_used_port)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_ip_prefix)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_secgroup_inheritance)|\
+ (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_remote_group)|\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+ (^tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesTestJSON.test_reassign_port_between_servers)|\
+ (^tempest.api.compute.servers.test_attach_interfaces.AttachInterfacesUnderV243Test.test_add_remove_fixed_ip)"
branches:
- stable/rocky
+ irrelevant-files: &openvswitch-scenario-irrelevant-files
+ - ^(test-|)requirements.txt$
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
- job:
name: neutron-tempest-plugin-scenario-openvswitch-rocky
@@ -132,37 +247,105 @@
<<: *scenario_vars_rocky
devstack_localrc:
USE_PYTHON3: True
+ ADVANCED_INSTANCE_TYPE: ds512M
branches: ^(?!stable/rocky).*$
+ irrelevant-files: *openvswitch-scenario-irrelevant-files
- job:
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
- parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ parent: neutron-tempest-plugin-scenario
nodeset: openstack-single-node-xenial
+ timeout: 10000
description: |
This job run on py2 for stable/rocky gate.
override-checkout: stable/rocky
required-projects: *required-projects-rocky
vars: &openvswitch_vars_rocky
- branch_override: stable/rocky
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
USE_PYTHON3: false
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- # TODO(bcafarel): remove trunks subport_connectivity test from blacklist
- # when bug https://bugs.launchpad.net/neutron/+bug/1838760 will be fixed
- # NOTE(bcafarel): other are newer tests, unstable on rocky branch
- tempest_black_regex: "\
- (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
- (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
- (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
+ # Rocky
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
+ devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ enable_dvr: false
+ l3_ha: true
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ agent:
+ tunnel_types: vxlan,gre
+ ovs:
+ tunnel_bridge: br-tun
+ bridge_mappings: public:br-ex
+ securitygroup:
+ firewall_driver: iptables_hybrid
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Xenial keepalived doesn't know this option yet
+ keepalived_use_no_track: False
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+ neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
+ firewall_driver: iptables_hybrid
+ tempest_black_regex: *rocky_tempest_exclude
branches:
- stable/rocky
+ irrelevant-files: &iptables_hybrid_irrelevant_files
+ - ^(test-|)requirements.txt$
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/linux/openvswitch_firewall/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
- job:
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-rocky
- parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ parent: neutron-tempest-plugin-scenario
nodeset: openstack-single-node-xenial
+ timeout: 10000
description: |
This job run on py3 for other than stable/rocky gate
which is nothing but neutron-tempest-pluign master gate.
@@ -172,14 +355,19 @@
<<: *openvswitch_vars_rocky
devstack_localrc:
USE_PYTHON3: True
+ ADVANCED_INSTANCE_TYPE: ds512M
branches: ^(?!stable/rocky).*$
+ irrelevant-files: *iptables_hybrid_irrelevant_files
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-rocky
- parent: neutron-tempest-plugin-scenario-linuxbridge
+ parent: neutron-tempest-plugin-scenario
+ timeout: 10000
description: |
This job run on py2 for stable/rocky gate.
nodeset: openstack-single-node-xenial
+ roles:
+ - zuul: openstack/neutron
override-checkout: stable/rocky
required-projects: *required-projects-rocky
vars: &linuxbridge_vars_rocky
@@ -187,62 +375,233 @@
network_api_extensions: *api_extensions
devstack_localrc:
USE_PYTHON3: false
+ Q_AGENT: linuxbridge
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ # NOTE(slaweq) some tests are not running fine with ubuntu minimal on
+ # Rocky
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ enable_dvr: false
+ AGENT:
+ debug_iptables_rules: true
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ ml2:
+ type_drivers: flat,vlan,local,vxlan
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Xenial keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
# NOTE: ignores linux bridge's trunk delete on bound port test
# for rocky branch (as https://review.opendev.org/#/c/605589/
# fix will not apply for rocky branch)
$TEMPEST_CONFIG:
neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
q_agent: None
- # NOTE(bcafarel): newer tests, unstable on rocky branch
- tempest_black_regex: "\
- (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
- (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ tempest_black_regex: *rocky_tempest_exclude
branches:
- stable/rocky
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-rocky
- parent: neutron-tempest-plugin-scenario-linuxbridge
+ parent: neutron-tempest-plugin-scenario
nodeset: openstack-single-node-xenial
+ timeout: 10000
description: |
This job run on py3 for other than stable/rocky gate
which is nothing but neutron-tempest-pluign master gate.
+ roles:
+ - zuul: openstack/neutron
override-checkout: stable/rocky
required-projects: *required-projects-rocky
vars:
<<: *linuxbridge_vars_rocky
devstack_localrc:
USE_PYTHON3: True
+ Q_AGENT: linuxbridge
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
+ ADVANCED_INSTANCE_TYPE: ds512M
branches: ^(?!stable/rocky).*$
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
- parent: neutron-tempest-plugin-dvr-multinode-scenario
+ parent: tempest-multinode-full
description: |
This job run on py2 for stable/rocky gate.
nodeset: openstack-two-node-xenial
override-checkout: stable/rocky
+ roles:
+ - zuul: openstack/devstack
required-projects: *required-projects-rocky
+ pre-run: playbooks/dvr-multinode-scenario-pre-run.yaml
+ voting: false
vars: &multinode_scenario_vars_rocky
- branch_override: stable/rocky
+ tempest_concurrency: 4
+ tox_envlist: all
+ tempest_test_regex: ^neutron_tempest_plugin\.scenario
+ # NOTE(slaweq): in case of some tests, which requires advanced image,
+ # default test timeout set to 1200 seconds may not be enough if job is
+ # run on slow node
+ tempest_test_timeout: 2400
network_api_extensions_common: *api_extensions
+ network_api_extensions_dvr:
+ - dvr
devstack_localrc:
USE_PYTHON3: false
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_dvr) | join(',') }}"
+ PHYSICAL_NETWORK: default
+ CIRROS_VERSION: 0.5.1
+ DEFAULT_IMAGE_NAME: cirros-0.5.1-x86_64-uec
+ DEFAULT_IMAGE_FILE_NAME: cirros-0.5.1-x86_64-uec.tar.gz
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ BUILD_TIMEOUT: 784
TEMPEST_PLUGINS: /opt/stack/neutron-tempest-plugin
- # NOTE(bcafarel): newer tests, unstable on rocky branch
- tempest_black_regex: "\
- (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
- (^neutron_tempest_plugin.scenario.test_security_groups.NetworkSecGroupTest.test_multiple_ports_portrange_remote)"
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron.git
+ neutron-tempest-plugin: https://opendev.org/openstack/neutron-tempest-plugin.git
+ tempest_plugins:
+ - neutron-tempest-plugin
+ devstack_services:
+ tls-proxy: false
+ tempest: true
+ neutron-dns: true
+ neutron-qos: true
+ neutron-segments: true
+ neutron-trunk: true
+ neutron-log: true
+ neutron-port-forwarding: true
+ # Cinder services
+ c-api: false
+ c-bak: false
+ c-sch: false
+ c-vol: false
+ cinder: false
+ # We don't need Swift to be run in the Neutron jobs
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ quotas:
+ quota_router: 100
+ quota_floatingip: 500
+ quota_security_group: 100
+ quota_security_group_rule: 1000
+ DEFAULT:
+ router_distributed: True
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ mechanism_drivers: openvswitch,l2population
+ ml2_type_vlan:
+ network_vlan_ranges: foo:1:10
+ ml2_type_vxlan:
+ vni_ranges: 1:2000
+ ml2_type_gre:
+ tunnel_id_ranges: 1:1000
+ agent:
+ enable_distributed_routing: True
+ l2_population: True
+ tunnel_types: vxlan,gre
+ ovs:
+ tunnel_bridge: br-tun
+ bridge_mappings: public:br-ex
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ agent_mode: dvr_snat
+ agent:
+ availability_zone: nova
+ $NEUTRON_DHCP_CONF:
+ agent:
+ availability_zone: nova
+ "/etc/neutron/api-paste.ini":
+ "composite:neutronapi_v2_0":
+ use: "call:neutron.auth:pipeline_factory"
+ noauth: "cors request_id catch_errors osprofiler extensions neutronapiapp_v2_0"
+ keystone: "cors request_id catch_errors osprofiler authtoken keystonecontext extensions neutronapiapp_v2_0"
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: *available_features
+ neutron_plugin_options:
+ provider_vlans: foo,
+ agent_availability_zone: nova
+ image_is_advanced: true
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+ l3_agent_mode: dvr_snat
+ firewall_driver: openvswitch
+ branch_override: stable/rocky
+ tempest_black_regex: *rocky_tempest_exclude
branches:
- stable/rocky
+ group-vars: &multinode_scenario_group_vars_rocky
+ subnode:
+ devstack_services:
+ tls-proxy: false
+ q-agt: true
+ q-l3: true
+ q-meta: true
+ neutron-qos: true
+ neutron-trunk: true
+ neutron-log: true
+ neutron-port-forwarding: true
+ # Cinder services
+ c-bak: false
+ c-vol: false
+ # We don't need Swift to be run in the Neutron jobs
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ devstack_localrc:
+ USE_PYTHON3: true
+ devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ router_distributed: True
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ agent:
+ enable_distributed_routing: True
+ l2_population: True
+ tunnel_types: vxlan,gre
+ ovs:
+ tunnel_bridge: br-tun
+ bridge_mappings: public:br-ex
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ agent_mode: dvr_snat
+ agent:
+ availability_zone: nova
+ irrelevant-files: *openvswitch-scenario-irrelevant-files
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-rocky
- parent: neutron-tempest-plugin-dvr-multinode-scenario
+ parent: tempest-multinode-full
nodeset: openstack-two-node-xenial
description: |
This job run on py3 for other than stable/rocky gate
@@ -254,6 +613,7 @@
USE_PYTHON3: True
required-projects: *required-projects-rocky
group-vars:
+ <<: *multinode_scenario_group_vars_rocky
subnode:
devstack_localrc:
USE_PYTHON3: True
@@ -267,7 +627,6 @@
nodeset: openstack-single-node-xenial
override-checkout: stable/rocky
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- name: openstack/neutron-tempest-plugin
override-checkout: 0.9.0
@@ -280,6 +639,10 @@
devstack_localrc:
USE_PYTHON3: false
TEMPEST_PLUGINS: '"/opt/stack/designate-tempest-plugin /opt/stack/neutron-tempest-plugin"'
+ ADVANCED_INSTANCE_TYPE: ds512M
+ # NOTE(bcafarel): filtering out unstable tests or tests with known
+ # issues in the used pinned version for this EM branch
+ tempest_black_regex: "(^neutron_tempest_plugin.scenario.test_dns_integration.DNSIntegrationAdminTests.test_port_on_special_network)"
branches:
- stable/rocky
diff --git a/zuul.d/stein_jobs.yaml b/zuul.d/stein_jobs.yaml
index ff6ed38..b229296 100644
--- a/zuul.d/stein_jobs.yaml
+++ b/zuul.d/stein_jobs.yaml
@@ -3,7 +3,27 @@
parent: neutron-tempest-plugin-api
nodeset: openstack-single-node-bionic
override-checkout: stable/stein
+ required-projects: &required-projects-stein
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 1.1.0
+ - openstack/tempest
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
branch_override: stable/stein
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
@@ -68,67 +88,261 @@
- standard-attr-tag
- standard-attr-timestamp
- subnet_allocation
+ - subnet-service-types
- trunk
- trunk-details
- uplink-status-propagation
network_api_extensions_tempest:
- dvr
+ network_available_features: &available_features
+ -
+ # NOTE(bcafarel): filtering out unstable tests or tests with known
+ # issues in the used pinned version for this EM branch
+ tempest_black_regex: &stein_tempest_exclude "\
+ (^neutron_tempest_plugin.scenario.test_mtu.NetworkWritableMtuTest.test_connectivity_min_max_mtu)|\
+ (^neutron_tempest_plugin.scenario.test_port_forwardings.PortForwardingTestJSON.test_port_forwarding_to_2_servers)|\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_parent_port_connectivity_after_trunk_deleted_lb)|\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_subport_connectivity)|\
+ (^neutron_tempest_plugin.scenario.test_vlan_transparency.VlanTransparencyTest)"
devstack_localrc:
+ NEUTRON_DEPLOY_MOD_WSGI: false
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ ML2_L3_PLUGIN: router
+ devstack_local_conf:
+ post-config:
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ AGENT:
+ tunnel_types: gre,vxlan
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
- job:
name: neutron-tempest-plugin-scenario-openvswitch-stein
parent: neutron-tempest-plugin-scenario-openvswitch
nodeset: openstack-single-node-bionic
override-checkout: stable/stein
+ required-projects: *required-projects-stein
vars:
branch_override: stable/stein
network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ tempest_black_regex: *stein_tempest_exclude
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
ipv6_metadata: False
- job:
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-stein
- parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ parent: neutron-tempest-plugin-scenario
nodeset: openstack-single-node-bionic
+ timeout: 10000
override-checkout: stable/stein
+ required-projects: *required-projects-stein
vars:
branch_override: stable/stein
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ tempest_black_regex: *stein_tempest_exclude
devstack_localrc:
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ enable_dvr: false
+ l3_ha: true
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ agent:
+ tunnel_types: vxlan,gre
+ ovs:
+ tunnel_bridge: br-tun
+ bridge_mappings: public:br-ex
+ securitygroup:
+ firewall_driver: iptables_hybrid
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
+ firewall_driver: iptables_hybrid
ipv6_metadata: False
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/linux/openvswitch_firewall/.*$
+ - ^neutron/agent/ovn/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/linuxbridge/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
+ - ^neutron/plugins/ml2/drivers/ovn/.*$
- job:
name: neutron-tempest-plugin-scenario-linuxbridge-stein
- parent: neutron-tempest-plugin-scenario-linuxbridge
+ parent: neutron-tempest-plugin-scenario
nodeset: openstack-single-node-bionic
+ timeout: 10000
+ roles:
+ - zuul: openstack/neutron
+ pre-run: playbooks/linuxbridge-scenario-pre-run.yaml
override-checkout: stable/stein
+ required-projects: *required-projects-stein
vars:
branch_override: stable/stein
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
network_api_extensions: *api_extensions
+ network_api_extensions_linuxbridge:
+ - vlan-transparent
+ network_available_features: *available_features
+ tempest_black_regex: *stein_tempest_exclude
devstack_localrc:
- NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ Q_AGENT: linuxbridge
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_linuxbridge) | join(',') }}"
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch,linuxbridge
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_CONF:
+ DEFAULT:
+ enable_dvr: false
+ vlan_transparent: true
+ l3_ha: true
+ AGENT:
+ debug_iptables_rules: true
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ ml2:
+ type_drivers: flat,vlan,local,vxlan
+ mechanism_drivers: linuxbridge
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
+ available_type_drivers: flat,vlan,local,vxlan
+ q_agent: linuxbridge
+ firewall_driver: iptables
ipv6_metadata: False
+ irrelevant-files:
+ - ^(test-|)requirements.txt$
+ - ^releasenotes/.*$
+ - ^doc/.*$
+ - ^setup.cfg$
+ - ^.*\.rst$
+ - ^neutron/locale/.*$
+ - ^neutron/tests/unit/.*$
+ - ^neutron/tests/fullstack/.*
+ - ^neutron/tests/functional/.*
+ - ^tools/.*$
+ - ^tox.ini$
+ - ^neutron/agent/linux/openvswitch_firewall/.*$
+ - ^neutron/agent/ovn/.*$
+ - ^neutron/agent/windows/.*$
+ - ^neutron/plugins/ml2/drivers/openvswitch/.*$
+ - ^neutron/plugins/ml2/drivers/macvtap/.*$
+ - ^neutron/plugins/ml2/drivers/mech_sriov/.*$
+ - ^neutron/plugins/ml2/drivers/ovn/.*$
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-stein
parent: neutron-tempest-plugin-dvr-multinode-scenario
nodeset: openstack-two-node-bionic
override-checkout: stable/stein
+ required-projects: *required-projects-stein
vars:
network_api_extensions_common: *api_extensions
branch_override: stable/stein
@@ -139,12 +353,19 @@
nodeset: openstack-single-node-bionic
override-checkout: stable/stein
required-projects:
- - openstack/devstack-gate
- openstack/neutron
- - openstack/neutron-tempest-plugin
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 1.3.0
- name: openstack/designate-tempest-plugin
override-checkout: 0.7.0
- openstack/tempest
vars:
branch_override: stable/stein
network_api_extensions_common: *api_extensions
+ devstack_localrc:
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
diff --git a/zuul.d/train_jobs.yaml b/zuul.d/train_jobs.yaml
index a9cc5be..e3e4600 100644
--- a/zuul.d/train_jobs.yaml
+++ b/zuul.d/train_jobs.yaml
@@ -3,7 +3,27 @@
parent: neutron-tempest-plugin-api
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects: &required-projects-train
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 1.5.0
+ - openstack/tempest
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
branch_override: stable/train
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
@@ -72,28 +92,65 @@
- standard-attr-tag
- standard-attr-timestamp
- subnet_allocation
+ - subnet-service-types
- subnetpool-prefix-ops
- trunk
- trunk-details
- uplink-status-propagation
network_api_extensions_tempest:
- dvr
+ network_available_features: &available_features
+ -
devstack_localrc:
+ NEUTRON_DEPLOY_MOD_WSGI: false
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ ML2_L3_PLUGIN: router
+ devstack_local_conf:
+ post-config:
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ AGENT:
+ tunnel_types: gre,vxlan
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
- job:
name: neutron-tempest-plugin-scenario-openvswitch-train
parent: neutron-tempest-plugin-scenario-openvswitch
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects: *required-projects-train
vars:
branch_override: stable/train
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
ipv6_metadata: False
@@ -102,14 +159,29 @@
parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects: *required-projects-train
vars:
branch_override: stable/train
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
ipv6_metadata: False
@@ -118,14 +190,29 @@
parent: neutron-tempest-plugin-scenario-linuxbridge
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects: *required-projects-train
vars:
branch_override: stable/train
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
ipv6_metadata: False
@@ -134,6 +221,7 @@
parent: neutron-tempest-plugin-dvr-multinode-scenario
nodeset: openstack-two-node-bionic
override-checkout: stable/train
+ required-projects: *required-projects-train
vars:
network_api_extensions_common: *api_extensions
branch_override: stable/train
@@ -143,15 +231,30 @@
parent: neutron-tempest-plugin-designate-scenario
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects:
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 1.5.0
+ - openstack/tempest
+ - name: openstack/designate-tempest-plugin
+ override-checkout: 0.7.0
vars:
branch_override: stable/train
network_api_extensions_common: *api_extensions
+ devstack_localrc:
+ # NOTE(bcafarel) guestmount binary not available on host OS
+ IMAGE_URLS: https://cloud-images.ubuntu.com/releases/bionic/release/ubuntu-18.04-server-cloudimg-amd64.img
+ ADVANCED_IMAGE_NAME: ubuntu-18.04-server-cloudimg-amd64
+ ADVANCED_INSTANCE_TYPE: ds512M
+ ADVANCED_INSTANCE_USER: ubuntu
+ CUSTOMIZE_IMAGE: false
- job:
name: neutron-tempest-plugin-sfc-train
parent: neutron-tempest-plugin-sfc
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects: *required-projects-train
vars:
branch_override: stable/train
network_api_extensions_common: *api_extensions
@@ -161,6 +264,7 @@
parent: neutron-tempest-plugin-bgpvpn-bagpipe
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects: *required-projects-train
vars:
branch_override: stable/train
network_api_extensions: *api_extensions
@@ -170,6 +274,7 @@
parent: neutron-tempest-plugin-fwaas-ussuri
nodeset: openstack-single-node-bionic
override-checkout: stable/train
+ required-projects: *required-projects-train
vars:
branch_override: stable/train
network_api_extensions_common: *api_extensions
diff --git a/zuul.d/ussuri_jobs.yaml b/zuul.d/ussuri_jobs.yaml
index 135d9f5..be9e0f0 100644
--- a/zuul.d/ussuri_jobs.yaml
+++ b/zuul.d/ussuri_jobs.yaml
@@ -3,7 +3,27 @@
parent: neutron-tempest-plugin-api
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: &required-projects-ussuri
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 1.8.0
+ - openstack/tempest
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
branch_override: stable/ussuri
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
@@ -75,6 +95,7 @@
- standard-attr-timestamp
- subnet_allocation
- subnet-dns-publish-fixed-ip
+ - subnet-service-types
- subnetpool-prefix-ops
- tag-ports-during-bulk-creation
- trunk
@@ -82,38 +103,79 @@
- uplink-status-propagation
network_api_extensions_tempest:
- dvr
+ network_available_features: &available_features
+ -
devstack_localrc:
+ NEUTRON_DEPLOY_MOD_WSGI: false
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ ML2_L3_PLUGIN: router
+ devstack_local_conf:
+ post-config:
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ AGENT:
+ tunnel_types: gre,vxlan
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
+
- job:
name: neutron-tempest-plugin-scenario-openvswitch-ussuri
parent: neutron-tempest-plugin-scenario-openvswitch
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
ipv6_metadata: False
+
- job:
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-ussuri
parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
ipv6_metadata: False
@@ -122,14 +184,23 @@
parent: neutron-tempest-plugin-scenario-linuxbridge
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
devstack_local_conf:
+ post-config:
+ $NEUTRON_L3_CONF:
+ DEFAULT:
+ # NOTE(slaweq): on Bionic keepalived doesn't know this option yet
+ keepalived_use_no_track: False
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
neutron_plugin_options:
ipv6_metadata: False
@@ -138,9 +209,21 @@
parent: neutron-tempest-plugin-scenario-ovn
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions: *api_extensions
+ # TODO(haleyb): Remove IPv6Test from blacklist when
+ # https://bugs.launchpad.net/neutron/+bug/1881558 is fixed.
+ # TODO(slaweq): Remove the test_trunk_subport_lifecycle test from the
+ # blacklist when bug https://bugs.launchpad.net/neutron/+bug/1885900
+ # is fixed
+ # TODO(jlibosva): Remove the NetworkWritableMtuTest test from the list
+ # once east/west fragmentation is supported in core OVN
+ tempest_exclude_regex: "\
+ (?:neutron_tempest_plugin.scenario.test_ipv6.IPv6Test)|\
+ (^neutron_tempest_plugin.scenario.test_trunk.TrunkTest.test_trunk_subport_lifecycle)|\
+ (^neutron_tempest_plugin.scenario.test_mtu.NetworkWritableMtuTest)"
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
# TODO(mjozefcz): Stop compiling OVS modules when meter action in kernel
@@ -149,12 +232,21 @@
OVN_BUILD_MODULES: True
# TODO(skaplons): v2.13.1 is incompatible with kernel 4.15.0-118, sticking to commit hash until new v2.13 tag is created
OVS_BRANCH: 0047ca3a0290f1ef954f2c76b31477cf4b9755f5
+ OVN_BRANCH: "v20.03.0"
+ # NOTE(slaweq): IGMP Snooping requires OVN 20.12
+ OVN_IGMP_SNOOPING_ENABLE: False
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ is_igmp_snooping_enabled: False
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-ussuri
parent: neutron-tempest-plugin-dvr-multinode-scenario
nodeset: openstack-two-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
network_api_extensions_common: *api_extensions
branch_override: stable/ussuri
@@ -164,6 +256,12 @@
parent: neutron-tempest-plugin-designate-scenario
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects:
+ - openstack/neutron
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 1.8.0
+ - openstack/tempest
+ - openstack/designate-tempest-plugin
vars:
branch_override: stable/ussuri
network_api_extensions_common: *api_extensions
@@ -173,6 +271,7 @@
parent: neutron-tempest-plugin-sfc
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions_common: *api_extensions
@@ -182,6 +281,7 @@
parent: neutron-tempest-plugin-bgpvpn-bagpipe
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions: *api_extensions
@@ -193,10 +293,10 @@
timeout: 10800
override-checkout: stable/ussuri
required-projects:
- - openstack/devstack-gate
- openstack/neutron-fwaas
- openstack/neutron
- - openstack/neutron-tempest-plugin
+ - name: openstack/neutron-tempest-plugin
+ override-checkout: 1.8.0
- openstack/tempest
vars:
branch_override: stable/ussuri
@@ -215,6 +315,7 @@
parent: neutron-tempest-plugin-dynamic-routing
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions_common: *api_extensions
@@ -224,6 +325,7 @@
parent: neutron-tempest-plugin-vpnaas
nodeset: openstack-single-node-bionic
override-checkout: stable/ussuri
+ required-projects: *required-projects-ussuri
vars:
branch_override: stable/ussuri
network_api_extensions_common: *api_extensions
diff --git a/zuul.d/victoria_jobs.yaml b/zuul.d/victoria_jobs.yaml
index 0bc1e13..d648aa8 100644
--- a/zuul.d/victoria_jobs.yaml
+++ b/zuul.d/victoria_jobs.yaml
@@ -3,6 +3,21 @@
parent: neutron-tempest-plugin-api
override-checkout: stable/victoria
vars:
+ devstack_services:
+ # Disable OVN services
+ br-ex-tcpdump: false
+ br-int-flows: false
+ ovn-controller: false
+ ovn-northd: false
+ ovs-vswitchd: false
+ ovsdb-server: false
+ q-ovn-metadata-agent: false
+ # Neutron services
+ q-agt: true
+ q-dhcp: true
+ q-l3: true
+ q-meta: true
+ q-metering: true
branch_override: stable/victoria
# TODO(slaweq): find a way to put this list of extensions in
# neutron repository and keep it different per branch,
@@ -74,6 +89,7 @@
- standard-attr-timestamp
- subnet_allocation
- subnet-dns-publish-fixed-ip
+ - subnet-service-types
- subnetpool-prefix-ops
- tag-ports-during-bulk-creation
- trunk
@@ -81,8 +97,29 @@
- uplink-status-propagation
network_api_extensions_tempest:
- dvr
+ network_available_features: &available_features
+ - ipv6_metadata
devstack_localrc:
+ NEUTRON_DEPLOY_MOD_WSGI: false
NETWORK_API_EXTENSIONS: "{{ (network_api_extensions_common + network_api_extensions_tempest) | join(',') }}"
+ Q_AGENT: openvswitch
+ Q_ML2_TENANT_NETWORK_TYPE: vxlan
+ Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
+ ML2_L3_PLUGIN: router
+ devstack_local_conf:
+ post-config:
+ # NOTE(slaweq): We can get rid of this hardcoded absolute path when
+ # devstack-tempest job will be switched to use lib/neutron instead of
+ # lib/neutron-legacy
+ /$NEUTRON_CORE_PLUGIN_CONF:
+ AGENT:
+ tunnel_types: gre,vxlan
+ ml2:
+ type_drivers: flat,geneve,vlan,gre,local,vxlan
+ test-config:
+ $TEMPEST_CONFIG:
+ neutron_plugin_options:
+ available_type_drivers: flat,geneve,vlan,gre,local,vxlan
- job:
name: neutron-tempest-plugin-scenario-openvswitch-victoria
@@ -91,26 +128,32 @@
vars:
branch_override: stable/victoria
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
ipv6_metadata: False
- job:
name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-victoria
parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
- override-checkout: stable-victoria
+ override-checkout: stable/victoria
vars:
- branch_override: stable-victoria
+ branch_override: stable/victoria
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
ipv6_metadata: False
@@ -121,11 +164,14 @@
vars:
branch_override: stable/victoria
network_api_extensions: *api_extensions
+ network_available_features: *available_features
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
neutron_plugin_options:
ipv6_metadata: False
@@ -138,6 +184,11 @@
network_api_extensions: *api_extensions
devstack_localrc:
NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
- job:
name: neutron-tempest-plugin-dvr-multinode-scenario-victoria
diff --git a/zuul.d/wallaby_jobs.yaml b/zuul.d/wallaby_jobs.yaml
new file mode 100644
index 0000000..c79667a
--- /dev/null
+++ b/zuul.d/wallaby_jobs.yaml
@@ -0,0 +1,199 @@
+- job:
+ name: neutron-tempest-plugin-api-wallaby
+ parent: neutron-tempest-plugin-api
+ override-checkout: stable/wallaby
+ vars:
+ # TODO(slaweq): find a way to put this list of extensions in
+ # neutron repository and keep it different per branch,
+ # then it could be removed from here
+ network_api_extensions_common: &api_extensions
+ - address-group
+ - address-scope
+ - agent
+ - allowed-address-pairs
+ - auto-allocated-topology
+ - availability_zone
+ - binding
+ - default-subnetpools
+ - dhcp_agent_scheduler
+ - dns-domain-ports
+ - dns-integration
+ - empty-string-filtering
+ - expose-port-forwarding-in-fip
+ - expose-l3-conntrack-helper
+ - ext-gw-mode
+ - external-net
+ - extra_dhcp_opt
+ - extraroute
+ - extraroute-atomic
+ - filter-validation
+ - fip-port-details
+ - flavors
+ - floating-ip-port-forwarding
+ - floatingip-pools
+ - ip-substring-filtering
+ - l3-conntrack-helper
+ - l3-flavors
+ - l3-ha
+ - l3_agent_scheduler
+ - logging
+ - metering
+ - multi-provider
+ - net-mtu
+ - net-mtu-writable
+ - network-ip-availability
+ - network_availability_zone
+ - network-segment-range
+ - pagination
+ - port-resource-request
+ - port-mac-address-regenerate
+ - port-security
+ - port-security-groups-filtering
+ - project-id
+ - provider
+ - qos
+ - qos-bw-minimum-ingress
+ - qos-fip
+ - quotas
+ - quota_details
+ - rbac-address-group
+ - rbac-address-scope
+ - rbac-policies
+ - rbac-security-groups
+ - rbac-subnetpool
+ - router
+ - router-admin-state-down-before-update
+ - router_availability_zone
+ - security-group
+ - security-groups-remote-address-group
+ - segment
+ - service-type
+ - sorting
+ - standard-attr-description
+ - standard-attr-revisions
+ - standard-attr-segment
+ - standard-attr-tag
+ - standard-attr-timestamp
+ - subnet_allocation
+ - subnet-dns-publish-fixed-ip
+ - subnet-service-types
+ - subnetpool-prefix-ops
+ - tag-ports-during-bulk-creation
+ - trunk
+ - trunk-details
+ - uplink-status-propagation
+ network_api_extensions_tempest:
+ - dvr
+ network_available_features: &available_features
+ - ipv6_metadata
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-wallaby
+ parent: neutron-tempest-plugin-scenario-openvswitch
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-wallaby
+ parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-linuxbridge-wallaby
+ parent: neutron-tempest-plugin-scenario-linuxbridge
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-ovn-wallaby
+ parent: neutron-tempest-plugin-scenario-ovn
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions: *api_extensions
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
+
+- job:
+ name: neutron-tempest-plugin-dvr-multinode-scenario-wallaby
+ parent: neutron-tempest-plugin-dvr-multinode-scenario
+ override-checkout: stable/wallaby
+ vars:
+ network_api_extensions_common: *api_extensions
+ branch_override: stable/wallaby
+
+- job:
+ name: neutron-tempest-plugin-designate-scenario-wallaby
+ parent: neutron-tempest-plugin-designate-scenario
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-sfc-wallaby
+ parent: neutron-tempest-plugin-sfc
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-bgpvpn-bagpipe-wallaby
+ parent: neutron-tempest-plugin-bgpvpn-bagpipe
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-dynamic-routing-wallaby
+ parent: neutron-tempest-plugin-dynamic-routing
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-vpnaas-wallaby
+ parent: neutron-tempest-plugin-vpnaas
+ override-checkout: stable/wallaby
+ vars:
+ branch_override: stable/wallaby
+ network_api_extensions_common: *api_extensions
diff --git a/zuul.d/xena_jobs.yaml b/zuul.d/xena_jobs.yaml
new file mode 100644
index 0000000..3d8ce43
--- /dev/null
+++ b/zuul.d/xena_jobs.yaml
@@ -0,0 +1,209 @@
+- job:
+ name: neutron-tempest-plugin-api-xena
+ parent: neutron-tempest-plugin-api
+ override-checkout: stable/xena
+ vars:
+ # TODO(slaweq): find a way to put this list of extensions in
+ # neutron repository and keep it different per branch,
+ # then it could be removed from here
+ network_api_extensions_common: &api_extensions
+ - address-group
+ - address-scope
+ - agent
+ - allowed-address-pairs
+ - auto-allocated-topology
+ - availability_zone
+ - binding
+ - default-subnetpools
+ - dhcp_agent_scheduler
+ - dns-domain-ports
+ - dns-integration
+ - dns-integration-domain-keywords
+ - empty-string-filtering
+ - expose-port-forwarding-in-fip
+ - expose-l3-conntrack-helper
+ - ext-gw-mode
+ - external-net
+ - extra_dhcp_opt
+ - extraroute
+ - extraroute-atomic
+ - filter-validation
+ - fip-port-details
+ - flavors
+ - floating-ip-port-forwarding
+ - floatingip-pools
+ - ip-substring-filtering
+ - l3-conntrack-helper
+ - l3-flavors
+ - l3-ha
+ - l3_agent_scheduler
+ - logging
+ - metering
+ - multi-provider
+ - net-mtu
+ - net-mtu-writable
+ - network-ip-availability
+ - network_availability_zone
+ - network-segment-range
+ - pagination
+ - port-device-profile
+ - port-resource-request
+ - port-mac-address-regenerate
+ - port-security
+ - port-security-groups-filtering
+ - project-id
+ - provider
+ - qos
+ - qos-bw-minimum-ingress
+ - qos-fip
+ - quotas
+ - quota_details
+ - rbac-address-group
+ - rbac-address-scope
+ - rbac-policies
+ - rbac-security-groups
+ - rbac-subnetpool
+ - router
+ - router-admin-state-down-before-update
+ - router_availability_zone
+ - security-group
+ - security-groups-remote-address-group
+ - segment
+ - service-type
+ - sorting
+ - standard-attr-description
+ - standard-attr-revisions
+ - standard-attr-segment
+ - standard-attr-tag
+ - standard-attr-timestamp
+ - subnet_allocation
+ - subnet-dns-publish-fixed-ip
+ - subnet-service-types
+ - subnetpool-prefix-ops
+ - tag-ports-during-bulk-creation
+ - trunk
+ - trunk-details
+ - uplink-status-propagation
+ network_api_extensions_tempest:
+ - dvr
+ network_available_features: &available_features
+ - ipv6_metadata
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-xena
+ parent: neutron-tempest-plugin-scenario-openvswitch
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-xena
+ parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-linuxbridge-xena
+ parent: neutron-tempest-plugin-scenario-linuxbridge
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-ovn-xena
+ parent: neutron-tempest-plugin-scenario-ovn
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions: *api_extensions
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
+
+- job:
+ name: neutron-tempest-plugin-dvr-multinode-scenario-xena
+ parent: neutron-tempest-plugin-dvr-multinode-scenario
+ override-checkout: stable/xena
+ vars:
+ network_api_extensions_common: *api_extensions
+ branch_override: stable/xena
+
+- job:
+ name: neutron-tempest-plugin-designate-scenario-xena
+ parent: neutron-tempest-plugin-designate-scenario
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-sfc-xena
+ parent: neutron-tempest-plugin-sfc
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-bgpvpn-bagpipe-xena
+ parent: neutron-tempest-plugin-bgpvpn-bagpipe
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-dynamic-routing-xena
+ parent: neutron-tempest-plugin-dynamic-routing
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-vpnaas-xena
+ parent: neutron-tempest-plugin-vpnaas
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-tap-as-a-service-xena
+ parent: neutron-tempest-plugin-tap-as-a-service
+ override-checkout: stable/xena
+ vars:
+ branch_override: stable/xena
+ network_api_extensions_common: *api_extensions
diff --git a/zuul.d/yoga_jobs.yaml b/zuul.d/yoga_jobs.yaml
new file mode 100644
index 0000000..35720a0
--- /dev/null
+++ b/zuul.d/yoga_jobs.yaml
@@ -0,0 +1,213 @@
+- job:
+ name: neutron-tempest-plugin-api-yoga
+ parent: neutron-tempest-plugin-api
+ override-checkout: stable/yoga
+ vars:
+ # TODO(slaweq): find a way to put this list of extensions in
+ # neutron repository and keep it different per branch,
+ # then it could be removed from here
+ network_api_extensions_common: &api_extensions
+ - address-group
+ - address-scope
+ - agent
+ - allowed-address-pairs
+ - auto-allocated-topology
+ - availability_zone
+ - binding
+ - default-subnetpools
+ - dhcp_agent_scheduler
+ - dns-domain-ports
+ - dns-integration
+ - dns-integration-domain-keywords
+ - empty-string-filtering
+ - expose-port-forwarding-in-fip
+ - expose-l3-conntrack-helper
+ - ext-gw-mode
+ - external-net
+ - extra_dhcp_opt
+ - extraroute
+ - extraroute-atomic
+ - filter-validation
+ - fip-port-details
+ - flavors
+ - floating-ip-port-forwarding
+ - floatingip-pools
+ - ip-substring-filtering
+ - l3-conntrack-helper
+ - l3-flavors
+ - l3-ha
+ - l3_agent_scheduler
+ - logging
+ - metering
+ - multi-provider
+ - net-mtu
+ - net-mtu-writable
+ - network-ip-availability
+ - network_availability_zone
+ - network-segment-range
+ - pagination
+ - port-device-profile
+ - port-resource-request
+ - port-resource-request-groups
+ - port-mac-address-regenerate
+ - port-security
+ - port-security-groups-filtering
+ - project-id
+ - provider
+ - qos
+ - qos-bw-minimum-ingress
+ - qos-fip
+ - quotas
+ - quota_details
+ - rbac-address-group
+ - rbac-address-scope
+ - rbac-policies
+ - rbac-security-groups
+ - rbac-subnetpool
+ - router
+ - router-admin-state-down-before-update
+ - router_availability_zone
+ - security-group
+ - security-groups-remote-address-group
+ - segment
+ - service-type
+ - sorting
+ - standard-attr-description
+ - standard-attr-revisions
+ - standard-attr-segment
+ - standard-attr-tag
+ - standard-attr-timestamp
+ - stateful-security-group
+ - subnet_allocation
+ - subnet-dns-publish-fixed-ip
+ - subnet-service-types
+ - subnetpool-prefix-ops
+ - tag-ports-during-bulk-creation
+ - trunk
+ - trunk-details
+ - uplink-status-propagation
+ network_api_extensions_tempest:
+ - dvr
+ network_available_features: &available_features
+ - ipv6_metadata
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-yoga
+ parent: neutron-tempest-plugin-scenario-openvswitch
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_openvswitch) | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid-yoga
+ parent: neutron-tempest-plugin-scenario-openvswitch-iptables_hybrid
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_openvswitch) | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-linuxbridge-yoga
+ parent: neutron-tempest-plugin-scenario-linuxbridge
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions: *api_extensions
+ network_available_features: *available_features
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ network_api_extensions | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: "{{ network_available_features | join(',') }}"
+
+- job:
+ name: neutron-tempest-plugin-scenario-ovn-yoga
+ parent: neutron-tempest-plugin-scenario-ovn
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions: *api_extensions
+ network_api_extensions_ovn:
+ - vlan-transparent
+ devstack_localrc:
+ NETWORK_API_EXTENSIONS: "{{ (network_api_extensions + network_api_extensions_ovn) | join(',') }}"
+ devstack_local_conf:
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ available_features: ""
+
+- job:
+ name: neutron-tempest-plugin-dvr-multinode-scenario-yoga
+ parent: neutron-tempest-plugin-dvr-multinode-scenario
+ override-checkout: stable/yoga
+ vars:
+ network_api_extensions_common: *api_extensions
+ branch_override: stable/yoga
+
+- job:
+ name: neutron-tempest-plugin-designate-scenario-yoga
+ parent: neutron-tempest-plugin-designate-scenario
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-sfc-yoga
+ parent: neutron-tempest-plugin-sfc
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-bgpvpn-bagpipe-yoga
+ parent: neutron-tempest-plugin-bgpvpn-bagpipe
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-dynamic-routing-yoga
+ parent: neutron-tempest-plugin-dynamic-routing
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-vpnaas-yoga
+ parent: neutron-tempest-plugin-vpnaas
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions_common: *api_extensions
+
+- job:
+ name: neutron-tempest-plugin-tap-as-a-service-yoga
+ parent: neutron-tempest-plugin-tap-as-a-service
+ override-checkout: stable/yoga
+ vars:
+ branch_override: stable/yoga
+ network_api_extensions_common: *api_extensions