Merge "Remove RetryFilter from defaults"
diff --git a/.zuul.yaml b/.zuul.yaml
index 5bec9f9..ecc9284 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -273,6 +273,21 @@
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
ENABLE_VOLUME_MULTIATTACH: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ devstack_local_conf:
+ post-config:
+ "/$NEUTRON_CORE_PLUGIN_CONF":
+ ovs:
+ bridge_mappings: public:br-ex
+ resource_provider_bandwidths: br-ex:1000000:1000000
+ test-config:
+ $TEMPEST_CONFIG:
+ network-feature-enabled:
+ qos_placement_physnet: public
tempest_concurrency: 2
group-vars:
# NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
diff --git a/doc/source/data/tempest-blacklisted-plugins-registry.header b/doc/source/data/tempest-blacklisted-plugins-registry.header
new file mode 100644
index 0000000..6b6af11
--- /dev/null
+++ b/doc/source/data/tempest-blacklisted-plugins-registry.header
@@ -0,0 +1,7 @@
+Blacklisted Plugins
+===================
+
+A list of Tempest plugin projects that have been stale or unmaintained for a
+long time (6 months or more). A project can be moved out of the blacklist
+once one of the relevant patches gets merged:
+https://review.opendev.org/#/q/topic:tempest-sanity-gate+%28status:open%29
diff --git a/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml b/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml
new file mode 100644
index 0000000..384f916
--- /dev/null
+++ b/releasenotes/notes/bug-1647999-7aeda50a8d082d4c.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A new parameter, ``compute/compute_volume_common_az``, is introduced to
+ specify the availability zone in which Tempest creates instances and
+ volumes for scenario tests. This allows scenario tests to run against a
+ deployment that has multiple availability zones and sets
+ cinder/cross_az_attach in nova.conf to False.
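For reference, a minimal tempest.conf snippet using the new option might look
like this ("nova" is a hypothetical availability zone name, not a value from
the change above):

    [compute]
    compute_volume_common_az = nova
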
diff --git a/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml b/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml
new file mode 100644
index 0000000..8475f50
--- /dev/null
+++ b/releasenotes/notes/config_image_certificate_compute_feature-c56efb520d54aff5.yaml
@@ -0,0 +1,8 @@
+---
+other:
+ - |
+ New configuration options ``[compute]/certified_image_ref`` and
+ ``[compute]/certified_image_trusted_certs`` have been introduced. These
+ are required in order to run the ``ServerShowV263Test`` test; they supply
+ a signed image, with the required img_signature_* properties set, and a
+ list of trusted certificates to be used during the test.
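For reference, a hypothetical tempest.conf snippet enabling these tests might
look like the following (the image UUID and certificate IDs are placeholders,
not values from the change above):

    [compute]
    certified_image_ref = 1a2b3c4d-0000-0000-0000-000000000000
    certified_image_trusted_certs = trusted-ca-1,trusted-ca-2
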
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index e71e642..aaf7a5a 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -487,6 +487,9 @@
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(
cls.volumes_client.wait_for_resource_deletion, volume['id'])
diff --git a/tempest/api/compute/flavors/test_flavors_negative.py b/tempest/api/compute/flavors/test_flavors_negative.py
index 3a474e6..235049a 100644
--- a/tempest/api/compute/flavors/test_flavors_negative.py
+++ b/tempest/api/compute/flavors/test_flavors_negative.py
@@ -70,9 +70,7 @@
self.assertEqual(min_img_ram, image['min_ram'])
# Try to create server with flavor of insufficient ram size
- self.assertRaisesRegex(lib_exc.BadRequest,
- "Flavor's memory is too small for "
- "requested image",
- self.create_test_server,
- image_id=image['id'],
- flavor=flavor['id'])
+ self.assertRaises(lib_exc.BadRequest,
+ self.create_test_server,
+ image_id=image['id'],
+ flavor=flavor['id'])
diff --git a/tempest/api/compute/servers/test_servers.py b/tempest/api/compute/servers/test_servers.py
index e8b1161..76d65dd 100644
--- a/tempest/api/compute/servers/test_servers.py
+++ b/tempest/api/compute/servers/test_servers.py
@@ -186,10 +186,17 @@
min_microversion = '2.63'
max_microversion = 'latest'
+ @testtools.skipUnless(CONF.compute.certified_image_ref,
+ '``[compute]/certified_image_ref`` required to test '
+ 'image certificate validation.')
+ @testtools.skipUnless(CONF.compute.certified_image_trusted_certs,
+ '``[compute]/certified_image_trusted_certs`` '
+ 'required to test image certificate validation.')
@decorators.idempotent_id('71b8e3d5-11d2-494f-b917-b094a4afed3c')
def test_show_update_rebuild_list_server(self):
- trusted_certs = ['test-cert-1', 'test-cert-2']
+ trusted_certs = CONF.compute.certified_image_trusted_certs
server = self.create_test_server(
+ image_id=CONF.compute.certified_image_ref,
trusted_image_certificates=trusted_certs,
wait_until='ACTIVE')
diff --git a/tempest/api/identity/admin/v3/test_endpoints.py b/tempest/api/identity/admin/v3/test_endpoints.py
index 2cd8906..366d6a0 100644
--- a/tempest/api/identity/admin/v3/test_endpoints.py
+++ b/tempest/api/identity/admin/v3/test_endpoints.py
@@ -44,11 +44,14 @@
cls.addClassResourceCleanup(
cls.services_client.delete_service, service['id'])
- region = data_utils.rand_name('region')
+ region_name = data_utils.rand_name('region')
url = data_utils.rand_url()
endpoint = cls.client.create_endpoint(
service_id=cls.service_ids[i], interface=interfaces[i],
- url=url, region=region, enabled=True)['endpoint']
+ url=url, region=region_name, enabled=True)['endpoint']
+ region = cls.regions_client.show_region(region_name)['region']
+ cls.addClassResourceCleanup(
+ cls.regions_client.delete_region, region['id'])
cls.addClassResourceCleanup(
cls.client.delete_endpoint, endpoint['id'])
cls.setup_endpoint_ids.append(endpoint['id'])
@@ -108,17 +111,19 @@
@decorators.idempotent_id('0e2446d2-c1fd-461b-a729-b9e73e3e3b37')
def test_create_list_show_delete_endpoint(self):
- region = data_utils.rand_name('region')
+ region_name = data_utils.rand_name('region')
url = data_utils.rand_url()
interface = 'public'
endpoint = self.client.create_endpoint(service_id=self.service_ids[0],
interface=interface,
- url=url, region=region,
+ url=url, region=region_name,
enabled=True)['endpoint']
+ region = self.regions_client.show_region(region_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region['id'])
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.client.delete_endpoint, endpoint['id'])
# Asserting Create Endpoint response body
- self.assertEqual(region, endpoint['region'])
+ self.assertEqual(region_name, endpoint['region'])
self.assertEqual(url, endpoint['url'])
# Checking if created endpoint is present in the list of endpoints
@@ -133,7 +138,7 @@
self.assertEqual(self.service_ids[0], fetched_endpoint['service_id'])
self.assertEqual(interface, fetched_endpoint['interface'])
self.assertEqual(url, fetched_endpoint['url'])
- self.assertEqual(region, fetched_endpoint['region'])
+ self.assertEqual(region_name, fetched_endpoint['region'])
self.assertEqual(True, fetched_endpoint['enabled'])
# Deleting the endpoint created in this method
@@ -161,28 +166,33 @@
self.addCleanup(self.services_client.delete_service, service2['id'])
# Creating an endpoint so as to check update endpoint with new values
- region1 = data_utils.rand_name('region')
+ region1_name = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
endpoint_for_update = (
self.client.create_endpoint(service_id=self.service_ids[0],
interface=interface1,
- url=url1, region=region1,
+ url=url1, region=region1_name,
enabled=True)['endpoint'])
- self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+ region1 = self.regions_client.show_region(region1_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region1['id'])
# Updating endpoint with new values
- region2 = data_utils.rand_name('region')
+ region2_name = data_utils.rand_name('region')
url2 = data_utils.rand_url()
interface2 = 'internal'
endpoint = self.client.update_endpoint(endpoint_for_update['id'],
service_id=service2['id'],
interface=interface2,
- url=url2, region=region2,
+ url=url2, region=region2_name,
enabled=False)['endpoint']
+ region2 = self.regions_client.show_region(region2_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region2['id'])
+ self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
+
# Asserting if the attributes of endpoint are updated
self.assertEqual(service2['id'], endpoint['service_id'])
self.assertEqual(interface2, endpoint['interface'])
self.assertEqual(url2, endpoint['url'])
- self.assertEqual(region2, endpoint['region'])
+ self.assertEqual(region2_name, endpoint['region'])
self.assertEqual(False, endpoint['enabled'])
diff --git a/tempest/api/identity/admin/v3/test_endpoints_negative.py b/tempest/api/identity/admin/v3/test_endpoints_negative.py
index 4c3eb1c..164b577 100644
--- a/tempest/api/identity/admin/v3/test_endpoints_negative.py
+++ b/tempest/api/identity/admin/v3/test_endpoints_negative.py
@@ -70,14 +70,16 @@
def _assert_update_raises_bad_request(self, enabled):
# Create an endpoint
- region1 = data_utils.rand_name('region')
+ region1_name = data_utils.rand_name('region')
url1 = data_utils.rand_url()
interface1 = 'public'
endpoint_for_update = (
self.client.create_endpoint(service_id=self.service_id,
interface=interface1,
- url=url1, region=region1,
+ url=url1, region=region1_name,
enabled=True)['endpoint'])
+ region1 = self.regions_client.show_region(region1_name)['region']
+ self.addCleanup(self.regions_client.delete_region, region1['id'])
self.addCleanup(self.client.delete_endpoint, endpoint_for_update['id'])
self.assertRaises(lib_exc.BadRequest, self.client.update_endpoint,
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 64fe29a..1bfd075 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -124,6 +124,10 @@
name = data_utils.rand_name(cls.__name__ + '-Volume')
kwargs['name'] = name
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.delete_volume, cls.volumes_client,
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index e6db2e9..f0d7264 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -94,6 +94,8 @@
class TempestCleanup(command.Command):
+ GOT_EXCEPTIONS = []
+
def take_action(self, parsed_args):
try:
self.init(parsed_args)
@@ -103,6 +105,8 @@
LOG.exception("Failure during cleanup")
traceback.print_exc()
raise
+ if self.GOT_EXCEPTIONS:
+ raise Exception(self.GOT_EXCEPTIONS)
def init(self, parsed_args):
cleanup_service.init_conf()
@@ -159,7 +163,8 @@
'is_dry_run': is_dry_run,
'saved_state_json': self.json_data,
'is_preserve': is_preserve,
- 'is_save_state': is_save_state}
+ 'is_save_state': is_save_state,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
@@ -200,7 +205,8 @@
'saved_state_json': self.json_data,
'is_preserve': is_preserve,
'is_save_state': False,
- 'project_id': project_id}
+ 'project_id': project_id,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.project_services:
svc = service(mgr, **kwargs)
svc.run()
@@ -300,7 +306,8 @@
'is_dry_run': False,
'saved_state_json': data,
'is_preserve': False,
- 'is_save_state': True}
+ 'is_save_state': True,
+ 'got_exceptions': self.GOT_EXCEPTIONS}
for service in self.global_services:
svc = service(admin_mgr, **kwargs)
svc.run()
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 104958a..ccceb34 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -22,6 +22,7 @@
from tempest.common import utils
from tempest.common.utils import net_info
from tempest import config
+from tempest.lib import exceptions
LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -127,12 +128,23 @@
pass
def run(self):
- if self.is_dry_run:
- self.dry_run()
- elif self.is_save_state:
- self.save_state()
- else:
- self.delete()
+ try:
+ if self.is_dry_run:
+ self.dry_run()
+ elif self.is_save_state:
+ self.save_state()
+ else:
+ self.delete()
+ except exceptions.NotImplemented as exc:
+ # Many OpenStack services implement features and resources via
+ # extensions. Tempest cleanup tries to delete test resources without
+ # carrying much logic for extension checks, so if an extension is
+ # missing, the service returns a NotImplemented error; record it and
+ # let the remaining services run.
+ msg = ("Got NotImplemented error in %s, full exception: %s" %
+ (str(self.__class__), str(exc)))
+ LOG.exception(msg)
+ self.got_exceptions.append(msg)
class SnapshotService(BaseService):
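Taken together, the cleanup.py and cleanup_service.py hunks above implement a
"collect errors, fail at the end" pattern: every service appends to a list
owned by the command, and the command raises once all services have run. A
minimal standalone sketch of the pattern (illustrative names only, not the
tempest API):

    class FakeService(object):
        def __init__(self, got_exceptions):
            self.got_exceptions = got_exceptions

        def delete(self):
            # Stand-in for a service backed by a missing extension.
            raise NotImplementedError

        def run(self):
            try:
                self.delete()
            except NotImplementedError as exc:
                # Record the failure and keep cleaning the rest.
                self.got_exceptions.append('%s: %s' % (self.__class__, exc))

    class FakeCleanup(object):
        GOT_EXCEPTIONS = []

        def take_action(self):
            for service in (FakeService, FakeService):
                service(self.GOT_EXCEPTIONS).run()
            if self.GOT_EXCEPTIONS:
                # Surface every recorded failure as one non-zero exit.
                raise Exception(self.GOT_EXCEPTIONS)
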
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 4ac92d9..cd85ede 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -167,6 +167,9 @@
params = {'name': volume_name,
'imageRef': image_id,
'size': CONF.volume.volume_size}
+ if CONF.compute.compute_volume_common_az:
+ params.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
volume = volumes_client.create_volume(**params)
try:
waiters.wait_for_volume_resource_status(volumes_client,
@@ -193,6 +196,9 @@
# to be specified.
image_id = ''
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
body = clients.servers_client.create_server(name=name, imageRef=image_id,
flavorRef=flavor,
**kwargs)
diff --git a/tempest/config.py b/tempest/config.py
index 345daa5..c50ebbe 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -271,6 +271,17 @@
help="Valid secondary image reference to be used in tests. "
"This is a required option, but if only one image is "
"available duplicate the value of image_ref above"),
+ cfg.StrOpt('certified_image_ref',
+ help="Valid image reference to be used in image certificate "
+ "validation tests when enabled. This image must also "
+ "have the required img_signature_* properties set. "
+ "Additional details available within the following Nova "
+ "documentation: https://docs.openstack.org/nova/latest/"
+ "user/certificate-validation.html"),
+ cfg.ListOpt('certified_image_trusted_certs',
+ help="A list of trusted certificates to be used when the "
+ "image certificate validation compute feature is "
+ "enabled."),
cfg.StrOpt('flavor_ref',
default="1",
help="Valid primary flavor to use in tests."),
@@ -352,6 +363,19 @@
"If both values are not specified, Tempest avoids tests "
"which require a microversion. Valid values are string "
"with format 'X.Y' or string 'latest'"),
+ cfg.StrOpt('compute_volume_common_az',
+ default=None,
+ help='AZ to be used for Cinder and Nova. Set this parameter '
+ 'when the cloud has nova.conf: cinder.cross_az_attach '
+ 'set to false, which means volumes attached to an '
+ 'instance must be in the same availability zone in '
+ 'Cinder as the instance availability zone in Nova. Set '
+ 'the common availability zone in this config; it will '
+ 'be used both to boot an instance and to create a '
+ 'volume. NOTE: If that AZ is not in Cinder (or '
+ 'allow_availability_zone_fallback=False in cinder.conf), '
+ 'the volume create request will fail and the instance '
+ 'will fail the build request.'),
]
placement_group = cfg.OptGroup(name='placement',
@@ -725,7 +749,13 @@
help="Does the test environment support port security?"),
cfg.BoolOpt('floating_ips',
default=True,
- help='Does the test environment support floating_ips')
+ help='Does the test environment support floating_ips'),
+ cfg.StrOpt('qos_placement_physnet', default=None,
+ help='Name of the physnet for placement based minimum '
+ 'bandwidth allocation.'),
+ cfg.StrOpt('provider_net_base_segmentation_id', default='3000',
+ help='Base segmentation ID to create provider networks. '
+ 'This value will be increased in case of conflict.')
]
validation_group = cfg.OptGroup(name='validation',
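A deployment exercising the new network options might set, for example (the
values are illustrative; "public" matches the .zuul.yaml job above):

    [network-feature-enabled]
    qos_placement_physnet = public
    provider_net_base_segmentation_id = 3000
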
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 87d7e76..1252f09 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -274,6 +274,10 @@
tenant_network = self.get_tenant_network()
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
body, _ = compute.create_test_server(
clients,
tenant_network=tenant_network,
@@ -307,6 +311,11 @@
'imageRef': imageRef,
'volume_type': volume_type,
'size': size}
+
+ if CONF.compute.compute_volume_common_az:
+ kwargs.setdefault('availability_zone',
+ CONF.compute.compute_volume_common_az)
+
volume = self.volumes_client.create_volume(**kwargs)['volume']
self.addCleanup(self.volumes_client.wait_for_resource_deletion,
@@ -826,13 +835,15 @@
def _create_network(self, networks_client=None,
tenant_id=None,
namestart='network-smoke-',
- port_security_enabled=True):
+ port_security_enabled=True, **net_dict):
if not networks_client:
networks_client = self.networks_client
if not tenant_id:
tenant_id = networks_client.tenant_id
name = data_utils.rand_name(namestart)
network_kwargs = dict(name=name, tenant_id=tenant_id)
+ if net_dict:
+ network_kwargs.update(net_dict)
# Neutron disables port security by default so we have to check the
# config before trying to create the network with port_security_enabled
if CONF.network_feature_enabled.port_security:
@@ -1257,7 +1268,7 @@
def create_networks(self, networks_client=None,
routers_client=None, subnets_client=None,
tenant_id=None, dns_nameservers=None,
- port_security_enabled=True):
+ port_security_enabled=True, **net_dict):
"""Create a network with a subnet connected to a router.
The baremetal driver is a special case since all nodes are
@@ -1265,6 +1276,11 @@
:param tenant_id: id of tenant to create resources in.
:param dns_nameservers: list of dns servers to send to subnet.
+ :param port_security_enabled: whether or not port_security is enabled
+ :param net_dict: a dict containing experimental network information in
+ a form like this: {'provider:network_type': 'vlan',
+ 'provider:physical_network': 'foo',
+ 'provider:segmentation_id': '42'}
:returns: network, subnet, router
"""
if CONF.network.shared_physical_network:
@@ -1284,7 +1300,8 @@
network = self._create_network(
networks_client=networks_client,
tenant_id=tenant_id,
- port_security_enabled=port_security_enabled)
+ port_security_enabled=port_security_enabled,
+ **net_dict)
router = self._get_router(client=routers_client,
tenant_id=tenant_id)
subnet_kwargs = dict(network=network,
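The new ``**net_dict`` pass-through lets scenario tests request provider
networks; a hedged usage sketch, with values mirroring the docstring above:

    network, subnet, router = self.create_networks(
        networks_client=self.os_admin.networks_client,
        **{'provider:network_type': 'vlan',
           'provider:physical_network': 'foo',
           'provider:segmentation_id': '42'})
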
diff --git a/tempest/scenario/test_minbw_allocation_placement.py b/tempest/scenario/test_minbw_allocation_placement.py
new file mode 100644
index 0000000..e7085f6
--- /dev/null
+++ b/tempest/scenario/test_minbw_allocation_placement.py
@@ -0,0 +1,195 @@
+# Copyright (c) 2019 Ericsson
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.scenario import manager
+
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class MinBwAllocationPlacementTest(manager.NetworkScenarioTest):
+ credentials = ['primary', 'admin']
+ required_extensions = ['port-resource-request',
+ 'qos',
+ 'qos-bw-minimum-ingress']
+ # The QoS minimum bandwidth allocation feature in the Placement API
+ # depends on granular resource requests to GET /allocation_candidates
+ # and on allocation candidates with nested resource providers (see:
+ # https://specs.openstack.org/openstack/nova-specs/specs/stein/
+ # approved/bandwidth-resource-provider.html#rest-api-impact), which
+ # means that the minimum placement microversion is 1.29.
+ placement_min_microversion = '1.29'
+ placement_max_microversion = 'latest'
+
+ # Nova refuses to boot a VM with a port that has the resource_request
+ # field before microversion 2.72.
+ compute_min_microversion = '2.72'
+ compute_max_microversion = 'latest'
+
+ INGRESS_RESOURCE_CLASS = "NET_BW_IGR_KILOBIT_PER_SEC"
+ INGRESS_DIRECTION = 'ingress'
+
+ SMALLEST_POSSIBLE_BW = 1
+ # For any realistic inventory value (that is, inventory != MAX_INT), an
+ # allocation candidate request of MAX_INT is expected to be rejected, see:
+ # https://github.com/openstack/placement/blob/master/placement/
+ # db/constants.py#L16
+ PLACEMENT_MAX_INT = 0x7FFFFFFF
+
+ @classmethod
+ def setup_clients(cls):
+ super(MinBwAllocationPlacementTest, cls).setup_clients()
+ cls.placement_client = cls.os_admin.placement_client
+ cls.networks_client = cls.os_admin.networks_client
+ cls.subnets_client = cls.os_admin.subnets_client
+ cls.routers_client = cls.os_admin.routers_client
+ cls.qos_client = cls.os_admin.qos_client
+ cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
+
+ @classmethod
+ def skip_checks(cls):
+ super(MinBwAllocationPlacementTest, cls).skip_checks()
+ if not CONF.network_feature_enabled.qos_placement_physnet:
+ msg = "Skipped as no physnet is available in config for " \
+ "placement based QoS allocation."
+ raise cls.skipException(msg)
+
+ def _create_policy_and_min_bw_rule(self, name_prefix, min_kbps):
+ policy = self.qos_client.create_qos_policy(
+ name=data_utils.rand_name(name_prefix),
+ shared=True)['policy']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.qos_client.delete_qos_policy, policy['id'])
+ rule = self.qos_min_bw_client.create_minimum_bandwidth_rule(
+ policy['id'],
+ **{
+ 'min_kbps': min_kbps,
+ 'direction': self.INGRESS_DIRECTION
+ })['minimum_bandwidth_rule']
+ self.addCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ self.qos_min_bw_client.delete_minimum_bandwidth_rule, policy['id'],
+ rule['id'])
+
+ return policy
+
+ def _create_qos_policies(self):
+ self.qos_policy_valid = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_valid',
+ min_kbps=self.SMALLEST_POSSIBLE_BW)
+ self.qos_policy_not_valid = self._create_policy_and_min_bw_rule(
+ name_prefix='test_policy_not_valid',
+ min_kbps=self.PLACEMENT_MAX_INT)
+
+ def _create_network_and_qos_policies(self):
+ physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+ base_segm = \
+ CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+ self.prov_network, _, _ = self.create_networks(
+ networks_client=self.networks_client,
+ routers_client=self.routers_client,
+ subnets_client=self.subnets_client,
+ **{
+ 'shared': True,
+ 'provider:network_type': 'vlan',
+ 'provider:physical_network': physnet_name,
+ 'provider:segmentation_id': base_segm
+ })
+
+ self._create_qos_policies()
+
+ def _check_if_allocation_is_possible(self):
+ alloc_candidates = self.placement_client.list_allocation_candidates(
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+ self.SMALLEST_POSSIBLE_BW))
+ if len(alloc_candidates['provider_summaries']) == 0:
+ self.fail('No allocation candidates are available for %s:%s' %
+ (self.INGRESS_RESOURCE_CLASS, self.SMALLEST_POSSIBLE_BW))
+
+ # Just to be sure, check with an impossibly high (placement max_int)
+ # allocation.
+ alloc_candidates = self.placement_client.list_allocation_candidates(
+ resources1='%s:%s' % (self.INGRESS_RESOURCE_CLASS,
+ self.PLACEMENT_MAX_INT))
+ if len(alloc_candidates['provider_summaries']) != 0:
+ self.fail('For %s:%s there should be no available candidate!' %
+ (self.INGRESS_RESOURCE_CLASS, self.PLACEMENT_MAX_INT))
+
+ @decorators.idempotent_id('78625d92-212c-400e-8695-dd51706858b8')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_qos_min_bw_allocation_basic(self):
+ """Basic scenario with QoS min bw allocation in placement.
+
+ Steps:
+ * Create prerequisites:
+ ** VLAN type provider network with subnet.
+ ** valid QoS policy with a minimum bandwidth rule with min_kbps=1
+ (This is a simplification to skip the checks in placement for
+ detecting the resource provider tree and inventories; if any
+ bandwidth resource is available, 1 kbps will be available.)
+ ** invalid QoS policy with a minimum bandwidth rule with
+ min_kbps=max integer from placement (again a simplification, to
+ avoid detection of RP trees and inventories, as placement will
+ reject such a big allocation).
+ * Create port with valid QoS policy, and boot VM with that, it should
+ pass.
+ * Create port with invalid QoS policy, and try to boot VM with that,
+ it should fail.
+ """
+
+ self._check_if_allocation_is_possible()
+
+ self._create_network_and_qos_policies()
+
+ valid_port = self.create_port(
+ self.prov_network['id'], qos_policy_id=self.qos_policy_valid['id'])
+
+ server1 = self.create_server(
+ networks=[{'port': valid_port['id']}])
+ allocations = self.placement_client.list_allocations(server1['id'])
+
+ self.assertGreater(len(allocations['allocations']), 0)
+ bw_resource_in_alloc = False
+ for rp, resources in allocations['allocations'].items():
+ if self.INGRESS_RESOURCE_CLASS in resources['resources']:
+ bw_resource_in_alloc = True
+ self.assertTrue(bw_resource_in_alloc)
+
+ # boot another vm with max int bandwidth
+ not_valid_port = self.create_port(
+ self.prov_network['id'],
+ qos_policy_id=self.qos_policy_not_valid['id'])
+ server2 = self.create_server(
+ wait_until=None,
+ networks=[{'port': not_valid_port['id']}])
+ waiters.wait_for_server_status(
+ client=self.os_primary.servers_client, server_id=server2['id'],
+ status='ERROR', ready_wait=False, raise_on_error=False)
+ allocations = self.placement_client.list_allocations(server2['id'])
+
+ self.assertEqual(0, len(allocations['allocations']))
+ server2 = self.servers_client.show_server(server2['id'])
+ self.assertIn('fault', server2['server'])
+ self.assertIn('No valid host', server2['server']['fault']['message'])
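
As a quick sanity check on the constant used in the new test, PLACEMENT_MAX_INT
is the signed 32-bit maximum from placement's db/constants.py:

    >>> 0x7FFFFFFF == 2**31 - 1 == 2147483647
    True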
diff --git a/tempest/tests/cmd/test_cleanup.py b/tempest/tests/cmd/test_cleanup.py
index b47da0b..1618df9 100644
--- a/tempest/tests/cmd/test_cleanup.py
+++ b/tempest/tests/cmd/test_cleanup.py
@@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import mock
+
from tempest.cmd import cleanup
from tempest.tests import base
@@ -24,3 +26,17 @@
test_saved_json = 'tempest/tests/cmd/test_saved_state_json.json'
# test if the file is loaded without any issues/exceptions
c._load_json(test_saved_json)
+
+ @mock.patch('tempest.cmd.cleanup.TempestCleanup.init')
+ @mock.patch('tempest.cmd.cleanup.TempestCleanup._cleanup')
+ def test_take_action_got_exception(self, mock_cleanup, mock_init):
+ c = cleanup.TempestCleanup(None, None, 'test')
+ c.GOT_EXCEPTIONS.append('exception')
+ mock_cleanup.return_value = True
+ mock_init.return_value = True
+ try:
+ c.take_action(mock.Mock())
+ except Exception as exc:
+ self.assertEqual(str(exc), '[\'exception\']')
+ return
+ assert False
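
An equivalent, arguably tidier form of the try/except/assert-False pattern in
the new test would use testtools' ExpectedException context manager (a sketch,
not part of the change):

    import testtools

    with testtools.ExpectedException(Exception):
        c.take_action(mock.Mock())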
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 3262b1c..de0dbec 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -19,6 +19,7 @@
from tempest import clients
from tempest.cmd import cleanup_service
from tempest import config
+from tempest.lib import exceptions
from tempest.tests import base
from tempest.tests import fake_config
from tempest.tests.lib import fake_credentials
@@ -27,13 +28,24 @@
class TestBaseService(base.TestCase):
+ class TestException(cleanup_service.BaseService):
+ def delete(self):
+ raise exceptions.NotImplemented
+
+ def dry_run(self):
+ raise exceptions.NotImplemented
+
+ def save_state(self):
+ raise exceptions.NotImplemented
+
def test_base_service_init(self):
kwargs = {'data': {'data': 'test'},
'is_dry_run': False,
'saved_state_json': {'saved': 'data'},
'is_preserve': False,
'is_save_state': True,
- 'tenant_id': 'project_id'}
+ 'tenant_id': 'project_id',
+ 'got_exceptions': []}
base = cleanup_service.BaseService(kwargs)
self.assertEqual(base.data, kwargs['data'])
self.assertFalse(base.is_dry_run)
@@ -41,6 +53,28 @@
self.assertFalse(base.is_preserve)
self.assertTrue(base.is_save_state)
self.assertEqual(base.tenant_filter['project_id'], kwargs['tenant_id'])
+ self.assertEqual(base.got_exceptions, kwargs['got_exceptions'])
+
+ def test_not_implemented_ex(self):
+ kwargs = {'data': {'data': 'test'},
+ 'is_dry_run': False,
+ 'saved_state_json': {'saved': 'data'},
+ 'is_preserve': False,
+ 'is_save_state': False,
+ 'tenant_id': 'project_id',
+ 'got_exceptions': []}
+ base = self.TestException(kwargs)
+ # delete
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 1)
+ # save_state
+ base.save_state = True
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 2)
+ # dry_run
+ base.is_dry_run = True
+ base.run()
+ self.assertEqual(len(base.got_exceptions), 3)
class MockFunctionsBase(base.TestCase):
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 55cda97..9ebeee2 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -25,6 +25,7 @@
import json
import re
+import sys
try:
# For Python 3.0 and later
@@ -35,6 +36,30 @@
import urllib2 as urllib
from urllib2 import HTTPError
+# List of projects whose tempest plugins have been stale or unmaintained
+# for a long time (6 months or more)
+# TODO(masayukig): Some of these can be removed from BLACKLIST in the future
+# when the patches are merged.
+BLACKLIST = [
+ 'openstack/barbican-tempest-plugin',
+ # https://review.opendev.org/#/c/634631/
+ 'openstack/cyborg-tempest-plugin', # https://review.opendev.org/659687
+ 'x/gce-api', # It looks like gce-api doesn't support python3 yet.
+ 'x/intel-nfv-ci-tests', # https://review.opendev.org/#/c/634640/
+ 'x/networking-ansible', # https://review.opendev.org/#/c/634647/
+ 'openstack/networking-generic-switch',
+ # https://review.opendev.org/#/c/634846/
+ 'openstack/networking-l2gw-tempest-plugin',
+ # https://review.opendev.org/#/c/635093/
+ 'openstack/networking-midonet', # https://review.opendev.org/#/c/635096/
+ 'x/networking-plumgrid', # https://review.opendev.org/#/c/635096/
+ 'x/networking-spp', # https://review.opendev.org/#/c/635098/
+ 'openstack/neutron-dynamic-routing',
+ # https://review.opendev.org/#/c/637718/
+ 'openstack/neutron-vpnaas', # https://review.opendev.org/#/c/637719/
+ 'x/nova-lxd', # https://review.opendev.org/#/c/638334/
+ 'x/valet', # https://review.opendev.org/#/c/638339/
+]
url = 'https://review.opendev.org/projects/'
@@ -66,6 +91,13 @@
False
+if len(sys.argv) > 1 and sys.argv[1] == 'blacklist':
+ for black_plugin in BLACKLIST:
+ print(black_plugin)
+ # Only the BLACKLIST is needed when this `blacklist` option is used,
+ # so exit here.
+ sys.exit()
+
r = urllib.urlopen(url)
# Gerrit prepends 4 garbage octets to the JSON, in order to counter
# cross-site scripting attacks. Therefore we must discard it so the
diff --git a/tools/generate-tempest-plugins-list.sh b/tools/generate-tempest-plugins-list.sh
index c0d47a1..6e473b7 100755
--- a/tools/generate-tempest-plugins-list.sh
+++ b/tools/generate-tempest-plugins-list.sh
@@ -61,20 +61,37 @@
printf " ===\n"
}
+function print_plugin_table() {
+ title_underline ${name_col_len}
+ printf "%-3s %-${name_col_len}s %s\n" "SR" "Plugin Name" "URL"
+ title_underline ${name_col_len}
+
+ i=0
+ for plugin in $1; do
+ i=$((i+1))
+ giturl="https://opendev.org/${plugin}"
+ printf "%-3s %-${name_col_len}s %s\n" "$i" "${plugin}" "${giturl}"
+ done
+
+ title_underline ${name_col_len}
+}
+
printf "\n\n"
-title_underline ${name_col_len}
-printf "%-3s %-${name_col_len}s %s\n" "SR" "Plugin Name" "URL"
-title_underline ${name_col_len}
+print_plugin_table "${sorted_plugins}"
-i=0
-for plugin in ${sorted_plugins}; do
- i=$((i+1))
- giturl="https://opendev.org/${plugin}"
- gitlink="https://opendev.org/cgit/${plugin}"
- printf "%-3s %-${name_col_len}s %s\n" "$i" "${plugin}" "\`${giturl} <${gitlink}>\`__"
-done
+printf "\n\n"
-title_underline ${name_col_len}
+# Print BLACKLIST
+if [[ -r doc/source/data/tempest-blacklisted-plugins-registry.header ]]; then
+ cat doc/source/data/tempest-blacklisted-plugins-registry.header
+fi
+
+blacklist=$(python tools/generate-tempest-plugins-list.py blacklist)
+name_col_len=$(echo "${blacklist}" | wc -L)
+name_col_len=$(( name_col_len + 20 ))
+
+printf "\n\n"
+print_plugin_table "${blacklist}"
printf "\n\n"
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index d38687e..b652369 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -43,42 +43,8 @@
# retrieve a list of projects having tempest plugins
PROJECT_LIST="$(python tools/generate-tempest-plugins-list.py)"
-# List of projects having tempest plugin stale or unmaintained for a long time
-# (6 months or more)
-# TODO(masayukig): Some of these can be removed from BLACKLIST in the future.
-# barbican-tempest-plugin: https://review.opendev.org/#/c/634631/
-# cyborg-tempest-plugin: https://review.opendev.org/659687
-# gce-api: It looks gce-api doesn't support python3 yet.
-# intel-nfv-ci-tests: https://review.opendev.org/#/c/634640/
-# networking-ansible: https://review.opendev.org/#/c/634647/
-# networking-bgpvpn: https://review.opendev.org/#/c/662142/
-# networking-generic-switch: https://review.opendev.org/#/c/634846/
-# networking-l2gw-tempest-plugin: https://review.opendev.org/#/c/635093/
-# networking-midonet: https://review.opendev.org/#/c/635096/
-# networking-plumgrid: https://review.opendev.org/#/c/635096/
-# networking-spp: https://review.opendev.org/#/c/635098/
-# neutron-dynamic-routing: https://review.opendev.org/#/c/637718/
-# neutron-vpnaas: https://review.opendev.org/#/c/637719/
-# nova-lxd: https://review.opendev.org/#/c/638334/
-# valet: https://review.opendev.org/#/c/638339/
-BLACKLIST="
-openstack/barbican-tempest-plugin
-openstack/cyborg-tempest-plugin
-x/gce-api
-x/intel-nfv-ci-tests
-x/networking-ansible
-openstack/networking-bgpvpn
-openstack/networking-generic-switch
-openstack/networking-l2gw-tempest-plugin
-openstack/networking-midonet
-x/networking-plumgrid
-x/networking-spp
-openstack/neutron-dynamic-routing
-openstack/neutron-vpnaas
-x/nova-lxd
-x/valet
-"
+BLACKLIST="$(python tools/generate-tempest-plugins-list.py blacklist)"
# Function to clone project using zuul-cloner or from git
function clone_project() {