Merge "Fix tests for call_until_true function."
diff --git a/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
index 406e282..6c44ba0 100644
--- a/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
+++ b/releasenotes/notes/add-show-quota-details-api-to-network-quotas-client-3fffd302cc5d335f.yaml
@@ -3,5 +3,5 @@
- |
Add extension API show quota details to network quotas_client library.
This feature enables the possibility to show a quota set for a specified
- project that includes the quota’s used, limit and reserved counts for per
- resource
+ project that includes the quota's used, limit and reserved counts per
+ resource.
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index b377c0c..d0c1973 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -108,6 +108,35 @@
raise lib_exc.InvalidConfiguration(
'Either api_v1 or api_v2 must be True in '
'[image-feature-enabled].')
+ cls._check_depends_on_nova_network()
+
+ @classmethod
+ def _check_depends_on_nova_network(cls):
+ # Since nova-network APIs were removed from Nova in the Rocky release,
+ # determine, based on the max version from the version document, if
+ # the compute API is >Queens and if so, skip tests that rely on
+ # nova-network.
+ if not getattr(cls, 'depends_on_nova_network', False):
+ return
+ versions = cls.versions_client.list_versions()['versions']
+ # Find the v2.1 version which will tell us our max version for the
+ # compute API we're testing against.
+ for version in versions:
+ if version['id'] == 'v2.1':
+ max_version = api_version_request.APIVersionRequest(
+ version['version'])
+ break
+ else:
+ LOG.warning(
+ 'Unable to determine max v2.1 compute API version: %s',
+ versions)
+ return
+
+ # The max compute API version in Queens is 2.60 so we cap
+ # at that version.
+ queens = api_version_request.APIVersionRequest('2.60')
+ if max_version > queens:
+ raise cls.skipException('nova-network is gone')
@classmethod
def resource_setup(cls):
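For context, a minimal sketch (not part of the patch) of the microversion comparison that the new _check_depends_on_nova_network() helper relies on. It assumes tempest's api_version_request module is importable; the sample version strings are invented for illustration.

    from tempest.lib.common import api_version_request

    # Queens capped the compute API at microversion 2.60.
    queens_max = api_version_request.APIVersionRequest('2.60')

    # Hypothetical max versions a cloud might report in its v2.1 version doc.
    for reported in ('2.53', '2.60', '2.65'):
        max_version = api_version_request.APIVersionRequest(reported)
        # Anything newer than Queens means nova-network is gone, so tests
        # that depend on it would be skipped.
        print(reported, 'skip' if max_version > queens_max else 'run')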
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 3a85a86..ff8ed61 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -13,7 +13,6 @@
# under the License.
import json
-import time
from oslo_log import log as logging
@@ -80,9 +79,11 @@
return True
cmd = 'curl %s' % md_url
md_json = ssh_client.exec_command(cmd)
- verify_method(md_json)
- return True
-
+ return verify_method(md_json)
+        # NOTE(gmann): Keep refreshing the metadata until the metadata cache
+        # is updated, retrying every build_interval seconds for up to
+        # build_timeout. get_and_verify_metadata() above returns True only
+        # once all metadata verification has passed.
if not test_utils.call_until_true(get_and_verify_metadata,
CONF.compute.build_timeout,
CONF.compute.build_interval):
@@ -122,16 +123,20 @@
if d['mac'] == self.net_2_200_mac:
self.assertEqual(d['tags'], ['net-2-200'])
- # A hypervisor may present multiple paths to a tagged disk, so
- # there may be duplicated tags in the metadata, use set() to
- # remove duplicated tags.
- # Some hypervisors might report devices with no tags as well.
- found_devices = [d['tags'][0] for d in md_dict['devices']
- if d.get('tags')]
+ # A hypervisor may present multiple paths to a tagged disk, so
+ # there may be duplicated tags in the metadata, use set() to
+ # remove duplicated tags.
+ # Some hypervisors might report devices with no tags as well.
+ found_devices = [d['tags'][0] for d in md_dict['devices']
+ if d.get('tags')]
-        self.assertEqual(set(found_devices), set(['port-1', 'port-2',
-                                                  'net-1', 'net-2-100',
-                                                  'net-2-200', 'boot',
-                                                  'other']))
+        try:
+            self.assertEqual(set(found_devices), set(['port-1', 'port-2',
+                                                      'net-1', 'net-2-100',
+                                                      'net-2-200', 'boot',
+                                                      'other']))
+            return True
+        except Exception:
+            return False
@decorators.idempotent_id('a2e65a6c-66f1-4442-aaa8-498c31778d96')
@utils.services('network', 'volume', 'image')
@@ -302,12 +307,21 @@
def verify_device_metadata(self, md_json):
md_dict = json.loads(md_json)
- found_devices = [d['tags'][0] for d in md_dict['devices']]
- self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+ found_devices = [d['tags'][0] for d in md_dict['devices']
+ if d.get('tags')]
+ try:
+ self.assertItemsEqual(found_devices, ['nic-tag', 'volume-tag'])
+ return True
+ except Exception:
+ return False
def verify_empty_devices(self, md_json):
md_dict = json.loads(md_json)
- self.assertEmpty(md_dict['devices'])
+ try:
+ self.assertEmpty(md_dict['devices'])
+ return True
+ except Exception:
+ return False
@decorators.idempotent_id('3e41c782-2a89-4922-a9d2-9a188c4e7c7c')
@utils.services('network', 'volume', 'image')
@@ -354,10 +368,6 @@
server=server,
servers_client=self.servers_client)
- # NOTE(artom) The newly attached tagged nic won't appear in the
- # metadata until the cache is refreshed. We wait 16 seconds since the
- # default cache expiry is 15 seconds.
- time.sleep(16)
self.verify_metadata_from_api(server, ssh_client,
self.verify_device_metadata)
@@ -370,7 +380,5 @@
waiters.wait_for_interface_detach(self.interfaces_client,
server['id'],
interface['port_id'])
- # NOTE(artom) More waiting until metadata cache is refreshed.
- time.sleep(16)
self.verify_metadata_from_api(server, ssh_client,
self.verify_empty_devices)
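A minimal sketch (not part of the patch) of the call_until_true() contract that motivates returning a boolean from the verify helpers: the callable is invoked repeatedly until it returns True or the duration expires, so an assertion-based verifier has to translate failures into False. The stand-in verifier below simply succeeds on its third call.

    import itertools

    from tempest.lib.common.utils import test_utils

    _polls = itertools.count()

    def metadata_ready():
        # Stand-in for get_and_verify_metadata(): pretend the metadata
        # cache refreshes on the third poll.
        return next(_polls) >= 2

    # Poll once a second for up to ten seconds; the patch uses
    # CONF.compute.build_interval and CONF.compute.build_timeout instead.
    assert test_utils.call_until_true(metadata_ready, 10, 1)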
diff --git a/tempest/api/compute/servers/test_virtual_interfaces.py b/tempest/api/compute/servers/test_virtual_interfaces.py
index 90f04ff..5fb1711 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces.py
@@ -25,8 +25,12 @@
CONF = config.CONF
+# TODO(mriedem): Remove this test class once the nova queens branch goes into
+# extended maintenance mode.
class VirtualInterfacesTestJSON(base.BaseV2ComputeTest):
+ depends_on_nova_network = True
+
@classmethod
def setup_credentials(cls):
# This test needs a network and a subnet
@@ -50,8 +54,6 @@
# for a given server_id
if CONF.service_available.neutron:
- # TODO(mriedem): After a microversion implements the API for
- # neutron, a 400 should be a failure for nova-network and neutron.
with testtools.ExpectedException(exceptions.BadRequest):
self.client.list_virtual_interfaces(self.server['id'])
else:
diff --git a/tempest/api/compute/servers/test_virtual_interfaces_negative.py b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
index c4e2400..ec4d7a8 100644
--- a/tempest/api/compute/servers/test_virtual_interfaces_negative.py
+++ b/tempest/api/compute/servers/test_virtual_interfaces_negative.py
@@ -20,8 +20,12 @@
from tempest.lib import exceptions as lib_exc
+# TODO(mriedem): Remove this test class once the nova queens branch goes into
+# extended maintenance mode.
class VirtualInterfacesNegativeTestJSON(base.BaseV2ComputeTest):
+ depends_on_nova_network = True
+
@classmethod
def setup_credentials(cls):
# For this test no network resources are needed
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index c0891e4..c5c70d2 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -29,6 +29,10 @@
if not CONF.volume_feature_enabled.multi_backend:
raise cls.skipException("Cinder multi-backend feature disabled")
+ if len(set(CONF.volume.backend_names)) < 2:
+ raise cls.skipException("Requires at least two different "
+ "backend names")
+
@classmethod
def resource_setup(cls):
super(VolumeMultiBackendTest, cls).resource_setup()
@@ -41,9 +45,6 @@
# Volume/Type creation (uses volume_backend_name)
# It is not allowed to create the same backend name twice
- if len(backend_names) < 2:
- raise cls.skipException("Requires at least two different "
- "backend names")
for backend_name in backend_names:
# Volume/Type creation (uses backend_name)
cls._create_type_and_volume(backend_name, False)
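A small sketch (not part of the patch) of why the relocated skip check deduplicates with set(): a deployment that lists the same backend name twice still cannot exercise multi-backend placement, so the class should be skipped. The sample names are invented.

    # Hypothetical CONF.volume.backend_names values.
    backend_names = ['lvmdriver-1', 'lvmdriver-1']
    if len(set(backend_names)) < 2:
        print('skip: requires at least two different backend names')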
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 4108da5..75e81b7 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common import waiters
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -43,6 +44,9 @@
transfer = self.client.create_volume_transfer(
volume_id=volume['id'])['transfer']
transfer_id = transfer['id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_volume_transfer,
+ transfer_id)
auth_key = transfer['auth_key']
waiters.wait_for_volume_resource_status(
self.volumes_client, volume['id'], 'awaiting-transfer')
@@ -81,6 +85,9 @@
# Create a volume transfer
transfer_id = self.client.create_volume_transfer(
volume_id=volume['id'])['transfer']['id']
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_volume_transfer,
+ transfer_id)
waiters.wait_for_volume_resource_status(
self.volumes_client, volume['id'], 'awaiting-transfer')
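A minimal sketch (not part of the patch) of the cleanup pattern being added here: call_and_ignore_notfound_exc() swallows only NotFound, so a transfer that was already accepted (and therefore deleted) does not break teardown, while any other error still surfaces. The client class below is a stand-in, not a real tempest client.

    from tempest.lib import exceptions as lib_exc
    from tempest.lib.common.utils import test_utils

    class FakeTransfersClient(object):
        def delete_volume_transfer(self, transfer_id):
            # Pretend the transfer was already consumed by an accept call.
            raise lib_exc.NotFound('transfer %s not found' % transfer_id)

    client = FakeTransfersClient()
    # Does not raise: NotFound is ignored.
    test_utils.call_and_ignore_notfound_exc(
        client.delete_volume_transfer, 'fake-transfer-id')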
diff --git a/tempest/clients.py b/tempest/clients.py
index 707127c..2a07be9 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -241,32 +241,32 @@
# if only api_v3 is enabled, all these clients should be available
if (CONF.volume_feature_enabled.api_v2 or
CONF.volume_feature_enabled.api_v3):
- self.backups_v2_client = self.volume_v2.BackupsClient()
+ self.backups_v2_client = self.volume_v3.BackupsClient()
self.encryption_types_v2_client = \
- self.volume_v2.EncryptionTypesClient()
+ self.volume_v3.EncryptionTypesClient()
self.snapshot_manage_v2_client = \
- self.volume_v2.SnapshotManageClient()
- self.snapshots_v2_client = self.volume_v2.SnapshotsClient()
+ self.volume_v3.SnapshotManageClient()
+ self.snapshots_v2_client = self.volume_v3.SnapshotsClient()
self.volume_capabilities_v2_client = \
- self.volume_v2.CapabilitiesClient()
- self.volume_manage_v2_client = self.volume_v2.VolumeManageClient()
- self.volume_qos_v2_client = self.volume_v2.QosSpecsClient()
- self.volume_services_v2_client = self.volume_v2.ServicesClient()
- self.volume_types_v2_client = self.volume_v2.TypesClient()
- self.volume_hosts_v2_client = self.volume_v2.HostsClient()
- self.volume_quotas_v2_client = self.volume_v2.QuotasClient()
+ self.volume_v3.CapabilitiesClient()
+ self.volume_manage_v2_client = self.volume_v3.VolumeManageClient()
+ self.volume_qos_v2_client = self.volume_v3.QosSpecsClient()
+ self.volume_services_v2_client = self.volume_v3.ServicesClient()
+ self.volume_types_v2_client = self.volume_v3.TypesClient()
+ self.volume_hosts_v2_client = self.volume_v3.HostsClient()
+ self.volume_quotas_v2_client = self.volume_v3.QuotasClient()
self.volume_quota_classes_v2_client = \
- self.volume_v2.QuotaClassesClient()
+ self.volume_v3.QuotaClassesClient()
self.volume_scheduler_stats_v2_client = \
- self.volume_v2.SchedulerStatsClient()
+ self.volume_v3.SchedulerStatsClient()
self.volume_transfers_v2_client = \
- self.volume_v2.TransfersClient()
+ self.volume_v3.TransfersClient()
self.volume_v2_availability_zone_client = \
- self.volume_v2.AvailabilityZoneClient()
- self.volume_v2_limits_client = self.volume_v2.LimitsClient()
- self.volumes_v2_client = self.volume_v2.VolumesClient()
+ self.volume_v3.AvailabilityZoneClient()
+ self.volume_v2_limits_client = self.volume_v3.LimitsClient()
+ self.volumes_v2_client = self.volume_v3.VolumesClient()
self.volumes_v2_extension_client = \
- self.volume_v2.ExtensionsClient()
+ self.volume_v3.ExtensionsClient()
# Set default client for users that don't need explicit version
self.volumes_client_latest = self.volumes_v2_client
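A short usage sketch (not part of the patch) showing that the v2-named attributes keep working for callers while now being backed by the volume v3 service clients; it assumes a configured tempest environment with admin credentials available.

    from tempest import clients
    from tempest.common import credentials_factory as credentials

    manager = clients.Manager(
        credentials.get_configured_admin_credentials())
    # The legacy attribute name is unchanged, but the client class now
    # comes from the volume v3 package.
    print(type(manager.volumes_v2_client).__module__)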
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 2d024e9..1564f25 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -68,6 +68,9 @@
waiters.wait_for_server_termination(self.servers_client, server['id'])
@decorators.idempotent_id('557cd2c2-4eb8-4dce-98be-f86765ff311b')
+    # Note: This test is skipped unless 'public_network_id' is set, because
+    # that option is used by create_floating_ip(), which is called from
+    # get_server_ip().
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,