Reorder volume creation in device-rescue test to overlap with server build
Change-Id: I266b9775caf0439937e1889ad9afedb1c1eaadc9
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index b1bfac7..5d172c7 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -524,6 +524,8 @@
"""Create a volume and wait for it to become 'available'.
:param image_ref: Specify an image id to create a bootable volume.
+ :param wait_for_available: Wait until the volume becomes available
+ before returning
:param kwargs: other parameters to create volume.
:returns: The available volume.
"""
@@ -534,6 +536,7 @@
kwargs['display_name'] = vol_name
if image_ref is not None:
kwargs['imageRef'] = image_ref
+ wait = kwargs.pop('wait_for_available', True)
if CONF.volume.volume_type and 'volume_type' not in kwargs:
# If volume_type is not provided in config then no need to
# add a volume type and
@@ -549,8 +552,9 @@
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.volumes_client.delete_volume,
volume['id'])
- waiters.wait_for_volume_resource_status(cls.volumes_client,
- volume['id'], 'available')
+ if wait:
+ waiters.wait_for_volume_resource_status(cls.volumes_client,
+ volume['id'], 'available')
return volume
def _detach_volume(self, server, volume):
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 716ecda..97c2774 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -239,13 +239,15 @@
# after unrescue the server. Due to that we need to make
# server SSHable before it try to detach, more details are
# in bug#1960346
+ volume = self.create_volume(wait_for_available=False)
validation_resources = self.get_class_validation_resources(
self.os_primary)
server, rescue_image_id = self._create_server_and_rescue_image(
hw_rescue_device='disk', hw_rescue_bus='virtio', validatable=True,
validation_resources=validation_resources, wait_until="SSHABLE")
server = self.servers_client.show_server(server['id'])['server']
- volume = self.create_volume()
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
self.attach_volume(server, volume)
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')