Wait for actual volume detach in multiattach tests
catching "in-use" volume transitioning back to "in-use" is error prone,
the volume might've been not even started to be detached,
but the test logic already may consider it as 'detached' and proceed.
As a result, such "in-use" volume might fail to be deleted later.
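
To make the race concrete, here is a minimal illustrative sketch (not
tempest code; show_volume below is a hypothetical stand-in for the
volumes client) of a status-based wait. A multi-attached volume reports
'in-use' both before and after one attachment is removed, so the wait
returns immediately:

    import time


    def wait_for_volume_status(show_volume, volume_id, status,
                               timeout=60, interval=1):
        # Poll the show_volume() callable until the volume reports the
        # requested status.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if show_volume(volume_id)['volume']['status'] == status:
                # For a multi-attached volume this is already true before
                # the detach has even started, so the wait is a no-op.
                return
            time.sleep(interval)
        raise TimeoutError('volume %s never reached status %s'
                           % (volume_id, status))
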
Related-Issue: PRODX-51560
Change-Id: Ic6672846f4dd3ccccdcb417a0b50b1d542e1813b
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 5e230c6..73ea12f 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -375,8 +375,19 @@
volume = self.volumes_client.show_volume(volume_id)['volume']
attachments = volume['attachments']
wait_status = 'in-use' if len(attachments) > 1 else 'available'
+ attachment_id = [
+ attachment["attachment_id"] for attachment in attachments
+ if attachment["server_id"] == server_id
+ ][0]
# Now detach the volume from the given server.
self.servers_client.detach_volume(server_id, volume_id)
+ # Wait for both Nova and Cinder to finish detaching the volume.
+ waiters.wait_for_volume_attachment_remove_from_server(
+ self.servers_client, server_id, volume_id
+ )
+ waiters.wait_for_volume_attachment_remove(
+ self.volumes_client, volume_id, attachment_id
+ )
# Now wait for the volume status to change.
waiters.wait_for_volume_resource_status(
self.volumes_client, volume_id, wait_status)
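
For context, the condition the two new waiters poll for amounts roughly
to the check below (a sketch under assumptions, not the tempest
implementation; the 'volumeAttachments'/'volumeId' response keys are
assumed from the Nova API, the rest follows the fields used in the hunk
above):

    def volume_fully_detached(servers_client, volumes_client,
                              server_id, volume_id, attachment_id):
        # The volume must be gone from the server's attachment list
        # (Nova side) ...
        nova_volumes = [
            a['volumeId'] for a in servers_client.list_volume_attachments(
                server_id)['volumeAttachments']
        ]
        # ... and the attachment record gone from the volume (Cinder side).
        cinder_attachments = [
            a['attachment_id'] for a in volumes_client.show_volume(
                volume_id)['volume']['attachments']
        ]
        return (volume_id not in nova_volumes
                and attachment_id not in cinder_attachments)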