fix patrole-multinode-admin non-voting gate

The 'patrole-multinode-admin' non-voting gate seems to consistently
fail in ServerVolumeAttachmentRbacTest's tearDownClass. The failure
comes from 'test_update_volume_attachment' with the following error
message:

Invalid volume: Volume status must be available or error or error_restoring
or error_extending or error_managing and must not be migrating, attached,
belong to a group, have snapshots or be disassociated from snapshots after
volume transfer.

The fix is to detach the volume and wait until the detached volume reaches
the 'available' state.

Change-Id: I195115c0d61d15a62cabf3f2b736affbd855cefd
diff --git a/patrole_tempest_plugin/tests/api/compute/test_server_volume_attachments_rbac.py b/patrole_tempest_plugin/tests/api/compute/test_server_volume_attachments_rbac.py
index c5fbef6..74f8110 100644
--- a/patrole_tempest_plugin/tests/api/compute/test_server_volume_attachments_rbac.py
+++ b/patrole_tempest_plugin/tests/api/compute/test_server_volume_attachments_rbac.py
@@ -59,6 +59,12 @@
         cls.server = cls.create_test_server(wait_until='ACTIVE')
         cls.volume = cls.create_volume()
 
+    def _detach_volume_and_wait_until_available(self, server, volume):
+        self.servers_client.detach_volume(server['id'],
+                                          volume['id'])
+        waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                volume['id'], 'available')
+
     def _recreate_volume(self):
         try:
             # In case detachment failed, update the DB status of the volume
@@ -190,7 +196,8 @@
             # Swap volume from "volume1" to "volume2"
             self.servers_client.update_attached_volume(
                 self.server['id'], volume1['id'], volumeId=volume2['id'])
-        self.addCleanup(self._detach_volume, self.server, volume2)
+        self.addCleanup(self._detach_volume_and_wait_until_available,
+                        self.server, volume2)
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume1['id'], 'available')
         waiters.wait_for_volume_resource_status(self.volumes_client,