Avoid wait for vol detach in live-migrate tests

This extends the recent theme of making sure the tearDown process does
not hang, and ultimately fail the test, when a volume cannot be
detached from the guest. The live migration tests specifically have
been failing in this way in the gate for a long time.
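
For context, the cleanup wiring this change makes optional looks
roughly like the sketch below. It is simplified from the attach_volume
helper in tempest/api/compute/base.py and is not the exact code; the
waiter and client names match the diff that follows, while the
_detach_volume helper name is assumed from the surrounding file.

  # Simplified sketch, not the exact Tempest code; the cleanup ordering
  # is the relevant part.
  from tempest.common import waiters

  def attach_volume(self, server, volume, device=None, tag=None,
                    wait_for_detach=True):
      """Attach a volume and register detach cleanups (sketch)."""
      attachment = self.servers_client.attach_volume(
          server['id'], volumeId=volume['id'])['volumeAttachment']
      # Cleanups run LIFO: at teardown the detach request registered
      # below is issued first, then this waiter blocks until Cinder
      # reports the volume 'available'. When the guest never releases
      # the device, this wait is what hangs tearDown, so callers can
      # now skip registering it.
      if not volume['multiattach'] and wait_for_detach:
          self.addCleanup(
              waiters.wait_for_volume_resource_status,
              self.volumes_client, volume['id'], 'available')
      self.addCleanup(self._detach_volume, server, volume)
      waiters.wait_for_volume_resource_status(
          self.volumes_client, volume['id'], 'in-use')
      return attachment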

Change-Id: Ib1065c146ab2f927c23fc8a0b73d7040516a19a6
Related-Bug: #199232
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 1cb8004..f7c0dd9 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -202,7 +202,8 @@
         volume = self.create_volume()
 
         # Attach the volume to the server
-        self.attach_volume(server, volume, device='/dev/xvdb')
+        self.attach_volume(server, volume, device='/dev/xvdb',
+                           wait_for_detach=False)
         server = self.admin_servers_client.show_server(server_id)['server']
         volume_id1 = server["os-extended-volumes:volumes_attached"][0]["id"]
         self._live_migrate(server_id, target_host, 'ACTIVE')
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index ea1cddc..260d4e0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -568,7 +568,8 @@
             # is already detached.
             pass
 
-    def attach_volume(self, server, volume, device=None, tag=None):
+    def attach_volume(self, server, volume, device=None, tag=None,
+                      wait_for_detach=True):
         """Attaches volume to server and waits for 'in-use' volume status.
 
         The volume will be detached when the test tears down.
@@ -605,7 +606,7 @@
         # the contents of the console log. The final check of the volume state
         # should be a no-op by this point and is just added for completeness
         # when detaching non-multiattach volumes.
-        if not volume['multiattach']:
+        if not volume['multiattach'] and wait_for_detach:
             self.addCleanup(
                 waiters.wait_for_volume_resource_status, self.volumes_client,
                 volume['id'], 'available')