Re-use common volume create/attach methods in test_iscsi_volume
This updates the test_iscsi_volume test to re-use the common
create_volume and attach_volume helpers from the BaseV2ComputeTest
class for creating a volume and attaching it to a server, along with
the common cleanup handling those helpers provide.
Change-Id: I2ca16a521251e7af0ec9fb6f1ad92ce44d485437
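
A minimal sketch of the pattern the test follows after this change, for
illustration only (the class and test names here are hypothetical; the
helpers are the BaseV2ComputeTest ones touched in the diff below and are
assumed to register their own cleanups, as described in
tempest/api/compute/base.py):

    from tempest.api.compute import base


    class LiveMigrationIscsiSketch(base.BaseV2ComputeTest):

        def test_iscsi_volume_pattern(self):
            # create_test_server and create_volume come from the base class
            # and handle their own cleanup, so no explicit addCleanup is
            # needed in the test body.
            server = self.create_test_server(wait_until="ACTIVE")
            volume = self.create_volume()
            # attach_volume waits for the volume to reach 'in-use' and
            # registers a detach on teardown.
            self.attach_volume(server, volume, device='/dev/xvdb')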
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 18a6afc..4e9bb88 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -82,14 +82,6 @@
if host != target_host:
return target_host
- def _volume_clean_up(self, server_id, volume_id):
- body = self.volumes_client.show_volume(volume_id)['volume']
- if body['status'] == 'in-use':
- self.servers_client.detach_volume(server_id, volume_id)
- waiters.wait_for_volume_status(self.volumes_client,
- volume_id, 'available')
- self.volumes_client.delete_volume(volume_id)
-
def _test_live_migration(self, state='ACTIVE', volume_backed=False):
"""Tests live migration between two hosts.
@@ -151,22 +143,15 @@
block_migrate_cinder_iscsi,
'Block Live migration not configured for iSCSI')
def test_iscsi_volume(self):
- server_id = self.create_test_server(wait_until="ACTIVE")['id']
+ server = self.create_test_server(wait_until="ACTIVE")
+ server_id = server['id']
actual_host = self._get_host_for_server(server_id)
target_host = self._get_host_other_than(actual_host)
- volume = self.volumes_client.create_volume(
- size=CONF.volume.volume_size, display_name='test')['volume']
-
- waiters.wait_for_volume_status(self.volumes_client,
- volume['id'], 'available')
- self.addCleanup(self._volume_clean_up, server_id, volume['id'])
+ volume = self.create_volume()
# Attach the volume to the server
- self.servers_client.attach_volume(server_id, volumeId=volume['id'],
- device='/dev/xvdb')
- waiters.wait_for_volume_status(self.volumes_client,
- volume['id'], 'in-use')
+ self.attach_volume(server, volume, device='/dev/xvdb')
self._migrate_server_to(server_id, target_host)
waiters.wait_for_server_status(self.servers_client,
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index a4578ae..b738e82 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -404,10 +404,21 @@
LOG.exception('Waiting for deletion of volume %s failed',
volume['id'])
- def attach_volume(self, server, volume):
- """Attaches volume to server and waits for 'in-use' volume status."""
+ def attach_volume(self, server, volume, device=None):
+ """Attaches volume to server and waits for 'in-use' volume status.
+
+ The volume will be detached when the test tears down.
+
+ :param server: The server to which the volume will be attached.
+ :param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
    the device name is not guaranteed to be honored by all hypervisors,
    so supplying it is not recommended.
+ """
+ attach_kwargs = dict(volumeId=volume['id'])
+ if device:
+ attach_kwargs['device'] = device
self.servers_client.attach_volume(
- server['id'], volumeId=volume['id'])
+ server['id'], **attach_kwargs)
# On teardown detach the volume and wait for it to be available. This
# is so we don't error out when trying to delete the volume during
# teardown.
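
For illustration, a minimal usage sketch of the updated helper, assumed to
be called from a test deriving from BaseV2ComputeTest, with server and
volume obtained from the same base-class helpers shown above:

    # Default: let the compute service pick the device name.
    self.attach_volume(server, volume)

    # Explicit mountpoint, as used by test_iscsi_volume above; the
    # hypervisor may not honor the requested name.
    self.attach_volume(server, volume, device='/dev/xvdb')

In both cases the helper waits for the volume to reach 'in-use' and
detaches it on teardown, so callers no longer need a custom cleanup such
as the removed _volume_clean_up.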