Update volume-status waiter for new cinder attach
The new Cinder attach API treats attaching to a shelved instance
differently than the old code did. In the old case we fake things a
bit and mark the volume as attached, even though it really isn't;
it's actually just reserved, and there's no valid connector
or connection associated with it.
In the new (Cinder V3.27) attach API we instead give a more
accurate response: we create an attachment record and mark the
volume as 'reserved' until the instance is unshelved, at which
time we complete the connection and move the volume to 'in-use'.
This patch modifies the volume-status waiter slightly so that it
can accept multiple statuses to consider "ok". We also modify the
test_attach_volume_shelved_or_offload_server and
test_detach_volume_shelved_or_offload_server tests to specify that
either 'in-use' or 'reserved' is an acceptable response. This way
we remain compatible with older combinations of nova/cinder and
also work with the new combination.
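As a minimal usage sketch (assuming a Tempest test class with
self.volumes_client and a volume dict, as in the tests below), the
waiter still accepts a single status string and now also accepts a
list of acceptable statuses:

    from tempest.common import waiters

    # Old-style call, unchanged behavior: wait for exactly 'in-use'.
    waiters.wait_for_volume_resource_status(self.volumes_client,
                                            volume['id'], 'in-use')

    # New-style call: either 'in-use' or 'reserved' completes the
    # wait, covering both old and new Cinder attach flows.
    waiters.wait_for_volume_resource_status(self.volumes_client,
                                            volume['id'],
                                            ['in-use', 'reserved'])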
Change-Id: Ia84f4325ddb0080521241ace26f89d1161db9dca
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 429ded5..ddb035d 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -424,7 +424,7 @@
LOG.exception('Waiting for deletion of volume %s failed',
volume['id'])
- def attach_volume(self, server, volume, device=None):
+ def attach_volume(self, server, volume, device=None, check_reserved=False):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
@@ -433,10 +433,15 @@
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
+ :param check_reserved: Consider a status of reserved as valid for
+ completion. This is to handle new Cinder attach where we more
+ accurately use 'reserved' for things like attaching to a shelved
+ server.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
+
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
@@ -449,8 +454,11 @@
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.servers_client.detach_volume,
server['id'], volume['id'])
+ statuses = ['in-use']
+ if check_reserved:
+ statuses.append('reserved')
waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'in-use')
+ volume['id'], statuses)
return attachment
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ed5f9a6..502bc1b 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -212,7 +212,8 @@
num_vol = self._count_volumes(server)
self._shelve_server(server)
attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+ device=('/dev/%s' % self.device),
+ check_reserved=True)
# Unshelve the instance and check that attached volume exists
self._unshelve_server_and_check_volumes(server, num_vol + 1)
@@ -239,7 +240,8 @@
self._shelve_server(server)
# Attach and then detach the volume
- self.attach_volume(server, volume, device=('/dev/%s' % self.device))
+ self.attach_volume(server, volume, device=('/dev/%s' % self.device),
+ check_reserved=True)
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 93e6fbf..cf187e6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -179,25 +179,26 @@
raise lib_exc.TimeoutException(message)
-def wait_for_volume_resource_status(client, resource_id, status):
- """Waits for a volume resource to reach a given status.
+def wait_for_volume_resource_status(client, resource_id, statuses):
+ """Waits for a volume resource to reach any of the specified statuses.
This function is a common function for volume, snapshot and backup
resources. The function extracts the name of the desired resource from
the client class name of the resource.
"""
- resource_name = re.findall(
- r'(Volume|Snapshot|Backup|Group)',
- client.__class__.__name__)[0].lower()
+ if not isinstance(statuses, list):
+ statuses = [statuses]
+ resource_name = re.findall(r'(Volume|Snapshot|Backup|Group)',
+ client.__class__.__name__)[0].lower()
show_resource = getattr(client, 'show_' + resource_name)
resource_status = show_resource(resource_id)[resource_name]['status']
start = int(time.time())
- while resource_status != status:
+ while resource_status not in statuses:
time.sleep(client.build_interval)
resource_status = show_resource(resource_id)[
'{}'.format(resource_name)]['status']
- if resource_status == 'error' and resource_status != status:
+ if resource_status == 'error' and resource_status not in statuses:
raise exceptions.VolumeResourceBuildErrorException(
resource_name=resource_name, resource_id=resource_id)
if resource_name == 'volume' and resource_status == 'error_restoring':
@@ -206,7 +207,7 @@
if int(time.time()) - start >= client.build_timeout:
message = ('%s %s failed to reach %s status (current %s) '
'within the required time (%s s).' %
- (resource_name, resource_id, status, resource_status,
+ (resource_name, resource_id, statuses, resource_status,
client.build_timeout))
raise lib_exc.TimeoutException(message)