Merge "Updated from global requirements"
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 429ded5..ddb035d 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -424,7 +424,7 @@
LOG.exception('Waiting for deletion of volume %s failed',
volume['id'])
- def attach_volume(self, server, volume, device=None):
+ def attach_volume(self, server, volume, device=None, check_reserved=False):
"""Attaches volume to server and waits for 'in-use' volume status.
The volume will be detached when the test tears down.
@@ -433,10 +433,15 @@
:param volume: The volume to attach.
:param device: Optional mountpoint for the attached volume. Note that
this is not guaranteed for all hypervisors and is not recommended.
+ :param check_reserved: Consider a status of 'reserved' as valid for
+ completion. This handles the new Cinder attach flow, which more
+ accurately reports 'reserved' for cases such as attaching to a
+ shelved server.
"""
attach_kwargs = dict(volumeId=volume['id'])
if device:
attach_kwargs['device'] = device
+
attachment = self.servers_client.attach_volume(
server['id'], **attach_kwargs)['volumeAttachment']
# On teardown detach the volume and wait for it to be available. This
@@ -449,8 +454,11 @@
self.addCleanup(test_utils.call_and_ignore_notfound_exc,
self.servers_client.detach_volume,
server['id'], volume['id'])
+ statuses = ['in-use']
+ if check_reserved:
+ statuses.append('reserved')
waiters.wait_for_volume_resource_status(self.volumes_client,
- volume['id'], 'in-use')
+ volume['id'], statuses)
return attachment
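
A minimal usage sketch (illustrative, not part of the patch), assuming the
standard compute test base-class helpers: with check_reserved=True, attaching
a volume to a shelved server completes once Cinder reports 'reserved',
instead of timing out waiting for 'in-use'.

    # Sketch: attach a volume to a shelved server using the new flag.
    # create_test_server/create_volume are the usual base-class helpers.
    server = self.create_test_server(wait_until='ACTIVE')
    volume = self.create_volume()
    self.servers_client.shelve_server(server['id'])
    # The terminal server state ('SHELVED' vs 'SHELVED_OFFLOADED')
    # depends on the shelved_offload_time configuration.
    waiters.wait_for_server_status(self.servers_client, server['id'],
                                   'SHELVED_OFFLOADED')
    # Without check_reserved=True this would time out, since the volume
    # never reaches 'in-use' while the server is shelved.
    attachment = self.attach_volume(server, volume, check_reserved=True)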
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index ed5f9a6..502bc1b 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -212,7 +212,8 @@
num_vol = self._count_volumes(server)
self._shelve_server(server)
attachment = self.attach_volume(server, volume,
- device=('/dev/%s' % self.device))
+ device=('/dev/%s' % self.device),
+ check_reserved=True)
# Unshelve the instance and check that attached volume exists
self._unshelve_server_and_check_volumes(server, num_vol + 1)
@@ -239,7 +240,8 @@
self._shelve_server(server)
# Attach and then detach the volume
- self.attach_volume(server, volume, device=('/dev/%s' % self.device))
+ self.attach_volume(server, volume, device=('/dev/%s' % self.device),
+ check_reserved=True)
self.servers_client.detach_volume(server['id'], volume['id'])
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index 44c1def..e68ab7e 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
from testtools import matchers
from tempest.api.volume import base
@@ -149,3 +150,16 @@
# Should allow
self.assertEqual(volume['snapshot_id'], src_snap['id'])
self.assertEqual(volume['size'], src_size + 1)
+
+ @decorators.idempotent_id('bbcfa285-af7f-479e-8c1a-8c34fc16543c')
+ @testtools.skipUnless(CONF.volume_feature_enabled.backup,
+ "Cinder backup is disabled")
+ def test_snapshot_backup(self):
+ # Create a snapshot
+ snapshot = self.create_snapshot(volume_id=self.volume_origin['id'])
+
+ backup = self.create_backup(volume_id=self.volume_origin['id'],
+ snapshot_id=snapshot['id'])
+ backup_info = self.backups_client.show_backup(backup['id'])['backup']
+ self.assertEqual(self.volume_origin['id'], backup_info['volume_id'])
+ self.assertEqual(snapshot['id'], backup_info['snapshot_id'])
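
For context (a sketch, assuming self.create_backup wraps the volume backups
client): the new test exercises Cinder's ability to back up a specific
snapshot rather than the whole volume. A roughly equivalent direct client
call would look like this.

    # Illustrative only; names follow the tempest volume clients.
    backup = self.backups_client.create_backup(
        volume_id=self.volume_origin['id'],
        snapshot_id=snapshot['id'])['backup']
    # The generalized waiter below also works with BackupsClient, since
    # the resource name is derived from the client's class name.
    waiters.wait_for_volume_resource_status(self.backups_client,
                                            backup['id'], 'available')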
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 93e6fbf..cf187e6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -179,25 +179,26 @@
raise lib_exc.TimeoutException(message)
-def wait_for_volume_resource_status(client, resource_id, status):
- """Waits for a volume resource to reach a given status.
+def wait_for_volume_resource_status(client, resource_id, statuses):
+ """Waits for a volume resource to reach any of the specified statuses.
This function is shared by volume, snapshot, backup and group
resources. It extracts the name of the desired resource from the
client's class name.
"""
- resource_name = re.findall(
- r'(Volume|Snapshot|Backup|Group)',
- client.__class__.__name__)[0].lower()
+ if not isinstance(statuses, list):
+ statuses = [statuses]
+ resource_name = re.findall(r'(Volume|Snapshot|Backup|Group)',
+ client.__class__.__name__)[0].lower()
show_resource = getattr(client, 'show_' + resource_name)
resource_status = show_resource(resource_id)[resource_name]['status']
start = int(time.time())
- while resource_status != status:
+ while resource_status not in statuses:
time.sleep(client.build_interval)
resource_status = show_resource(resource_id)[
'{}'.format(resource_name)]['status']
- if resource_status == 'error' and resource_status != status:
+ if resource_status == 'error' and resource_status not in statuses:
raise exceptions.VolumeResourceBuildErrorException(
resource_name=resource_name, resource_id=resource_id)
if resource_name == 'volume' and resource_status == 'error_restoring':
@@ -206,7 +207,7 @@
if int(time.time()) - start >= client.build_timeout:
message = ('%s %s failed to reach %s status (current %s) '
'within the required time (%s s).' %
- (resource_name, resource_id, status, resource_status,
+ (resource_name, resource_id, statuses, resource_status,
client.build_timeout))
raise lib_exc.TimeoutException(message)
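
A brief usage note (sketch, not part of the patch): the waiter now accepts
either a single status string or a list of statuses, so existing call sites
keep working unchanged.

    # Existing single-status callers are unaffected:
    waiters.wait_for_volume_resource_status(self.volumes_client,
                                            volume['id'], 'available')
    # New callers may accept any of several terminal statuses:
    waiters.wait_for_volume_resource_status(self.volumes_client,
                                            volume['id'],
                                            ['in-use', 'reserved'])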