Update shrink scenario tests
This patch updates the shrink scenario tests to handle the
"available" or "shrinking_possible_data_loss_error" state received
from the manila share manager after a failed shrink operation when
attempting to shrink a share to a size smaller than the current used
space.
Change-Id: Idf34a149e5a975d5fbced0ec78d102c5d90b87c6
Related-Bug: #1858328
diff --git a/manila_tempest_tests/common/constants.py b/manila_tempest_tests/common/constants.py
index 8791837..69cd1ff 100644
--- a/manila_tempest_tests/common/constants.py
+++ b/manila_tempest_tests/common/constants.py
@@ -20,7 +20,8 @@
STATUS_CREATING = 'creating'
STATUS_DELETING = 'deleting'
STATUS_SHRINKING = 'shrinking'
-
+STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR = (
+ 'shrinking_possible_data_loss_error')
TEMPEST_MANILA_PREFIX = 'tempest-manila'
# Replication
diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py
index b19872b..1c7095c 100644
--- a/manila_tempest_tests/services/share/v2/json/shares_client.py
+++ b/manila_tempest_tests/services/share/v2/json/shares_client.py
@@ -403,20 +403,21 @@
share_status = body[status_attr]
start = int(time.time())
- while share_status != status:
+ exp_status = status if isinstance(status, list) else [status]
+ while share_status not in exp_status:
time.sleep(self.build_interval)
body = self.get_share(share_id, version=version)
share_status = body[status_attr]
- if share_status == status:
+ if share_status in exp_status:
return
elif 'error' in share_status.lower():
raise share_exceptions.ShareBuildErrorException(
share_id=share_id)
-
if int(time.time()) - start >= self.build_timeout:
message = ("Share's %(status_attr)s failed to transition to "
- "%(status)s within the required time %(seconds)s." %
- {"status_attr": status_attr, "status": status,
+ "%(status)s within the required "
+ "time %(seconds)s." %
+ {"status_attr": status_attr, "status": exp_status,
"seconds": self.build_timeout})
raise exceptions.TimeoutException(message)
diff --git a/manila_tempest_tests/tests/scenario/test_share_shrink.py b/manila_tempest_tests/tests/scenario/test_share_shrink.py
index 44882c8..4d232a8 100644
--- a/manila_tempest_tests/tests/scenario/test_share_shrink.py
+++ b/manila_tempest_tests/tests/scenario/test_share_shrink.py
@@ -84,7 +84,16 @@
self.shares_v2_client.shrink_share(share['id'],
new_size=default_share_size)
self.shares_v2_client.wait_for_share_status(
- share['id'], 'shrinking_possible_data_loss_error')
+ share['id'], ['shrinking_possible_data_loss_error', 'available'])
+
+ share = self.shares_v2_client.get_share(share["id"])
+
+ if share["status"] == constants.STATUS_AVAILABLE:
+ params = {'resource_id': share['id']}
+ messages = self.shares_v2_client.list_messages(params=params)
+ self.assertIn('009',
+ [message['action_id'] for message in messages])
+ self.assertEqual(share_size, int(share["size"]))
LOG.debug('Step 9 - delete data')
remote_client.exec_command("sudo rm /mnt/t1")
@@ -92,9 +101,12 @@
ls_result = remote_client.exec_command("sudo ls -lAh /mnt/")
LOG.debug(ls_result)
+ # Deletion of files can be an asynchronous activity on the backend.
+ # Thus we need to wait until timeout for the space to be released
+ # and repeating the shrink request until success
LOG.debug('Step 10 - reset and shrink')
self.share_shrink_retry_until_success(share["id"],
- share_size=default_share_size)
+ new_size=default_share_size)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(default_share_size, int(share["size"]))
@@ -108,37 +120,39 @@
LOG.debug('Step 12 - unmount')
self.unmount_share(remote_client)
- def share_shrink_retry_until_success(self, share_id, share_size,
+ def share_shrink_retry_until_success(self, share_id, new_size,
status_attr='status'):
"""Try share reset, followed by shrink, until timeout"""
check_interval = CONF.share.build_interval * 2
- body = self.shares_v2_client.get_share(share_id)
- share_status = body[status_attr]
+ share = self.shares_v2_client.get_share(share_id)
+ share_current_size = share["size"]
+ share_status = share[status_attr]
start = int(time.time())
-
- while share_status != constants.STATUS_AVAILABLE:
- if share_status != constants.STATUS_SHRINKING:
+ while share_current_size != new_size:
+ if (share_status ==
+ constants.STATUS_SHRINKING_POSSIBLE_DATA_LOSS_ERROR):
self.shares_admin_v2_client.reset_state(
share_id, status=constants.STATUS_AVAILABLE)
+ elif share_status != constants.STATUS_SHRINKING:
try:
self.shares_v2_client.shrink_share(share_id,
- new_size=share_size)
+ new_size=new_size)
except exceptions.BadRequest as e:
- if ('New size for shrink must be less '
- 'than current size') in six.text_type(e):
+ if ('New size for shrink must be less than current size'
+ in six.text_type(e)):
break
- time.sleep(check_interval)
- body = self.shares_v2_client.get_share(share_id)
- share_status = body[status_attr]
- if share_status == constants.STATUS_AVAILABLE:
- return
+ time.sleep(check_interval)
+ share = self.shares_v2_client.get_share(share_id)
+ share_status = share[status_attr]
+ share_current_size = share["size"]
+ if share_current_size == new_size:
+ return
if int(time.time()) - start >= CONF.share.build_timeout:
- message = ("Share's %(status_attr)s failed to transition to "
- "%(status)s within the required time %(seconds)s." %
- {"status_attr": status_attr,
- "status": constants.STATUS_AVAILABLE,
+ message = ("Share %(share_id)s failed to shrink within the "
+ "required time %(seconds)s." %
+ {"share_id": share["id"],
"seconds": CONF.share.build_timeout})
raise exceptions.TimeoutException(message)