Move resource waiters to a new module
Currently, resource waiter methods are defined inside the
service client modules, and three of them are duplicated
across the v1 and v2 modules:
- wait_for_share_status
- wait_for_snapshot_status
- wait_for_access_rule_status
This patch separates the waiter functions from the client
modules and collects them in a single common module.
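
Call sites change from client methods to module-level functions
that take the client as their first argument, e.g. (taken from the
updated tests):

    # before
    self.shares_v2_client.wait_for_share_status(self.sh["id"], status)
    # after
    waiters.wait_for_share_status(self.shares_v2_client,
                                  self.sh["id"], status)
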
Change-Id: I9f0d50a325139e6067e4339533d4b01a322df7a8
diff --git a/manila_tempest_tests/common/waiters.py b/manila_tempest_tests/common/waiters.py
new file mode 100644
index 0000000..d3dbcb4
--- /dev/null
+++ b/manila_tempest_tests/common/waiters.py
@@ -0,0 +1,385 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+import six
+from tempest import config
+from tempest.lib import exceptions
+
+from manila_tempest_tests.common import constants
+from manila_tempest_tests.services.share.v2.json import shares_client
+from manila_tempest_tests import share_exceptions
+
+CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
+
+
+def wait_for_share_instance_status(client, instance_id, status,
+ version=LATEST_MICROVERSION):
+ """Waits for a share to reach a given status."""
+ body = client.get_share_instance(instance_id, version=version)
+ instance_status = body['status']
+ start = int(time.time())
+
+ while instance_status != status:
+ time.sleep(client.build_interval)
+        body = client.get_share_instance(instance_id, version=version)
+ instance_status = body['status']
+ if instance_status == status:
+ return
+ elif 'error' in instance_status.lower():
+ raise share_exceptions.ShareInstanceBuildErrorException(
+ id=instance_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Share instance %s failed to reach %s status within'
+ ' the required time (%s s).' %
+ (instance_id, status, client.build_timeout))
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_share_status(client, share_id, status, status_attr='status',
+ version=LATEST_MICROVERSION):
+ """Waits for a share to reach a given status."""
+ if isinstance(client, shares_client.SharesV2Client):
+ body = client.get_share(share_id, version=version)
+ else:
+ body = client.get_share(share_id)
+ share_status = body[status_attr]
+ start = int(time.time())
+
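+    # 'status' may be a single value or a list of acceptable values;
+    # normalize it to a list for the membership checks below.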
+ exp_status = status if isinstance(status, list) else [status]
+ while share_status not in exp_status:
+ time.sleep(client.build_interval)
+ if isinstance(client, shares_client.SharesV2Client):
+ body = client.get_share(share_id, version=version)
+ else:
+ body = client.get_share(share_id)
+ share_status = body[status_attr]
+ if share_status in exp_status:
+ return
+ elif 'error' in share_status.lower():
+ raise share_exceptions.ShareBuildErrorException(
+ share_id=share_id)
+ if int(time.time()) - start >= client.build_timeout:
+ message = ("Share's %(status_attr)s failed to transition to "
+ "%(status)s within the required "
+ "time %(seconds)s." %
+ {"status_attr": status_attr, "status": exp_status,
+ "seconds": client.build_timeout})
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_snapshot_status(client, snapshot_id, status,
+ version=LATEST_MICROVERSION):
+ """Waits for a snapshot to reach a given status."""
+ if isinstance(client, shares_client.SharesV2Client):
+ body = client.get_snapshot(snapshot_id, version=version)
+ else:
+ body = client.get_snapshot(snapshot_id)
+ snapshot_name = body['name']
+ snapshot_status = body['status']
+ start = int(time.time())
+
+ while snapshot_status != status:
+ time.sleep(client.build_interval)
+ if isinstance(client, shares_client.SharesV2Client):
+ body = client.get_snapshot(snapshot_id, version=version)
+ else:
+ body = client.get_snapshot(snapshot_id)
+ snapshot_status = body['status']
+        if snapshot_status == status:
+ return
+        if 'error' in snapshot_status:
+            raise share_exceptions.SnapshotBuildErrorException(
+                snapshot_id=snapshot_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Share Snapshot %s failed to reach %s status '
+ 'within the required time (%s s).' %
+ (snapshot_name, status, client.build_timeout))
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_access_rule_status(client, share_id, rule_id, status,
+ raise_rule_in_error_state=True):
+ """Waits for an access rule to reach a given status."""
+ rule_status = "new"
+ start = int(time.time())
+ while rule_status != status:
+ time.sleep(client.build_interval)
+ rules = client.list_access_rules(share_id)
+ for rule in rules:
+ if rule["id"] in rule_id:
+ rule_status = rule['state']
+ break
+ if 'error' in rule_status and raise_rule_in_error_state:
+ raise share_exceptions.AccessRuleBuildErrorException(
+ rule_id=rule_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Share Access Rule %s failed to reach %s status '
+ 'within the required time (%s s).' %
+ (rule_id, status, client.build_timeout))
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_snapshot_instance_status(client, instance_id, expected_status):
+ """Waits for a snapshot instance status to reach a given status."""
+ body = client.get_snapshot_instance(instance_id)
+ instance_status = body['status']
+ start = int(time.time())
+
+ while instance_status != expected_status:
+ time.sleep(client.build_interval)
+ body = client.get_snapshot_instance(instance_id)
+ instance_status = body['status']
+ if instance_status == expected_status:
+ return
+ if 'error' in instance_status:
+ raise share_exceptions.SnapshotInstanceBuildErrorException(
+ id=instance_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('The status of snapshot instance %(id)s failed to '
+ 'reach %(expected_status)s status within the '
+ 'required time (%(time)ss). Current '
+ 'status: %(current_status)s.' %
+ {
+ 'expected_status': expected_status,
+ 'time': client.build_timeout,
+ 'id': instance_id,
+ 'current_status': instance_status,
+ })
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_share_group_status(client, share_group_id, status):
+ """Waits for a share group to reach a given status."""
+ body = client.get_share_group(share_group_id)
+ sg_name = body['name']
+ sg_status = body['status']
+ start = int(time.time())
+
+ while sg_status != status:
+ time.sleep(client.build_interval)
+ body = client.get_share_group(share_group_id)
+ sg_status = body['status']
+ if 'error' in sg_status and status != 'error':
+ raise share_exceptions.ShareGroupBuildErrorException(
+ share_group_id=share_group_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ sg_name = sg_name or share_group_id
+ message = ('Share Group %s failed to reach %s status '
+ 'within the required time (%s s). '
+ 'Current status: %s' %
+ (sg_name, status, client.build_timeout, sg_status))
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_share_group_snapshot_status(client, share_group_snapshot_id,
+ status):
+ """Waits for a share group snapshot to reach a given status."""
+ body = client.get_share_group_snapshot(share_group_snapshot_id)
+ sg_snapshot_name = body['name']
+ sg_snapshot_status = body['status']
+ start = int(time.time())
+
+ while sg_snapshot_status != status:
+ time.sleep(client.build_interval)
+ body = client.get_share_group_snapshot(share_group_snapshot_id)
+ sg_snapshot_status = body['status']
+ if 'error' in sg_snapshot_status and status != 'error':
+ raise share_exceptions.ShareGroupSnapshotBuildErrorException(
+ share_group_snapshot_id=share_group_snapshot_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Share Group Snapshot %s failed to reach %s status '
+ 'within the required time (%s s).' %
+ (sg_snapshot_name, status, client.build_timeout))
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_share_server_status(client, server_id, status,
+ status_attr='status'):
+ """Waits for a share to reach a given status."""
+ body = client.show_share_server(server_id)
+ server_status = body[status_attr]
+ start = int(time.time())
+
+ while server_status != status:
+ time.sleep(client.build_interval)
+ body = client.show_share_server(server_id)
+ server_status = body[status_attr]
+ if server_status in status:
+ return
+ elif constants.STATUS_ERROR in server_status.lower():
+ raise share_exceptions.ShareServerBuildErrorException(
+ server_id=server_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ("Share server's %(status_attr)s failed to "
+ "transition to %(status)s within the required "
+ "time %(seconds)s." %
+ {"status_attr": status_attr, "status": status,
+ "seconds": client.build_timeout})
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_share_replica_status(client, replica_id, expected_status,
+ status_attr='status'):
+ """Waits for a replica's status_attr to reach a given status."""
+ body = client.get_share_replica(replica_id)
+ replica_status = body[status_attr]
+ start = int(time.time())
+
+ while replica_status != expected_status:
+ time.sleep(client.build_interval)
+ body = client.get_share_replica(replica_id)
+ replica_status = body[status_attr]
+ if replica_status == expected_status:
+ return
+ if ('error' in replica_status
+ and expected_status != constants.STATUS_ERROR):
+ raise share_exceptions.ShareInstanceBuildErrorException(
+ id=replica_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('The %(status_attr)s of Replica %(id)s failed to '
+ 'reach %(expected_status)s status within the '
+ 'required time (%(time)ss). Current '
+ '%(status_attr)s: %(current_status)s.' %
+ {
+ 'status_attr': status_attr,
+ 'expected_status': expected_status,
+ 'time': client.build_timeout,
+ 'id': replica_id,
+ 'current_status': replica_status,
+ })
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_snapshot_access_rule_status(client, snapshot_id, rule_id,
+                                         expected_state='active'):
+    """Waits for a snapshot access rule to reach a given state."""
+ rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
+ state = rule['state']
+ start = int(time.time())
+
+ while state != expected_state:
+ time.sleep(client.build_interval)
+ rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
+ state = rule['state']
+ if state == expected_state:
+ return
+ if 'error' in state:
+ raise share_exceptions.AccessRuleBuildErrorException(
+ snapshot_id)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('The status of snapshot access rule %(id)s failed '
+ 'to reach %(expected_state)s state within the '
+ 'required time (%(time)ss). Current '
+ 'state: %(current_state)s.' %
+ {
+ 'expected_state': expected_state,
+ 'time': client.build_timeout,
+ 'id': rule_id,
+ 'current_state': state,
+ })
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_migration_status(client, share_id, dest_host, status_to_wait,
+ version=LATEST_MICROVERSION):
+ """Waits for a share to migrate to a certain host."""
+ statuses = ((status_to_wait,)
+ if not isinstance(status_to_wait, (tuple, list, set))
+ else status_to_wait)
+ share = client.get_share(share_id, version=version)
+ migration_timeout = CONF.share.migration_timeout
+ start = int(time.time())
+ while share['task_state'] not in statuses:
+ time.sleep(client.build_interval)
+ share = client.get_share(share_id, version=version)
+ if share['task_state'] in statuses:
+ break
+ elif share['task_state'] == 'migration_error':
+ raise share_exceptions.ShareMigrationException(
+ share_id=share['id'], src=share['host'], dest=dest_host)
+ elif int(time.time()) - start >= migration_timeout:
+            message = ('Share %(share_id)s failed to reach a status in '
+ '%(status)s when migrating from host %(src)s to '
+ 'host %(dest)s within the required time '
+ '%(timeout)s.' % {
+ 'src': share['host'],
+ 'dest': dest_host,
+ 'share_id': share['id'],
+                           'timeout': migration_timeout,
+ 'status': six.text_type(statuses),
+ })
+ raise exceptions.TimeoutException(message)
+ return share
+
+
+def wait_for_snapshot_access_rule_deletion(client, snapshot_id, rule_id):
+    """Waits for a snapshot access rule to be deleted."""
+ rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
+ start = int(time.time())
+
+ while rule is not None:
+ time.sleep(client.build_interval)
+
+ rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
+
+ if rule is None:
+ return
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('The snapshot access rule %(id)s failed to delete '
+ 'within the required time (%(time)ss).' %
+ {
+ 'time': client.build_timeout,
+ 'id': rule_id,
+ })
+ raise exceptions.TimeoutException(message)
+
+
+def wait_for_message(client, resource_id):
+ """Waits until a message for a resource with given id exists"""
+ start = int(time.time())
+ message = None
+
+ while not message:
+ time.sleep(client.build_interval)
+ for msg in client.list_messages():
+ if msg['resource_id'] == resource_id:
+ return msg
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('No message for resource with id %s was created in'
+ ' the required time (%s s).' %
+ (resource_id, client.build_timeout))
+ raise exceptions.TimeoutException(message)
diff --git a/manila_tempest_tests/services/share/json/shares_client.py b/manila_tempest_tests/services/share/json/shares_client.py
index 3684cf4..f7b3694 100644
--- a/manila_tempest_tests/services/share/json/shares_client.py
+++ b/manila_tempest_tests/services/share/json/shares_client.py
@@ -218,72 +218,6 @@
self.expected_success(202, resp.status)
return body
- def wait_for_share_status(self, share_id, status):
- """Waits for a share to reach a given status."""
- body = self.get_share(share_id)
- share_name = body['name']
- share_status = body['status']
- start = int(time.time())
-
- while share_status != status:
- time.sleep(self.build_interval)
- body = self.get_share(share_id)
- share_status = body['status']
- if share_status == status:
- return
- elif 'error' in share_status.lower():
- raise share_exceptions.ShareBuildErrorException(
- share_id=share_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('Share %s failed to reach %s status within '
- 'the required time (%s s).' %
- (share_name, status, self.build_timeout))
- raise exceptions.TimeoutException(message)
-
- def wait_for_snapshot_status(self, snapshot_id, status):
- """Waits for a snapshot to reach a given status."""
- body = self.get_snapshot(snapshot_id)
- snapshot_name = body['name']
- snapshot_status = body['status']
- start = int(time.time())
-
- while snapshot_status != status:
- time.sleep(self.build_interval)
- body = self.get_snapshot(snapshot_id)
- snapshot_status = body['status']
- if 'error' in snapshot_status:
- raise share_exceptions.SnapshotBuildErrorException(
- snapshot_id=snapshot_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('Share Snapshot %s failed to reach %s status '
- 'within the required time (%s s).' %
- (snapshot_name, status, self.build_timeout))
- raise exceptions.TimeoutException(message)
-
- def wait_for_access_rule_status(self, share_id, rule_id, status,
- raise_rule_in_error_state=True):
- """Waits for an access rule to reach a given status."""
- rule_status = "new"
- start = int(time.time())
- while rule_status != status:
- time.sleep(self.build_interval)
- rules = self.list_access_rules(share_id)
- for rule in rules:
- if rule["id"] in rule_id:
- rule_status = rule['state']
- break
- if 'error' in rule_status and raise_rule_in_error_state:
- raise share_exceptions.AccessRuleBuildErrorException(
- rule_id=rule_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('Share Access Rule %s failed to reach %s status '
- 'within the required time (%s s).' %
- (rule_id, status, self.build_timeout))
- raise exceptions.TimeoutException(message)
-
def default_quotas(self, tenant_id):
resp, body = self.get("os-quota-sets/%s/defaults" % tenant_id)
self.expected_success(200, resp.status)
diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py
index c1a0f3c..475855e 100644
--- a/manila_tempest_tests/services/share/v2/json/shares_client.py
+++ b/manila_tempest_tests/services/share/v2/json/shares_client.py
@@ -17,11 +17,9 @@
import re
import time
-import six
from six.moves.urllib import parse
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib import exceptions
from manila_tempest_tests.common import constants
from manila_tempest_tests.services.share.json import shares_client
@@ -373,54 +371,6 @@
self.expected_success(200, resp.status)
return self._parse_resp(body)
- def wait_for_share_instance_status(self, instance_id, status,
- version=LATEST_MICROVERSION):
- """Waits for a share to reach a given status."""
- body = self.get_share_instance(instance_id, version=version)
- instance_status = body['status']
- start = int(time.time())
-
- while instance_status != status:
- time.sleep(self.build_interval)
- body = self.get_share(instance_id)
- instance_status = body['status']
- if instance_status == status:
- return
- elif 'error' in instance_status.lower():
- raise share_exceptions.ShareInstanceBuildErrorException(
- id=instance_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('Share instance %s failed to reach %s status within'
- ' the required time (%s s).' %
- (instance_id, status, self.build_timeout))
- raise exceptions.TimeoutException(message)
-
- def wait_for_share_status(self, share_id, status, status_attr='status',
- version=LATEST_MICROVERSION):
- """Waits for a share to reach a given status."""
- body = self.get_share(share_id, version=version)
- share_status = body[status_attr]
- start = int(time.time())
-
- exp_status = status if isinstance(status, list) else [status]
- while share_status not in exp_status:
- time.sleep(self.build_interval)
- body = self.get_share(share_id, version=version)
- share_status = body[status_attr]
- if share_status in exp_status:
- return
- elif 'error' in share_status.lower():
- raise share_exceptions.ShareBuildErrorException(
- share_id=share_id)
- if int(time.time()) - start >= self.build_timeout:
- message = ("Share's %(status_attr)s failed to transition to "
- "%(status)s within the required "
- "time %(seconds)s." %
- {"status_attr": status_attr, "status": exp_status,
- "seconds": self.build_timeout})
- raise exceptions.TimeoutException(message)
-
###############
def extend_share(self, share_id, new_size, version=LATEST_MICROVERSION,
@@ -566,30 +516,6 @@
self.expected_success(202, resp.status)
return body
- def wait_for_snapshot_status(self, snapshot_id, status,
- version=LATEST_MICROVERSION):
- """Waits for a snapshot to reach a given status."""
- body = self.get_snapshot(snapshot_id, version=version)
- snapshot_name = body['name']
- snapshot_status = body['status']
- start = int(time.time())
-
- while snapshot_status != status:
- time.sleep(self.build_interval)
- body = self.get_snapshot(snapshot_id, version=version)
- snapshot_status = body['status']
- if snapshot_status in status:
- return
- if 'error' in snapshot_status:
- raise (share_exceptions.
- SnapshotBuildErrorException(snapshot_id=snapshot_id))
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('Share Snapshot %s failed to reach %s status '
- 'within the required time (%s s).' %
- (snapshot_name, status, self.build_timeout))
- raise exceptions.TimeoutException(message)
-
def manage_snapshot(self, share_id, provider_location,
name=None, description=None,
version=LATEST_MICROVERSION,
@@ -728,35 +654,6 @@
self.expected_success(202, resp.status)
return self._parse_resp(body)
- def wait_for_snapshot_instance_status(self, instance_id, expected_status):
- """Waits for a snapshot instance status to reach a given status."""
- body = self.get_snapshot_instance(instance_id)
- instance_status = body['status']
- start = int(time.time())
-
- while instance_status != expected_status:
- time.sleep(self.build_interval)
- body = self.get_snapshot_instance(instance_id)
- instance_status = body['status']
- if instance_status == expected_status:
- return
- if 'error' in instance_status:
- raise share_exceptions.SnapshotInstanceBuildErrorException(
- id=instance_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('The status of snapshot instance %(id)s failed to '
- 'reach %(expected_status)s status within the '
- 'required time (%(time)ss). Current '
- 'status: %(current_status)s.' %
- {
- 'expected_status': expected_status,
- 'time': self.build_timeout,
- 'id': instance_id,
- 'current_status': instance_status,
- })
- raise exceptions.TimeoutException(message)
-
def get_snapshot_instance_export_location(
self, instance_id, export_location_uuid,
version=LATEST_MICROVERSION):
@@ -1171,29 +1068,6 @@
self.force_delete(share_group_id, s_type='share-groups',
headers=headers, version=version)
- def wait_for_share_group_status(self, share_group_id, status):
- """Waits for a share group to reach a given status."""
- body = self.get_share_group(share_group_id)
- sg_name = body['name']
- sg_status = body['status']
- start = int(time.time())
-
- while sg_status != status:
- time.sleep(self.build_interval)
- body = self.get_share_group(share_group_id)
- sg_status = body['status']
- if 'error' in sg_status and status != 'error':
- raise share_exceptions.ShareGroupBuildErrorException(
- share_group_id=share_group_id)
-
- if int(time.time()) - start >= self.build_timeout:
- sg_name = sg_name or share_group_id
- message = ('Share Group %s failed to reach %s status '
- 'within the required time (%s s). '
- 'Current status: %s' %
- (sg_name, status, self.build_timeout, sg_status))
- raise exceptions.TimeoutException(message)
-
###############
def create_share_group_type(self, name=None, share_types=(),
@@ -1458,28 +1332,6 @@
share_group_snapshot_id, s_type='share-group-snapshots',
headers=headers, version=version)
- def wait_for_share_group_snapshot_status(self, share_group_snapshot_id,
- status):
- """Waits for a share group snapshot to reach a given status."""
- body = self.get_share_group_snapshot(share_group_snapshot_id)
- sg_snapshot_name = body['name']
- sg_snapshot_status = body['status']
- start = int(time.time())
-
- while sg_snapshot_status != status:
- time.sleep(self.build_interval)
- body = self.get_share_group_snapshot(share_group_snapshot_id)
- sg_snapshot_status = body['status']
- if 'error' in sg_snapshot_status and status != 'error':
- raise share_exceptions.ShareGroupSnapshotBuildErrorException(
- share_group_snapshot_id=share_group_snapshot_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('Share Group Snapshot %s failed to reach %s status '
- 'within the required time (%s s).' %
- (sg_snapshot_name, status, self.build_timeout))
- raise exceptions.TimeoutException(message)
-
###############
def manage_share_server(self, host, share_network_id, identifier,
@@ -1511,31 +1363,6 @@
self.expected_success(202, resp.status)
return self._parse_resp(body)
- def wait_for_share_server_status(self, server_id, status,
- status_attr='status'):
- """Waits for a share to reach a given status."""
- body = self.show_share_server(server_id)
- server_status = body[status_attr]
- start = int(time.time())
-
- while server_status != status:
- time.sleep(self.build_interval)
- body = self.show_share_server(server_id)
- server_status = body[status_attr]
- if server_status in status:
- return
- elif constants.STATUS_ERROR in server_status.lower():
- raise share_exceptions.ShareServerBuildErrorException(
- server_id=server_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ("Share server's %(status_attr)s failed to "
- "transition to %(status)s within the required "
- "time %(seconds)s." %
- {"status_attr": status_attr, "status": status,
- "seconds": self.build_timeout})
- raise exceptions.TimeoutException(message)
-
def share_server_reset_state(self, share_server_id,
status=constants.SERVER_STATE_ACTIVE,
version=LATEST_MICROVERSION):
@@ -1613,37 +1440,6 @@
headers=EXPERIMENTAL, extra_headers=True,
version=version)
- def wait_for_migration_status(self, share_id, dest_host, status_to_wait,
- version=LATEST_MICROVERSION):
- """Waits for a share to migrate to a certain host."""
- statuses = ((status_to_wait,)
- if not isinstance(status_to_wait, (tuple, list, set))
- else status_to_wait)
- share = self.get_share(share_id, version=version)
- migration_timeout = CONF.share.migration_timeout
- start = int(time.time())
- while share['task_state'] not in statuses:
- time.sleep(self.build_interval)
- share = self.get_share(share_id, version=version)
- if share['task_state'] in statuses:
- break
- elif share['task_state'] == 'migration_error':
- raise share_exceptions.ShareMigrationException(
- share_id=share['id'], src=share['host'], dest=dest_host)
- elif int(time.time()) - start >= migration_timeout:
- message = ('Share %(share_id)s failed to reach a status in'
- '%(status)s when migrating from host %(src)s to '
- 'host %(dest)s within the required time '
- '%(timeout)s.' % {
- 'src': share['host'],
- 'dest': dest_host,
- 'share_id': share['id'],
- 'timeout': self.build_timeout,
- 'status': six.text_type(statuses),
- })
- raise exceptions.TimeoutException(message)
- return share
-
################
def create_share_replica(self, share_id, availability_zone=None,
@@ -1751,38 +1547,6 @@
self.expected_success(expected_status, resp.status)
return self._parse_resp(body)
- def wait_for_share_replica_status(self, replica_id, expected_status,
- status_attr='status'):
- """Waits for a replica's status_attr to reach a given status."""
- body = self.get_share_replica(replica_id)
- replica_status = body[status_attr]
- start = int(time.time())
-
- while replica_status != expected_status:
- time.sleep(self.build_interval)
- body = self.get_share_replica(replica_id)
- replica_status = body[status_attr]
- if replica_status == expected_status:
- return
- if ('error' in replica_status
- and expected_status != constants.STATUS_ERROR):
- raise share_exceptions.ShareInstanceBuildErrorException(
- id=replica_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('The %(status_attr)s of Replica %(id)s failed to '
- 'reach %(expected_status)s status within the '
- 'required time (%(time)ss). Current '
- '%(status_attr)s: %(current_status)s.' %
- {
- 'status_attr': status_attr,
- 'expected_status': expected_status,
- 'time': self.build_timeout,
- 'id': replica_id,
- 'current_status': replica_status,
- })
- raise exceptions.TimeoutException(message)
-
def reset_share_replica_status(self, replica_id,
status=constants.STATUS_AVAILABLE,
version=LATEST_MICROVERSION):
@@ -1901,35 +1665,6 @@
return found_rules[0] if len(found_rules) > 0 else None
- def wait_for_snapshot_access_rule_status(self, snapshot_id, rule_id,
- expected_state='active'):
- rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
- state = rule['state']
- start = int(time.time())
-
- while state != expected_state:
- time.sleep(self.build_interval)
- rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
- state = rule['state']
- if state == expected_state:
- return
- if 'error' in state:
- raise share_exceptions.AccessRuleBuildErrorException(
- snapshot_id)
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('The status of snapshot access rule %(id)s failed '
- 'to reach %(expected_state)s state within the '
- 'required time (%(time)ss). Current '
- 'state: %(current_state)s.' %
- {
- 'expected_state': expected_state,
- 'time': self.build_timeout,
- 'id': rule_id,
- 'current_state': state,
- })
- raise exceptions.TimeoutException(message)
-
def delete_snapshot_access_rule(self, snapshot_id, rule_id):
body = {
"deny_access": {
@@ -1941,26 +1676,6 @@
self.expected_success(202, resp.status)
return self._parse_resp(body)
- def wait_for_snapshot_access_rule_deletion(self, snapshot_id, rule_id):
- rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
- start = int(time.time())
-
- while rule is not None:
- time.sleep(self.build_interval)
-
- rule = self.get_snapshot_access_rule(snapshot_id, rule_id)
-
- if rule is None:
- return
- if int(time.time()) - start >= self.build_timeout:
- message = ('The snapshot access rule %(id)s failed to delete '
- 'within the required time (%(time)ss).' %
- {
- 'time': self.build_timeout,
- 'id': rule_id,
- })
- raise exceptions.TimeoutException(message)
-
def get_snapshot_export_location(self, snapshot_id, export_location_uuid,
version=LATEST_MICROVERSION):
resp, body = self.get(
@@ -2001,23 +1716,6 @@
self.expected_success(204, resp.status)
return self._parse_resp(body)
- def wait_for_message(self, resource_id):
- """Waits until a message for a resource with given id exists"""
- start = int(time.time())
- message = None
-
- while not message:
- time.sleep(self.build_interval)
- for msg in self.list_messages():
- if msg['resource_id'] == resource_id:
- return msg
-
- if int(time.time()) - start >= self.build_timeout:
- message = ('No message for resource with id %s was created in'
- ' the required time (%s s).' %
- (resource_id, self.build_timeout))
- raise exceptions.TimeoutException(message)
-
###############
def create_security_service(self, ss_type="ldap",
diff --git a/manila_tempest_tests/tests/api/admin/test_admin_actions.py b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
index 57b97e3..7b6b725 100644
--- a/manila_tempest_tests/tests/api/admin/test_admin_actions.py
+++ b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
@@ -19,6 +19,7 @@
import testtools
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
CONF = config.CONF
@@ -41,8 +42,9 @@
def _wait_for_resource_status(self, resource_id, resource_type):
wait_for_resource_status = getattr(
- self.shares_v2_client, "wait_for_{}_status".format(resource_type))
- wait_for_resource_status(resource_id, "available")
+ waiters, "wait_for_{}_status".format(resource_type))
+ wait_for_resource_status(
+ self.shares_v2_client, resource_id, "available")
def _reset_resource_available(self, resource_id, resource_type="shares"):
self.shares_v2_client.reset_state(
@@ -54,7 +56,8 @@
@ddt.data("error", "available", "error_deleting", "deleting", "creating")
def test_reset_share_state(self, status):
self.shares_v2_client.reset_state(self.sh["id"], status=status)
- self.shares_v2_client.wait_for_share_status(self.sh["id"], status)
+ waiters.wait_for_share_status(self.shares_v2_client,
+ self.sh["id"], status)
self.addCleanup(self._reset_resource_available, self.sh["id"])
@decorators.idempotent_id('13075b2d-fe83-41bf-b6ef-99cfcc00257d')
@@ -66,8 +69,8 @@
share_instance_id = sh_instance["id"]
self.shares_v2_client.reset_state(
share_instance_id, s_type="share_instances", status=status)
- self.shares_v2_client.wait_for_share_instance_status(
- share_instance_id, status)
+ waiters.wait_for_share_instance_status(
+ self.shares_v2_client, share_instance_id, status)
self.addCleanup(self._reset_resource_available,
share_instance_id, "share_instances")
@@ -80,8 +83,8 @@
snapshot = self.create_snapshot_wait_for_active(self.sh["id"])
self.shares_v2_client.reset_state(
snapshot["id"], s_type="snapshots", status=status)
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot["id"], status)
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot["id"], status)
self.addCleanup(self._reset_resource_available,
snapshot["id"], "snapshots")
@@ -151,5 +154,5 @@
def test_reset_share_task_state(self):
for task_state in self.task_states:
self.shares_v2_client.reset_task_state(self.sh["id"], task_state)
- self.shares_v2_client.wait_for_share_status(
- self.sh["id"], task_state, 'task_state')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.sh["id"], task_state, 'task_state')
diff --git a/manila_tempest_tests/tests/api/admin/test_migration.py b/manila_tempest_tests/tests/api/admin/test_migration.py
index 25e7213..3c0254d 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -113,15 +114,16 @@
self.shares_v2_client.create_access_rule(
share['id'], access_to="50.50.50.50", access_level="rw")
- self.shares_v2_client.wait_for_share_status(
+ waiters.wait_for_share_status(
+ self.shares_v2_client,
share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
self.shares_v2_client.create_access_rule(
share['id'], access_to="51.51.51.51", access_level="ro")
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.RULE_STATE_ACTIVE,
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
dest_pool = dest_pool['name']
@@ -235,15 +237,15 @@
if resize == 'extend':
new_size = CONF.share.share_size + 2
self.shares_v2_client.extend_share(share['id'], new_size)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))
else:
new_size = CONF.share.share_size
self.shares_v2_client.shrink_share(share['id'], new_size)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))
diff --git a/manila_tempest_tests/tests/api/admin/test_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
index 12e3fb0..6ab0c97 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
@@ -23,6 +23,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -93,8 +94,8 @@
@base.skip_if_microversion_lt("2.22")
def test_migration_get_progress_None(self):
self.shares_v2_client.reset_task_state(self.share["id"], None)
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], None, 'task_state')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], None, 'task_state')
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migration_get_progress,
self.share['id'])
@@ -209,8 +210,8 @@
self.share['id'], self.dest_pool,
new_share_type_id=self.new_type_invalid['share_type']['id'],
new_share_network_id=new_share_network_id)
- self.shares_v2_client.wait_for_migration_status(
- self.share['id'], self.dest_pool,
+ waiters.wait_for_migration_status(
+ self.shares_v2_client, self.share['id'], self.dest_pool,
constants.TASK_STATE_MIGRATION_ERROR)
@decorators.idempotent_id('e2bd0cca-c091-4785-a9dc-7f42d2bb95a5')
@@ -227,15 +228,16 @@
def test_migrate_share_not_available(self):
self.shares_client.reset_state(self.share['id'],
constants.STATUS_ERROR)
- self.shares_client.wait_for_share_status(self.share['id'],
- constants.STATUS_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'], constants.STATUS_ERROR)
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool)
self.shares_client.reset_state(self.share['id'],
constants.STATUS_AVAILABLE)
- self.shares_client.wait_for_share_status(self.share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'],
+ constants.STATUS_AVAILABLE)
@decorators.idempotent_id('e8f1e491-697a-4941-bf51-4d37f0a93fa5')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
diff --git a/manila_tempest_tests/tests/api/admin/test_replication.py b/manila_tempest_tests/tests/api/admin/test_replication.py
index c435da1..6ad9a52 100644
--- a/manila_tempest_tests/tests/api/admin/test_replication.py
+++ b/manila_tempest_tests/tests/api/admin/test_replication.py
@@ -19,6 +19,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -107,9 +108,9 @@
share["id"], self.replica_zone, cleanup=False,
client=self.admin_client, version=version)
# Wait for replica state to update after creation
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# List replicas
replica_list = self.admin_client.list_share_replicas(
@@ -124,8 +125,9 @@
self.admin_client.reset_share_replica_state(
replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_OUT_OF_SYNC,
status_attr='replica_state')
# Promote 'out_of_sync' replica to 'active' state.
@@ -158,8 +160,8 @@
version=version)
self.admin_client.reset_share_replica_status(
replica['id'], constants.STATUS_ERROR_DELETING, version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.STATUS_ERROR_DELETING)
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'], constants.STATUS_ERROR_DELETING)
self.admin_client.force_delete_share_replica(replica['id'],
version=version)
self.admin_client.wait_for_resource_deletion(replica_id=replica['id'])
@@ -181,8 +183,8 @@
self.admin_client.reset_share_replica_status(replica['id'],
constants.STATUS_ERROR,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.STATUS_ERROR)
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'], constants.STATUS_ERROR)
@decorators.idempotent_id('258844da-a853-42b6-87db-b16e616018c6')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -201,8 +203,9 @@
self.admin_client.reset_share_replica_state(replica['id'],
constants.STATUS_ERROR,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.STATUS_ERROR, status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'], constants.STATUS_ERROR,
+ status_attr='replica_state')
@decorators.idempotent_id('2969565a-85e8-4c61-9dfb-cc7f7ca9f6dd')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -218,20 +221,21 @@
cleanup_in_class=False,
client=self.admin_client,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Set replica_state to 'out_of_sync'.
self.admin_client.reset_share_replica_state(
replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_OUT_OF_SYNC,
status_attr='replica_state')
# Attempt resync
self.admin_client.resync_share_replica(replica['id'], version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
diff --git a/manila_tempest_tests/tests/api/admin/test_replication_actions.py b/manila_tempest_tests/tests/api/admin/test_replication_actions.py
index 5ceb818..e90e746 100644
--- a/manila_tempest_tests/tests/api/admin/test_replication_actions.py
+++ b/manila_tempest_tests/tests/api/admin/test_replication_actions.py
@@ -17,6 +17,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -85,8 +86,8 @@
# Test extend share
new_size = self.share["size"] + 1
self.admin_client.extend_share(self.share["id"], new_size)
- self.admin_client.wait_for_share_status(self.share["id"],
- "available")
+ waiters.wait_for_share_status(
+ self.admin_client, self.share["id"], "available")
share = self.admin_client.get_share(self.share["id"])
self.assertEqual(new_size, int(share["size"]))
@@ -98,7 +99,8 @@
share = self.admin_client.get_share(self.share["id"])
new_size = self.share["size"] - 1
self.admin_client.shrink_share(self.share["id"], new_size)
- self.admin_client.wait_for_share_status(share["id"], "available")
+ waiters.wait_for_share_status(
+ self.admin_client, share["id"], "available")
shrink_share = self.admin_client.get_share(self.share["id"])
self.assertEqual(new_size, int(shrink_share["size"]))
@@ -127,8 +129,8 @@
managed_share = self.admin_client.manage_share(
share['host'], share['share_proto'],
export_path, self.share_type_id)
- self.admin_client.wait_for_share_status(
- managed_share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.admin_client, managed_share['id'], 'available')
# Add managed share to cleanup queue
self.method_resources.insert(
diff --git a/manila_tempest_tests/tests/api/admin/test_share_manage.py b/manila_tempest_tests/tests/api/admin/test_share_manage.py
index 9905eeb..6c311ac 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_manage.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -107,8 +108,9 @@
'client': self.shares_client})
# Wait for success
- self.shares_v2_client.wait_for_share_status(managed_share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(self.shares_v2_client,
+ managed_share['id'],
+ constants.STATUS_AVAILABLE)
# Verify data of managed share
self.assertEqual(name, managed_share['name'])
diff --git a/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
index 0aeb88f..d5cc09f 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -66,8 +67,8 @@
# Manage the share and wait for the expected state.
# Return the managed share object.
managed_share = self.shares_v2_client.manage_share(**params)
- self.shares_v2_client.wait_for_share_status(
- managed_share['id'], state)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, managed_share['id'], state)
return managed_share
@@ -167,8 +168,9 @@
invalid_share = self.shares_v2_client.manage_share(
**invalid_params
)
- self.shares_v2_client.wait_for_share_status(
- invalid_share['id'], constants.STATUS_MANAGE_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, invalid_share['id'],
+ constants.STATUS_MANAGE_ERROR)
# cleanup
self._unmanage_share_and_wait(invalid_share)
@@ -259,8 +261,9 @@
invalid_params.update({'export_path': 'invalid'})
invalid_share = self.shares_v2_client.manage_share(**invalid_params)
- self.shares_v2_client.wait_for_share_status(
- invalid_share['id'], constants.STATUS_MANAGE_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, invalid_share['id'],
+ constants.STATUS_MANAGE_ERROR)
self._unmanage_share_and_wait(share)
# the attempt to delete a share in manage_error should raise an
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers.py b/manila_tempest_tests/tests/api/admin/test_share_servers.py
index b5fe72c..b39a17d 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers.py
@@ -24,6 +24,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -328,9 +329,8 @@
share_server['id'],
status=state,
)
- self.shares_v2_client.wait_for_share_server_status(
- share_server['id'],
- status=state
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share_server['id'], status=state
)
# bring the share server back in the active state
@@ -338,8 +338,8 @@
share_server['id'],
status=constants.SERVER_STATE_ACTIVE,
)
- self.shares_v2_client.wait_for_share_server_status(
- share_server['id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share_server['id'],
status=constants.SERVER_STATE_ACTIVE
)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
index 0b04511..6d16c66 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -164,8 +165,8 @@
share['share_server_id'],
status=constants.SERVER_STATE_ACTIVE,
)
- self.shares_v2_client.wait_for_share_server_status(
- share['share_server_id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share['share_server_id'],
constants.SERVER_STATE_ACTIVE,
)
@@ -206,8 +207,8 @@
share['share_server_id'],
status=constants.SERVER_STATE_ACTIVE,
)
- self.shares_v2_client.wait_for_share_server_status(
- share['share_server_id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share['share_server_id'],
constants.SERVER_STATE_ACTIVE,
)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
index e162b17..ac332dd 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -96,8 +97,8 @@
access_to=rule.get('access_to'),
access_level=rule.get('access_level')
)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.RULE_STATE_ACTIVE,
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
share = self.shares_v2_client.get_share(share['id'])
@@ -140,8 +141,8 @@
# Check the snapshot status if possible.
if snapshot_id:
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot_id, constants.STATUS_AVAILABLE)
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot_id, constants.STATUS_AVAILABLE)
# Check the share server destination status.
dest_server = self.shares_v2_client.show_share_server(dest_server_id)
@@ -276,8 +277,9 @@
src_server_id, dest_host, preserve_snapshots=preserve_snapshots)
expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
- self.shares_v2_client.wait_for_share_server_status(
- src_server_id, expected_state, status_attr='task_state')
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, src_server_id, expected_state,
+ status_attr='task_state')
# Get for the destination share server.
dest_server_id = self._get_share_server_destination_for_migration(
src_server_id)
@@ -295,8 +297,9 @@
# Wait for the migration cancelled status.
expected_state = constants.TASK_STATE_MIGRATION_CANCELLED
- self.shares_v2_client.wait_for_share_server_status(
- src_server_id, expected_state, status_attr='task_state')
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, src_server_id, expected_state,
+ status_attr='task_state')
# After the cancel operation, we need to validate again the resources.
expected_status = constants.STATUS_AVAILABLE
@@ -339,8 +342,9 @@
preserve_snapshots=preserve_snapshots)
expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
- self.shares_v2_client.wait_for_share_server_status(
- src_server_id, expected_state, status_attr='task_state')
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, src_server_id, expected_state,
+ status_attr='task_state')
# Get for the destination share server.
dest_server_id = self._get_share_server_destination_for_migration(
src_server_id)
@@ -358,8 +362,8 @@
# It's necessary wait for the destination server went to active status.
expected_status = constants.SERVER_STATE_ACTIVE
- self.shares_v2_client.wait_for_share_server_status(
- dest_server_id, expected_status)
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, dest_server_id, expected_status)
# Check if the source server went to inactive status if it exists.
try:
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
index 3b39a61..9a038ac 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
@@ -21,6 +21,7 @@
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api.admin import test_share_servers_migration
from manila_tempest_tests.tests.api import base
@@ -245,11 +246,12 @@
def resource_cleanup(cls):
states = [constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE]
- cls.shares_v2_client.wait_for_share_server_status(
- cls.server_id, status=states, status_attr="task_state")
+ waiters.wait_for_share_server_status(
+ cls.shares_v2_client, cls.server_id, status=states,
+ status_attr="task_state")
cls.shares_v2_client.share_server_migration_cancel(cls.server_id)
- cls.shares_v2_client.wait_for_share_status(cls.share['id'],
- status="available")
+ waiters.wait_for_share_status(
+ cls.shares_v2_client, cls.share['id'], status="available")
super(ShareServerMigrationStartNegativesNFS, cls).resource_cleanup()
@decorators.idempotent_id('5b904db3-fc36-4c35-a8ef-cf6b80315388')
diff --git a/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py b/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
index d09b25f..7e9aa6a 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
@@ -18,6 +18,7 @@
from tempest.lib import decorators
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -128,8 +129,8 @@
for status in ("error", "available"):
self.shares_v2_client.reset_snapshot_instance_status(
sii, status=status)
- self.shares_v2_client.wait_for_snapshot_instance_status(
- sii, expected_status=status)
+ waiters.wait_for_snapshot_instance_status(
+ self.shares_v2_client, sii, expected_status=status)
self.shares_v2_client.delete_snapshot(snapshot['id'])
self.shares_v2_client.wait_for_resource_deletion(
snapshot_id=snapshot['id'])
diff --git a/manila_tempest_tests/tests/api/admin/test_share_types.py b/manila_tempest_tests/tests/api/admin/test_share_types.py
index cb97bc5..5b117cc 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_types.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_types.py
@@ -20,6 +20,7 @@
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -233,7 +234,8 @@
share = self.create_share(
name=share_name, share_type_id=st_create["share_type"]["id"])
self.assertEqual(share["name"], share_name)
- self.shares_client.wait_for_share_status(share["id"], "available")
+ waiters.wait_for_share_status(
+ self.shares_client, share["id"], "available")
# Verify share info
get = self.shares_v2_client.get_share(share["id"], version="2.5")
diff --git a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
index 8e97887..6f6f3a0 100644
--- a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -99,9 +100,8 @@
'client': self.shares_v2_client})
# Wait for success
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot['id'],
- constants.STATUS_AVAILABLE
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot['id'], constants.STATUS_AVAILABLE
)
# Verify manage snapshot API response
diff --git a/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
index 68ed242..b079264 100644
--- a/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -120,8 +121,8 @@
'invalid_provider_location',
driver_options={}
)
- self.shares_v2_client.wait_for_snapshot_status(
- invalid_snap['id'],
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, invalid_snap['id'],
constants.STATUS_MANAGE_ERROR
)
self.shares_v2_client.unmanage_snapshot(invalid_snap['id'])
@@ -131,8 +132,8 @@
self.share['id'],
snap['provider_location']
)
- self.shares_v2_client.wait_for_snapshot_status(
- managed_snap['id'],
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, managed_snap['id'],
constants.STATUS_AVAILABLE
)
self._delete_snapshot_and_wait(managed_snap)
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index fe4616e..0ee3611 100755
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -27,6 +27,7 @@
from manila_tempest_tests import clients
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests import utils
@@ -399,24 +400,24 @@
nondisruptive=nondisruptive, preserve_snapshots=preserve_snapshots,
new_share_network_id=new_share_network_id,
new_share_type_id=new_share_type_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, wait_for_status, **kwargs)
+ share = waiters.wait_for_migration_status(
+ client, share_id, dest_host, wait_for_status, **kwargs)
return share
@classmethod
def migration_complete(cls, share_id, dest_host, client=None, **kwargs):
client = client or cls.shares_v2_client
client.migration_complete(share_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, 'migration_success', **kwargs)
+ share = waiters.wait_for_migration_status(
+ client, share_id, dest_host, 'migration_success', **kwargs)
return share
@classmethod
def migration_cancel(cls, share_id, dest_host, client=None, **kwargs):
client = client or cls.shares_v2_client
client.migration_cancel(share_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, 'migration_cancelled', **kwargs)
+ share = waiters.wait_for_migration_status(
+ client, share_id, dest_host, 'migration_cancelled', **kwargs)
return share
@classmethod
@@ -480,7 +481,8 @@
client = d["kwargs"]["client"]
share_id = d["share"]["id"]
try:
- client.wait_for_share_status(share_id, "available")
+ waiters.wait_for_share_status(
+ client, share_id, "available")
d["available"] = True
except (share_exceptions.ShareBuildErrorException,
exceptions.TimeoutException) as e:
@@ -539,7 +541,8 @@
else:
cls.method_resources.insert(0, resource)
- client.wait_for_share_group_status(share_group['id'], 'available')
+ waiters.wait_for_share_group_status(
+ client, share_group['id'], 'available')
return share_group
@classmethod
@@ -588,7 +591,7 @@
cls.class_resources.insert(0, resource)
else:
cls.method_resources.insert(0, resource)
- client.wait_for_snapshot_status(snapshot["id"], "available")
+ waiters.wait_for_snapshot_status(client, snapshot["id"], "available")
return snapshot
@classmethod
@@ -609,8 +612,8 @@
cls.class_resources.insert(0, resource)
else:
cls.method_resources.insert(0, resource)
- client.wait_for_share_group_snapshot_status(
- sg_snapshot["id"], "available")
+ waiters.wait_for_share_group_snapshot_status(
+ client, sg_snapshot["id"], "available")
return sg_snapshot
@classmethod
@@ -696,8 +699,8 @@
cls.class_resources.insert(0, resource)
else:
cls.method_resources.insert(0, resource)
- client.wait_for_share_replica_status(
- replica["id"], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_replica_status(
+ client, replica["id"], constants.STATUS_AVAILABLE)
return replica
@classmethod
@@ -715,9 +718,8 @@
version=CONF.share.max_api_microversion):
client = client or cls.shares_v2_client
replica = client.promote_share_replica(replica_id, version=version)
- client.wait_for_share_replica_status(
- replica["id"],
- constants.REPLICATION_STATE_ACTIVE,
+ waiters.wait_for_share_replica_status(
+ client, replica["id"], constants.REPLICATION_STATE_ACTIVE,
status_attr="replica_state")
return replica
@@ -1084,8 +1086,9 @@
'share_network_id': self.shares_v2_client.share_network_id}
share = self.shares_v2_client.create_share(**params)
self.addCleanup(self.shares_v2_client.delete_share, share['id'])
- self.shares_v2_client.wait_for_share_status(share['id'], "error")
- return self.shares_v2_client.wait_for_message(share['id'])
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], "error")
+ return waiters.wait_for_message(self.shares_v2_client, share['id'])
def allow_access(self, share_id, client=None, access_type=None,
access_level='rw', access_to=None, status='active',
@@ -1098,8 +1101,8 @@
rule = client.create_access_rule(share_id, access_type, access_to,
access_level)
- client.wait_for_access_rule_status(share_id, rule['id'], status,
- raise_rule_in_error_state)
+ waiters.wait_for_access_rule_status(client, share_id, rule['id'],
+ status, raise_rule_in_error_state)
if cleanup:
self.addCleanup(client.wait_for_resource_deletion,
rule_id=rule['id'], share_id=share_id)
@@ -1186,8 +1189,9 @@
description=description,
share_server_id=share_server_id
)
- self.shares_v2_client.wait_for_share_status(
- managed_share['id'], constants.STATUS_AVAILABLE
+ waiters.wait_for_share_status(
+ self.shares_v2_client, managed_share['id'],
+ constants.STATUS_AVAILABLE
)
return managed_share
@@ -1207,8 +1211,8 @@
params.get('identifier', share_server['identifier']),
share_network_subnet_id=subnet_id,
)
- self.shares_v2_client.wait_for_share_server_status(
- managed_share_server['id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, managed_share_server['id'],
constants.SERVER_STATE_ACTIVE,
)
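With the base-class hunks above, migrate_share, migration_complete and migration_cancel now delegate polling to waiters.wait_for_migration_status and return the refreshed share body. A hedged sketch of how a test might drive these helpers after the change; the destination host and the intermediate task state are illustrative values, not taken from this patch:

    # Hypothetical flow inside a migration test.
    dest_host = 'host@backend#pool'  # illustrative value
    share = self.migrate_share(
        share['id'], dest_host,
        wait_for_status='migration_driver_phase1_done')  # illustrative state
    share = self.migration_complete(share['id'], dest_host)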
diff --git a/manila_tempest_tests/tests/api/test_replication.py b/manila_tempest_tests/tests/api/test_replication.py
index 5eb5e2a..713f446 100644
--- a/manila_tempest_tests/tests/api/test_replication.py
+++ b/manila_tempest_tests/tests/api/test_replication.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -129,9 +130,9 @@
replica = self.create_share_replica(share["id"], self.replica_zone,
cleanup=False)
# Wait for replica state to update after creation
- self.shares_v2_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Promote the first in_sync replica to active state
promoted_replica = self.promote_share_replica(replica['id'])
# Delete the demoted replica so promoted replica can be cleaned
@@ -191,16 +192,17 @@
access_type, access_to = self._get_access_rule_data_from_config()
rule = self.shares_v2_client.create_access_rule(
self.shares[0]["id"], access_type, access_to, 'ro')
- self.shares_v2_client.wait_for_access_rule_status(
- self.shares[0]["id"], rule["id"], constants.RULE_STATE_ACTIVE)
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.shares[0]["id"], rule["id"],
+ constants.RULE_STATE_ACTIVE)
# Create the replica
self._verify_create_replica()
# Verify access_rules_status transitions to 'active' state.
- self.shares_v2_client.wait_for_share_status(
- self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
- status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.shares[0]["id"],
+ constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
# Delete rule and wait for deletion
self.shares_v2_client.delete_access_rule(self.shares[0]["id"],
@@ -219,9 +221,9 @@
self.shares_v2_client.create_access_rule(
self.shares[0]["id"], access_type, access_to, 'ro')
- self.shares_v2_client.wait_for_share_status(
- self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
- status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.shares[0]["id"],
+ constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
# Delete the replica
self.delete_share_replica(share_replica["id"])
@@ -279,8 +281,9 @@
access_type, access_to = self._get_access_rule_data_from_config()
rule = self.shares_v2_client.create_access_rule(
share["id"], access_type, access_to, 'ro')
- self.shares_v2_client.wait_for_access_rule_status(
- share["id"], rule["id"], constants.RULE_STATE_ACTIVE)
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, share["id"], rule["id"],
+ constants.RULE_STATE_ACTIVE)
original_replica = self.shares_v2_client.list_share_replicas(
share["id"])[0]
@@ -337,23 +340,23 @@
new_replica = self.create_share_replica(share["id"],
self.replica_zone,
cleanup_in_class=False)
- self.shares_v2_client.wait_for_share_replica_status(
- new_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, new_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Promote the new replica to active and verify the replica states
self.promote_share_replica(new_replica['id'])
self._verify_active_replica_count(share["id"])
- self.shares_v2_client.wait_for_share_replica_status(
- original_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, original_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Promote the original replica back to active
self.promote_share_replica(original_replica['id'])
self._verify_active_replica_count(share["id"])
- self.shares_v2_client.wait_for_share_replica_status(
- new_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, new_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
@decorators.idempotent_id('1452156b-75a5-4f3c-a921-834732a03b0a')
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
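A second convention, visible across the replication hunks: the shared waiters can poll an attribute other than the default 'status' through the status_attr keyword, which these tests use to track a replica's replica_state. Minimal sketch, identifiers as in the hunks above:

    # Wait on 'replica_state' instead of the default 'status' field.
    waiters.wait_for_share_replica_status(
        self.shares_v2_client, replica['id'],
        constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')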
diff --git a/manila_tempest_tests/tests/api/test_replication_export_locations.py b/manila_tempest_tests/tests/api/test_replication_export_locations.py
index c9857e6..371f02d 100644
--- a/manila_tempest_tests/tests/api/test_replication_export_locations.py
+++ b/manila_tempest_tests/tests/api/test_replication_export_locations.py
@@ -18,6 +18,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -151,9 +152,9 @@
)
primary_replica = self.shares_v2_client.get_share_replica(
primary_replica_exports[0]['share_instance_id'])
- self.shares_v2_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Share export locations list API
share_exports = self.shares_v2_client.list_share_export_locations(
diff --git a/manila_tempest_tests/tests/api/test_replication_negative.py b/manila_tempest_tests/tests/api/test_replication_negative.py
index c437737..30367bd 100644
--- a/manila_tempest_tests/tests/api/test_replication_negative.py
+++ b/manila_tempest_tests/tests/api/test_replication_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -146,8 +147,9 @@
# Set replica state to out of sync
self.admin_client.reset_share_replica_state(
replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC)
- self.shares_v2_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica['id'],
+ constants.REPLICATION_STATE_OUT_OF_SYNC,
status_attr='replica_state')
# Try promoting the first out_of_sync replica to active state
self.assertRaises(lib_exc.Forbidden,
@@ -175,9 +177,9 @@
replica = self.create_share_replica(share["id"], self.replica_zone,
cleanup_in_class=False)
# By default, 'writable' replica is expected to be in active state
- self.shares_v2_client.wait_for_share_replica_status(
- replica["id"], constants.REPLICATION_STATE_ACTIVE,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica["id"],
+ constants.REPLICATION_STATE_ACTIVE, status_attr='replica_state')
# Try promoting the replica
self.shares_v2_client.promote_share_replica(replica['id'])
diff --git a/manila_tempest_tests/tests/api/test_replication_snapshots.py b/manila_tempest_tests/tests/api/test_replication_snapshots.py
index a812679..8bd6bdf 100644
--- a/manila_tempest_tests/tests/api/test_replication_snapshots.py
+++ b/manila_tempest_tests/tests/api/test_replication_snapshots.py
@@ -19,6 +19,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -86,9 +87,9 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot = self.create_snapshot_wait_for_active(share["id"])
self.promote_share_replica(share_replica['id'])
@@ -122,13 +123,13 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Wait for snapshot1 to become available
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot['id'], "available")
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot['id'], "available")
self.promote_share_replica(share_replica['id'])
self.delete_share_replica(original_replica['id'])
@@ -162,15 +163,15 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot2 = self.create_snapshot_wait_for_active(share["id"])
# Wait for snapshot1 to become available
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot1['id'], "available")
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot1['id'], "available")
self.promote_share_replica(share_replica['id'])
# Remove the original active replica to ensure that snapshot is
@@ -205,9 +206,9 @@
share_network_id=self.sn_id)
share_replica = self.create_share_replica(share["id"],
self.replica_zone)
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot = self.create_snapshot_wait_for_active(share["id"])
self.shares_v2_client.delete_snapshot(snapshot['id'])
self.shares_v2_client.wait_for_resource_deletion(
@@ -234,9 +235,9 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
self.promote_share_replica(share_replica['id'])
# Delete the demoted replica so promoted replica can be cleaned
# during the cleanup
diff --git a/manila_tempest_tests/tests/api/test_revert_to_snapshot.py b/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
index 6a915c6..30d5fcc 100644
--- a/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
+++ b/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -115,8 +116,9 @@
self.share['id'],
snapshot['id'],
version=version)
- self.shares_v2_client.wait_for_share_status(self.share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'],
+ constants.STATUS_AVAILABLE)
@decorators.idempotent_id('09bd9942-7ef9-4d24-b2dd-f83bdda27b50')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -137,8 +139,9 @@
self.shares_v2_client.revert_to_snapshot(self.share['id'],
snapshot1['id'],
version=version)
- self.shares_v2_client.wait_for_share_status(self.share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'],
+ constants.STATUS_AVAILABLE)
@decorators.idempotent_id('146de138-d351-49dc-a13a-5cdbed40b9ac')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -158,9 +161,9 @@
share_replica = self.create_share_replica(share["id"],
self.replica_zone)
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot = self.create_snapshot_wait_for_active(share["id"])
@@ -168,8 +171,8 @@
share['id'],
snapshot['id'],
version=version)
- self.shares_v2_client.wait_for_share_status(share['id'],
- constants.STATUS_AVAILABLE)
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
diff --git a/manila_tempest_tests/tests/api/test_rules.py b/manila_tempest_tests/tests/api/test_rules.py
index 2a2420a..37f9250 100644
--- a/manila_tempest_tests/tests/api/test_rules.py
+++ b/manila_tempest_tests/tests/api/test_rules.py
@@ -22,6 +22,7 @@
import testtools
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -55,12 +56,12 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_le(version, '2.9'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# If the 'access_rules_status' transitions to 'active',
# rule state must too
rules = self.shares_v2_client.list_access_rules(self.share['id'])
@@ -137,15 +138,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule and wait for deletion
if utils.is_microversion_eq(version, '1.0'):
@@ -190,15 +191,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule and wait for deletion
if utils.is_microversion_eq(version, '1.0'):
@@ -291,15 +292,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule and wait for deletion
if utils.is_microversion_eq(version, '1.0'):
@@ -393,15 +394,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule
if utils.is_microversion_eq(version, '1.0'):
@@ -441,15 +442,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
if utils.is_microversion_eq(version, '1.0'):
self.shares_client.delete_access_rule(self.share["id"], rule["id"])
@@ -506,8 +507,8 @@
self.assertEqual(access_level, rule['access_level'])
for key in ('deleted', 'deleted_at', 'instance_mappings'):
self.assertNotIn(key, rule.keys())
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
self.shares_v2_client.delete_access_rule(
self.share["id"], rule["id"], version=version)
@@ -520,8 +521,8 @@
# Grant access to the share
access1 = self.shares_v2_client.create_access_rule(
self.share['id'], self.access_type, self.access_to, 'rw')
- self.shares_v2_client.wait_for_access_rule_status(
- self.share['id'], access1['id'], 'active')
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share['id'], access1['id'], 'active')
# Create a new user in the current project
project = self.os_admin.projects_client.show_project(
@@ -537,8 +538,9 @@
# used in access1
access2 = user_client.shares_v2_client.create_access_rule(
share2['id'], self.access_type, self.access_to, 'rw')
- user_client.shares_v2_client.wait_for_access_rule_status(
- share2['id'], access2['id'], 'active')
+ waiters.wait_for_access_rule_status(
+ user_client.shares_v2_client, share2['id'], access2['id'],
+ 'active')
@ddt.ddt
@@ -614,15 +616,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# list rules
if utils.is_microversion_eq(version, '1.0'):
@@ -699,15 +701,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete share
if utils.is_microversion_eq(version, '1.0'):
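The test_rules.py hunks repeat a single branching pattern: the waiter is selected by the microversion under test, since API versions up to 2.9 expose per-rule state while later versions aggregate it into the share's access_rules_status attribute. Condensed from the hunks above:

    if utils.is_microversion_eq(version, '1.0'):
        waiters.wait_for_access_rule_status(
            self.shares_client, share['id'], rule['id'], 'active')
    elif utils.is_microversion_eq(version, '2.9'):
        waiters.wait_for_access_rule_status(
            self.shares_v2_client, share['id'], rule['id'], 'active')
    else:
        # Later microversions report rule state on the share itself.
        waiters.wait_for_share_status(
            self.shares_v2_client, share['id'], 'active',
            status_attr='access_rules_status', version=version)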
diff --git a/manila_tempest_tests/tests/api/test_rules_negative.py b/manila_tempest_tests/tests/api/test_rules_negative.py
index e64a0d2..ef6a85c 100644
--- a/manila_tempest_tests/tests/api/test_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_rules_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -103,15 +104,15 @@
self.share["id"], access_type, access_to, version=version)
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# try create duplicate of rule
if utils.is_microversion_eq(version, '1.0'):
@@ -153,8 +154,9 @@
self.share["id"], "ip", access_to)
self.addCleanup(self.shares_v2_client.delete_access_rule,
self.share["id"], rule['id'])
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status')
self.assertRaises(lib_exc.BadRequest,
self.shares_v2_client.create_access_rule,
@@ -184,8 +186,8 @@
share = self.create_share(share_type_id=share_type['id'],
cleanup_in_class=False,
wait_for_status=False)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_ERROR)
self.assertRaises(lib_exc.BadRequest,
self.admin_client.create_access_rule,
share["id"], access_type, access_to)
@@ -458,8 +460,9 @@
# Check share's access_rules_status has transitioned to "active" status
self.alt_shares_v2_client.delete_access_rule(
share_alt['id'], rule1['id'])
- self.alt_shares_v2_client.wait_for_share_status(
- share_alt['id'], 'active', status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.alt_shares_v2_client, share_alt['id'], 'active',
+ status_attr='access_rules_status')
@ddt.ddt
diff --git a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
index a374f4e..a8d2c34 100644
--- a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
+++ b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -171,8 +172,8 @@
# Create a share into the share network
share = self.shares_v2_client.create_share(**args)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share['id'])
# Gets the export locations to be used in the future
@@ -202,8 +203,9 @@
)
# Do some necessary cleanup
- self.shares_v2_client.wait_for_share_status(
- managed_share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, managed_share['id'],
+ constants.STATUS_AVAILABLE)
self.shares_client.delete_share(managed_share['id'])
self.shares_v2_client.wait_for_resource_deletion(
share_id=managed_share["id"])
@@ -254,8 +256,8 @@
# Create a share into the share network
share = self.shares_v2_client.create_share(**args)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.admin_shares_v2_client.get_share(share['id'])
share_server = self.admin_shares_v2_client.show_share_server(
share['share_server_id']
diff --git a/manila_tempest_tests/tests/api/test_shares_actions.py b/manila_tempest_tests/tests/api/test_shares_actions.py
index 7f48ee1..4fed092 100644
--- a/manila_tempest_tests/tests/api/test_shares_actions.py
+++ b/manila_tempest_tests/tests/api/test_shares_actions.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -664,7 +665,8 @@
# extend share and wait for active status
self.shares_v2_client.extend_share(share['id'], new_size)
- self.shares_client.wait_for_share_status(share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.shares_client, share['id'], 'available')
# check state and new size
share_get = self.shares_v2_client.get_share(share['id'])
@@ -691,7 +693,8 @@
# shrink share and wait for active status
self.shares_v2_client.shrink_share(share['id'], new_size)
- self.shares_client.wait_for_share_status(share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.shares_client, share['id'], 'available')
# check state and new size
share_get = self.shares_v2_client.get_share(share['id'])
diff --git a/manila_tempest_tests/tests/api/test_snapshot_rules.py b/manila_tempest_tests/tests/api/test_snapshot_rules.py
index e793843..30c7da4 100644
--- a/manila_tempest_tests/tests/api/test_snapshot_rules.py
+++ b/manila_tempest_tests/tests/api/test_snapshot_rules.py
@@ -19,6 +19,7 @@
from tempest.lib import decorators
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -51,14 +52,14 @@
for key in ('deleted', 'deleted_at', 'instance_mappings'):
self.assertNotIn(key, list(six.iterkeys(rule)))
- self.shares_v2_client.wait_for_snapshot_access_rule_status(
- self.snapshot['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_status(
+ self.shares_v2_client, self.snapshot['id'], rule['id'])
# delete rule and wait for deletion
self.shares_v2_client.delete_snapshot_access_rule(self.snapshot['id'],
rule['id'])
- self.shares_v2_client.wait_for_snapshot_access_rule_deletion(
- self.snapshot['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_deletion(
+ self.shares_v2_client, self.snapshot['id'], rule['id'])
@ddt.ddt
diff --git a/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py b/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
index 92bd639..6965c7f 100644
--- a/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
@@ -19,6 +19,7 @@
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.api import test_snapshot_rules
from manila_tempest_tests import utils
@@ -92,8 +93,8 @@
rule = self.shares_v2_client.create_snapshot_access_rule(
self.snap['id'], access_type, access_to)
- self.shares_v2_client.wait_for_snapshot_access_rule_status(
- self.snap['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_status(
+ self.shares_v2_client, self.snap['id'], rule['id'])
# try create duplicate of rule
self.assertRaises(lib_exc.BadRequest,
@@ -113,8 +114,8 @@
# delete rule and wait for deletion
self.shares_v2_client.delete_snapshot_access_rule(self.snap['id'],
rule['id'])
- self.shares_v2_client.wait_for_snapshot_access_rule_deletion(
- self.snap['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_deletion(
+ self.shares_v2_client, self.snap['id'], rule['id'])
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.delete_snapshot_access_rule,
diff --git a/manila_tempest_tests/tests/scenario/manager_share.py b/manila_tempest_tests/tests/scenario/manager_share.py
index 84c87bd..79a15a2 100644
--- a/manila_tempest_tests/tests/scenario/manager_share.py
+++ b/manila_tempest_tests/tests/scenario/manager_share.py
@@ -26,6 +26,7 @@
from manila_tempest_tests.common import constants
from manila_tempest_tests.common import remote_client
+from manila_tempest_tests.common import waiters as share_waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager
from manila_tempest_tests import utils
@@ -347,8 +348,9 @@
"""
client = client or self.shares_client
client.delete_access_rule(share_id, access_rule_id)
- self.shares_v2_client.wait_for_share_status(
- share_id, "active", status_attr='access_rules_status')
+ share_waiters.wait_for_share_status(
+ self.shares_v2_client, share_id, "active",
+ status_attr='access_rules_status')
def provide_access_to_auxiliary_instance(self, instance, share=None,
snapshot=None, access_level='rw',
@@ -531,7 +533,7 @@
self.addCleanup(client.delete_share,
share['id'])
- client.wait_for_share_status(share['id'], 'available')
+ share_waiters.wait_for_share_status(client, share['id'], 'available')
return share
def _create_snapshot(self, share_id, client=None, **kwargs):
@@ -540,7 +542,8 @@
self.addCleanup(
client.wait_for_resource_deletion, snapshot_id=snapshot['id'])
self.addCleanup(client.delete_snapshot, snapshot['id'])
- client.wait_for_snapshot_status(snapshot["id"], "available")
+ share_waiters.wait_for_snapshot_status(
+ client, snapshot["id"], "available")
return snapshot
def _wait_for_share_server_deletion(self, sn_id, client=None):
@@ -590,8 +593,8 @@
access = client.create_access_rule(share_id, access_type, access_to,
access_level)
- client.wait_for_share_status(
- share_id, "active", status_attr='access_rules_status')
+ share_waiters.wait_for_share_status(
+ client, share_id, "active", status_attr='access_rules_status')
if cleanup:
self.addCleanup(client.delete_access_rule, share_id, access['id'])
@@ -616,8 +619,8 @@
self.addCleanup(client.delete_snapshot_access_rule,
snapshot_id, access['id'])
- client.wait_for_snapshot_access_rule_status(
- snapshot_id, access['id'])
+ share_waiters.wait_for_snapshot_access_rule_status(
+ client, snapshot_id, access['id'])
return access
@@ -642,15 +645,16 @@
share_id, dest_host, writable=False, preserve_metadata=False,
nondisruptive=False, preserve_snapshots=False,
force_host_assisted_migration=force_host_assisted)
- share = client.wait_for_migration_status(share_id, dest_host, status)
+ share = share_waiters.wait_for_migration_status(
+ client, share_id, dest_host, status)
return share
def _migration_complete(self, share_id, dest_host, client=None, **kwargs):
client = client or self.shares_admin_v2_client
client.migration_complete(share_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, constants.TASK_STATE_MIGRATION_SUCCESS,
- **kwargs)
+ share = share_waiters.wait_for_migration_status(
+ client, share_id, dest_host,
+ constants.TASK_STATE_MIGRATION_SUCCESS, **kwargs)
return share
def _create_share_type(self, name, is_public=True, **kwargs):
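One detail worth noting in manager_share.py: the scenario manager imports the new module under the alias share_waiters rather than waiters, presumably to keep it distinct from tempest's own waiter helpers used in scenario code (an assumption; the patch does not state the reason). The call shape is otherwise identical:

    from manila_tempest_tests.common import waiters as share_waiters
    share_waiters.wait_for_snapshot_status(client, snapshot['id'], 'available')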
diff --git a/manila_tempest_tests/tests/scenario/test_share_extend.py b/manila_tempest_tests/tests/scenario/test_share_extend.py
index df77990..c099b91 100644
--- a/manila_tempest_tests/tests/scenario/test_share_extend.py
+++ b/manila_tempest_tests/tests/scenario/test_share_extend.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager_share as manager
@@ -95,8 +96,8 @@
extended_share_size = default_share_size + 1
self.shares_v2_client.extend_share(share["id"],
new_size=extended_share_size)
- self.shares_v2_client.wait_for_share_status(share["id"],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share["id"], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(extended_share_size, int(share["size"]))
diff --git a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
index 7da1974..cc2f1a7 100644
--- a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
+++ b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
@@ -18,6 +18,7 @@
import testtools
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager_share as manager
from manila_tempest_tests import utils
@@ -131,8 +132,8 @@
share['share_proto'],
locations[0],
share_type['id'])
- self.shares_admin_v2_client.wait_for_share_status(
- managed_share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.shares_admin_v2_client, managed_share['id'], 'available')
LOG.debug('Step 11 - grant access again')
self.provide_access_to_auxiliary_instance(
@@ -165,8 +166,8 @@
share['share_proto'],
locations[0],
share_type['id'])
- self.shares_admin_v2_client.wait_for_share_status(
- remanaged_share['id'], 'manage_error')
+ waiters.wait_for_share_status(
+ self.shares_admin_v2_client, remanaged_share['id'], 'manage_error')
self.shares_admin_v2_client.reset_state(remanaged_share['id'])
diff --git a/manila_tempest_tests/tests/scenario/test_share_shrink.py b/manila_tempest_tests/tests/scenario/test_share_shrink.py
index 3e498f8..94f4e62 100644
--- a/manila_tempest_tests/tests/scenario/test_share_shrink.py
+++ b/manila_tempest_tests/tests/scenario/test_share_shrink.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager_share as manager
@@ -86,8 +87,9 @@
LOG.debug('Step 8 - try update size, shrink and wait')
self.shares_v2_client.shrink_share(share['id'],
new_size=default_share_size)
- self.shares_v2_client.wait_for_share_status(
- share['id'], ['shrinking_possible_data_loss_error', 'available'])
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'],
+ ['shrinking_possible_data_loss_error', 'available'])
share = self.shares_v2_client.get_share(share["id"])
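The final hunk also shows that the shared waiter accepts a list of acceptable states as well as a single string; the wait presumably ends once the share reaches any state in the list. As called above:

    # Either terminal state ends the wait.
    waiters.wait_for_share_status(
        self.shares_v2_client, share['id'],
        ['shrinking_possible_data_loss_error', 'available'])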