Move resource waiters to a new module
Currently, resource waiter methods are defined in the
service client modules, and three of them are duplicated
between the v1 and v2 modules:
- wait_for_share_status
- wait_for_snapshot_status
- wait_for_access_rule_status
This patch separates the waiter functions from the client
modules and collects them in a single common module,
manila_tempest_tests/common/waiters.py; each waiter now
takes the client to poll with as its first argument.
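For illustration only, a module-level waiter in the new
manila_tempest_tests/common/waiters.py could look like the minimal
sketch below. The polling loop, the use of the client's
build_interval/build_timeout attributes, and the TimeoutException are
assumptions made for the sketch, not a description of the exact
implementation (for instance, some callers below pass a list of
acceptable states, which this sketch does not handle):

    import time

    from tempest.lib import exceptions


    def wait_for_share_status(client, share_id, status, status_attr='status',
                              version=None):
        # Poll the requested attribute of the share until it reaches the
        # expected status. 'version' is accepted to mirror the call sites
        # below; a real implementation would pass it to client.get_share.
        start = int(time.time())
        while True:
            share = client.get_share(share_id)
            if share[status_attr] == status:
                return
            if int(time.time()) - start >= client.build_timeout:
                raise exceptions.TimeoutException(
                    "Share %s: %s did not reach %s within %s seconds." %
                    (share_id, status_attr, status, client.build_timeout))
            time.sleep(client.build_interval)

Every waiter keeps its name and arguments but takes the client as an
explicit first parameter, which is why the call sites below change from
client.wait_for_*(...) to waiters.wait_for_*(client, ...).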
Change-Id: I9f0d50a325139e6067e4339533d4b01a322df7a8
diff --git a/manila_tempest_tests/tests/api/admin/test_admin_actions.py b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
index 57b97e3..7b6b725 100644
--- a/manila_tempest_tests/tests/api/admin/test_admin_actions.py
+++ b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
@@ -19,6 +19,7 @@
import testtools
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
CONF = config.CONF
@@ -41,8 +42,9 @@
def _wait_for_resource_status(self, resource_id, resource_type):
wait_for_resource_status = getattr(
- self.shares_v2_client, "wait_for_{}_status".format(resource_type))
- wait_for_resource_status(resource_id, "available")
+ waiters, "wait_for_{}_status".format(resource_type))
+ wait_for_resource_status(
+ self.shares_v2_client, resource_id, "available")
def _reset_resource_available(self, resource_id, resource_type="shares"):
self.shares_v2_client.reset_state(
@@ -54,7 +56,8 @@
@ddt.data("error", "available", "error_deleting", "deleting", "creating")
def test_reset_share_state(self, status):
self.shares_v2_client.reset_state(self.sh["id"], status=status)
- self.shares_v2_client.wait_for_share_status(self.sh["id"], status)
+ waiters.wait_for_share_status(self.shares_v2_client,
+ self.sh["id"], status)
self.addCleanup(self._reset_resource_available, self.sh["id"])
@decorators.idempotent_id('13075b2d-fe83-41bf-b6ef-99cfcc00257d')
@@ -66,8 +69,8 @@
share_instance_id = sh_instance["id"]
self.shares_v2_client.reset_state(
share_instance_id, s_type="share_instances", status=status)
- self.shares_v2_client.wait_for_share_instance_status(
- share_instance_id, status)
+ waiters.wait_for_share_instance_status(
+ self.shares_v2_client, share_instance_id, status)
self.addCleanup(self._reset_resource_available,
share_instance_id, "share_instances")
@@ -80,8 +83,8 @@
snapshot = self.create_snapshot_wait_for_active(self.sh["id"])
self.shares_v2_client.reset_state(
snapshot["id"], s_type="snapshots", status=status)
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot["id"], status)
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot["id"], status)
self.addCleanup(self._reset_resource_available,
snapshot["id"], "snapshots")
@@ -151,5 +154,5 @@
def test_reset_share_task_state(self):
for task_state in self.task_states:
self.shares_v2_client.reset_task_state(self.sh["id"], task_state)
- self.shares_v2_client.wait_for_share_status(
- self.sh["id"], task_state, 'task_state')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.sh["id"], task_state, 'task_state')
diff --git a/manila_tempest_tests/tests/api/admin/test_migration.py b/manila_tempest_tests/tests/api/admin/test_migration.py
index 25e7213..3c0254d 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -113,15 +114,16 @@
self.shares_v2_client.create_access_rule(
share['id'], access_to="50.50.50.50", access_level="rw")
- self.shares_v2_client.wait_for_share_status(
+ waiters.wait_for_share_status(
+ self.shares_v2_client,
share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
self.shares_v2_client.create_access_rule(
share['id'], access_to="51.51.51.51", access_level="ro")
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.RULE_STATE_ACTIVE,
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
dest_pool = dest_pool['name']
@@ -235,15 +237,15 @@
if resize == 'extend':
new_size = CONF.share.share_size + 2
self.shares_v2_client.extend_share(share['id'], new_size)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))
else:
new_size = CONF.share.share_size
self.shares_v2_client.shrink_share(share['id'], new_size)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(new_size, int(share["size"]))
diff --git a/manila_tempest_tests/tests/api/admin/test_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
index 12e3fb0..6ab0c97 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
@@ -23,6 +23,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -93,8 +94,8 @@
@base.skip_if_microversion_lt("2.22")
def test_migration_get_progress_None(self):
self.shares_v2_client.reset_task_state(self.share["id"], None)
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], None, 'task_state')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], None, 'task_state')
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migration_get_progress,
self.share['id'])
@@ -209,8 +210,8 @@
self.share['id'], self.dest_pool,
new_share_type_id=self.new_type_invalid['share_type']['id'],
new_share_network_id=new_share_network_id)
- self.shares_v2_client.wait_for_migration_status(
- self.share['id'], self.dest_pool,
+ waiters.wait_for_migration_status(
+ self.shares_v2_client, self.share['id'], self.dest_pool,
constants.TASK_STATE_MIGRATION_ERROR)
@decorators.idempotent_id('e2bd0cca-c091-4785-a9dc-7f42d2bb95a5')
@@ -227,15 +228,16 @@
def test_migrate_share_not_available(self):
self.shares_client.reset_state(self.share['id'],
constants.STATUS_ERROR)
- self.shares_client.wait_for_share_status(self.share['id'],
- constants.STATUS_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'], constants.STATUS_ERROR)
self.assertRaises(
lib_exc.BadRequest, self.shares_v2_client.migrate_share,
self.share['id'], self.dest_pool)
self.shares_client.reset_state(self.share['id'],
constants.STATUS_AVAILABLE)
- self.shares_client.wait_for_share_status(self.share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'],
+ constants.STATUS_AVAILABLE)
@decorators.idempotent_id('e8f1e491-697a-4941-bf51-4d37f0a93fa5')
@tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
diff --git a/manila_tempest_tests/tests/api/admin/test_replication.py b/manila_tempest_tests/tests/api/admin/test_replication.py
index c435da1..6ad9a52 100644
--- a/manila_tempest_tests/tests/api/admin/test_replication.py
+++ b/manila_tempest_tests/tests/api/admin/test_replication.py
@@ -19,6 +19,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -107,9 +108,9 @@
share["id"], self.replica_zone, cleanup=False,
client=self.admin_client, version=version)
# Wait for replica state to update after creation
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# List replicas
replica_list = self.admin_client.list_share_replicas(
@@ -124,8 +125,9 @@
self.admin_client.reset_share_replica_state(
replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_OUT_OF_SYNC,
status_attr='replica_state')
# Promote 'out_of_sync' replica to 'active' state.
@@ -158,8 +160,8 @@
version=version)
self.admin_client.reset_share_replica_status(
replica['id'], constants.STATUS_ERROR_DELETING, version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.STATUS_ERROR_DELETING)
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'], constants.STATUS_ERROR_DELETING)
self.admin_client.force_delete_share_replica(replica['id'],
version=version)
self.admin_client.wait_for_resource_deletion(replica_id=replica['id'])
@@ -181,8 +183,8 @@
self.admin_client.reset_share_replica_status(replica['id'],
constants.STATUS_ERROR,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.STATUS_ERROR)
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'], constants.STATUS_ERROR)
@decorators.idempotent_id('258844da-a853-42b6-87db-b16e616018c6')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -201,8 +203,9 @@
self.admin_client.reset_share_replica_state(replica['id'],
constants.STATUS_ERROR,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.STATUS_ERROR, status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'], constants.STATUS_ERROR,
+ status_attr='replica_state')
@decorators.idempotent_id('2969565a-85e8-4c61-9dfb-cc7f7ca9f6dd')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -218,20 +221,21 @@
cleanup_in_class=False,
client=self.admin_client,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Set replica_state to 'out_of_sync'.
self.admin_client.reset_share_replica_state(
replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_OUT_OF_SYNC,
status_attr='replica_state')
# Attempt resync
self.admin_client.resync_share_replica(replica['id'], version=version)
- self.admin_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.admin_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
diff --git a/manila_tempest_tests/tests/api/admin/test_replication_actions.py b/manila_tempest_tests/tests/api/admin/test_replication_actions.py
index 5ceb818..e90e746 100644
--- a/manila_tempest_tests/tests/api/admin/test_replication_actions.py
+++ b/manila_tempest_tests/tests/api/admin/test_replication_actions.py
@@ -17,6 +17,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -85,8 +86,8 @@
# Test extend share
new_size = self.share["size"] + 1
self.admin_client.extend_share(self.share["id"], new_size)
- self.admin_client.wait_for_share_status(self.share["id"],
- "available")
+ waiters.wait_for_share_status(
+ self.admin_client, self.share["id"], "available")
share = self.admin_client.get_share(self.share["id"])
self.assertEqual(new_size, int(share["size"]))
@@ -98,7 +99,8 @@
share = self.admin_client.get_share(self.share["id"])
new_size = self.share["size"] - 1
self.admin_client.shrink_share(self.share["id"], new_size)
- self.admin_client.wait_for_share_status(share["id"], "available")
+ waiters.wait_for_share_status(
+ self.admin_client, share["id"], "available")
shrink_share = self.admin_client.get_share(self.share["id"])
self.assertEqual(new_size, int(shrink_share["size"]))
@@ -127,8 +129,8 @@
managed_share = self.admin_client.manage_share(
share['host'], share['share_proto'],
export_path, self.share_type_id)
- self.admin_client.wait_for_share_status(
- managed_share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.admin_client, managed_share['id'], 'available')
# Add managed share to cleanup queue
self.method_resources.insert(
diff --git a/manila_tempest_tests/tests/api/admin/test_share_manage.py b/manila_tempest_tests/tests/api/admin/test_share_manage.py
index 9905eeb..6c311ac 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_manage.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -107,8 +108,9 @@
'client': self.shares_client})
# Wait for success
- self.shares_v2_client.wait_for_share_status(managed_share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(self.shares_v2_client,
+ managed_share['id'],
+ constants.STATUS_AVAILABLE)
# Verify data of managed share
self.assertEqual(name, managed_share['name'])
diff --git a/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
index 0aeb88f..d5cc09f 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -66,8 +67,8 @@
# Manage the share and wait for the expected state.
# Return the managed share object.
managed_share = self.shares_v2_client.manage_share(**params)
- self.shares_v2_client.wait_for_share_status(
- managed_share['id'], state)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, managed_share['id'], state)
return managed_share
@@ -167,8 +168,9 @@
invalid_share = self.shares_v2_client.manage_share(
**invalid_params
)
- self.shares_v2_client.wait_for_share_status(
- invalid_share['id'], constants.STATUS_MANAGE_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, invalid_share['id'],
+ constants.STATUS_MANAGE_ERROR)
# cleanup
self._unmanage_share_and_wait(invalid_share)
@@ -259,8 +261,9 @@
invalid_params.update({'export_path': 'invalid'})
invalid_share = self.shares_v2_client.manage_share(**invalid_params)
- self.shares_v2_client.wait_for_share_status(
- invalid_share['id'], constants.STATUS_MANAGE_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, invalid_share['id'],
+ constants.STATUS_MANAGE_ERROR)
self._unmanage_share_and_wait(share)
# the attempt to delete a share in manage_error should raise an
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers.py b/manila_tempest_tests/tests/api/admin/test_share_servers.py
index b5fe72c..b39a17d 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers.py
@@ -24,6 +24,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -328,9 +329,8 @@
share_server['id'],
status=state,
)
- self.shares_v2_client.wait_for_share_server_status(
- share_server['id'],
- status=state
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share_server['id'], status=state
)
# bring the share server back in the active state
@@ -338,8 +338,8 @@
share_server['id'],
status=constants.SERVER_STATE_ACTIVE,
)
- self.shares_v2_client.wait_for_share_server_status(
- share_server['id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share_server['id'],
status=constants.SERVER_STATE_ACTIVE
)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
index 0b04511..6d16c66 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -164,8 +165,8 @@
share['share_server_id'],
status=constants.SERVER_STATE_ACTIVE,
)
- self.shares_v2_client.wait_for_share_server_status(
- share['share_server_id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share['share_server_id'],
constants.SERVER_STATE_ACTIVE,
)
@@ -206,8 +207,8 @@
share['share_server_id'],
status=constants.SERVER_STATE_ACTIVE,
)
- self.shares_v2_client.wait_for_share_server_status(
- share['share_server_id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, share['share_server_id'],
constants.SERVER_STATE_ACTIVE,
)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
index e162b17..ac332dd 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -96,8 +97,8 @@
access_to=rule.get('access_to'),
access_level=rule.get('access_level')
)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.RULE_STATE_ACTIVE,
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
status_attr='access_rules_status')
share = self.shares_v2_client.get_share(share['id'])
@@ -140,8 +141,8 @@
# Check the snapshot status if possible.
if snapshot_id:
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot_id, constants.STATUS_AVAILABLE)
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot_id, constants.STATUS_AVAILABLE)
# Check the share server destination status.
dest_server = self.shares_v2_client.show_share_server(dest_server_id)
@@ -276,8 +277,9 @@
src_server_id, dest_host, preserve_snapshots=preserve_snapshots)
expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
- self.shares_v2_client.wait_for_share_server_status(
- src_server_id, expected_state, status_attr='task_state')
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, src_server_id, expected_state,
+ status_attr='task_state')
# Get the destination share server.
dest_server_id = self._get_share_server_destination_for_migration(
src_server_id)
@@ -295,8 +297,9 @@
# Wait for the migration cancelled status.
expected_state = constants.TASK_STATE_MIGRATION_CANCELLED
- self.shares_v2_client.wait_for_share_server_status(
- src_server_id, expected_state, status_attr='task_state')
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, src_server_id, expected_state,
+ status_attr='task_state')
# After the cancel operation, we need to validate the resources again.
expected_status = constants.STATUS_AVAILABLE
@@ -339,8 +342,9 @@
preserve_snapshots=preserve_snapshots)
expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
- self.shares_v2_client.wait_for_share_server_status(
- src_server_id, expected_state, status_attr='task_state')
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, src_server_id, expected_state,
+ status_attr='task_state')
# Get the destination share server.
dest_server_id = self._get_share_server_destination_for_migration(
src_server_id)
@@ -358,8 +362,8 @@
# It's necessary to wait for the destination server to reach active status.
expected_status = constants.SERVER_STATE_ACTIVE
- self.shares_v2_client.wait_for_share_server_status(
- dest_server_id, expected_status)
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, dest_server_id, expected_status)
# Check whether the source server went to inactive status, if it still exists.
try:
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
index 3b39a61..9a038ac 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
@@ -21,6 +21,7 @@
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api.admin import test_share_servers_migration
from manila_tempest_tests.tests.api import base
@@ -245,11 +246,12 @@
def resource_cleanup(cls):
states = [constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE]
- cls.shares_v2_client.wait_for_share_server_status(
- cls.server_id, status=states, status_attr="task_state")
+ waiters.wait_for_share_server_status(
+ cls.shares_v2_client, cls.server_id, status=states,
+ status_attr="task_state")
cls.shares_v2_client.share_server_migration_cancel(cls.server_id)
- cls.shares_v2_client.wait_for_share_status(cls.share['id'],
- status="available")
+ waiters.wait_for_share_status(
+ cls.shares_v2_client, cls.share['id'], status="available")
super(ShareServerMigrationStartNegativesNFS, cls).resource_cleanup()
@decorators.idempotent_id('5b904db3-fc36-4c35-a8ef-cf6b80315388')
diff --git a/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py b/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
index d09b25f..7e9aa6a 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
@@ -18,6 +18,7 @@
from tempest.lib import decorators
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -128,8 +129,8 @@
for status in ("error", "available"):
self.shares_v2_client.reset_snapshot_instance_status(
sii, status=status)
- self.shares_v2_client.wait_for_snapshot_instance_status(
- sii, expected_status=status)
+ waiters.wait_for_snapshot_instance_status(
+ self.shares_v2_client, sii, expected_status=status)
self.shares_v2_client.delete_snapshot(snapshot['id'])
self.shares_v2_client.wait_for_resource_deletion(
snapshot_id=snapshot['id'])
diff --git a/manila_tempest_tests/tests/api/admin/test_share_types.py b/manila_tempest_tests/tests/api/admin/test_share_types.py
index cb97bc5..5b117cc 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_types.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_types.py
@@ -20,6 +20,7 @@
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -233,7 +234,8 @@
share = self.create_share(
name=share_name, share_type_id=st_create["share_type"]["id"])
self.assertEqual(share["name"], share_name)
- self.shares_client.wait_for_share_status(share["id"], "available")
+ waiters.wait_for_share_status(
+ self.shares_client, share["id"], "available")
# Verify share info
get = self.shares_v2_client.get_share(share["id"], version="2.5")
diff --git a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
index 8e97887..6f6f3a0 100644
--- a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -99,9 +100,8 @@
'client': self.shares_v2_client})
# Wait for success
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot['id'],
- constants.STATUS_AVAILABLE
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot['id'], constants.STATUS_AVAILABLE
)
# Verify manage snapshot API response
diff --git a/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
index 68ed242..b079264 100644
--- a/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -120,8 +121,8 @@
'invalid_provider_location',
driver_options={}
)
- self.shares_v2_client.wait_for_snapshot_status(
- invalid_snap['id'],
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, invalid_snap['id'],
constants.STATUS_MANAGE_ERROR
)
self.shares_v2_client.unmanage_snapshot(invalid_snap['id'])
@@ -131,8 +132,8 @@
self.share['id'],
snap['provider_location']
)
- self.shares_v2_client.wait_for_snapshot_status(
- managed_snap['id'],
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, managed_snap['id'],
constants.STATUS_AVAILABLE
)
self._delete_snapshot_and_wait(managed_snap)
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index fe4616e..0ee3611 100755
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -27,6 +27,7 @@
from manila_tempest_tests import clients
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests import utils
@@ -399,24 +400,24 @@
nondisruptive=nondisruptive, preserve_snapshots=preserve_snapshots,
new_share_network_id=new_share_network_id,
new_share_type_id=new_share_type_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, wait_for_status, **kwargs)
+ share = waiters.wait_for_migration_status(
+ client, share_id, dest_host, wait_for_status, **kwargs)
return share
@classmethod
def migration_complete(cls, share_id, dest_host, client=None, **kwargs):
client = client or cls.shares_v2_client
client.migration_complete(share_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, 'migration_success', **kwargs)
+ share = waiters.wait_for_migration_status(
+ client, share_id, dest_host, 'migration_success', **kwargs)
return share
@classmethod
def migration_cancel(cls, share_id, dest_host, client=None, **kwargs):
client = client or cls.shares_v2_client
client.migration_cancel(share_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, 'migration_cancelled', **kwargs)
+ share = waiters.wait_for_migration_status(
+ client, share_id, dest_host, 'migration_cancelled', **kwargs)
return share
@classmethod
@@ -480,7 +481,8 @@
client = d["kwargs"]["client"]
share_id = d["share"]["id"]
try:
- client.wait_for_share_status(share_id, "available")
+ waiters.wait_for_share_status(
+ client, share_id, "available")
d["available"] = True
except (share_exceptions.ShareBuildErrorException,
exceptions.TimeoutException) as e:
@@ -539,7 +541,8 @@
else:
cls.method_resources.insert(0, resource)
- client.wait_for_share_group_status(share_group['id'], 'available')
+ waiters.wait_for_share_group_status(
+ client, share_group['id'], 'available')
return share_group
@classmethod
@@ -588,7 +591,7 @@
cls.class_resources.insert(0, resource)
else:
cls.method_resources.insert(0, resource)
- client.wait_for_snapshot_status(snapshot["id"], "available")
+ waiters.wait_for_snapshot_status(client, snapshot["id"], "available")
return snapshot
@classmethod
@@ -609,8 +612,8 @@
cls.class_resources.insert(0, resource)
else:
cls.method_resources.insert(0, resource)
- client.wait_for_share_group_snapshot_status(
- sg_snapshot["id"], "available")
+ waiters.wait_for_share_group_snapshot_status(
+ client, sg_snapshot["id"], "available")
return sg_snapshot
@classmethod
@@ -696,8 +699,8 @@
cls.class_resources.insert(0, resource)
else:
cls.method_resources.insert(0, resource)
- client.wait_for_share_replica_status(
- replica["id"], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_replica_status(
+ client, replica["id"], constants.STATUS_AVAILABLE)
return replica
@classmethod
@@ -715,9 +718,8 @@
version=CONF.share.max_api_microversion):
client = client or cls.shares_v2_client
replica = client.promote_share_replica(replica_id, version=version)
- client.wait_for_share_replica_status(
- replica["id"],
- constants.REPLICATION_STATE_ACTIVE,
+ waiters.wait_for_share_replica_status(
+ client, replica["id"], constants.REPLICATION_STATE_ACTIVE,
status_attr="replica_state")
return replica
@@ -1084,8 +1086,9 @@
'share_network_id': self.shares_v2_client.share_network_id}
share = self.shares_v2_client.create_share(**params)
self.addCleanup(self.shares_v2_client.delete_share, share['id'])
- self.shares_v2_client.wait_for_share_status(share['id'], "error")
- return self.shares_v2_client.wait_for_message(share['id'])
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], "error")
+ return waiters.wait_for_message(self.shares_v2_client, share['id'])
def allow_access(self, share_id, client=None, access_type=None,
access_level='rw', access_to=None, status='active',
@@ -1098,8 +1101,8 @@
rule = client.create_access_rule(share_id, access_type, access_to,
access_level)
- client.wait_for_access_rule_status(share_id, rule['id'], status,
- raise_rule_in_error_state)
+ waiters.wait_for_access_rule_status(client, share_id, rule['id'],
+ status, raise_rule_in_error_state)
if cleanup:
self.addCleanup(client.wait_for_resource_deletion,
rule_id=rule['id'], share_id=share_id)
@@ -1186,8 +1189,9 @@
description=description,
share_server_id=share_server_id
)
- self.shares_v2_client.wait_for_share_status(
- managed_share['id'], constants.STATUS_AVAILABLE
+ waiters.wait_for_share_status(
+ self.shares_v2_client, managed_share['id'],
+ constants.STATUS_AVAILABLE
)
return managed_share
@@ -1207,8 +1211,8 @@
params.get('identifier', share_server['identifier']),
share_network_subnet_id=subnet_id,
)
- self.shares_v2_client.wait_for_share_server_status(
- managed_share_server['id'],
+ waiters.wait_for_share_server_status(
+ self.shares_v2_client, managed_share_server['id'],
constants.SERVER_STATE_ACTIVE,
)
diff --git a/manila_tempest_tests/tests/api/test_replication.py b/manila_tempest_tests/tests/api/test_replication.py
index 5eb5e2a..713f446 100644
--- a/manila_tempest_tests/tests/api/test_replication.py
+++ b/manila_tempest_tests/tests/api/test_replication.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -129,9 +130,9 @@
replica = self.create_share_replica(share["id"], self.replica_zone,
cleanup=False)
# Wait for replica state to update after creation
- self.shares_v2_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Promote the first in_sync replica to active state
promoted_replica = self.promote_share_replica(replica['id'])
# Delete the demoted replica so promoted replica can be cleaned
@@ -191,16 +192,17 @@
access_type, access_to = self._get_access_rule_data_from_config()
rule = self.shares_v2_client.create_access_rule(
self.shares[0]["id"], access_type, access_to, 'ro')
- self.shares_v2_client.wait_for_access_rule_status(
- self.shares[0]["id"], rule["id"], constants.RULE_STATE_ACTIVE)
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.shares[0]["id"], rule["id"],
+ constants.RULE_STATE_ACTIVE)
# Create the replica
self._verify_create_replica()
# Verify access_rules_status transitions to 'active' state.
- self.shares_v2_client.wait_for_share_status(
- self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
- status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.shares[0]["id"],
+ constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
# Delete rule and wait for deletion
self.shares_v2_client.delete_access_rule(self.shares[0]["id"],
@@ -219,9 +221,9 @@
self.shares_v2_client.create_access_rule(
self.shares[0]["id"], access_type, access_to, 'ro')
- self.shares_v2_client.wait_for_share_status(
- self.shares[0]["id"], constants.RULE_STATE_ACTIVE,
- status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.shares[0]["id"],
+ constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
# Delete the replica
self.delete_share_replica(share_replica["id"])
@@ -279,8 +281,9 @@
access_type, access_to = self._get_access_rule_data_from_config()
rule = self.shares_v2_client.create_access_rule(
share["id"], access_type, access_to, 'ro')
- self.shares_v2_client.wait_for_access_rule_status(
- share["id"], rule["id"], constants.RULE_STATE_ACTIVE)
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, share["id"], rule["id"],
+ constants.RULE_STATE_ACTIVE)
original_replica = self.shares_v2_client.list_share_replicas(
share["id"])[0]
@@ -337,23 +340,23 @@
new_replica = self.create_share_replica(share["id"],
self.replica_zone,
cleanup_in_class=False)
- self.shares_v2_client.wait_for_share_replica_status(
- new_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, new_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Promote the new replica to active and verify the replica states
self.promote_share_replica(new_replica['id'])
self._verify_active_replica_count(share["id"])
- self.shares_v2_client.wait_for_share_replica_status(
- original_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, original_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Promote the original replica back to active
self.promote_share_replica(original_replica['id'])
self._verify_active_replica_count(share["id"])
- self.shares_v2_client.wait_for_share_replica_status(
- new_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, new_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
@decorators.idempotent_id('1452156b-75a5-4f3c-a921-834732a03b0a')
@tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
diff --git a/manila_tempest_tests/tests/api/test_replication_export_locations.py b/manila_tempest_tests/tests/api/test_replication_export_locations.py
index c9857e6..371f02d 100644
--- a/manila_tempest_tests/tests/api/test_replication_export_locations.py
+++ b/manila_tempest_tests/tests/api/test_replication_export_locations.py
@@ -18,6 +18,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -151,9 +152,9 @@
)
primary_replica = self.shares_v2_client.get_share_replica(
primary_replica_exports[0]['share_instance_id'])
- self.shares_v2_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Share export locations list API
share_exports = self.shares_v2_client.list_share_export_locations(
diff --git a/manila_tempest_tests/tests/api/test_replication_negative.py b/manila_tempest_tests/tests/api/test_replication_negative.py
index c437737..30367bd 100644
--- a/manila_tempest_tests/tests/api/test_replication_negative.py
+++ b/manila_tempest_tests/tests/api/test_replication_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -146,8 +147,9 @@
# Set replica state to out of sync
self.admin_client.reset_share_replica_state(
replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC)
- self.shares_v2_client.wait_for_share_replica_status(
- replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica['id'],
+ constants.REPLICATION_STATE_OUT_OF_SYNC,
status_attr='replica_state')
# Try promoting the first out_of_sync replica to active state
self.assertRaises(lib_exc.Forbidden,
@@ -175,9 +177,9 @@
replica = self.create_share_replica(share["id"], self.replica_zone,
cleanup_in_class=False)
# By default, 'writable' replica is expected to be in active state
- self.shares_v2_client.wait_for_share_replica_status(
- replica["id"], constants.REPLICATION_STATE_ACTIVE,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, replica["id"],
+ constants.REPLICATION_STATE_ACTIVE, status_attr='replica_state')
# Try promoting the replica
self.shares_v2_client.promote_share_replica(replica['id'])
diff --git a/manila_tempest_tests/tests/api/test_replication_snapshots.py b/manila_tempest_tests/tests/api/test_replication_snapshots.py
index a812679..8bd6bdf 100644
--- a/manila_tempest_tests/tests/api/test_replication_snapshots.py
+++ b/manila_tempest_tests/tests/api/test_replication_snapshots.py
@@ -19,6 +19,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -86,9 +87,9 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot = self.create_snapshot_wait_for_active(share["id"])
self.promote_share_replica(share_replica['id'])
@@ -122,13 +123,13 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
# Wait for snapshot1 to become available
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot['id'], "available")
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot['id'], "available")
self.promote_share_replica(share_replica['id'])
self.delete_share_replica(original_replica['id'])
@@ -162,15 +163,15 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot2 = self.create_snapshot_wait_for_active(share["id"])
# Wait for snapshot1 to become available
- self.shares_v2_client.wait_for_snapshot_status(
- snapshot1['id'], "available")
+ waiters.wait_for_snapshot_status(
+ self.shares_v2_client, snapshot1['id'], "available")
self.promote_share_replica(share_replica['id'])
# Remove the original active replica to ensure that snapshot is
@@ -205,9 +206,9 @@
share_network_id=self.sn_id)
share_replica = self.create_share_replica(share["id"],
self.replica_zone)
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot = self.create_snapshot_wait_for_active(share["id"])
self.shares_v2_client.delete_snapshot(snapshot['id'])
self.shares_v2_client.wait_for_resource_deletion(
@@ -234,9 +235,9 @@
self.replica_zone,
cleanup=False)
self.addCleanup(self.delete_share_replica, original_replica['id'])
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
self.promote_share_replica(share_replica['id'])
# Delete the demoted replica so promoted replica can be cleaned
# during the cleanup
diff --git a/manila_tempest_tests/tests/api/test_revert_to_snapshot.py b/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
index 6a915c6..30d5fcc 100644
--- a/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
+++ b/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests import share_exceptions
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -115,8 +116,9 @@
self.share['id'],
snapshot['id'],
version=version)
- self.shares_v2_client.wait_for_share_status(self.share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'],
+ constants.STATUS_AVAILABLE)
@decorators.idempotent_id('09bd9942-7ef9-4d24-b2dd-f83bdda27b50')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -137,8 +139,9 @@
self.shares_v2_client.revert_to_snapshot(self.share['id'],
snapshot1['id'],
version=version)
- self.shares_v2_client.wait_for_share_status(self.share['id'],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share['id'],
+ constants.STATUS_AVAILABLE)
@decorators.idempotent_id('146de138-d351-49dc-a13a-5cdbed40b9ac')
@tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -158,9 +161,9 @@
share_replica = self.create_share_replica(share["id"],
self.replica_zone)
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
snapshot = self.create_snapshot_wait_for_active(share["id"])
@@ -168,8 +171,8 @@
share['id'],
snapshot['id'],
version=version)
- self.shares_v2_client.wait_for_share_status(share['id'],
- constants.STATUS_AVAILABLE)
- self.shares_v2_client.wait_for_share_replica_status(
- share_replica['id'], constants.REPLICATION_STATE_IN_SYNC,
- status_attr='replica_state')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_replica_status(
+ self.shares_v2_client, share_replica['id'],
+ constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
diff --git a/manila_tempest_tests/tests/api/test_rules.py b/manila_tempest_tests/tests/api/test_rules.py
index 2a2420a..37f9250 100644
--- a/manila_tempest_tests/tests/api/test_rules.py
+++ b/manila_tempest_tests/tests/api/test_rules.py
@@ -22,6 +22,7 @@
import testtools
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -55,12 +56,12 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_le(version, '2.9'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# If the 'access_rules_status' transitions to 'active',
# rule state must too
rules = self.shares_v2_client.list_access_rules(self.share['id'])
@@ -137,15 +138,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule and wait for deletion
if utils.is_microversion_eq(version, '1.0'):
@@ -190,15 +191,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule and wait for deletion
if utils.is_microversion_eq(version, '1.0'):
@@ -291,15 +292,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule and wait for deletion
if utils.is_microversion_eq(version, '1.0'):
@@ -393,15 +394,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete rule
if utils.is_microversion_eq(version, '1.0'):
@@ -441,15 +442,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
if utils.is_microversion_eq(version, '1.0'):
self.shares_client.delete_access_rule(self.share["id"], rule["id"])
@@ -506,8 +507,8 @@
self.assertEqual(access_level, rule['access_level'])
for key in ('deleted', 'deleted_at', 'instance_mappings'):
self.assertNotIn(key, rule.keys())
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
self.shares_v2_client.delete_access_rule(
self.share["id"], rule["id"], version=version)
@@ -520,8 +521,8 @@
# Grant access to the share
access1 = self.shares_v2_client.create_access_rule(
self.share['id'], self.access_type, self.access_to, 'rw')
- self.shares_v2_client.wait_for_access_rule_status(
- self.share['id'], access1['id'], 'active')
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share['id'], access1['id'], 'active')
# Create a new user in the current project
project = self.os_admin.projects_client.show_project(
@@ -537,8 +538,9 @@
# used in access1
access2 = user_client.shares_v2_client.create_access_rule(
share2['id'], self.access_type, self.access_to, 'rw')
- user_client.shares_v2_client.wait_for_access_rule_status(
- share2['id'], access2['id'], 'active')
+ waiters.wait_for_access_rule_status(
+ user_client.shares_v2_client, share2['id'], access2['id'],
+ 'active')
@ddt.ddt
@@ -614,15 +616,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# list rules
if utils.is_microversion_eq(version, '1.0'):
@@ -699,15 +701,15 @@
self.assertEqual("queued_to_apply", rule['state'])
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share["id"], "active",
+ status_attr='access_rules_status', version=version)
# delete share
if utils.is_microversion_eq(version, '1.0'):
diff --git a/manila_tempest_tests/tests/api/test_rules_negative.py b/manila_tempest_tests/tests/api/test_rules_negative.py
index e64a0d2..ef6a85c 100644
--- a/manila_tempest_tests/tests/api/test_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_rules_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -103,15 +104,15 @@
self.share["id"], access_type, access_to, version=version)
if utils.is_microversion_eq(version, '1.0'):
- self.shares_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_client, self.share["id"], rule["id"], "active")
elif utils.is_microversion_eq(version, '2.9'):
- self.shares_v2_client.wait_for_access_rule_status(
- self.share["id"], rule["id"], "active")
+ waiters.wait_for_access_rule_status(
+ self.shares_v2_client, self.share["id"], rule["id"], "active")
else:
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status',
- version=version)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status', version=version)
# try create duplicate of rule
if utils.is_microversion_eq(version, '1.0'):
@@ -153,8 +154,9 @@
self.share["id"], "ip", access_to)
self.addCleanup(self.shares_v2_client.delete_access_rule,
self.share["id"], rule['id'])
- self.shares_v2_client.wait_for_share_status(
- self.share["id"], "active", status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.shares_v2_client, self.share["id"], "active",
+ status_attr='access_rules_status')
self.assertRaises(lib_exc.BadRequest,
self.shares_v2_client.create_access_rule,
@@ -184,8 +186,8 @@
share = self.create_share(share_type_id=share_type['id'],
cleanup_in_class=False,
wait_for_status=False)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_ERROR)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_ERROR)
self.assertRaises(lib_exc.BadRequest,
self.admin_client.create_access_rule,
share["id"], access_type, access_to)
@@ -458,8 +460,9 @@
# Check share's access_rules_status has transitioned to "active" status
self.alt_shares_v2_client.delete_access_rule(
share_alt['id'], rule1['id'])
- self.alt_shares_v2_client.wait_for_share_status(
- share_alt['id'], 'active', status_attr='access_rules_status')
+ waiters.wait_for_share_status(
+ self.alt_shares_v2_client, share_alt['id'], 'active',
+ status_attr='access_rules_status')
@ddt.ddt
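
The hunks in this file all apply the same mechanical translation, quoted from the diff above:

    # Before: the waiter was a method bound to one client class
    #     self.shares_v2_client.wait_for_share_status(
    #         share['id'], constants.STATUS_ERROR)
    # After: a module-level function, with the client passed explicitly
    #     waiters.wait_for_share_status(
    #         self.shares_v2_client, share['id'], constants.STATUS_ERROR)

Because the client became an argument, one implementation now serves both self.shares_client and self.shares_v2_client, as the microversion branches earlier in this file demonstrate.
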
diff --git a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
index a374f4e..a8d2c34 100644
--- a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
+++ b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -171,8 +172,8 @@
# Create a share into the share network
share = self.shares_v2_client.create_share(**args)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share['id'])
# Gets the export locations to be used in the future
@@ -202,8 +203,9 @@
)
# Do some necessary cleanup
- self.shares_v2_client.wait_for_share_status(
- managed_share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, managed_share['id'],
+ constants.STATUS_AVAILABLE)
self.shares_client.delete_share(managed_share['id'])
self.shares_v2_client.wait_for_resource_deletion(
share_id=managed_share["id"])
@@ -254,8 +256,8 @@
# Create a share into the share network
share = self.shares_v2_client.create_share(**args)
- self.shares_v2_client.wait_for_share_status(
- share['id'], constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
share = self.admin_shares_v2_client.get_share(share['id'])
share_server = self.admin_shares_v2_client.show_share_server(
share['share_server_id']
diff --git a/manila_tempest_tests/tests/api/test_shares_actions.py b/manila_tempest_tests/tests/api/test_shares_actions.py
index 7f48ee1..4fed092 100644
--- a/manila_tempest_tests/tests/api/test_shares_actions.py
+++ b/manila_tempest_tests/tests/api/test_shares_actions.py
@@ -22,6 +22,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -664,7 +665,8 @@
# extend share and wait for active status
self.shares_v2_client.extend_share(share['id'], new_size)
- self.shares_client.wait_for_share_status(share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.shares_client, share['id'], 'available')
# check state and new size
share_get = self.shares_v2_client.get_share(share['id'])
@@ -691,7 +693,8 @@
# shrink share and wait for active status
self.shares_v2_client.shrink_share(share['id'], new_size)
- self.shares_client.wait_for_share_status(share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.shares_client, share['id'], 'available')
# check state and new size
share_get = self.shares_v2_client.get_share(share['id'])
diff --git a/manila_tempest_tests/tests/api/test_snapshot_rules.py b/manila_tempest_tests/tests/api/test_snapshot_rules.py
index e793843..30c7da4 100644
--- a/manila_tempest_tests/tests/api/test_snapshot_rules.py
+++ b/manila_tempest_tests/tests/api/test_snapshot_rules.py
@@ -19,6 +19,7 @@
from tempest.lib import decorators
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests import utils
@@ -51,14 +52,14 @@
for key in ('deleted', 'deleted_at', 'instance_mappings'):
self.assertNotIn(key, list(six.iterkeys(rule)))
- self.shares_v2_client.wait_for_snapshot_access_rule_status(
- self.snapshot['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_status(
+ self.shares_v2_client, self.snapshot['id'], rule['id'])
# delete rule and wait for deletion
self.shares_v2_client.delete_snapshot_access_rule(self.snapshot['id'],
rule['id'])
- self.shares_v2_client.wait_for_snapshot_access_rule_deletion(
- self.snapshot['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_deletion(
+ self.shares_v2_client, self.snapshot['id'], rule['id'])
@ddt.ddt
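
The snapshot-rule waiters come as a status/deletion pair, mirroring the create/delete flow in the hunk above. Sketches of both, under the same caveats -- get_snapshot_access_rule() is an assumed accessor name, not the patch's confirmed API:

    # Sketches only -- not the patch's actual implementation.
    import time

    from tempest.lib import exceptions as lib_exc


    def wait_for_snapshot_access_rule_status(client, snapshot_id, rule_id,
                                             expected_state='active'):
        """Poll a snapshot access rule until it reaches expected_state."""
        rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
        start = int(time.time())
        while rule['state'] != expected_state:
            time.sleep(client.build_interval)
            rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
            if int(time.time()) - start >= client.build_timeout:
                raise lib_exc.TimeoutException(
                    'Snapshot access rule %s did not reach %s in time.' %
                    (rule_id, expected_state))


    def wait_for_snapshot_access_rule_deletion(client, snapshot_id, rule_id):
        """Poll until the snapshot access rule is gone (NotFound)."""
        start = int(time.time())
        while True:
            try:
                client.get_snapshot_access_rule(snapshot_id, rule_id)
            except lib_exc.NotFound:
                return
            if int(time.time()) - start >= client.build_timeout:
                raise lib_exc.TimeoutException(
                    'Snapshot access rule %s was not deleted in time.' %
                    rule_id)
            time.sleep(client.build_interval)

Note the tests above call the status waiter without an explicit state, so 'active' as the default matches their expectations.
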
diff --git a/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py b/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
index 92bd639..6965c7f 100644
--- a/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
@@ -19,6 +19,7 @@
from tempest.lib import exceptions as lib_exc
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.api import test_snapshot_rules
from manila_tempest_tests import utils
@@ -92,8 +93,8 @@
rule = self.shares_v2_client.create_snapshot_access_rule(
self.snap['id'], access_type, access_to)
- self.shares_v2_client.wait_for_snapshot_access_rule_status(
- self.snap['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_status(
+ self.shares_v2_client, self.snap['id'], rule['id'])
# try create duplicate of rule
self.assertRaises(lib_exc.BadRequest,
@@ -113,8 +114,8 @@
# delete rule and wait for deletion
self.shares_v2_client.delete_snapshot_access_rule(self.snap['id'],
rule['id'])
- self.shares_v2_client.wait_for_snapshot_access_rule_deletion(
- self.snap['id'], rule['id'])
+ waiters.wait_for_snapshot_access_rule_deletion(
+ self.shares_v2_client, self.snap['id'], rule['id'])
self.assertRaises(lib_exc.NotFound,
self.shares_v2_client.delete_snapshot_access_rule,
diff --git a/manila_tempest_tests/tests/scenario/manager_share.py b/manila_tempest_tests/tests/scenario/manager_share.py
index 84c87bd..79a15a2 100644
--- a/manila_tempest_tests/tests/scenario/manager_share.py
+++ b/manila_tempest_tests/tests/scenario/manager_share.py
@@ -26,6 +26,7 @@
from manila_tempest_tests.common import constants
from manila_tempest_tests.common import remote_client
+from manila_tempest_tests.common import waiters as share_waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager
from manila_tempest_tests import utils
@@ -347,8 +348,9 @@
"""
client = client or self.shares_client
client.delete_access_rule(share_id, access_rule_id)
- self.shares_v2_client.wait_for_share_status(
- share_id, "active", status_attr='access_rules_status')
+ share_waiters.wait_for_share_status(
+ self.shares_v2_client, share_id, "active",
+ status_attr='access_rules_status')
def provide_access_to_auxiliary_instance(self, instance, share=None,
snapshot=None, access_level='rw',
@@ -531,7 +533,7 @@
self.addCleanup(client.delete_share,
share['id'])
- client.wait_for_share_status(share['id'], 'available')
+ share_waiters.wait_for_share_status(client, share['id'], 'available')
return share
def _create_snapshot(self, share_id, client=None, **kwargs):
@@ -540,7 +542,8 @@
self.addCleanup(
client.wait_for_resource_deletion, snapshot_id=snapshot['id'])
self.addCleanup(client.delete_snapshot, snapshot['id'])
- client.wait_for_snapshot_status(snapshot["id"], "available")
+ share_waiters.wait_for_snapshot_status(
+ client, snapshot["id"], "available")
return snapshot
def _wait_for_share_server_deletion(self, sn_id, client=None):
@@ -590,8 +593,8 @@
access = client.create_access_rule(share_id, access_type, access_to,
access_level)
- client.wait_for_share_status(
- share_id, "active", status_attr='access_rules_status')
+ share_waiters.wait_for_share_status(
+ client, share_id, "active", status_attr='access_rules_status')
if cleanup:
self.addCleanup(client.delete_access_rule, share_id, access['id'])
@@ -616,8 +619,8 @@
self.addCleanup(client.delete_snapshot_access_rule,
snapshot_id, access['id'])
- client.wait_for_snapshot_access_rule_status(
- snapshot_id, access['id'])
+ share_waiters.wait_for_snapshot_access_rule_status(
+ client, snapshot_id, access['id'])
return access
@@ -642,15 +645,16 @@
share_id, dest_host, writable=False, preserve_metadata=False,
nondisruptive=False, preserve_snapshots=False,
force_host_assisted_migration=force_host_assisted)
- share = client.wait_for_migration_status(share_id, dest_host, status)
+ share = share_waiters.wait_for_migration_status(
+ client, share_id, dest_host, status)
return share
def _migration_complete(self, share_id, dest_host, client=None, **kwargs):
client = client or self.shares_admin_v2_client
client.migration_complete(share_id, **kwargs)
- share = client.wait_for_migration_status(
- share_id, dest_host, constants.TASK_STATE_MIGRATION_SUCCESS,
- **kwargs)
+ share = share_waiters.wait_for_migration_status(
+ client, share_id, dest_host,
+ constants.TASK_STATE_MIGRATION_SUCCESS, **kwargs)
return share
def _create_share_type(self, name, is_public=True, **kwargs):
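
Two details in manager_share.py deserve a callout. First, the module is imported as share_waiters, presumably to keep it distinct from tempest's own waiters in the scenario layer. Second, wait_for_migration_status is the one waiter whose return value the callers use, so it must return the final share body. A sketch under the usual assumptions: 'task_state' is the field being polled, and the extra kwargs are accepted for parity with the old method but ignored here:

    # Sketch only -- not the patch's actual implementation.
    import time

    from tempest.lib import exceptions


    def wait_for_migration_status(client, share_id, dest_host, status,
                                  **kwargs):
        """Poll the share's task_state; return the final share body."""
        share = client.get_share(share_id)
        start = int(time.time())
        while share['task_state'] != status:
            time.sleep(client.build_interval)
            share = client.get_share(share_id)
            if int(time.time()) - start >= client.build_timeout:
                raise exceptions.TimeoutException(
                    'Share %s: task_state did not become %s within %s '
                    'seconds.' % (share_id, status, client.build_timeout))
        return share
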
diff --git a/manila_tempest_tests/tests/scenario/test_share_extend.py b/manila_tempest_tests/tests/scenario/test_share_extend.py
index df77990..c099b91 100644
--- a/manila_tempest_tests/tests/scenario/test_share_extend.py
+++ b/manila_tempest_tests/tests/scenario/test_share_extend.py
@@ -20,6 +20,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager_share as manager
@@ -95,8 +96,8 @@
extended_share_size = default_share_size + 1
self.shares_v2_client.extend_share(share["id"],
new_size=extended_share_size)
- self.shares_v2_client.wait_for_share_status(share["id"],
- constants.STATUS_AVAILABLE)
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share["id"], constants.STATUS_AVAILABLE)
share = self.shares_v2_client.get_share(share["id"])
self.assertEqual(extended_share_size, int(share["size"]))
diff --git a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
index 7da1974..cc2f1a7 100644
--- a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
+++ b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
@@ -18,6 +18,7 @@
import testtools
from testtools import testcase as tc
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager_share as manager
from manila_tempest_tests import utils
@@ -131,8 +132,8 @@
share['share_proto'],
locations[0],
share_type['id'])
- self.shares_admin_v2_client.wait_for_share_status(
- managed_share['id'], 'available')
+ waiters.wait_for_share_status(
+ self.shares_admin_v2_client, managed_share['id'], 'available')
LOG.debug('Step 11 - grant access again')
self.provide_access_to_auxiliary_instance(
@@ -165,8 +166,8 @@
share['share_proto'],
locations[0],
share_type['id'])
- self.shares_admin_v2_client.wait_for_share_status(
- remanaged_share['id'], 'manage_error')
+ waiters.wait_for_share_status(
+ self.shares_admin_v2_client, remanaged_share['id'], 'manage_error')
self.shares_admin_v2_client.reset_state(remanaged_share['id'])
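
Worth noting: this scenario waits *for* an error state ('manage_error'), so a generic share waiter cannot raise the moment it sees an error-like status -- only when the error was not what the caller asked for. A sketch of such a guard, using ShareBuildErrorException from manila_tempest_tests/share_exceptions.py; wiring it into the waiter this way is an assumption, not the patch's confirmed behaviour:

    # Sketch of a guard for the waiter's polling loop.
    from manila_tempest_tests import share_exceptions


    def _raise_if_unexpected_error(share_id, current_status, wanted):
        # 'manage_error' above is a *wanted* state, so only raise when
        # the error state is not among the statuses being waited for.
        if 'error' in current_status and current_status not in wanted:
            raise share_exceptions.ShareBuildErrorException(
                share_id=share_id)
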
diff --git a/manila_tempest_tests/tests/scenario/test_share_shrink.py b/manila_tempest_tests/tests/scenario/test_share_shrink.py
index 3e498f8..94f4e62 100644
--- a/manila_tempest_tests/tests/scenario/test_share_shrink.py
+++ b/manila_tempest_tests/tests/scenario/test_share_shrink.py
@@ -21,6 +21,7 @@
from testtools import testcase as tc
from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
from manila_tempest_tests.tests.api import base
from manila_tempest_tests.tests.scenario import manager_share as manager
@@ -86,8 +87,9 @@
LOG.debug('Step 8 - try update size, shrink and wait')
self.shares_v2_client.shrink_share(share['id'],
new_size=default_share_size)
- self.shares_v2_client.wait_for_share_status(
- share['id'], ['shrinking_possible_data_loss_error', 'available'])
+ waiters.wait_for_share_status(
+ self.shares_v2_client, share['id'],
+ ['shrinking_possible_data_loss_error', 'available'])
share = self.shares_v2_client.get_share(share["id"])
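
Finally, the shrink scenario above passes a list of acceptable end states, so wait_for_share_status must treat its status argument as "any of these" rather than a single string. A self-contained sketch of the normalization this implies (assumption: the real module normalizes the same way):

    # Accept either one status or a list of acceptable ones.
    def _acceptable_statuses(status):
        return status if isinstance(status, list) else [status]


    assert _acceptable_statuses('available') == ['available']
    assert _acceptable_statuses(
        ['shrinking_possible_data_loss_error', 'available']) == [
            'shrinking_possible_data_loss_error', 'available']
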