Merge "Fix extra specs for share type"
diff --git a/manila_tempest_tests/common/waiters.py b/manila_tempest_tests/common/waiters.py
index d3dbcb4..e5ac0e1 100644
--- a/manila_tempest_tests/common/waiters.py
+++ b/manila_tempest_tests/common/waiters.py
@@ -19,7 +19,6 @@
 from tempest import config
 from tempest.lib import exceptions
 
-from manila_tempest_tests.common import constants
 from manila_tempest_tests.services.share.v2.json import shares_client
 from manila_tempest_tests import share_exceptions
 
@@ -27,279 +26,82 @@
 LATEST_MICROVERSION = CONF.share.max_api_microversion
 
 
-def wait_for_share_instance_status(client, instance_id, status,
-                                   version=LATEST_MICROVERSION):
-    """Waits for a share to reach a given status."""
-    body = client.get_share_instance(instance_id, version=version)
-    instance_status = body['status']
-    start = int(time.time())
-
-    while instance_status != status:
-        time.sleep(client.build_interval)
-        body = client.get_share_instance(instance_id)
-        instance_status = body['status']
-        if instance_status == status:
-            return
-        elif 'error' in instance_status.lower():
-            raise share_exceptions.ShareInstanceBuildErrorException(
-                id=instance_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Share instance %s failed to reach %s status within'
-                       ' the required time (%s s).' %
-                       (instance_id, status, client.build_timeout))
-            raise exceptions.TimeoutException(message)
+def _get_access_rule(body, rule_id):
+    for rule in body:
+        if rule['id'] == rule_id:
+            return rule
 
 
-def wait_for_share_status(client, share_id, status, status_attr='status',
-                          version=LATEST_MICROVERSION):
-    """Waits for a share to reach a given status."""
+def _get_name_of_raise_method(resource_name):
+    if resource_name == 'snapshot_access_rule':
+        return 'AccessRuleBuildErrorException'
+    if resource_name == 'share_replica':
+        return 'ShareInstanceBuildErrorException'
+    resource_name = resource_name.title()
+    name = resource_name.replace('_', '')
+    return name + 'BuildErrorException'
+
+
+def wait_for_resource_status(client, resource_id, status,
+                             resource_name='share', rule_id=None,
+                             status_attr='status',
+                             raise_rule_in_error_state=True,
+                             version=LATEST_MICROVERSION):
+    """Waits for a resource to reach a given status."""
+
+    get_resource_action = {
+        'share': 'get_share',
+        'snapshot': 'get_snapshot',
+        'share_server': 'show_share_server',
+        'share_instance': 'get_share_instance',
+        'snapshot_instance': 'get_snapshot_instance',
+        'access_rule': 'list_access_rules',
+        'snapshot_access_rule': 'get_snapshot_access_rule',
+        'share_group': 'get_share_group',
+        'share_group_snapshot': 'get_share_group_snapshot',
+        'share_replica': 'get_share_replica',
+    }
+
+    # API v2 methods accept a microversion parameter, so the call
+    # arguments are built according to the client version in use.
+    resource_action = getattr(client, get_resource_action[resource_name])
+    method_args = [resource_id]
+    method_kwargs = {}
     if isinstance(client, shares_client.SharesV2Client):
-        body = client.get_share(share_id, version=version)
-    else:
-        body = client.get_share(share_id)
-    share_status = body[status_attr]
+        method_kwargs.update({'version': version})
+        if resource_name == 'snapshot_access_rule':
+            method_args.insert(1, rule_id)
+    body = resource_action(*method_args, **method_kwargs)
+
+    if resource_name == 'access_rule':
+        status_attr = 'state'
+        body = _get_access_rule(body, rule_id)
+
+    resource_status = body[status_attr]
     start = int(time.time())
 
     exp_status = status if isinstance(status, list) else [status]
-    while share_status not in exp_status:
+    while resource_status not in exp_status:
         time.sleep(client.build_interval)
-        if isinstance(client, shares_client.SharesV2Client):
-            body = client.get_share(share_id, version=version)
-        else:
-            body = client.get_share(share_id)
-        share_status = body[status_attr]
-        if share_status in exp_status:
+        body = resource_action(*method_args, **method_kwargs)
+
+        if resource_name == 'access_rule':
+            status_attr = 'state'
+            body = _get_access_rule(body, rule_id)
+
+        resource_status = body[status_attr]
+
+        if resource_status in exp_status:
             return
-        elif 'error' in share_status.lower():
-            raise share_exceptions.ShareBuildErrorException(
-                share_id=share_id)
+        elif 'error' in resource_status.lower() and raise_rule_in_error_state:
+            raise_method = _get_name_of_raise_method(resource_name)
+            resource_exception = getattr(share_exceptions, raise_method)
+            raise resource_exception(resource_id=resource_id)
         if int(time.time()) - start >= client.build_timeout:
-            message = ("Share's %(status_attr)s failed to transition to "
-                       "%(status)s within the required "
-                       "time %(seconds)s." %
-                       {"status_attr": status_attr, "status": exp_status,
-                        "seconds": client.build_timeout})
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_snapshot_status(client, snapshot_id, status,
-                             version=LATEST_MICROVERSION):
-    """Waits for a snapshot to reach a given status."""
-    if isinstance(client, shares_client.SharesV2Client):
-        body = client.get_snapshot(snapshot_id, version=version)
-    else:
-        body = client.get_snapshot(snapshot_id)
-    snapshot_name = body['name']
-    snapshot_status = body['status']
-    start = int(time.time())
-
-    while snapshot_status != status:
-        time.sleep(client.build_interval)
-        if isinstance(client, shares_client.SharesV2Client):
-            body = client.get_snapshot(snapshot_id, version=version)
-        else:
-            body = client.get_snapshot(snapshot_id)
-        snapshot_status = body['status']
-        if snapshot_status in status:
-            return
-        if 'error' in snapshot_status:
-            raise (share_exceptions.
-                   SnapshotBuildErrorException(snapshot_id=snapshot_id))
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Share Snapshot %s failed to reach %s status '
+            message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
-                       (snapshot_name, status, client.build_timeout))
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_access_rule_status(client, share_id, rule_id, status,
-                                raise_rule_in_error_state=True):
-    """Waits for an access rule to reach a given status."""
-    rule_status = "new"
-    start = int(time.time())
-    while rule_status != status:
-        time.sleep(client.build_interval)
-        rules = client.list_access_rules(share_id)
-        for rule in rules:
-            if rule["id"] in rule_id:
-                rule_status = rule['state']
-                break
-        if 'error' in rule_status and raise_rule_in_error_state:
-            raise share_exceptions.AccessRuleBuildErrorException(
-                rule_id=rule_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Share Access Rule %s failed to reach %s status '
-                       'within the required time (%s s).' %
-                       (rule_id, status, client.build_timeout))
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_snapshot_instance_status(client, instance_id, expected_status):
-    """Waits for a snapshot instance status to reach a given status."""
-    body = client.get_snapshot_instance(instance_id)
-    instance_status = body['status']
-    start = int(time.time())
-
-    while instance_status != expected_status:
-        time.sleep(client.build_interval)
-        body = client.get_snapshot_instance(instance_id)
-        instance_status = body['status']
-        if instance_status == expected_status:
-            return
-        if 'error' in instance_status:
-            raise share_exceptions.SnapshotInstanceBuildErrorException(
-                id=instance_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('The status of snapshot instance %(id)s failed to '
-                       'reach %(expected_status)s status within the '
-                       'required time (%(time)ss). Current '
-                       'status: %(current_status)s.' %
-                       {
-                           'expected_status': expected_status,
-                           'time': client.build_timeout,
-                           'id': instance_id,
-                           'current_status': instance_status,
-                       })
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_share_group_status(client, share_group_id, status):
-    """Waits for a share group to reach a given status."""
-    body = client.get_share_group(share_group_id)
-    sg_name = body['name']
-    sg_status = body['status']
-    start = int(time.time())
-
-    while sg_status != status:
-        time.sleep(client.build_interval)
-        body = client.get_share_group(share_group_id)
-        sg_status = body['status']
-        if 'error' in sg_status and status != 'error':
-            raise share_exceptions.ShareGroupBuildErrorException(
-                share_group_id=share_group_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            sg_name = sg_name or share_group_id
-            message = ('Share Group %s failed to reach %s status '
-                       'within the required time (%s s). '
-                       'Current status: %s' %
-                       (sg_name, status, client.build_timeout, sg_status))
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_share_group_snapshot_status(client, share_group_snapshot_id,
-                                         status):
-    """Waits for a share group snapshot to reach a given status."""
-    body = client.get_share_group_snapshot(share_group_snapshot_id)
-    sg_snapshot_name = body['name']
-    sg_snapshot_status = body['status']
-    start = int(time.time())
-
-    while sg_snapshot_status != status:
-        time.sleep(client.build_interval)
-        body = client.get_share_group_snapshot(share_group_snapshot_id)
-        sg_snapshot_status = body['status']
-        if 'error' in sg_snapshot_status and status != 'error':
-            raise share_exceptions.ShareGroupSnapshotBuildErrorException(
-                share_group_snapshot_id=share_group_snapshot_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('Share Group Snapshot %s failed to reach %s status '
-                       'within the required time (%s s).' %
-                       (sg_snapshot_name, status, client.build_timeout))
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_share_server_status(client, server_id, status,
-                                 status_attr='status'):
-    """Waits for a share to reach a given status."""
-    body = client.show_share_server(server_id)
-    server_status = body[status_attr]
-    start = int(time.time())
-
-    while server_status != status:
-        time.sleep(client.build_interval)
-        body = client.show_share_server(server_id)
-        server_status = body[status_attr]
-        if server_status in status:
-            return
-        elif constants.STATUS_ERROR in server_status.lower():
-            raise share_exceptions.ShareServerBuildErrorException(
-                server_id=server_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ("Share server's %(status_attr)s failed to "
-                       "transition to %(status)s within the required "
-                       "time %(seconds)s." %
-                       {"status_attr": status_attr, "status": status,
-                        "seconds": client.build_timeout})
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_share_replica_status(client, replica_id, expected_status,
-                                  status_attr='status'):
-    """Waits for a replica's status_attr to reach a given status."""
-    body = client.get_share_replica(replica_id)
-    replica_status = body[status_attr]
-    start = int(time.time())
-
-    while replica_status != expected_status:
-        time.sleep(client.build_interval)
-        body = client.get_share_replica(replica_id)
-        replica_status = body[status_attr]
-        if replica_status == expected_status:
-            return
-        if ('error' in replica_status
-                and expected_status != constants.STATUS_ERROR):
-            raise share_exceptions.ShareInstanceBuildErrorException(
-                id=replica_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('The %(status_attr)s of Replica %(id)s failed to '
-                       'reach %(expected_status)s status within the '
-                       'required time (%(time)ss). Current '
-                       '%(status_attr)s: %(current_status)s.' %
-                       {
-                           'status_attr': status_attr,
-                           'expected_status': expected_status,
-                           'time': client.build_timeout,
-                           'id': replica_id,
-                           'current_status': replica_status,
-                       })
-            raise exceptions.TimeoutException(message)
-
-
-def wait_for_snapshot_access_rule_status(client, snapshot_id, rule_id,
-                                         expected_state='active'):
-    rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
-    state = rule['state']
-    start = int(time.time())
-
-    while state != expected_state:
-        time.sleep(client.build_interval)
-        rule = client.get_snapshot_access_rule(snapshot_id, rule_id)
-        state = rule['state']
-        if state == expected_state:
-            return
-        if 'error' in state:
-            raise share_exceptions.AccessRuleBuildErrorException(
-                snapshot_id)
-
-        if int(time.time()) - start >= client.build_timeout:
-            message = ('The status of snapshot access rule %(id)s failed '
-                       'to reach %(expected_state)s state within the '
-                       'required time (%(time)ss). Current '
-                       'state: %(current_state)s.' %
-                       {
-                           'expected_state': expected_state,
-                           'time': client.build_timeout,
-                           'id': rule_id,
-                           'current_state': state,
-                       })
+                       (resource_name.replace('_', ' '), resource_id, status,
+                        resource_status, client.build_timeout))
             raise exceptions.TimeoutException(message)
 
 
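(For context, a minimal usage sketch of the unified waiter above; the
`shares_v2_client` handle and the resource IDs are placeholders, not part
of this change.)

    # Wait on a replica's replica_state; an 'error' state raises
    # ShareInstanceBuildErrorException, per the mapping in
    # _get_name_of_raise_method() for 'share_replica'.
    waiters.wait_for_resource_status(
        shares_v2_client, replica['id'], 'in_sync',
        resource_name='share_replica', status_attr='replica_state')

    # For 'access_rule', status_attr is forced to 'state' and the
    # specific rule is picked out of the listing via rule_id.
    waiters.wait_for_resource_status(
        shares_v2_client, share['id'], 'active',
        resource_name='access_rule', rule_id=rule['id'])
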
diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py
index 475855e..61f053f 100644
--- a/manila_tempest_tests/services/share/v2/json/shares_client.py
+++ b/manila_tempest_tests/services/share/v2/json/shares_client.py
@@ -623,6 +623,15 @@
 
 ###############
 
+    def show_share_server(self, share_server_id, version=LATEST_MICROVERSION):
+        """Get share server info."""
+        uri = "share-servers/%s" % share_server_id
+        resp, body = self.get(uri, version=version)
+        self.expected_success(200, resp.status)
+        return self._parse_resp(body)
+
+###############
+
     def get_snapshot_instance(self, instance_id, version=LATEST_MICROVERSION):
         resp, body = self.get("snapshot-instances/%s" % instance_id,
                               version=version)
@@ -746,7 +755,7 @@
         self.expected_success(202, resp.status)
         return body
 
-    def get_access(self, access_id, version=LATEST_MICROVERSION):
+    def get_access_rule(self, access_id, version=LATEST_MICROVERSION):
         resp, body = self.get("share-access-rules/%s" % access_id,
                               version=version)
         self.expected_success(200, resp.status)
@@ -1657,9 +1666,10 @@
         self.expected_success(202, resp.status)
         return self._parse_resp(body)
 
-    def get_snapshot_access_rule(self, snapshot_id, rule_id):
+    def get_snapshot_access_rule(self, snapshot_id, rule_id,
+                                 version=LATEST_MICROVERSION):
         resp, body = self.get("snapshots/%s/access-list" % snapshot_id,
-                              version=LATEST_MICROVERSION)
+                              version=version)
         body = self._parse_resp(body)
         found_rules = [r for r in body if r['id'] == rule_id]
 
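(Illustrative only: how the client methods added or renamed above would be
called; the authenticated client instance and the IDs are assumed.)

    # New helper to fetch share server details, microversion-aware.
    server = shares_v2_client.show_share_server(share_server_id)

    # Renamed from get_access(); fetches one access rule by its ID.
    rule = shares_v2_client.get_access_rule(access_id)

    # get_snapshot_access_rule() now takes an explicit version argument
    # instead of always using LATEST_MICROVERSION.
    rule = shares_v2_client.get_snapshot_access_rule(
        snapshot_id, rule_id, version=LATEST_MICROVERSION)
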
diff --git a/manila_tempest_tests/share_exceptions.py b/manila_tempest_tests/share_exceptions.py
index 9466afe..efa61b5 100644
--- a/manila_tempest_tests/share_exceptions.py
+++ b/manila_tempest_tests/share_exceptions.py
@@ -17,33 +17,34 @@
 
 
 class ShareBuildErrorException(exceptions.TempestException):
-    message = "Share %(share_id)s failed to build and is in ERROR status"
+    message = "Share %(resource_id)s failed to build and is in ERROR status"
 
 
 class ShareInstanceBuildErrorException(exceptions.TempestException):
-    message = "Share instance %(id)s failed to build and is in ERROR status"
+    message = ("Share instance %(resource_id)s failed to build and is in "
+               "ERROR status")
 
 
 class ShareGroupBuildErrorException(exceptions.TempestException):
-    message = ("Share group %(share_group_id)s failed to build and "
+    message = ("Share group %(resource_id)s failed to build and "
                "is in ERROR status")
 
 
 class AccessRuleBuildErrorException(exceptions.TempestException):
-    message = "Share's rule with id %(rule_id)s is in ERROR status"
+    message = "Share's rule with id %(resource_id)s is in ERROR status"
 
 
 class SnapshotBuildErrorException(exceptions.TempestException):
-    message = "Snapshot %(snapshot_id)s failed to build and is in ERROR status"
+    message = "Snapshot %(resource_id)s failed to build and is in ERROR status"
 
 
 class SnapshotInstanceBuildErrorException(exceptions.TempestException):
-    message = ("Snapshot instance %(id)s failed to build and is in "
+    message = ("Snapshot instance %(resource_id)s failed to build and is in "
                "ERROR status.")
 
 
 class ShareGroupSnapshotBuildErrorException(exceptions.TempestException):
-    message = ("Share Group Snapshot %(share_group_snapshot_id)s failed "
+    message = ("Share Group Snapshot %(resource_id)s failed "
                "to build and is in ERROR status")
 
 
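(A sketch of why the message keys were unified to resource_id: the waiter
can now resolve and raise any of these exceptions generically. Names mirror
the waiters module above; the UUID is a placeholder.)

    # 'share_group' resolves to 'ShareGroupBuildErrorException'.
    exc_name = _get_name_of_raise_method('share_group')
    exc_cls = getattr(share_exceptions, exc_name)
    raise exc_cls(resource_id='<share-group-uuid>')
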
diff --git a/manila_tempest_tests/tests/api/admin/test_admin_actions.py b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
index 7b6b725..adb0160 100644
--- a/manila_tempest_tests/tests/api/admin/test_admin_actions.py
+++ b/manila_tempest_tests/tests/api/admin/test_admin_actions.py
@@ -40,24 +40,20 @@
         # create share
         cls.sh = cls.create_share(share_type_id=cls.share_type_id)
 
-    def _wait_for_resource_status(self, resource_id, resource_type):
-        wait_for_resource_status = getattr(
-            waiters, "wait_for_{}_status".format(resource_type))
-        wait_for_resource_status(
-            self.shares_v2_client, resource_id, "available")
-
     def _reset_resource_available(self, resource_id, resource_type="shares"):
         self.shares_v2_client.reset_state(
             resource_id, s_type=resource_type, status="available")
-        self._wait_for_resource_status(resource_id, resource_type[:-1])
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, resource_id, "available",
+            resource_name=resource_type[:-1])
 
     @decorators.idempotent_id('4f8c6ae9-0656-445f-a911-fbf98fe761d0')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     @ddt.data("error", "available", "error_deleting", "deleting", "creating")
     def test_reset_share_state(self, status):
         self.shares_v2_client.reset_state(self.sh["id"], status=status)
-        waiters.wait_for_share_status(self.shares_v2_client,
-                                      self.sh["id"], status)
+        waiters.wait_for_resource_status(self.shares_v2_client,
+                                         self.sh["id"], status)
         self.addCleanup(self._reset_resource_available, self.sh["id"])
 
     @decorators.idempotent_id('13075b2d-fe83-41bf-b6ef-99cfcc00257d')
@@ -69,8 +65,9 @@
         share_instance_id = sh_instance["id"]
         self.shares_v2_client.reset_state(
             share_instance_id, s_type="share_instances", status=status)
-        waiters.wait_for_share_instance_status(
-            self.shares_v2_client, share_instance_id, status)
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, share_instance_id, status,
+            resource_name='share_instance')
         self.addCleanup(self._reset_resource_available,
                         share_instance_id, "share_instances")
 
@@ -83,8 +80,9 @@
         snapshot = self.create_snapshot_wait_for_active(self.sh["id"])
         self.shares_v2_client.reset_state(
             snapshot["id"], s_type="snapshots", status=status)
-        waiters.wait_for_snapshot_status(
-            self.shares_v2_client, snapshot["id"], status)
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, snapshot["id"], status,
+            resource_name='snapshot')
         self.addCleanup(self._reset_resource_available,
                         snapshot["id"], "snapshots")
 
@@ -154,5 +152,19 @@
     def test_reset_share_task_state(self):
         for task_state in self.task_states:
             self.shares_v2_client.reset_task_state(self.sh["id"], task_state)
-            waiters.wait_for_share_status(
-                self.shares_v2_client, self.sh["id"], task_state, 'task_state')
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.sh["id"], task_state,
+                status_attr='task_state')
+
+    @decorators.idempotent_id('4233b941-a909-4f35-9ec9-753736949dd2')
+    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
+    def test_ensure_share_server_creation_when_dhss_enabled(self):
+        # Ensure that when a share creation request is handled, a share
+        # server is created if the "driver handles share servers" (DHSS)
+        # option is enabled, and that none is created otherwise.
+        share_get = self.admin_shares_v2_client.get_share(self.sh['id'])
+        share_server = share_get['share_server_id']
+        if CONF.share.multitenancy_enabled:
+            self.assertNotEmpty(share_server)
+        else:
+            self.assertEmpty(share_server)
diff --git a/manila_tempest_tests/tests/api/admin/test_migration.py b/manila_tempest_tests/tests/api/admin/test_migration.py
index 3c0254d..b75bac7 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration.py
@@ -114,7 +114,7 @@
         self.shares_v2_client.create_access_rule(
             share['id'], access_to="50.50.50.50", access_level="rw")
 
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client,
             share['id'], constants.RULE_STATE_ACTIVE,
             status_attr='access_rules_status')
@@ -122,7 +122,7 @@
         self.shares_v2_client.create_access_rule(
             share['id'], access_to="51.51.51.51", access_level="ro")
 
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
             status_attr='access_rules_status')
 
@@ -237,14 +237,14 @@
         if resize == 'extend':
             new_size = CONF.share.share_size + 2
             self.shares_v2_client.extend_share(share['id'], new_size)
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
             share = self.shares_v2_client.get_share(share["id"])
             self.assertEqual(new_size, int(share["size"]))
         else:
             new_size = CONF.share.share_size
             self.shares_v2_client.shrink_share(share['id'], new_size)
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
             share = self.shares_v2_client.get_share(share["id"])
             self.assertEqual(new_size, int(share["size"]))
diff --git a/manila_tempest_tests/tests/api/admin/test_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
index 6ab0c97..939806e 100644
--- a/manila_tempest_tests/tests/api/admin/test_migration_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_migration_negative.py
@@ -94,8 +94,9 @@
     @base.skip_if_microversion_lt("2.22")
     def test_migration_get_progress_None(self):
         self.shares_v2_client.reset_task_state(self.share["id"], None)
-        waiters.wait_for_share_status(
-            self.shares_v2_client, self.share["id"], None, 'task_state')
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, self.share["id"], None,
+            status_attr='task_state')
         self.assertRaises(
             lib_exc.BadRequest, self.shares_v2_client.migration_get_progress,
             self.share['id'])
@@ -228,14 +229,14 @@
     def test_migrate_share_not_available(self):
         self.shares_client.reset_state(self.share['id'],
                                        constants.STATUS_ERROR)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.share['id'], constants.STATUS_ERROR)
         self.assertRaises(
             lib_exc.BadRequest, self.shares_v2_client.migrate_share,
             self.share['id'], self.dest_pool)
         self.shares_client.reset_state(self.share['id'],
                                        constants.STATUS_AVAILABLE)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.share['id'],
             constants.STATUS_AVAILABLE)
 
diff --git a/manila_tempest_tests/tests/api/admin/test_replication.py b/manila_tempest_tests/tests/api/admin/test_replication.py
index 6ad9a52..6a3359c 100644
--- a/manila_tempest_tests/tests/api/admin/test_replication.py
+++ b/manila_tempest_tests/tests/api/admin/test_replication.py
@@ -108,9 +108,10 @@
             share["id"], self.replica_zone, cleanup=False,
             client=self.admin_client, version=version)
         # Wait for replica state to update after creation
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.admin_client, replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         # List replicas
         replica_list = self.admin_client.list_share_replicas(
@@ -125,10 +126,10 @@
         self.admin_client.reset_share_replica_state(
             replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
             version=version)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.admin_client, replica['id'],
             constants.REPLICATION_STATE_OUT_OF_SYNC,
-            status_attr='replica_state')
+            resource_name='share_replica', status_attr='replica_state')
 
         # Promote 'out_of_sync' replica to 'active' state.
         self.promote_share_replica(replica['id'], self.admin_client,
@@ -160,8 +161,9 @@
                                             version=version)
         self.admin_client.reset_share_replica_status(
             replica['id'], constants.STATUS_ERROR_DELETING, version=version)
-        waiters.wait_for_share_replica_status(
-            self.admin_client, replica['id'], constants.STATUS_ERROR_DELETING)
+        waiters.wait_for_resource_status(
+            self.admin_client, replica['id'], constants.STATUS_ERROR_DELETING,
+            resource_name='share_replica')
         self.admin_client.force_delete_share_replica(replica['id'],
                                                      version=version)
         self.admin_client.wait_for_resource_deletion(replica_id=replica['id'])
@@ -183,8 +185,9 @@
         self.admin_client.reset_share_replica_status(replica['id'],
                                                      constants.STATUS_ERROR,
                                                      version=version)
-        waiters.wait_for_share_replica_status(
-            self.admin_client, replica['id'], constants.STATUS_ERROR)
+        waiters.wait_for_resource_status(
+            self.admin_client, replica['id'], constants.STATUS_ERROR,
+            resource_name='share_replica')
 
     @decorators.idempotent_id('258844da-a853-42b6-87db-b16e616018c6')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -203,9 +206,9 @@
         self.admin_client.reset_share_replica_state(replica['id'],
                                                     constants.STATUS_ERROR,
                                                     version=version)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.admin_client, replica['id'], constants.STATUS_ERROR,
-            status_attr='replica_state')
+            resource_name='share_replica', status_attr='replica_state')
 
     @decorators.idempotent_id('2969565a-85e8-4c61-9dfb-cc7f7ca9f6dd')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
@@ -221,21 +224,23 @@
                                             cleanup_in_class=False,
                                             client=self.admin_client,
                                             version=version)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.admin_client, replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         # Set replica_state to 'out_of_sync'.
         self.admin_client.reset_share_replica_state(
             replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC,
             version=version)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.admin_client, replica['id'],
             constants.REPLICATION_STATE_OUT_OF_SYNC,
-            status_attr='replica_state')
+            resource_name='share_replica', status_attr='replica_state')
 
         # Attempt resync
         self.admin_client.resync_share_replica(replica['id'], version=version)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.admin_client, replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
diff --git a/manila_tempest_tests/tests/api/admin/test_replication_actions.py b/manila_tempest_tests/tests/api/admin/test_replication_actions.py
index e90e746..9a3af60 100644
--- a/manila_tempest_tests/tests/api/admin/test_replication_actions.py
+++ b/manila_tempest_tests/tests/api/admin/test_replication_actions.py
@@ -86,7 +86,7 @@
         # Test extend share
         new_size = self.share["size"] + 1
         self.admin_client.extend_share(self.share["id"], new_size)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.admin_client, self.share["id"], "available")
         share = self.admin_client.get_share(self.share["id"])
         self.assertEqual(new_size, int(share["size"]))
@@ -99,7 +99,7 @@
         share = self.admin_client.get_share(self.share["id"])
         new_size = self.share["size"] - 1
         self.admin_client.shrink_share(self.share["id"], new_size)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.admin_client, share["id"], "available")
         shrink_share = self.admin_client.get_share(self.share["id"])
         self.assertEqual(new_size, int(shrink_share["size"]))
@@ -129,7 +129,7 @@
         managed_share = self.admin_client.manage_share(
             share['host'], share['share_proto'],
             export_path, self.share_type_id)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.admin_client, managed_share['id'], 'available')
 
         # Add managed share to cleanup queue
diff --git a/manila_tempest_tests/tests/api/admin/test_share_manage.py b/manila_tempest_tests/tests/api/admin/test_share_manage.py
index 6c311ac..2622683 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_manage.py
@@ -108,9 +108,9 @@
                 'client': self.shares_client})
 
         # Wait for success
-        waiters.wait_for_share_status(self.shares_v2_client,
-                                      managed_share['id'],
-                                      constants.STATUS_AVAILABLE)
+        waiters.wait_for_resource_status(self.shares_v2_client,
+                                         managed_share['id'],
+                                         constants.STATUS_AVAILABLE)
 
         # Verify data of managed share
         self.assertEqual(name, managed_share['name'])
diff --git a/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
index 06d429b..deb5e05 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_manage_negative.py
@@ -67,7 +67,7 @@
         # Manage the share and wait for the expected state.
         # Return the managed share object.
         managed_share = self.shares_v2_client.manage_share(**params)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, managed_share['id'], state)
 
         return managed_share
@@ -168,7 +168,7 @@
             invalid_share = self.shares_v2_client.manage_share(
                 **invalid_params
             )
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, invalid_share['id'],
                 constants.STATUS_MANAGE_ERROR)
 
@@ -263,7 +263,7 @@
         )
         invalid_share = self.shares_v2_client.manage_share(**invalid_params)
 
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, invalid_share['id'],
             constants.STATUS_MANAGE_ERROR)
         self._unmanage_share_and_wait(share)
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers.py b/manila_tempest_tests/tests/api/admin/test_share_servers.py
index b39a17d..d48c383 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers.py
@@ -329,8 +329,9 @@
                 share_server['id'],
                 status=state,
             )
-            waiters.wait_for_share_server_status(
-                self.shares_v2_client, share_server['id'], status=state
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, share_server['id'], state,
+                resource_name="share_server"
             )
 
         # bring the share server back in the active state
@@ -338,9 +339,9 @@
             share_server['id'],
             status=constants.SERVER_STATE_ACTIVE,
         )
-        waiters.wait_for_share_server_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_server['id'],
-            status=constants.SERVER_STATE_ACTIVE
+            constants.SERVER_STATE_ACTIVE, resource_name="share_server"
         )
 
         # delete share
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
index 6d16c66..3b2db49 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_manage_negative.py
@@ -165,9 +165,9 @@
                 share['share_server_id'],
                 status=constants.SERVER_STATE_ACTIVE,
             )
-            waiters.wait_for_share_server_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, share['share_server_id'],
-                constants.SERVER_STATE_ACTIVE,
+                constants.SERVER_STATE_ACTIVE, resource_name='share_server'
             )
 
         # delete share
@@ -207,9 +207,9 @@
                 share['share_server_id'],
                 status=constants.SERVER_STATE_ACTIVE,
             )
-            waiters.wait_for_share_server_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, share['share_server_id'],
-                constants.SERVER_STATE_ACTIVE,
+                constants.SERVER_STATE_ACTIVE, resource_name='share_server'
             )
 
         # delete share
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
index ac332dd..ba774d3 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
@@ -97,7 +97,7 @@
                 access_to=rule.get('access_to'),
                 access_level=rule.get('access_level')
             )
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
             status_attr='access_rules_status')
 
@@ -141,8 +141,10 @@
 
         # Check the snapshot status if possible.
         if snapshot_id:
-            waiters.wait_for_snapshot_status(
-                self.shares_v2_client, snapshot_id, constants.STATUS_AVAILABLE)
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, snapshot_id, constants.STATUS_AVAILABLE,
+                resource_name='snapshot'
+            )
 
         # Check the share server destination status.
         dest_server = self.shares_v2_client.show_share_server(dest_server_id)
@@ -277,9 +279,12 @@
             src_server_id, dest_host, preserve_snapshots=preserve_snapshots)
 
         expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
-        waiters.wait_for_share_server_status(
-            self.shares_v2_client, src_server_id, expected_state,
-            status_attr='task_state')
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, src_server_id,
+            expected_state, resource_name='share_server',
+            status_attr='task_state'
+        )
+
         # Get the destination share server.
         dest_server_id = self._get_share_server_destination_for_migration(
             src_server_id)
@@ -297,8 +302,9 @@
 
         # Wait for the migration cancelled status.
         expected_state = constants.TASK_STATE_MIGRATION_CANCELLED
-        waiters.wait_for_share_server_status(
-            self.shares_v2_client, src_server_id, expected_state,
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, src_server_id,
+            expected_state, resource_name='share_server',
             status_attr='task_state')
 
         # After the cancel operation, we need to validate again the resources.
@@ -342,9 +348,11 @@
             preserve_snapshots=preserve_snapshots)
 
         expected_state = constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE
-        waiters.wait_for_share_server_status(
-            self.shares_v2_client, src_server_id, expected_state,
-            status_attr='task_state')
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, src_server_id,
+            expected_state, resource_name='share_server',
+            status_attr='task_state'
+        )
         # Get the destination share server.
         dest_server_id = self._get_share_server_destination_for_migration(
             src_server_id)
@@ -362,8 +370,10 @@
 
         # Wait for the destination server to reach active status.
         expected_status = constants.SERVER_STATE_ACTIVE
-        waiters.wait_for_share_server_status(
-            self.shares_v2_client, dest_server_id, expected_status)
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, dest_server_id, expected_status,
+            resource_name='share_server'
+        )
 
         # Check that the source server, if it still exists, went inactive.
         try:
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
index 9a038ac..72ee2c9 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration_negative.py
@@ -246,11 +246,12 @@
     def resource_cleanup(cls):
         states = [constants.TASK_STATE_MIGRATION_DRIVER_IN_PROGRESS,
                   constants.TASK_STATE_MIGRATION_DRIVER_PHASE1_DONE]
-        waiters.wait_for_share_server_status(
-            cls.shares_v2_client, cls.server_id, status=states,
+        waiters.wait_for_resource_status(
+            cls.shares_v2_client, cls.server_id, states,
+            resource_name="share_server",
             status_attr="task_state")
         cls.shares_v2_client.share_server_migration_cancel(cls.server_id)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             cls.shares_v2_client, cls.share['id'], status="available")
         super(ShareServerMigrationStartNegativesNFS, cls).resource_cleanup()
 
diff --git a/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py b/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
index 7e9aa6a..0aec375 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_snapshot_instances.py
@@ -129,8 +129,9 @@
         for status in ("error", "available"):
             self.shares_v2_client.reset_snapshot_instance_status(
                 sii, status=status)
-            waiters.wait_for_snapshot_instance_status(
-                self.shares_v2_client, sii, expected_status=status)
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, sii, status,
+                resource_name='snapshot_instance')
         self.shares_v2_client.delete_snapshot(snapshot['id'])
         self.shares_v2_client.wait_for_resource_deletion(
             snapshot_id=snapshot['id'])
diff --git a/manila_tempest_tests/tests/api/admin/test_share_types.py b/manila_tempest_tests/tests/api/admin/test_share_types.py
index 5b117cc..29b9038 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_types.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_types.py
@@ -234,7 +234,7 @@
         share = self.create_share(
             name=share_name, share_type_id=st_create["share_type"]["id"])
         self.assertEqual(share["name"], share_name)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_client, share["id"], "available")
 
         # Verify share info
diff --git a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
index 6f6f3a0..4198af3 100644
--- a/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_snapshot_manage.py
@@ -100,8 +100,9 @@
                 'client': self.shares_v2_client})
 
         # Wait for success
-        waiters.wait_for_snapshot_status(
-            self.shares_v2_client, snapshot['id'], constants.STATUS_AVAILABLE
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, snapshot['id'], constants.STATUS_AVAILABLE,
+            resource_name='snapshot'
         )
 
         # Verify manage snapshot API response
diff --git a/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py b/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
index b079264..a4075e4 100644
--- a/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
+++ b/manila_tempest_tests/tests/api/admin/test_snapshot_manage_negative.py
@@ -121,9 +121,9 @@
             'invalid_provider_location',
             driver_options={}
         )
-        waiters.wait_for_snapshot_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, invalid_snap['id'],
-            constants.STATUS_MANAGE_ERROR
+            constants.STATUS_MANAGE_ERROR, resource_name='snapshot'
         )
         self.shares_v2_client.unmanage_snapshot(invalid_snap['id'])
 
@@ -132,9 +132,9 @@
             self.share['id'],
             snap['provider_location']
         )
-        waiters.wait_for_snapshot_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, managed_snap['id'],
-            constants.STATUS_AVAILABLE
+            constants.STATUS_AVAILABLE, resource_name='snapshot'
         )
         self._delete_snapshot_and_wait(managed_snap)
 
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index 2310591..0a82cf8 100755
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -481,7 +481,7 @@
                 client = d["kwargs"]["client"]
                 share_id = d["share"]["id"]
                 try:
-                    waiters.wait_for_share_status(
+                    waiters.wait_for_resource_status(
                         client, share_id, "available")
                     d["available"] = True
                 except (share_exceptions.ShareBuildErrorException,
@@ -541,8 +541,9 @@
                 else:
                     cls.method_resources.insert(0, resource)
 
-        waiters.wait_for_share_group_status(
-            client, share_group['id'], 'available')
+        waiters.wait_for_resource_status(
+            client, share_group['id'], 'available',
+            resource_name='share_group')
         return share_group
 
     @classmethod
@@ -591,7 +592,8 @@
             cls.class_resources.insert(0, resource)
         else:
             cls.method_resources.insert(0, resource)
-        waiters.wait_for_snapshot_status(client, snapshot["id"], "available")
+        waiters.wait_for_resource_status(client, snapshot["id"], "available",
+                                         resource_name='snapshot')
         return snapshot
 
     @classmethod
@@ -612,8 +614,9 @@
             cls.class_resources.insert(0, resource)
         else:
             cls.method_resources.insert(0, resource)
-        waiters.wait_for_share_group_snapshot_status(
-            client, sg_snapshot["id"], "available")
+        waiters.wait_for_resource_status(
+            client, sg_snapshot["id"], "available",
+            resource_name="share_group_snapshot")
         return sg_snapshot
 
     @classmethod
@@ -699,8 +702,9 @@
                 cls.class_resources.insert(0, resource)
             else:
                 cls.method_resources.insert(0, resource)
-        waiters.wait_for_share_replica_status(
-            client, replica["id"], constants.STATUS_AVAILABLE)
+        waiters.wait_for_resource_status(
+            client, replica["id"], constants.STATUS_AVAILABLE,
+            resource_name='share_replica')
         return replica
 
     @classmethod
@@ -718,9 +722,9 @@
                               version=CONF.share.max_api_microversion):
         client = client or cls.shares_v2_client
         replica = client.promote_share_replica(replica_id, version=version)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             client, replica["id"], constants.REPLICATION_STATE_ACTIVE,
-            status_attr="replica_state")
+            resource_name='share_replica', status_attr="replica_state")
         return replica
 
     @classmethod
@@ -1086,7 +1090,7 @@
                   'share_network_id': self.shares_v2_client.share_network_id}
         share = self.shares_v2_client.create_share(**params)
         self.addCleanup(self.shares_v2_client.delete_share, share['id'])
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'], "error")
         return waiters.wait_for_message(self.shares_v2_client, share['id'])
 
@@ -1101,8 +1105,10 @@
 
         rule = client.create_access_rule(share_id, access_type, access_to,
                                          access_level)
-        waiters.wait_for_access_rule_status(client, share_id, rule['id'],
-                                            status, raise_rule_in_error_state)
+        waiters.wait_for_resource_status(
+            client, share_id, status, resource_name='access_rule',
+            rule_id=rule['id'],
+            raise_rule_in_error_state=raise_rule_in_error_state)
         if cleanup:
             self.addCleanup(client.wait_for_resource_deletion,
                             rule_id=rule['id'], share_id=share_id)
@@ -1184,7 +1190,7 @@
             description=description,
             share_server_id=share_server_id
         )
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, managed_share['id'],
             constants.STATUS_AVAILABLE
         )
@@ -1206,9 +1212,9 @@
             params.get('identifier', share_server['identifier']),
             share_network_subnet_id=subnet_id,
         )
-        waiters.wait_for_share_server_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, managed_share_server['id'],
-            constants.SERVER_STATE_ACTIVE,
+            constants.SERVER_STATE_ACTIVE, resource_name='share_server'
         )
 
         return managed_share_server
@@ -1262,7 +1268,8 @@
         cls.os_admin.domains_client = (
             cls.os_admin.identity_v3.DomainsClient() if
             CONF.identity.auth_version == 'v3' else None)
-        cls.admin_project_member_client = cls.create_user_and_get_client()
+        cls.admin_project_member_client = cls.create_user_and_get_client(
+            project=cls.admin_project, add_member_role=True)
 
         if CONF.share.multitenancy_enabled:
             admin_share_network_id = cls.provide_share_network(
@@ -1277,7 +1284,7 @@
             cls.alt_shares_v2_client.share_network_id = alt_share_network_id
 
     @classmethod
-    def create_user_and_get_client(cls, project=None):
+    def create_user_and_get_client(cls, project=None, add_member_role=True):
         """Create a user in specified project & set share clients for user
 
         The user will have all roles specified in tempest.conf
@@ -1302,9 +1309,12 @@
             username, password, project, email)
         cls.class_project_users_created.append(user)
 
-        for conf_role in CONF.auth.tempest_roles:
-            cls.os_admin.creds_client.assign_user_role(
-                user, project, conf_role)
+        tempest_roles_to_assign = list(CONF.auth.tempest_roles or [])
+        if "member" not in tempest_roles_to_assign and add_member_role:
+            tempest_roles_to_assign.append("member")
+
+        for role in tempest_roles_to_assign:
+            cls.os_admin.creds_client.assign_user_role(user, project, role)
 
         user_creds = cls.os_admin.creds_client.get_credentials(
             user, project, password)
diff --git a/manila_tempest_tests/tests/api/test_access_rules_metadata.py b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
index bb6f7cd..9913fc8 100644
--- a/manila_tempest_tests/tests/api/test_access_rules_metadata.py
+++ b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
@@ -87,7 +87,7 @@
             self.access_to[self.access_type].pop(), 'rw', metadata=data)
 
         # read metadata
-        get_access = self.shares_v2_client.get_access(access["id"])
+        get_access = self.shares_v2_client.get_access_rule(access["id"])
 
         # verify metadata
         self.assertEqual(data, get_access['metadata'])
@@ -97,7 +97,7 @@
             self.shares_v2_client.delete_access_metadata(access["id"], key)
 
         # verify deletion of metadata
-        access_without_md = self.shares_v2_client.get_access(access["id"])
+        access_without_md = self.shares_v2_client.get_access_rule(access["id"])
         self.assertEqual({}, access_without_md['metadata'])
         self.shares_v2_client.delete_access_rule(self.share["id"],
                                                  access["id"])
@@ -113,7 +113,7 @@
         self.shares_v2_client.update_access_metadata(
             access_id=self.access['id'], metadata=md2)
         # get metadata
-        get_access = self.shares_v2_client.get_access(self.access['id'])
+        get_access = self.shares_v2_client.get_access_rule(self.access['id'])
 
         # verify metadata
         self.md1.update(md2)
diff --git a/manila_tempest_tests/tests/api/test_public_shares.py b/manila_tempest_tests/tests/api/test_public_shares.py
new file mode 100644
index 0000000..6b0ef69
--- /dev/null
+++ b/manila_tempest_tests/tests/api/test_public_shares.py
@@ -0,0 +1,106 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from testtools import testcase as tc
+
+from manila_tempest_tests.tests.api import base
+
+CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
+
+
+class PublicSharesTest(base.BaseSharesMixedTest):
+
+    @classmethod
+    def resource_setup(cls):
+        super(PublicSharesTest, cls).resource_setup()
+        # create share_type
+        share_type = cls._create_share_type()
+        cls.share_type_id = share_type['id']
+
+    @decorators.idempotent_id('557a0474-9e30-47b4-a766-19e2afb13e66')
+    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
+    def test_list_shares_public_with_detail(self):
+        # The default RBAC policy in manila only allows admin users with
+        # system scope to create public shares since the Stein release
+        public_share = self.create_share(
+            name='public_share - must be visible to all projects in the cloud',
+            description='public_share_desc',
+            share_type_id=self.share_type_id,
+            is_public=True,
+            cleanup_in_class=False,
+            client=self.admin_shares_v2_client,
+            version=LATEST_MICROVERSION
+        )
+        private_share = self.create_share(
+            name='private_share',
+            description='private share in the primary user project',
+            share_type_id=self.share_type_id,
+            is_public=False,
+            cleanup_in_class=False,
+            version=LATEST_MICROVERSION
+        )
+
+        params = {'is_public': True}
+        shares = self.alt_shares_v2_client.list_shares_with_detail(params)
+
+        keys = [
+            'status', 'description', 'links', 'availability_zone',
+            'created_at', 'share_proto', 'name', 'snapshot_id', 'id',
+            'size', 'project_id', 'is_public',
+        ]
+        [self.assertIn(key, sh.keys()) for sh in shares for key in keys]
+
+        retrieved_public_share = [
+            share for share in shares if share['id'] == public_share['id']
+        ]
+        msg = ('expected the public share to be listed exactly once, '
+               'found %s entries' % len(retrieved_public_share))
+        self.assertEqual(1, len(retrieved_public_share), msg)
+        self.assertTrue(retrieved_public_share[0]['is_public'])
+
+        self.assertFalse(any([s['id'] == private_share['id'] for s in shares]))
+
+    @decorators.idempotent_id('e073182e-459d-4e08-9300-5bc964ca806b')
+    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
+    def test_update_share_set_is_public(self):
+        share_name = data_utils.rand_name('tempest-share-name')
+        share = self.create_share(name=share_name,
+                                  description='a share we will update',
+                                  share_type_id=self.share_type_id,
+                                  is_public=False,
+                                  cleanup_in_class=False,
+                                  version=LATEST_MICROVERSION)
+
+        share = self.shares_v2_client.get_share(share['id'])
+        self.assertEqual(share_name, share['name'])
+        self.assertEqual('a share we will update', share['description'])
+        self.assertFalse(share['is_public'])
+
+        # update share, manila's default RBAC only allows administrator
+        # users with a system scope token to update a private share to public
+        new_name = data_utils.rand_name('tempest-new-share-name')
+        new_desc = 'share is now updated'
+        updated = self.admin_shares_v2_client.update_share(
+            share['id'], name=new_name, desc=new_desc, is_public=True)
+        self.assertEqual(new_name, updated['name'])
+        self.assertEqual(new_desc, updated['description'])
+        self.assertTrue(updated['is_public'])
+
+        # this share must now be publicly accessible
+        share = self.alt_shares_v2_client.get_share(share['id'])
+        self.assertEqual(new_name, share['name'])
+        self.assertEqual(new_desc, share['description'])
+        self.assertTrue(share['is_public'])
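
The visibility check at the heart of the positive test reduces to a few lines; a minimal sketch, assuming the public and private shares created above:

    # A detailed listing filtered on is_public=True, issued from another
    # project's client, must contain the public share and never the
    # private one.
    shares = self.alt_shares_v2_client.list_shares_with_detail(
        {'is_public': True})
    visible_ids = {s['id'] for s in shares}
    self.assertIn(public_share['id'], visible_ids)
    self.assertNotIn(private_share['id'], visible_ids)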
diff --git a/manila_tempest_tests/tests/api/test_public_shares_negative.py b/manila_tempest_tests/tests/api/test_public_shares_negative.py
new file mode 100644
index 0000000..6d99f13
--- /dev/null
+++ b/manila_tempest_tests/tests/api/test_public_shares_negative.py
@@ -0,0 +1,84 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+from testtools import testcase as tc
+
+from manila_tempest_tests.tests.api import base
+
+
+class PublicSharesNegativeTest(base.BaseSharesMixedTest):
+
+    @classmethod
+    def resource_setup(cls):
+        super(PublicSharesNegativeTest, cls).resource_setup()
+        # create share_type
+        share_type = cls._create_share_type()
+        share_type_id = share_type['id']
+        # create a public share - manila's default RBAC only allows
+        # administrator users operating at system scope to create public shares
+        cls.share = cls.create_share(
+            name='public_share',
+            description='public_share_desc',
+            share_type_id=share_type_id,
+            is_public=True,
+            metadata={'key': 'value'},
+            client=cls.admin_shares_v2_client
+        )
+
+    @decorators.idempotent_id('255011c0-4ed9-4174-bb13-8bbd06a62529')
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+    def test_update_share_with_wrong_public_value(self):
+        self.assertRaises(lib_exc.BadRequest,
+                          self.admin_shares_v2_client.update_share,
+                          self.share["id"],
+                          is_public="truebar")
+
+    @decorators.idempotent_id('3443493b-f56a-4faa-9968-e7cbb0d2802f')
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+    def test_update_other_tenants_public_share(self):
+        self.assertRaises(lib_exc.Forbidden,
+                          self.alt_shares_v2_client.update_share,
+                          self.share["id"],
+                          name="new_name")
+
+    @decorators.idempotent_id('68d1f1bc-16e4-4086-8982-7e44ca6bdc4d')
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+    def test_delete_other_tenants_public_share(self):
+        self.assertRaises(lib_exc.Forbidden,
+                          self.alt_shares_v2_client.delete_share,
+                          self.share['id'])
+
+    @decorators.idempotent_id('1f9e5d84-0885-4a4b-9196-9031a1c01508')
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+    def test_set_metadata_of_other_tenants_public_share(self):
+        self.assertRaises(lib_exc.Forbidden,
+                          self.alt_shares_v2_client.set_metadata,
+                          self.share['id'],
+                          {'key': 'value'})
+
+    @decorators.idempotent_id('fed7a935-9699-43a1-854e-67b61ba6233e')
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+    def test_update_metadata_of_other_tenants_public_share(self):
+        self.assertRaises(lib_exc.Forbidden,
+                          self.alt_shares_v2_client.update_all_metadata,
+                          self.share['id'],
+                          {'key': 'value'})
+
+    @decorators.idempotent_id('bd62adeb-73c2-4b04-8812-80b479cd5c3b')
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+    def test_delete_metadata_of_other_tenants_public_share(self):
+        self.assertRaises(lib_exc.Forbidden,
+                          self.alt_shares_v2_client.delete_metadata,
+                          self.share['id'],
+                          'key')
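
All five negative cases share one shape: a mutating call issued by the alt project's client against the admin-owned public share must raise Forbidden. A hypothetical table-driven equivalent, shown only to make the pattern explicit (not part of the change):

    # Public shares are readable across projects, but every write path
    # stays restricted to the owning project.
    forbidden_calls = [
        (self.alt_shares_v2_client.update_share,
         (self.share['id'],), {'name': 'new_name'}),
        (self.alt_shares_v2_client.delete_share,
         (self.share['id'],), {}),
        (self.alt_shares_v2_client.set_metadata,
         (self.share['id'], {'key': 'value'}), {}),
    ]
    for method, args, kwargs in forbidden_calls:
        self.assertRaises(lib_exc.Forbidden, method, *args, **kwargs)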
diff --git a/manila_tempest_tests/tests/api/test_replication.py b/manila_tempest_tests/tests/api/test_replication.py
index 713f446..d5aa518 100644
--- a/manila_tempest_tests/tests/api/test_replication.py
+++ b/manila_tempest_tests/tests/api/test_replication.py
@@ -130,9 +130,10 @@
         replica = self.create_share_replica(share["id"], self.replica_zone,
                                             cleanup=False)
         # Wait for replica state to update after creation
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
         # Promote the first in_sync replica to active state
         promoted_replica = self.promote_share_replica(replica['id'])
         # Delete the demoted replica so promoted replica can be cleaned
@@ -192,15 +193,16 @@
         access_type, access_to = self._get_access_rule_data_from_config()
         rule = self.shares_v2_client.create_access_rule(
             self.shares[0]["id"], access_type, access_to, 'ro')
-        waiters.wait_for_access_rule_status(
-            self.shares_v2_client, self.shares[0]["id"], rule["id"],
-            constants.RULE_STATE_ACTIVE)
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, self.shares[0]["id"],
+            constants.RULE_STATE_ACTIVE, resource_name='access_rule',
+            rule_id=rule["id"])
 
         # Create the replica
         self._verify_create_replica()
 
         # Verify access_rules_status transitions to 'active' state.
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.shares[0]["id"],
             constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
 
@@ -221,7 +223,7 @@
         self.shares_v2_client.create_access_rule(
             self.shares[0]["id"], access_type, access_to, 'ro')
 
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.shares[0]["id"],
             constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
 
@@ -281,9 +283,9 @@
         access_type, access_to = self._get_access_rule_data_from_config()
         rule = self.shares_v2_client.create_access_rule(
             share["id"], access_type, access_to, 'ro')
-        waiters.wait_for_access_rule_status(
-            self.shares_v2_client, share["id"], rule["id"],
-            constants.RULE_STATE_ACTIVE)
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, share["id"], constants.RULE_STATE_ACTIVE,
+            resource_name='access_rule', rule_id=rule["id"])
 
         original_replica = self.shares_v2_client.list_share_replicas(
             share["id"])[0]
@@ -340,23 +342,26 @@
         new_replica = self.create_share_replica(share["id"],
                                                 self.replica_zone,
                                                 cleanup_in_class=False)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, new_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         # Promote the new replica to active and verify the replica states
         self.promote_share_replica(new_replica['id'])
         self._verify_active_replica_count(share["id"])
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, original_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         # Promote the original replica back to active
         self.promote_share_replica(original_replica['id'])
         self._verify_active_replica_count(share["id"])
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, new_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
     @decorators.idempotent_id('1452156b-75a5-4f3c-a921-834732a03b0a')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
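
Every replica wait in this file now follows one call shape; a minimal sketch, with the client, replica, and constants assumed from the surrounding tests:

    # Poll until replica_state reaches 'in_sync'. The consolidated waiter
    # resolves resource_name='share_replica' to the replica getter and
    # reads the 'replica_state' attribute instead of the default 'status'.
    waiters.wait_for_resource_status(
        self.shares_v2_client, replica['id'],
        constants.REPLICATION_STATE_IN_SYNC,
        resource_name='share_replica', status_attr='replica_state')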
diff --git a/manila_tempest_tests/tests/api/test_replication_export_locations.py b/manila_tempest_tests/tests/api/test_replication_export_locations.py
index 371f02d..36b6003 100644
--- a/manila_tempest_tests/tests/api/test_replication_export_locations.py
+++ b/manila_tempest_tests/tests/api/test_replication_export_locations.py
@@ -152,9 +152,10 @@
         )
         primary_replica = self.shares_v2_client.get_share_replica(
             primary_replica_exports[0]['share_instance_id'])
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         # Share export locations list API
         share_exports = self.shares_v2_client.list_share_export_locations(
diff --git a/manila_tempest_tests/tests/api/test_replication_negative.py b/manila_tempest_tests/tests/api/test_replication_negative.py
index 30367bd..ef80fec 100644
--- a/manila_tempest_tests/tests/api/test_replication_negative.py
+++ b/manila_tempest_tests/tests/api/test_replication_negative.py
@@ -147,10 +147,10 @@
         # Set replica state to out of sync
         self.admin_client.reset_share_replica_state(
             replica['id'], constants.REPLICATION_STATE_OUT_OF_SYNC)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, replica['id'],
             constants.REPLICATION_STATE_OUT_OF_SYNC,
-            status_attr='replica_state')
+            resource_name='share_replica', status_attr='replica_state')
         # Try promoting the first out_of_sync replica to active state
         self.assertRaises(lib_exc.Forbidden,
                           self.shares_v2_client.promote_share_replica,
@@ -177,9 +177,10 @@
         replica = self.create_share_replica(share["id"], self.replica_zone,
                                             cleanup_in_class=False)
         # By default, 'writable' replica is expected to be in active state
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, replica["id"],
-            constants.REPLICATION_STATE_ACTIVE, status_attr='replica_state')
+            constants.REPLICATION_STATE_ACTIVE, resource_name='share_replica',
+            status_attr='replica_state')
 
         # Try promoting the replica
         self.shares_v2_client.promote_share_replica(replica['id'])
diff --git a/manila_tempest_tests/tests/api/test_replication_snapshots.py b/manila_tempest_tests/tests/api/test_replication_snapshots.py
index 8bd6bdf..a150419 100644
--- a/manila_tempest_tests/tests/api/test_replication_snapshots.py
+++ b/manila_tempest_tests/tests/api/test_replication_snapshots.py
@@ -87,9 +87,10 @@
                                                   self.replica_zone,
                                                   cleanup=False)
         self.addCleanup(self.delete_share_replica, original_replica['id'])
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         snapshot = self.create_snapshot_wait_for_active(share["id"])
         self.promote_share_replica(share_replica['id'])
@@ -123,13 +124,15 @@
                                                   self.replica_zone,
                                                   cleanup=False)
         self.addCleanup(self.delete_share_replica, original_replica['id'])
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         # Wait for the snapshot to become available
-        waiters.wait_for_snapshot_status(
-            self.shares_v2_client, snapshot['id'], "available")
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, snapshot['id'], "available",
+            resource_name='snapshot')
 
         self.promote_share_replica(share_replica['id'])
         self.delete_share_replica(original_replica['id'])
@@ -163,15 +166,17 @@
                                                   self.replica_zone,
                                                   cleanup=False)
         self.addCleanup(self.delete_share_replica, original_replica['id'])
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         snapshot2 = self.create_snapshot_wait_for_active(share["id"])
 
         # Wait for snapshot1 to become available
-        waiters.wait_for_snapshot_status(
-            self.shares_v2_client, snapshot1['id'], "available")
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, snapshot1['id'], "available",
+            resource_name='snapshot')
 
         self.promote_share_replica(share_replica['id'])
         # Remove the original active replica to ensure that snapshot is
@@ -206,9 +211,10 @@
                                   share_network_id=self.sn_id)
         share_replica = self.create_share_replica(share["id"],
                                                   self.replica_zone)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
         snapshot = self.create_snapshot_wait_for_active(share["id"])
         self.shares_v2_client.delete_snapshot(snapshot['id'])
         self.shares_v2_client.wait_for_resource_deletion(
@@ -235,9 +241,10 @@
                                                   self.replica_zone,
                                                   cleanup=False)
         self.addCleanup(self.delete_share_replica, original_replica['id'])
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
         self.promote_share_replica(share_replica['id'])
         # Delete the demoted replica so promoted replica can be cleaned
         # during the cleanup
diff --git a/manila_tempest_tests/tests/api/test_revert_to_snapshot.py b/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
index 30d5fcc..4e7d0ca 100644
--- a/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
+++ b/manila_tempest_tests/tests/api/test_revert_to_snapshot.py
@@ -116,7 +116,7 @@
             self.share['id'],
             snapshot['id'],
             version=version)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.share['id'],
             constants.STATUS_AVAILABLE)
 
@@ -139,7 +139,7 @@
         self.shares_v2_client.revert_to_snapshot(self.share['id'],
                                                  snapshot1['id'],
                                                  version=version)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.share['id'],
             constants.STATUS_AVAILABLE)
 
@@ -161,9 +161,10 @@
 
         share_replica = self.create_share_replica(share["id"],
                                                   self.replica_zone)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
 
         snapshot = self.create_snapshot_wait_for_active(share["id"])
 
@@ -171,8 +172,9 @@
             share['id'],
             snapshot['id'],
             version=version)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
-        waiters.wait_for_share_replica_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share_replica['id'],
-            constants.REPLICATION_STATE_IN_SYNC, status_attr='replica_state')
+            constants.REPLICATION_STATE_IN_SYNC, resource_name='share_replica',
+            status_attr='replica_state')
diff --git a/manila_tempest_tests/tests/api/test_rules.py b/manila_tempest_tests/tests/api/test_rules.py
index 15bd7ae..5dd09f3 100644
--- a/manila_tempest_tests/tests/api/test_rules.py
+++ b/manila_tempest_tests/tests/api/test_rules.py
@@ -56,10 +56,11 @@
         self.assertEqual("queued_to_apply", rule['state'])
 
     if utils.is_microversion_le(version, '2.9'):
-        waiters.wait_for_access_rule_status(
-            self.shares_client, self.share["id"], rule["id"], "active")
+        waiters.wait_for_resource_status(
+            self.shares_client, self.share["id"], "active",
+            resource_name='access_rule', rule_id=rule["id"])
     else:
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.share["id"], "active",
             status_attr='access_rules_status', version=version)
         # If the 'access_rules_status' transitions to 'active',
@@ -138,13 +139,15 @@
             self.assertEqual("queued_to_apply", rule['state'])
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, self.share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
@@ -191,13 +194,15 @@
             self.assertEqual("queued_to_apply", rule['state'])
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, self.share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
@@ -292,13 +297,15 @@
             self.assertEqual("queued_to_apply", rule['state'])
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, self.share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
@@ -394,13 +401,15 @@
             self.assertEqual("queued_to_apply", rule['state'])
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, self.share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
@@ -442,13 +451,15 @@
             self.assertEqual("queued_to_apply", rule['state'])
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, self.share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
@@ -507,8 +518,9 @@
         self.assertEqual(access_level, rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
             self.assertNotIn(key, rule.keys())
-        waiters.wait_for_access_rule_status(
-            self.shares_v2_client, self.share["id"], rule["id"], "active")
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, self.share["id"], "active",
+            resource_name='access_rule', rule_id=rule["id"])
 
         self.shares_v2_client.delete_access_rule(
             self.share["id"], rule["id"], version=version)
@@ -521,8 +533,9 @@
         # Grant access to the share
         access1 = self.shares_v2_client.create_access_rule(
             self.share['id'], self.access_type, self.access_to, 'rw')
-        waiters.wait_for_access_rule_status(
-            self.shares_v2_client, self.share['id'], access1['id'], 'active')
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, self.share["id"], "active",
+            resource_name='access_rule', rule_id=access1["id"])
 
         # Create a new user in the current project
         project = self.os_admin.projects_client.show_project(
@@ -538,9 +551,9 @@
         # used in access1
         access2 = user_client.shares_v2_client.create_access_rule(
             share2['id'], self.access_type, self.access_to, 'rw')
-        waiters.wait_for_access_rule_status(
-            user_client.shares_v2_client, share2['id'], access2['id'],
-            'active')
+        waiters.wait_for_resource_status(
+            user_client.shares_v2_client, share2['id'], "active",
+            resource_name='access_rule', rule_id=access2['id'])
 
 
 @ddt.ddt
@@ -616,13 +629,15 @@
             self.assertEqual("queued_to_apply", rule['state'])
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, self.share["id"], "active",
+                resource_name="access_rule", rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.share["id"], "active",
+                resource_name="access_rule", rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, self.share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
@@ -701,13 +716,15 @@
             self.assertEqual("queued_to_apply", rule['state'])
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, share["id"], "active",
+                resource_name="access_rule", rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, share["id"], "active",
+                resource_name="access_rule", rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
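
The microversion branch repeated throughout this file reduces to a single decision; a sketch using a hypothetical helper name (not part of the change), where the 2.10 boundary is inferred from the 2.9 checks above:

    def _wait_for_rule_active(self, client, share_id, rule_id, version):
        # Up to 2.9 an access rule exposes its own state; from 2.10 on
        # the share aggregates rule states in 'access_rules_status'.
        if utils.is_microversion_le(version, '2.9'):
            waiters.wait_for_resource_status(
                client, share_id, 'active',
                resource_name='access_rule', rule_id=rule_id)
        else:
            waiters.wait_for_resource_status(
                client, share_id, 'active',
                status_attr='access_rules_status', version=version)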
diff --git a/manila_tempest_tests/tests/api/test_rules_negative.py b/manila_tempest_tests/tests/api/test_rules_negative.py
index ef6a85c..ac54d1f 100644
--- a/manila_tempest_tests/tests/api/test_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_rules_negative.py
@@ -104,13 +104,15 @@
                 self.share["id"], access_type, access_to, version=version)
 
         if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_access_rule_status(
-                self.shares_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_client, self.share["id"], "active",
+                resource_name='access_rule', rule_id=rule["id"])
         elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_access_rule_status(
-                self.shares_v2_client, self.share["id"], rule["id"], "active")
+            waiters.wait_for_resource_status(
+                self.shares_v2_client, self.share["id"], "active",
+                resource_name="access_rule", rule_id=rule["id"])
         else:
-            waiters.wait_for_share_status(
+            waiters.wait_for_resource_status(
                 self.shares_v2_client, self.share["id"], "active",
                 status_attr='access_rules_status', version=version)
 
@@ -154,7 +156,7 @@
             self.share["id"], "ip", access_to)
         self.addCleanup(self.shares_v2_client.delete_access_rule,
                         self.share["id"], rule['id'])
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, self.share["id"], "active",
             status_attr='access_rules_status')
 
@@ -186,7 +188,7 @@
         share = self.create_share(share_type_id=share_type['id'],
                                   cleanup_in_class=False,
                                   wait_for_status=False)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'], constants.STATUS_ERROR)
         self.assertRaises(lib_exc.BadRequest,
                           self.admin_client.create_access_rule,
@@ -460,7 +462,7 @@
         # Check share's access_rules_status has transitioned to "active" status
         self.alt_shares_v2_client.delete_access_rule(
             share_alt['id'], rule1['id'])
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.alt_shares_v2_client, share_alt['id'], 'active',
             status_attr='access_rules_status')
 
diff --git a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
index a8d2c34..82d27ca 100644
--- a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
+++ b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
@@ -172,7 +172,7 @@
 
         # Create a share in the share network
         share = self.shares_v2_client.create_share(**args)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
         share = self.shares_v2_client.get_share(share['id'])
 
@@ -203,7 +203,7 @@
         )
 
         # Do some necessary cleanup
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, managed_share['id'],
             constants.STATUS_AVAILABLE)
         self.shares_client.delete_share(managed_share['id'])
@@ -256,7 +256,7 @@
 
         # Create a share in the share network
         share = self.shares_v2_client.create_share(**args)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'], constants.STATUS_AVAILABLE)
         share = self.admin_shares_v2_client.get_share(share['id'])
         share_server = self.admin_shares_v2_client.show_share_server(
diff --git a/manila_tempest_tests/tests/api/test_shares.py b/manila_tempest_tests/tests/api/test_shares.py
index 76204df..8802f4c 100644
--- a/manila_tempest_tests/tests/api/test_shares.py
+++ b/manila_tempest_tests/tests/api/test_shares.py
@@ -116,16 +116,6 @@
             detailed_elements.add('progress')
             self.assertTrue(detailed_elements.issubset(share.keys()), msg)
 
-        # This check will ensure that when a share creation request is handled,
-        # if the driver has the "driver handles share servers" option enabled,
-        # that a share server will be created, otherwise, not.
-        share_get = self.admin_shares_v2_client.get_share(share['id'])
-        share_server = share_get['share_server_id']
-        if CONF.share.multitenancy_enabled:
-            self.assertNotEmpty(share_server)
-        else:
-            self.assertEmpty(share_server)
-
         # Delete share
         self.shares_v2_client.delete_share(share['id'])
         self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
diff --git a/manila_tempest_tests/tests/api/test_shares_actions.py b/manila_tempest_tests/tests/api/test_shares_actions.py
index 4fed092..f0792eb 100644
--- a/manila_tempest_tests/tests/api/test_shares_actions.py
+++ b/manila_tempest_tests/tests/api/test_shares_actions.py
@@ -435,40 +435,6 @@
         shares = self.shares_v2_client.list_shares_with_detail(params)
         self.assertGreater(shares["count"], 0)
 
-    @decorators.idempotent_id('557a0474-9e30-47b4-a766-19e2afb13e66')
-    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
-    def test_list_shares_public_with_detail(self):
-        public_share = self.create_share(
-            name='public_share',
-            description='public_share_desc',
-            share_type_id=self.share_type_id,
-            is_public=True,
-            cleanup_in_class=False
-        )
-        private_share = self.create_share(
-            name='private_share',
-            description='private_share_desc',
-            share_type_id=self.share_type_id,
-            is_public=False,
-            cleanup_in_class=False
-        )
-
-        params = {"is_public": True}
-        shares = self.alt_shares_client.list_shares_with_detail(params)
-
-        keys = [
-            "status", "description", "links", "availability_zone",
-            "created_at", "export_location", "share_proto",
-            "name", "snapshot_id", "id", "size", "project_id", "is_public",
-        ]
-        [self.assertIn(key, sh.keys()) for sh in shares for key in keys]
-
-        gen = [sid["id"] for sid in shares if sid["id"] == public_share["id"]]
-        msg = "expected id lists %s times in share list" % (len(gen))
-        self.assertEqual(1, len(gen), msg)
-
-        self.assertFalse(any([s["id"] == private_share["id"] for s in shares]))
-
     @decorators.idempotent_id('174829eb-fd3e-46ef-880b-f05c3d44d1fe')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     @testtools.skipUnless(CONF.share.run_snapshot_tests,
@@ -665,7 +631,7 @@
 
         # extend share and wait for active status
         self.shares_v2_client.extend_share(share['id'], new_size)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_client, share['id'], 'available')
 
         # check state and new size
@@ -693,7 +659,7 @@
 
         # shrink share and wait for active status
         self.shares_v2_client.shrink_share(share['id'], new_size)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_client, share['id'], 'available')
 
         # check state and new size
@@ -748,16 +714,15 @@
         new_name = data_utils.rand_name("tempest-new-name")
         new_desc = data_utils.rand_name("tempest-new-description")
         updated = self.shares_client.update_share(
-            share["id"], new_name, new_desc, is_public=True)
+            share["id"], name=new_name, desc=new_desc)
         self.assertEqual(new_name, updated["name"])
         self.assertEqual(new_desc, updated["description"])
-        self.assertTrue(updated["is_public"])
 
         # get share
         share = self.shares_client.get_share(self.share['id'])
         self.assertEqual(new_name, share["name"])
         self.assertEqual(new_desc, share["description"])
-        self.assertTrue(share["is_public"])
+        self.assertFalse(share["is_public"])
 
     @decorators.idempotent_id('20f299f6-2441-4629-b44e-d791d57f413c')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
diff --git a/manila_tempest_tests/tests/api/test_shares_from_snapshot_across_pools.py b/manila_tempest_tests/tests/api/test_shares_from_snapshot_across_pools.py
index f8c5255..c86c56d 100644
--- a/manila_tempest_tests/tests/api/test_shares_from_snapshot_across_pools.py
+++ b/manila_tempest_tests/tests/api/test_shares_from_snapshot_across_pools.py
@@ -147,7 +147,6 @@
             raise self.skipException(msg)
         azs = list(azs)
         share_a = self.create_share(share_type_id=self.share_type_id,
-                                    is_public=True,
                                     availability_zone=azs[0])
 
         # Create snapshot
diff --git a/manila_tempest_tests/tests/api/test_shares_negative.py b/manila_tempest_tests/tests/api/test_shares_negative.py
index 9ee4202..d00216d 100644
--- a/manila_tempest_tests/tests/api/test_shares_negative.py
+++ b/manila_tempest_tests/tests/api/test_shares_negative.py
@@ -34,22 +34,6 @@
         cls.share_type = cls._create_share_type()
         cls.share_type_id = cls.share_type['id']
 
-        # create share
-        cls.share = cls.create_share(
-            name='public_share',
-            description='public_share_desc',
-            share_type_id=cls.share_type_id,
-            is_public=True,
-            metadata={'key': 'value'}
-        )
-
-    @decorators.idempotent_id('255011c0-4ed9-4174-bb13-8bbd06a62529')
-    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
-    def test_update_share_with_wrong_public_value(self):
-        self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.update_share, self.share["id"],
-                          is_public="truebar")
-
     @decorators.idempotent_id('b9bb8dee-0c7c-4e51-909c-028335b1a6a0')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     @testtools.skipUnless(CONF.share.run_snapshot_tests,
@@ -154,45 +138,6 @@
             snapshot_id=snap["id"],
         )
 
-    @decorators.idempotent_id('3443493b-f56a-4faa-9968-e7cbb0d2802f')
-    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
-    def test_update_other_tenants_public_share(self):
-        self.assertRaises(lib_exc.Forbidden,
-                          self.alt_shares_v2_client.update_share,
-                          self.share["id"],
-                          name="new_name")
-
-    @decorators.idempotent_id('68d1f1bc-16e4-4086-8982-7e44ca6bdc4d')
-    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
-    def test_delete_other_tenants_public_share(self):
-        self.assertRaises(lib_exc.Forbidden,
-                          self.alt_shares_v2_client.delete_share,
-                          self.share['id'])
-
-    @decorators.idempotent_id('1f9e5d84-0885-4a4b-9196-9031a1c01508')
-    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
-    def test_set_metadata_of_other_tenants_public_share(self):
-        self.assertRaises(lib_exc.Forbidden,
-                          self.alt_shares_v2_client.set_metadata,
-                          self.share['id'],
-                          {'key': 'value'})
-
-    @decorators.idempotent_id('fed7a935-9699-43a1-854e-67b61ba6233e')
-    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
-    def test_update_metadata_of_other_tenants_public_share(self):
-        self.assertRaises(lib_exc.Forbidden,
-                          self.alt_shares_v2_client.update_all_metadata,
-                          self.share['id'],
-                          {'key': 'value'})
-
-    @decorators.idempotent_id('bd62adeb-73c2-4b04-8812-80b479cd5c3b')
-    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
-    def test_delete_metadata_of_other_tenants_public_share(self):
-        self.assertRaises(lib_exc.Forbidden,
-                          self.alt_shares_v2_client.delete_metadata,
-                          self.share['id'],
-                          'key')
-
 
 class SharesAPIOnlyNegativeTest(base.BaseSharesMixedTest):
 
diff --git a/manila_tempest_tests/tests/api/test_snapshot_rules.py b/manila_tempest_tests/tests/api/test_snapshot_rules.py
index 30c7da4..615c68d 100644
--- a/manila_tempest_tests/tests/api/test_snapshot_rules.py
+++ b/manila_tempest_tests/tests/api/test_snapshot_rules.py
@@ -52,8 +52,10 @@
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
             self.assertNotIn(key, list(six.iterkeys(rule)))
 
-        waiters.wait_for_snapshot_access_rule_status(
-            self.shares_v2_client, self.snapshot['id'], rule['id'])
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, self.snapshot['id'], 'active',
+            resource_name='snapshot_access_rule', rule_id=rule['id'],
+            status_attr='state')
 
         # delete rule and wait for deletion
         self.shares_v2_client.delete_snapshot_access_rule(self.snapshot['id'],
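
Snapshot access rules report readiness under 'state' rather than 'status', which is why these call sites carry an explicit status_attr; the minimal shape, names as in the test above:

    # 'snapshot_access_rule' selects the snapshot-access getter, and the
    # rule's readiness lives in its 'state' field.
    waiters.wait_for_resource_status(
        self.shares_v2_client, self.snapshot['id'], 'active',
        resource_name='snapshot_access_rule', rule_id=rule['id'],
        status_attr='state')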
diff --git a/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py b/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
index 6965c7f..8b3f2eb 100644
--- a/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_snapshot_rules_negative.py
@@ -93,8 +93,10 @@
         rule = self.shares_v2_client.create_snapshot_access_rule(
             self.snap['id'], access_type, access_to)
 
-        waiters.wait_for_snapshot_access_rule_status(
-            self.shares_v2_client, self.snap['id'], rule['id'])
+        waiters.wait_for_resource_status(
+            self.shares_v2_client, self.snap['id'], 'active',
+            resource_name='snapshot_access_rule', rule_id=rule['id'],
+            status_attr='state')
 
         # try create duplicate of rule
         self.assertRaises(lib_exc.BadRequest,
diff --git a/manila_tempest_tests/tests/scenario/manager_share.py b/manila_tempest_tests/tests/scenario/manager_share.py
index 79a15a2..3f9e818 100644
--- a/manila_tempest_tests/tests/scenario/manager_share.py
+++ b/manila_tempest_tests/tests/scenario/manager_share.py
@@ -348,7 +348,7 @@
         """
         client = client or self.shares_client
         client.delete_access_rule(share_id, access_rule_id)
-        share_waiters.wait_for_share_status(
+        share_waiters.wait_for_resource_status(
             self.shares_v2_client, share_id, "active",
             status_attr='access_rules_status')
 
@@ -424,7 +424,7 @@
                 client.update_access_metadata(
                     metadata={"access_to": "{}".format(access_to)},
                     access_id=access_rule['id'])
-        get_access = client.get_access(access_rule['id'])
+        get_access = client.get_access_rule(access_rule['id'])
         # Set 'access_key' and 'access_to' attributes for use in the
         # mount operation.
         setattr(self, 'access_key', get_access['access_key'])
@@ -533,7 +533,8 @@
             self.addCleanup(client.delete_share,
                             share['id'])
 
-        share_waiters.wait_for_share_status(client, share['id'], 'available')
+        share_waiters.wait_for_resource_status(client, share['id'],
+                                               'available')
         return share
 
     def _create_snapshot(self, share_id, client=None, **kwargs):
@@ -542,8 +543,8 @@
         self.addCleanup(
             client.wait_for_resource_deletion, snapshot_id=snapshot['id'])
         self.addCleanup(client.delete_snapshot, snapshot['id'])
-        share_waiters.wait_for_snapshot_status(
-            client, snapshot["id"], "available")
+        share_waiters.wait_for_resource_status(
+            client, snapshot["id"], "available", resource_name='snapshot')
         return snapshot
 
     def _wait_for_share_server_deletion(self, sn_id, client=None):
@@ -593,7 +594,7 @@
         access = client.create_access_rule(share_id, access_type, access_to,
                                            access_level)
 
-        share_waiters.wait_for_share_status(
+        share_waiters.wait_for_resource_status(
             client, share_id, "active", status_attr='access_rules_status')
 
         if cleanup:
@@ -619,8 +620,10 @@
             self.addCleanup(client.delete_snapshot_access_rule,
                             snapshot_id, access['id'])
 
-        share_waiters.wait_for_snapshot_access_rule_status(
-            client, snapshot_id, access['id'])
+        share_waiters.wait_for_resource_status(
+            client, snapshot_id, 'active',
+            resource_name='snapshot_access_rule', rule_id=access['id'],
+            status_attr='state')
 
         return access
 
diff --git a/manila_tempest_tests/tests/scenario/test_share_extend.py b/manila_tempest_tests/tests/scenario/test_share_extend.py
index c099b91..f0f378b 100644
--- a/manila_tempest_tests/tests/scenario/test_share_extend.py
+++ b/manila_tempest_tests/tests/scenario/test_share_extend.py
@@ -96,7 +96,7 @@
         extended_share_size = default_share_size + 1
         self.shares_v2_client.extend_share(share["id"],
                                            new_size=extended_share_size)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share["id"], constants.STATUS_AVAILABLE)
         share = self.shares_v2_client.get_share(share["id"])
         self.assertEqual(extended_share_size, int(share["size"]))
diff --git a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
index cc2f1a7..0c60184 100644
--- a/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
+++ b/manila_tempest_tests/tests/scenario/test_share_manage_unmanage.py
@@ -132,7 +132,7 @@
             share['share_proto'],
             locations[0],
             share_type['id'])
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_admin_v2_client, managed_share['id'], 'available')
 
         LOG.debug('Step 11 - grant access again')
@@ -166,7 +166,7 @@
             share['share_proto'],
             locations[0],
             share_type['id'])
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_admin_v2_client, remanaged_share['id'], 'manage_error')
 
         self.shares_admin_v2_client.reset_state(remanaged_share['id'])
diff --git a/manila_tempest_tests/tests/scenario/test_share_shrink.py b/manila_tempest_tests/tests/scenario/test_share_shrink.py
index 94f4e62..2e1e44f 100644
--- a/manila_tempest_tests/tests/scenario/test_share_shrink.py
+++ b/manila_tempest_tests/tests/scenario/test_share_shrink.py
@@ -87,7 +87,7 @@
         LOG.debug('Step 8 - try update size, shrink and wait')
         self.shares_v2_client.shrink_share(share['id'],
                                            new_size=default_share_size)
-        waiters.wait_for_share_status(
+        waiters.wait_for_resource_status(
             self.shares_v2_client, share['id'],
             ['shrinking_possible_data_loss_error', 'available'])
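
This last call passes a list rather than a single status, so the wait ends on whichever terminal state the backend reaches first; a minimal sketch of handling both outcomes afterwards, assuming the waiter accepts a list as the call above implies:

    waiters.wait_for_resource_status(
        self.shares_v2_client, share['id'],
        ['shrinking_possible_data_loss_error', 'available'])
    share = self.shares_v2_client.get_share(share['id'])
    # Either status is acceptable here; the test only needs the share to
    # leave the transient 'shrinking' state before asserting on size.
    self.assertIn(share['status'],
                  ['shrinking_possible_data_loss_error', 'available'])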