Merge "Modify security service list test"
diff --git a/manila_tempest_tests/config.py b/manila_tempest_tests/config.py
index 1758064..b62c409 100644
--- a/manila_tempest_tests/config.py
+++ b/manila_tempest_tests/config.py
@@ -40,7 +40,7 @@
                     "This value is only used to validate the versions "
                     "response from Manila."),
     cfg.StrOpt("max_api_microversion",
-               default="2.70",
+               default="2.71",
                help="The maximum api microversion is configured to be the "
                     "value of the latest microversion supported by Manila."),
     cfg.StrOpt("region",
diff --git a/manila_tempest_tests/services/share/json/shares_client.py b/manila_tempest_tests/services/share/json/shares_client.py
index 3413387..6871eda 100644
--- a/manila_tempest_tests/services/share/json/shares_client.py
+++ b/manila_tempest_tests/services/share/json/shares_client.py
@@ -42,6 +42,10 @@
         self.share_network_id = CONF.share.share_network_id
         self.share_size = CONF.share.share_size
 
+    def _parse_resp(self, body, top_key_to_verify=None):
+        return super(SharesClient, self)._parse_resp(
+            body, top_key_to_verify=top_key_to_verify)
+
     def create_share(self, share_protocol=None, size=None,
                      name=None, snapshot_id=None, description=None,
                      metadata=None, share_network_id=None,
@@ -446,7 +450,7 @@
     def get_metadata_item(self, share_id, key):
         resp, body = self.get("shares/%s/metadata/%s" % (share_id, key))
         self.expected_success(200, resp.status)
-        return self._parse_resp(body)
+        return self._parse_resp(body, top_key_to_verify='meta')
 
 ###############
 
diff --git a/manila_tempest_tests/tests/api/admin/test_share_instances.py b/manila_tempest_tests/tests/api/admin/test_share_instances.py
index 38c3f49..4f2e454 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_instances.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_instances.py
@@ -95,6 +95,8 @@
             expected_keys.append("cast_rules_to_readonly")
         if utils.is_microversion_ge(version, '2.54'):
             expected_keys.append("progress")
+        if utils.is_microversion_ge(version, '2.71'):
+            expected_keys.append("updated_at")
         expected_keys = sorted(expected_keys)
         actual_keys = sorted(si.keys())
         self.assertEqual(expected_keys, actual_keys,
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
index 99d712c..2535745 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
@@ -95,14 +95,11 @@
         # protocols.
         access_rules = self._get_access_rule_data_for_protocols()
         for rule in access_rules:
-            self.shares_v2_client.create_access_rule(
+            self.allow_access(
                 share['id'], access_type=rule.get('access_type'),
                 access_to=rule.get('access_to'),
                 access_level=rule.get('access_level')
             )
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
-            status_attr='access_rules_status')
 
         share = self.shares_v2_client.get_share(share['id'])['share']
 
@@ -124,8 +121,8 @@
             self.assertIn(snapshot['status'], statuses)
 
     def _validate_share_server_migration_complete(
-        self, share, dest_host, dest_server_id, snapshot_id=None,
-        share_network_id=None, version=CONF.share.max_api_microversion):
+            self, share, dest_host, dest_server_id, snapshot_id=None,
+            share_network_id=None, version=CONF.share.max_api_microversion):
         """Validates the share server migration complete. """
 
         # Check the export locations
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index d5cc439..34f8e41 100755
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -58,6 +58,8 @@
     r"(?=.*\[.*\b(%(p)s|%(n)s)\b.*\])(?=.*\[.*\b(%(a)s|%(b)s|%(ab)s)\b.*\])" %
     TAGS_MAPPER)
 
+LATEST_MICROVERSION = CONF.share.max_api_microversion
+
 
 def verify_test_has_appropriate_tags(self):
     if not TAGS_PATTERN.match(self.id()):
@@ -1046,7 +1048,8 @@
         return waiters.wait_for_message(self.shares_v2_client, share['id'])
 
     def allow_access(self, share_id, client=None, access_type=None,
-                     access_level='rw', access_to=None, status='active',
+                     access_level='rw', access_to=None, metadata=None,
+                     version=LATEST_MICROVERSION, status='active',
                      raise_rule_in_error_state=True, cleanup=True):
 
         client = client or self.shares_v2_client
@@ -1054,15 +1057,23 @@
         access_type = access_type or a_type
         access_to = access_to or a_to
 
-        rule = client.create_access_rule(share_id, access_type, access_to,
-                                         access_level)['access']
+        kwargs = {
+            'access_type': access_type,
+            'access_to': access_to,
+            'access_level': access_level
+        }
+        if client is self.shares_v2_client:
+            kwargs.update({'metadata': metadata, 'version': version})
+
+        rule = client.create_access_rule(share_id, **kwargs)['access']
         waiters.wait_for_resource_status(
             client, share_id, status, resource_name='access_rule',
-            rule_id=rule['id'],
+            rule_id=rule['id'], version=version,
             raise_rule_in_error_state=raise_rule_in_error_state)
         if cleanup:
-            self.addCleanup(client.wait_for_resource_deletion,
-                            rule_id=rule['id'], share_id=share_id)
+            self.addCleanup(
+                client.wait_for_resource_deletion, rule_id=rule['id'],
+                share_id=share_id, version=version)
             self.addCleanup(client.delete_access_rule, share_id, rule['id'])
         return rule
 
diff --git a/manila_tempest_tests/tests/api/test_access_rules_metadata.py b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
index fa3c0a7..ee541ac 100644
--- a/manila_tempest_tests/tests/api/test_access_rules_metadata.py
+++ b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
@@ -19,6 +19,7 @@
 from testtools import testcase as tc
 
 from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
 from manila_tempest_tests.tests.api import base
 from manila_tempest_tests import utils
 
@@ -77,16 +78,19 @@
             cls.share["id"], cls.access_type,
             cls.access_to[cls.access_type].pop(), 'rw',
             metadata=cls.md1)['access']
+        waiters.wait_for_resource_status(
+            cls.shares_v2_client, cls.share["id"], "active",
+            resource_name='access_rule', rule_id=cls.access["id"])
 
     @decorators.idempotent_id('4c8e0236-2e7b-4337-be3c-17b51a738644')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     def test_set_get_delete_access_metadata(self):
         data = {"key1": "v" * 255, "k" * 255: "value2"}
         # set metadata
-        access = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type,
-            self.access_to[self.access_type].pop(), 'rw',
-            metadata=data)['access']
+        access = self.allow_access(
+            self.share["id"], access_type=self.access_type,
+            access_to=self.access_to[self.access_type].pop(),
+            access_level='rw', metadata=data)
 
         # read metadata
         get_access = self.shares_v2_client.get_access_rule(
@@ -103,10 +107,6 @@
         access_without_md = self.shares_v2_client.get_access_rule(
             access["id"])['access']
         self.assertEqual({}, access_without_md['metadata'])
-        self.shares_v2_client.delete_access_rule(self.share["id"],
-                                                 access["id"])
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=access["id"], share_id=self.share["id"])
 
     @decorators.idempotent_id('8c294d7d-0702-49ce-b964-0945ec323370')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@@ -129,10 +129,10 @@
     def test_list_access_filter_by_metadata(self):
         data = {"key3": "v3", "key4": "value4"}
         # set metadata
-        access = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type,
-            self.access_to[self.access_type].pop(), 'rw',
-            metadata=data)['access']
+        access = self.allow_access(
+            self.share["id"], access_type=self.access_type,
+            access_to=self.access_to[self.access_type].pop(),
+            access_level='rw', metadata=data)
 
         # list metadata with metadata filter
         list_access = self.shares_v2_client.list_access_rules(
diff --git a/manila_tempest_tests/tests/api/test_metadata.py b/manila_tempest_tests/tests/api/test_metadata.py
index c8529a3..d2ae326 100644
--- a/manila_tempest_tests/tests/api/test_metadata.py
+++ b/manila_tempest_tests/tests/api/test_metadata.py
@@ -34,14 +34,15 @@
     def _verify_share_metadata(self, share, md):
 
         # get metadata of share
-        metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        metadata = self.shares_v2_client.get_metadata(share["id"])['metadata']
 
         # verify metadata
         self.assertEqual(md, metadata)
 
         # verify metadata items
         for key in md:
-            get_value = self.shares_client.get_metadata_item(share["id"], key)
+            get_value = self.shares_v2_client.get_metadata_item(share["id"],
+                                                                key)
             self.assertEqual(md[key], get_value[key])
 
     @decorators.idempotent_id('9070249f-6e94-4a38-a036-08debee547c3')
@@ -69,17 +70,18 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md)
+        self.shares_v2_client.set_metadata(share["id"], md)
 
         # verify metadata
         self._verify_share_metadata(share, md)
 
         # delete metadata
         for key in md.keys():
-            self.shares_client.delete_metadata(share["id"], key)
+            self.shares_v2_client.delete_metadata(share["id"], key)
 
         # verify deletion of metadata
-        get_metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        get_metadata = self.shares_v2_client.get_metadata(share["id"])[
+            'metadata']
         self.assertEmpty(get_metadata)
 
     @decorators.idempotent_id('4e5f8159-62b6-4d5c-f729-d8b1f029d7de')
@@ -93,13 +95,13 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md1)
+        self.shares_v2_client.set_metadata(share["id"], md1)
 
         # verify metadata
         self._verify_share_metadata(share, md1)
 
         # set metadata again
-        self.shares_client.set_metadata(share["id"], md2)
+        self.shares_v2_client.set_metadata(share["id"], md2)
 
         # verify metadata
         md1.update(md2)
@@ -110,10 +112,11 @@
 
         # delete metadata
         for key in md.keys():
-            self.shares_client.delete_metadata(share["id"], key)
+            self.shares_v2_client.delete_metadata(share["id"], key)
 
         # verify deletion of metadata
-        get_metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        get_metadata = self.shares_v2_client.get_metadata(
+            share["id"])['metadata']
         self.assertEmpty(get_metadata)
 
     @decorators.idempotent_id('2ec70ba5-050b-3b17-c862-c149e53543c0')
@@ -127,13 +130,13 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md1)
+        self.shares_v2_client.set_metadata(share["id"], md1)
 
         # verify metadata
         self._verify_share_metadata(share, md1)
 
         # set metadata again
-        self.shares_client.set_metadata(share["id"], md2)
+        self.shares_v2_client.set_metadata(share["id"], md2)
 
         # verify metadata
         md = {u"key9": u"value13", u"key10": u"value10",
@@ -142,10 +145,11 @@
 
         # delete metadata
         for key in md.keys():
-            self.shares_client.delete_metadata(share["id"], key)
+            self.shares_v2_client.delete_metadata(share["id"], key)
 
         # verify deletion of metadata
-        get_metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        get_metadata = self.shares_v2_client.get_metadata(
+            share["id"])['metadata']
         self.assertEmpty(get_metadata)
 
     @decorators.idempotent_id('c94851f4-2559-4712-9297-9912db1da7ff')
@@ -160,10 +164,10 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md1)
+        self.shares_v2_client.set_metadata(share["id"], md1)
 
         # update metadata
-        self.shares_client.update_all_metadata(share["id"], md2)
+        self.shares_v2_client.update_all_metadata(share["id"], md2)
 
         # verify metadata
         self._verify_share_metadata(share, md2)
@@ -173,9 +177,9 @@
     def test_set_metadata_min_size_key(self):
         data = {"k": "value"}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data['k'], body_get.get('k'))
 
@@ -185,9 +189,9 @@
         max_key = "k" * 255
         data = {max_key: "value"}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertIn(max_key, body_get)
         self.assertEqual(data[max_key], body_get.get(max_key))
@@ -197,9 +201,9 @@
     def test_set_metadata_min_size_value(self):
         data = {"key": "v"}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data['key'], body_get['key'])
 
@@ -209,9 +213,9 @@
         max_value = "v" * 1023
         data = {"key": max_value}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data['key'], body_get['key'])
 
@@ -220,9 +224,9 @@
     def test_upd_metadata_min_size_key(self):
         data = {"k": "value"}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
 
@@ -232,9 +236,9 @@
         max_key = "k" * 255
         data = {max_key: "value"}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
 
@@ -243,9 +247,9 @@
     def test_upd_metadata_min_size_value(self):
         data = {"key": "v"}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
 
@@ -255,8 +259,8 @@
         max_value = "v" * 1023
         data = {"key": max_value}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
diff --git a/manila_tempest_tests/tests/api/test_metadata_negative.py b/manila_tempest_tests/tests/api/test_metadata_negative.py
index 93a3628..caa34ac 100644
--- a/manila_tempest_tests/tests/api/test_metadata_negative.py
+++ b/manila_tempest_tests/tests/api/test_metadata_negative.py
@@ -61,14 +61,14 @@
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     def test_try_set_metadata_with_empty_key(self):
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.set_metadata,
+                          self.shares_v2_client.set_metadata,
                           self.share["id"], {"": "value"})
 
     @decorators.idempotent_id('759ca34d-1c87-43f3-8da2-8e1d373049ac')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     def test_try_upd_metadata_with_empty_key(self):
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.update_all_metadata,
+                          self.shares_v2_client.update_all_metadata,
                           self.share["id"], {"": "value"})
 
     @decorators.idempotent_id('94c7ebb3-14c3-4ff1-9839-ae3acb318cd0')
@@ -77,7 +77,7 @@
         too_big_key = "x" * 256
         md = {too_big_key: "value"}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.set_metadata,
+                          self.shares_v2_client.set_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('33ef3047-6ca3-4547-a681-b52314382dcb')
@@ -86,7 +86,7 @@
         too_big_key = "x" * 256
         md = {too_big_key: "value"}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.update_all_metadata,
+                          self.shares_v2_client.update_all_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('1114970a-1b45-4c56-b20a-e13e1764e3c4')
@@ -95,7 +95,7 @@
         too_big_value = "x" * 1024
         md = {"key": too_big_value}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.set_metadata,
+                          self.shares_v2_client.set_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('c2eddcf0-cf81-4f9f-b06d-c9165ab8553e')
@@ -104,12 +104,12 @@
         too_big_value = "x" * 1024
         md = {"key": too_big_value}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.update_all_metadata,
+                          self.shares_v2_client.update_all_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('14df3262-5a2b-4de4-b335-422329b22b07')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     def test_try_delete_unexisting_metadata(self):
         self.assertRaises(lib_exc.NotFound,
-                          self.shares_client.delete_metadata,
+                          self.shares_v2_client.delete_metadata,
                           self.share["id"], "wrong_key")
diff --git a/manila_tempest_tests/tests/api/test_replication.py b/manila_tempest_tests/tests/api/test_replication.py
index 20ba8fe..7873926 100644
--- a/manila_tempest_tests/tests/api/test_replication.py
+++ b/manila_tempest_tests/tests/api/test_replication.py
@@ -331,12 +331,9 @@
     def test_add_access_rule_create_replica_delete_rule(self):
         # Add access rule to the share
         access_type, access_to = self._get_access_rule_data_from_config()
-        rule = self.shares_v2_client.create_access_rule(
-            self.shares[0]["id"], access_type, access_to, 'ro')['access']
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.shares[0]["id"],
-            constants.RULE_STATE_ACTIVE, resource_name='access_rule',
-            rule_id=rule["id"])
+        self.allow_access(
+            self.shares[0]["id"], access_type=access_type, access_to=access_to,
+            access_level='ro')
 
         # Create the replica
         self._verify_create_replica()
@@ -346,12 +343,6 @@
             self.shares_v2_client, self.shares[0]["id"],
             constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
 
-        # Delete rule and wait for deletion
-        self.shares_v2_client.delete_access_rule(self.shares[0]["id"],
-                                                 rule["id"])
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.shares[0]['id'])
-
     @decorators.idempotent_id('3af3f19a-1195-464e-870b-1a3918914f1b')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     def test_create_replica_add_access_rule_delete_replica(self):
@@ -360,12 +351,9 @@
         share_replica = self._verify_create_replica()
 
         # Add access rule
-        self.shares_v2_client.create_access_rule(
-            self.shares[0]["id"], access_type, access_to, 'ro')
-
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.shares[0]["id"],
-            constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
+        self.allow_access(
+            self.shares[0]["id"], access_type=access_type, access_to=access_to,
+            access_level='ro')
 
         # Delete the replica
         self.delete_share_replica(share_replica["id"])
@@ -421,11 +409,9 @@
         share = self.create_shares([self.creation_data])[0]
         # Add access rule
         access_type, access_to = self._get_access_rule_data_from_config()
-        rule = self.shares_v2_client.create_access_rule(
-            share["id"], access_type, access_to, 'ro')['access']
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, share["id"], constants.RULE_STATE_ACTIVE,
-            resource_name='access_rule', rule_id=rule["id"])
+        self.allow_access(
+            share["id"], access_type=access_type, access_to=access_to,
+            access_level='ro')
 
         original_replica = self.shares_v2_client.list_share_replicas(
             share["id"])['share_replicas'][0]
diff --git a/manila_tempest_tests/tests/api/test_rules.py b/manila_tempest_tests/tests/api/test_rules.py
index 30b1fc5..979bf06 100644
--- a/manila_tempest_tests/tests/api/test_rules.py
+++ b/manila_tempest_tests/tests/api/test_rules.py
@@ -22,7 +22,6 @@
 import testtools
 from testtools import testcase as tc
 
-from manila_tempest_tests.common import waiters
 from manila_tempest_tests.tests.api import base
 from manila_tempest_tests import utils
 
@@ -37,13 +36,14 @@
     :param self: instance of test class
     """
 
-    if utils.is_microversion_eq(version, '1.0'):
-        rule = self.shares_client.create_access_rule(
-            self.share["id"], self.access_type, self.access_to, 'ro')['access']
+    if utils.is_microversion_le(version, '2.9'):
+        client = self.shares_client
     else:
-        rule = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type, self.access_to, 'ro',
-            version=version)['access']
+        client = self.shares_v2_client
+
+    rule = self.allow_access(
+        self.share["id"], client=client, access_type=self.access_type,
+        access_to=self.access_to, access_level='ro', version=version)
 
     self.assertEqual('ro', rule['access_level'])
     for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -55,14 +55,6 @@
     else:
         self.assertEqual("queued_to_apply", rule['state'])
 
-    if utils.is_microversion_le(version, '2.9'):
-        waiters.wait_for_resource_status(
-            self.shares_client, self.share["id"], "active",
-            resource_name='access_rule', rule_id=rule["id"])
-    else:
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            status_attr='access_rules_status', version=version)
         # If the 'access_rules_status' transitions to 'active',
         # rule state must too
         rules = self.shares_v2_client.list_access_rules(
@@ -70,16 +62,6 @@
         rule = [r for r in rules if r['id'] == rule['id']][0]
         self.assertEqual("active", rule['state'])
 
-    if utils.is_microversion_eq(version, '1.0'):
-        self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-        self.shares_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.share['id'])
-    else:
-        self.shares_v2_client.delete_access_rule(
-            self.share["id"], rule["id"], version=version)
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.share['id'], version=version)
-
 
 @ddt.ddt
 class ShareIpRulesForNFSTest(base.BaseSharesMixedTest):
@@ -109,8 +91,11 @@
     @decorators.idempotent_id('3390df2d-f6f8-4634-a562-87c1be994f6a')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @ddt.data(*itertools.chain(
-        itertools.product({'1.0', '2.9', '2.37', LATEST_MICROVERSION}, {4}),
-        itertools.product({'2.38', LATEST_MICROVERSION}, {6})
+        itertools.product(
+            utils.deduplicate(['1.0', '2.9', '2.37', LATEST_MICROVERSION]),
+            [4]),
+        itertools.product(
+            utils.deduplicate(['2.38', LATEST_MICROVERSION]), [6])
     ))
     @ddt.unpack
     def test_create_delete_access_rules_with_one_ip(self, version,
@@ -120,14 +105,16 @@
             access_to = utils.rand_ip()
         else:
             access_to = utils.rand_ipv6_ip()
-        # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, access_to)['access']
+
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, access_to,
-                version=version)['access']
+            client = self.shares_v2_client
+
+        # create rule
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=access_to, version=version)
 
         self.assertEqual('rw', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -139,35 +126,14 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('5d25168a-d646-443e-8cf1-3151eb7887f5')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @ddt.data(*itertools.chain(
-        itertools.product({'1.0', '2.9', '2.37', LATEST_MICROVERSION}, {4}),
-        itertools.product({'2.38', LATEST_MICROVERSION}, {6})
+        itertools.product(
+            utils.deduplicate(['1.0', '2.9', '2.37', LATEST_MICROVERSION]),
+            [4]),
+        itertools.product(
+            utils.deduplicate(['2.38', LATEST_MICROVERSION]), [6])
     ))
     @ddt.unpack
     def test_create_delete_access_rule_with_cidr(self, version, ip_version):
@@ -175,49 +141,19 @@
             access_to = utils.rand_ip(network=True)
         else:
             access_to = utils.rand_ipv6_ip(network=True)
-        # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, access_to)['access']
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, access_to,
-                version=version)['access']
+            client = self.shares_v2_client
+        # create rule
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=access_to, version=version)
 
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
             self.assertNotIn(key, rule.keys())
         self.assertEqual('rw', rule['access_level'])
 
-        # rules must start out in 'new' until 2.28 & 'queued_to_apply' after
-        if utils.is_microversion_le(version, "2.27"):
-            self.assertEqual("new", rule['state'])
-        else:
-            self.assertEqual("queued_to_apply", rule['state'])
-
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('187a4fb0-ba1d-45b9-83c9-f0272e7e6f3e')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @testtools.skipIf(
@@ -277,15 +213,15 @@
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
                                  LATEST_MICROVERSION]))
     def test_create_delete_user_rule(self, version):
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
+        else:
+            client = self.shares_v2_client
 
         # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to)['access']
-        else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to,
-                version=version)['access']
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, version=version)
 
         self.assertEqual('rw', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -297,30 +233,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('ccb08342-b7ef-4dda-84ba-8de9879d8862')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @testtools.skipIf(
@@ -381,15 +293,15 @@
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
                                  LATEST_MICROVERSION]))
     def test_create_delete_cert_rule(self, version):
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
+        else:
+            client = self.shares_v2_client
 
         # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to)['access']
-        else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to,
-                version=version)['access']
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, version=version)
 
         self.assertEqual('rw', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -401,30 +313,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('cdd93d8e-7255-4ed4-8ef0-929a62bb302c')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @testtools.skipIf(
@@ -433,13 +321,13 @@
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
                                  LATEST_MICROVERSION]))
     def test_create_delete_cert_ro_access_rule(self, version):
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], 'cert', 'client2.com', 'ro')['access']
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], 'cert', 'client2.com', 'ro',
-                version=version)['access']
+            client = self.shares_v2_client
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type='cert',
+            access_to='client2.com', access_level='ro', version=version)
 
         self.assertEqual('ro', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -451,29 +339,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
 
 @ddt.ddt
 class ShareCephxRulesForCephFSTest(base.BaseSharesMixedTest):
@@ -512,31 +377,21 @@
         ('rw', 'ro')))
     @ddt.unpack
     def test_create_delete_cephx_rule(self, version, access_to, access_level):
-        rule = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type, access_to, version=version,
-            access_level=access_level)['access']
+        rule = self.allow_access(
+            self.share["id"], access_type=self.access_type,
+            access_to=access_to, version=version, access_level=access_level)
 
         self.assertEqual(access_level, rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
             self.assertNotIn(key, rule.keys())
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            resource_name='access_rule', rule_id=rule["id"])
-
-        self.shares_v2_client.delete_access_rule(
-            self.share["id"], rule["id"], version=version)
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.share['id'])
 
     @decorators.idempotent_id('ad907303-a439-4fcb-8845-fe91ecab7dc2')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     def test_different_users_in_same_tenant_can_use_same_cephx_id(self):
         # Grant access to the share
-        access1 = self.shares_v2_client.create_access_rule(
-            self.share['id'], self.access_type, self.access_to, 'rw')['access']
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            resource_name='access_rule', rule_id=access1["id"])
+        self.allow_access(
+            self.share['id'], access_type=self.access_type,
+            access_to=self.access_to, access_level='rw')
 
         # Create a new user in the current project
         project = self.os_admin.projects_client.show_project(
@@ -550,11 +405,10 @@
 
         # Grant access to the second share using the same cephx ID that was
         # used in access1
-        access2 = user_client.shares_v2_client.create_access_rule(
-            share2['id'], self.access_type, self.access_to, 'rw')['access']
-        waiters.wait_for_resource_status(
-            user_client.shares_v2_client, share2['id'], "active",
-            resource_name='access_rule', rule_id=access2['id'])
+        self.allow_access(
+            share2['id'], client=user_client.shares_v2_client,
+            access_type=self.access_type, access_to=self.access_to,
+            access_level='rw')
 
 
 @ddt.ddt
@@ -606,14 +460,14 @@
         metadata = None
         if utils.is_microversion_ge(version, '2.45'):
             metadata = {'key1': 'v1', 'key2': 'v2'}
-        # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to)['access']
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to,
-                metadata=metadata, version=version)['access']
+            client = self.shares_v2_client
+        # create rule
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, metadata=metadata, version=version)
 
         # verify added rule keys since 2.33 when create rule
         if utils.is_microversion_ge(version, '2.33'):
@@ -629,19 +483,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
         # list rules
         if utils.is_microversion_eq(version, '1.0'):
             rules = self.shares_client.list_access_rules(
@@ -678,16 +519,6 @@
         msg = "expected id lists %s times in rule list" % (len(gen))
         self.assertEqual(1, len(gen), msg)
 
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('b77bcbda-9754-48f0-9be6-79341ad1af64')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
@@ -698,18 +529,18 @@
             msg = ("API version %s does not support cephx access type, need "
                    "version >= 2.13." % version)
             raise self.skipException(msg)
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
+        else:
+            client = self.shares_v2_client
 
         # create share
         share = self.create_share(share_type_id=self.share_type_id)
 
         # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                share["id"], self.access_type, self.access_to)['access']
-        else:
-            rule = self.shares_v2_client.create_access_rule(
-                share["id"], self.access_type, self.access_to,
-                version=version)['access']
+        rule = self.allow_access(
+            share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, version=version, cleanup=False)
 
         # rules must start out in 'new' until 2.28 & 'queued_to_apply' after
         if utils.is_microversion_le(version, "2.27"):
@@ -717,19 +548,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
         # delete share
         if utils.is_microversion_eq(version, '1.0'):
             self.shares_client.delete_share(share['id'])
diff --git a/manila_tempest_tests/tests/api/test_rules_negative.py b/manila_tempest_tests/tests/api/test_rules_negative.py
index 5225651..1eb858d 100644
--- a/manila_tempest_tests/tests/api/test_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_rules_negative.py
@@ -99,27 +99,15 @@
         access_type = "ip"
         access_to = "1.2.3.4"
 
-        # create rule
         if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], access_type, access_to)['access']
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], access_type, access_to,
-                version=version)['access']
+            client = self.shares_v2_client
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
+        # create rule
+        self.allow_access(
+            self.share["id"], client=client, access_type=access_type,
+            access_to=access_to, version=version)
 
         # try create duplicate of rule
         if utils.is_microversion_eq(version, '1.0'):
@@ -132,18 +120,6 @@
                               self.share["id"], access_type, access_to,
                               version=version)
 
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"],
-                                                  rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share["id"])
-        else:
-            self.shares_v2_client.delete_access_rule(self.share["id"],
-                                                     rule["id"])
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share["id"], version=version)
-
     @decorators.idempotent_id('63932d1d-a60a-4af7-ba3b-7cf6c68aaee9')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     @ddt.data("10.20.30.40", "fd8c:b029:bba6:ac54::1",
@@ -157,13 +133,8 @@
                       "is %s" % CONF.share.max_api_microversion)
             raise self.skipException(reason)
 
-        rule = self.shares_v2_client.create_access_rule(
-            self.share["id"], "ip", access_to)['access']
-        self.addCleanup(self.shares_v2_client.delete_access_rule,
-                        self.share["id"], rule['id'])
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            status_attr='access_rules_status')
+        self.allow_access(
+            self.share["id"], access_type="ip", access_to=access_to)
 
         self.assertRaises(lib_exc.BadRequest,
                           self.shares_v2_client.create_access_rule,
diff --git a/manila_tempest_tests/tests/api/test_scheduler_hints.py b/manila_tempest_tests/tests/api/test_scheduler_hints.py
index 83e5a1a..5012f17 100644
--- a/manila_tempest_tests/tests/api/test_scheduler_hints.py
+++ b/manila_tempest_tests/tests/api/test_scheduler_hints.py
@@ -70,10 +70,10 @@
         self.assertEqual(backend_a, backend_b)
 
         # get metadata of share
-        metadata_a = self.shares_client.get_metadata(
+        metadata_a = self.shares_v2_client.get_metadata(
             self.share_a["id"])['metadata']
         md_a = {"__affinity_same_host": "%s" % share_b["id"]}
-        metadata_b = self.shares_client.get_metadata(
+        metadata_b = self.shares_v2_client.get_metadata(
             share_b["id"])['metadata']
         md_b = {"__affinity_same_host": "%s" % self.share_a["id"]}
 
diff --git a/manila_tempest_tests/tests/scenario/manager.py b/manila_tempest_tests/tests/scenario/manager.py
index 947d0e6..a73ab44 100644
--- a/manila_tempest_tests/tests/scenario/manager.py
+++ b/manila_tempest_tests/tests/scenario/manager.py
@@ -14,290 +14,28 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import subprocess
-
-import netaddr
 from oslo_log import log
-from oslo_utils import netutils
 from oslo_utils import uuidutils
-from tempest.common import compute
 from tempest.common import image as common_image
-from tempest.common.utils.linux import remote_client
-from tempest.common.utils import net_utils
-from tempest.common import waiters
 from tempest import config
-from tempest import exceptions
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib import exceptions as lib_exc
-from tempest import test
+from tempest.scenario import manager
 
 CONF = config.CONF
 
 LOG = log.getLogger(__name__)
 
 
-class ScenarioTest(test.BaseTestCase):
+class ScenarioTest(manager.NetworkScenarioTest):
     """Base class for scenario tests. Uses tempest own clients. """
 
-    credentials = ['primary']
-
-    @classmethod
-    def setup_clients(cls):
-        super(ScenarioTest, cls).setup_clients()
-        # Clients (in alphabetical order)
-        cls.flavors_client = cls.os_primary.flavors_client
-        cls.compute_floating_ips_client = (
-            cls.os_primary.compute_floating_ips_client)
-        if CONF.service_available.glance:
-            # Check if glance v1 is available to determine which client to use.
-            if CONF.image_feature_enabled.api_v1:
-                cls.image_client = cls.os_primary.image_client
-            elif CONF.image_feature_enabled.api_v2:
-                cls.image_client = cls.os_primary.image_client_v2
-            else:
-                raise lib_exc.InvalidConfiguration(
-                    'Either api_v1 or api_v2 must be True in '
-                    '[image-feature-enabled].')
-        # Compute image client
-        cls.compute_images_client = cls.os_primary.compute_images_client
-        cls.keypairs_client = cls.os_primary.keypairs_client
-        # Nova security groups client
-        cls.compute_security_groups_client = (
-            cls.os_primary.compute_security_groups_client)
-        cls.compute_security_group_rules_client = (
-            cls.os_primary.compute_security_group_rules_client)
-        cls.servers_client = cls.os_primary.servers_client
-        cls.interface_client = cls.os_primary.interfaces_client
-        # Neutron network client
-        cls.networks_client = cls.os_primary.networks_client
-        cls.ports_client = cls.os_primary.ports_client
-        cls.routers_client = cls.os_primary.routers_client
-        cls.subnets_client = cls.os_primary.subnets_client
-        cls.floating_ips_client = cls.os_primary.floating_ips_client
-        cls.security_groups_client = cls.os_primary.security_groups_client
-        cls.security_group_rules_client = (
-            cls.os_primary.security_group_rules_client)
-
     # ## Test functions library
     #
     # The create_[resource] functions only return body and discard the
     # resp part which is not used in scenario tests
 
-    def _create_port(self, network_id, client=None, namestart='port-quotatest',
-                     **kwargs):
-        if not client:
-            client = self.ports_client
-        name = data_utils.rand_name(namestart)
-        result = client.create_port(
-            name=name,
-            network_id=network_id,
-            **kwargs)
-        self.assertIsNotNone(result, 'Unable to allocate port')
-        port = result['port']
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_port, port['id'])
-        return port
-
-    def create_keypair(self, client=None):
-        if not client:
-            client = self.keypairs_client
-        name = data_utils.rand_name(self.__class__.__name__)
-        # We don't need to create a keypair by pubkey in scenario
-        body = client.create_keypair(name=name)
-        self.addCleanup(client.delete_keypair, name)
-        return body['keypair']
-
-    def create_server(self, name=None, image_id=None, flavor=None,
-                      validatable=False, wait_until='ACTIVE',
-                      clients=None, **kwargs):
-        """Wrapper utility that returns a test server.
-
-        This wrapper utility calls the common create test server and
-        returns a test server. The purpose of this wrapper is to minimize
-        the impact on the code of the tests already using this
-        function.
-        """
-
-        # NOTE(jlanoux): As a first step, ssh checks in the scenario
-        # tests need to be run regardless of the run_validation and
-        # validatable parameters and thus until the ssh validation job
-        # becomes voting in CI. The test resources management and IP
-        # association are taken care of in the scenario tests.
-        # Therefore, the validatable parameter is set to false in all
-        # those tests. In this way create_server just return a standard
-        # server and the scenario tests always perform ssh checks.
-
-        # Needed for the cross_tenant_traffic test:
-        if clients is None:
-            clients = self.os_primary
-
-        if name is None:
-            name = data_utils.rand_name(self.__class__.__name__ + "-server")
-
-        vnic_type = CONF.network.port_vnic_type
-
-        # If vnic_type is configured create port for
-        # every network
-        if vnic_type:
-            ports = []
-
-            create_port_body = {'binding:vnic_type': vnic_type,
-                                'namestart': 'port-smoke'}
-            if kwargs:
-                # Convert security group names to security group ids
-                # to pass to create_port
-                if 'security_groups' in kwargs:
-                    security_groups = (
-                        clients.security_groups_client.list_security_groups(
-                        ).get('security_groups'))
-                    sec_dict = {s['name']: s['id'] for s in security_groups}
-
-                    sec_groups_names = [s['name'] for s in kwargs.pop(
-                        'security_groups')]
-                    security_groups_ids = [sec_dict[s]
-                                           for s in sec_groups_names]
-
-                    if security_groups_ids:
-                        create_port_body[
-                            'security_groups'] = security_groups_ids
-                networks = kwargs.pop('networks', [])
-            else:
-                networks = []
-
-            # If there are no networks passed to us we look up
-            # for the project's private networks and create a port.
-            # The same behaviour as we would expect when passing
-            # the call to the clients with no networks
-            if not networks:
-                networks = clients.networks_client.list_networks(
-                    **{'router:external': False, 'fields': 'id'})['networks']
-
-            # It's net['uuid'] if networks come from kwargs
-            # and net['id'] if they come from
-            # clients.networks_client.list_networks
-            for net in networks:
-                net_id = net.get('uuid', net.get('id'))
-                if 'port' not in net:
-                    port = self._create_port(network_id=net_id,
-                                             client=clients.ports_client,
-                                             **create_port_body)
-                    ports.append({'port': port['id']})
-                else:
-                    ports.append({'port': net['port']})
-            if ports:
-                kwargs['networks'] = ports
-            self.ports = ports
-
-        tenant_network = self.get_tenant_network()
-
-        body, servers = compute.create_test_server(
-            clients,
-            tenant_network=tenant_network,
-            wait_until=wait_until,
-            name=name, flavor=flavor,
-            image_id=image_id, **kwargs)
-
-        self.addCleanup(waiters.wait_for_server_termination,
-                        clients.servers_client, body['id'])
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        clients.servers_client.delete_server, body['id'])
-        server = clients.servers_client.show_server(body['id'])['server']
-        return server
-
-    def _create_loginable_secgroup_rule(self, secgroup_id=None):
-        _client = self.compute_security_groups_client
-        _client_rules = self.compute_security_group_rules_client
-        if secgroup_id is None:
-            sgs = _client.list_security_groups()['security_groups']
-            for sg in sgs:
-                if sg['name'] == 'default':
-                    secgroup_id = sg['id']
-
-        # These rules are intended to permit inbound ssh and icmp
-        # traffic from all sources, so no group_id is provided.
-        # Setting a group_id would only permit traffic from ports
-        # belonging to the same security group.
-        rulesets = [
-            {
-                # ssh
-                'ip_protocol': 'tcp',
-                'from_port': 22,
-                'to_port': 22,
-                'cidr': '0.0.0.0/0',
-            },
-            {
-                # ping
-                'ip_protocol': 'icmp',
-                'from_port': -1,
-                'to_port': -1,
-                'cidr': '0.0.0.0/0',
-            }
-        ]
-        rules = list()
-        for ruleset in rulesets:
-            sg_rule = _client_rules.create_security_group_rule(
-                parent_group_id=secgroup_id, **ruleset)['security_group_rule']
-            rules.append(sg_rule)
-        return rules
-
-    def _create_security_group(self):
-        # Create security group
-        sg_name = data_utils.rand_name(self.__class__.__name__)
-        sg_desc = sg_name + " description"
-        secgroup = self.compute_security_groups_client.create_security_group(
-            name=sg_name, description=sg_desc)['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(secgroup['description'], sg_desc)
-        self.addCleanup(
-            test_utils.call_and_ignore_notfound_exc,
-            self.compute_security_groups_client.delete_security_group,
-            secgroup['id'])
-
-        # Add rules to the security group
-        self._create_loginable_secgroup_rule(secgroup['id'])
-
-        return secgroup
-
-    def get_remote_client(self, ip_address, username=None, private_key=None):
-        """Get a SSH client to a remote server
-
-        @param ip_address the server floating or fixed IP address to use
-                          for ssh validation
-        @param username name of the Linux account on the remote server
-        @param private_key the SSH private key to use
-        @return a RemoteClient object
-        """
-
-        if username is None:
-            username = CONF.validation.image_ssh_user
-        # Set this with 'keypair' or others to log in with keypair or
-        # username/password.
-        if CONF.validation.auth_method == 'keypair':
-            password = None
-            if private_key is None:
-                private_key = self.keypair['private_key']
-        else:
-            password = CONF.validation.image_ssh_password
-            private_key = None
-        linux_client = remote_client.RemoteClient(ip_address, username,
-                                                  pkey=private_key,
-                                                  password=password)
-        try:
-            linux_client.validate_authentication()
-        except Exception as e:
-            message = ('Initializing SSH connection to %(ip)s failed. '
-                       'Error: %(error)s' % {'ip': ip_address,
-                                             'error': e})
-            caller = test_utils.find_test_caller()
-            if caller:
-                message = '(%s) %s' % (caller, message)
-            LOG.exception(message)
-            self._log_console_output()
-            raise
-
-        return linux_client
-
     def _image_create(self, name, fmt, path,
                       disk_format=None, properties=None):
         if properties is None:
@@ -345,206 +83,11 @@
 
         return image
 
-    def _log_console_output(self, servers=None):
-        if not CONF.compute_feature_enabled.console_output:
-            LOG.debug('Console output not supported, cannot log')
-            return
-        if not servers:
-            servers = self.servers_client.list_servers()
-            servers = servers['servers']
-        for server in servers:
-            try:
-                console_output = self.servers_client.get_console_output(
-                    server['id'])['output']
-                LOG.debug('Console output for %s\nbody=\n%s',
-                          server['id'], console_output)
-            except lib_exc.NotFound:
-                LOG.debug("Server %s disappeared(deleted) while looking "
-                          "for the console log", server['id'])
-
     def _log_net_info(self, exc):
         # network debug is called as part of ssh init
         if not isinstance(exc, lib_exc.SSHTimeout):
             LOG.debug('Network information on a devstack host')
 
-    def rebuild_server(self, server_id, image=None,
-                       preserve_ephemeral=False, wait=True,
-                       rebuild_kwargs=None):
-        if image is None:
-            image = CONF.compute.image_ref
-
-        rebuild_kwargs = rebuild_kwargs or {}
-
-        LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
-                  server_id, image, preserve_ephemeral)
-        self.servers_client.rebuild_server(
-            server_id=server_id, image_ref=image,
-            preserve_ephemeral=preserve_ephemeral,
-            **rebuild_kwargs)
-        if wait:
-            waiters.wait_for_server_status(self.servers_client,
-                                           server_id, 'ACTIVE')
-
-    def ping_ip_address(self, ip_address, should_succeed=True,
-                        ping_timeout=None, mtu=None):
-        timeout = ping_timeout or CONF.validation.ping_timeout
-        cmd = ['ping', '-c1', '-w1']
-
-        if mtu:
-            cmd += [
-                # don't fragment
-                '-M', 'do',
-                # ping receives just the size of ICMP payload
-                '-s', str(net_utils.get_ping_payload_size(mtu, 4))
-            ]
-        cmd.append(ip_address)
-
-        def ping():
-            proc = subprocess.Popen(cmd,
-                                    stdout=subprocess.PIPE,
-                                    stderr=subprocess.PIPE)
-            proc.communicate()
-
-            return (proc.returncode == 0) == should_succeed
-
-        caller = test_utils.find_test_caller()
-        LOG.debug('%(caller)s begins to ping %(ip)s in %(timeout)s sec and the'
-                  ' expected result is %(should_succeed)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'should_succeed':
-                      'reachable' if should_succeed else 'unreachable'
-                  })
-        result = test_utils.call_until_true(ping, timeout, 1)
-        LOG.debug('%(caller)s finishes ping %(ip)s in %(timeout)s sec and the '
-                  'ping result is %(result)s', {
-                      'caller': caller, 'ip': ip_address, 'timeout': timeout,
-                      'result': 'expected' if result else 'unexpected'
-                  })
-        return result
-
-    def check_vm_connectivity(self, ip_address,
-                              username=None,
-                              private_key=None,
-                              should_connect=True,
-                              mtu=None):
-        """Check server connectivity
-
-        :param ip_address: server to test against
-        :param username: server's ssh username
-        :param private_key: server's ssh private key to be used
-        :param should_connect: True/False indicates positive/negative test
-            positive - attempt ping and ssh
-            negative - attempt ping and fail if succeed
-        :param mtu: network MTU to use for connectivity validation
-
-        :raises: AssertError if the result of the connectivity check does
-            not match the value of the should_connect param
-        """
-        if should_connect:
-            msg = "Timed out waiting for %s to become reachable" % ip_address
-        else:
-            msg = "ip address %s is reachable" % ip_address
-        self.assertTrue(self.ping_ip_address(ip_address,
-                                             should_succeed=should_connect,
-                                             mtu=mtu),
-                        msg=msg)
-        if should_connect:
-            # no need to check ssh for negative connectivity
-            self.get_remote_client(ip_address, username, private_key)
-
-    def check_public_network_connectivity(self, ip_address, username,
-                                          private_key, should_connect=True,
-                                          msg=None, servers=None, mtu=None):
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        LOG.debug('checking network connections to IP %s with user: %s',
-                  ip_address, username)
-        try:
-            self.check_vm_connectivity(ip_address,
-                                       username,
-                                       private_key,
-                                       should_connect=should_connect,
-                                       mtu=mtu)
-        except Exception:
-            ex_msg = 'Public network connectivity check failed'
-            if msg:
-                ex_msg += ": " + msg
-            LOG.exception(ex_msg)
-            self._log_console_output(servers)
-            raise
-
-    def create_floating_ip(self, thing, pool_name=None):
-        """Create a floating IP and associates to a server on Nova"""
-
-        if not pool_name:
-            pool_name = CONF.network.floating_network_name
-        floating_ip = (self.compute_floating_ips_client.
-                       create_floating_ip(pool=pool_name)['floating_ip'])
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.compute_floating_ips_client.delete_floating_ip,
-                        floating_ip['id'])
-        self.compute_floating_ips_client.associate_floating_ip_to_server(
-            floating_ip['ip'], thing['id'])
-        return floating_ip
-
-    def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                         private_key=None):
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key)
-        if dev_name is not None:
-            ssh_client.make_fs(dev_name)
-            ssh_client.mount(dev_name, mount_path)
-        cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
-        ssh_client.exec_command(cmd_timestamp)
-        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
-                                            % mount_path)
-        if dev_name is not None:
-            ssh_client.umount(mount_path)
-        return timestamp
-
-    def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
-                      private_key=None):
-        ssh_client = self.get_remote_client(ip_address,
-                                            private_key=private_key)
-        if dev_name is not None:
-            ssh_client.mount(dev_name, mount_path)
-        timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
-                                            % mount_path)
-        if dev_name is not None:
-            ssh_client.umount(mount_path)
-        return timestamp
-
-    def get_server_ip(self, server):
-        """Get the server fixed or floating IP.
-
-        Based on the configuration we're in, return a correct ip
-        address for validating that a guest is up.
-        """
-        if CONF.validation.connect_method == 'floating':
-            # The tests calling this method don't have a floating IP
-            # and can't make use of the validation resources. So the
-            # method is creating the floating IP there.
-            return self.create_floating_ip(server)['ip']
-        elif CONF.validation.connect_method == 'fixed':
-            # Determine the network name to look for based on config or creds
-            # provider network resources.
-            if CONF.validation.network_for_ssh:
-                addresses = server['addresses'][
-                    CONF.validation.network_for_ssh]
-            else:
-                creds_provider = self._get_credentials_provider()
-                net_creds = creds_provider.get_primary_creds()
-                network = getattr(net_creds, 'network', None)
-                addresses = (server['addresses'][network['name']]
-                             if network else [])
-            for address in addresses:
-                if (address['version'] == CONF.validation.ip_version_for_ssh
-                        and address['OS-EXT-IPS:type'] == 'fixed'):
-                    return address['addr']
-            raise exceptions.ServerUnreachable(server_id=server['id'])
-        else:
-            raise lib_exc.InvalidConfiguration()
-
 
 class NetworkScenarioTest(ScenarioTest):
     """Base class for network scenario tests.
@@ -558,164 +101,12 @@
 
     """
 
-    credentials = ['primary', 'admin']
-
     @classmethod
     def skip_checks(cls):
         super(NetworkScenarioTest, cls).skip_checks()
         if not CONF.service_available.neutron:
             raise cls.skipException('Neutron not available')
 
-    def _create_network(self, networks_client=None,
-                        tenant_id=None,
-                        namestart='network-smoke-',
-                        port_security_enabled=True):
-        if not networks_client:
-            networks_client = self.networks_client
-        if not tenant_id:
-            tenant_id = networks_client.tenant_id
-        name = data_utils.rand_name(namestart)
-        network_kwargs = dict(name=name, tenant_id=tenant_id)
-        # Neutron disables port security by default so we have to check the
-        # config before trying to create the network with port_security_enabled
-        if CONF.network_feature_enabled.port_security:
-            network_kwargs['port_security_enabled'] = port_security_enabled
-        result = networks_client.create_network(**network_kwargs)
-        network = result['network']
-
-        self.assertEqual(network['name'], name)
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        networks_client.delete_network,
-                        network['id'])
-        return network
-
-    def _create_subnet(self, network, subnets_client=None,
-                       routers_client=None, namestart='subnet-smoke',
-                       **kwargs):
-        """Create a subnet for the given network
-
-        within the cidr block configured for tenant networks.
-        """
-        if not subnets_client:
-            subnets_client = self.subnets_client
-        if not routers_client:
-            routers_client = self.routers_client
-
-        def cidr_in_use(cidr, tenant_id):
-            """Check cidr existence
-
-            :returns: True if subnet with cidr already exist in tenant
-                  False else
-            """
-            cidr_in_use = self.os_admin.subnets_client.list_subnets(
-                tenant_id=tenant_id, cidr=cidr)['subnets']
-            return len(cidr_in_use) != 0
-
-        def _make_create_subnet_request(namestart, network,
-                                        ip_version, subnets_client, **kwargs):
-
-            subnet = dict(
-                name=data_utils.rand_name(namestart),
-                network_id=network['id'],
-                tenant_id=network['tenant_id'],
-                ip_version=ip_version,
-                **kwargs
-            )
-
-            if ip_version == 6:
-                subnet['ipv6_address_mode'] = 'slaac'
-                subnet['ipv6_ra_mode'] = 'slaac'
-
-            try:
-                return subnets_client.create_subnet(**subnet)
-            except lib_exc.Conflict as e:
-                if 'overlaps with another subnet' not in str(e):
-                    raise
-
-        result = None
-        str_cidr = None
-
-        use_default_subnetpool = kwargs.get('use_default_subnetpool', False)
-
-        ip_version = kwargs.pop('ip_version', 4)
-
-        if not use_default_subnetpool:
-
-            if ip_version == 6:
-                tenant_cidr = netaddr.IPNetwork(
-                    CONF.network.project_network_v6_cidr)
-                num_bits = CONF.network.project_network_v6_mask_bits
-            else:
-                tenant_cidr = netaddr.IPNetwork(
-                    CONF.network.project_network_cidr)
-                num_bits = CONF.network.project_network_mask_bits
-
-            # Repeatedly attempt subnet creation with sequential cidr
-            # blocks until an unallocated block is found.
-            for subnet_cidr in tenant_cidr.subnet(num_bits):
-                str_cidr = str(subnet_cidr)
-                if cidr_in_use(str_cidr, tenant_id=network['tenant_id']):
-                    continue
-
-                result = _make_create_subnet_request(
-                    namestart, network, ip_version, subnets_client,
-                    cidr=str_cidr, **kwargs)
-                if result is not None:
-                    break
-        else:
-            result = _make_create_subnet_request(
-                namestart, network, ip_version, subnets_client,
-                **kwargs)
-
-        self.assertIsNotNone(result)
-
-        subnet = result['subnet']
-        if str_cidr is not None:
-            self.assertEqual(subnet['cidr'], str_cidr)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        subnets_client.delete_subnet, subnet['id'])
-
-        return subnet
-
-    def _get_server_port_id_and_ip4(self, server, ip_addr=None):
-        if ip_addr:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'],
-                fixed_ips='ip_address=%s' % ip_addr)['ports']
-        else:
-            ports = self.os_admin.ports_client.list_ports(
-                device_id=server['id'])['ports']
-        # A port can have more than one IP address in some cases.
-        # If the network is dual-stack (IPv4 + IPv6), this port is associated
-        # with 2 subnets
-
-        def _is_active(port):
-            # NOTE(vsaienko) With Ironic, instances live on separate hardware
-            # servers. Neutron does not bind ports for Ironic instances, as a
-            # result the port remains in the DOWN state. This has been fixed
-            # with the introduction of the networking-baremetal plugin but
-            # it's not mandatory (and is not used on all stable branches).
-            return (port['status'] == 'ACTIVE' or
-                    port.get('binding:vnic_type') == 'baremetal')
-
-        port_map = [(p["id"], fxip["ip_address"])
-                    for p in ports
-                    for fxip in p["fixed_ips"]
-                    if (netutils.is_valid_ipv4(fxip["ip_address"]) and
-                        _is_active(p))]
-        inactive = [p for p in ports if p['status'] != 'ACTIVE']
-        if inactive:
-            LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
-        self.assertNotEmpty(port_map,
-                            "No IPv4 addresses found in: %s" % ports)
-        self.assertEqual(len(port_map), 1,
-                         "Found multiple IPv4 addresses: %s. "
-                         "Unable to determine which port to target."
-                         % port_map)
-        return port_map[0]
-
     def _get_network_by_name_or_id(self, identifier):
 
         if uuidutils.is_uuid_like(identifier):
@@ -728,9 +119,6 @@
                             "Unable to get network by name: %s" % identifier)
         return networks[0]
 
-    def get_networks(self):
-        return self.os_admin.networks_client.list_networks()['networks']
-
     def create_floating_ip(self, thing, external_network_id=None,
                            port_id=None, ip_addr=None, client=None):
         """Create a floating IP and associates to a resource/port on Neutron"""
@@ -739,8 +127,8 @@
         if not client:
             client = self.floating_ips_client
         if not port_id:
-            port_id, ip4 = self._get_server_port_id_and_ip4(thing,
-                                                            ip_addr=ip_addr)
+            port_id, ip4 = self.get_server_port_id_and_ip4(thing,
+                                                           ip_addr=ip_addr)
         else:
             ip4 = None
         result = client.create_floatingip(
@@ -755,235 +143,9 @@
                         floating_ip['id'])
         return floating_ip
 
-    def _associate_floating_ip(self, floating_ip, server):
-        port_id, _ = self._get_server_port_id_and_ip4(server)
-        kwargs = dict(port_id=port_id)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertEqual(port_id, floating_ip['port_id'])
-        return floating_ip
-
-    def _disassociate_floating_ip(self, floating_ip):
-        """:param floating_ip: floating_ips_client.create_floatingip"""
-        kwargs = dict(port_id=None)
-        floating_ip = self.floating_ips_client.update_floatingip(
-            floating_ip['id'], **kwargs)['floatingip']
-        self.assertIsNone(floating_ip['port_id'])
-        return floating_ip
-
-    def check_floating_ip_status(self, floating_ip, status):
-        """Verifies floatingip reaches the given status
-
-        :param dict floating_ip: floating IP dict to check status
-        :param status: target status
-        :raises: AssertionError if status doesn't match
-        """
-        floatingip_id = floating_ip['id']
-
-        def refresh():
-            result = (self.floating_ips_client.
-                      show_floatingip(floatingip_id)['floatingip'])
-            return status == result['status']
-
-        test_utils.call_until_true(refresh,
-                                   CONF.network.build_timeout,
-                                   CONF.network.build_interval)
-        floating_ip = self.floating_ips_client.show_floatingip(
-            floatingip_id)['floatingip']
-        self.assertEqual(status, floating_ip['status'],
-                         message="FloatingIP: {fp} is at status: {cst}. "
-                                 "failed  to reach status: {st}"
-                         .format(fp=floating_ip, cst=floating_ip['status'],
-                                 st=status))
-        LOG.info("FloatingIP: {fp} is at status: {st}"
-                 .format(fp=floating_ip, st=status))
-
-    def _check_tenant_network_connectivity(self, server,
-                                           username,
-                                           private_key,
-                                           should_connect=True,
-                                           servers_for_debug=None):
-        if not CONF.network.project_networks_reachable:
-            msg = 'Tenant networks not configured to be reachable.'
-            LOG.info(msg)
-            return
-        # The target login is assumed to have been configured for
-        # key-based authentication by cloud-init.
-        try:
-            for ip_addresses in server['addresses'].values():
-                for ip_address in ip_addresses:
-                    self.check_vm_connectivity(ip_address['addr'],
-                                               username,
-                                               private_key,
-                                               should_connect=should_connect)
-        except Exception as e:
-            LOG.exception('Tenant network connectivity check failed')
-            self._log_console_output(servers_for_debug)
-            self._log_net_info(e)
-            raise
-
-    def _check_remote_connectivity(self, source, dest, should_succeed=True,
-                                   nic=None):
-        """assert ping server via source ssh connection
-
-        Note: This is an internal method.  Use check_remote_connectivity
-        instead.
-
-        :param source: RemoteClient: an ssh connection from which to ping
-        :param dest: and IP to ping against
-        :param should_succeed: boolean should ping succeed or not
-        :param nic: specific network interface to ping from
-        """
-        def ping_remote():
-            try:
-                source.ping_host(dest, nic=nic)
-            except lib_exc.SSHExecCommandFailed:
-                LOG.warning('Failed to ping IP: %s via a ssh connection '
-                            'from: %s.', dest, source.ssh_client.host)
-                return not should_succeed
-            return should_succeed
-
-        return test_utils.call_until_true(ping_remote,
-                                          CONF.validation.ping_timeout,
-                                          1)
-
-    def check_remote_connectivity(self, source, dest, should_succeed=True,
-                                  nic=None):
-        """assert ping server via source ssh connection
-
-        :param source: RemoteClient: an ssh connection from which to ping
-        :param dest: and IP to ping against
-        :param should_succeed: boolean should ping succeed or not
-        :param nic: specific network interface to ping from
-        """
-        result = self._check_remote_connectivity(source, dest, should_succeed,
-                                                 nic)
-        source_host = source.ssh_client.host
-        if should_succeed:
-            msg = ("Timed out waiting for %s to become reachable from %s"
-                   % (dest, source_host))
-        else:
-            msg = "%s is reachable from %s" % (dest, source_host)
-        self.assertTrue(result, msg)
-
-    def _create_security_group(self, security_group_rules_client=None,
-                               tenant_id=None,
-                               namestart='secgroup-smoke',
-                               security_groups_client=None):
-        if security_group_rules_client is None:
-            security_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if tenant_id is None:
-            tenant_id = security_groups_client.tenant_id
-        secgroup = self._create_empty_security_group(
-            namestart=namestart, client=security_groups_client,
-            tenant_id=tenant_id)
-
-        # Add rules to the security group
-        rules = self._create_loginable_secgroup_rule(
-            security_group_rules_client=security_group_rules_client,
-            secgroup=secgroup,
-            security_groups_client=security_groups_client)
-        for rule in rules:
-            self.assertEqual(tenant_id, rule['tenant_id'])
-            self.assertEqual(secgroup['id'], rule['security_group_id'])
-        return secgroup
-
-    def _create_empty_security_group(self, client=None, tenant_id=None,
-                                     namestart='secgroup-smoke'):
-        """Create a security group without rules.
-
-        Default rules will be created:
-         - IPv4 egress to any
-         - IPv6 egress to any
-
-        :param tenant_id: secgroup will be created in this tenant
-        :returns: the created security group
-        """
-        if client is None:
-            client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        sg_name = data_utils.rand_name(namestart)
-        sg_desc = sg_name + " description"
-        sg_dict = dict(name=sg_name,
-                       description=sg_desc)
-        sg_dict['tenant_id'] = tenant_id
-        result = client.create_security_group(**sg_dict)
-
-        secgroup = result['security_group']
-        self.assertEqual(secgroup['name'], sg_name)
-        self.assertEqual(tenant_id, secgroup['tenant_id'])
-        self.assertEqual(secgroup['description'], sg_desc)
-
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_security_group, secgroup['id'])
-        return secgroup
-
-    def _default_security_group(self, client=None, tenant_id=None):
-        """Get default secgroup for given tenant_id.
-
-        :returns: default secgroup for given tenant
-        """
-        if client is None:
-            client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        sgs = [
-            sg for sg in list(client.list_security_groups().values())[0]
-            if sg['tenant_id'] == tenant_id and sg['name'] == 'default'
-        ]
-        msg = "No default security group for tenant %s." % (tenant_id)
-        self.assertGreater(len(sgs), 0, msg)
-        return sgs[0]
-
-    def _create_security_group_rule(self, secgroup=None,
-                                    sec_group_rules_client=None,
-                                    tenant_id=None,
-                                    security_groups_client=None, **kwargs):
-        """Create a rule from a dictionary of rule parameters.
-
-        Create a rule in a secgroup. if secgroup not defined will search for
-        default secgroup in tenant_id.
-
-        :param secgroup: the security group.
-        :param tenant_id: if secgroup not passed -- the tenant in which to
-            search for default secgroup
-        :param kwargs: a dictionary containing rule parameters:
-            for example, to allow incoming ssh:
-            rule = {
-                    direction: 'ingress'
-                    protocol:'tcp',
-                    port_range_min: 22,
-                    port_range_max: 22
-                    }
-        """
-        if sec_group_rules_client is None:
-            sec_group_rules_client = self.security_group_rules_client
-        if security_groups_client is None:
-            security_groups_client = self.security_groups_client
-        if not tenant_id:
-            tenant_id = security_groups_client.tenant_id
-        if secgroup is None:
-            secgroup = self._default_security_group(
-                client=security_groups_client, tenant_id=tenant_id)
-
-        ruleset = dict(security_group_id=secgroup['id'],
-                       tenant_id=secgroup['tenant_id'])
-        ruleset.update(kwargs)
-
-        sg_rule = sec_group_rules_client.create_security_group_rule(**ruleset)
-        sg_rule = sg_rule['security_group_rule']
-
-        self.assertEqual(secgroup['tenant_id'], sg_rule['tenant_id'])
-        self.assertEqual(secgroup['id'], sg_rule['security_group_id'])
-
-        return sg_rule
-
-    def _create_loginable_secgroup_rule(self, security_group_rules_client=None,
-                                        secgroup=None,
-                                        security_groups_client=None):
+    def create_loginable_secgroup_rule(self, security_group_rules_client=None,
+                                       secgroup=None,
+                                       security_groups_client=None):
         """Create loginable security group rule
 
         This function will create:
@@ -1027,7 +189,7 @@
             for r_direction in ['ingress', 'egress']:
                 ruleset['direction'] = r_direction
                 try:
-                    sg_rule = self._create_security_group_rule(
+                    sg_rule = self.create_security_group_rule(
                         sec_group_rules_client=sec_group_rules_client,
                         secgroup=secgroup,
                         security_groups_client=security_groups_client,
@@ -1042,106 +204,3 @@
                     rules.append(sg_rule)
 
         return rules
-
-    def _get_router(self, client=None, tenant_id=None):
-        """Retrieve a router for the given tenant id.
-
-        If a public router has been configured, it will be returned.
-
-        If a public router has not been configured, but a public
-        network has, a tenant router will be created and returned that
-        routes traffic to the public network.
-        """
-        if not client:
-            client = self.routers_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        router_id = CONF.network.public_router_id
-        network_id = CONF.network.public_network_id
-        if router_id:
-            body = client.show_router(router_id)
-            return body['router']
-        elif network_id:
-            router = self._create_router(client, tenant_id)
-            kwargs = {'external_gateway_info': dict(network_id=network_id)}
-            router = client.update_router(router['id'], **kwargs)['router']
-            return router
-        else:
-            raise Exception("Neither of 'public_router_id' or "
-                            "'public_network_id' has been defined.")
-
-    def _create_router(self, client=None, tenant_id=None,
-                       namestart='router-smoke'):
-        if not client:
-            client = self.routers_client
-        if not tenant_id:
-            tenant_id = client.tenant_id
-        name = data_utils.rand_name(namestart)
-        result = client.create_router(name=name,
-                                      admin_state_up=True,
-                                      tenant_id=tenant_id)
-        router = result['router']
-        self.assertEqual(router['name'], name)
-        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        client.delete_router,
-                        router['id'])
-        return router
-
-    def _update_router_admin_state(self, router, admin_state_up):
-        kwargs = dict(admin_state_up=admin_state_up)
-        router = self.routers_client.update_router(
-            router['id'], **kwargs)['router']
-        self.assertEqual(admin_state_up, router['admin_state_up'])
-
-    def create_networks(self, networks_client=None,
-                        routers_client=None, subnets_client=None,
-                        tenant_id=None, dns_nameservers=None,
-                        port_security_enabled=True):
-        """Create a network with a subnet connected to a router.
-
-        The baremetal driver is a special case since all nodes are
-        on the same shared network.
-
-        :param tenant_id: id of tenant to create resources in.
-        :param dns_nameservers: list of dns servers to send to subnet.
-        :returns: network, subnet, router
-        """
-        if CONF.network.shared_physical_network:
-            # NOTE(Shrews): This exception is for environments where tenant
-            # credential isolation is available, but network separation is
-            # not (the current baremetal case). Likely can be removed when
-            # test account mgmt is reworked:
-            # https://blueprints.launchpad.net/tempest/+spec/test-accounts
-            if not CONF.compute.fixed_network_name:
-                m = 'fixed_network_name must be specified in config'
-                raise lib_exc.InvalidConfiguration(m)
-            network = self._get_network_by_name_or_id(
-                CONF.compute.fixed_network_name)
-            router = None
-            subnet = None
-        else:
-            network = self._create_network(
-                networks_client=networks_client,
-                tenant_id=tenant_id,
-                port_security_enabled=port_security_enabled)
-            router = self._get_router(client=routers_client,
-                                      tenant_id=tenant_id)
-            subnet_kwargs = dict(network=network,
-                                 subnets_client=subnets_client,
-                                 routers_client=routers_client)
-            # use explicit check because empty list is a valid option
-            if dns_nameservers is not None:
-                subnet_kwargs['dns_nameservers'] = dns_nameservers
-            subnet = self._create_subnet(**subnet_kwargs)
-            if not routers_client:
-                routers_client = self.routers_client
-            router_id = router['id']
-            routers_client.add_router_interface(router_id,
-                                                subnet_id=subnet['id'])
-
-            # save a cleanup job to remove this association between
-            # router and subnet
-            self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                            routers_client.remove_router_interface, router_id,
-                            subnet_id=subnet['id'])
-        return network, subnet, router
diff --git a/manila_tempest_tests/tests/scenario/manager_share.py b/manila_tempest_tests/tests/scenario/manager_share.py
index 657b1f1..a847217 100644
--- a/manila_tempest_tests/tests/scenario/manager_share.py
+++ b/manila_tempest_tests/tests/scenario/manager_share.py
@@ -49,6 +49,8 @@
     def setup_clients(cls):
         super(ShareScenarioTest, cls).setup_clients()
 
+        cls.compute_floating_ips_client = (
+            cls.os_primary.compute_floating_ips_client)
         # Manila clients
         cls.shares_client = cls.os_primary.share_v1.SharesClient()
         cls.shares_v2_client = cls.os_primary.share_v2.SharesV2Client()
@@ -108,19 +110,19 @@
 
         # Tests need to be able to ssh into the VM - so we need
         # a security group, and a tenant private network
-        self.security_group = self._create_security_group()
-        self.network = self._create_network(namestart="manila-share")
+        self.security_group = self.create_security_group()
+        self.network = self.create_network(namestart="manila-share")
         # When not using a "storage network" to connect shares to VMs,
         # we need the subnet to match the IP version we're testing
         subnet_ip_params = {} if self.storage_network else {
             'ip_version': self.ip_version,
             'use_default_subnetpool': self.ipv6_enabled
         }
-        self.subnet = self._create_subnet(
+        self.subnet = self.create_subnet(
             network=self.network,
             namestart="manila-share-sub",
             **subnet_ip_params)
-        router = self._get_router()
+        router = self.get_router()
         self._create_router_interface(subnet_id=self.subnet['id'],
                                       router_id=router['id'])
 
@@ -192,7 +194,8 @@
         remote_client = self.get_remote_client(
             server_or_ip=server_ip,
             username=self.ssh_user,
-            private_key=self.keypair['private_key'])
+            private_key=self.keypair['private_key'],
+            server=instance)
 
         # NOTE(u_glide): Workaround for bug #1465682
         remote_client = remote_client.ssh_client
@@ -307,7 +310,7 @@
             linux_client.validate_authentication()
         except Exception:
             LOG.exception('Initializing SSH connection to %s failed', ip)
-            self._log_console_output()
+            self.log_console_output()
             raise
 
         return linux_client
@@ -637,7 +640,7 @@
         if not client:
             client = self.routers_client
         if not router_id:
-            router_id = self._get_router()['id']
+            router_id = self.get_router()['id']
         client.add_router_interface(router_id, subnet_id=subnet_id)
         self.addCleanup(
             client.remove_router_interface, router_id, subnet_id=subnet_id)
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 0000000..bc1dc04
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+  roles:
+    - enable-fips
diff --git a/requirements.txt b/requirements.txt
index fd67fab..6922d70 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,4 +6,4 @@
 
 ddt>=1.0.1 # MIT
 oslo.log>=3.36.0 # Apache-2.0
-tempest>=17.1.0 # Apache-2.0
+tempest>=27.0.0 # Apache-2.0
diff --git a/zuul.d/manila-tempest-jobs.yaml b/zuul.d/manila-tempest-jobs.yaml
index a68aafe..6d786fd 100644
--- a/zuul.d/manila-tempest-jobs.yaml
+++ b/zuul.d/manila-tempest-jobs.yaml
@@ -117,7 +117,8 @@
         tempest: true
 
 - job:
-    name: manila-tempest-plugin-zfsonlinux
+    name: manila-tempest-plugin-zfsonlinux-base
+    abstract: true
     description: Test ZFSOnLinux multibackend (DHSS=False) with postgresql db
     parent: manila-tempest-plugin-base
     vars:
@@ -162,11 +163,18 @@
               image_password: manila
 
 - job:
-    name: manila-tempest-plugin-lvm
+    name: manila-tempest-plugin-zfsonlinux
+    description: Test ZFSOnLinux multibackend (DHSS=False) with postgresql db
+    parent: manila-tempest-plugin-zfsonlinux-base
+    branches: &ubuntu_jammy_test_image_branches ^(?!stable/(yoga|xena|wallaby|victoria|ussuri)).*$
+
+- job:
+    name: manila-tempest-plugin-lvm-base
     description: |
       Test LVM multibackend (DHSS=False) in a 4+6 (dual-stack) devstack
       environment with IPv6 control plane endpoints.
     parent: manila-tempest-plugin-ipv6-base
+    abstract: true
     required-projects:
       - openstack/neutron-dynamic-routing
     vars:
@@ -206,6 +214,14 @@
               image_password: manila
 
 - job:
+    name: manila-tempest-plugin-lvm
+    description: |
+      Test LVM multibackend (DHSS=False) in a 4+6 (dual-stack) devstack
+      environment with IPv6 control plane endpoints.
+    branches: *ubuntu_jammy_test_image_branches
+    parent: manila-tempest-plugin-lvm-base
+
+- job:
     name: manila-tempest-plugin-container
     description: |
       Test the container driver multibackend (DHSS=True) with CIFS
@@ -250,12 +266,13 @@
               run_network_allocation_update_tests: true
 
 - job:
-    name: manila-tempest-plugin-generic
+    name: manila-tempest-plugin-generic-base
+    abstract: true
     description: |
       Test the generic driver multibackend (DHSS=True) with NFS and CIFS
     parent: manila-tempest-plugin-base
     vars:
-      tempest_test_regex: '(^manila_tempest_tests.tests)(?=.*\[.*\bbackend\b.*\])'
+      tempest_test_regex: '(^manila_tempest_tests.tests.api)(?=.*\[.*\bbackend\b.*\])'
       # The generic driver uses nova VMs as share servers; running with a
       # high concurrency could starve the driver of RAM/Disk/CPUs to
       # function properly in a small single node devstack VM.
@@ -289,7 +306,38 @@
               image_password: manila
 
 - job:
-    name: manila-tempest-plugin-cephfs-native
+    name: manila-tempest-plugin-generic
+    description: |
+      Test the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-base
+    branches: *ubuntu_jammy_test_image_branches
+
+- job:
+    name: manila-tempest-plugin-generic-scenario-base
+    abstract: true
+    description: |
+      Test the scenario test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-base
+    vars:
+      tempest_test_regex: '(^manila_tempest_tests.tests.scenario)(?=.*\[.*\bbackend\b.*\])'
+      # The generic driver uses nova VMs as share servers; running with a
+      # high concurrency could starve the driver of RAM/Disk/CPUs to
+      # function properly in a small single node devstack VM.
+      tempest_concurrency: 1
+
+- job:
+    name: manila-tempest-plugin-generic-scenario
+    description: |
+      Test the scenario test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-scenario-base
+    branches: *ubuntu_jammy_test_image_branches
+
+- job:
+    name: manila-tempest-plugin-cephfs-native-base
+    abstract: true
     description: Test CephFS Native (DHSS=False)
     parent: manila-tempest-plugin-base
     required-projects:
@@ -327,9 +375,15 @@
               enable_protocols: cephfs
               image_password: manila
 
+- job:
+    name: manila-tempest-plugin-cephfs-native
+    description: Test CephFS Native (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-native-base
+    branches: *ubuntu_jammy_test_image_branches
 
 - job:
-    name: manila-tempest-plugin-cephfs-nfs
+    name: manila-tempest-plugin-cephfs-nfs-base
+    abstract: true
     description: Test CephFS NFS (DHSS=False)
     parent: manila-tempest-plugin-base
     required-projects:
@@ -374,6 +428,12 @@
               image_password: manila
 
 - job:
+    name: manila-tempest-plugin-cephfs-nfs
+    description: Test CephFS NFS (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-nfs-base
+    branches: *ubuntu_jammy_test_image_branches
+
+- job:
     name: manila-tempest-plugin-dummy-no-dhss
     description: Test the Dummy driver with DHSS=False
     parent: manila-tempest-plugin-standalone-base
@@ -559,3 +619,53 @@
               enable_protocols: nfs
               # read-only access rules not supported
               enable_ro_access_level_for_protocols: ""
+
+- job:
+    name: manila-tempest-plugin-lvm-fips-base
+    parent: manila-tempest-plugin-lvm-base
+    nodeset: devstack-single-node-centos-9-stream
+    description: |
+      Run LVM DHSS false tests for manila project using FIPS.
+    pre-run:
+      - playbooks/enable-fips.yaml
+    vars:
+      nslookup_target: 'opendev.org'
+      devstack_localrc:
+        MANILA_SETUP_IPV6: false
+        NEUTRON_CREATE_INITIAL_NETWORKS: true
+        IP_VERSION: 4
+      configure_swap_size: 4096
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            validation:
+              ssh_key_type: 'ecdsa'
+            share:
+              run_ipv6_tests: false
+
+- job:
+    name: manila-tempest-plugin-lvm-fips
+    parent: manila-tempest-plugin-lvm-fips-base
+    branches: ^(?!stable/(yoga|xena|wallaby|victoria|ussuri)).*$
+
+- project-template:
+    name: manila-tempest-plugin-jobs-using-service-image
+    description: |
+      Runs jobs that will also perform scenario tests in the branches that are
+      newer than Yoga.
+    check:
+      jobs:
+        - manila-tempest-plugin-lvm
+        - manila-tempest-plugin-generic-scenario:
+            voting: false
+        - manila-tempest-plugin-generic:
+            voting: false
+        - manila-tempest-plugin-cephfs-native:
+            voting: false
+        - manila-tempest-plugin-cephfs-nfs:
+            voting: false
+        - manila-tempest-plugin-zfsonlinux:
+            voting: false
+    gate:
+      jobs:
+        - manila-tempest-plugin-lvm
diff --git a/zuul.d/manila-tempest-stable-jobs.yaml b/zuul.d/manila-tempest-stable-jobs.yaml
index 3d2447e..aa33c16 100644
--- a/zuul.d/manila-tempest-stable-jobs.yaml
+++ b/zuul.d/manila-tempest-stable-jobs.yaml
@@ -1,27 +1,117 @@
 # Stable branch jobs to test the trunk version of manila-tempest-plugin against
 # released stable branches of manila
+
+- job:
+    name: manila-tempest-plugin-generic-scenario-stable
+    description: |
+      Test the scenario test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-scenario-base
+    branches: &manila_tempest_image_pinned_branches ^(stable/(yoga|xena|wallaby|victoria|ussuri)).*$
+    vars: &manila_tempest_image_pinned_vars
+      devstack_localrc:
+        # NOTE(carloss): Pinning manila service image to a Focal version,
+        # since on Zed we moved to Ubuntu Jammy (22), and it requires more
+        # VM resources.
+        MANILA_SERVICE_IMAGE_URL: https://tarballs.opendev.org/openstack/manila-image-elements/images/manila-service-image-1.3.0-76-ga216835.qcow2
+        MANILA_SERVICE_IMAGE_NAME: manila-service-image-1.3.0-76-ga216835
+
+- job:
+    name: manila-tempest-plugin-generic-stable
+    description: |
+      Test the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-base
+    branches: *manila_tempest_image_pinned_branches
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-lvm-stable
+    # NOTE(carloss): we are aware that focal is the current default, but
+    # in order to avoid breakages when devstack-minimal switches to a newer
+    # branch, we are pinning focal here.
+    nodeset: openstack-single-node-focal
+    description: |
+      Test LVM multibackend (DHSS=False) in a 4+6 (dual-stack) devstack
+      environment with IPv6 control plane endpoints.
+    branches: *manila_tempest_image_pinned_branches
+    parent: manila-tempest-plugin-lvm-base
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-cephfs-native-stable
+    description: Test CephFS Native (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-native-base
+    branches: *manila_tempest_image_pinned_branches
+    vars:
+      devstack_localrc:
+        # NOTE(gouthamr): The following need to use the latest images, however, there's a bug
+        # with cephfs on Ubuntu 20.04 LTS: https://tracker.ceph.com/issues/47236
+        # the latest image is built as https://tarballs.opendev.org/openstack/manila-image-elements/images/manila-service-image-cephfs-master.qcow2
+        MANILA_SERVICE_IMAGE_URL: https://tarballs.opendev.org/openstack/manila-image-elements/images/manila-service-image-cephfs-1.3.0-58-g2859569.qcow2
+        MANILA_SERVICE_IMAGE_NAME: manila-service-image-cephfs-1.3.0-58-g2859569
+
+- job:
+    name: manila-tempest-plugin-cephfs-nfs-stable
+    description: Test CephFS NFS (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-nfs-base
+    branches: *manila_tempest_image_pinned_branches
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-zfsonlinux-stable
+    description: Test ZFSOnLinux multibackend (DHSS=False) with postgresql db
+    parent: manila-tempest-plugin-zfsonlinux-base
+    branches: *manila_tempest_image_pinned_branches
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-lvm-fips-stable
+    parent: manila-tempest-plugin-lvm-fips-base
+    branches: ^(stable/(yoga|xena|wallaby)).*$
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-lvm-yoga
+    parent: manila-tempest-plugin-lvm-base
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/yoga
+    vars:
+      tempest_exclude_regex: "(^manila_tempest_tests.tests.scenario.*IPv6.*)"
+      <<: *manila_tempest_image_pinned_vars
+
 - job:
     name: manila-tempest-plugin-lvm-xena
-    parent: manila-tempest-plugin-lvm
-    override-checkout: stable/xena
+    parent: manila-tempest-plugin-lvm-base
     nodeset: openstack-single-node-focal
-    vars:
-        # NOTE(gouthamr): Disabled until https://launchpad.net/bugs/1940324 is
-        # fixed.
-        tempest_exclude_regex: "(^manila_tempest_tests.tests.scenario.*IPv6.*)"
+    override-checkout: stable/xena
+    vars: *manila_tempest_image_pinned_vars
 
 - job:
     name: manila-tempest-plugin-lvm-wallaby
-    parent: manila-tempest-plugin-lvm
+    parent: manila-tempest-plugin-lvm-base
+    nodeset: openstack-single-node-focal
     override-checkout: stable/wallaby
-    nodeset: openstack-single-node-focal
-    vars:
-        tempest_exclude_regex: ''
+    vars: *manila_tempest_image_pinned_vars
 
-- job:
-    name: manila-tempest-plugin-lvm-victoria
-    parent: manila-tempest-plugin-lvm
-    override-checkout: stable/victoria
-    nodeset: openstack-single-node-focal
-    vars:
-        tempest_exclude_regex: ''
+- project-template:
+    name: manila-tempest-plugin-jobs-using-service-image-stable
+    description: |
+      Runs jobs that will also perform scenario tests in the stable branches
+      older than Yoga.
+    check:
+      jobs:
+        - manila-tempest-plugin-lvm-stable
+        - manila-tempest-plugin-generic-scenario-stable:
+            voting: false
+        - manila-tempest-plugin-generic-stable:
+            voting: false
+        - manila-tempest-plugin-cephfs-native-stable:
+            voting: false
+        - manila-tempest-plugin-cephfs-nfs-stable:
+            voting: false
+        - manila-tempest-plugin-zfsonlinux-stable:
+            voting: false
+    gate:
+      jobs:
+        - manila-tempest-plugin-lvm-stable
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index ce21547..9db197c 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -2,32 +2,23 @@
     templates:
       - check-requirements
       - tempest-plugin-jobs
+      - manila-tempest-plugin-jobs-using-service-image
+    queue: manila
     check:
       jobs:
         - manila-tempest-plugin-dummy-no-dhss
         - manila-tempest-plugin-dummy-dhss
-        - manila-tempest-plugin-lvm
+        - manila-tempest-plugin-lvm-yoga
         - manila-tempest-plugin-lvm-xena
         - manila-tempest-plugin-lvm-wallaby
-        - manila-tempest-plugin-lvm-victoria
-        - manila-tempest-plugin-zfsonlinux:
-            voting: false
-        - manila-tempest-plugin-cephfs-native:
-            voting: false
-        - manila-tempest-plugin-cephfs-nfs:
-            voting: false
         - manila-tempest-plugin-container:
             voting: false
-        - manila-tempest-plugin-generic:
-            voting: false
         - manila-tempest-plugin-glusterfs-nfs:
             voting: false
     gate:
-      queue: manila
       jobs:
         - manila-tempest-plugin-dummy-no-dhss
         - manila-tempest-plugin-dummy-dhss
-        - manila-tempest-plugin-lvm
     experimental:
       jobs:
         - manila-tempest-plugin-glusterfs-native: