Merge "Remove get_networks"
diff --git a/manila_tempest_tests/common/remote_client.py b/manila_tempest_tests/common/remote_client.py
index 30e64c7..573cdd6 100644
--- a/manila_tempest_tests/common/remote_client.py
+++ b/manila_tempest_tests/common/remote_client.py
@@ -70,8 +70,17 @@
         self.server = server
         self.servers_client = servers_client
         self.log_console = CONF.compute_feature_enabled.console_output
+        kwargs = {}
 
-        self.ssh_client = ssh.Client(ip_address, username, password, pkey=pkey)
+        try:
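+            # Newer tempest releases make the SSH key type configurable;
+            # forward it to the ssh client when the option exists.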
+            kwargs['ssh_key_type'] = CONF.validation.ssh_key_type
+        except Exception:
+            # Not all versions of tempest support the
+            # "validation.ssh_key_type" config option
+            pass
+
+        self.ssh_client = ssh.Client(
+            ip_address, username, password, pkey=pkey, **kwargs)
 
     @debug_ssh
     def exec_command(self, cmd):
diff --git a/manila_tempest_tests/common/waiters.py b/manila_tempest_tests/common/waiters.py
index 24b30de..8a97c8e 100644
--- a/manila_tempest_tests/common/waiters.py
+++ b/manila_tempest_tests/common/waiters.py
@@ -217,3 +217,31 @@
                            'timeout': client.build_timeout,
                        })
             raise exceptions.TimeoutException(message)
+
+
+def wait_for_subnet_create_check(client, share_network_id,
+                                 neutron_net_id=None,
+                                 neutron_subnet_id=None,
+                                 availability_zone=None):
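+    """Wait for a share network subnet create check to finish.
+
+    The server runs this check asynchronously: 'compatible' is assumed to
+    stay unset while the check is in progress and to become True or False
+    once it completes.
+    """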
+    result = client.subnet_create_check(
+        share_network_id, neutron_net_id=neutron_net_id,
+        neutron_subnet_id=neutron_subnet_id,
+        availability_zone=availability_zone)
+    start = int(time.time())
+    while not result['compatible']:
+        time.sleep(client.build_interval)
+        result = client.subnet_create_check(
+            share_network_id, neutron_net_id=neutron_net_id,
+            neutron_subnet_id=neutron_subnet_id,
+            availability_zone=availability_zone)
+        if result['compatible']:
+            break
+        elif int(time.time()) - start >= client.build_timeout or (
+                result['compatible'] is False):
+            message = ('Subnet create check did not pass within the '
+                       'required time of %(timeout)s seconds for share '
+                       'network %(share_network)s.' % {
+                           'timeout': client.build_timeout,
+                           'share_network': share_network_id,
+                       })
+            raise exceptions.TimeoutException(message)
diff --git a/manila_tempest_tests/config.py b/manila_tempest_tests/config.py
index f5022af..b62c409 100644
--- a/manila_tempest_tests/config.py
+++ b/manila_tempest_tests/config.py
@@ -40,7 +40,7 @@
                     "This value is only used to validate the versions "
                     "response from Manila."),
     cfg.StrOpt("max_api_microversion",
-               default="2.69",
+               default="2.71",
                help="The maximum api microversion is configured to be the "
                     "value of the latest microversion supported by Manila."),
     cfg.StrOpt("region",
@@ -287,6 +287,16 @@
                 default=False,
                 help="Defines whether to run share servers migration tests. "
                      "Enable this option if the used driver supports it."),
+    cfg.BoolOpt("run_share_server_multiple_subnet_tests",
+                default=False,
+                help="Defines whether to run the share server multiple "
+                     "subnets tests. Enable this option if the used driver "
+                     "supports it."),
+    cfg.BoolOpt("run_network_allocation_update_tests",
+                default=False,
+                help="Defines whether to run the network allocation update "
+                     "tests. Enable this option if the used driver "
+                     "supports it."),
 
     cfg.StrOpt("image_with_share_tools",
                default="manila-service-image-master",
diff --git a/manila_tempest_tests/services/share/json/shares_client.py b/manila_tempest_tests/services/share/json/shares_client.py
index 3413387..6871eda 100644
--- a/manila_tempest_tests/services/share/json/shares_client.py
+++ b/manila_tempest_tests/services/share/json/shares_client.py
@@ -42,6 +42,10 @@
         self.share_network_id = CONF.share.share_network_id
         self.share_size = CONF.share.share_size
 
+    def _parse_resp(self, body, top_key_to_verify=None):
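+        # Thin pass-through exposing the tempest base client's optional
+        # 'top_key_to_verify' argument (assumes a tempest release that
+        # already accepts it).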
+        return super(SharesClient, self)._parse_resp(
+            body, top_key_to_verify=top_key_to_verify)
+
     def create_share(self, share_protocol=None, size=None,
                      name=None, snapshot_id=None, description=None,
                      metadata=None, share_network_id=None,
@@ -446,7 +450,7 @@
     def get_metadata_item(self, share_id, key):
         resp, body = self.get("shares/%s/metadata/%s" % (share_id, key))
         self.expected_success(200, resp.status)
-        return self._parse_resp(body)
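+        # The response nests the item under a top-level 'meta' key, so ask
+        # the parser to verify that key explicitly.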
+        return self._parse_resp(body, top_key_to_verify='meta')
 
 ###############
 
diff --git a/manila_tempest_tests/services/share/v2/json/shares_client.py b/manila_tempest_tests/services/share/v2/json/shares_client.py
index 6db5393..a87648c 100644
--- a/manila_tempest_tests/services/share/v2/json/shares_client.py
+++ b/manila_tempest_tests/services/share/v2/json/shares_client.py
@@ -1957,6 +1957,29 @@
         self.expected_success(202, resp.status)
         return rest_client.ResponseBody(resp, body)
 
+    def subnet_create_check(
+        self, share_network_id, neutron_net_id=None,
+        neutron_subnet_id=None, availability_zone=None,
+        reset=False, version=LATEST_MICROVERSION):
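+        # 'share_network_subnet_create_check' is an experimental share
+        # network action, available from API microversion 2.70.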
+        body = {
+            'share_network_subnet_create_check': {
+                'neutron_net_id': neutron_net_id,
+                'neutron_subnet_id': neutron_subnet_id,
+                'availability_zone': availability_zone,
+                'reset': reset,
+            }
+        }
+
+        body = json.dumps(body)
+        resp, body = self.post(
+            f'share-networks/{share_network_id}/action',
+            body, headers=EXPERIMENTAL, extra_headers=True,
+            version=version)
+        self.expected_success(202, resp.status)
+
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
 ###############
 
     def share_server_migration_check(
diff --git a/manila_tempest_tests/tests/api/admin/test_share_instances.py b/manila_tempest_tests/tests/api/admin/test_share_instances.py
index 38c3f49..4f2e454 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_instances.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_instances.py
@@ -95,6 +95,8 @@
             expected_keys.append("cast_rules_to_readonly")
         if utils.is_microversion_ge(version, '2.54'):
             expected_keys.append("progress")
+        if utils.is_microversion_ge(version, '2.71'):
+            expected_keys.append("updated_at")
         expected_keys = sorted(expected_keys)
         actual_keys = sorted(si.keys())
         self.assertEqual(expected_keys, actual_keys,
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers.py b/manila_tempest_tests/tests/api/admin/test_share_servers.py
index b406a92..01cc9fc 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers.py
@@ -196,8 +196,13 @@
         if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.49"):
             keys.append("is_auto_deletable")
             keys.append("identifier")
-        if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.51"):
+        if utils.is_microversion_ge(
+            CONF.share.max_api_microversion, "2.51") and (
+                utils.is_microversion_lt(
+                    CONF.share.max_api_microversion, "2.70")):
             keys.append("share_network_subnet_id")
+        if utils.is_microversion_ge(CONF.share.max_api_microversion, "2.70"):
+            keys.append("share_network_subnet_ids")
         # all expected keys are present
         for key in keys:
             self.assertIn(key, server.keys())
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_manage.py b/manila_tempest_tests/tests/api/admin/test_share_servers_manage.py
index 126dee1..8fdc22b 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_manage.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_manage.py
@@ -66,6 +66,12 @@
             msg = ("Manage share server with share network subnet is "
                    "supported starting from microversion '2.51'.")
             raise self.skipException(msg)
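+        # Starting with microversion 2.70, share servers report a list of
+        # subnet ids ('share_network_subnet_ids') instead of a single id.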
+        check_multiple_subnet = utils.is_microversion_ge(
+            CONF.share.max_api_microversion, '2.70')
+        if check_multiple_subnet:
+            network_subnet = 'share_network_subnet_ids'
+        else:
+            network_subnet = 'share_network_subnet_id'
         # create a new share network to make sure that a new share server
         # will be created
         original_share_network = self.shares_v2_client.get_share_network(
@@ -91,7 +97,7 @@
                 neutron_subnet_id=share_network['neutron_subnet_id'],
                 availability_zone=az
             )['share_network_subnet']
-            params = {'share_network_subnet_id': az_subnet['id']}
+            params = {network_subnet: az_subnet['id']}
 
         # create share
         share = self.create_share(
@@ -119,7 +125,7 @@
             "identifier",
         ]
         if add_subnet_field:
-            keys.append('share_network_subnet_id')
+            keys.append(network_subnet)
         # all expected keys are present
         for key in keys:
             self.assertIn(key, share_server)
@@ -127,9 +133,10 @@
         # check that the share server is initially auto-deletable
         self.assertIs(True, share_server["is_auto_deletable"])
         self.assertIsNotNone(share_server["identifier"])
-        if add_subnet_field:
-            self.assertEqual(az_subnet["id"],
-                             share_server["share_network_subnet_id"])
+        if add_subnet_field and check_multiple_subnet:
+            self.assertIn(az_subnet["id"], share_server[network_subnet])
+        elif add_subnet_field and not check_multiple_subnet:
+            self.assertEqual(az_subnet["id"], share_server[network_subnet])
 
         self._unmanage_share_and_wait(share)
 
diff --git a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
index 99d712c..2535745 100644
--- a/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
+++ b/manila_tempest_tests/tests/api/admin/test_share_servers_migration.py
@@ -95,14 +95,11 @@
         # protocols.
         access_rules = self._get_access_rule_data_for_protocols()
         for rule in access_rules:
-            self.shares_v2_client.create_access_rule(
+            self.allow_access(
                 share['id'], access_type=rule.get('access_type'),
                 access_to=rule.get('access_to'),
                 access_level=rule.get('access_level')
             )
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, share['id'], constants.RULE_STATE_ACTIVE,
-            status_attr='access_rules_status')
 
         share = self.shares_v2_client.get_share(share['id'])['share']
 
@@ -124,8 +121,8 @@
             self.assertIn(snapshot['status'], statuses)
 
     def _validate_share_server_migration_complete(
-        self, share, dest_host, dest_server_id, snapshot_id=None,
-        share_network_id=None, version=CONF.share.max_api_microversion):
+            self, share, dest_host, dest_server_id, snapshot_id=None,
+            share_network_id=None, version=CONF.share.max_api_microversion):
         """Validates the share server migration complete. """
 
         # Check the export locations
diff --git a/manila_tempest_tests/tests/api/base.py b/manila_tempest_tests/tests/api/base.py
index d5cc439..34f8e41 100755
--- a/manila_tempest_tests/tests/api/base.py
+++ b/manila_tempest_tests/tests/api/base.py
@@ -58,6 +58,8 @@
     r"(?=.*\[.*\b(%(p)s|%(n)s)\b.*\])(?=.*\[.*\b(%(a)s|%(b)s|%(ab)s)\b.*\])" %
     TAGS_MAPPER)
 
+LATEST_MICROVERSION = CONF.share.max_api_microversion
+
 
 def verify_test_has_appropriate_tags(self):
     if not TAGS_PATTERN.match(self.id()):
@@ -1046,7 +1048,8 @@
         return waiters.wait_for_message(self.shares_v2_client, share['id'])
 
     def allow_access(self, share_id, client=None, access_type=None,
-                     access_level='rw', access_to=None, status='active',
+                     access_level='rw', access_to=None, metadata=None,
+                     version=LATEST_MICROVERSION, status='active',
                      raise_rule_in_error_state=True, cleanup=True):
 
         client = client or self.shares_v2_client
@@ -1054,15 +1057,23 @@
         access_type = access_type or a_type
         access_to = access_to or a_to
 
-        rule = client.create_access_rule(share_id, access_type, access_to,
-                                         access_level)['access']
+        kwargs = {
+            'access_type': access_type,
+            'access_to': access_to,
+            'access_level': access_level
+        }
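+        # Only the v2 client accepts the 'metadata' and 'version'
+        # arguments.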
+        if client is self.shares_v2_client:
+            kwargs.update({'metadata': metadata, 'version': version})
+
+        rule = client.create_access_rule(share_id, **kwargs)['access']
         waiters.wait_for_resource_status(
             client, share_id, status, resource_name='access_rule',
-            rule_id=rule['id'],
+            rule_id=rule['id'], version=version,
             raise_rule_in_error_state=raise_rule_in_error_state)
         if cleanup:
-            self.addCleanup(client.wait_for_resource_deletion,
-                            rule_id=rule['id'], share_id=share_id)
+            self.addCleanup(
+                client.wait_for_resource_deletion, rule_id=rule['id'],
+                share_id=share_id, version=version)
             self.addCleanup(client.delete_access_rule, share_id, rule['id'])
         return rule
 
diff --git a/manila_tempest_tests/tests/api/test_access_rules_metadata.py b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
index fa3c0a7..ee541ac 100644
--- a/manila_tempest_tests/tests/api/test_access_rules_metadata.py
+++ b/manila_tempest_tests/tests/api/test_access_rules_metadata.py
@@ -19,6 +19,7 @@
 from testtools import testcase as tc
 
 from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
 from manila_tempest_tests.tests.api import base
 from manila_tempest_tests import utils
 
@@ -77,16 +78,19 @@
             cls.share["id"], cls.access_type,
             cls.access_to[cls.access_type].pop(), 'rw',
             metadata=cls.md1)['access']
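+        # The rule is created directly through the client here because
+        # allow_access() is an instance helper; wait for it explicitly.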
+        waiters.wait_for_resource_status(
+            cls.shares_v2_client, cls.share["id"], "active",
+            resource_name='access_rule', rule_id=cls.access["id"])
 
     @decorators.idempotent_id('4c8e0236-2e7b-4337-be3c-17b51a738644')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     def test_set_get_delete_access_metadata(self):
         data = {"key1": "v" * 255, "k" * 255: "value2"}
         # set metadata
-        access = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type,
-            self.access_to[self.access_type].pop(), 'rw',
-            metadata=data)['access']
+        access = self.allow_access(
+            self.share["id"], access_type=self.access_type,
+            access_to=self.access_to[self.access_type].pop(),
+            access_level='rw', metadata=data)
 
         # read metadata
         get_access = self.shares_v2_client.get_access_rule(
@@ -103,10 +107,6 @@
         access_without_md = self.shares_v2_client.get_access_rule(
             access["id"])['access']
         self.assertEqual({}, access_without_md['metadata'])
-        self.shares_v2_client.delete_access_rule(self.share["id"],
-                                                 access["id"])
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=access["id"], share_id=self.share["id"])
 
     @decorators.idempotent_id('8c294d7d-0702-49ce-b964-0945ec323370')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
@@ -129,10 +129,10 @@
     def test_list_access_filter_by_metadata(self):
         data = {"key3": "v3", "key4": "value4"}
         # set metadata
-        access = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type,
-            self.access_to[self.access_type].pop(), 'rw',
-            metadata=data)['access']
+        access = self.allow_access(
+            self.share["id"], access_type=self.access_type,
+            access_to=self.access_to[self.access_type].pop(),
+            access_level='rw', metadata=data)
 
         # list metadata with metadata filter
         list_access = self.shares_v2_client.list_access_rules(
diff --git a/manila_tempest_tests/tests/api/test_metadata.py b/manila_tempest_tests/tests/api/test_metadata.py
index c8529a3..d2ae326 100644
--- a/manila_tempest_tests/tests/api/test_metadata.py
+++ b/manila_tempest_tests/tests/api/test_metadata.py
@@ -34,14 +34,15 @@
     def _verify_share_metadata(self, share, md):
 
         # get metadata of share
-        metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        metadata = self.shares_v2_client.get_metadata(share["id"])['metadata']
 
         # verify metadata
         self.assertEqual(md, metadata)
 
         # verify metadata items
         for key in md:
-            get_value = self.shares_client.get_metadata_item(share["id"], key)
+            get_value = self.shares_v2_client.get_metadata_item(share["id"],
+                                                                key)
             self.assertEqual(md[key], get_value[key])
 
     @decorators.idempotent_id('9070249f-6e94-4a38-a036-08debee547c3')
@@ -69,17 +70,18 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md)
+        self.shares_v2_client.set_metadata(share["id"], md)
 
         # verify metadata
         self._verify_share_metadata(share, md)
 
         # delete metadata
         for key in md.keys():
-            self.shares_client.delete_metadata(share["id"], key)
+            self.shares_v2_client.delete_metadata(share["id"], key)
 
         # verify deletion of metadata
-        get_metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        get_metadata = self.shares_v2_client.get_metadata(share["id"])[
+            'metadata']
         self.assertEmpty(get_metadata)
 
     @decorators.idempotent_id('4e5f8159-62b6-4d5c-f729-d8b1f029d7de')
@@ -93,13 +95,13 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md1)
+        self.shares_v2_client.set_metadata(share["id"], md1)
 
         # verify metadata
         self._verify_share_metadata(share, md1)
 
         # set metadata again
-        self.shares_client.set_metadata(share["id"], md2)
+        self.shares_v2_client.set_metadata(share["id"], md2)
 
         # verify metadata
         md1.update(md2)
@@ -110,10 +112,11 @@
 
         # delete metadata
         for key in md.keys():
-            self.shares_client.delete_metadata(share["id"], key)
+            self.shares_v2_client.delete_metadata(share["id"], key)
 
         # verify deletion of metadata
-        get_metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        get_metadata = self.shares_v2_client.get_metadata(
+            share["id"])['metadata']
         self.assertEmpty(get_metadata)
 
     @decorators.idempotent_id('2ec70ba5-050b-3b17-c862-c149e53543c0')
@@ -127,13 +130,13 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md1)
+        self.shares_v2_client.set_metadata(share["id"], md1)
 
         # verify metadata
         self._verify_share_metadata(share, md1)
 
         # set metadata again
-        self.shares_client.set_metadata(share["id"], md2)
+        self.shares_v2_client.set_metadata(share["id"], md2)
 
         # verify metadata
         md = {u"key9": u"value13", u"key10": u"value10",
@@ -142,10 +145,11 @@
 
         # delete metadata
         for key in md.keys():
-            self.shares_client.delete_metadata(share["id"], key)
+            self.shares_v2_client.delete_metadata(share["id"], key)
 
         # verify deletion of metadata
-        get_metadata = self.shares_client.get_metadata(share["id"])['metadata']
+        get_metadata = self.shares_v2_client.get_metadata(
+            share["id"])['metadata']
         self.assertEmpty(get_metadata)
 
     @decorators.idempotent_id('c94851f4-2559-4712-9297-9912db1da7ff')
@@ -160,10 +164,10 @@
                                   cleanup_in_class=False)
 
         # set metadata
-        self.shares_client.set_metadata(share["id"], md1)
+        self.shares_v2_client.set_metadata(share["id"], md1)
 
         # update metadata
-        self.shares_client.update_all_metadata(share["id"], md2)
+        self.shares_v2_client.update_all_metadata(share["id"], md2)
 
         # verify metadata
         self._verify_share_metadata(share, md2)
@@ -173,9 +177,9 @@
     def test_set_metadata_min_size_key(self):
         data = {"k": "value"}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data['k'], body_get.get('k'))
 
@@ -185,9 +189,9 @@
         max_key = "k" * 255
         data = {max_key: "value"}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertIn(max_key, body_get)
         self.assertEqual(data[max_key], body_get.get(max_key))
@@ -197,9 +201,9 @@
     def test_set_metadata_min_size_value(self):
         data = {"key": "v"}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data['key'], body_get['key'])
 
@@ -209,9 +213,9 @@
         max_value = "v" * 1023
         data = {"key": max_value}
 
-        self.shares_client.set_metadata(self.share["id"], data)
+        self.shares_v2_client.set_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data['key'], body_get['key'])
 
@@ -220,9 +224,9 @@
     def test_upd_metadata_min_size_key(self):
         data = {"k": "value"}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
 
@@ -232,9 +236,9 @@
         max_key = "k" * 255
         data = {max_key: "value"}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
 
@@ -243,9 +247,9 @@
     def test_upd_metadata_min_size_value(self):
         data = {"key": "v"}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
 
@@ -255,8 +259,8 @@
         max_value = "v" * 1023
         data = {"key": max_value}
 
-        self.shares_client.update_all_metadata(self.share["id"], data)
+        self.shares_v2_client.update_all_metadata(self.share["id"], data)
 
-        body_get = self.shares_client.get_metadata(
+        body_get = self.shares_v2_client.get_metadata(
             self.share["id"])['metadata']
         self.assertEqual(data, body_get)
diff --git a/manila_tempest_tests/tests/api/test_metadata_negative.py b/manila_tempest_tests/tests/api/test_metadata_negative.py
index 93a3628..caa34ac 100644
--- a/manila_tempest_tests/tests/api/test_metadata_negative.py
+++ b/manila_tempest_tests/tests/api/test_metadata_negative.py
@@ -61,14 +61,14 @@
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     def test_try_set_metadata_with_empty_key(self):
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.set_metadata,
+                          self.shares_v2_client.set_metadata,
                           self.share["id"], {"": "value"})
 
     @decorators.idempotent_id('759ca34d-1c87-43f3-8da2-8e1d373049ac')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     def test_try_upd_metadata_with_empty_key(self):
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.update_all_metadata,
+                          self.shares_v2_client.update_all_metadata,
                           self.share["id"], {"": "value"})
 
     @decorators.idempotent_id('94c7ebb3-14c3-4ff1-9839-ae3acb318cd0')
@@ -77,7 +77,7 @@
         too_big_key = "x" * 256
         md = {too_big_key: "value"}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.set_metadata,
+                          self.shares_v2_client.set_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('33ef3047-6ca3-4547-a681-b52314382dcb')
@@ -86,7 +86,7 @@
         too_big_key = "x" * 256
         md = {too_big_key: "value"}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.update_all_metadata,
+                          self.shares_v2_client.update_all_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('1114970a-1b45-4c56-b20a-e13e1764e3c4')
@@ -95,7 +95,7 @@
         too_big_value = "x" * 1024
         md = {"key": too_big_value}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.set_metadata,
+                          self.shares_v2_client.set_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('c2eddcf0-cf81-4f9f-b06d-c9165ab8553e')
@@ -104,12 +104,12 @@
         too_big_value = "x" * 1024
         md = {"key": too_big_value}
         self.assertRaises(lib_exc.BadRequest,
-                          self.shares_client.update_all_metadata,
+                          self.shares_v2_client.update_all_metadata,
                           self.share["id"], md)
 
     @decorators.idempotent_id('14df3262-5a2b-4de4-b335-422329b22b07')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     def test_try_delete_unexisting_metadata(self):
         self.assertRaises(lib_exc.NotFound,
-                          self.shares_client.delete_metadata,
+                          self.shares_v2_client.delete_metadata,
                           self.share["id"], "wrong_key")
diff --git a/manila_tempest_tests/tests/api/test_replication.py b/manila_tempest_tests/tests/api/test_replication.py
index 722f02f..7873926 100644
--- a/manila_tempest_tests/tests/api/test_replication.py
+++ b/manila_tempest_tests/tests/api/test_replication.py
@@ -187,17 +187,153 @@
         # Delete subnet
         self.shares_v2_client.delete_subnet(self.sn_id, subnet['id'])
 
+    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+    @testtools.skipIf(
+        not CONF.share.multitenancy_enabled, "Only for multitenancy.")
+    @testtools.skipIf(
+        not CONF.share.run_share_server_multiple_subnet_tests,
+        "Share server multiple subnet tests are disabled.")
+    @testtools.skipIf(CONF.share.share_network_id != "",
+                      "This test is not suitable for pre-existing "
+                      "share networks.")
+    @utils.skip_if_microversion_not_supported("2.70")
+    @decorators.idempotent_id('4235e789-dbd6-47a8-8f2e-d70edf78e532')
+    def test_add_delete_share_replica_multiple_subnets(self):
+        extra_specs = {
+            "replication_type": self.replication_type,
+            "driver_handles_share_servers": CONF.share.multitenancy_enabled,
+            "share_server_multiple_subnet_support": True,
+        }
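+        # Request a back end that can place a single share server across
+        # multiple subnets.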
+        share_type = self.create_share_type(
+            extra_specs=extra_specs, client=self.admin_client)
+        default_subnet = utils.share_network_get_default_subnet(
+            self.share_network)
+        new_share_network_id = self.create_share_network(
+            cleanup_in_class=False)['id']
+        subnet_data = {
+            'neutron_net_id': default_subnet.get('neutron_net_id'),
+            'neutron_subnet_id': default_subnet.get('neutron_subnet_id'),
+            'share_network_id': new_share_network_id,
+            'availability_zone': self.replica_zone,
+        }
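+        # Two subnets in the replica AZ, so the replica's share server
+        # should get allocations on both.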
+        subnet1 = self.create_share_network_subnet(**subnet_data)
+        subnet2 = self.create_share_network_subnet(**subnet_data)
+        # Create a third subnet in the share replica AZ
+        subnet_data.update({'availability_zone': self.share_zone})
+        subnet3 = self.create_share_network_subnet(**subnet_data)
+        # Create the share and share replica
+        share = self.create_share(
+            share_type_id=share_type['id'], cleanup_in_class=False,
+            availability_zone=self.share_zone,
+            share_network_id=new_share_network_id)
+        share = self.admin_client.get_share(share['id'])['share']
+        replica = self.create_share_replica(share['id'], self.replica_zone)
+        replica = self.admin_client.get_share_replica(
+            replica['id'])['share_replica']
+        share_server = self.admin_client.show_share_server(
+            replica['share_server_id'])['share_server']
+        self.assertIn(subnet1['id'],
+                      share_server['share_network_subnet_ids'])
+        self.assertIn(subnet2['id'],
+                      share_server['share_network_subnet_ids'])
+        # Delete the replica
+        self.delete_share_replica(replica['id'])
+        # Delete share
+        self.shares_v2_client.delete_share(share['id'])
+        self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
+        # Delete subnets
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet1['id'])
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet2['id'])
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet3['id'])
+
+    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+    @testtools.skipIf(
+        not CONF.share.multitenancy_enabled, "Only for multitenancy.")
+    @testtools.skipIf(
+        not CONF.share.run_network_allocation_update_tests,
+        "Share server network allocation update tests are disabled.")
+    @testtools.skipIf(CONF.share.share_network_id != "",
+                      "This test is not suitable for pre-existing "
+                      "share_network.")
+    @utils.skip_if_microversion_not_supported("2.70")
+    @decorators.idempotent_id('26694947-d4a0-46c8-99e8-2e0eca1b6a08')
+    def test_add_delete_share_replica_network_allocation_update(self):
+        extra_specs = {
+            "replication_type": self.replication_type,
+            "driver_handles_share_servers": CONF.share.multitenancy_enabled,
+            "network_allocation_update_support": True,
+        }
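+        # network_allocation_update_support means the back end can extend
+        # an existing share server when a subnet is added later.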
+        share_type = self.create_share_type(extra_specs=extra_specs)
+
+        default_subnet = utils.share_network_get_default_subnet(
+            self.share_network)
+        new_share_network_id = self.create_share_network(
+            cleanup_in_class=False)['id']
+        subnet_data = {
+            'neutron_net_id': default_subnet.get('neutron_net_id'),
+            'neutron_subnet_id': default_subnet.get('neutron_subnet_id'),
+            'share_network_id': new_share_network_id,
+            'availability_zone': self.share_zone,
+        }
+        subnet1 = self.create_share_network_subnet(**subnet_data)
+        subnet_data.update({'availability_zone': self.replica_zone})
+        subnet2 = self.create_share_network_subnet(**subnet_data)
+        # Create the share and share replica
+        share = self.create_share(
+            share_type_id=share_type['id'], cleanup_in_class=False,
+            availability_zone=self.share_zone,
+            share_network_id=new_share_network_id)
+        share = self.admin_client.get_share(share['id'])['share']
+
+        replica = self.create_share_replica(share['id'], self.replica_zone)
+        replica = self.admin_client.get_share_replica(
+            replica['id'])['share_replica']
+
+        # Wait until the check completes with a compatible result
+        waiters.wait_for_subnet_create_check(
+            self.shares_v2_client, new_share_network_id,
+            neutron_net_id=subnet_data['neutron_net_id'],
+            neutron_subnet_id=subnet_data['neutron_subnet_id'],
+            availability_zone=self.replica_zone)
+        # Create a third subnet in the replica zone to trigger the network
+        # allocation update
+        subnet3 = self.create_share_network_subnet(**subnet_data)
+        waiters.wait_for_resource_status(
+            self.admin_client, replica['share_server_id'],
+            constants.SERVER_STATE_ACTIVE,
+            resource_name="share_server",
+            status_attr="status")
+        share_server = self.admin_client.show_share_server(
+            replica['share_server_id']
+        )['share_server']
+        self.assertIn(subnet2['id'],
+                      share_server['share_network_subnet_ids'])
+        self.assertIn(subnet3['id'],
+                      share_server['share_network_subnet_ids'])
+        # Delete the replica
+        self.delete_share_replica(replica['id'])
+        # Delete share
+        self.shares_v2_client.delete_share(share['id'])
+        self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
+        # Delete subnets
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet1['id'])
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet2['id'])
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet3['id'])
+
     @decorators.idempotent_id('00e12b41-b95d-494a-99be-e584aae10f5c')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     def test_add_access_rule_create_replica_delete_rule(self):
         # Add access rule to the share
         access_type, access_to = self._get_access_rule_data_from_config()
-        rule = self.shares_v2_client.create_access_rule(
-            self.shares[0]["id"], access_type, access_to, 'ro')['access']
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.shares[0]["id"],
-            constants.RULE_STATE_ACTIVE, resource_name='access_rule',
-            rule_id=rule["id"])
+        self.allow_access(
+            self.shares[0]["id"], access_type=access_type, access_to=access_to,
+            access_level='ro')
 
         # Create the replica
         self._verify_create_replica()
@@ -207,12 +343,6 @@
             self.shares_v2_client, self.shares[0]["id"],
             constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
 
-        # Delete rule and wait for deletion
-        self.shares_v2_client.delete_access_rule(self.shares[0]["id"],
-                                                 rule["id"])
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.shares[0]['id'])
-
     @decorators.idempotent_id('3af3f19a-1195-464e-870b-1a3918914f1b')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     def test_create_replica_add_access_rule_delete_replica(self):
@@ -221,12 +351,9 @@
         share_replica = self._verify_create_replica()
 
         # Add access rule
-        self.shares_v2_client.create_access_rule(
-            self.shares[0]["id"], access_type, access_to, 'ro')
-
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.shares[0]["id"],
-            constants.RULE_STATE_ACTIVE, status_attr='access_rules_status')
+        self.allow_access(
+            self.shares[0]["id"], access_type=access_type, access_to=access_to,
+            access_level='ro')
 
         # Delete the replica
         self.delete_share_replica(share_replica["id"])
@@ -282,11 +409,9 @@
         share = self.create_shares([self.creation_data])[0]
         # Add access rule
         access_type, access_to = self._get_access_rule_data_from_config()
-        rule = self.shares_v2_client.create_access_rule(
-            share["id"], access_type, access_to, 'ro')['access']
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, share["id"], constants.RULE_STATE_ACTIVE,
-            resource_name='access_rule', rule_id=rule["id"])
+        self.allow_access(
+            share["id"], access_type=access_type, access_to=access_to,
+            access_level='ro')
 
         original_replica = self.shares_v2_client.list_share_replicas(
             share["id"])['share_replicas'][0]
diff --git a/manila_tempest_tests/tests/api/test_rules.py b/manila_tempest_tests/tests/api/test_rules.py
index 30b1fc5..979bf06 100644
--- a/manila_tempest_tests/tests/api/test_rules.py
+++ b/manila_tempest_tests/tests/api/test_rules.py
@@ -22,7 +22,6 @@
 import testtools
 from testtools import testcase as tc
 
-from manila_tempest_tests.common import waiters
 from manila_tempest_tests.tests.api import base
 from manila_tempest_tests import utils
 
@@ -37,13 +36,14 @@
     :param self: instance of test class
     """
 
-    if utils.is_microversion_eq(version, '1.0'):
-        rule = self.shares_client.create_access_rule(
-            self.share["id"], self.access_type, self.access_to, 'ro')['access']
+    if utils.is_microversion_le(version, '2.9'):
+        client = self.shares_client
     else:
-        rule = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type, self.access_to, 'ro',
-            version=version)['access']
+        client = self.shares_v2_client
+
+    rule = self.allow_access(
+        self.share["id"], client=client, access_type=self.access_type,
+        access_to=self.access_to, access_level='ro', version=version)
 
     self.assertEqual('ro', rule['access_level'])
     for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -55,14 +55,6 @@
     else:
         self.assertEqual("queued_to_apply", rule['state'])
 
-    if utils.is_microversion_le(version, '2.9'):
-        waiters.wait_for_resource_status(
-            self.shares_client, self.share["id"], "active",
-            resource_name='access_rule', rule_id=rule["id"])
-    else:
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            status_attr='access_rules_status', version=version)
         # If the 'access_rules_status' transitions to 'active',
         # rule state must too
         rules = self.shares_v2_client.list_access_rules(
@@ -70,16 +62,6 @@
         rule = [r for r in rules if r['id'] == rule['id']][0]
         self.assertEqual("active", rule['state'])
 
-    if utils.is_microversion_eq(version, '1.0'):
-        self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-        self.shares_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.share['id'])
-    else:
-        self.shares_v2_client.delete_access_rule(
-            self.share["id"], rule["id"], version=version)
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.share['id'], version=version)
-
 
 @ddt.ddt
 class ShareIpRulesForNFSTest(base.BaseSharesMixedTest):
@@ -109,8 +91,11 @@
     @decorators.idempotent_id('3390df2d-f6f8-4634-a562-87c1be994f6a')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @ddt.data(*itertools.chain(
-        itertools.product({'1.0', '2.9', '2.37', LATEST_MICROVERSION}, {4}),
-        itertools.product({'2.38', LATEST_MICROVERSION}, {6})
+        itertools.product(
+            utils.deduplicate(['1.0', '2.9', '2.37', LATEST_MICROVERSION]),
+            [4]),
+        itertools.product(
+            utils.deduplicate(['2.38', LATEST_MICROVERSION]), [6])
     ))
     @ddt.unpack
     def test_create_delete_access_rules_with_one_ip(self, version,
@@ -120,14 +105,16 @@
             access_to = utils.rand_ip()
         else:
             access_to = utils.rand_ipv6_ip()
-        # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, access_to)['access']
+
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, access_to,
-                version=version)['access']
+            client = self.shares_v2_client
+
+        # create rule
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=access_to, version=version)
 
         self.assertEqual('rw', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -139,35 +126,14 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('5d25168a-d646-443e-8cf1-3151eb7887f5')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @ddt.data(*itertools.chain(
-        itertools.product({'1.0', '2.9', '2.37', LATEST_MICROVERSION}, {4}),
-        itertools.product({'2.38', LATEST_MICROVERSION}, {6})
+        itertools.product(
+            utils.deduplicate(['1.0', '2.9', '2.37', LATEST_MICROVERSION]),
+            [4]),
+        itertools.product(
+            utils.deduplicate(['2.38', LATEST_MICROVERSION]), [6])
     ))
     @ddt.unpack
     def test_create_delete_access_rule_with_cidr(self, version, ip_version):
@@ -175,49 +141,19 @@
             access_to = utils.rand_ip(network=True)
         else:
             access_to = utils.rand_ipv6_ip(network=True)
-        # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, access_to)['access']
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, access_to,
-                version=version)['access']
+            client = self.shares_v2_client
+        # create rule
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=access_to, version=version)
 
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
             self.assertNotIn(key, rule.keys())
         self.assertEqual('rw', rule['access_level'])
 
-        # rules must start out in 'new' until 2.28 & 'queued_to_apply' after
-        if utils.is_microversion_le(version, "2.27"):
-            self.assertEqual("new", rule['state'])
-        else:
-            self.assertEqual("queued_to_apply", rule['state'])
-
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('187a4fb0-ba1d-45b9-83c9-f0272e7e6f3e')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @testtools.skipIf(
@@ -277,15 +213,15 @@
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
                                  LATEST_MICROVERSION]))
     def test_create_delete_user_rule(self, version):
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
+        else:
+            client = self.shares_v2_client
 
         # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to)['access']
-        else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to,
-                version=version)['access']
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, version=version)
 
         self.assertEqual('rw', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -297,30 +233,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('ccb08342-b7ef-4dda-84ba-8de9879d8862')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @testtools.skipIf(
@@ -381,15 +293,15 @@
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
                                  LATEST_MICROVERSION]))
     def test_create_delete_cert_rule(self, version):
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
+        else:
+            client = self.shares_v2_client
 
         # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to)['access']
-        else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to,
-                version=version)['access']
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, version=version)
 
         self.assertEqual('rw', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -401,30 +313,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        # delete rule
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('cdd93d8e-7255-4ed4-8ef0-929a62bb302c')
     @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
     @testtools.skipIf(
@@ -433,13 +321,13 @@
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
                                  LATEST_MICROVERSION]))
     def test_create_delete_cert_ro_access_rule(self, version):
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], 'cert', 'client2.com', 'ro')['access']
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], 'cert', 'client2.com', 'ro',
-                version=version)['access']
+            client = self.shares_v2_client
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type='cert',
+            access_to='client2.com', access_level='ro', version=version)
 
         self.assertEqual('ro', rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
@@ -451,29 +339,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
 
 @ddt.ddt
 class ShareCephxRulesForCephFSTest(base.BaseSharesMixedTest):
@@ -512,31 +377,21 @@
         ('rw', 'ro')))
     @ddt.unpack
     def test_create_delete_cephx_rule(self, version, access_to, access_level):
-        rule = self.shares_v2_client.create_access_rule(
-            self.share["id"], self.access_type, access_to, version=version,
-            access_level=access_level)['access']
+        rule = self.allow_access(
+            self.share["id"], access_type=self.access_type,
+            access_to=access_to, version=version, access_level=access_level)
 
         self.assertEqual(access_level, rule['access_level'])
         for key in ('deleted', 'deleted_at', 'instance_mappings'):
             self.assertNotIn(key, rule.keys())
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            resource_name='access_rule', rule_id=rule["id"])
-
-        self.shares_v2_client.delete_access_rule(
-            self.share["id"], rule["id"], version=version)
-        self.shares_v2_client.wait_for_resource_deletion(
-            rule_id=rule["id"], share_id=self.share['id'])
 
     @decorators.idempotent_id('ad907303-a439-4fcb-8845-fe91ecab7dc2')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     def test_different_users_in_same_tenant_can_use_same_cephx_id(self):
         # Grant access to the share
-        access1 = self.shares_v2_client.create_access_rule(
-            self.share['id'], self.access_type, self.access_to, 'rw')['access']
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            resource_name='access_rule', rule_id=access1["id"])
+        self.allow_access(
+            self.share['id'], access_type=self.access_type,
+            access_to=self.access_to, access_level='rw')
 
         # Create a new user in the current project
         project = self.os_admin.projects_client.show_project(
@@ -550,11 +405,10 @@
 
         # Grant access to the second share using the same cephx ID that was
         # used in access1
-        access2 = user_client.shares_v2_client.create_access_rule(
-            share2['id'], self.access_type, self.access_to, 'rw')['access']
-        waiters.wait_for_resource_status(
-            user_client.shares_v2_client, share2['id'], "active",
-            resource_name='access_rule', rule_id=access2['id'])
+        self.allow_access(
+            share2['id'], client=user_client.shares_v2_client,
+            access_type=self.access_type, access_to=self.access_to,
+            access_level='rw')
 
 
 @ddt.ddt
@@ -606,14 +460,14 @@
         metadata = None
         if utils.is_microversion_ge(version, '2.45'):
             metadata = {'key1': 'v1', 'key2': 'v2'}
-        # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to)['access']
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], self.access_type, self.access_to,
-                metadata=metadata, version=version)['access']
+            client = self.shares_v2_client
+        # create rule
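+        # allow_access creates the rule, waits for it to become active and
+        # registers cleanup at test teardown (unless cleanup=False is passed)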
+        rule = self.allow_access(
+            self.share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, metadata=metadata, version=version)
 
         # verify added rule keys since 2.33 when create rule
         if utils.is_microversion_ge(version, '2.33'):
@@ -629,19 +483,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
         # list rules
         if utils.is_microversion_eq(version, '1.0'):
             rules = self.shares_client.list_access_rules(
@@ -678,16 +519,6 @@
         msg = "expected id lists %s times in rule list" % (len(gen))
         self.assertEqual(1, len(gen), msg)
 
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"], rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'])
-        else:
-            self.shares_v2_client.delete_access_rule(
-                self.share["id"], rule["id"], version=version)
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share['id'], version=version)
-
     @decorators.idempotent_id('b77bcbda-9754-48f0-9be6-79341ad1af64')
     @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
     @ddt.data(*utils.deduplicate(['1.0', '2.9', '2.27', '2.28',
@@ -698,18 +529,18 @@
             msg = ("API version %s does not support cephx access type, need "
                    "version >= 2.13." % version)
             raise self.skipException(msg)
+        if utils.is_microversion_le(version, '2.9'):
+            client = self.shares_client
+        else:
+            client = self.shares_v2_client
 
         # create share
         share = self.create_share(share_type_id=self.share_type_id)
 
         # create rule
-        if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                share["id"], self.access_type, self.access_to)['access']
-        else:
-            rule = self.shares_v2_client.create_access_rule(
-                share["id"], self.access_type, self.access_to,
-                version=version)['access']
+        rule = self.allow_access(
+            share["id"], client=client, access_type=self.access_type,
+            access_to=self.access_to, version=version, cleanup=False)
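+        # cleanup=False: the rule is removed together with the share, which
+        # is deleted at the end of this test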
 
         # rules must start out in 'new' until 2.28 & 'queued_to_apply' after
         if utils.is_microversion_le(version, "2.27"):
@@ -717,19 +548,6 @@
         else:
             self.assertEqual("queued_to_apply", rule['state'])
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, share["id"], "active",
-                status_attr='access_rules_status', version=version)
-
         # delete share
         if utils.is_microversion_eq(version, '1.0'):
             self.shares_client.delete_share(share['id'])
diff --git a/manila_tempest_tests/tests/api/test_rules_negative.py b/manila_tempest_tests/tests/api/test_rules_negative.py
index 5225651..1eb858d 100644
--- a/manila_tempest_tests/tests/api/test_rules_negative.py
+++ b/manila_tempest_tests/tests/api/test_rules_negative.py
@@ -99,27 +99,15 @@
         access_type = "ip"
         access_to = "1.2.3.4"
 
-        # create rule
         if utils.is_microversion_eq(version, '1.0'):
-            rule = self.shares_client.create_access_rule(
-                self.share["id"], access_type, access_to)['access']
+            client = self.shares_client
         else:
-            rule = self.shares_v2_client.create_access_rule(
-                self.share["id"], access_type, access_to,
-                version=version)['access']
+            client = self.shares_v2_client
 
-        if utils.is_microversion_eq(version, '1.0'):
-            waiters.wait_for_resource_status(
-                self.shares_client, self.share["id"], "active",
-                resource_name='access_rule', rule_id=rule["id"])
-        elif utils.is_microversion_eq(version, '2.9'):
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                resource_name="access_rule", rule_id=rule["id"])
-        else:
-            waiters.wait_for_resource_status(
-                self.shares_v2_client, self.share["id"], "active",
-                status_attr='access_rules_status', version=version)
+        # create rule
+        self.allow_access(
+            self.share["id"], client=client, access_type=access_type,
+            access_to=access_to, version=version)
 
         # try create duplicate of rule
         if utils.is_microversion_eq(version, '1.0'):
@@ -132,18 +120,6 @@
                               self.share["id"], access_type, access_to,
                               version=version)
 
-        # delete rule and wait for deletion
-        if utils.is_microversion_eq(version, '1.0'):
-            self.shares_client.delete_access_rule(self.share["id"],
-                                                  rule["id"])
-            self.shares_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share["id"])
-        else:
-            self.shares_v2_client.delete_access_rule(self.share["id"],
-                                                     rule["id"])
-            self.shares_v2_client.wait_for_resource_deletion(
-                rule_id=rule["id"], share_id=self.share["id"], version=version)
-
     @decorators.idempotent_id('63932d1d-a60a-4af7-ba3b-7cf6c68aaee9')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
     @ddt.data("10.20.30.40", "fd8c:b029:bba6:ac54::1",
@@ -157,13 +133,8 @@
                       "is %s" % CONF.share.max_api_microversion)
             raise self.skipException(reason)
 
-        rule = self.shares_v2_client.create_access_rule(
-            self.share["id"], "ip", access_to)['access']
-        self.addCleanup(self.shares_v2_client.delete_access_rule,
-                        self.share["id"], rule['id'])
-        waiters.wait_for_resource_status(
-            self.shares_v2_client, self.share["id"], "active",
-            status_attr='access_rules_status')
+        self.allow_access(
+            self.share["id"], access_type="ip", access_to=access_to)
 
         self.assertRaises(lib_exc.BadRequest,
                           self.shares_v2_client.create_access_rule,
diff --git a/manila_tempest_tests/tests/api/test_scheduler_hints.py b/manila_tempest_tests/tests/api/test_scheduler_hints.py
index 83e5a1a..5012f17 100644
--- a/manila_tempest_tests/tests/api/test_scheduler_hints.py
+++ b/manila_tempest_tests/tests/api/test_scheduler_hints.py
@@ -70,10 +70,10 @@
         self.assertEqual(backend_a, backend_b)
 
         # get metadata of share
-        metadata_a = self.shares_client.get_metadata(
+        metadata_a = self.shares_v2_client.get_metadata(
             self.share_a["id"])['metadata']
         md_a = {"__affinity_same_host": "%s" % share_b["id"]}
-        metadata_b = self.shares_client.get_metadata(
+        metadata_b = self.shares_v2_client.get_metadata(
             share_b["id"])['metadata']
         md_b = {"__affinity_same_host": "%s" % self.share_a["id"]}
 
diff --git a/manila_tempest_tests/tests/api/test_share_groups.py b/manila_tempest_tests/tests/api/test_share_groups.py
index 3b56161..b203481 100644
--- a/manila_tempest_tests/tests/api/test_share_groups.py
+++ b/manila_tempest_tests/tests/api/test_share_groups.py
@@ -15,12 +15,14 @@
 
 import ddt
 from tempest import config
+from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 import testtools
 from testtools import testcase as tc
 
 from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
 from manila_tempest_tests.tests.api import base
 from manila_tempest_tests import utils
 
@@ -263,3 +265,130 @@
         # Verify that share always has the same AZ as share group does
         self.assertEqual(
             share_group['availability_zone'], share['availability_zone'])
+
+    @utils.skip_if_microversion_not_supported("2.70")
+    @tc.attr(base.TAG_POSITIVE, base.TAG_API_WITH_BACKEND)
+    @testtools.skipUnless(CONF.share.multitenancy_enabled,
+                          "Multitenancy is disabled.")
+    @testtools.skipUnless(CONF.share.run_share_server_multiple_subnet_tests,
+                          "Share server multiple subnet tests are disabled.")
+    @testtools.skipIf(CONF.share.share_network_id != "",
+                      "This test is not suitable for pre-existing "
+                      "share networks.")
+    @ddt.data(False, True)
+    @decorators.idempotent_id('17fd1867-03a3-43d0-9be3-daf90b6c5e02')
+    def test_create_sg_and_share_with_multiple_subnets(
+        self, network_allocation_update):
+        if network_allocation_update and not (
+            CONF.share.run_network_allocation_update_tests):
+            raise self.skipException(
+                'Network allocation update tests are disabled.')
+        extra_specs = {
+            'driver_handles_share_servers': CONF.share.multitenancy_enabled,
+            'share_server_multiple_subnet_support': True,
+        }
+        if network_allocation_update:
+            extra_specs['network_allocation_update_support'] = True
+        share_type = self.create_share_type(extra_specs=extra_specs)
+        sg_type_name = data_utils.rand_name("tempest-manila")
+        sg_type = self.create_share_group_type(
+            name=sg_type_name, share_types=share_type['id'],
+            client=self.admin_shares_v2_client)
+        # Get the availability zones compatible with the share type; at
+        # least one must exist for this test to proceed
+        azs = self.get_availability_zones_matching_share_type(share_type)
+        if len(azs) == 0:
+            raise self.skipException(
+                "No AZs were found. Make sure there is at least one "
+                "configured.")
+        share_network = self.shares_v2_client.get_share_network(
+            self.shares_v2_client.share_network_id)['share_network']
+        new_share_network_id = self.create_share_network(
+            cleanup_in_class=False)['id']
+
+        default_subnet = utils.share_network_get_default_subnet(
+            share_network)
+        subnet_data = {
+            'neutron_net_id': default_subnet.get('neutron_net_id'),
+            'neutron_subnet_id': default_subnet.get('neutron_subnet_id'),
+            'share_network_id': new_share_network_id,
+            'availability_zone': azs[0]
+        }
+        subnet1 = self.create_share_network_subnet(**subnet_data)
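+        # when network allocation update is being exercised, the second
+        # subnet is created later, once the share server already exists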
+        if not network_allocation_update:
+            subnet2 = self.create_share_network_subnet(**subnet_data)
+
+        sg_kwargs = {
+            'share_group_type_id': sg_type['id'],
+            'share_type_ids': [share_type['id']],
+            'share_network_id': new_share_network_id,
+            'availability_zone': azs[0],
+            'version': constants.MIN_SHARE_GROUP_MICROVERSION,
+            'cleanup_in_class': False,
+        }
+
+        # Create share group
+        share_group = self.create_share_group(**sg_kwargs)
+
+        # Get latest share group info
+        share_group = self.shares_v2_client.get_share_group(
+            share_group['id'])['share_group']
+
+        self.assertIn('availability_zone', share_group)
+        self.assertEqual(azs[0], share_group['availability_zone'])
+
+        # Test 'consistent_snapshot_support' as part of 2.33 API change
+        self.assertIn('consistent_snapshot_support', share_group)
+        self.assertIn(
+            share_group['consistent_snapshot_support'], ('host', 'pool', None))
+
+        share_data = {
+            'share_type_id': share_type['id'],
+            'share_group_id': share_group['id'],
+            'share_network_id': new_share_network_id,
+            'availability_zone': azs[0],
+            'cleanup_in_class': False,
+        }
+
+        # Create share in share group
+        share = self.create_share(**share_data)
+
+        # Get latest share info
+        share = self.admin_shares_v2_client.get_share(share['id'])['share']
+        # Verify that share always has the same AZ as share group does
+        self.assertEqual(
+            share_group['availability_zone'], share['availability_zone'])
+
+        if network_allocation_update:
+            waiters.wait_for_subnet_create_check(
+                self.shares_v2_client, new_share_network_id,
+                neutron_net_id=subnet_data['neutron_net_id'],
+                neutron_subnet_id=subnet_data['neutron_subnet_id'],
+                availability_zone=azs[0])
+
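+            # creating a second subnet now that the share server exists
+            # triggers a network allocation update on that server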
+            subnet2 = self.create_share_network_subnet(**subnet_data)
+            waiters.wait_for_resource_status(
+                self.admin_shares_v2_client, share['share_server_id'],
+                constants.SERVER_STATE_ACTIVE,
+                resource_name="share_server",
+                status_attr="status")
+        # Get share server info
+        share_server = self.admin_shares_v2_client.show_share_server(
+            share['share_server_id'])['share_server']
+        # Check if share server has multiple subnets
+        self.assertIn(subnet1['id'], share_server['share_network_subnet_ids'])
+        self.assertIn(subnet2['id'], share_server['share_network_subnet_ids'])
+        # Delete share
+        params = {"share_group_id": share_group['id']}
+        self.shares_v2_client.delete_share(
+            share['id'],
+            params=params,
+            version=constants.MIN_SHARE_GROUP_MICROVERSION)
+        self.shares_client.wait_for_resource_deletion(share_id=share['id'])
+        # Delete subnets
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet1['id'])
+        self.shares_v2_client.delete_subnet(
+            new_share_network_id, subnet2['id'])
diff --git a/manila_tempest_tests/tests/api/test_share_network_subnets.py b/manila_tempest_tests/tests/api/test_share_network_subnets.py
index 0f651c9..cdf2162 100644
--- a/manila_tempest_tests/tests/api/test_share_network_subnets.py
+++ b/manila_tempest_tests/tests/api/test_share_network_subnets.py
@@ -127,6 +127,8 @@
             msg = ("This test needs at least two compatible storage "
                    "availability zones.")
             raise self.skipException(msg)
+        check_multiple_subnet = utils.is_microversion_ge(
+            CONF.share.max_api_microversion, '2.70')
 
         original_share_network = self.shares_v2_client.get_share_network(
             self.shares_v2_client.share_network_id
@@ -173,8 +175,12 @@
         # Match new subnet content
         self.assertDictContainsSubset(data, subnet)
         # Match share server subnet
-        self.assertEqual(subnet['id'],
-                         share_server['share_network_subnet_id'])
+        if check_multiple_subnet:
+            self.assertIn(subnet['id'],
+                          share_server['share_network_subnet_ids'])
+        else:
+            self.assertEqual(subnet['id'],
+                             share_server['share_network_subnet_id'])
         # Delete share
         self.shares_v2_client.delete_share(share['id'])
         self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
@@ -197,10 +203,11 @@
             msg = ("This test needs at least two compatible storage "
                    "availability zones.")
             raise self.skipException(msg)
+        check_multiple_subnet = utils.is_microversion_ge(
+            CONF.share.max_api_microversion, '2.70')
 
         original_share_network = self.shares_v2_client.get_share_network(
-            self.shares_v2_client.share_network_id
-        )['share_network']
+            self.shares_v2_client.share_network_id)['share_network']
         share_net_info = (
             utils.share_network_get_default_subnet(original_share_network))
         share_network = self.create_share_network(
@@ -254,8 +261,12 @@
         # Default subnet was created during share network creation
         self.assertIsNone(default_subnet['availability_zone'])
         # Match share server subnet
-        self.assertEqual(expected_subnet_id,
-                         share_server['share_network_subnet_id'])
+        if not check_multiple_subnet:
+            self.assertEqual(
+                expected_subnet_id, share_server['share_network_subnet_id'])
+        else:
+            self.assertIn(
+                expected_subnet_id, share_server['share_network_subnet_ids'])
         if create_share_with_az:
             self.assertEqual(destination_az,
                              share['availability_zone'])
diff --git a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
index 4af34f2..7b0ca55 100644
--- a/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
+++ b/manila_tempest_tests/tests/api/test_share_network_subnets_negative.py
@@ -27,6 +27,7 @@
 
 
 CONF = config.CONF
+LATEST_MICROVERSION = CONF.share.max_api_microversion
 
 
 @ddt.ddt
@@ -72,6 +73,7 @@
     @decorators.idempotent_id('13f397bf-5e3a-42b0-b4f9-9cd2dbbb0955')
     @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
     @ddt.data(True, False)
+    @utils.skip_if_is_microversion_ge(LATEST_MICROVERSION, "2.70")
     def test_add_share_network_subnet_in_same_az_exists(self, is_default):
         share_network = self.shares_v2_client.create_share_network(
             )['share_network']
@@ -231,7 +233,8 @@
         # Get a compatible availability zone
         az = self.get_availability_zones_matching_share_type(
             self.share_type)[0]
-
+        check_multiple_subnets = utils.is_microversion_ge(
+            CONF.share.max_api_microversion, '2.70')
         original_share_network = self.shares_v2_client.get_share_network(
             self.shares_v2_client.share_network_id
         )['share_network']
@@ -269,8 +272,12 @@
             share['share_server_id']
         )['share_server']
         # Match share server subnet
-        self.assertEqual(subnet['id'],
-                         share_server['share_network_subnet_id'])
+        if check_multiple_subnets:
+            self.assertIn(subnet['id'],
+                          share_server['share_network_subnet_ids'])
+        else:
+            self.assertEqual(subnet['id'],
+                             share_server['share_network_subnet_id'])
 
         # Assert that the user cannot delete a subnet that contain shares
         self.assertRaises(lib_exc.Conflict,
diff --git a/manila_tempest_tests/tests/api/test_share_networks_negative.py b/manila_tempest_tests/tests/api/test_share_networks_negative.py
index 17e0c12..3306068 100644
--- a/manila_tempest_tests/tests/api/test_share_networks_negative.py
+++ b/manila_tempest_tests/tests/api/test_share_networks_negative.py
@@ -195,3 +195,24 @@
             self.shares_v2_client.create_share_network,
             availability_zone='inexistent-availability-zone',
         )
+
+    @utils.skip_if_microversion_not_supported("2.70")
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+    @decorators.idempotent_id('f6f47c64-6821-4d4a-aa7d-3b0244158197')
+    def test_check_add_share_network_subnet_share_network_not_found(self):
+        data = self.generate_subnet_data()
+        self.assertRaises(lib_exc.NotFound,
+                          self.shares_v2_client.subnet_create_check,
+                          'fake_inexistent_id',
+                          **data)
+
+    @utils.skip_if_microversion_not_supported("2.70")
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API)
+    @decorators.idempotent_id('d9a487fb-6638-4f93-8b69-3e1a85bfbc7d')
+    def test_check_add_share_network_subnet_az_not_found(self):
+        share_network = self.create_share_network()
+        data = {'availability_zone': 'non-existent-az'}
+
+        self.assertRaises(lib_exc.BadRequest,
+                          self.shares_v2_client.subnet_create_check,
+                          share_network['id'], **data)
diff --git a/manila_tempest_tests/tests/api/test_share_servers_multiple_subnet.py b/manila_tempest_tests/tests/api/test_share_servers_multiple_subnet.py
new file mode 100644
index 0000000..fccbf03
--- /dev/null
+++ b/manila_tempest_tests/tests/api/test_share_servers_multiple_subnet.py
@@ -0,0 +1,196 @@
+# Copyright 2022 NetApp Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib import decorators
+import testtools
+from testtools import testcase as tc
+
+from manila_tempest_tests.common import constants
+from manila_tempest_tests.common import waiters
+from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
+
+CONF = config.CONF
+
+
+class ShareServerMultipleSubnetTest(base.BaseSharesMixedTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(ShareServerMultipleSubnetTest, cls).skip_checks()
+        if not CONF.share.multitenancy_enabled:
+            raise cls.skipException('Multitenancy tests are disabled.')
+        if not CONF.share.run_share_server_multiple_subnet_tests and not (
+                CONF.share.run_network_allocation_update_tests):
+            raise cls.skipException(
+                'Share server multiple subnets and network allocation '
+                'update tests are disabled.')
+        if CONF.share.share_network_id != "":
+            raise cls.skipException(
+                'These tests are not suitable for pre-existing '
+                'share networks.')
+        utils.check_skip_if_microversion_not_supported("2.70")
+
+    @classmethod
+    def resource_setup(cls):
+        super(ShareServerMultipleSubnetTest, cls).resource_setup()
+        cls.extra_specs = {
+            'driver_handles_share_servers': CONF.share.multitenancy_enabled,
+        }
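+        # request capability extra specs only for the enabled test groups,
+        # so a backend that supports just one of them can still match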
+        if CONF.share.run_share_server_multiple_subnet_tests:
+            cls.extra_specs['share_server_multiple_subnet_support'] = True
+        if CONF.share.run_network_allocation_update_tests:
+            cls.extra_specs['network_allocation_update_support'] = True
+        share_type = cls.create_share_type(extra_specs=cls.extra_specs)
+        cls.share_type_id = share_type['id']
+
+        cls.zones = cls.get_availability_zones_matching_share_type(
+            share_type)
+        if len(cls.zones) == 0:
+            msg = ("These tests need at least one compatible "
+                   "availability zone.")
+            raise cls.skipException(msg)
+
+        cls.share_network = cls.alt_shares_v2_client.get_share_network(
+            cls.alt_shares_v2_client.share_network_id)['share_network']
+        cls.default_subnet = utils.share_network_get_default_subnet(
+            cls.share_network)
+
+    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+    @testtools.skipIf(
+        not CONF.share.run_share_server_multiple_subnet_tests,
+        "Share network multiple subnets tests are disabled.")
+    @decorators.idempotent_id('5600bd52-ecb4-47d3-a4e8-3e6565cb0b80')
+    def test_create_share_on_multiple_subnets_same_az(self):
+        share_network_id = self.create_share_network(
+            cleanup_in_class=False)["id"]
+        subnet_data = {
+            'neutron_net_id': self.default_subnet.get('neutron_net_id'),
+            'neutron_subnet_id': self.default_subnet.get('neutron_subnet_id'),
+            'share_network_id': share_network_id,
+            'availability_zone': self.zones[0],
+        }
+        subnet1 = self.create_share_network_subnet(**subnet_data)
+        subnet2 = self.create_share_network_subnet(**subnet_data)
+
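+        # both subnets are in the same AZ, so the share server serving the
+        # share created below must span the two of them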
+        share = self.create_share(
+            share_type_id=self.share_type_id,
+            share_network_id=share_network_id,
+            availability_zone=self.zones[0])
+        self.assertIn(share['status'], ('creating', 'available'))
+
+        share = self.admin_shares_v2_client.get_share(share['id'])['share']
+        share_server = self.admin_shares_v2_client.show_share_server(
+            share['share_server_id']
+        )['share_server']
+        self.assertIn(subnet1['id'],
+                      share_server['share_network_subnet_ids'])
+        self.assertIn(subnet2['id'],
+                      share_server['share_network_subnet_ids'])
+
+        # Delete share
+        self.shares_v2_client.delete_share(share['id'])
+        self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
+        # Delete the subnets
+        self.shares_v2_client.delete_subnet(share_network_id, subnet1['id'])
+        self.shares_v2_client.delete_subnet(share_network_id, subnet2['id'])
+
+    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+    @testtools.skipIf(
+        not CONF.share.run_network_allocation_update_tests,
+        "Share server network allocation update are disabled.")
+    @decorators.idempotent_id('2a9debd5-47a3-42cc-823b-2b9de435a5e4')
+    def test_create_share_with_network_allocation_update(self):
+        share_network_id = self.create_share_network(
+            cleanup_in_class=False)["id"]
+        subnet_data = {
+            'neutron_net_id': self.default_subnet.get('neutron_net_id'),
+            'neutron_subnet_id': self.default_subnet.get('neutron_subnet_id'),
+            'share_network_id': share_network_id,
+            'availability_zone': self.zones[0],
+        }
+        subnet1 = self.create_share_network_subnet(**subnet_data)
+
+        share = self.create_share(
+            share_type_id=self.share_type_id,
+            share_network_id=share_network_id,
+            availability_zone=self.zones[0])
+        self.assertIn(share['status'], ('creating', 'available'))
+        share = self.admin_shares_v2_client.get_share(share['id'])['share']
+
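+        # before adding the second subnet, confirm the backend reports a
+        # subnet in this AZ as compatible with the existing share server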
+        waiters.wait_for_subnet_create_check(
+            self.shares_v2_client, share_network_id,
+            neutron_net_id=subnet_data['neutron_net_id'],
+            neutron_subnet_id=subnet_data['neutron_subnet_id'],
+            availability_zone=self.zones[0])
+        subnet2 = self.create_share_network_subnet(**subnet_data)
+
+        waiters.wait_for_resource_status(
+            self.admin_shares_v2_client, share['share_server_id'],
+            constants.SERVER_STATE_ACTIVE,
+            resource_name="share_server",
+            status_attr="status")
+        share_server = self.admin_shares_v2_client.show_share_server(
+            share['share_server_id']
+        )['share_server']
+
+        self.assertIn(subnet1['id'],
+                      share_server['share_network_subnet_ids'])
+        self.assertIn(subnet2['id'],
+                      share_server['share_network_subnet_ids'])
+
+        # Delete share
+        self.shares_v2_client.delete_share(share['id'])
+        self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
+        # Delete subnets
+        self.shares_v2_client.delete_subnet(share_network_id, subnet1['id'])
+        self.shares_v2_client.delete_subnet(share_network_id, subnet2['id'])
+
+    @tc.attr(base.TAG_POSITIVE, base.TAG_BACKEND)
+    @testtools.skipIf(
+        not CONF.share.run_network_allocation_update_tests,
+        "Share server network allocation update are disabled.")
+    @decorators.idempotent_id('2624f9a7-660b-4f91-89b8-c026b3bb8d1f')
+    def test_share_network_subnet_create_check(self):
+        """The share network subnet create check compatibility test."""
+
+        share_network_id = self.create_share_network(
+            cleanup_in_class=False)["id"]
+        subnet_data = {
+            'neutron_net_id': self.default_subnet.get('neutron_net_id'),
+            'neutron_subnet_id': self.default_subnet.get('neutron_subnet_id'),
+            'share_network_id': share_network_id,
+            'availability_zone': self.zones[0],
+        }
+        subnet1 = self.create_share_network_subnet(**subnet_data)
+
+        share = self.create_share(
+            share_type_id=self.share_type_id,
+            share_network_id=share_network_id,
+            availability_zone=self.zones[0]
+        )
+        self.assertIn(share['status'], ('creating', 'available'))
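+        # poll the subnet create check API until the backend reports that
+        # a matching subnet could be added to this share network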
+        waiters.wait_for_subnet_create_check(
+            self.shares_v2_client, share_network_id,
+            neutron_net_id=subnet_data['neutron_net_id'],
+            neutron_subnet_id=subnet_data['neutron_subnet_id'],
+            availability_zone=self.zones[0])
+
+        # Delete share
+        self.shares_v2_client.delete_share(share['id'])
+        self.shares_v2_client.wait_for_resource_deletion(share_id=share['id'])
+        # Delete subnets
+        self.shares_v2_client.delete_subnet(share_network_id, subnet1['id'])
diff --git a/manila_tempest_tests/tests/api/test_share_servers_multiple_subnet_negative.py b/manila_tempest_tests/tests/api/test_share_servers_multiple_subnet_negative.py
new file mode 100644
index 0000000..522f34e
--- /dev/null
+++ b/manila_tempest_tests/tests/api/test_share_servers_multiple_subnet_negative.py
@@ -0,0 +1,90 @@
+# Copyright 2022 NetApp Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from testtools import testcase as tc
+
+from manila_tempest_tests import share_exceptions
+from manila_tempest_tests.tests.api import base
+from manila_tempest_tests import utils
+
+CONF = config.CONF
+
+
+class ShareServerMultipleSubNegativeTest(base.BaseSharesMixedTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(ShareServerMultipleSubNegativeTest, cls).skip_checks()
+        if not CONF.share.multitenancy_enabled:
+            raise cls.skipException('Multitenancy tests are disabled.')
+        utils.check_skip_if_microversion_not_supported("2.70")
+
+    @classmethod
+    def resource_setup(cls):
+        super(ShareServerMultipleSubNegativeTest, cls).resource_setup()
+        cls.share_network = cls.alt_shares_v2_client.get_share_network(
+            cls.alt_shares_v2_client.share_network_id)['share_network']
+
+    @tc.attr(base.TAG_NEGATIVE, base.TAG_API_WITH_BACKEND)
+    @decorators.idempotent_id('1e2a9415-b02f-4c02-812d-bedc361f92ce')
+    def test_create_share_multiple_subnets_to_unsupported_backend(self):
+        extra_specs = {
+            'driver_handles_share_servers': CONF.share.multitenancy_enabled,
+            'share_server_multiple_subnet_support': False
+        }
+        share_type = self.create_share_type(extra_specs=extra_specs)
+        pools = self.get_pools_matching_share_type(
+            share_type, client=self.admin_shares_v2_client)
+        zones = self.get_availability_zones_matching_share_type(
+            share_type)
+        if not pools or not zones:
+            raise self.skipException("At least one backend that supports "
+                                     "adding multiple subnets into a share "
+                                     "network is needed for this test.")
+        extra_specs = {'pool_name': pools[0]['pool'],
+                       'availability_zone': zones[0]}
+        self.admin_shares_v2_client.update_share_type_extra_specs(
+            share_type['id'], extra_specs)
+
+        share_network_id = self.create_share_network(
+            cleanup_in_class=True)["id"]
+        default_subnet = utils.share_network_get_default_subnet(
+            self.share_network)
+        subnet_data = {
+            'neutron_net_id': default_subnet.get('neutron_net_id'),
+            'neutron_subnet_id': default_subnet.get('neutron_subnet_id'),
+            'share_network_id': share_network_id,
+            'availability_zone': zones[0],
+            'cleanup_in_class': False
+        }
+        subnet1 = self.create_share_network_subnet(**subnet_data)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.shares_v2_client.delete_subnet,
+                        share_network_id, subnet1['id'])
+        subnet2 = self.create_share_network_subnet(**subnet_data)
+        self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+                        self.shares_v2_client.delete_subnet,
+                        share_network_id, subnet2['id'])
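+        # share creation must fail, since the share type only matches
+        # backends without multiple subnet support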
+        self.assertRaises(
+            share_exceptions.ShareBuildErrorException,
+            self.create_share,
+            share_type_id=share_type['id'],
+            share_network_id=share_network_id,
+            availability_zone=zones[0],
+            cleanup_in_class=False
+        )
diff --git a/manila_tempest_tests/utils.py b/manila_tempest_tests/utils.py
index d0860f0..5ecfb36 100644
--- a/manila_tempest_tests/utils.py
+++ b/manila_tempest_tests/utils.py
@@ -94,6 +94,16 @@
     return lambda f: f
 
 
+def skip_if_is_microversion_ge(left, right):
+    """Skip if version for left is greater than or equal to the right one."""
+
+    if is_microversion_ge(left, right):
+        reason = ("Skipped. Test requires microversion "
+                  "< than '%s'." % right)
+        return testtools.skip(reason)
+    return lambda f: f
+
+
 def check_skip_if_microversion_not_supported(microversion):
     """Callable method for tests that are microversion-specific."""
     if not is_microversion_supported(microversion):
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 0000000..bc1dc04
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+  roles:
+    - enable-fips
diff --git a/zuul.d/manila-tempest-jobs.yaml b/zuul.d/manila-tempest-jobs.yaml
index 494b285..6d786fd 100644
--- a/zuul.d/manila-tempest-jobs.yaml
+++ b/zuul.d/manila-tempest-jobs.yaml
@@ -117,7 +117,8 @@
         tempest: true
 
 - job:
-    name: manila-tempest-plugin-zfsonlinux
+    name: manila-tempest-plugin-zfsonlinux-base
+    abstract: true
     description: Test ZFSOnLinux multibackend (DHSS=False) with postgresql db
     parent: manila-tempest-plugin-base
     vars:
@@ -162,11 +163,18 @@
               image_password: manila
 
 - job:
-    name: manila-tempest-plugin-lvm
+    name: manila-tempest-plugin-zfsonlinux
+    description: Test ZFSOnLinux multibackend (DHSS=False) with postgresql db
+    parent: manila-tempest-plugin-zfsonlinux-base
+    branches: &ubuntu_jammy_test_image_branches ^(?!stable/(yoga|xena|wallaby|victoria|ussuri)).*$
+
+- job:
+    name: manila-tempest-plugin-lvm-base
     description: |
       Test LVM multibackend (DHSS=False) in a 4+6 (dual-stack) devstack
       environment with IPv6 control plane endpoints.
     parent: manila-tempest-plugin-ipv6-base
+    abstract: true
     required-projects:
       - openstack/neutron-dynamic-routing
     vars:
@@ -206,6 +214,14 @@
               image_password: manila
 
 - job:
+    name: manila-tempest-plugin-lvm
+    description: |
+      Test LVM multibackend (DHSS=False) in a 4+6 (dual-stack) devstack
+      environment with IPv6 control plane endpoints.
+    branches: *ubuntu_jammy_test_image_branches
+    parent: manila-tempest-plugin-lvm-base
+
+- job:
     name: manila-tempest-plugin-container
     description: |
       Test the container driver multibackend (DHSS=True) with CIFS
@@ -246,14 +262,17 @@
               backend_names: LONDON,PARIS
               multi_backend: true
               run_share_server_migration_tests: true
+              run_share_server_multiple_subnet_tests: true
+              run_network_allocation_update_tests: true
 
 - job:
-    name: manila-tempest-plugin-generic
+    name: manila-tempest-plugin-generic-base
+    abstract: true
     description: |
       Test the generic driver multibackend (DHSS=True) with NFS and CIFS
     parent: manila-tempest-plugin-base
     vars:
-      tempest_test_regex: '(^manila_tempest_tests.tests)(?=.*\[.*\bbackend\b.*\])'
+      tempest_test_regex: '(^manila_tempest_tests.tests.api)(?=.*\[.*\bbackend\b.*\])'
       # The generic driver uses nova VMs as share servers; running with a
       # high concurrency could starve the driver of RAM/Disk/CPUs to
       # function properly in a small single node devstack VM.
@@ -287,7 +306,38 @@
               image_password: manila
 
 - job:
-    name: manila-tempest-plugin-cephfs-native
+    name: manila-tempest-plugin-generic
+    description: |
+      Test the API test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-base
+    branches: *ubuntu_jammy_test_image_branches
+
+- job:
+    name: manila-tempest-plugin-generic-scenario-base
+    abstract: true
+    description: |
+      Test the scenario test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-base
+    vars:
+      tempest_test_regex: '(^manila_tempest_tests.tests.scenario)(?=.*\[.*\bbackend\b.*\])'
+      # The generic driver uses nova VMs as share servers; running with a
+      # high concurrency could starve the driver of RAM/Disk/CPUs to
+      # function properly in a small single node devstack VM.
+      tempest_concurrency: 1
+
+- job:
+    name: manila-tempest-plugin-generic-scenario
+    description: |
+      Test the scenario test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-scenario-base
+    branches: *ubuntu_jammy_test_image_branches
+
+- job:
+    name: manila-tempest-plugin-cephfs-native-base
+    abstract: true
     description: Test CephFS Native (DHSS=False)
     parent: manila-tempest-plugin-base
     required-projects:
@@ -325,9 +375,15 @@
               enable_protocols: cephfs
               image_password: manila
 
+- job:
+    name: manila-tempest-plugin-cephfs-native
+    description: Test CephFS Native (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-native-base
+    branches: *ubuntu_jammy_test_image_branches
 
 - job:
-    name: manila-tempest-plugin-cephfs-nfs
+    name: manila-tempest-plugin-cephfs-nfs-base
+    abstract: true
     description: Test CephFS NFS (DHSS=False)
     parent: manila-tempest-plugin-base
     required-projects:
@@ -372,6 +428,12 @@
               image_password: manila
 
 - job:
+    name: manila-tempest-plugin-cephfs-nfs
+    description: Test CephFS NFS (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-nfs-base
+    branches: *ubuntu_jammy_test_image_branches
+
+- job:
     name: manila-tempest-plugin-dummy-no-dhss
     description: Test the Dummy driver with DHSS=False
     parent: manila-tempest-plugin-standalone-base
@@ -490,6 +552,8 @@
               run_replication_tests: true
               run_revert_to_snapshot_tests: true
               run_share_server_migration_tests: true
+              run_share_server_multiple_subnet_tests: true
+              run_network_allocation_update_tests: true
 
 - job:
     name: manila-tempest-plugin-glusterfs-native
@@ -555,3 +619,53 @@
               enable_protocols: nfs
               # read-only access rules not supported
               enable_ro_access_level_for_protocols: ""
+
+- job:
+    name: manila-tempest-plugin-lvm-fips-base
+    parent: manila-tempest-plugin-lvm-base
+    nodeset: devstack-single-node-centos-9-stream
+    description: |
+      Run LVM DHSS false tests for manila project using FIPS.
+    pre-run:
+      - playbooks/enable-fips.yaml
+    vars:
+      nslookup_target: 'opendev.org'
+      devstack_localrc:
+        MANILA_SETUP_IPV6: false
+        NEUTRON_CREATE_INITIAL_NETWORKS: true
+        IP_VERSION: 4
+      configure_swap_size: 4096
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            validation:
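+              # the default (rsa) ssh keys are not usable on FIPS-enabled
+              # hosts, so validate instances with an ecdsa key instead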
+              ssh_key_type: 'ecdsa'
+            share:
+              run_ipv6_tests: false
+
+- job:
+    name: manila-tempest-plugin-lvm-fips
+    parent: manila-tempest-plugin-lvm-fips-base
+    branches: ^(?!stable/(yoga|xena|wallaby|victoria|ussuri)).*$
+
+- project-template:
+    name: manila-tempest-plugin-jobs-using-service-image
+    description: |
+      Runs jobs that will also perform scenario tests in the branches that are
+      newer than Yoga.
+    check:
+      jobs:
+        - manila-tempest-plugin-lvm
+        - manila-tempest-plugin-generic-scenario:
+            voting: false
+        - manila-tempest-plugin-generic:
+            voting: false
+        - manila-tempest-plugin-cephfs-native:
+            voting: false
+        - manila-tempest-plugin-cephfs-nfs:
+            voting: false
+        - manila-tempest-plugin-zfsonlinux:
+            voting: false
+    gate:
+      jobs:
+        - manila-tempest-plugin-lvm
diff --git a/zuul.d/manila-tempest-stable-jobs.yaml b/zuul.d/manila-tempest-stable-jobs.yaml
index 3d2447e..aa33c16 100644
--- a/zuul.d/manila-tempest-stable-jobs.yaml
+++ b/zuul.d/manila-tempest-stable-jobs.yaml
@@ -1,27 +1,117 @@
 # Stable branch jobs to test the trunk version of manila-tempest-plugin against
 # released stable branches of manila
+
+- job:
+    name: manila-tempest-plugin-generic-scenario-stable
+    description: |
+      Test the scenario test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-scenario-base
+    branches: &manila_tempest_image_pinned_branches ^(stable/(yoga|xena|wallaby|victoria|ussuri)).*$
+    vars: &manila_tempest_image_pinned_vars
+      devstack_localrc:
+        # NOTE(carloss): Pinning manila service image to a Focal version,
+        # since on Zed we moved to Ubuntu Jammy (22), and it requires more
+        # VM resources.
+        MANILA_SERVICE_IMAGE_URL: https://tarballs.opendev.org/openstack/manila-image-elements/images/manila-service-image-1.3.0-76-ga216835.qcow2
+        MANILA_SERVICE_IMAGE_NAME: manila-service-image-1.3.0-76-ga216835
+
+- job:
+    name: manila-tempest-plugin-generic-stable
+    description: |
+      Test the API test cases on the generic driver multibackend
+      (DHSS=True) with NFS and CIFS
+    parent: manila-tempest-plugin-generic-base
+    branches: *manila_tempest_image_pinned_branches
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-lvm-stable
+    # NOTE(carloss): we are aware that focal is the current default, but
+    # in order to avoid breakages when devstack-minimal switches to a newer
+    # branch, we are pinning focal here.
+    nodeset: openstack-single-node-focal
+    description: |
+      Test LVM multibackend (DHSS=False) in a 4+6 (dual-stack) devstack
+      environment with IPv6 control plane endpoints.
+    branches: *manila_tempest_image_pinned_branches
+    parent: manila-tempest-plugin-lvm-base
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-cephfs-native-stable
+    description: Test CephFS Native (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-native-base
+    branches: *manila_tempest_image_pinned_branches
+    vars:
+      devstack_localrc:
+        # NOTE(gouthamr): The following need to use the latest images, however, there's a bug
+        # with cephfs on Ubuntu 20.04 LTS: https://tracker.ceph.com/issues/47236
+        # the latest image is built as https://tarballs.opendev.org/openstack/manila-image-elements/images/manila-service-image-cephfs-master.qcow2
+        MANILA_SERVICE_IMAGE_URL: https://tarballs.opendev.org/openstack/manila-image-elements/images/manila-service-image-cephfs-1.3.0-58-g2859569.qcow2
+        MANILA_SERVICE_IMAGE_NAME: manila-service-image-cephfs-1.3.0-58-g2859569
+
+- job:
+    name: manila-tempest-plugin-cephfs-nfs-stable
+    description: Test CephFS NFS (DHSS=False)
+    parent: manila-tempest-plugin-cephfs-nfs-base
+    branches: *manila_tempest_image_pinned_branches
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-zfsonlinux-stable
+    description: Test ZFSOnLinux multibackend (DHSS=False) with postgresql db
+    parent: manila-tempest-plugin-zfsonlinux-base
+    branches: *manila_tempest_image_pinned_branches
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-lvm-fips-stable
+    parent: manila-tempest-plugin-lvm-fips-base
+    branches: ^(stable/(yoga|xena|wallaby)).*$
+    vars: *manila_tempest_image_pinned_vars
+
+- job:
+    name: manila-tempest-plugin-lvm-yoga
+    parent: manila-tempest-plugin-lvm-base
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/yoga
+    vars:
+      tempest_exclude_regex: "(^manila_tempest_tests.tests.scenario.*IPv6.*)"
+      <<: *manila_tempest_image_pinned_vars
+
 - job:
     name: manila-tempest-plugin-lvm-xena
-    parent: manila-tempest-plugin-lvm
-    override-checkout: stable/xena
+    parent: manila-tempest-plugin-lvm-base
     nodeset: openstack-single-node-focal
-    vars:
-        # NOTE(gouthamr): Disabled until https://launchpad.net/bugs/1940324 is
-        # fixed.
-        tempest_exclude_regex: "(^manila_tempest_tests.tests.scenario.*IPv6.*)"
+    override-checkout: stable/xena
+    vars: *manila_tempest_image_pinned_vars
 
 - job:
     name: manila-tempest-plugin-lvm-wallaby
-    parent: manila-tempest-plugin-lvm
+    parent: manila-tempest-plugin-lvm-base
+    nodeset: openstack-single-node-focal
     override-checkout: stable/wallaby
-    nodeset: openstack-single-node-focal
-    vars:
-        tempest_exclude_regex: ''
+    vars: *manila_tempest_image_pinned_vars
 
-- job:
-    name: manila-tempest-plugin-lvm-victoria
-    parent: manila-tempest-plugin-lvm
-    override-checkout: stable/victoria
-    nodeset: openstack-single-node-focal
-    vars:
-        tempest_exclude_regex: ''
+- project-template:
+    name: manila-tempest-plugin-jobs-using-service-image-stable
+    description: |
+      Runs jobs that will also perform scenario tests on the stable branches
+      Yoga and older.
+    check:
+      jobs:
+        - manila-tempest-plugin-lvm-stable
+        - manila-tempest-plugin-generic-scenario-stable:
+            voting: false
+        - manila-tempest-plugin-generic-stable:
+            voting: false
+        - manila-tempest-plugin-cephfs-native-stable:
+            voting: false
+        - manila-tempest-plugin-cephfs-nfs-stable:
+            voting: false
+        - manila-tempest-plugin-zfsonlinux-stable:
+            voting: false
+    gate:
+      jobs:
+        - manila-tempest-plugin-lvm-stable
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index ce21547..9db197c 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -2,32 +2,23 @@
     templates:
       - check-requirements
       - tempest-plugin-jobs
+      - manila-tempest-plugin-jobs-using-service-image
+    queue: manila
     check:
       jobs:
         - manila-tempest-plugin-dummy-no-dhss
         - manila-tempest-plugin-dummy-dhss
-        - manila-tempest-plugin-lvm
+        - manila-tempest-plugin-lvm-yoga
         - manila-tempest-plugin-lvm-xena
         - manila-tempest-plugin-lvm-wallaby
-        - manila-tempest-plugin-lvm-victoria
-        - manila-tempest-plugin-zfsonlinux:
-            voting: false
-        - manila-tempest-plugin-cephfs-native:
-            voting: false
-        - manila-tempest-plugin-cephfs-nfs:
-            voting: false
         - manila-tempest-plugin-container:
             voting: false
-        - manila-tempest-plugin-generic:
-            voting: false
         - manila-tempest-plugin-glusterfs-nfs:
             voting: false
     gate:
-      queue: manila
       jobs:
         - manila-tempest-plugin-dummy-no-dhss
         - manila-tempest-plugin-dummy-dhss
-        - manila-tempest-plugin-lvm
     experimental:
       jobs:
         - manila-tempest-plugin-glusterfs-native: