Merge "Revert "Test create VM with unaddressed port""
diff --git a/doc/source/write_tests.rst b/doc/source/write_tests.rst
index 3626a3f..0c604cc 100644
--- a/doc/source/write_tests.rst
+++ b/doc/source/write_tests.rst
@@ -83,7 +83,7 @@
           """
           super(TestExampleCase, cls).skip_checks()
          if not CONF.section.foo:
-              cls.skip('A helpful message')
+              raise cls.skipException('A helpful message')
 
       @classmethod
       def setup_credentials(cls):
diff --git a/releasenotes/notes/bug-2132971-a89a576348dcd1d6.yaml b/releasenotes/notes/bug-2132971-a89a576348dcd1d6.yaml
new file mode 100644
index 0000000..d21289c
--- /dev/null
+++ b/releasenotes/notes/bug-2132971-a89a576348dcd1d6.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed bug #2132971. ``test_rebuild_server`` will no longer expect a
+    floating IP when floating IP networks are disabled.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 33c141d..29ec4d5 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v46.0.0
    v45.0.0
    v44.0.0
    v43.0.0
diff --git a/releasenotes/source/v46.0.0.rst b/releasenotes/source/v46.0.0.rst
new file mode 100644
index 0000000..cbd2e95
--- /dev/null
+++ b/releasenotes/source/v46.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v46.0.0 Release Notes
+=====================
+
+.. release-notes:: 46.0.0 Release Notes
+   :version: 46.0.0
diff --git a/requirements.txt b/requirements.txt
index a1eff53..eafbe0a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,6 +1,6 @@
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 cliff!=2.9.0,>=2.8.0 # Apache-2.0
-jsonschema>=3.2.0 # MIT
+jsonschema>=4.5.0 # MIT
 testtools>=2.2.0 # MIT
 paramiko>=2.7.0 # LGPLv2.1+
 cryptography>=2.1 # BSD/Apache-2.0
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index e8011a6..ca654b5 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -37,6 +37,8 @@
     calls to Neutron to automatically allocate the network topology.
     """
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     force_tenant_isolation = True
 
     min_microversion = '2.37'
@@ -65,6 +67,14 @@
         cls.routers_client = cls.os_primary.routers_client
         cls.subnets_client = cls.os_primary.subnets_client
         cls.ports_client = cls.os_primary.ports_client
+        if CONF.enforce_scope.nova:
+            cls.reader_networks_client = cls.os_project_reader.networks_client
+            cls.reader_routers_client = cls.os_project_reader.routers_client
+            cls.reader_ports_client = cls.os_project_reader.ports_client
+        else:
+            cls.reader_networks_client = cls.networks_client
+            cls.reader_routers_client = cls.routers_client
+            cls.reader_ports_client = cls.ports_client
 
     @classmethod
     def resource_setup(cls):
@@ -74,14 +84,14 @@
         tenant_id = cls.networks_client.tenant_id
         # (1) Retrieve non-public network list owned by the tenant.
         search_opts = {'tenant_id': tenant_id, 'shared': False}
-        nets = cls.networks_client.list_networks(
+        nets = cls.reader_networks_client.list_networks(
             **search_opts).get('networks', [])
         if nets:
             raise lib_excs.TempestException(
                 'Found tenant networks: %s' % nets)
         # (2) Retrieve shared network list.
         search_opts = {'shared': True}
-        nets = cls.networks_client.list_networks(
+        nets = cls.reader_networks_client.list_networks(
             **search_opts).get('networks', [])
         if nets:
             raise cls.skipException('Found shared networks: %s' % nets)
@@ -93,7 +103,7 @@
         # Find the auto-allocated router for the tenant.
         # This is a bit hacky since we don't have a great way to find the
         # auto-allocated router given the private tenant network we have.
-        routers = cls.routers_client.list_routers().get('routers', [])
+        routers = cls.reader_routers_client.list_routers().get('routers', [])
         if len(routers) > 1:
             # This indicates a race where nova is concurrently calling the
             # neutron auto-allocated-topology API for multiple server builds
@@ -109,7 +119,7 @@
         # created. All such networks will be in the current tenant. Neutron
         # will cleanup duplicate resources automatically, so ignore 404s.
         search_opts = {'tenant_id': cls.networks_client.tenant_id}
-        networks = cls.networks_client.list_networks(
+        networks = cls.reader_networks_client.list_networks(
             **search_opts).get('networks', [])
 
         for router in routers:
@@ -127,7 +137,7 @@
 
         for network in networks:
             # Get and delete the ports for the given network.
-            ports = cls.ports_client.list_ports(
+            ports = cls.reader_ports_client.list_ports(
                 network_id=network['id']).get('ports', [])
             for port in ports:
                 test_utils.call_and_ignore_notfound_exc(
@@ -150,7 +160,7 @@
         # create the server with no networking
         server = self.create_test_server(networks='none', wait_until='ACTIVE')
         # get the server ips
-        addresses = self.servers_client.list_addresses(
+        addresses = self.reader_servers_client.list_addresses(
             server['id'])['addresses']
         # assert that there is no networking
         self.assertEqual({}, addresses)
@@ -180,7 +190,7 @@
         server_nets = set()
         for server in servers:
             # get the server ips
-            addresses = self.servers_client.list_addresses(
+            addresses = self.reader_servers_client.list_addresses(
                 server['id'])['addresses']
             # assert that there is networking (should only be one)
             self.assertEqual(1, len(addresses))
@@ -196,7 +206,7 @@
         search_opts = {'tenant_id': self.networks_client.tenant_id,
                        'shared': False,
                        'admin_state_up': True}
-        nets = self.networks_client.list_networks(
+        nets = self.reader_networks_client.list_networks(
             **search_opts).get('networks', [])
         self.assertEqual(1, len(nets))
         # verify the single private tenant network is the one that the servers
diff --git a/tempest/api/compute/admin/test_availability_zone.py b/tempest/api/compute/admin/test_availability_zone.py
index 3eb0d9a..10730c2 100644
--- a/tempest/api/compute/admin/test_availability_zone.py
+++ b/tempest/api/compute/admin/test_availability_zone.py
@@ -14,21 +14,30 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class AZAdminV2TestJSON(base.BaseV2ComputeAdminTest):
     """Tests Availability Zone API List"""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_clients(cls):
         super(AZAdminV2TestJSON, cls).setup_clients()
         cls.client = cls.availability_zone_admin_client
+        if CONF.enforce_scope.nova:
+            cls.reader_client = cls.os_project_reader.availability_zone_client
+        else:
+            cls.reader_client = cls.availability_zone_client
 
     @decorators.idempotent_id('d3431479-8a09-4f76-aa2d-26dc580cb27c')
     def test_get_availability_zone_list(self):
         """Test listing availability zones"""
-        availability_zone = self.client.list_availability_zones()
+        availability_zone = self.reader_client.list_availability_zones()
         self.assertNotEmpty(availability_zone['availabilityZoneInfo'])
 
     @decorators.idempotent_id('ef726c58-530f-44c2-968c-c7bed22d5b8c')
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 293e284..b56faa9 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -29,6 +29,8 @@
 class ServersWithSpecificFlavorTestJSON(base.BaseV2ComputeAdminTest):
     """Test creating servers with specific flavor"""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_credentials(cls):
         cls.prepare_instance_network()
@@ -38,6 +40,10 @@
     def setup_clients(cls):
         super(ServersWithSpecificFlavorTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
+        if CONF.enforce_scope.nova:
+            cls.reader_flavors_client = cls.os_project_reader.flavors_client
+        else:
+            cls.reader_flavors_client = cls.flavors_client
 
     @decorators.idempotent_id('b3c7bcfc-bb5b-4e22-b517-c7f686b802ca')
     @testtools.skipUnless(CONF.validation.run_validation,
@@ -46,7 +52,7 @@
                       "Aarch64 does not support ephemeral disk test")
     def test_verify_created_server_ephemeral_disk(self):
         """Verify that the ephemeral disk is created when creating server"""
-        flavor_base = self.flavors_client.show_flavor(
+        flavor_base = self.reader_flavors_client.show_flavor(
             self.flavor_ref)['flavor']
 
         def create_flavor_with_ephemeral(ephem_disk):
@@ -67,7 +73,7 @@
             # create server which should have been contained in
             # self.flavor_ref.
             extra_spec_keys = \
-                self.admin_flavors_client.list_flavor_extra_specs(
+                self.reader_flavors_client.list_flavor_extra_specs(
                     self.flavor_ref)['extra_specs']
             if extra_spec_keys:
                 self.admin_flavors_client.set_flavor_extra_spec(
@@ -96,7 +102,7 @@
                         server_no_eph_disk['id'])
 
         # Get partition number of server without ephemeral disk.
-        server_no_eph_disk = self.client.show_server(
+        server_no_eph_disk = self.reader_servers_client.show_server(
             server_no_eph_disk['id'])['server']
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(server_no_eph_disk,
@@ -124,7 +130,7 @@
                         self.servers_client.delete_server,
                         server_with_eph_disk['id'])
 
-        server_with_eph_disk = self.client.show_server(
+        server_with_eph_disk = self.reader_servers_client.show_server(
             server_with_eph_disk['id'])['server']
         linux_client = remote_client.RemoteClient(
             self.get_server_ip(server_with_eph_disk,
diff --git a/tempest/api/compute/admin/test_delete_server.py b/tempest/api/compute/admin/test_delete_server.py
index c625939..982c3a7 100644
--- a/tempest/api/compute/admin/test_delete_server.py
+++ b/tempest/api/compute/admin/test_delete_server.py
@@ -15,12 +15,17 @@
 
 from tempest.api.compute import base
 from tempest.common import waiters
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class DeleteServersAdminTestJSON(base.BaseV2ComputeAdminTest):
     """Test deletion of servers"""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     # NOTE: Server creations of each test class should be under 10
     # for preventing "Quota exceeded for instances".
 
@@ -36,7 +41,7 @@
         server = self.create_test_server(wait_until='ACTIVE')
         self.admin_client.reset_state(server['id'], state='error')
         # Verify server's state
-        server = self.non_admin_client.show_server(server['id'])['server']
+        server = self.reader_servers_client.show_server(server['id'])['server']
         self.assertEqual(server['status'], 'ERROR')
         self.non_admin_client.delete_server(server['id'])
         waiters.wait_for_server_termination(self.servers_client,
diff --git a/tempest/api/compute/admin/test_flavors.py b/tempest/api/compute/admin/test_flavors.py
index cece905..48a2867 100644
--- a/tempest/api/compute/admin/test_flavors.py
+++ b/tempest/api/compute/admin/test_flavors.py
@@ -27,6 +27,16 @@
 class FlavorsAdminTestJSON(base.BaseV2ComputeAdminTest):
     """Tests Flavors API Create and Delete that require admin privileges"""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
+    @classmethod
+    def setup_clients(cls):
+        super(FlavorsAdminTestJSON, cls).setup_clients()
+        if CONF.enforce_scope.nova:
+            cls.reader_flavors_client = cls.os_project_reader.flavors_client
+        else:
+            cls.reader_flavors_client = cls.flavors_client
+
     @classmethod
     def resource_setup(cls):
         super(FlavorsAdminTestJSON, cls).resource_setup()
@@ -92,7 +102,7 @@
                            rxtx_factor=self.rxtx)
 
         # Check if flavor is present in list
-        flavors_list = self.admin_flavors_client.list_flavors(
+        flavors_list = self.reader_flavors_client.list_flavors(
             detail=True)['flavors']
         self.assertIn(flavor_name, [f['name'] for f in flavors_list])
 
@@ -130,13 +140,15 @@
         verify_flavor_response_extension(flavor)
 
         # Verify flavor is retrieved
-        flavor = self.admin_flavors_client.show_flavor(new_flavor_id)['flavor']
+        flavor = self.reader_flavors_client.show_flavor(
+            new_flavor_id)['flavor']
         self.assertEqual(flavor['name'], flavor_name)
         verify_flavor_response_extension(flavor)
 
         # Check if flavor is present in list
         flavors_list = [
-            f for f in self.flavors_client.list_flavors(detail=True)['flavors']
+            f for f in self.reader_flavors_client.list_flavors(
+                detail=True)['flavors']
             if f['name'] == flavor_name
         ]
         self.assertNotEmpty(flavors_list)
@@ -160,7 +172,7 @@
                            disk=self.disk,
                            is_public="False")
         # Verify flavor is not retrieved
-        flavors_list = self.admin_flavors_client.list_flavors(
+        flavors_list = self.reader_flavors_client.list_flavors(
             detail=True)['flavors']
         self.assertNotIn(flavor_name, [f['name'] for f in flavors_list])
 
@@ -197,7 +209,8 @@
                            disk=self.disk,
                            is_public="True")
         # Verify flavor is retrieved with new user
-        flavors_list = self.flavors_client.list_flavors(detail=True)['flavors']
+        flavors_list = self.reader_flavors_client.list_flavors(detail=True)[
+            'flavors']
         self.assertIn(flavor_name, [f['name'] for f in flavors_list])
 
     @decorators.idempotent_id('fb9cbde6-3a0e-41f2-a983-bdb0a823c44e')
diff --git a/tempest/api/compute/admin/test_flavors_access.py b/tempest/api/compute/admin/test_flavors_access.py
index c86ff76..f12d239 100644
--- a/tempest/api/compute/admin/test_flavors_access.py
+++ b/tempest/api/compute/admin/test_flavors_access.py
@@ -14,8 +14,11 @@
 #    under the License.
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class FlavorsAccessTestJSON(base.BaseV2ComputeAdminTest):
     """Tests Flavor Access API extension.
@@ -23,6 +26,16 @@
     Add and remove Flavor Access require admin privileges.
     """
 
+    credentials = ['primary', 'admin', 'project_reader']
+
+    @classmethod
+    def setup_clients(cls):
+        super(FlavorsAccessTestJSON, cls).setup_clients()
+        if CONF.enforce_scope.nova:
+            cls.reader_flavors_client = cls.os_project_reader.flavors_client
+        else:
+            cls.reader_flavors_client = cls.flavors_client
+
     @classmethod
     def resource_setup(cls):
         super(FlavorsAccessTestJSON, cls).resource_setup()
@@ -64,7 +77,8 @@
         self.assertIn(resp_body, add_body)
 
         # The flavor is present in list.
-        flavors = self.flavors_client.list_flavors(detail=True)['flavors']
+        flavors = self.reader_flavors_client.list_flavors(
+            detail=True)['flavors']
         self.assertIn(flavor['id'], map(lambda x: x['id'], flavors))
 
         # Remove flavor access from a tenant.
@@ -73,5 +87,6 @@
         self.assertNotIn(resp_body, remove_body)
 
         # The flavor is not present in list.
-        flavors = self.flavors_client.list_flavors(detail=True)['flavors']
+        flavors = self.reader_flavors_client.list_flavors(
+            detail=True)['flavors']
         self.assertNotIn(flavor['id'], map(lambda x: x['id'], flavors))
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs.py b/tempest/api/compute/admin/test_flavors_extra_specs.py
index 5829269..cd15d76 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs.py
@@ -28,6 +28,16 @@
     GET Flavor Extra specs can be performed even without admin privileges.
     """
 
+    credentials = ['primary', 'admin', 'project_reader']
+
+    @classmethod
+    def setup_clients(cls):
+        super(FlavorsExtraSpecsTestJSON, cls).setup_clients()
+        if CONF.enforce_scope.nova:
+            cls.reader_flavors_client = cls.os_project_reader.flavors_client
+        else:
+            cls.reader_flavors_client = cls.flavors_client
+
     @classmethod
     def resource_setup(cls):
         super(FlavorsExtraSpecsTestJSON, cls).resource_setup()
@@ -69,7 +79,7 @@
             self.flavor['id'], **specs)['extra_specs']
         self.assertEqual(set_body, specs)
         # GET extra specs and verify
-        get_body = (self.admin_flavors_client.list_flavor_extra_specs(
+        get_body = (self.reader_flavors_client.list_flavor_extra_specs(
             self.flavor['id'])['extra_specs'])
         self.assertEqual(get_body, specs)
 
@@ -80,7 +90,7 @@
 
         # GET extra specs and verify the value of the 'hw:cpu_policy'
         # is the same as before
-        get_body = self.admin_flavors_client.list_flavor_extra_specs(
+        get_body = self.reader_flavors_client.list_flavor_extra_specs(
             self.flavor['id'])['extra_specs']
         self.assertEqual(
             get_body, {'hw:numa_nodes': '2', 'hw:cpu_policy': 'shared'}
@@ -93,7 +103,7 @@
         self.admin_flavors_client.unset_flavor_extra_spec(
             self.flavor['id'], 'hw:cpu_policy'
         )
-        get_body = self.admin_flavors_client.list_flavor_extra_specs(
+        get_body = self.reader_flavors_client.list_flavor_extra_specs(
             self.flavor['id'])['extra_specs']
         self.assertEmpty(get_body)
 
@@ -103,7 +113,7 @@
         specs = {'hw:numa_nodes': '1', 'hw:cpu_policy': 'shared'}
         self.admin_flavors_client.set_flavor_extra_spec(self.flavor['id'],
                                                         **specs)
-        body = (self.flavors_client.list_flavor_extra_specs(
+        body = (self.reader_flavors_client.list_flavor_extra_specs(
             self.flavor['id'])['extra_specs'])
 
         for key in specs:
@@ -119,7 +129,7 @@
         self.assertEqual(body['hw:numa_nodes'], '1')
         self.assertIn('hw:cpu_policy', body)
 
-        body = self.flavors_client.show_flavor_extra_spec(
+        body = self.reader_flavors_client.show_flavor_extra_spec(
             self.flavor['id'], 'hw:numa_nodes')
         self.assertEqual(body['hw:numa_nodes'], '1')
         self.assertNotIn('hw:cpu_policy', body)
diff --git a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
index 7f518d2..f6d15f5 100644
--- a/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
+++ b/tempest/api/compute/admin/test_flavors_extra_specs_negative.py
@@ -29,6 +29,16 @@
     SET, UNSET, UPDATE Flavor Extra specs require admin privileges.
     """
 
+    credentials = ['primary', 'admin', 'project_reader']
+
+    @classmethod
+    def setup_clients(cls):
+        super(FlavorsExtraSpecsNegativeTestJSON, cls).setup_clients()
+        if CONF.enforce_scope.nova:
+            cls.reader_flavors_client = cls.os_project_reader.flavors_client
+        else:
+            cls.reader_flavors_client = cls.flavors_client
+
     @classmethod
     def resource_setup(cls):
         super(FlavorsExtraSpecsNegativeTestJSON, cls).resource_setup()
@@ -110,7 +120,7 @@
     def test_flavor_get_nonexistent_key(self):
         """Getting non existence flavor extra spec key should fail"""
         self.assertRaises(lib_exc.NotFound,
-                          self.flavors_client.show_flavor_extra_spec,
+                          self.reader_flavors_client.show_flavor_extra_spec,
                           self.flavor['id'],
                           'hw:cpu_thread_policy')
 
diff --git a/tempest/api/compute/admin/test_flavors_microversions.py b/tempest/api/compute/admin/test_flavors_microversions.py
index d904cbd..4380326 100644
--- a/tempest/api/compute/admin/test_flavors_microversions.py
+++ b/tempest/api/compute/admin/test_flavors_microversions.py
@@ -13,9 +13,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
+CONF = config.CONF
+
 
 class FlavorsV255TestJSON(base.BaseV2ComputeAdminTest):
     """Test flavors API with compute microversion greater than 2.54"""
@@ -23,9 +26,19 @@
     min_microversion = '2.55'
     max_microversion = 'latest'
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     # NOTE(gmann): This class tests the flavors APIs
     # response schema for the 2.55 microversion.
 
+    @classmethod
+    def setup_clients(cls):
+        super(FlavorsV255TestJSON, cls).setup_clients()
+        if CONF.enforce_scope.nova:
+            cls.reader_flavors_client = cls.os_project_reader.flavors_client
+        else:
+            cls.reader_flavors_client = cls.flavors_client
+
     @decorators.idempotent_id('61976b25-488d-41dc-9dcb-cb9693a7b075')
     def test_crud_flavor(self):
         """Test create/show/update/list flavor
@@ -40,14 +53,14 @@
                                            disk=10,
                                            id=flavor_id)['id']
         # Checking show API response schema
-        self.flavors_client.show_flavor(new_flavor_id)
+        self.reader_flavors_client.show_flavor(new_flavor_id)
         # Checking update API response schema
         self.admin_flavors_client.update_flavor(new_flavor_id,
                                                 description='new')
         # Checking list details API response schema
-        self.flavors_client.list_flavors(detail=True)
+        self.reader_flavors_client.list_flavors(detail=True)
         # Checking list API response schema
-        self.flavors_client.list_flavors()
+        self.reader_flavors_client.list_flavors()
 
 
 class FlavorsV261TestJSON(FlavorsV255TestJSON):
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 526c8a7..5b7614d 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -35,7 +35,7 @@
 class LiveMigrationTestBase(base.BaseV2ComputeAdminTest):
     """Test live migration operations supported by admin user"""
 
-    credentials = ['primary', 'admin', 'project_manager']
+    credentials = ['primary', 'admin', 'project_manager', 'project_reader']
     create_default_network = True
 
     @classmethod
@@ -59,6 +59,10 @@
         cls.ports_client = cls.os_primary.ports_client
         cls.trunks_client = cls.os_primary.trunks_client
         cls.server_client = cls.admin_servers_client
+        if CONF.enforce_scope.nova:
+            cls.reader_ports_client = cls.os_project_reader.ports_client
+        else:
+            cls.reader_ports_client = cls.ports_client
 
     def _migrate_server_to(self, server_id, dest_host, volume_backed=False,
                            use_manager_client=False):
@@ -101,7 +105,8 @@
 
         self._migrate_server_to(server_id, target_host, volume_backed,
                                 use_manager_client)
-        waiters.wait_for_server_status(self.servers_client, server_id, state)
+        waiters.wait_for_server_status(
+            self.reader_servers_client, server_id, state)
 
         migration_list = (self.os_admin.migrations_client.list_migrations()
                           ['migrations'])
@@ -156,7 +161,7 @@
 
         if state == 'PAUSED':
             self.admin_servers_client.pause_server(server_id)
-            waiters.wait_for_server_status(self.admin_servers_client,
+            waiters.wait_for_server_status(self.reader_servers_client,
                                            server_id, state)
 
         LOG.info("Live migrate from source %s to destination %s",
@@ -230,11 +235,11 @@
         # Attach the volume to the server
         self.attach_volume(server, volume, device='/dev/xvdb',
                            wait_for_detach=False)
-        server = self.admin_servers_client.show_server(server_id)['server']
+        server = self.reader_servers_client.show_server(server_id)['server']
         volume_id1 = server["os-extended-volumes:volumes_attached"][0]["id"]
         self._live_migrate(server_id, target_host, 'ACTIVE')
 
-        server = self.admin_servers_client.show_server(server_id)['server']
+        server = self.reader_servers_client.show_server(server_id)['server']
         volume_id2 = server["os-extended-volumes:volumes_attached"][0]["id"]
 
         self.assertEqual(volume_id1, volume_id2)
@@ -284,7 +289,7 @@
         return trunk, parent, subport
 
     def _is_port_status_active(self, port_id):
-        port = self.ports_client.show_port(port_id)['port']
+        port = self.reader_ports_client.show_port(port_id)['port']
         return port['status'] == 'ACTIVE'
 
     @decorators.unstable_test(bug='2024160')
@@ -309,7 +314,7 @@
             test_utils.call_until_true(
                 self._is_port_status_active, CONF.validation.connect_timeout,
                 5, parent['id']))
-        subport = self.ports_client.show_port(subport['id'])['port']
+        subport = self.reader_ports_client.show_port(subport['id'])['port']
 
         if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
             # not to specify a host so that the scheduler will pick one
@@ -530,7 +535,7 @@
                      "to that force completed live migration is not "
                      "performed.", server_id, in_progress_migration_uuid)
 
-        waiters.wait_for_server_status(self.mgr_server_client,
+        waiters.wait_for_server_status(self.reader_servers_client,
                                        server_id, 'ACTIVE')
         # List migration with project_id as filter so that manager can
         # get its own project migrations.
diff --git a/tempest/api/compute/admin/test_live_migration_negative.py b/tempest/api/compute/admin/test_live_migration_negative.py
index c956d99..a5b6d71 100644
--- a/tempest/api/compute/admin/test_live_migration_negative.py
+++ b/tempest/api/compute/admin/test_live_migration_negative.py
@@ -26,6 +26,8 @@
 class LiveMigrationNegativeTest(base.BaseV2ComputeAdminTest):
     """Negative tests of live migration"""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def skip_checks(cls):
         super(LiveMigrationNegativeTest, cls).skip_checks()
@@ -49,8 +55,8 @@
 
         self.assertRaises(lib_exc.BadRequest, self._migrate_server_to,
                           server['id'], target_host)
-        waiters.wait_for_server_status(self.servers_client, server['id'],
-                                       'ACTIVE')
+        waiters.wait_for_server_status(self.reader_servers_client,
+                                       server['id'], 'ACTIVE')
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('6e2f94f5-2ee8-4830-bef5-5bc95bb0795b')
@@ -59,7 +65,7 @@
         server = self.create_test_server(wait_until="ACTIVE")
 
         self.admin_servers_client.suspend_server(server['id'])
-        waiters.wait_for_server_status(self.servers_client,
+        waiters.wait_for_server_status(self.reader_servers_client,
                                        server['id'], 'SUSPENDED')
 
         destination_host = self.get_host_other_than(server['id'])
diff --git a/tempest/api/compute/admin/test_migrations.py b/tempest/api/compute/admin/test_migrations.py
index 88847e6..fa9d68f 100644
--- a/tempest/api/compute/admin/test_migrations.py
+++ b/tempest/api/compute/admin/test_migrations.py
@@ -29,7 +29,7 @@
 class MigrationsAdminTest(base.BaseV2ComputeAdminTest):
     """Test migration operations supported by admin user"""
 
-    credentials = ['primary', 'admin', 'project_manager']
+    credentials = ['primary', 'admin', 'project_manager', 'project_reader']
 
     @classmethod
     def setup_clients(cls):
@@ -44,6 +44,10 @@
             LOG.info("Using project manager for migrating servers, "
                      "project manager user id: %s",
                      cls.mgr_server_client.user_id)
+        if CONF.enforce_scope.nova:
+            cls.reader_flavors_client = cls.os_project_reader.flavors_client
+        else:
+            cls.reader_flavors_client = cls.flavors_client
 
     @decorators.idempotent_id('75c0b83d-72a0-4cf8-a153-631e83e7d53f')
     def test_list_migrations(self):
@@ -84,7 +88,7 @@
 
         # First we have to create a flavor that we can delete so make a copy
         # of the normal flavor from which we'd create a server.
-        flavor = self.admin_flavors_client.show_flavor(
+        flavor = self.reader_flavors_client.show_flavor(
             self.flavor_ref)['flavor']
         flavor = self.admin_flavors_client.create_flavor(
             name=data_utils.rand_name(
@@ -100,7 +104,7 @@
         # because the environment may need some special extra specs to
         # create server which should have been contained in
         # self.flavor_ref.
-        extra_spec_keys = self.admin_flavors_client.list_flavor_extra_specs(
+        extra_spec_keys = self.reader_flavors_client.list_flavor_extra_specs(
             self.flavor_ref)['extra_specs']
         if extra_spec_keys:
             self.admin_flavors_client.set_flavor_extra_spec(
@@ -109,14 +113,15 @@
         # Now boot a server with the copied flavor.
         server = self.create_test_server(
             wait_until='ACTIVE', flavor=flavor['id'])
-        server = self.servers_client.show_server(server['id'])['server']
+        server = self.reader_servers_client.show_server(server['id'])['server']
 
         # If 'id' not in server['flavor'], we can only compare the flavor
         # details, so here we should save the to-be-deleted flavor's details,
         # for the flavor comparison after the server resizing.
         if not server['flavor'].get('id'):
             pre_flavor = {}
-            body = self.flavors_client.show_flavor(flavor['id'])['flavor']
+            body = (self.reader_flavors_client.show_flavor(flavor['id'])
+                    ['flavor'])
             for key in ['name', 'ram', 'vcpus', 'disk']:
                 pre_flavor[key] = body[key]
 
@@ -125,16 +130,16 @@
 
         # Now resize the server and wait for it to go into verify state.
         self.servers_client.resize_server(server['id'], self.flavor_ref_alt)
-        waiters.wait_for_server_status(self.servers_client, server['id'],
-                                       'VERIFY_RESIZE')
+        waiters.wait_for_server_status(self.reader_servers_client,
+                                       server['id'], 'VERIFY_RESIZE')
 
         # Now revert the resize, it should be OK even though the original
         # flavor used to boot the server was deleted.
         self.servers_client.revert_resize_server(server['id'])
-        waiters.wait_for_server_status(self.servers_client, server['id'],
-                                       'ACTIVE')
+        waiters.wait_for_server_status(self.reader_servers_client,
+                                       server['id'], 'ACTIVE')
 
-        server = self.servers_client.show_server(server['id'])['server']
+        server = self.reader_servers_client.show_server(server['id'])['server']
         if server['flavor'].get('id'):
             msg = ('server flavor is not same as flavor!')
             self.assertEqual(flavor['id'], server['flavor']['id'], msg)
@@ -158,7 +163,7 @@
 
         self.mgr_server_client.migrate_server(server['id'])
 
-        waiters.wait_for_server_status(self.servers_client,
+        waiters.wait_for_server_status(self.reader_servers_client,
                                        server['id'], 'VERIFY_RESIZE')
 
         if revert:
@@ -168,7 +173,7 @@
             self.servers_client.confirm_resize_server(server['id'])
             assert_func = self.assertNotEqual
 
-        waiters.wait_for_server_status(self.servers_client,
+        waiters.wait_for_server_status(self.reader_servers_client,
                                        server['id'], 'ACTIVE')
         dst_host = self.get_host_for_server(server['id'])
         assert_func(src_host, dst_host)
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index d7fb62d..64b9cc0 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -28,15 +28,21 @@
     """
     max_microversion = '2.35'
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_clients(cls):
         super(NetworksTest, cls).setup_clients()
         cls.client = cls.os_admin.compute_networks_client
+        if CONF.enforce_scope.nova:
+            cls.reader_client = cls.os_project_reader.compute_networks_client
+        else:
+            cls.reader_client = cls.client
 
     @decorators.idempotent_id('d206d211-8912-486f-86e2-a9d090d1f416')
     def test_get_network(self):
         """Test getting network from nova side"""
-        networks = self.client.list_networks()['networks']
+        networks = self.reader_client.list_networks()['networks']
         if CONF.compute.fixed_network_name:
             configured_network = [x for x in networks if x['label'] ==
                                   CONF.compute.fixed_network_name]
@@ -51,14 +57,14 @@
             raise self.skipException(
                 "Environment has no known-for-sure existing network.")
         configured_network = configured_network[0]
-        network = (self.client.show_network(configured_network['id'])
+        network = (self.reader_client.show_network(configured_network['id'])
                    ['network'])
         self.assertEqual(configured_network['label'], network['label'])
 
     @decorators.idempotent_id('df3d1046-6fa5-4b2c-ad0c-cfa46a351cb9')
     def test_list_all_networks(self):
         """Test getting all networks from nova side"""
-        networks = self.client.list_networks()['networks']
+        networks = self.reader_client.list_networks()['networks']
         # Check the configured network is in the list
         if CONF.compute.fixed_network_name:
             configured_network = CONF.compute.fixed_network_name
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 70711f5..ad4f625 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -31,6 +31,8 @@
 class QuotasAdminTestBase(base.BaseV2ComputeAdminTest):
     force_tenant_isolation = True
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     def setUp(self):
         # NOTE(mriedem): Avoid conflicts with os-quota-class-sets tests.
         self.useFixture(fixtures.LockFixture('compute_quotas'))
@@ -40,6 +42,10 @@
     def setup_clients(cls):
         super(QuotasAdminTestBase, cls).setup_clients()
         cls.adm_client = cls.os_admin.quotas_client
+        if CONF.enforce_scope.nova:
+            cls.reader_quotas_client = cls.os_project_reader.quotas_client
+        else:
+            cls.reader_quotas_client = cls.quotas_client
 
     def _get_updated_quotas(self):
         # Verify that GET shows the updated quota set of project
@@ -110,7 +116,7 @@
     def test_get_default_quotas(self):
         """Test admin can get the default compute quota set for a project"""
         expected_quota_set = self.default_quota_set | set(['id'])
-        quota_set = self.adm_client.show_default_quota_set(
+        quota_set = self.reader_quotas_client.show_default_quota_set(
             self.demo_tenant_id)['quota_set']
         self.assertEqual(quota_set['id'], self.demo_tenant_id)
         for quota in expected_quota_set:
@@ -121,7 +127,7 @@
                       'Legacy quota update not available with unified limits')
     def test_update_all_quota_resources_for_tenant(self):
         """Test admin can update all the compute quota limits for a project"""
-        default_quota_set = self.adm_client.show_default_quota_set(
+        default_quota_set = self.reader_quotas_client.show_default_quota_set(
             self.demo_tenant_id)['quota_set']
         new_quota_set = {'metadata_items': 256, 'ram': 10240,
                          'key_pairs': 200, 'instances': 20,
@@ -170,15 +176,16 @@
         project_id = project['id']
         self.addCleanup(identity.identity_utils(self.os_admin).delete_project,
                         project_id)
-        quota_set_default = (self.adm_client.show_quota_set(project_id)
-                             ['quota_set'])
+        quota_set_default = (self.adm_client.show_quota_set(
+            project_id)['quota_set'])
         ram_default = quota_set_default['ram']
 
         self.adm_client.update_quota_set(project_id, ram='5120')
 
         self.adm_client.delete_quota_set(project_id)
 
-        quota_set_new = self.adm_client.show_quota_set(project_id)['quota_set']
+        quota_set_new = (self.adm_client.show_quota_set(
+            project_id)['quota_set'])
         self.assertEqual(ram_default, quota_set_new['ram'])
 
 
@@ -227,6 +234,8 @@
 class QuotaClassesAdminTestJSON(base.BaseV2ComputeAdminTest):
     """Tests the os-quota-class-sets API to update default quotas."""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     def setUp(self):
         # All test cases in this class need to externally lock on doing
         # anything with default quota values.
@@ -237,6 +246,11 @@
     def resource_setup(cls):
         super(QuotaClassesAdminTestJSON, cls).resource_setup()
         cls.adm_client = cls.os_admin.quota_classes_client
+        if CONF.enforce_scope.nova:
+            cls.reader_quota_classes_client = (
+                cls.os_project_reader.quota_classes_client)
+        else:
+            cls.reader_quota_classes_client = cls.adm_client
 
     def _restore_default_quotas(self, original_defaults):
         LOG.debug("restoring quota class defaults")
@@ -270,8 +284,8 @@
         self.assertThat(update_body.items(),
                         matchers.ContainsAll(body.items()))
         # check quota values are changed
-        show_body = self.adm_client.show_quota_class_set(
-            'default')['quota_class_set']
+        show_body = (self.adm_client.show_quota_class_set(
+            'default')['quota_class_set'])
         self.assertThat(show_body.items(),
                         matchers.ContainsAll(body.items()))
 
diff --git a/tempest/api/compute/admin/test_quotas_negative.py b/tempest/api/compute/admin/test_quotas_negative.py
index ef89cc1..faf1330 100644
--- a/tempest/api/compute/admin/test_quotas_negative.py
+++ b/tempest/api/compute/admin/test_quotas_negative.py
@@ -27,6 +27,8 @@
 class QuotasAdminNegativeTestBase(base.BaseV2ComputeAdminTest):
     force_tenant_isolation = True
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_clients(cls):
         super(QuotasAdminNegativeTestBase, cls).setup_clients()
@@ -34,6 +36,12 @@
         cls.adm_client = cls.os_admin.quotas_client
         cls.sg_client = cls.security_groups_client
         cls.sgr_client = cls.security_group_rules_client
+        if CONF.enforce_scope.nova:
+            cls.reader_quotas_client = cls.os_project_reader.quotas_client
+            cls.reader_limits_client = cls.os_project_reader.limits_client
+        else:
+            cls.reader_quotas_client = cls.client
+            cls.reader_limits_client = cls.limits_client
 
     @classmethod
     def resource_setup(cls):
@@ -43,8 +51,8 @@
         cls.demo_tenant_id = cls.client.tenant_id
 
     def _update_quota(self, quota_item, quota_value):
-        quota_set = (self.adm_client.show_quota_set(self.demo_tenant_id)
-                     ['quota_set'])
+        quota_set = (self.reader_quotas_client.show_quota_set(
+            self.demo_tenant_id)['quota_set'])
         default_quota_value = quota_set[quota_item]
 
         self.adm_client.update_quota_set(self.demo_tenant_id,
@@ -112,8 +120,8 @@
     def test_security_groups_exceed_limit(self):
         """Negative test: Creation Security Groups over limit should FAIL"""
         # Set the quota to number of used security groups
-        sg_quota = self.limits_client.show_limits()['limits']['absolute'][
-            'totalSecurityGroupsUsed']
+        sg_quota = (self.reader_limits_client.show_limits()['limits']
+                    ['absolute']['totalSecurityGroupsUsed'])
         self._update_quota('security_groups', sg_quota)
 
         # Check we cannot create any more
diff --git a/tempest/api/compute/admin/test_security_groups.py b/tempest/api/compute/admin/test_security_groups.py
index 41acc94..f614218 100644
--- a/tempest/api/compute/admin/test_security_groups.py
+++ b/tempest/api/compute/admin/test_security_groups.py
@@ -31,11 +31,18 @@
 
     max_microversion = '2.35'
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_clients(cls):
         super(SecurityGroupsTestAdminJSON, cls).setup_clients()
         cls.adm_client = cls.os_admin.compute_security_groups_client
         cls.client = cls.security_groups_client
+        if CONF.enforce_scope.nova:
+            cls.reader_client = (
+                cls.os_project_reader.compute_security_groups_client)
+        else:
+            cls.reader_client = cls.client
 
     def _delete_security_group(self, securitygroup_id, admin=True):
         if admin:
@@ -93,8 +100,8 @@
 
         # Fetch all security groups for non-admin user with 'all_tenants'
         # search filter
-        fetched_list = (self.client.list_security_groups(all_tenants='true')
-                        ['security_groups'])
+        fetched_list = (self.reader_client.list_security_groups(
+            all_tenants='true')['security_groups'])
         sec_group_id_list = [sg['id'] for sg in fetched_list]
         # Now check that 'all_tenants='true' filter for non-admin user only
         # provide the requested non-admin user's created security groups,
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index 6c9aafb..62696ee 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -27,6 +27,8 @@
 
     create_default_network = True
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_clients(cls):
         super(ServersAdminTestJSON, cls).setup_clients()
@@ -48,7 +50,7 @@
         server = cls.create_test_server(name=cls.s2_name,
                                         wait_until='ACTIVE')
         cls.s2_id = server['id']
-        waiters.wait_for_server_status(cls.non_admin_client,
+        waiters.wait_for_server_status(cls.reader_servers_client,
                                        cls.s1_id, 'ACTIVE')
 
     @decorators.idempotent_id('06f960bb-15bb-48dc-873d-f96e89be7870')
@@ -56,11 +58,11 @@
         """Test filtering the list of servers by server error status"""
         params = {'status': 'error'}
         self.client.reset_state(self.s1_id, state='error')
-        body = self.non_admin_client.list_servers(**params)
+        body = self.reader_servers_client.list_servers(**params)
         # Reset server's state to 'active'
         self.client.reset_state(self.s1_id, state='active')
         # Verify server's state
-        server = self.client.show_server(self.s1_id)['server']
+        server = self.reader_servers_client.show_server(self.s1_id)['server']
         self.assertEqual(server['status'], 'ACTIVE')
         servers = body['servers']
         # Verify error server in list result
@@ -72,11 +74,13 @@
         """Test filtering the list of servers by invalid server status"""
         params = {'status': 'invalid_status'}
         if self.is_requested_microversion_compatible('2.37'):
-            body = self.client.list_servers(detail=True, **params)
+            body = self.reader_servers_client.list_servers(
+                detail=True, **params)
             servers = body['servers']
             self.assertEmpty(servers)
         else:
-            self.assertRaises(lib_exc.BadRequest, self.client.list_servers,
+            self.assertRaises(lib_exc.BadRequest,
+                              self.reader_servers_client.list_servers,
                               detail=True, **params)
 
     @decorators.idempotent_id('51717b38-bdc1-458b-b636-1cf82d99f62f')
@@ -154,7 +158,8 @@
 
         nonexistent_params = {'host': 'nonexistent_host',
                               'all_tenants': '1'}
-        nonexistent_body = self.client.list_servers(**nonexistent_params)
+        nonexistent_body = self.client.list_servers(
+            **nonexistent_params)
         nonexistent_servers = nonexistent_body['servers']
         self.assertNotIn(server['id'],
                          map(lambda x: x['id'], nonexistent_servers))
@@ -166,14 +171,14 @@
         self.client.reset_state(self.s1_id, state='error')
 
         # Verify server's state
-        server = self.client.show_server(self.s1_id)['server']
+        server = self.reader_servers_client.show_server(self.s1_id)['server']
         self.assertEqual(server['status'], 'ERROR')
 
         # Reset server's state to 'active'
         self.client.reset_state(self.s1_id, state='active')
 
         # Verify server's state
-        server = self.client.show_server(self.s1_id)['server']
+        server = self.reader_servers_client.show_server(self.s1_id)['server']
         self.assertEqual(server['status'], 'ACTIVE')
 
     @decorators.idempotent_id('682cb127-e5bb-4f53-87ce-cb9003604442')
@@ -187,7 +192,8 @@
         self.client.reset_state(self.s1_id, state='error')
         rebuilt_server = self.non_admin_client.rebuild_server(
             self.s1_id, self.image_ref_alt)['server']
-        self.addCleanup(waiters.wait_for_server_status, self.non_admin_client,
+        self.addCleanup(waiters.wait_for_server_status,
+                        self.reader_servers_client,
                         self.s1_id, 'ACTIVE')
         self.addCleanup(self.non_admin_client.rebuild_server, self.s1_id,
                         self.image_ref)
@@ -197,11 +203,11 @@
         rebuilt_image_id = rebuilt_server['image']['id']
         self.assertEqual(self.image_ref_alt, rebuilt_image_id)
         self.assert_flavor_equal(self.flavor_ref, rebuilt_server['flavor'])
-        waiters.wait_for_server_status(self.non_admin_client,
+        waiters.wait_for_server_status(self.reader_servers_client,
                                        rebuilt_server['id'], 'ACTIVE',
                                        raise_on_error=False)
         # Verify the server properties after rebuilding
-        server = (self.non_admin_client.show_server(rebuilt_server['id'])
+        server = (self.reader_servers_client.show_server(rebuilt_server['id'])
                   ['server'])
         rebuilt_image_id = server['image']['id']
         self.assertEqual(self.image_ref_alt, rebuilt_image_id)
@@ -235,20 +241,22 @@
 
     min_microversion = '2.75'
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @decorators.idempotent_id('bf2b4a00-73a3-4d53-81fa-acbcd97d6339')
     def test_rebuild_update_server_275(self):
         server = self.create_test_server()
         # Checking update response schema.
         self.servers_client.update_server(server['id'])
-        waiters.wait_for_server_status(self.servers_client, server['id'],
-                                       'ACTIVE')
+        waiters.wait_for_server_status(
+            self.reader_servers_client, server['id'], 'ACTIVE')
         # Checking rebuild API response schema
         self.servers_client.rebuild_server(server['id'], self.image_ref_alt)
-        waiters.wait_for_server_status(self.servers_client,
+        waiters.wait_for_server_status(self.reader_servers_client,
                                        server['id'], 'ACTIVE')
         # Checking rebuild server with admin response schema.
         self.os_admin.servers_client.rebuild_server(
             server['id'], self.image_ref)
         self.addCleanup(waiters.wait_for_server_status,
-                        self.os_admin.servers_client,
+                        self.reader_servers_client,
                         server['id'], 'ACTIVE')
diff --git a/tempest/api/compute/admin/test_servers_negative.py b/tempest/api/compute/admin/test_servers_negative.py
index c933c80..8480c24 100644
--- a/tempest/api/compute/admin/test_servers_negative.py
+++ b/tempest/api/compute/admin/test_servers_negative.py
@@ -28,11 +28,18 @@
 class ServersAdminNegativeTestJSON(base.BaseV2ComputeAdminTest):
     """Negative Tests of Servers API using admin privileges"""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_clients(cls):
         super(ServersAdminNegativeTestJSON, cls).setup_clients()
         cls.client = cls.os_admin.servers_client
         cls.quotas_client = cls.os_admin.quotas_client
+        if CONF.enforce_scope.nova:
+            cls.reader_quotas_client = (
+                cls.os_project_reader.quotas_client)
+        else:
+            cls.reader_quotas_client = cls.quotas_client
 
     @classmethod
     def resource_setup(cls):
@@ -144,7 +151,7 @@
         server_id = server['id']
         # suspend the server.
         self.client.suspend_server(server_id)
-        waiters.wait_for_server_status(self.client,
+        waiters.wait_for_server_status(self.reader_servers_client,
                                        server_id, 'SUSPENDED')
         # migrate a suspended server should fail
         self.assertRaises(lib_exc.Conflict,
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index e0290e4..7a0d5ca 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -25,6 +25,18 @@
 
 class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
     """Test creating servers on multiple nodes with scheduler_hints."""
+
+    credentials = ['primary', 'admin', 'project_reader']
+
+    @classmethod
+    def setup_clients(cls):
+        super(ServersOnMultiNodesTest, cls).setup_clients()
+        if CONF.enforce_scope.nova:
+            cls.reader_server_groups_client = (
+                cls.os_project_reader.server_groups_client)
+        else:
+            cls.reader_server_groups_client = cls.server_groups_client
+
     @classmethod
     def resource_setup(cls):
         super(ServersOnMultiNodesTest, cls).resource_setup()
@@ -47,12 +59,12 @@
             return_reservation_id=True)['reservation_id']
 
         # Get the servers using the reservation_id.
-        servers = self.servers_client.list_servers(
+        servers = self.reader_servers_client.list_servers(
             detail=True, reservation_id=reservation_id)['servers']
         self.assertEqual(2, len(servers))
 
         # Assert the servers are in the group.
-        server_group = self.server_groups_client.show_server_group(
+        server_group = self.reader_server_groups_client.show_server_group(
             group_id)['server_group']
         hosts = {}
         for server in servers:
@@ -142,6 +154,8 @@
     min_microversion = '2.91'
     max_microversion = 'latest'
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def skip_checks(cls):
         super(UnshelveToHostMultiNodesTest, cls).skip_checks()
@@ -167,8 +185,8 @@
             server['id'],
             body={'unshelve': {'host': host}}
             )
-        waiters.wait_for_server_status(self.servers_client, server['id'],
-                                       'ACTIVE')
+        waiters.wait_for_server_status(
+            self.reader_servers_client, server['id'], 'ACTIVE')
 
     @decorators.attr(type='multinode')
     @decorators.idempotent_id('b5cc0889-50c2-46a0-b8ff-b5fb4c3a6e20')
diff --git a/tempest/api/compute/admin/test_simple_tenant_usage.py b/tempest/api/compute/admin/test_simple_tenant_usage.py
index c24f420..b71c8b6 100644
--- a/tempest/api/compute/admin/test_simple_tenant_usage.py
+++ b/tempest/api/compute/admin/test_simple_tenant_usage.py
@@ -16,10 +16,13 @@
 import datetime
 
 from tempest.api.compute import base
+from tempest import config
 from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as e
 
+CONF = config.CONF
+
 # Time that waits for until returning valid response
 # TODO(takmatsu): Ideally this value would come from configuration.
 VALID_WAIT = 30
@@ -28,11 +31,18 @@
 class TenantUsagesTestJSON(base.BaseV2ComputeAdminTest):
     """Test tenant usages"""
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def setup_clients(cls):
         super(TenantUsagesTestJSON, cls).setup_clients()
         cls.adm_client = cls.os_admin.tenant_usages_client
         cls.client = cls.os_primary.tenant_usages_client
+        if CONF.enforce_scope.nova:
+            cls.reader_client = (
+                cls.os_project_reader.tenant_usages_client)
+        else:
+            cls.reader_client = cls.client
 
     @classmethod
     def resource_setup(cls):
@@ -87,6 +97,6 @@
     def test_get_usage_tenant_with_non_admin_user(self):
         """Test getting usage for a specific tenant with non admin user"""
         tenant_usage = self.call_until_valid(
-            self.client.show_tenant_usage, VALID_WAIT,
+            self.reader_client.show_tenant_usage, VALID_WAIT,
             self.tenant_id, start=self.start, end=self.end)['tenant_usage']
         self.assertEqual(len(tenant_usage), 8)
diff --git a/tempest/api/compute/admin/test_spice.py b/tempest/api/compute/admin/test_spice.py
index f09012d..8942565 100644
--- a/tempest/api/compute/admin/test_spice.py
+++ b/tempest/api/compute/admin/test_spice.py
@@ -31,6 +31,8 @@
     min_microversion = '2.99'
     max_microversion = 'latest'
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     # SPICE client protocol constants
     magic = b'REDQ'
     major = 2
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 2813d7a..bdfa9e5 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -25,18 +25,39 @@
     """Base class for the admin volume tests in this module."""
     create_default_network = True
 
+    credentials = ['primary', 'admin', 'project_reader']
+
     @classmethod
     def skip_checks(cls):
         super(BaseAttachSCSIVolumeTest, cls).skip_checks()
         if not CONF.service_available.cinder:
             skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
             raise cls.skipException(skip_msg)
+        if not CONF.service_available.glance:
+            skip_msg = ("%s skipped as Glance is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+        if not CONF.image_feature_enabled.api_v2:
+            skip_msg = ("%s skipped as Glance API v2 is not enabled" %
+                        cls.__name__)
+            raise cls.skipException(skip_msg)
 
     @classmethod
     def setup_credentials(cls):
         cls.prepare_instance_network()
         super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
 
+    @classmethod
+    def setup_clients(cls):
+        super(BaseAttachSCSIVolumeTest, cls).setup_clients()
+        if CONF.enforce_scope.nova:
+            cls.reader_volumes_client = (
+                cls.os_project_reader.volumes_client_latest)
+            cls.reader_image_client = (
+                cls.os_project_reader.image_client_v2)
+        else:
+            cls.reader_volumes_client = cls.volumes_client
+            cls.reader_image_client = cls.images_client
+
     def _create_image_with_custom_property(self, **kwargs):
         """Wrapper utility that returns the custom image.
 
@@ -46,7 +67,7 @@
 
         :return: The UUID of the newly created image.
         """
-        image = self.admin_image_client.show_image(CONF.compute.image_ref)
+        image = self.reader_image_client.show_image(CONF.compute.image_ref)
         # NOTE(danms): We need to stream this, so chunked=True means we get
         # back a urllib3.HTTPResponse and have to carefully pass it to
         # store_image_file() to upload it in pieces.
@@ -67,8 +88,9 @@
         create_dict.update(kwargs)
         try:
             new_image = self.admin_image_client.create_image(**create_dict)
-            self.addCleanup(self.admin_image_client.wait_for_resource_deletion,
-                            new_image['id'])
+            self.addCleanup(
+                self.reader_image_client.wait_for_resource_deletion,
+                new_image['id'])
             self.addCleanup(
                 self.admin_image_client.delete_image, new_image['id'])
             self.admin_image_client.store_image_file(new_image['id'],
@@ -110,20 +132,21 @@
         # deleted otherwise image deletion can start before server is
         # deleted.
         self.addCleanup(waiters.wait_for_server_termination,
-                        self.servers_client, server['id'])
+                        self.reader_servers_client, server['id'])
         self.addCleanup(self.servers_client.delete_server, server['id'])
 
         volume = self.create_volume()
         attachment = self.attach_volume(server, volume)
         waiters.wait_for_volume_resource_status(
-            self.volumes_client, attachment['volumeId'], 'in-use')
-        volume_after_attach = self.servers_client.list_volume_attachments(
-            server['id'])['volumeAttachments']
+            self.reader_volumes_client, attachment['volumeId'], 'in-use')
+        volume_after_attach = (
+            self.reader_servers_client.list_volume_attachments(
+                server['id'])['volumeAttachments'])
         self.assertEqual(1, len(volume_after_attach),
                          "Failed to attach volume")
         self.servers_client.detach_volume(
             server['id'], attachment['volumeId'])
         waiters.wait_for_volume_resource_status(
-            self.volumes_client, attachment['volumeId'], 'available')
+            self.reader_volumes_client, attachment['volumeId'], 'available')
         waiters.wait_for_volume_attachment_remove_from_server(
-            self.servers_client, server['id'], attachment['volumeId'])
+            self.reader_servers_client, server['id'], attachment['volumeId'])
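
The NOTE(danms) comment in the hunk above refers to the streaming copy performed by ``_create_image_with_custom_property``: the source image data is fetched chunked, which yields a urllib3.HTTPResponse, and that response object is handed straight to ``store_image_file()`` so the upload happens in pieces rather than being buffered in memory. A condensed sketch of that flow, assuming a glance v2 client whose ``show_image_file`` accepts ``chunked=True`` as the comment describes (``copy_image_with_custom_property`` is a hypothetical helper):

def copy_image_with_custom_property(image_client, source_image_id, **props):
    # chunked=True returns a urllib3.HTTPResponse we can stream from,
    # instead of reading the whole image body into memory.
    image_body = image_client.show_image_file(source_image_id, chunked=True)
    try:
        new_image = image_client.create_image(
            container_format='bare', disk_format='raw', **props)
        # Pass the response object straight through so the data is
        # uploaded in pieces.
        image_client.store_image_file(new_image['id'], image_body)
    finally:
        # Always release the connection backing the streamed response.
        image_body.release_conn()
    return new_image['id']
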
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 423407e..481367c 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -24,7 +24,8 @@
 class TestVolumeSwapBase(base.BaseV2ComputeAdminTest):
     create_default_network = True
 
-    credentials = ['primary', 'admin', ['service_user', 'admin', 'service']]
+    credentials = ['primary', 'admin', 'project_reader',
+                   ['service_user', 'admin', 'service']]
 
     @classmethod
     def setup_credentials(cls):
@@ -43,12 +44,18 @@
     def setup_clients(cls):
         super(TestVolumeSwapBase, cls).setup_clients()
         cls.service_client = cls.os_service_user.servers_client
+        if CONF.enforce_scope.nova:
+            cls.reader_volumes_client = (
+                cls.os_project_reader.volumes_client_latest)
+        else:
+            cls.reader_volumes_client = cls.volumes_client
 
     def wait_for_server_volume_swap(self, server_id, old_volume_id,
                                     new_volume_id):
         """Waits for a server to swap the old volume to a new one."""
-        volume_attachments = self.servers_client.list_volume_attachments(
-            server_id)['volumeAttachments']
+        volume_attachments = (
+            self.reader_servers_client.list_volume_attachments(
+                server_id)['volumeAttachments'])
         attached_volume_ids = [attachment['volumeId']
                                for attachment in volume_attachments]
         start = int(time.time())
@@ -56,8 +63,9 @@
         while (old_volume_id in attached_volume_ids) \
                 or (new_volume_id not in attached_volume_ids):
             time.sleep(self.servers_client.build_interval)
-            volume_attachments = self.servers_client.list_volume_attachments(
-                server_id)['volumeAttachments']
+            volume_attachments = (
+                self.reader_servers_client.list_volume_attachments(
+                    server_id)['volumeAttachments'])
             attached_volume_ids = [attachment['volumeId']
                                    for attachment in volume_attachments]
 
@@ -127,15 +135,15 @@
             lib_exc.Conflict, self.service_client.update_attached_volume,
             server['id'], volume1['id'], volumeId=volume2['id'])
         # Verify "volume1" is attached to the server
-        vol_attachments = self.servers_client.list_volume_attachments(
+        vol_attachments = self.reader_servers_client.list_volume_attachments(
             server['id'])['volumeAttachments']
         self.assertEqual(1, len(vol_attachments))
         self.assertIn(volume1['id'], vol_attachments[0]['volumeId'])
         waiters.wait_for_volume_resource_status(
-            self.volumes_client, volume1['id'], 'in-use')
+            self.reader_volumes_client, volume1['id'], 'in-use')
         # verify "volume2" is still available
         waiters.wait_for_volume_resource_status(
-            self.volumes_client, volume2['id'], 'available')
+            self.reader_volumes_client, volume2['id'], 'available')
 
 
 class TestMultiAttachVolumeSwap(TestVolumeSwapBase):
@@ -216,7 +224,7 @@
             return_reservation_id=True,
         )['reservation_id']
         # Get the servers using the reservation_id.
-        servers = self.servers_client.list_servers(
+        servers = self.reader_servers_client.list_servers(
             reservation_id=reservation_id)['servers']
         self.assertEqual(2, len(servers))
         # Attach volume1 to server1
@@ -232,19 +240,19 @@
             server1['id'], volume1['id'], volumeId=volume2['id'])
 
         # volume1 remains in in-use and volume2 in available
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume1['id'], 'in-use')
-        waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                volume2['id'], 'available')
+        waiters.wait_for_volume_resource_status(
+            self.reader_volumes_client, volume1['id'], 'in-use')
+        waiters.wait_for_volume_resource_status(
+            self.reader_volumes_client, volume2['id'], 'available')
 
         # Verify volume1 is attached to server1
-        vol_attachments = self.servers_client.list_volume_attachments(
+        vol_attachments = self.reader_servers_client.list_volume_attachments(
             server1['id'])['volumeAttachments']
         self.assertEqual(1, len(vol_attachments))
         self.assertIn(volume1['id'], vol_attachments[0]['volumeId'])
 
         # Verify volume1 is still attached to server2
-        vol_attachments = self.servers_client.list_volume_attachments(
+        vol_attachments = self.reader_servers_client.list_volume_attachments(
             server2['id'])['volumeAttachments']
         self.assertEqual(1, len(vol_attachments))
         self.assertIn(volume1['id'], vol_attachments[0]['volumeId'])
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 3d42388..1868e2b 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -26,7 +26,8 @@
     """Negative tests of volume swapping"""
 
     create_default_network = True
-    credentials = ['primary', 'admin', ['service_user', 'admin', 'service']]
+    credentials = ['primary', 'admin', 'project_reader',
+                   ['service_user', 'admin', 'service']]
 
     @classmethod
     def setup_credentials(cls):
@@ -44,6 +45,14 @@
     def setup_clients(cls):
         super(VolumesAdminNegativeTest, cls).setup_clients()
         cls.service_client = cls.os_service_user.servers_client
+        if CONF.enforce_scope.nova:
+            cls.reader_volumes_client = (
+                cls.os_project_reader.volumes_client_latest)
+            cls.reader_attachments_client = (
+                cls.os_project_reader.attachments_client_latest)
+        else:
+            cls.reader_volumes_client = cls.volumes_client
+            cls.reader_attachments_client = cls.attachments_client
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('309b5ecd-0585-4a7e-a36f-d2b2bf55259d')
@@ -96,7 +105,8 @@
     volume_min_microversion = '3.27'
 
     create_default_network = True
-    credentials = ['primary', 'admin', ['service_user', 'admin', 'service']]
+    credentials = ['primary', 'admin', 'project_reader',
+                   ['service_user', 'admin', 'service']]
 
     @classmethod
     def setup_credentials(cls):
@@ -113,6 +123,14 @@
     def setup_clients(cls):
         super(UpdateMultiattachVolumeNegativeTest, cls).setup_clients()
         cls.service_client = cls.os_service_user.servers_client
+        if CONF.enforce_scope.nova:
+            cls.reader_volumes_client = (
+                cls.os_project_reader.volumes_client_latest)
+            cls.reader_attachments_client = (
+                cls.os_project_reader.attachments_client_latest)
+        else:
+            cls.reader_volumes_client = cls.volumes_client
+            cls.reader_attachments_client = cls.attachments_client
 
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('7576d497-b7c6-44bd-9cc5-c5b4e50fec71')
@@ -157,7 +175,7 @@
         vol1_attachment2 = self.attach_volume(server2, vol1)
 
         # Assert that we now have two attachments.
-        vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+        vol1 = self.reader_volumes_client.show_volume(vol1['id'])['volume']
         self.assertEqual(2, len(vol1['attachments']))
 
         # By default both of these attachments should have an attach_mode of
@@ -165,7 +183,7 @@
         # the volume will be rejected.
         for volume_attachment in vol1['attachments']:
             attachment_id = volume_attachment['attachment_id']
-            attachment = self.attachments_client.show_attachment(
+            attachment = self.reader_attachments_client.show_attachment(
                 attachment_id)['attachment']
             self.assertEqual('rw', attachment['attach_mode'])
 
@@ -179,7 +197,7 @@
                           server2['id'], vol1['id'], volumeId=vol2['id'])
 
         # Fetch the volume 1 to check the current attachments.
-        vol1 = self.volumes_client.show_volume(vol1['id'])['volume']
+        vol1 = self.reader_volumes_client.show_volume(vol1['id'])['volume']
         vol1_attachment_ids = [a['id'] for a in vol1['attachments']]
 
         # Assert that volume 1 is still attached to both server 1 and 2.
@@ -187,5 +205,5 @@
         self.assertIn(vol1_attachment2['id'], vol1_attachment_ids)
 
         # Assert that volume 2 has no attachments.
-        vol2 = self.volumes_client.show_volume(vol2['id'])['volume']
+        vol2 = self.reader_volumes_client.show_volume(vol2['id'])['volume']
         self.assertEqual([], vol2['attachments'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 3ac2b46..3b44ded 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -560,8 +560,17 @@
                             'tagging metadata was not checked in the '
                             'metadata API')
                 return True
+
             cmd = 'curl %s' % md_url
-            md_json = ssh_client.exec_command(cmd)
+            try:
+                md_json = ssh_client.exec_command(cmd)
+            except lib_exc.SSHExecCommandFailed:
+                # NOTE(eolivare): We cannot guarantee that the metadata service
+                # is available right after the VM is ssh-able, because it could
+                # obtain authorized ssh keys from config_drive or use a password
+                # instead. Hence, retries may be needed.
+                LOG.exception('metadata service not available yet')
+                return False
             return verify_method(md_json)
         # NOTE(gmann) Keep refreshing the metadata info until the metadata
         # cache is refreshed. For safer side, we will go with wait loop of
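
Returning False from the new exception handler matters because, as the NOTE(gmann) comment says, the whole check runs inside a wait loop: False means "not ready yet, poll again", and only a successful ``verify_method`` call completes the check. A minimal sketch of that polling contract using ``call_until_true`` from tempest.lib (``wait_for_metadata`` is a hypothetical helper; the timeout values are placeholders):

from tempest.lib import exceptions as lib_exc
from tempest.lib.common.utils import test_utils


def wait_for_metadata(ssh_client, md_url, verify_method,
                      timeout=120, interval=5):
    def _check():
        try:
            md_json = ssh_client.exec_command('curl %s' % md_url)
        except lib_exc.SSHExecCommandFailed:
            # Metadata service not reachable yet: report "not ready"
            # so the loop polls again instead of failing the test.
            return False
        return verify_method(md_json)

    # Re-invokes _check every `interval` seconds until it returns True
    # or `timeout` seconds have elapsed; returns False on timeout.
    return test_utils.call_until_true(_check, timeout, interval)
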
diff --git a/tempest/api/compute/security_groups/base.py b/tempest/api/compute/security_groups/base.py
index ef69a13..f57ed59 100644
--- a/tempest/api/compute/security_groups/base.py
+++ b/tempest/api/compute/security_groups/base.py
@@ -27,6 +27,15 @@
     create_default_network = True
 
     @classmethod
+    def setup_clients(cls):
+        super(BaseSecurityGroupsTest, cls).setup_clients()
+        if CONF.enforce_scope.nova and hasattr(cls, 'os_project_reader'):
+            cls.reader_security_groups_client = (
+                cls.os_project_reader.compute_security_groups_client)
+        else:
+            cls.reader_security_groups_client = cls.security_groups_client
+
+    @classmethod
     def skip_checks(cls):
         super(BaseSecurityGroupsTest, cls).skip_checks()
         if not utils.get_service_list()['network']:
diff --git a/tempest/api/compute/security_groups/test_security_group_rules.py b/tempest/api/compute/security_groups/test_security_group_rules.py
index 3c4daf6..ed6dcd4 100644
--- a/tempest/api/compute/security_groups/test_security_group_rules.py
+++ b/tempest/api/compute/security_groups/test_security_group_rules.py
@@ -148,7 +148,7 @@
             rule2_id)
 
         # Get rules of the created Security Group
-        rules = self.security_groups_client.show_security_group(
+        rules = self.reader_security_groups_client.show_security_group(
             securitygroup_id)['security_group']['rules']
         self.assertNotEmpty([i for i in rules if i['id'] == rule1_id])
         self.assertNotEmpty([i for i in rules if i['id'] == rule2_id])
@@ -173,7 +173,8 @@
         # Delete group2
         self.security_groups_client.delete_security_group(sg2_id)
         # Get rules of the Group1
-        rules = (self.security_groups_client.show_security_group(sg1_id)
-                 ['security_group']['rules'])
+        rules = (
+            self.reader_security_groups_client.show_security_group(sg1_id)
+            ['security_group']['rules'])
         # The group1 has no rules because group2 has deleted
         self.assertEmpty(rules)
diff --git a/tempest/api/compute/security_groups/test_security_groups.py b/tempest/api/compute/security_groups/test_security_groups.py
index 01a7986..89b1025 100644
--- a/tempest/api/compute/security_groups/test_security_groups.py
+++ b/tempest/api/compute/security_groups/test_security_groups.py
@@ -45,7 +45,9 @@
             security_group_list.append(body)
         # Fetch all Security Groups and verify the list
         # has all created Security Groups
-        fetched_list = self.client.list_security_groups()['security_groups']
+        fetched_list = (
+            self.reader_security_groups_client.list_security_groups()
+            ['security_groups'])
         # Now check if all the created Security Groups are in fetched list
         missing_sgs = \
             [sg for sg in security_group_list if sg not in fetched_list]
@@ -58,7 +60,9 @@
             self.client.delete_security_group(sg['id'])
             self.client.wait_for_resource_deletion(sg['id'])
         # Now check if all the created Security Groups are deleted
-        fetched_list = self.client.list_security_groups()['security_groups']
+        fetched_list = (
+            self.reader_security_groups_client.list_security_groups()
+            ['security_groups'])
         deleted_sgs = [sg for sg in security_group_list if sg in fetched_list]
         self.assertFalse(deleted_sgs,
                          "Failed to delete Security Group %s "
@@ -80,8 +84,9 @@
                          "The created Security Group name is "
                          "not equal to the requested name")
         # Now fetch the created Security Group by its 'id'
-        fetched_group = (self.client.show_security_group(securitygroup['id'])
-                         ['security_group'])
+        fetched_group = (
+            self.reader_security_groups_client.show_security_group(
+                securitygroup['id'])['security_group'])
         self.assertEqual(securitygroup, fetched_group,
                          "The fetched Security Group is different "
                          "from the created Group")
@@ -144,8 +149,9 @@
                                           name=s_new_name,
                                           description=s_new_des)
         # get the security group
-        fetched_group = (self.client.show_security_group(securitygroup_id)
-                         ['security_group'])
+        fetched_group = (
+            self.reader_security_groups_client.show_security_group(
+                securitygroup_id)['security_group'])
         self.assertEqual(s_new_name, fetched_group['name'])
         self.assertEqual(s_new_des, fetched_group['description'])
 
@@ -170,7 +176,7 @@
 
         # list security groups for a server
         fetched_groups = (
-            self.servers_client.list_security_groups_by_server(
+            self.reader_servers_client.list_security_groups_by_server(
                 server_id)['security_groups'])
         fetched_security_groups_ids = [i['id'] for i in fetched_groups]
         # verifying the security groups ids in list
diff --git a/tempest/api/compute/security_groups/test_security_groups_negative.py b/tempest/api/compute/security_groups/test_security_groups_negative.py
index c7d873f..b4f060f 100644
--- a/tempest/api/compute/security_groups/test_security_groups_negative.py
+++ b/tempest/api/compute/security_groups/test_security_groups_negative.py
@@ -41,8 +41,10 @@
     def test_security_group_get_nonexistent_group(self):
         """Test getting non existent security group details should fail"""
         non_exist_id = self.generate_random_security_group_id()
-        self.assertRaises(lib_exc.NotFound, self.client.show_security_group,
-                          non_exist_id)
+        self.assertRaises(
+            lib_exc.NotFound,
+            self.reader_security_groups_client.show_security_group,
+            non_exist_id)
 
     @decorators.skip_because(bug="1161411",
                              condition=CONF.service_available.neutron)
@@ -111,7 +113,9 @@
     def test_delete_the_default_security_group(self):
         """Test deleting "default" security group should fail"""
         default_security_group_id = None
-        body = self.client.list_security_groups()['security_groups']
+        body = (
+            self.reader_security_groups_client.list_security_groups()
+            ['security_groups'])
         for i in range(len(body)):
             if body[i]['name'] == 'default':
                 default_security_group_id = body[i]['id']
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 1fe4a65..10c2e91 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -283,10 +283,11 @@
         # a situation when a newly created server doesn't have a floating
         # ip attached at the beginning of the test_rebuild_server let's
         # make sure right here the floating ip is attached
-        waiters.wait_for_server_floating_ip(
-            self.servers_client,
-            server,
-            validation_resources['floating_ip'])
+        if 'floating_ip' in validation_resources:
+            waiters.wait_for_server_floating_ip(
+                self.servers_client,
+                server,
+                validation_resources['floating_ip'])
 
         self.addCleanup(waiters.wait_for_server_termination,
                         self.servers_client, server['id'])
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index e267b0f..2816975 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -473,9 +473,9 @@
             # but cirros (and other) test images won't see the device unless
             # they have lsilogic drivers (which is the default). So use this
             # as sort of the indication that the test should be enabled.
-            self.skip('hw_scsi_model=virtio-scsi not set on image')
+            self.skipTest('hw_scsi_model=virtio-scsi not set on image')
         if not CONF.validation.run_validation:
-            self.skip('validation is required for this test')
+            self.skipTest('validation is required for this test')
 
         validation_resources = self.get_test_validation_resources(
             self.os_primary)
diff --git a/tempest/api/image/v2/test_images_formats.py b/tempest/api/image/v2/test_images_formats.py
index 520a215..6914745 100644
--- a/tempest/api/image/v2/test_images_formats.py
+++ b/tempest/api/image/v2/test_images_formats.py
@@ -196,8 +196,9 @@
                 server = self._create_server_with_image_def(self.imgdef)
             except exceptions.BuildErrorException:
                 if is_broken:
-                    self.skip('Tolerating failed build with known-broken '
-                              'image format')
+                    self.skipTest(
+                        'Tolerating failed build with known-broken image '
+                        'format')
                 else:
                     raise
             self.delete_server(server['id'])
diff --git a/tempest/api/object_storage/test_account_quotas.py b/tempest/api/object_storage/test_account_quotas.py
index 37783b8..6c472a6 100644
--- a/tempest/api/object_storage/test_account_quotas.py
+++ b/tempest/api/object_storage/test_account_quotas.py
@@ -109,6 +109,57 @@
         nafter = self._get_bytes_used()
         self.assertEqual(nbefore, nafter)
 
+    @decorators.idempotent_id('aab68903-cc9f-493a-b17e-b387db3e4e44')
+    @utils.requires_ext(extension='account_quotas', service='object')
+    def test_storage_policy_quota_limit(self):
+        """Verify quota limits are enforced per storage policy"""
+        policy_names = [p["name"] for p in self.policies]
+        if 'silver' not in policy_names:
+            raise self.skipException("Missing storage policy 'silver'")
+
+        policy_quota = 10
+        policy_quota_header = {
+            "X-Account-Quota-Bytes-Policy-silver": str(policy_quota)
+        }
+        self.account_client.auth_provider.set_alt_auth_data(
+            request_part='headers',
+            auth_data=self.reselleradmin_auth_data
+        )
+        self.os_roles_operator.account_client.request(
+            "POST", url="", headers=policy_quota_header, body=""
+        )
+
+        # Create a new container using the "silver" storage policy
+        silver_container = data_utils.rand_name("silver-container")
+        headers = {'X-Storage-Policy': 'silver'}
+        self.container_client.create_container(
+            silver_container, **headers
+        )
+
+        # Try uploading an object larger than the quota
+        large_data = data_utils.arbitrary_string(size=policy_quota + 1)
+        object_name = data_utils.rand_name(name='large_object')
+        self.assertRaises(
+            lib_exc.OverLimit,
+            self.object_client.create_object,
+            silver_container,
+            object_name,
+            large_data
+        )
+
+        # Upload same large object to default container
+        default_container = data_utils.rand_name(
+            "default_container"
+        )
+        self.container_client.create_container(default_container)
+        default_object = data_utils.rand_name(name='default_object')
+        resp, _ = self.object_client.create_object(
+            default_container,
+            default_object,
+            large_data
+        )
+        self.assertHeaders(resp, 'Object', 'PUT')
+
     @decorators.attr(type=["smoke"])
     @decorators.idempotent_id('63f51f9f-5f1d-4fc6-b5be-d454d70949d6')
     @utils.requires_ext(extension='account_quotas', service='object')
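
Behind the new test, the quota setup is a plain Swift account POST: a ResellerAdmin sets an ``X-Account-Quota-Bytes-Policy-<policy>`` header, after which uploads into containers of that storage policy are rejected with 413 once they would exceed the limit, while containers on other policies are unaffected. A raw-HTTP sketch of the same exchange, where STORAGE_URL, TOKEN, and the pre-created containers are placeholder assumptions:

import requests

# Placeholder assumptions: a reseller-admin token and an account URL.
STORAGE_URL = 'http://swift.example.test/v1/AUTH_demo'
TOKEN = 'reseller-admin-token'
HEADERS = {'X-Auth-Token': TOKEN}

# Set a 10-byte quota for data stored under the 'silver' policy.
requests.post(STORAGE_URL, headers=dict(
    HEADERS, **{'X-Account-Quota-Bytes-Policy-silver': '10'}))

# 'silver-container' is assumed to exist with X-Storage-Policy: silver.
# An 11-byte object now exceeds the per-policy quota and is rejected.
resp = requests.put(STORAGE_URL + '/silver-container/large-object',
                    headers=HEADERS, data=b'x' * 11)
assert resp.status_code == 413  # quota exceeded (OverLimit)

# The same object in a container on the default policy still succeeds.
resp = requests.put(STORAGE_URL + '/default-container/large-object',
                    headers=HEADERS, data=b'x' * 11)
assert resp.status_code == 201
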
diff --git a/tempest/lib/common/jsonschema_validator.py b/tempest/lib/common/jsonschema_validator.py
index 1618175..e56a81f 100644
--- a/tempest/lib/common/jsonschema_validator.py
+++ b/tempest/lib/common/jsonschema_validator.py
@@ -18,7 +18,7 @@
 
 # JSON Schema validator and format checker used for JSON Schema validation
 JSONSCHEMA_VALIDATOR = jsonschema.Draft4Validator
-FORMAT_CHECKER = jsonschema.draft4_format_checker
+FORMAT_CHECKER = JSONSCHEMA_VALIDATOR.FORMAT_CHECKER
 
 
 # NOTE(gmann): Add customized format checker for 'date-time' format because:
@@ -39,7 +39,7 @@
         return True
 
 
-@jsonschema.FormatChecker.cls_checks('base64')
+@FORMAT_CHECKER.checks('base64')
 def _validate_base64_format(instance):
     try:
         if isinstance(instance, str):
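
These two hunks track the jsonschema 4.x API: the module-level ``draft4_format_checker`` alias was deprecated, so the checker is now taken from ``Draft4Validator.FORMAT_CHECKER``, and custom formats are registered on that instance with ``.checks()`` rather than class-wide via ``FormatChecker.cls_checks``. A standalone example of the new registration style, assuming jsonschema>=4.5:

import base64

import jsonschema

VALIDATOR = jsonschema.Draft4Validator
FORMAT_CHECKER = VALIDATOR.FORMAT_CHECKER


@FORMAT_CHECKER.checks('base64', raises=Exception)
def _validate_base64(instance):
    # Non-strings pass here; the schema's own "type" keyword is the
    # right place to reject them.
    if isinstance(instance, str):
        base64.b64decode(instance, validate=True)
    return True


schema = {'type': 'string', 'format': 'base64'}
# Passes for valid base64; invalid input raises ValidationError.
jsonschema.validate('aGVsbG8=', schema, cls=VALIDATOR,
                    format_checker=FORMAT_CHECKER)
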
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
index 055dcb6..faff6f9 100644
--- a/tempest/scenario/test_network_qos_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -152,21 +152,36 @@
             min_kbps=self.BANDWIDTH_2
         )
 
-    def _create_network_and_qos_policies(self, policy_method):
-        physnet_name = CONF.network_feature_enabled.qos_placement_physnet
-        base_segm = \
-            CONF.network_feature_enabled.provider_net_base_segmentation_id
-
-        self.prov_network, _, _ = self.setup_network_subnet_with_router(
-            networks_client=self.networks_client,
-            routers_client=self.routers_client,
-            subnets_client=self.subnets_client,
+    def _use_or_create_network_and_qos_policies(self, policy_method):
+        vlan_ext_nets = self.networks_client.list_networks(
             **{
-                'shared': True,
                 'provider:network_type': 'vlan',
-                'provider:physical_network': physnet_name,
-                'provider:segmentation_id': base_segm
-            })
+                'router:external': True}
+        )['networks']
+        if vlan_ext_nets:
+            self.prov_network = vlan_ext_nets[0]
+            if not self.prov_network['shared']:
+                self.prov_network = self.networks_client.update_network(
+                    self.prov_network['id'], shared=True)['network']
+                self.addClassResourceCleanup(
+                    self.networks_client.update_network,
+                    self.prov_network['id'],
+                    shared=False)
+        else:
+            physnet_name = CONF.network_feature_enabled.qos_placement_physnet
+            base_segm = \
+                CONF.network_feature_enabled.provider_net_base_segmentation_id
+
+            self.prov_network, _, _ = self.setup_network_subnet_with_router(
+                networks_client=self.networks_client,
+                routers_client=self.routers_client,
+                subnets_client=self.subnets_client,
+                **{
+                    'shared': True,
+                    'provider:network_type': 'vlan',
+                    'provider:physical_network': physnet_name,
+                    'provider:segmentation_id': base_segm
+                })
 
         policy_method()
 
@@ -261,7 +276,8 @@
         * Create port with invalid QoS policy, and try to boot VM with that,
         it should fail.
         """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        self._use_or_create_network_and_qos_policies(
+            self._create_qos_basic_policies)
         server1, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
         self._assert_allocation_is_as_expected(server1['id'],
@@ -297,7 +313,8 @@
         * If the VM goes to ACTIVE state check that allocations are as
         expected.
         """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        self._use_or_create_network_and_qos_policies(
+            self._create_qos_basic_policies)
         server, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
         self._assert_allocation_is_as_expected(server['id'],
@@ -335,7 +352,8 @@
         * If the VM goes to ACTIVE state check that allocations are as
         expected.
         """
-        self._create_network_and_qos_policies(self._create_qos_basic_policies)
+        self._use_or_create_network_and_qos_policies(
+            self._create_qos_basic_policies)
         server, valid_port = self._boot_vm_with_min_bw(
             qos_policy_id=self.qos_policy_valid['id'])
         self._assert_allocation_is_as_expected(server['id'],
@@ -378,7 +396,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(
@@ -432,7 +450,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(self.prov_network['id'])
@@ -457,7 +475,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(
@@ -479,7 +497,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port1 = self.create_port(
@@ -506,7 +524,7 @@
         if not utils.is_network_feature_enabled('update_port_qos'):
             raise self.skipException("update_port_qos feature is not enabled")
 
-        self._create_network_and_qos_policies(
+        self._use_or_create_network_and_qos_policies(
             self._create_qos_policies_from_life)
 
         port = self.create_port(
@@ -552,7 +570,7 @@
                 direction=self.EGRESS_DIRECTION,
             )
 
-        self._create_network_and_qos_policies(create_policies)
+        self._use_or_create_network_and_qos_policies(create_policies)
 
         port = self.create_port(
             self.prov_network['id'],
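
The renamed helper's reuse branch boils down to a lookup-then-update flow against Neutron: find an existing external VLAN network, share it if necessary, and register a cleanup that undoes the sharing; only when no such network exists does the old provisioning path run. A minimal sketch of the reuse branch, assuming a tempest networks client and a cleanup registration callable (``use_existing_vlan_external_network`` is a hypothetical helper):

def use_existing_vlan_external_network(networks_client, add_cleanup):
    nets = networks_client.list_networks(
        **{'provider:network_type': 'vlan',
           'router:external': True})['networks']
    if not nets:
        # Caller falls back to creating a provider network.
        return None
    network = nets[0]
    if not network['shared']:
        network = networks_client.update_network(
            network['id'], shared=True)['network']
        # Undo the sharing once the test class is done with the network.
        add_cleanup(networks_client.update_network,
                    network['id'], shared=False)
    return network
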
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index f378881..ec220b0 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -169,6 +169,61 @@
         # Step 6: Verify console log
         self.log_console_output([instance])
 
+    @decorators.idempotent_id('3c87bc15-cf6a-4dd6-8c23-4541b2cc3dbb')
+    @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
+                          'Cinder volume snapshots are disabled')
+    @utils.services('compute', 'volume', 'image')
+    def test_bootable_volume_last_snapshot_delete_while_stopped(self):
+        """Test bootable volume snapshot deletion while instance is stopped.
+
+        This test ensures that all volume snapshots can be deleted, which
+        verifies that their backing files were handled correctly during
+        deletion.
+
+        This scenario is related to the Cinder NFS backend.
+
+        Steps:
+        1. Create a bootable volume from an image.
+        2. Launch an instance from the created volume.
+        3. Create three volume snapshots of the created volume.
+        4. Stop the instance.
+        5. Delete the latest snapshot and verify it succeeds.
+        6. Delete the next latest snapshot and verify it succeeds.
+        7. Delete the remaining snapshot and verify it succeeds.
+        """
+        # Step 1: Create a bootable volume from an image
+        volume = self.create_volume_from_image()
+
+        # Step 2: Boot an instance from the created volume
+        instance = self.boot_instance_from_resource(
+            source_id=volume['id'],
+            source_type='volume',
+            wait_until='SSHABLE'
+        )
+
+        # Step 3: Create three volume snapshots of the bootable volume.
+        # The force=True is needed in order to snapshot an attached volume.
+        snapshot1 = self.create_volume_snapshot(volume['id'], force=True)
+        snapshot2 = self.create_volume_snapshot(volume['id'], force=True)
+        snapshot3 = self.create_volume_snapshot(volume['id'], force=True)
+
+        # Step 4: Stop the instance
+        self.servers_client.stop_server(instance['id'])
+        waiters.wait_for_server_status(self.servers_client, instance['id'],
+                                       'SHUTOFF')
+
+        # Step 5: Delete the latest (newest) snapshot
+        self.snapshots_client.delete_snapshot(snapshot3['id'])
+        self.snapshots_client.wait_for_resource_deletion(snapshot3['id'])
+
+        # Step 6: Delete the next latest (next newest) snapshot
+        self.snapshots_client.delete_snapshot(snapshot2['id'])
+        self.snapshots_client.wait_for_resource_deletion(snapshot2['id'])
+
+        # Step 7: Delete the last remaining snapshot
+        self.snapshots_client.delete_snapshot(snapshot1['id'])
+        self.snapshots_client.wait_for_resource_deletion(snapshot1['id'])
+
     @decorators.idempotent_id('05795fb2-b2a7-4c9f-8fac-ff25aedb1489')
     @decorators.attr(type='slow')
     @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
diff --git a/test-requirements.txt b/test-requirements.txt
index b925921..f599d53 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,4 +1,4 @@
 hacking>=7.0.0,<7.1.0
 coverage!=4.4,>=4.0 # Apache-2.0
 oslotest>=3.2.0 # Apache-2.0
-flake8-import-order>=0.18.0,<0.19.0 # LGPLv3
+flake8-import-order>=0.19.0 # LGPLv3
diff --git a/tools/generate-tempest-plugins-list.py b/tools/generate-tempest-plugins-list.py
index 2e8ced5..0690d57 100644
--- a/tools/generate-tempest-plugins-list.py
+++ b/tools/generate-tempest-plugins-list.py
@@ -79,6 +79,10 @@
     # No changes are merging in this
     # https://review.opendev.org/q/project:x%252Fnetworking-fortinet
-    'x/networking-fortinet'
+    'x/networking-fortinet',
+    # It is broken and it uses the retired 'patrol' plugin. The last change
+    # to this plugin was made 7 years ago.
+    # https://opendev.org/airship/tempest-plugin
+    'airship/tempest-plugin'
 ]
 
 url = 'https://review.opendev.org/projects/'
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index d151274..2fc7aea 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -82,8 +82,8 @@
       Former names for this job were:
         * legacy-tempest-dsvm-py35
         * gate-tempest-dsvm-py35
-    required-projects:
-      - openstack/horizon
+    # required-projects:
+    #  - openstack/horizon
     vars:
       # NOTE(gmann): Default concurrency is higher (number of cpu -2) which
       # end up 6 in upstream CI. Higher concurrency means high parallel
@@ -101,7 +101,11 @@
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
         # Enable horizon so that we can run horizon test.
-        horizon: true
+        # horizon: true
+        # FIXME(sean-k-mooney): restore the horizon deployment once
+        # horizon no longer depends on setuptools to provide
+        # pkg_resources, or bug #2141277 is resolved by other means.
+        horizon: false
 
 - job:
     name: tempest-full-centos-9-stream
@@ -436,29 +440,6 @@
       run on neutron gate only.
     check:
       jobs:
-        - grenade
-        # NOTE(gmann): These template are generic and used on stable branch
-        # as well as master testing. So grenade-skip-level on stable/2023.1
-        # which test stable/yoga to stable/2023.1 upgrade is non-voting.
-        - grenade-skip-level:
-            voting: false
-            branches:
-              - stable/2023.1
-        # on stable/2024.1(SLURP) grenade-skip-level is voting which test
-        # stable/2023.1 to stable/2024.1 upgrade. This is supposed to run on
-        # SLURP release only.
-        - grenade-skip-level:
-            branches:
-              - ^.*/2024.1
-        # on 2025.1(SLURP) grenade-skip-level-always is voting.
-        # which test stable/2024.1 to 2025.1 upgrade.
-        # As extra testing, we do run it voting on current master(even that is non SLURP).
-        # but if project feel that is not required to run for non SLURP releases then they can opt to make it non-voting or remove it.
-        - grenade-skip-level-always:
-            branches:
-              - ^.*/2025.2
-              - ^.*/2025.1
-              - master
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -470,23 +451,7 @@
               negate: true
     gate:
       jobs:
-        - grenade
         - tempest-integrated-networking
-        # on stable/2024.1(SLURP) grenade-skip-level is voting which test
-        # stable/2023.1 to stable/2024.1 upgrade. This is supposed to run on
-        # SLURP release only.
-        - grenade-skip-level:
-            branches:
-              - ^.*/2024.1
-        # on 2025.1(SLURP) grenade-skip-level-always is voting.
-        # which test stable/2024.1 to 2025.1 upgrade.
-        # As extra testing, we do run it voting on current master(even that is non SLURP).
-        # but if project feel that is not required to run for non SLURP releases then they can opt to make it non-voting or remove it.
-        - grenade-skip-level-always:
-            branches:
-              - ^.*/2025.2
-              - ^.*/2025.1
-              - master
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
         # and job is broken up to wallaby branch due to the issue
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index d9c7352..45e117f 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -154,6 +154,7 @@
     experimental:
       jobs:
         - nova-multi-cell
+        - nova-alt-configurations
         - tempest-with-latest-microversion
         - tempest-full-oslo-master
         - tempest-stestr-master