Merge "Test srbac on user_messages"
diff --git a/.zuul.yaml b/.zuul.yaml
index dec486b..6bbcd1b 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -8,7 +8,13 @@
         - cinder-tempest-plugin-lvm-lio-barbican
         - cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream:
             voting: false
-        - cinder-tempest-plugin-lvm-tgt-barbican
+        # FIXME: the tgt job is broken on jammy, and we may be removing tgt
+        # support anyway.  So make it non-voting until we figure out what to
+        # do about this, which should be at the March 2023 virtual PTG.
+        - cinder-tempest-plugin-lvm-tgt-barbican:
+            voting: false
+        - cinder-tempest-plugin-lvm-lio-barbican-fips:
+            voting: false
         - nova-ceph-multistore:
             voting: false
         - cinder-tempest-plugin-cbak-ceph
@@ -18,22 +24,21 @@
         # branches. That is what we need to do for all tempest plugins. Only jobs
         # for the current releasable ("Maintained") stable branches should be listed
         # here.
+        - cinder-tempest-plugin-basic-zed
         - cinder-tempest-plugin-basic-yoga
         - cinder-tempest-plugin-basic-xena
-        - cinder-tempest-plugin-basic-wallaby
-        # Set this job to voting once we have some actual tests to run
-        - cinder-tempest-plugin-protection-functional:
-            voting: false
+        - cinder-tempest-plugin-protection-functional
     gate:
       jobs:
         - cinder-tempest-plugin-lvm-lio-barbican
-        - cinder-tempest-plugin-lvm-tgt-barbican
+        # FIXME: no sense running a non-voting gate job.  See comment above.
+        # - cinder-tempest-plugin-lvm-tgt-barbican
         - cinder-tempest-plugin-cbak-ceph
     experimental:
       jobs:
+        - cinder-tempest-plugin-cbak-ceph-zed
         - cinder-tempest-plugin-cbak-ceph-yoga
         - cinder-tempest-plugin-cbak-ceph-xena
-        - cinder-tempest-plugin-cbak-ceph-wallaby
 
 - job:
     name: cinder-tempest-plugin-protection-functional
@@ -78,6 +83,7 @@
       - ^.*\.rst$
       - ^doc/.*$
       - ^releasenotes/.*$
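+    # allow up to three hours (10800 seconds) for this job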
+    timeout: 10800
 
 - job:
     name: cinder-tempest-plugin-lvm-barbican-base-abstract
@@ -212,6 +218,15 @@
               volume_revert: True
       devstack_services:
         c-bak: true
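+      # the backup tests create many volumes; raise the volume quota
+      # above the devstack default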
+      devstack_localrc:
+        CINDER_QUOTA_VOLUMES: 25
+    timeout: 10800
+
+- job:
+    name: cinder-tempest-plugin-cbak-ceph-zed
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/zed
 
 - job:
     name: cinder-tempest-plugin-cbak-ceph-yoga
@@ -270,6 +285,19 @@
       runs tempest tests and cinderlib tests on CentOS Stream 9.
 
 - job:
+    name: cinder-tempest-plugin-lvm-lio-barbican-fips
+    parent: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream
+    description: |
+      This job configures Cinder with LVM, LIO, barbican and
+      runs tempest tests and cinderlib tests on CentOS Stream 9
+      under FIPS mode.
+    pre-run: playbooks/enable-fips.yaml
+    vars:
+      configure_swap_size: 4096
+      nslookup_target: 'opendev.org'
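+      # the cryptsetup 'plain' encryption test is not expected to work
+      # under FIPS, so exclude it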
+      tempest_exclude_regex: 'test_encrypted_cinder_volumes_cryptsetup'
+
+- job:
     name: cinder-tempest-plugin-lvm-tgt-barbican
     description: |
       This job configures Cinder with LVM, tgt, barbican and
@@ -315,6 +343,12 @@
       - ^releasenotes/.*$
 
 - job:
+    name: cinder-tempest-plugin-basic-zed
+    parent: cinder-tempest-plugin-basic
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/zed
+
+- job:
     name: cinder-tempest-plugin-basic-yoga
     parent: cinder-tempest-plugin-basic
     nodeset: openstack-single-node-focal
@@ -325,9 +359,3 @@
     parent: cinder-tempest-plugin-basic
     nodeset: openstack-single-node-focal
     override-checkout: stable/xena
-
-- job:
-    name: cinder-tempest-plugin-basic-wallaby
-    parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/wallaby
diff --git a/cinder_tempest_plugin/api/volume/base.py b/cinder_tempest_plugin/api/volume/base.py
index f948a93..ea6bd2e 100644
--- a/cinder_tempest_plugin/api/volume/base.py
+++ b/cinder_tempest_plugin/api/volume/base.py
@@ -138,6 +138,11 @@
             'name',
             data_utils.rand_name(self.__class__.__name__ + '-instance'))
 
+        if wait_until == 'SSHABLE' and not kwargs.get('validation_resources'):
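+            # 'SSHABLE' requires a validatable server, so supply default
+            # validation resources when the caller has not provided any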
+            kwargs['validation_resources'] = (
+                self.get_test_validation_resources(self.os_primary))
+            kwargs['validatable'] = True
+
         tenant_network = self.get_tenant_network()
         body, _ = compute.create_test_server(
             self.os_primary,
diff --git a/cinder_tempest_plugin/api/volume/test_volume_backup.py b/cinder_tempest_plugin/api/volume/test_volume_backup.py
index 7ac33c2..190a483 100644
--- a/cinder_tempest_plugin/api/volume/test_volume_backup.py
+++ b/cinder_tempest_plugin/api/volume/test_volume_backup.py
@@ -31,6 +31,16 @@
         if not CONF.volume_feature_enabled.backup:
             raise cls.skipException("Cinder backup feature disabled")
 
+    @classmethod
+    def setup_credentials(cls):
+        # Setting network=True, subnet=True creates a default network
+        cls.set_network_resources(
+            network=True,
+            subnet=True,
+            router=True,
+            dhcp=True)
+        super(VolumesBackupsTest, cls).setup_credentials()
+
     @decorators.idempotent_id('885410c6-cd1d-452c-a409-7c32b7e0be15')
     def test_volume_snapshot_backup(self):
         """Create backup from snapshot."""
@@ -107,7 +117,7 @@
         server = self.create_server(
             name=server_name,
             block_device_mapping=bd_map,
-            wait_until='ACTIVE')
+            wait_until='SSHABLE')
 
         # Delete VM
         self.os_primary.servers_client.delete_server(server['id'])
diff --git a/cinder_tempest_plugin/rbac/v3/base.py b/cinder_tempest_plugin/rbac/v3/base.py
index d1a11e5..17644f4 100644
--- a/cinder_tempest_plugin/rbac/v3/base.py
+++ b/cinder_tempest_plugin/rbac/v3/base.py
@@ -10,12 +10,21 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.common import waiters
 from tempest import config
+from tempest.lib.common import api_microversion_fixture
+from tempest.lib.common import api_version_utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib.decorators import cleanup_order
+from tempest import test
 
 CONF = config.CONF
 
 
-class VolumeV3RbacBaseTests(object):
+class VolumeV3RbacBaseTests(
+    api_version_utils.BaseMicroversionTest, test.BaseTestCase
+):
 
     identity_version = 'v3'
 
@@ -28,8 +37,44 @@
                 "skipping RBAC tests. To enable these tests set "
                 "`tempest.conf [enforce_scope] cinder=True`."
             )
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+
+        api_version_utils.check_skip_with_microversion(
+            cls.min_microversion, cls.max_microversion,
+            CONF.volume.min_microversion, CONF.volume.max_microversion)
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.set_network_resources()
+        super(VolumeV3RbacBaseTests, cls).setup_credentials()
+
+    def setUp(self):
+        super(VolumeV3RbacBaseTests, self).setUp()
+        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
+            volume_microversion=self.request_microversion))
+
+    @classmethod
+    def resource_setup(cls):
+        super(VolumeV3RbacBaseTests, cls).resource_setup()
+        cls.request_microversion = (
+            api_version_utils.select_request_microversion(
+                cls.min_microversion,
+                CONF.volume.min_microversion))
 
     def do_request(self, method, expected_status=200, client=None, **payload):
+        """Perform API call
+
+        Args:
+            method: Name of the API call
+            expected_status: HTTP desired response code
+            client: Client object if exists, None otherwise
+            payload: API call required parameters
+
+        Returns:
+            HTTP response
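+
+        For example (illustrative)::
+
+            self.do_request('show_snapshot', expected_status=200,
+                            snapshot_id=snapshot_id)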
+        """
         if not client:
             client = self.client
         if isinstance(expected_status, type(Exception)):
@@ -40,3 +85,78 @@
             response = getattr(client, method)(**payload)
             self.assertEqual(response.response.status, expected_status)
             return response
+
+    @cleanup_order
+    def create_volume(self, client, **kwargs):
+        """Wrapper utility that returns a test volume
+
+        Args:
+            client: Client object
+
+        Returns:
+            ID of the created volume
+        """
+        kwargs['size'] = CONF.volume.volume_size
+        kwargs['name'] = data_utils.rand_name(
+            VolumeV3RbacBaseTests.__name__ + '-Volume'
+        )
+
+        volume_id = client.create_volume(**kwargs)['volume']['id']
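+        # register cleanup before waiting so the volume is deleted even
+        # if it never reaches 'available'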
+        self.cleanup(
+            test_utils.call_and_ignore_notfound_exc, func=self.delete_resource,
+            client=client, volume_id=volume_id
+        )
+        waiters.wait_for_volume_resource_status(
+            client=client, resource_id=volume_id, status='available'
+        )
+
+        return volume_id
+
+    @cleanup_order
+    def create_snapshot(self, client, volume_id, cleanup=True, **kwargs):
+        """Wrapper utility that returns a test snapshot.
+
+        Args:
+            client: Client object
+            volume_id: ID of the volume
+            cleanup: Whether to schedule the snapshot for deletion
+
+        Returns:
+            ID of the created snapshot
+        """
+        kwargs['name'] = data_utils.rand_name(
+            VolumeV3RbacBaseTests.__name__ + '-Snapshot'
+        )
+
+        snapshot_id = client.create_snapshot(
+            volume_id=volume_id, **kwargs)['snapshot']['id']
+        if cleanup:
+            self.cleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                func=self.delete_resource,
+                client=client, snapshot_id=snapshot_id
+            )
+        waiters.wait_for_volume_resource_status(
+            client=client, resource_id=snapshot_id, status='available'
+        )
+
+        return snapshot_id
+
+    @classmethod
+    def delete_resource(cls, client, **kwargs):
+        """Delete a resource by a given client
+
+        Args:
+            client: Client object
+
+        Keyword Args:
+            snapshot_id: ID of a snapshot
+            volume_id: ID of a volume
+        """
+        key, resource_id = list(kwargs.items())[0]
+        resource_name = key.split('_')[0]
+
+        del_action = getattr(client, f'delete_{resource_name}')
+        test_utils.call_and_ignore_notfound_exc(del_action, resource_id)
+        test_utils.call_and_ignore_notfound_exc(
+            client.wait_for_resource_deletion, resource_id)
diff --git a/cinder_tempest_plugin/rbac/v3/test_capabilities.py b/cinder_tempest_plugin/rbac/v3/test_capabilities.py
index 7024e30..861cca9 100644
--- a/cinder_tempest_plugin/rbac/v3/test_capabilities.py
+++ b/cinder_tempest_plugin/rbac/v3/test_capabilities.py
@@ -10,17 +10,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import abc
-
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
 from tempest.lib import decorators
 from tempest.lib import exceptions
 
-from cinder_tempest_plugin.api.volume import base
-from cinder_tempest_plugin.rbac.v3 import base as rbac_base
 
-
-class VolumeV3RbacCapabilityTests(rbac_base.VolumeV3RbacBaseTests,
-                                  metaclass=abc.ABCMeta):
+class VolumeV3RbacCapabilityTests(rbac_base.VolumeV3RbacBaseTests):
 
     @classmethod
     def setup_clients(cls):
@@ -37,47 +32,35 @@
         cls.admin_stats_client = (
             admin_client.volume_scheduler_stats_client_latest)
 
-    @classmethod
-    def setup_credentials(cls):
-        super().setup_credentials()
-        cls.os_primary = getattr(cls, 'os_%s' % cls.credentials[0])
-
-    @abc.abstractmethod
-    def test_get_capabilities(self):
-        """Test volume_extension:capabilities policy.
-
-        This test must check:
-          * whether the persona can fetch capabilities for a host.
-
-        """
-        pass
-
-
-class ProjectAdminTests(VolumeV3RbacCapabilityTests, base.BaseVolumeTest):
-
-    credentials = ['project_admin', 'system_admin']
-
-    @decorators.idempotent_id('1fdbe493-e58f-48bf-bb38-52003eeef8cb')
-    def test_get_capabilities(self):
+    def _get_capabilities(self, expected_status):
         pools = self.admin_stats_client.list_pools()['pools']
         host_name = pools[0]['name']
-        self.do_request('show_backend_capabilities', expected_status=200,
-                        host=host_name)
+        self.do_request(
+            'show_backend_capabilities',
+            expected_status=expected_status,
+            host=host_name
+        )
 
 
-class ProjectMemberTests(ProjectAdminTests, base.BaseVolumeTest):
+class ProjectReaderTests(VolumeV3RbacCapabilityTests):
+    credentials = ['project_reader', 'project_admin', 'system_admin']
 
+    @decorators.idempotent_id('d16034fc-4204-4ea8-94b3-714de59fdfbf')
+    def test_get_capabilities(self):
+        self._get_capabilities(expected_status=exceptions.Forbidden)
+
+
+class ProjectMemberTests(VolumeV3RbacCapabilityTests):
     credentials = ['project_member', 'project_admin', 'system_admin']
 
     @decorators.idempotent_id('dbaf51de-fafa-4f55-875f-7537524489ab')
     def test_get_capabilities(self):
-        pools = self.admin_stats_client.list_pools()['pools']
-        host_name = pools[0]['name']
-        self.do_request('show_backend_capabilities',
-                        expected_status=exceptions.Forbidden,
-                        host=host_name)
+        self._get_capabilities(expected_status=exceptions.Forbidden)
 
 
-class ProjectReaderTests(ProjectMemberTests, base.BaseVolumeTest):
+class ProjectAdminTests(VolumeV3RbacCapabilityTests):
+    credentials = ['project_admin', 'system_admin']
 
-    credentials = ['project_reader', 'project_admin', 'system_admin']
+    @decorators.idempotent_id('1fdbe493-e58f-48bf-bb38-52003eeef8cb')
+    def test_get_capabilities(self):
+        self._get_capabilities(expected_status=200)
diff --git a/cinder_tempest_plugin/rbac/v3/test_snapshots.py b/cinder_tempest_plugin/rbac/v3/test_snapshots.py
new file mode 100644
index 0000000..f11da42
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_snapshots.py
@@ -0,0 +1,374 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+CONF = config.CONF
+
+
+class VolumeV3RbacSnapshotsTests(rbac_base.VolumeV3RbacBaseTests):
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.vol_other_client = cls.os_project_admin.volumes_client_latest
+        cls.snap_other_client = cls.os_project_admin.snapshots_client_latest
+
+    def _list_snapshots(self, expected_status):
+        """Test list_snapshots operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            expected_status=expected_status, method='list_snapshots'
+        )
+
+    def _show_snapshot(self, expected_status):
+        """Test show_snapshot operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            expected_status=expected_status, method='show_snapshot',
+            snapshot_id=snapshot_id
+        )
+
+    def _create_snapshot(self, expected_status):
+        """Test create_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snap_name = data_utils.rand_name(
+            self.__class__.__name__ + '-Snapshot'
+        )
+        if expected_status == 202:
+            snapshot_id = self.do_request(
+                method='create_snapshot', expected_status=202,
+                volume_id=volume_id, name=snap_name
+            )['snapshot']['id']
+            self.addCleanup(
+                test_utils.call_and_ignore_notfound_exc, self.delete_resource,
+                client=self.client, snapshot_id=snapshot_id
+            )
+            waiters.wait_for_volume_resource_status(
+                client=self.client, resource_id=snapshot_id, status='available'
+            )
+        elif expected_status == exceptions.Forbidden:
+            self.do_request(
+                method='create_snapshot', expected_status=expected_status,
+                volume_id=volume_id, name=snap_name
+            )
+
+    def _remove_snapshot(self, expected_status):
+        """Test create_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+
+        self.do_request(
+            method='delete_snapshot', snapshot_id=snapshot_id,
+            expected_status=expected_status
+        )
+        if expected_status == 202:
+            self.client.wait_for_resource_deletion(id=snapshot_id)
+
+    def _reset_snapshot_status(self, expected_status):
+        """Test reset_snapshot_status operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            'reset_snapshot_status', expected_status=expected_status,
+            snapshot_id=snapshot_id, status='error'
+        )
+
+    def _update_snapshot(self, expected_status):
+        """Test update_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        new_desc = self.__class__.__name__ + '-update_test'
+        self.do_request(
+            method='update_snapshot', expected_status=expected_status,
+            snapshot_id=snapshot_id, description=new_desc
+        )
+
+    def _update_snapshot_status(self, expected_status):
+        """Test update_snapshot_status operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+
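+        # choose a starting status that makes the requested transition
+        # valid on the success path, and the reverse for the
+        # expected-failure path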
+        reset_status = 'creating' if expected_status == 202 else 'error'
+        request_status = 'error' if expected_status == 202 else 'creating'
+        self.os_project_admin.snapshots_client_latest.reset_snapshot_status(
+            snapshot_id=snapshot_id, status=reset_status
+        )
+        waiters.wait_for_volume_resource_status(
+            client=self.os_project_admin.snapshots_client_latest,
+            resource_id=snapshot_id, status=reset_status
+        )
+
+        self.do_request(
+            'update_snapshot_status', expected_status=expected_status,
+            snapshot_id=snapshot_id, status=request_status, progress='80%'
+        )
+
+    def _force_delete_snapshot(self, expected_status):
+        """Test force_delete_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            method='force_delete_snapshot', snapshot_id=snapshot_id,
+            expected_status=expected_status
+        )
+        if expected_status != exceptions.Forbidden:
+            self.client.wait_for_resource_deletion(id=snapshot_id)
+            waiters.wait_for_volume_resource_status(
+                client=self.os_project_admin.volumes_client_latest,
+                resource_id=volume_id, status='available'
+            )
+
+    def _unmanage_snapshot(self, expected_status):
+        """Test unmanage_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            method='unmanage_snapshot',
+            expected_status=expected_status, snapshot_id=snapshot_id
+        )
+        if expected_status != exceptions.Forbidden:
+            self.client.wait_for_resource_deletion(id=snapshot_id)
+
+    def _manage_snapshot(self, client, expected_status):
+        """Test reset_snapshot_status operation.
+
+        Args:
+            client: The client to perform the needed request
+            expected_status: The expected HTTP response code
+        """
+        # Create a volume
+        volume_id = self.create_volume(client=self.vol_other_client)
+
+        # Create a snapshot
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client,
+            volume_id=volume_id,
+            cleanup=False
+        )
+        # Unmanage the snapshot
+        # Unmanage snapshot function works almost the same as delete snapshot,
+        # but it does not delete the snapshot data
+        self.snap_other_client.unmanage_snapshot(snapshot_id)
+        self.client.wait_for_resource_deletion(snapshot_id)
+
+        # Verify the original snapshot does not exist in snapshot list
+        params = {'all_tenants': 1}
+        all_snapshots = self.snap_other_client.list_snapshots(
+            detail=True, **params)['snapshots']
+        self.assertNotIn(snapshot_id, [v['id'] for v in all_snapshots])
+
+        # Manage the snapshot
+        name = data_utils.rand_name(
+            self.__class__.__name__ + '-Managed-Snapshot'
+        )
+        description = data_utils.rand_name(
+            self.__class__.__name__ + '-Managed-Snapshot-Description'
+        )
+        metadata = {"manage-snap-meta1": "value1",
+                    "manage-snap-meta2": "value2",
+                    "manage-snap-meta3": "value3"}
+        snapshot_ref = {
+            'volume_id': volume_id,
+            'ref': {CONF.volume.manage_snapshot_ref[0]:
+                    CONF.volume.manage_snapshot_ref[1] % snapshot_id},
+            'name': name,
+            'description': description,
+            'metadata': metadata
+        }
+
+        new_snapshot = self.do_request(
+            client=client,
+            method='manage_snapshot', expected_status=expected_status,
+            **snapshot_ref
+        )
+        if expected_status != exceptions.Forbidden:
+            snapshot = new_snapshot['snapshot']
+            waiters.wait_for_volume_resource_status(
+                client=self.snap_other_client,
+                resource_id=snapshot['id'],
+                status='available'
+            )
+            self.delete_resource(
+                client=self.snap_other_client, snapshot_id=snapshot['id']
+            )
+
+
+class ProjectReaderTests(VolumeV3RbacSnapshotsTests):
+
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.snapshots_client_latest
+
+    @decorators.idempotent_id('dd8e19dc-c8fd-443c-8aed-cdffe07fa6be')
+    def test_list_snapshots(self):
+        self._list_snapshots(expected_status=200)
+
+    @decorators.idempotent_id('6f69e8ed-4e11-40a1-9620-258cf3c45872')
+    def test_show_snapshot(self):
+        self._show_snapshot(expected_status=200)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('13ae344f-fa01-44cc-b9f1-d04452940dc1')
+    def test_create_snapshot(self):
+        self._create_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('5b58f647-da0f-4d2a-bf68-680fc692efb4')
+    def test_delete_snapshot(self):
+        self._remove_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('809d8c8c-25bf-4f1f-9b77-1a81ce4292d1')
+    def test_reset_snapshot_status(self):
+        self._reset_snapshot_status(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('c46f5df8-9a6f-4ed6-b94c-3b65ef05ee9e')
+    def test_update_snapshot(self):
+        self._update_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('c90f98d7-3665-4c9f-820f-3f4c2adfdbf5')
+    def test_update_snapshot_status(self):
+        self._update_snapshot_status(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('63aa8184-897d-4e00-9b80-d2e7828f1b13')
+    def test_force_delete_snapshot(self):
+        self._force_delete_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('35495666-b663-4c68-ba44-0695e30a6838')
+    def test_unmanage_snapshot(self):
+        self._unmanage_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('d2d1326d-fb47-4448-a1e1-2d1219d30fd5')
+    def test_manage_snapshot(self):
+        self._manage_snapshot(
+            expected_status=exceptions.Forbidden,
+            client=self.os_project_reader.snapshot_manage_client_latest
+        )
+
+
+class ProjectMemberTests(VolumeV3RbacSnapshotsTests):
+
+    credentials = ['project_member', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_member.snapshots_client_latest
+
+    @decorators.idempotent_id('5b3ec87f-443f-42f7-bd3c-ab05ea30c5e1')
+    def test_list_snapshots(self):
+        self._list_snapshots(expected_status=200)
+
+    @decorators.idempotent_id('6fee8967-951c-4957-b51b-97b83c13c7c3')
+    def test_show_snapshot(self):
+        self._show_snapshot(expected_status=200)
+
+    @decorators.idempotent_id('43f77b31-aab4-46d0-b76f-e17000d23589')
+    def test_create_snapshot(self):
+        self._create_snapshot(expected_status=202)
+
+    @decorators.idempotent_id('22939122-8b4e-47d5-abaa-774bc55c07fc')
+    def test_delete_snapshot(self):
+        self._remove_snapshot(expected_status=202)
+
+    @decorators.idempotent_id('da391afd-8baa-458b-b222-f6ab42ab47c3')
+    def test_reset_snapshot_status(self):
+        self._reset_snapshot_status(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('a774bdca-bfbe-477d-9711-5fb64d7e34ea')
+    def test_update_snapshot(self):
+        self._update_snapshot(expected_status=200)
+
+    @decorators.idempotent_id('12e00e1b-bf84-41c1-8a1e-8625d1317789')
+    def test_update_snapshot_status(self):
+        self._update_snapshot_status(expected_status=202)
+
+    @decorators.idempotent_id('e7cb3eb0-d607-4c90-995d-df82d030eca8')
+    def test_force_delete_snapshot(self):
+        self._force_delete_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('dd7da3da-68ef-42f5-af1d-29803a4a04fd')
+    def test_unmanage_snapshot(self):
+        self._unmanage_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('c2501d05-9bca-42d7-9ab5-c0d9133e762f')
+    def test_manage_snapshot(self):
+        self._manage_snapshot(
+            expected_status=exceptions.Forbidden,
+            client=self.os_project_member.snapshot_manage_client_latest
+        )
diff --git a/cinder_tempest_plugin/scenario/test_snapshots.py b/cinder_tempest_plugin/scenario/test_snapshots.py
index 99e1057..f376954 100644
--- a/cinder_tempest_plugin/scenario/test_snapshots.py
+++ b/cinder_tempest_plugin/scenario/test_snapshots.py
@@ -23,7 +23,14 @@
 
     def setUp(self):
         super(SnapshotDataIntegrityTests, self).setUp()
-        self.keypair = self.create_keypair()
+        self.validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        # NOTE(danms): If validation is enabled, we will have a keypair to use,
+        # otherwise we need to create our own.
+        if 'keypair' in self.validation_resources:
+            self.keypair = self.validation_resources['keypair']
+        else:
+            self.keypair = self.create_keypair()
         self.security_group = self.create_security_group()
 
     @decorators.idempotent_id('ff10644e-5a70-4a9f-9801-8204bb81fb61')
@@ -48,6 +55,9 @@
         # Create an instance
         server = self.create_server(
             key_name=self.keypair['name'],
+            validatable=True,
+            validation_resources=self.validation_resources,
+            wait_until='SSHABLE',
             security_groups=[{'name': self.security_group['name']}])
 
         # Create an empty volume
diff --git a/cinder_tempest_plugin/scenario/test_volume_encrypted.py b/cinder_tempest_plugin/scenario/test_volume_encrypted.py
index 69edfa6..69b0ab2 100644
--- a/cinder_tempest_plugin/scenario/test_volume_encrypted.py
+++ b/cinder_tempest_plugin/scenario/test_volume_encrypted.py
@@ -38,11 +38,6 @@
     def resource_cleanup(cls):
         super(TestEncryptedCinderVolumes, cls).resource_cleanup()
 
-    def launch_instance(self):
-        keypair = self.create_keypair()
-
-        return self.create_server(key_name=keypair['name'])
-
     def attach_detach_volume(self, server, volume):
         attached_volume = self.nova_volume_attach(server, volume)
         self.nova_volume_detach(server, attached_volume)
@@ -108,7 +103,11 @@
             self.volumes_client, volume_s['id'], 'available')
         volume_source = self.volumes_client.show_volume(
             volume_s['id'])['volume']
-        server = self.launch_instance()
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        server = self.create_server(wait_until='SSHABLE',
+                                    validatable=True,
+                                    validation_resources=validation_resources)
         self.attach_detach_volume(server, volume_source)
 
     @decorators.idempotent_id('5bb622ab-5060-48a8-8840-d589a548b7e4')
@@ -122,9 +121,8 @@
         * Create an encrypted volume from image
         * Boot an instance from the volume
         * Write data to the volume
-        * Detach volume
-        * Create a clone from the first volume
-        * Create another encrypted volume from source_volumeid
+        * Destroy the instance
+        * Create a clone of the encrypted volume
         * Boot an instance from cloned volume
         * Verify the data
         """
diff --git a/cinder_tempest_plugin/scenario/test_volume_multiattach.py b/cinder_tempest_plugin/scenario/test_volume_multiattach.py
index 235cb25..e04610f 100644
--- a/cinder_tempest_plugin/scenario/test_volume_multiattach.py
+++ b/cinder_tempest_plugin/scenario/test_volume_multiattach.py
@@ -31,7 +31,14 @@
 
     def setUp(self):
         super(VolumeMultiattachTests, self).setUp()
-        self.keypair = self.create_keypair()
+        self.validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        # NOTE(danms): If validation is enabled, we will have a keypair to use,
+        # otherwise we need to create our own.
+        if 'keypair' in self.validation_resources:
+            self.keypair = self.validation_resources['keypair']
+        else:
+            self.keypair = self.create_keypair()
         self.security_group = self.create_security_group()
 
     @classmethod
@@ -52,6 +59,9 @@
         # Create an instance
         server_1 = self.create_server(
             key_name=self.keypair['name'],
+            wait_until='SSHABLE',
+            validatable=True,
+            validation_resources=self.validation_resources,
             security_groups=[{'name': self.security_group['name']}])
 
         # Create multiattach type
@@ -92,6 +102,9 @@
         # Create another instance
         server_2 = self.create_server(
             key_name=self.keypair['name'],
+            validatable=True,
+            validation_resources=self.validation_resources,
+            wait_until='SSHABLE',
             security_groups=[{'name': self.security_group['name']}])
 
         instance_2_ip = self.get_server_ip(server_2)
@@ -117,6 +130,9 @@
         # Create an instance
         server = self.create_server(
             key_name=self.keypair['name'],
+            validatable=True,
+            validation_resources=self.validation_resources,
+            wait_until='SSHABLE',
             security_groups=[{'name': self.security_group['name']}])
 
         # Create multiattach type
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 0000000..bc1dc04
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,3 @@
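+# Pre-run playbook that puts the test nodes into FIPS mode via the
+# enable-fips role (from zuul-jobs).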
+- hosts: all
+  roles:
+    - enable-fips
diff --git a/requirements.txt b/requirements.txt
index 4d75108..c25d1c5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,4 @@
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 oslo.config>=5.1.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-tempest>=27.0.0 # Apache-2.0
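+# newer tempest is required for features used by these tests (e.g. the
+# cleanup_order decorator and wait_until='SSHABLE')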
+tempest>=34.2.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 3b246b5..f224c5c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,13 +1,12 @@
 [metadata]
 name = cinder-tempest-plugin
-summary = Tempest plugin tests for Cinder.
-description_file =
-    README.rst
+description = Tempest plugin tests for Cinder.
+long_description = file: README.rst
 author = OpenStack
 author_email = openstack-discuss@lists.openstack.org
-home_page = http://www.openstack.org/
+url = http://www.openstack.org/
 python_requires = >=3.6
-classifier =
+classifiers =
     Environment :: OpenStack
     Intended Audience :: Information Technology
     Intended Audience :: System Administrators
@@ -20,6 +19,7 @@
     Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
+    Programming Language :: Python :: 3.10
 
 [files]
 packages =
diff --git a/tox.ini b/tox.ini
index d217818..8844306 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,9 @@
 [tox]
-minversion = 3.18.0
+minversion = 4.0.0
+# specify virtualenv here to keep local runs consistent with the
+# gate (it sets the versions of pip, setuptools, and wheel)
+requires = virtualenv>=20.17.1
 envlist = pep8
-skipsdist = True
 # this allows tox to infer the base python from the environment name
 # and override any basepython configured in this file
 ignore_basepython_conflict=true