Merge "Cleanup: unneeded client definition, API override"
diff --git a/.zuul.yaml b/.zuul.yaml
index 8c37531..2b6b59c 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -4,32 +4,43 @@
       - tempest-plugin-jobs
     check:
       jobs:
+        - cinder-tempest-plugin-lvm-multiattach
         - cinder-tempest-plugin-lvm-lio-barbican
         - cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream:
             voting: false
-        - cinder-tempest-plugin-lvm-tgt-barbican
+        # FIXME: the tgt job is broken on jammy, and we may be removing tgt
+        # support anyway.  So make it non-voting until we figure out what to
+        # do about this, which should be at the March 2023 virtual PTG.
+        - cinder-tempest-plugin-lvm-tgt-barbican:
+            voting: false
+        - cinder-tempest-plugin-lvm-lio-barbican-fips:
+            voting: false
+        - cinder-tempest-plugin-lvm-nvmet-barbican
         - nova-ceph-multistore:
             voting: false
         - cinder-tempest-plugin-cbak-ceph
         - cinder-tempest-plugin-cbak-s3
-        - cinder-tempest-plugin-basic-xena
-        - cinder-tempest-plugin-basic-wallaby
-        - cinder-tempest-plugin-basic-victoria
-        - cinder-tempest-plugin-basic-ussuri
-        # Set this job to voting once we have some actual tests to run
-        - cinder-tempest-plugin-protection-functional:
-            voting: false
+        # As per the Tempest "Stable Branch Support Policy", Tempest only
+        # supports the "Maintained" stable branches, not the "Extended
+        # Maintenance" branches, and all tempest plugins must do the same.
+        # Only jobs for the currently "Maintained" stable branches should be
+        # listed here.
+        - cinder-tempest-plugin-basic-2023-2
+        - cinder-tempest-plugin-basic-2023-1
+        - cinder-tempest-plugin-basic-zed
+        - cinder-tempest-plugin-protection-functional
     gate:
       jobs:
         - cinder-tempest-plugin-lvm-lio-barbican
-        - cinder-tempest-plugin-lvm-tgt-barbican
+        - cinder-tempest-plugin-lvm-nvmet-barbican
+        # FIXME: no sense running a non-voting gate job.  See comment above.
+        # - cinder-tempest-plugin-lvm-tgt-barbican
         - cinder-tempest-plugin-cbak-ceph
     experimental:
       jobs:
-        - cinder-tempest-plugin-cbak-ceph-xena
-        - cinder-tempest-plugin-cbak-ceph-wallaby
-        - cinder-tempest-plugin-cbak-ceph-victoria
-        - cinder-tempest-plugin-cbak-ceph-ussuri
+        - cinder-tempest-plugin-cbak-ceph-2023-2
+        - cinder-tempest-plugin-cbak-ceph-2023-1
+        - cinder-tempest-plugin-cbak-ceph-zed
 
 - job:
     name: cinder-tempest-plugin-protection-functional
@@ -40,10 +51,13 @@
     vars:
       tox_envlist: all
       tempest_test_regex: 'cinder_tempest_plugin.rbac'
+      devstack_localrc:
+        KEYSTONE_ENFORCE_SCOPE: True
       devstack_local_conf:
         test-config:
           $CINDER_CONF:
             oslo_policy:
+              enforce_scope: True
               enforce_new_defaults: True
           $TEMPEST_CONFIG:
             enforce_scope:
@@ -52,6 +66,34 @@
         - cinder-tempest-plugin
 
 - job:
+    name: cinder-tempest-plugin-lvm-multiattach
+    description: |
+      This enables multiattach tests along with the standard tempest tests.
+    parent: devstack-tempest
+    required-projects:
+      - opendev.org/openstack/tempest
+      - opendev.org/openstack/cinder-tempest-plugin
+      - opendev.org/openstack/cinder
+    vars:
+      configure_swap_size: 8192
+      tempest_test_regex: '(^tempest\.(api|scenario)|(^cinder_tempest_plugin))'
+      tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+      # Temporarily exclude TestMultiAttachVolumeSwap until LP bug #1980816 is resolved.
+      # Other excluded tests are tests that are somewhat time consuming but unrelated
+      # to multi-attach testing.
+      tempest_exclude_regex: 'TestMultiAttachVolumeSwap|^tempest.api.image|^tempest.api.object_storage|^tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_boot_server_from_encrypted|^tempest.scenario.test_server_advanced_ops|^tempest.scenario.test_unified_limits'
+      tox_envlist: all
+      devstack_localrc:
+        ENABLE_VOLUME_MULTIATTACH: true
+      tempest_plugins:
+        - cinder-tempest-plugin
+    irrelevant-files:
+      - ^.*\.rst$
+      - ^doc/.*$
+      - ^releasenotes/.*$
+    timeout: 10800
+
+- job:
     name: cinder-tempest-plugin-lvm-barbican-base-abstract
     description: |
       This is a base job for lvm with lio & tgt targets
@@ -68,6 +110,7 @@
         devstack_plugins:
           barbican: https://opendev.org/openstack/barbican
     vars:
+      configure_swap_size: 8192
       tempest_test_regex: '(^tempest\.(api|scenario)|(^cinder_tempest_plugin))'
       tox_envlist: all
       devstack_localrc:
@@ -80,6 +123,8 @@
               # FIXME: 'creator' should be re-added by the barbican devstack plugin
              # but the value below overrides everything.
               tempest_roles: member,creator
+            volume:
+              build_timeout: 300
             volume-feature-enabled:
               volume_revert: True
       devstack_services:
@@ -94,9 +139,22 @@
 - job:
     name: cinder-tempest-plugin-lvm-barbican-base
     description: |
+      This is a base job for lvm with lio & tgt targets.
+      No cinderlib testing beginning with 2024.1 development.
+    # FIXME: the following RE2 expression won't work after the 9999.2 release.
+    # If you are reading this during the 9999.2 development cycle, greetings
+    # from the 21st century!
+    branches: ^(master|(stable/(202[4-9]|20[3-9]\d|2[1-9]\d\d|[3-9]\d\d\d))\.[12])$
+    parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+    vars:
+      tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+
+- job:
+    name: cinder-tempest-plugin-lvm-barbican-base
+    description: |
       This is a base job for lvm with lio & tgt targets
       with cinderlib tests.
-    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    branches: ^(stable/(xena|yoga|zed|2023\.[12]))$
     parent: cinder-tempest-plugin-lvm-barbican-base-abstract
     roles:
       - zuul: opendev.org/openstack/cinderlib
@@ -114,8 +172,37 @@
     name: cinder-tempest-plugin-lvm-barbican-base
     description: |
       This is a base job for lvm with lio & tgt targets
-      with cinderlib tests to run on stable/train testing.
-    branches: stable/train
+      with cinderlib tests, for stable/wallaby testing. Because that
+      branch uses tempest 29.0.0 (which is set in the devstack stackrc
+      file), we must use the compatible cinder-tempest-plugin version
+      1.8.0.
+    branches:
+      - stable/wallaby
+    parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+    roles:
+      - zuul: opendev.org/openstack/cinderlib
+    required-projects:
+      - opendev.org/openstack/cinderlib
+      - name: opendev.org/openstack/cinder-tempest-plugin
+        override-checkout: 1.8.0
+    run: playbooks/tempest-and-cinderlib-run.yaml
+    # Required to collect the tox-based logs of the cinderlib functional tests
+    post-run: playbooks/post-cinderlib.yaml
+    vars:
+      fetch_subunit_output_additional_dirs:
+        - "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}"
+      tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+
+- job:
+    name: cinder-tempest-plugin-lvm-barbican-base
+    description: |
+      This is a base job for lvm with lio & tgt targets
+      with cinderlib tests, for stable/victoria testing. Because that
+      branch uses tempest 26.1.0 (which is set in the devstack stackrc
+      file), we must use the compatible cinder-tempest-plugin version
+      1.3.0.
+    branches:
+      - stable/victoria
     parent: cinder-tempest-plugin-lvm-barbican-base-abstract
     roles:
       - zuul: opendev.org/openstack/cinderlib
@@ -135,13 +222,32 @@
     name: cinder-tempest-plugin-lvm-barbican-base
     description: |
       This is a base job for lvm with lio & tgt targets
-    branches: ^(?=stable/(ocata|pike|queens|rocky|stein)).*$
+      for stable/train and stable/ussuri testing.
+      Because these branches use tempest 26.1.0 (which is set in the
+      devstack stackrc file), we must use the compatible
+      cinder-tempest-plugin version 1.3.0.
+      We no longer test cinderlib because it is EOL on these branches.
+    branches:
+      - stable/train
+      - stable/ussuri
+    parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+    required-projects:
+      - name: opendev.org/openstack/cinder-tempest-plugin
+        override-checkout: 1.3.0
+    vars:
+      tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+
+- job:
+    name: cinder-tempest-plugin-lvm-barbican-base
+    description: |
+      This is a base job for lvm with lio & tgt targets
+    branches: ^stable/(ocata|pike|queens|rocky|stein).*$
     parent: cinder-tempest-plugin-lvm-barbican-base-abstract
     required-projects:
       - name: opendev.org/openstack/cinder-tempest-plugin
         override-checkout: stein-last
     vars:
-      tempest_test_blacklist: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-blacklist.txt'
+      tempest_test_blacklist: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
 
 - job:
     name: cinder-tempest-plugin-cbak-ceph
@@ -150,6 +256,7 @@
       Integration tests that runs with the ceph devstack plugin, py3
       and enable the backup service.
     vars:
+      configure_swap_size: 4096
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -157,36 +264,33 @@
               volume_revert: True
       devstack_services:
         c-bak: true
+      devstack_localrc:
+        CINDER_QUOTA_VOLUMES: 25
+    timeout: 10800
 
 - job:
-    name: cinder-tempest-plugin-cbak-ceph-xena
+    name: cinder-tempest-plugin-cbak-ceph-2023-2
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.2
+
+- job:
+    name: cinder-tempest-plugin-cbak-ceph-2023-1
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.1
+
+- job:
+    name: cinder-tempest-plugin-cbak-ceph-zed
     parent: cinder-tempest-plugin-cbak-ceph
     nodeset: openstack-single-node-focal
-    override-checkout: stable/xena
-
-- job:
-    name: cinder-tempest-plugin-cbak-ceph-wallaby
-    parent: cinder-tempest-plugin-cbak-ceph
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/wallaby
-
-- job:
-    name: cinder-tempest-plugin-cbak-ceph-victoria
-    parent: cinder-tempest-plugin-cbak-ceph
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/victoria
-
-- job:
-    name: cinder-tempest-plugin-cbak-ceph-ussuri
-    parent: cinder-tempest-plugin-cbak-ceph
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/ussuri
+    override-checkout: stable/zed
 
 # variant for pre-Ussuri branches (no volume revert for Ceph),
 # should this job be used on those branches
 - job:
     name: cinder-tempest-plugin-cbak-ceph
-    branches: ^(?=stable/(ocata|pike|queens|rocky|stein|train)).*$
+    branches: ^stable/(ocata|pike|queens|rocky|stein|train).*$
     vars:
       devstack_local_conf:
         test-config:
@@ -221,6 +325,52 @@
       runs tempest tests and cinderlib tests on CentOS Stream 9.
 
 - job:
+    name: cinder-tempest-plugin-lvm-lio-barbican-fips
+    parent: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream
+    description: |
+      This job configures Cinder with LVM, LIO, barbican and
+      runs tempest tests and cinderlib tests on CentOS Stream 9
+      under FIPS mode.
+    pre-run: playbooks/enable-fips.yaml
+    vars:
+      nslookup_target: 'opendev.org'
+      tempest_exclude_regex: 'test_encrypted_cinder_volumes_cryptsetup'
+
+- job:
+    name: cinder-tempest-plugin-lvm-nvmet-barbican
+    description: |
+      This job configures Cinder with LVM, nvmet, barbican and
+      runs tempest tests and cinderlib tests.  nvmet is
+      configured to use the new connection information format,
+      NVMe-oF native multipathing, and sharing the NVMe-oF
+      namespace for all the connections to the same node.
+    # TODO: switch to cinder-tempest-plugin-lvm-barbican-base
+    # when cinderlib support for NVMe is fixed
+    parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+    pre-run: playbooks/install-multipath.yaml
+    vars:
+      devstack_localrc:
+        CINDER_TARGET_HELPER: nvmet
+        CINDER_TARGET_PROTOCOL: nvmet_tcp
+        TEMPEST_STORAGE_PROTOCOL: nvmeof
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            volume-feature-enabled:
+              # NotImplementedError: Revert volume to snapshot not implemented for thin LVM.
+              volume_revert: False
+        post-config:
+          $NOVA_CONF:
+            libvirt:
+              volume_use_multipath: true
+          $CINDER_CONF:
+            lvmdriver-1:
+              nvmeof_conn_info_version: 2
+              lvm_share_target: true
+              target_secondary_ip_addresses: 127.0.0.1
+              use_multipath_for_image_xfer: true
+
+- job:
     name: cinder-tempest-plugin-lvm-tgt-barbican
     description: |
       This job configures Cinder with LVM, tgt, barbican and
@@ -266,25 +416,19 @@
       - ^releasenotes/.*$
 
 - job:
-    name: cinder-tempest-plugin-basic-xena
+    name: cinder-tempest-plugin-basic-2023-2
     parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/xena
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.2
 
 - job:
-    name: cinder-tempest-plugin-basic-wallaby
+    name: cinder-tempest-plugin-basic-2023-1
     parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/wallaby
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.1
 
 - job:
-    name: cinder-tempest-plugin-basic-victoria
+    name: cinder-tempest-plugin-basic-zed
     parent: cinder-tempest-plugin-basic
     nodeset: openstack-single-node-focal
-    override-checkout: stable/victoria
-
-- job:
-    name: cinder-tempest-plugin-basic-ussuri
-    parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/ussuri
+    override-checkout: stable/zed
diff --git a/README.rst b/README.rst
index 3fd608a..ad536b8 100644
--- a/README.rst
+++ b/README.rst
@@ -34,7 +34,7 @@
     SYSLOG=False
     LOG_COLOR=False
     RECLONE=yes
-    ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,g-reg,key
+    ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,key
     ENABLED_SERVICES+=,mysql,n-api,n-cond,n-cpu,n-crt,n-sch,rabbit,tempest,placement-api
     CINDER_ENABLED_BACKENDS=lvmdriver-1
     CINDER_DEFAULT_VOLUME_TYPE=lvmdriver-1
diff --git a/cinder_tempest_plugin/api/volume/base.py b/cinder_tempest_plugin/api/volume/base.py
index 21da190..1fd82bf 100644
--- a/cinder_tempest_plugin/api/volume/base.py
+++ b/cinder_tempest_plugin/api/volume/base.py
@@ -16,7 +16,6 @@
 from tempest.common import compute
 from tempest.common import waiters
 from tempest import config
-from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import api_version_utils
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
@@ -57,8 +56,6 @@
 
     def setUp(self):
         super(BaseVolumeTest, self).setUp()
-        self.useFixture(api_microversion_fixture.APIMicroversionFixture(
-            volume_microversion=self.request_microversion))
 
     @classmethod
     def resource_setup(cls):
@@ -67,12 +64,14 @@
             api_version_utils.select_request_microversion(
                 cls.min_microversion,
                 CONF.volume.min_microversion))
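+        # The class-wide microversion fixture replaces the per-test
+        # APIMicroversionFixture that used to be installed in setUp().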
+        cls.setup_api_microversion_fixture(
+            volume_microversion=cls.request_microversion)
 
     @classmethod
     def create_volume(cls, wait_until='available', **kwargs):
         """Wrapper utility that returns a test volume.
 
-           :param wait_until: wait till volume status.
+           :param wait_until: wait till volume status, None means no wait.
         """
         if 'size' not in kwargs:
             kwargs['size'] = CONF.volume.volume_size
@@ -93,8 +92,9 @@
         cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
                                     cls.volumes_client.delete_volume,
                                     volume['id'])
-        waiters.wait_for_volume_resource_status(cls.volumes_client,
-                                                volume['id'], wait_until)
+        if wait_until:
+            waiters.wait_for_volume_resource_status(cls.volumes_client,
+                                                    volume['id'], wait_until)
         return volume
 
     @classmethod
@@ -137,6 +137,11 @@
             'name',
             data_utils.rand_name(self.__class__.__name__ + '-instance'))
 
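+        # Waiting for SSHABLE requires the server to be validatable, so set
+        # up validation resources (keypair, security group, floating IP)
+        # unless the caller already provided them.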
+        if wait_until == 'SSHABLE' and not kwargs.get('validation_resources'):
+            kwargs['validation_resources'] = (
+                self.get_test_validation_resources(self.os_primary))
+            kwargs['validatable'] = True
+
         tenant_network = self.get_tenant_network()
         body, _ = compute.create_test_server(
             self.os_primary,
@@ -199,3 +204,19 @@
             cls.admin_volume_types_client.delete_volume_type, type_id)
         test_utils.call_and_ignore_notfound_exc(
             cls.admin_volume_types_client.wait_for_resource_deletion, type_id)
+
+
+class CreateMultipleResourceTest(BaseVolumeTest):
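+    """Helpers for tests that create several resources in one burst.
+
+    Resources are created without waiting for them individually, so the
+    requests can be processed by Cinder in parallel, and the test waits
+    for all of them afterwards.
+    """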
+
+    def _create_multiple_resource(self, callback, repeat_count=5,
+                                  **kwargs):
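+        """Call ``callback(**kwargs)`` ``repeat_count`` times.
+
+        Returns the IDs of the created resources; waiting for them to
+        become available is left to the caller.
+        """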
+
+        res = []
+        for _ in range(repeat_count):
+            res.append(callback(**kwargs)['id'])
+        return res
+
+    def _wait_for_multiple_resources(self, callback, wait_list, **kwargs):
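+        """Call ``callback(resource_id=r, **kwargs)`` for every ID in
+        ``wait_list``.
+        """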
+
+        for r in wait_list:
+            callback(resource_id=r, **kwargs)
diff --git a/cinder_tempest_plugin/api/volume/test_create_from_image.py b/cinder_tempest_plugin/api/volume/test_create_from_image.py
index dc296c0..acb1943 100644
--- a/cinder_tempest_plugin/api/volume/test_create_from_image.py
+++ b/cinder_tempest_plugin/api/volume/test_create_from_image.py
@@ -23,64 +23,6 @@
 CONF = config.CONF
 
 
-class VolumeFromImageTest(base.BaseVolumeTest):
-
-    @classmethod
-    def skip_checks(cls):
-        super(VolumeFromImageTest, cls).skip_checks()
-        if not CONF.service_available.glance:
-            raise cls.skipException("Glance service is disabled")
-
-    @classmethod
-    def create_volume_no_wait(cls, **kwargs):
-        """Returns a test volume.
-
-        This does not wait for volume creation to finish,
-        so that multiple operations can happen on the
-        Cinder server in parallel.
-        """
-        if 'size' not in kwargs:
-            kwargs['size'] = CONF.volume.volume_size
-
-        if 'imageRef' in kwargs:
-            image = cls.os_primary.image_client_v2.show_image(
-                kwargs['imageRef'])
-            min_disk = image['min_disk']
-            kwargs['size'] = max(kwargs['size'], min_disk)
-
-        if 'name' not in kwargs:
-            name = data_utils.rand_name(cls.__name__ + '-Volume')
-            kwargs['name'] = name
-
-        volume = cls.volumes_client.create_volume(**kwargs)['volume']
-        cls.addClassResourceCleanup(
-            cls.volumes_client.wait_for_resource_deletion, volume['id'])
-        cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
-                                    cls.volumes_client.delete_volume,
-                                    volume['id'])
-
-        return volume
-
-    @decorators.idempotent_id('8976a11b-1ddc-49b6-b66f-8c26adf3fa9e')
-    def test_create_from_image_multiple(self):
-        """Create a handful of volumes from the same image at once.
-
-        The purpose of this test is to stress volume drivers,
-        image download, the image cache, etc., within Cinder.
-        """
-
-        img_uuid = CONF.compute.image_ref
-
-        vols = []
-        for v in range(0, 5):
-            vols.append(self.create_volume_no_wait(imageRef=img_uuid))
-
-        for v in vols:
-            waiters.wait_for_volume_resource_status(self.volumes_client,
-                                                    v['id'],
-                                                    'available')
-
-
 class VolumeAndVolumeTypeFromImageTest(base.BaseVolumeAdminTest):
     # needs AdminTest as superclass to manipulate volume_types
 
diff --git a/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py b/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py
new file mode 100644
index 0000000..10a79f0
--- /dev/null
+++ b/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py
@@ -0,0 +1,105 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+from cinder_tempest_plugin.api.volume import base
+
+CONF = config.CONF
+
+
+class CreateVolumesFromSnapshotTest(base.CreateMultipleResourceTest):
+
+    @decorators.idempotent_id('3b879ad1-d861-4ad3-b2c8-c89162e867c3')
+    def test_create_multiple_volume_from_snapshot(self):
+        """Create multiple volumes from a snapshot."""
+
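+        # Create the volumes without waiting (wait_until=None) so that the
+        # requests overlap on the Cinder side, then wait for all of them to
+        # become available.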
+        volume = self.create_volume()
+        snapshot = self.create_snapshot(volume_id=volume['id'])
+        kwargs_create = {"snapshot_id": snapshot['id'], "wait_until": None}
+        res = self._create_multiple_resource(self.create_volume,
+                                             **kwargs_create)
+        kwargs_wait = {"client": self.volumes_client, "status": "available"}
+        self._wait_for_multiple_resources(
+            waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromSourceVolumeTest(base.CreateMultipleResourceTest):
+
+    @decorators.idempotent_id('b4a250d1-3ffd-4727-a2f5-9d858b298558')
+    def test_create_multiple_volume_from_source_volume(self):
+        """Create multiple volumes from a source volume.
+
+        The purpose of this test is to check the synchronization
+        of driver clone method with simultaneous requests.
+        """
+
+        volume = self.create_volume()
+        kwargs_create = {"source_volid": volume['id'], "wait_until": None}
+        res = self._create_multiple_resource(self.create_volume,
+                                             **kwargs_create)
+        kwargs_wait = {"client": self.volumes_client, "status": "available"}
+        self._wait_for_multiple_resources(
+            waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromBackupTest(base.CreateMultipleResourceTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(CreateVolumesFromBackupTest, cls).skip_checks()
+        if not CONF.volume_feature_enabled.backup:
+            raise cls.skipException("Cinder backup feature disabled")
+
+    @decorators.idempotent_id('9db67083-bf1a-486c-8f77-3778467f39a1')
+    def test_create_multiple_volume_from_backup(self):
+        """Create multiple volumes from a backup."""
+
+        volume = self.create_volume()
+        backup = self.create_backup(volume_id=volume['id'])
+        kwargs_create = {"backup_id": backup['id'], "wait_until": None}
+        res = self._create_multiple_resource(self.create_volume,
+                                             **kwargs_create)
+        kwargs_wait = {"client": self.volumes_client, "status": "available"}
+        self._wait_for_multiple_resources(
+            waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromImageTest(base.CreateMultipleResourceTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(CreateVolumesFromImageTest, cls).skip_checks()
+        if not CONF.service_available.glance:
+            raise cls.skipException("Glance service is disabled")
+
+    @decorators.idempotent_id('8976a11b-1ddc-49b6-b66f-8c26adf3fa9e')
+    def test_create_from_image_multiple(self):
+        """Create a handful of volumes from the same image at once.
+
+        The purpose of this test is to stress volume drivers,
+        image download, the image cache, etc., within Cinder.
+        """
+
+        img_uuid = CONF.compute.image_ref
+
+        kwargs_create = {"imageRef": img_uuid, "wait_until": None}
+        res = self._create_multiple_resource(self.create_volume,
+                                             **kwargs_create)
+        kwargs_wait = {"client": self.volumes_client, "status": "available"}
+        self._wait_for_multiple_resources(
+            waiters.wait_for_volume_resource_status, res, **kwargs_wait)
diff --git a/cinder_tempest_plugin/api/volume/test_volume_backup.py b/cinder_tempest_plugin/api/volume/test_volume_backup.py
index 7982a83..190a483 100644
--- a/cinder_tempest_plugin/api/volume/test_volume_backup.py
+++ b/cinder_tempest_plugin/api/volume/test_volume_backup.py
@@ -31,6 +31,16 @@
         if not CONF.volume_feature_enabled.backup:
             raise cls.skipException("Cinder backup feature disabled")
 
+    @classmethod
+    def setup_credentials(cls):
+        # Setting network=True, subnet=True creates a default network
+        cls.set_network_resources(
+            network=True,
+            subnet=True,
+            router=True,
+            dhcp=True)
+        super(VolumesBackupsTest, cls).setup_credentials()
+
     @decorators.idempotent_id('885410c6-cd1d-452c-a409-7c32b7e0be15')
     def test_volume_snapshot_backup(self):
         """Create backup from snapshot."""
@@ -67,7 +77,7 @@
         self.assertEqual(src_vol['id'], restore['volume_id'])
         self.assertEqual(backup['id'], restore['backup_id'])
 
-    @decorators.idempotent_id('b5d837b0-7066-455d-88fc-4a721a899306')
+    @decorators.idempotent_id('457359e2-a663-4758-8f76-06d392dfd7c7')
     def test_incr_backup_create_and_restore_to_an_existing_volume(self):
         """Test incr backup create and restore to an existing volume."""
         # Create volume
@@ -107,7 +117,7 @@
         server = self.create_server(
             name=server_name,
             block_device_mapping=bd_map,
-            wait_until='ACTIVE')
+            wait_until='SSHABLE')
 
         # Delete VM
         self.os_primary.servers_client.delete_server(server['id'])
diff --git a/cinder_tempest_plugin/api/volume/test_volume_dependency.py b/cinder_tempest_plugin/api/volume/test_volume_dependency.py
new file mode 100644
index 0000000..96a3c83
--- /dev/null
+++ b/cinder_tempest_plugin/api/volume/test_volume_dependency.py
@@ -0,0 +1,65 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib import decorators
+
+from cinder_tempest_plugin.api.volume import base
+
+CONF = config.CONF
+
+
+class VolumeDependencyTests(base.BaseVolumeTest):
+    min_microversion = '3.40'
+
+    @classmethod
+    def setup_clients(cls):
+        super(VolumeDependencyTests, cls).setup_clients()
+
+    @decorators.idempotent_id('42e9df95-854b-4840-9d55-ae62f65e9b8e')
+    def test_delete_source_volume(self):
+        """Test basic dependency deletion
+
+        * Create a volume with source_volid
+        * Delete the source volume
+        """
+        source_volume = self.create_volume()
+        kwargs = {'source_volid': source_volume['id']}
+        cloned_volume = self.create_volume(**kwargs)
+        self.assertEqual(source_volume['id'], cloned_volume['source_volid'])
+        self.volumes_client.delete_volume(source_volume['id'])
+        self.volumes_client.wait_for_resource_deletion(source_volume['id'])
+
+    @decorators.idempotent_id('900d8ea5-2afd-4fe5-a0c3-fab4744f0d40')
+    def test_delete_source_snapshot(self):
+        """Test basic dependency deletion with snapshot
+
+        * Create a snapshot from source volume
+        * Create a volume from that snapshot
+        * Delete the source snapshot
+        * Delete the source volume
+        """
+        source_volume = self.create_volume()
+        snapshot_source_volume = self.create_snapshot(source_volume['id'])
+        kwargs = {'snapshot_id': snapshot_source_volume['id']}
+        volume_from_snapshot = self.create_volume(**kwargs)
+        self.assertEqual(volume_from_snapshot['snapshot_id'],
+                         snapshot_source_volume['id'])
+
+        self.snapshots_client.delete_snapshot(snapshot_source_volume['id'])
+        self.snapshots_client.wait_for_resource_deletion(
+            snapshot_source_volume['id'])
+        self.volumes_client.delete_volume(source_volume['id'])
+        self.volumes_client.wait_for_resource_deletion(source_volume['id'])
diff --git a/cinder_tempest_plugin/config.py b/cinder_tempest_plugin/config.py
index e15a399..78dd6ea 100644
--- a/cinder_tempest_plugin/config.py
+++ b/cinder_tempest_plugin/config.py
@@ -23,3 +23,15 @@
                 default=False,
                 help='Enable to run Cinder volume revert tests'),
 ]
+
+# The barbican service is discovered by config_tempest [1], and will appear
+# in the [service_available] group in tempest.conf. However, the 'barbican'
+# option isn't registered by tempest itself, so we may need to register it
+# ourselves. This adds the ability to test CONF.service_available.barbican.
+#
+# [1] I96800a95f844ce7675d266e456e01620e63e347a
+barbican_service_option = [
+    cfg.BoolOpt('barbican',
+                default=False,
+                help="Whether or not barbican is expected to be available"),
+]
diff --git a/cinder_tempest_plugin/plugin.py b/cinder_tempest_plugin/plugin.py
index 5b7723d..5d170e5 100644
--- a/cinder_tempest_plugin/plugin.py
+++ b/cinder_tempest_plugin/plugin.py
@@ -14,6 +14,7 @@
 #    under the License.
 
 import os
+import sys
 
 from tempest import config
 from tempest.test_discover import plugins
@@ -46,12 +47,25 @@
         config.register_opt_group(conf, config.volume_feature_group,
                                   project_config.cinder_option)
 
+        # Define the 'barbican' service_available option, but only if the
+        # barbican_tempest_plugin isn't present. It also defines the option,
+        # and we need to avoid a duplicate option registration.
+        if 'barbican_tempest_plugin' not in sys.modules:
+            config.register_opt_group(conf, config.service_available_group,
+                                      project_config.barbican_service_option)
+
     def get_opt_lists(self):
         """Get a list of options for sample config generation.
 
         :return: A list of tuples with the group name and options in that
                  group.
         """
-        return [
+        opt_lists = [
             (config.volume_feature_group.name, project_config.cinder_option),
         ]
+
+        if 'barbican_tempest_plugin' not in sys.modules:
+            opt_lists.append((config.service_available_group.name,
+                              project_config.barbican_service_option))
+
+        return opt_lists
diff --git a/cinder_tempest_plugin/rbac/v3/base.py b/cinder_tempest_plugin/rbac/v3/base.py
index d1a11e5..6fc6be5 100644
--- a/cinder_tempest_plugin/rbac/v3/base.py
+++ b/cinder_tempest_plugin/rbac/v3/base.py
@@ -1,3 +1,4 @@
+
 #    Licensed under the Apache License, Version 2.0 (the "License"); you may
 #    not use this file except in compliance with the License. You may obtain
 #    a copy of the License at
@@ -10,13 +11,20 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from tempest.common import waiters
 from tempest import config
+from tempest.lib.common import api_version_utils
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib.decorators import cleanup_order
+from tempest import test
 
 CONF = config.CONF
 
 
-class VolumeV3RbacBaseTests(object):
-
+class VolumeV3RbacBaseTests(
+    api_version_utils.BaseMicroversionTest, test.BaseTestCase
+):
     identity_version = 'v3'
 
     @classmethod
@@ -28,8 +36,43 @@
                 "skipping RBAC tests. To enable these tests set "
                 "`tempest.conf [enforce_scope] cinder=True`."
             )
+        if not CONF.service_available.cinder:
+            skip_msg = ("%s skipped as Cinder is not available" % cls.__name__)
+            raise cls.skipException(skip_msg)
+        api_version_utils.check_skip_with_microversion(
+            cls.min_microversion, cls.max_microversion,
+            CONF.volume.min_microversion, CONF.volume.max_microversion)
+
+    @classmethod
+    def setup_credentials(cls):
+        cls.set_network_resources()
+        super(VolumeV3RbacBaseTests, cls).setup_credentials()
+
+    def setUp(self):
+        super(VolumeV3RbacBaseTests, self).setUp()
+
+    @classmethod
+    def resource_setup(cls):
+        super(VolumeV3RbacBaseTests, cls).resource_setup()
+        cls.request_microversion = (
+            api_version_utils.select_request_microversion(
+                cls.min_microversion,
+                CONF.volume.min_microversion))
+        cls.setup_api_microversion_fixture(
+            volume_microversion=cls.request_microversion)
 
     def do_request(self, method, expected_status=200, client=None, **payload):
+        """Perform an API call
+
+        Args:
+            method: Name of the API call
+            expected_status: Expected HTTP response code (or exception class)
+            client: Client object to use; defaults to self.client
+            payload: Parameters required by the API call
+
+        Returns:
+            HTTP response
+        """
         if not client:
             client = self.client
         if isinstance(expected_status, type(Exception)):
@@ -40,3 +83,104 @@
             response = getattr(client, method)(**payload)
             self.assertEqual(response.response.status, expected_status)
             return response
+
+    @cleanup_order
+    def create_volume(self, client, **kwargs):
+        """Wrapper utility that creates a test volume
+
+        Args:
+            client: Client object
+
+        Returns:
+            ID of the created volume
+        """
+        kwargs['size'] = CONF.volume.volume_size
+        kwargs['name'] = data_utils.rand_name(
+            VolumeV3RbacBaseTests.__name__ + '-Volume'
+        )
+
+        volume_id = client.create_volume(**kwargs)['volume']['id']
+        self.cleanup(
+            test_utils.call_and_ignore_notfound_exc, func=self.delete_resource,
+            client=client, volume_id=volume_id
+        )
+        waiters.wait_for_volume_resource_status(
+            client=client, resource_id=volume_id, status='available'
+        )
+
+        return volume_id
+
+    @cleanup_order
+    def create_snapshot(self, client, volume_id, cleanup=True, **kwargs):
+        """Wrapper utility that creates a test snapshot.
+
+        Args:
+            client: Client object
+            volume_id: ID of the volume
+            cleanup: Whether to register a cleanup that deletes the snapshot
+
+        Returns:
+            ID of the created snapshot
+        """
+        kwargs['name'] = data_utils.rand_name(
+            VolumeV3RbacBaseTests.__name__ + '-Snapshot'
+        )
+
+        snapshot_id = client.create_snapshot(
+            volume_id=volume_id, **kwargs)['snapshot']['id']
+        if cleanup:
+            self.cleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                func=self.delete_resource,
+                client=client, snapshot_id=snapshot_id
+            )
+        waiters.wait_for_volume_resource_status(
+            client=client, resource_id=snapshot_id, status='available'
+        )
+
+        return snapshot_id
+
+    @classmethod
+    def delete_resource(cls, client, **kwargs):
+        """Delete a resource by a given client
+
+        Args:
+            client: Client object
+
+        Keyword Args:
+            snapshot_id: ID of a snapshot
+            volume_id: ID of a volume
+        """
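+        # e.g. volume_id=<uuid> gives resource_name 'volume', which maps to
+        # the client's delete_volume() call below.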
+        key, resource_id = list(kwargs.items())[0]
+        resource_name = key.split('_')[0]
+
+        del_action = getattr(client, f'delete_{resource_name}')
+        test_utils.call_and_ignore_notfound_exc(del_action, resource_id)
+        test_utils.call_and_ignore_notfound_exc(
+            client.wait_for_resource_deletion, resource_id)
+
+    @classmethod
+    def create_backup(
+            cls, volume_id, backup_client=None, add_cleanup=True, **kwargs
+    ):
+        """Wrapper utility that returns a test backup."""
+        if backup_client is None:
+            backup_client = cls.backups_client
+        if 'name' not in kwargs:
+            name = data_utils.rand_name(cls.__name__ + '-Backup')
+            kwargs['name'] = name
+
+        backup = backup_client.create_backup(
+            volume_id=volume_id, **kwargs
+        )['backup']
+        if add_cleanup:
+            cls.addClassResourceCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                cls.delete_resource,
+                client=backup_client,
+                backup_id=backup['id']
+            )
+        waiters.wait_for_volume_resource_status(
+            backup_client, backup['id'], 'available'
+        )
+        return backup
diff --git a/cinder_tempest_plugin/rbac/v3/test_backups.py b/cinder_tempest_plugin/rbac/v3/test_backups.py
new file mode 100644
index 0000000..d309b3e
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_backups.py
@@ -0,0 +1,455 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import base64
+from oslo_serialization import jsonutils as json
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+CONF = config.CONF
+
+
+class RbacV3BackupsTests(rbac_base.VolumeV3RbacBaseTests):
+    @classmethod
+    def skip_checks(cls):
+        super(RbacV3BackupsTests, cls).skip_checks()
+        if not CONF.volume_feature_enabled.backup:
+            raise cls.skipException("Cinder backup feature disabled")
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        admin_client = cls.os_project_admin
+        cls.admin_backups_client = admin_client.backups_client_latest
+        cls.admin_volumes_client = admin_client.volumes_client_latest
+
+    @classmethod
+    def resource_setup(cls):
+        super(RbacV3BackupsTests, cls).resource_setup()
+        cls.volume_id = cls.create_volume(client=cls.admin_volumes_client)
+        backup = cls.create_backup(
+            volume_id=cls.volume_id, backup_client=cls.admin_backups_client
+        )
+        cls.backup_id = backup['id']
+        cls.backup_name = backup['name']
+
+
+class RbacV3BackupsTests33(RbacV3BackupsTests):
+    """Test APIs that require microversion 3.3 or greater"""
+    min_microversion = '3.3'
+
+    def _encode_backup(self, backup):
+        retval = json.dumps(backup)
+        return base64.encode_as_text(retval)
+
+    def _decode_url(self, backup_url):
+        return json.loads(base64.decode_as_text(backup_url))
+
+    def _modify_backup_url(self, backup_url, changes):
+        backup = self._decode_url(backup_url)
+        backup.update(changes)
+        return self._encode_backup(backup)
+
+    def _list_backups(self, expected_status):
+        """List all backups"""
+        backups = self.do_request(
+            method='list_backups', expected_status=expected_status
+        )['backups']
+        backup_list = [
+            b['id'] for b in backups if b['name'] == self.backup_name
+        ]
+
+        self.assertNotEmpty(
+            backup_list, f"Backup {self.backup_name} not found"
+        )
+
+    def _list_project_backups(self, expected_status):
+        """List all backups for a project"""
+        backups = self.do_request(
+            method='list_backups',
+            expected_status=expected_status,
+            project_id=self.client.project_id
+        )['backups']
+        backup_list = [
+            b['id'] for b in backups if b['name'] == self.backup_name
+        ]
+
+        self.assertNotEmpty(
+            backup_list, f"Backup {self.backup_name} not found"
+        )
+
+    def _show_backup(self, expected_status):
+        """Show backup details"""
+        backup = self.do_request(
+            method='show_backup',
+            expected_status=expected_status,
+            backup_id=self.backup_id
+        )['backup']
+        self.assertNotEmpty(backup, f"Backup {self.backup_name} not found")
+
+    def _delete_backup(self, expected_status):
+        """Delete a backup"""
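+        # Register a cleanup only when the delete request is expected to be
+        # rejected; on success the request below removes the backup itself.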
+        add_cleanup = (expected_status == exceptions.Forbidden)
+        volume_id = self.create_volume(client=self.admin_volumes_client)
+        backup = self.create_backup(
+            volume_id=volume_id,
+            backup_client=self.admin_backups_client,
+            add_cleanup=add_cleanup
+        )
+
+        self.do_request(
+            method='delete_backup',
+            expected_status=expected_status,
+            backup_id=backup['id']
+        )
+
+    def _restore_backup(self, expected_status):
+        """Restore a backup"""
+        res = self.do_request(
+            method='restore_backup',
+            expected_status=expected_status,
+            backup_id=self.backup_id,
+            name='new-backup-vol'
+        )
+        if expected_status != exceptions.Forbidden:
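+            # A successful restore puts the backup into 'restoring'; wait for
+            # it to return to 'available', then remove the volume created by
+            # the restore.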
+            waiters.wait_for_volume_resource_status(
+                self.admin_backups_client,
+                self.backup_id, 'available'
+            )
+            self.delete_resource(
+                client=self.admin_volumes_client,
+                volume_id=res['restore']['volume_id']
+            )
+
+    def _create_backup(self, expected_status):
+        """Create a backup"""
+        res = self.do_request(
+            method='create_backup',
+            expected_status=expected_status,
+            volume_id=self.volume_id
+        )
+        if expected_status != exceptions.Forbidden:
+            backup = res['backup']
+            waiters.wait_for_volume_resource_status(
+                self.admin_backups_client, backup['id'], 'available'
+            )
+            self.admin_backups_client.delete_backup(backup_id=backup['id'])
+
+    def _export_backup(self, expected_status):
+        """Export a backup"""
+        self.do_request(
+            method='export_backup',
+            expected_status=expected_status,
+            backup_id=self.backup_id
+        )
+
+    def _import_backup(self, expected_status):
+        """Import a backup"""
+        volume_id = self.create_volume(client=self.admin_volumes_client)
+        backup = self.create_backup(
+            volume_id=volume_id,
+            backup_client=self.admin_backups_client
+        )
+
+        export_backup = (
+            self.admin_backups_client.export_backup(
+                backup['id']
+            )['backup-record']
+        )
+        waiters.wait_for_volume_resource_status(
+            self.admin_backups_client, backup['id'], 'available'
+        )
+        self.assertTrue(
+            export_backup['backup_service'].startswith('cinder.backup.drivers')
+        )
+        # NOTE(ybenshim): Backups are imported with the same backup id
+        # (important for incremental backups among other things), so we cannot
+        # import the exported backup information as it is, because that Backup
+        # ID already exists.  So we'll fake the data by changing the backup id
+        # in the exported backup DB info we have retrieved before importing it
+        # back.
+
+        new_id = data_utils.rand_uuid()
+        new_url = self._modify_backup_url(
+            export_backup['backup_url'], {'id': new_id})
+
+        res = self.do_request(
+            method='import_backup',
+            expected_status=expected_status,
+            backup_service=export_backup['backup_service'],
+            backup_url=new_url
+        )
+        if expected_status != exceptions.Forbidden:
+            new_backup = res['backup']
+            waiters.wait_for_volume_resource_status(
+                self.client, new_backup['id'], 'available'
+            )
+            self.delete_resource(
+                client=self.admin_backups_client,
+                backup_id=new_backup['id']
+            )
+
+    def _reset_backup_status(self, expected_status):
+        """Reset a backup status"""
+        new_status = 'error'
+        volume_id = self.create_volume(client=self.admin_volumes_client)
+        backup = self.create_backup(
+            volume_id=volume_id,
+            backup_client=self.admin_backups_client
+        )
+
+        self.do_request(
+            method='reset_backup_status',
+            expected_status=expected_status,
+            backup_id=backup['id'],
+            status=new_status
+        )
+
+
+class ProjectReaderTests33(RbacV3BackupsTests33):
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.backups_client_latest
+
+    @decorators.idempotent_id('9dd02d4b-d6f8-45ca-a95e-534dbd586aab')
+    def test_list_backups(self):
+        """List all backups"""
+        self._list_backups(expected_status=200)
+
+    @decorators.idempotent_id('9ba2e970-c08b-4c1c-b912-2f3b1373ae6e')
+    def test_list_project_backups(self):
+        """List all backups for a project"""
+        self._list_project_backups(expected_status=200)
+
+    @decorators.idempotent_id('e88f8971-2892-4a54-80bb-dd21b18f19e9')
+    def test_show_backup(self):
+        """Show backup details"""
+        self._show_backup(expected_status=200)
+
+    @decorators.skip_because(bug='2017110')
+    @decorators.idempotent_id('a9ab3279-aa5e-4ad8-b740-b80a7769d3f9')
+    def test_delete_backup(self):
+        """Delete a backup"""
+        self._delete_backup(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug='2017110')
+    @decorators.idempotent_id('0566fa4a-4e03-4cca-822f-d5a4922da2ab')
+    def test_restore_backup(self):
+        """Restore a backup"""
+        self._restore_backup(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug='2017110')
+    @decorators.idempotent_id('bad2514e-18c0-4fa0-9e35-221182ee24cf')
+    def test_create_backup(self):
+        """Create a backup"""
+        self._create_backup(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('ab74b8cc-5005-49b4-94f4-994567171b07')
+    def test_export_backup(self):
+        """Export a backup"""
+        self._export_backup(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('caaa5756-261a-4d9c-bfc2-788719630a06')
+    def test_import_backup(self):
+        """Import a backup"""
+        self._import_backup(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('c832ff77-8f22-499f-a7a3-0834972a1507')
+    def test_reset_backup_status(self):
+        """Reset a backup status"""
+        self._reset_backup_status(expected_status=exceptions.Forbidden)
+
+
+class ProjectMemberTests33(RbacV3BackupsTests33):
+    credentials = ['project_member', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_member.backups_client_latest
+
+    @decorators.idempotent_id('5a23c53c-924b-47f6-a5d1-ab6327391c12')
+    def test_list_backups(self):
+        """List all backups"""
+        self._list_backups(expected_status=200)
+
+    @decorators.idempotent_id('c737bd7b-293c-4d8f-ada9-3b00f7e1adce')
+    def test_list_project_backups(self):
+        """List all backups for a project"""
+        self._list_project_backups(expected_status=200)
+
+    @decorators.idempotent_id('9944bb15-02fa-4321-97a4-ef8cb5b5fec2')
+    def test_show_backup(self):
+        """Show backup details"""
+        self._show_backup(expected_status=200)
+
+    @decorators.idempotent_id('c98dfea8-b9f2-4a84-947b-1d857c707789')
+    def test_delete_backup(self):
+        """Delete a backup"""
+        self._delete_backup(expected_status=202)
+
+    @decorators.idempotent_id('7a6fd066-00e7-4140-866c-8195fbd71e87')
+    def test_restore_backup(self):
+        """Restore a backup"""
+        self._restore_backup(expected_status=202)
+
+    @decorators.idempotent_id('44644140-4d05-4725-9a4b-6d1a71eda9b7')
+    def test_create_backup(self):
+        """Create a backup"""
+        self._create_backup(expected_status=202)
+
+    @decorators.idempotent_id('71c7cfaf-7809-4872-b1b2-3feb90b939d4')
+    def test_export_backup(self):
+        """Export a backup"""
+        self._export_backup(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('f1c03c1b-2b48-4be0-8b6a-81df8a75f78c')
+    def test_import_backup(self):
+        """Import a backup"""
+        self._import_backup(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('307f6fe9-81ed-444a-9aae-99a571d24bf5')
+    def test_reset_backup_status(self):
+        """Reset a backup status"""
+        self._reset_backup_status(expected_status=exceptions.Forbidden)
+
+
+class ProjectAdminTests33(RbacV3BackupsTests33):
+    credentials = ['project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_admin.backups_client_latest
+
+    @decorators.idempotent_id('81c579bc-db98-4773-9590-b742d0b00b89')
+    def test_list_backups(self):
+        """List all backups"""
+        self._list_backups(expected_status=200)
+
+    @decorators.idempotent_id('602dd42d-10df-4eb2-9664-3c9c44e3b35e')
+    def test_list_project_backups(self):
+        """List all backups for a project"""
+        self._list_project_backups(expected_status=200)
+
+    @decorators.idempotent_id('2094dcee-9585-4745-b045-a0f8c79fbe52')
+    def test_show_backup(self):
+        """Show backup details"""
+        self._show_backup(expected_status=200)
+
+    @decorators.idempotent_id('b77a8d69-1d12-480d-a83e-5f712d7c2b74')
+    def test_delete_backup(self):
+        """Delete a backup"""
+        self._delete_backup(expected_status=202)
+
+    @decorators.idempotent_id('7221d2df-338c-4932-be40-ad7166c03db1')
+    def test_restore_backup(self):
+        """Restore a backup"""
+        self._restore_backup(expected_status=202)
+
+    @decorators.idempotent_id('d347fa21-a5bf-4ce5-ab6b-246c3a06a735')
+    def test_create_backup(self):
+        """Create a backup"""
+        self._create_backup(expected_status=202)
+
+    @decorators.idempotent_id('e179a062-47d5-4fa8-b359-dedab2afddd8')
+    def test_export_backup(self):
+        """Export a backup"""
+        self._export_backup(expected_status=200)
+
+    @decorators.idempotent_id('1be80834-2463-49fb-a763-906e8c672fd5')
+    def test_import_backup(self):
+        """Import a backup"""
+        self._import_backup(expected_status=201)
+
+    @decorators.idempotent_id('88db5943-0053-489a-af30-12b139a38a0b')
+    def test_reset_backup_status(self):
+        """Reset a backup status"""
+        self._reset_backup_status(expected_status=202)
+
+
+class RbacV3BackupsTests39(RbacV3BackupsTests):
+    """Test APIs that require microversion 3.9 or greater"""
+    min_microversion = '3.9'
+
+    def _update_backup(self, expected_status):
+        """Update a backup"""
+        new_description = "Updated backup description"
+        update_kwargs = {"description": new_description}
+        self.do_request(
+            method='update_backup',
+            expected_status=expected_status,
+            backup_id=self.backup_id,
+            **update_kwargs
+        )
+        if expected_status != exceptions.Forbidden:
+            backup = self.admin_backups_client.show_backup(
+                backup_id=self.backup_id
+            )['backup']
+            self.assertEqual(
+                backup['description'], new_description,
+                f"Backup {backup['name']} description should be "
+                f"{new_description}"
+            )
+
+
+class ProjectReaderTests39(RbacV3BackupsTests39):
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.backups_client_latest
+
+    @decorators.idempotent_id('50ccc892-6ed0-4015-b181-9f64ffa45f33')
+    @decorators.skip_because(bug='2017110')
+    def test_update_backup(self):
+        """Update a backup"""
+        self._update_backup(expected_status=exceptions.Forbidden)
+
+
+class ProjectMemberTests39(RbacV3BackupsTests39):
+    credentials = ['project_member', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_member.backups_client_latest
+
+    @decorators.idempotent_id('a1cdd6f2-e9bc-4f6a-a0e6-2493ac6f9f27')
+    def test_update_backup(self):
+        """Update a backup"""
+        self._update_backup(expected_status=200)
+
+
+class ProjectAdminTests39(RbacV3BackupsTests39):
+    credentials = ['project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_admin.backups_client_latest
+
+    @decorators.idempotent_id('2686eecf-e3cd-4f23-8771-aa040ed9be4b')
+    def test_update_backup(self):
+        """Update a backup"""
+        self._update_backup(expected_status=200)
diff --git a/cinder_tempest_plugin/rbac/v3/test_capabilities.py b/cinder_tempest_plugin/rbac/v3/test_capabilities.py
index 1fa542d..861cca9 100644
--- a/cinder_tempest_plugin/rbac/v3/test_capabilities.py
+++ b/cinder_tempest_plugin/rbac/v3/test_capabilities.py
@@ -10,16 +10,12 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import abc
-
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+from tempest.lib import decorators
 from tempest.lib import exceptions
 
-from cinder_tempest_plugin.api.volume import base
-from cinder_tempest_plugin.rbac.v3 import base as rbac_base
 
-
-class VolumeV3RbacCapabilityTests(rbac_base.VolumeV3RbacBaseTests,
-                                  metaclass=abc.ABCMeta):
+class VolumeV3RbacCapabilityTests(rbac_base.VolumeV3RbacBaseTests):
 
     @classmethod
     def setup_clients(cls):
@@ -36,45 +32,35 @@
         cls.admin_stats_client = (
             admin_client.volume_scheduler_stats_client_latest)
 
-    @classmethod
-    def setup_credentials(cls):
-        super().setup_credentials()
-        cls.os_primary = getattr(cls, 'os_%s' % cls.credentials[0])
-
-    @abc.abstractmethod
-    def test_get_capabilities(self):
-        """Test volume_extension:capabilities policy.
-
-        This test must check:
-          * whether the persona can fetch capabilities for a host.
-
-        """
-        pass
-
-
-class ProjectAdminTests(VolumeV3RbacCapabilityTests, base.BaseVolumeTest):
-
-    credentials = ['project_admin', 'system_admin']
-
-    def test_get_capabilities(self):
+    def _get_capabilities(self, expected_status):
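+        # Look up a backend host via the admin scheduler-stats client, then
+        # check whether the persona under test may view its capabilities.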
         pools = self.admin_stats_client.list_pools()['pools']
         host_name = pools[0]['name']
-        self.do_request('show_backend_capabilities', expected_status=200,
-                        host=host_name)
+        self.do_request(
+            'show_backend_capabilities',
+            expected_status=expected_status,
+            host=host_name
+        )
 
 
-class ProjectMemberTests(ProjectAdminTests, base.BaseVolumeTest):
+class ProjectReaderTests(VolumeV3RbacCapabilityTests):
+    credentials = ['project_reader', 'project_admin', 'system_admin']
 
+    @decorators.idempotent_id('d16034fc-4204-4ea8-94b3-714de59fdfbf')
+    def test_get_capabilities(self):
+        self._get_capabilities(expected_status=exceptions.Forbidden)
+
+
+class ProjectMemberTests(VolumeV3RbacCapabilityTests):
     credentials = ['project_member', 'project_admin', 'system_admin']
 
+    @decorators.idempotent_id('dbaf51de-fafa-4f55-875f-7537524489ab')
     def test_get_capabilities(self):
-        pools = self.admin_stats_client.list_pools()['pools']
-        host_name = pools[0]['name']
-        self.do_request('show_backend_capabilities',
-                        expected_status=exceptions.Forbidden,
-                        host=host_name)
+        self._get_capabilities(expected_status=exceptions.Forbidden)
 
 
-class ProjectReaderTests(ProjectMemberTests, base.BaseVolumeTest):
+class ProjectAdminTests(VolumeV3RbacCapabilityTests):
+    credentials = ['project_admin', 'system_admin']
 
-    credentials = ['project_reader', 'project_admin', 'system_admin']
+    @decorators.idempotent_id('1fdbe493-e58f-48bf-bb38-52003eeef8cb')
+    def test_get_capabilities(self):
+        self._get_capabilities(expected_status=200)
diff --git a/cinder_tempest_plugin/rbac/v3/test_snapshots.py b/cinder_tempest_plugin/rbac/v3/test_snapshots.py
new file mode 100644
index 0000000..f11da42
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_snapshots.py
@@ -0,0 +1,374 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+CONF = config.CONF
+
+
+class VolumeV3RbacSnapshotsTests(rbac_base.VolumeV3RbacBaseTests):
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.vol_other_client = cls.os_project_admin.volumes_client_latest
+        cls.snap_other_client = cls.os_project_admin.snapshots_client_latest
+
+    def _list_snapshots(self, expected_status):
+        """Test list_snapshots operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            expected_status=expected_status, method='list_snapshots'
+        )
+
+    def _show_snapshot(self, expected_status):
+        """Test show_snapshot operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            expected_status=expected_status, method='show_snapshot',
+            snapshot_id=snapshot_id
+        )
+
+    def _create_snapshot(self, expected_status):
+        """Test create_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snap_name = data_utils.rand_name(
+            self.__class__.__name__ + '-Snapshot'
+        )
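+        # In the allowed case, wait for the snapshot and register a cleanup;
+        # in the Forbidden case only the response code is checked.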
+        if expected_status == 202:
+            snapshot_id = self.do_request(
+                method='create_snapshot', expected_status=202,
+                volume_id=volume_id, name=snap_name
+            )['snapshot']['id']
+            self.addCleanup(
+                test_utils.call_and_ignore_notfound_exc, self.delete_resource,
+                client=self.client, snapshot_id=snapshot_id
+            )
+            waiters.wait_for_volume_resource_status(
+                client=self.client, resource_id=snapshot_id, status='available'
+            )
+        elif expected_status == exceptions.Forbidden:
+            self.do_request(
+                method='create_snapshot', expected_status=expected_status,
+                volume_id=volume_id, name=snap_name
+            )
+
+    def _remove_snapshot(self, expected_status):
+        """Test create_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+
+        self.do_request(
+            method='delete_snapshot', snapshot_id=snapshot_id,
+            expected_status=expected_status
+        )
+        if expected_status == 202:
+            self.client.wait_for_resource_deletion(id=snapshot_id)
+
+    def _reset_snapshot_status(self, expected_status):
+        """Test reset_snapshot_status operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            'reset_snapshot_status', expected_status=expected_status,
+            snapshot_id=snapshot_id, status='error'
+        )
+
+    def _update_snapshot(self, expected_status):
+        """Test update_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        new_desc = self.__class__.__name__ + '-update_test'
+        self.do_request(
+            method='update_snapshot', expected_status=expected_status,
+            snapshot_id=snapshot_id, description=new_desc
+        )
+
+    def _update_snapshot_status(self, expected_status):
+        """Test update_snapshot_status operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+
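+        # Have the admin put the snapshot into a status from which the
+        # persona's subsequent update_snapshot_status call is a valid
+        # transition ('creating' -> 'error' in the allowed case).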
+        reset_status = 'creating' if expected_status == 202 else 'error'
+        request_status = 'error' if expected_status == 202 else 'creating'
+        self.os_project_admin.snapshots_client_latest.reset_snapshot_status(
+            snapshot_id=snapshot_id, status=reset_status
+        )
+        waiters.wait_for_volume_resource_status(
+            client=self.os_project_admin.snapshots_client_latest,
+            resource_id=snapshot_id, status=reset_status
+        )
+
+        self.do_request(
+            'update_snapshot_status', expected_status=expected_status,
+            snapshot_id=snapshot_id, status=request_status, progress='80%'
+        )
+
+    def _force_delete_snapshot(self, expected_status):
+        """Test force_delete_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            method='force_delete_snapshot', snapshot_id=snapshot_id,
+            expected_status=expected_status
+        )
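+        # A successful force-delete removes the snapshot and leaves the
+        # source volume 'available' again.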
+        if expected_status != exceptions.Forbidden:
+            self.client.wait_for_resource_deletion(id=snapshot_id)
+            waiters.wait_for_volume_resource_status(
+                client=self.os_project_admin.volumes_client_latest,
+                resource_id=volume_id, status='available'
+            )
+
+    def _unmanage_snapshot(self, expected_status):
+        """Test unmanage_snapshot operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client, volume_id=volume_id
+        )
+        self.do_request(
+            method='unmanage_snapshot',
+            expected_status=expected_status, snapshot_id=snapshot_id
+        )
+        if expected_status != exceptions.Forbidden:
+            self.client.wait_for_resource_deletion(id=snapshot_id)
+
+    def _manage_snapshot(self, client, expected_status):
+        """Test reset_snapshot_status operation.
+
+        Args:
+            client: The client to perform the needed request
+            expected_status: The expected HTTP response code
+        """
+        # Create a volume
+        volume_id = self.create_volume(client=self.vol_other_client)
+
+        # Create a snapshot
+        snapshot_id = self.create_snapshot(
+            client=self.snap_other_client,
+            volume_id=volume_id,
+            cleanup=False
+        )
+        # Unmanage the snapshot.  Unmanaging behaves much like deleting the
+        # snapshot, except that the snapshot data is left intact on the
+        # backend.
+        self.snap_other_client.unmanage_snapshot(snapshot_id)
+        self.client.wait_for_resource_deletion(snapshot_id)
+
+        # Verify the original snapshot does not exist in snapshot list
+        params = {'all_tenants': 1}
+        all_snapshots = self.snap_other_client.list_snapshots(
+            detail=True, **params)['snapshots']
+        self.assertNotIn(snapshot_id, [v['id'] for v in all_snapshots])
+
+        # Manage the snapshot
+        name = data_utils.rand_name(
+            self.__class__.__name__ + '-Managed-Snapshot'
+        )
+        description = data_utils.rand_name(
+            self.__class__.__name__ + '-Managed-Snapshot-Description'
+        )
+        metadata = {"manage-snap-meta1": "value1",
+                    "manage-snap-meta2": "value2",
+                    "manage-snap-meta3": "value3"}
+        snapshot_ref = {
+            'volume_id': volume_id,
+            'ref': {CONF.volume.manage_snapshot_ref[0]:
+                    CONF.volume.manage_snapshot_ref[1] % snapshot_id},
+            'name': name,
+            'description': description,
+            'metadata': metadata
+        }
+
+        new_snapshot = self.do_request(
+            client=client,
+            method='manage_snapshot', expected_status=expected_status,
+            volume_id=volume_id, ref=snapshot_ref
+        )
+        if expected_status != exceptions.Forbidden:
+            snapshot = new_snapshot['snapshot']
+            waiters.wait_for_volume_resource_status(
+                client=self.snap_other_client,
+                resource_id=snapshot['id'],
+                status='available'
+            )
+            self.delete_resource(
+                client=self.snap_other_client, snapshot_id=snapshot['id']
+            )
+
+
+class ProjectReaderTests(VolumeV3RbacSnapshotsTests):
+
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.snapshots_client_latest
+
+    @decorators.idempotent_id('dd8e19dc-c8fd-443c-8aed-cdffe07fa6be')
+    def test_list_snapshots(self):
+        self._list_snapshots(expected_status=200)
+
+    @decorators.idempotent_id('6f69e8ed-4e11-40a1-9620-258cf3c45872')
+    def test_show_snapshot(self):
+        self._show_snapshot(expected_status=200)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('13ae344f-fa01-44cc-b9f1-d04452940dc1')
+    def test_create_snapshot(self):
+        self._create_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('5b58f647-da0f-4d2a-bf68-680fc692efb4')
+    def test_delete_snapshot(self):
+        self._remove_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('809d8c8c-25bf-4f1f-9b77-1a81ce4292d1')
+    def test_reset_snapshot_status(self):
+        self._reset_snapshot_status(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('c46f5df8-9a6f-4ed6-b94c-3b65ef05ee9e')
+    def test_update_snapshot(self):
+        self._update_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2017108")
+    @decorators.idempotent_id('c90f98d7-3665-4c9f-820f-3f4c2adfdbf5')
+    def test_update_snapshot_status(self):
+        self._update_snapshot_status(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('63aa8184-897d-4e00-9b80-d2e7828f1b13')
+    def test_force_delete_snapshot(self):
+        self._force_delete_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('35495666-b663-4c68-ba44-0695e30a6838')
+    def test_unmanage_snapshot(self):
+        self._unmanage_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('d2d1326d-fb47-4448-a1e1-2d1219d30fd5')
+    def test_manage_snapshot(self):
+        self._manage_snapshot(
+            expected_status=exceptions.Forbidden,
+            client=self.os_project_reader.snapshot_manage_client_latest
+        )
+
+
+class ProjectMemberTests(VolumeV3RbacSnapshotsTests):
+
+    credentials = ['project_member', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_member.snapshots_client_latest
+
+    @decorators.idempotent_id('5b3ec87f-443f-42f7-bd3c-ab05ea30c5e1')
+    def test_list_snapshots(self):
+        self._list_snapshots(expected_status=200)
+
+    @decorators.idempotent_id('6fee8967-951c-4957-b51b-97b83c13c7c3')
+    def test_show_snapshot(self):
+        self._show_snapshot(expected_status=200)
+
+    @decorators.idempotent_id('43f77b31-aab4-46d0-b76f-e17000d23589')
+    def test_create_snapshot(self):
+        self._create_snapshot(expected_status=202)
+
+    @decorators.idempotent_id('22939122-8b4e-47d5-abaa-774bc55c07fc')
+    def test_delete_snapshot(self):
+        self._remove_snapshot(expected_status=202)
+
+    @decorators.idempotent_id('da391afd-8baa-458b-b222-f6ab42ab47c3')
+    def test_reset_snapshot_status(self):
+        self._reset_snapshot_status(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('a774bdca-bfbe-477d-9711-5fb64d7e34ea')
+    def test_update_snapshot(self):
+        self._update_snapshot(expected_status=200)
+
+    @decorators.idempotent_id('12e00e1b-bf84-41c1-8a1e-8625d1317789')
+    def test_update_snapshot_status(self):
+        self._update_snapshot_status(expected_status=202)
+
+    @decorators.idempotent_id('e7cb3eb0-d607-4c90-995d-df82d030eca8')
+    def test_force_delete_snapshot(self):
+        self._force_delete_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('dd7da3da-68ef-42f5-af1d-29803a4a04fd')
+    def test_unmanage_snapshot(self):
+        self._unmanage_snapshot(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('c2501d05-9bca-42d7-9ab5-c0d9133e762f')
+    def test_manage_snapshot(self):
+        self._manage_snapshot(
+            expected_status=exceptions.Forbidden,
+            client=self.os_project_member.snapshot_manage_client_latest
+        )
diff --git a/cinder_tempest_plugin/rbac/v3/test_user_messages.py b/cinder_tempest_plugin/rbac/v3/test_user_messages.py
new file mode 100644
index 0000000..c55a4dd
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_user_messages.py
@@ -0,0 +1,168 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+CONF = config.CONF
+
+
+class RbacV3UserMessagesTests(rbac_base.VolumeV3RbacBaseTests):
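+    # The user messages API is only available from microversion 3.3.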
+    min_microversion = '3.3'
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        admin_client = cls.os_project_admin
+        cls.admin_messages_client = admin_client.volume_messages_client_latest
+        cls.admin_volumes_client = admin_client.volumes_client_latest
+        cls.admin_types_client = admin_client.volume_types_client_latest
+
+    def create_user_message(self):
+        """Trigger a 'no valid host' situation to generate a message."""
+        bad_protocol = data_utils.rand_name('storage_protocol')
+        bad_vendor = data_utils.rand_name('vendor_name')
+        extra_specs = {'storage_protocol': bad_protocol,
+                       'vendor_name': bad_vendor}
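+        # Random protocol and vendor values guarantee that no backend matches
+        # the type, so the volume will go to 'error' and generate a message.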
+        vol_type_name = data_utils.rand_name(
+            self.__class__.__name__ + '-volume-type'
+        )
+        bogus_type = self.admin_types_client.create_volume_type(
+            name=vol_type_name, extra_specs=extra_specs
+        )['volume_type']
+        self.addCleanup(
+            self.admin_types_client.delete_volume_type, bogus_type['id']
+        )
+
+        params = {
+            'volume_type': bogus_type['id'], 'size': CONF.volume.volume_size
+        }
+        volume = self.admin_volumes_client.create_volume(**params)['volume']
+        waiters.wait_for_volume_resource_status(
+            self.admin_volumes_client, volume['id'], 'error'
+        )
+        self.addCleanup(
+            test_utils.call_and_ignore_notfound_exc,
+            self.admin_volumes_client.delete_volume,
+            volume['id']
+        )
+
+        messages = self.admin_messages_client.list_messages()['messages']
+        message_id = None
+        for message in messages:
+            if message['resource_uuid'] == volume['id']:
+                message_id = message['id']
+                break
+        self.assertIsNotNone(
+            message_id, f"No user message generated for volume {volume['id']}"
+        )
+        return message_id
+
+    def _list_messages(self, expected_status):
+        message_id = self.create_user_message()
+        self.addCleanup(
+            self.admin_messages_client.delete_message, message_id
+        )
+        self.do_request(
+            method='list_messages', expected_status=expected_status
+        )
+
+    def _show_message(self, expected_status):
+        message_id = self.create_user_message()
+        self.addCleanup(self.admin_messages_client.delete_message, message_id)
+        self.do_request(
+            method='show_message', expected_status=expected_status,
+            message_id=message_id
+        )
+
+    def _delete_message(self, expected_status):
+        message_id = self.create_user_message()
+        self.do_request(
+            method='delete_message', expected_status=expected_status,
+            message_id=message_id
+        )
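+        # If the delete was expected to be rejected, have the admin clean the
+        # message up; otherwise confirm that it is really gone.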
+        if expected_status == exceptions.Forbidden:
+            self.addCleanup(
+                self.admin_messages_client.delete_message, message_id
+            )
+        else:
+            self.client.wait_for_resource_deletion(id=message_id)
+
+
+class ProjectReaderTests(RbacV3UserMessagesTests):
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.volume_messages_client_latest
+
+    @decorators.idempotent_id('1bef8bf9-6457-40f8-ada2-bc4d27602a07')
+    def test_list_messages(self):
+        self._list_messages(expected_status=200)
+
+    @decorators.idempotent_id('689c53a9-6db9-44a8-9878-41d28899e0af')
+    def test_show_message(self):
+        self._show_message(expected_status=200)
+
+    @decorators.skip_because(bug='2009818')
+    @decorators.idempotent_id('c6e8744b-7749-425f-81b6-b1c3df6c7162')
+    def test_delete_message(self):
+        self._delete_message(expected_status=exceptions.Forbidden)
+
+
+class ProjectMemberTests(RbacV3UserMessagesTests):
+    credentials = ['project_member', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_member.volume_messages_client_latest
+
+    @decorators.idempotent_id('fb470249-a482-49c6-84af-eda34891a714')
+    def test_list_messages(self):
+        self._list_messages(expected_status=200)
+
+    @decorators.idempotent_id('43d248ef-008d-4aff-8c7f-37959a0fa195')
+    def test_show_message(self):
+        self._show_message(expected_status=200)
+
+    @decorators.idempotent_id('a77cd089-cb74-4b44-abcb-06f1a6f80378')
+    def test_delete_message(self):
+        self._delete_message(expected_status=204)
+
+
+class ProjectAdminTests(RbacV3UserMessagesTests):
+    credentials = ['project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_admin.volume_messages_client_latest
+
+    @decorators.idempotent_id('f3567efc-863c-4668-8fb1-6aa3f836451d')
+    def test_list_messages(self):
+        self._list_messages(expected_status=200)
+
+    @decorators.idempotent_id('eecc7045-017b-492c-8594-2d40f5fda139')
+    def test_show_message(self):
+        self._show_message(expected_status=200)
+
+    @decorators.idempotent_id('1f2db6f2-148f-44c2-97ef-dcff0fccd49a')
+    def test_delete_message(self):
+        self._delete_message(expected_status=204)
diff --git a/cinder_tempest_plugin/rbac/v3/test_volume_actions.py b/cinder_tempest_plugin/rbac/v3/test_volume_actions.py
new file mode 100644
index 0000000..bf34f58
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_volume_actions.py
@@ -0,0 +1,154 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+CONF = config.CONF
+
+
+class VolumeV3RbacVolumeActionsTests(rbac_base.VolumeV3RbacBaseTests):
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.vol_other_client = cls.os_project_admin.volumes_client_latest
+
+    def _extend_volume(self, expected_status):
+        """Test extend_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='extend_volume', volume_id=volume_id,
+            new_size=2, expected_status=expected_status
+        )
+
+    def _reset_volume_status(self, expected_status):
+        """Test reset_volume_status operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='reset_volume_status', volume_id=volume_id,
+            status='error', expected_status=expected_status
+        )
+
+    def _retype_volume(self, expected_status):
+        """Test retype_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='retype_volume', volume_id=volume_id,
+            new_type='dedup-tier-replication', expected_status=expected_status
+        )
+
+    def _update_volume_readonly(self, expected_status):
+        """Test update_volume_readonly operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='update_volume_readonly', volume_id=volume_id,
+            readonly=True, expected_status=expected_status
+        )
+
+    def _force_delete_volume(self, expected_status):
+        """Test force_delete_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='force_delete_volume', volume_id=volume_id,
+            expected_status=expected_status
+        )
+
+    def _reserve_volume(self, expected_status):
+        """Test reserve_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='reserve_volume', volume_id=volume_id,
+            expected_status=expected_status
+        )
+
+    def _unreserve_volume(self, expected_status):
+        """Test unreserve_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='unreserve_volume', volume_id=volume_id,
+            expected_status=expected_status
+        )
+
+
+class ProjectReaderTests(VolumeV3RbacVolumeActionsTests):
+
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.volumes_client_latest
+
+    @decorators.skip_because(bug="2020261")
+    @decorators.idempotent_id('4d721c58-2f6f-4857-8f4f-0664d5f7bf49')
+    def test_extend_volume(self):
+        self._extend_volume(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('434b454a-5cbe-492d-a416-70b8ff41f636')
+    def test_reset_volume_status(self):
+        self._reset_volume_status(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2020261")
+    @decorators.idempotent_id('4675295a-7c72-4b04-8a43-03d7c88ab6bf')
+    def test_retype_volume(self):
+        self._retype_volume(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2020261")
+    @decorators.idempotent_id('3beecd52-e314-40d8-875d-a0e7db8dd88f')
+    def test_update_volume_readonly(self):
+        self._update_volume_readonly(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('b025ff12-73a4-4f15-af55-876cd43cade3')
+    def test_force_delete_volume(self):
+        self._force_delete_volume(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2020261")
+    @decorators.idempotent_id('d2c13bf9-267a-4a71-be5c-391f22e9b433')
+    def test_reserve_volume(self):
+        self._reserve_volume(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2020261")
+    @decorators.idempotent_id('725d85cf-96b2-4338-98f4-2f468099c4ed')
+    def test_unreserve_volume(self):
+        self._unreserve_volume(expected_status=exceptions.Forbidden)
diff --git a/cinder_tempest_plugin/rbac/v3/test_volume_types.py b/cinder_tempest_plugin/rbac/v3/test_volume_types.py
new file mode 100644
index 0000000..cdbc341
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_volume_types.py
@@ -0,0 +1,516 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+
+class RbacV3VolumeTypesTests(rbac_base.VolumeV3RbacBaseTests):
+
+    min_microversion = '3.3'
+    extra_spec_key = 'key1'
+    encryption_type_key_cipher = 'cipher'
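+    # Encryption parameters applied whenever a test volume type is created
+    # with an associated encryption type.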
+    create_kwargs = {
+        'provider': 'LuksEncryptor',
+        'key_size': 256,
+        encryption_type_key_cipher: 'aes-xts-plain64',
+        'control_location': 'front-end'
+    }
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        admin_client = cls.os_project_admin
+        cls.admin_volumes_client = admin_client.volumes_client_latest
+        cls.admin_types_client = admin_client.volume_types_client_latest
+        cls.admin_encryption_types_client = \
+            admin_client.encryption_types_client_latest
+
+    @classmethod
+    def resource_setup(cls):
+        """Create a new volume-type for the test"""
+        super().resource_setup()
+        # create a volume type
+        cls.volume_type = cls.create_volume_type()
+
+    @classmethod
+    def create_volume_type(
+            cls, name=None, with_encryption=True, cleanup=True
+    ):
+        # create a volume type
+        if not name:
+            name = data_utils.rand_name("volume-type")
+        extra_specs = {cls.extra_spec_key: 'value1'}
+        params = {'name': name,
+                  'description': "description",
+                  'extra_specs': extra_specs,
+                  'os-volume-type-access:is_public': True}
+        volume_type = cls.admin_types_client.create_volume_type(
+            **params
+        )['volume_type']
+
+        if with_encryption:
+            # Create encryption_type
+            cls.encryption_type = \
+                cls.admin_encryption_types_client.create_encryption_type(
+                    volume_type['id'], **cls.create_kwargs)['encryption']
+
+        if cleanup:
+            cls.addClassResourceCleanup(
+                cls.admin_types_client.delete_volume_type, volume_type['id']
+            )
+
+        return volume_type
+
+    def _update_volume_type(self, expected_status):
+        """Update volume type"""
+        self.do_request(
+            method='update_volume_type',
+            expected_status=expected_status,
+            volume_type_id=self.volume_type['id'],
+            description='Updated volume type description'
+        )
+
+    def _create_or_update_extra_specs_for_volume_type(self, expected_status):
+        """Create or update extra specs"""
+        volume_type = self.create_volume_type(with_encryption=False)
+        # Create extra spec 'key2' with value 'value2'
+        extra_spec = {'key2': 'value2'}
+        self.do_request(
+            method='create_volume_type_extra_specs',
+            expected_status=expected_status,
+            volume_type_id=volume_type['id'],
+            extra_specs=extra_spec
+        )
+
+        # Update extra spec 'key2' with value 'updated value'
+        extra_spec = {'key2': 'updated value'}
+        self.do_request(
+            method='update_volume_type_extra_specs',
+            expected_status=expected_status,
+            volume_type_id=volume_type['id'],
+            extra_spec_name='key2',
+            extra_specs=extra_spec
+        )
+
+    def _list_all_extra_specs_for_volume_type(self, expected_status):
+        """List all extra_specs for a volume type"""
+        extra_specs = self.do_request(
+            method='list_volume_types_extra_specs',
+            expected_status=expected_status,
+            volume_type_id=self.volume_type['id']
+        )['extra_specs']
+        self.assertIn(
+            self.extra_spec_key,
+            list(extra_specs.keys()),
+            message=f"Key '{self.extra_spec_key}' not found in extra_specs."
+        )
+
+    def _show_extra_spec_for_volume_type(self, expected_status):
+        """Show extra_spec for a volume type"""
+        self.do_request(
+            method='show_volume_type_extra_specs',
+            expected_status=expected_status,
+            volume_type_id=self.volume_type['id'],
+            extra_specs_name=self.extra_spec_key
+        )
+
+    def _update_extra_spec_for_volume_type(self, expected_status):
+        """Update extra_spec for a volume type"""
+        spec_name = self.extra_spec_key
+        extra_spec = {spec_name: 'updated value'}
+        self.do_request(
+            method='update_volume_type_extra_specs',
+            expected_status=expected_status,
+            volume_type_id=self.volume_type['id'],
+            extra_spec_name=spec_name,
+            extra_specs=extra_spec
+        )
+
+    def _delete_extra_spec_for_volume_type(self, expected_status):
+        """Delete a volume type extra_spec"""
+        volume_type = self.create_volume_type(with_encryption=False)
+
+        self.do_request(
+            method='delete_volume_type_extra_specs',
+            expected_status=expected_status,
+            volume_type_id=volume_type['id'],
+            extra_spec_name=self.extra_spec_key
+        )
+
+    def _show_volume_type_detail(self, expected_status):
+        """Show volume type"""
+        self.do_request(
+            method='show_volume_type',
+            expected_status=expected_status,
+            volume_type_id=self.volume_type['id']
+        )
+
+    def _show_default_volume_type(self, expected_status):
+        """Show default volume type"""
+        self.do_request(
+            method='show_default_volume_type',
+            expected_status=expected_status
+        )
+
+    def _delete_volume_type(self, expected_status):
+        """Delete a volume type"""
+        # Only register a cleanup when the delete is expected to be rejected;
+        # otherwise the test's own delete request removes the volume type.
+        cleanup = expected_status == exceptions.Forbidden
+        volume_type = self.create_volume_type(
+            with_encryption=False, cleanup=cleanup
+        )
+
+        self.do_request(
+            method='delete_volume_type',
+            expected_status=expected_status,
+            volume_type_id=volume_type['id']
+        )
+
+    def _list_volume_types(self, expected_status):
+        """List all volume types"""
+        self.do_request(
+            method='list_volume_types',
+            expected_status=expected_status
+        )
+
+    def _create_volume_type(self, expected_status):
+        """Create a volume type"""
+        volume_type = self.do_request(
+            method='create_volume_type',
+            expected_status=expected_status,
+            name="test-new-volume-type"
+        )
+        if expected_status != exceptions.Forbidden:
+            volume_type = volume_type['volume_type']
+            self.admin_types_client.delete_volume_type(
+                volume_type_id=volume_type['id']
+            )
+
+    def _show_encryption_type(self, expected_status):
+        """Show volume type's encryption type"""
+        self.do_request(
+            method='show_encryption_type',
+            expected_status=expected_status,
+            client=self.encryption_types_client,
+            volume_type_id=self.volume_type['id']
+        )
+
+    def _show_encryption_spec_item(self, expected_status):
+        """Show encryption spec item"""
+        self.do_request(
+            method='show_encryption_specs_item',
+            expected_status=expected_status,
+            client=self.encryption_types_client,
+            volume_type_id=self.volume_type['id'],
+            key=self.encryption_type_key_cipher
+        )
+
+    def _delete_encryption_type(self, expected_status):
+        """Delete encryption type"""
+        volume_type = self.create_volume_type(with_encryption=True)
+
+        self.do_request(
+            method='delete_encryption_type',
+            expected_status=expected_status,
+            client=self.encryption_types_client,
+            volume_type_id=volume_type['id']
+        )
+
+    def _create_encryption_type(self, expected_status):
+        """Create encryption type"""
+        volume_type = self.create_volume_type(with_encryption=False)
+
+        self.do_request(
+            method='create_encryption_type',
+            expected_status=expected_status,
+            client=self.encryption_types_client,
+            volume_type_id=volume_type['id'],
+            **self.create_kwargs
+        )
+
+    def _update_encryption_type(self, expected_status):
+        """Update encryption type"""
+        update_kwargs = {'key_size': 128}
+
+        self.do_request(
+            method='update_encryption_type',
+            expected_status=expected_status,
+            client=self.encryption_types_client,
+            volume_type_id=self.volume_type['id'],
+            **update_kwargs
+        )
+
+
+class VolumeTypesReaderTests(RbacV3VolumeTypesTests):
+    """Test Volume types using 'reader' user"""
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.volume_types_client_latest
+        cls.encryption_types_client = \
+            cls.os_project_reader.encryption_types_client_latest
+
+    @decorators.idempotent_id('e3fdabf0-fd8c-4bab-9870-5a67fe25c6e4')
+    def test_update_volume_type(self):
+        self._update_volume_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('b046a4d7-79a0-436b-9075-863e2299b73d')
+    def test_create_or_update_extra_specs_for_volume_type(self):
+        self._create_or_update_extra_specs_for_volume_type(
+            expected_status=exceptions.Forbidden
+        )
+
+    @decorators.skip_because(bug='2018467')
+    @decorators.idempotent_id('9499752c-3b27-41a3-8f55-4bdba7297f92')
+    def test_list_all_extra_specs_for_volume_type(self):
+        self._list_all_extra_specs_for_volume_type(
+            expected_status=200
+        )
+
+    @decorators.skip_because(bug='2018467')
+    @decorators.idempotent_id('a38f7248-3a5b-4e51-8e32-d2dcf9c771ea')
+    def test_show_extra_spec_for_volume_type(self):
+        self._show_extra_spec_for_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('68689644-22a8-4ba6-a642-db4258681586')
+    def test_update_extra_spec_for_volume_type(self):
+        self._update_extra_spec_for_volume_type(
+            expected_status=exceptions.Forbidden
+        )
+
+    @decorators.idempotent_id('a7cdd9ae-f389-48f6-b144-abf336b1637b')
+    def test_delete_extra_spec_for_volume_type(self):
+        self._delete_extra_spec_for_volume_type(
+            expected_status=exceptions.Forbidden
+        )
+
+    @decorators.skip_because(bug='2016402')
+    @decorators.idempotent_id('7ea28fc2-ce5a-48c9-8d03-31c2826fe566')
+    def test_show_volume_type_detail(self):
+        self._show_volume_type_detail(expected_status=200)
+
+    @decorators.skip_because(bug='2016402')
+    @decorators.idempotent_id('aceab52a-c503-4081-936e-b9df1c31046d')
+    def test_show_default_volume_type(self):
+        self._show_default_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('35581811-6288-4698-aaaf-7f5a4fe662e8')
+    def test_delete_volume_type(self):
+        self._delete_volume_type(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug='2016402')
+    @decorators.idempotent_id('e8a438f9-e9c1-4f3f-8ae3-ad80ee02cd6a')
+    def test_list_volume_types(self):
+        self._list_volume_types(expected_status=200)
+
+    @decorators.idempotent_id('3c3a39b1-fff5-492b-8c1c-9520063901ef')
+    def test_create_volume_type(self):
+        self._create_volume_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('84bd20f1-621c-416d-add2-fbae57137239')
+    def test_show_encryption_type(self):
+        self._show_encryption_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('ab9c7149-fab7-4584-b4ff-8b997cd62e75')
+    def test_show_encryption_spec_item(self):
+        self._show_encryption_spec_item(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('8d85ec39-bc32-4f49-88e6-63adc7e1f832')
+    def test_delete_encryption_type(self):
+        self._delete_encryption_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('c7c0892e-08d1-45e0-8ebf-be949cb4ab02')
+    def test_create_encryption_type(self):
+        self._create_encryption_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('8186d5bc-183a-4fcc-9c6a-e2b247a0caee')
+    def test_update_encryption_type(self):
+        self._update_encryption_type(expected_status=exceptions.Forbidden)
+
+
+class VolumeTypesMemberTests(RbacV3VolumeTypesTests):
+    """Test Volume types using 'member' user"""
+    credentials = ['project_member', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_member.volume_types_client_latest
+        cls.encryption_types_client = \
+            cls.os_project_member.encryption_types_client_latest
+
+    @decorators.idempotent_id('e5e642bf-2f31-4d04-ad43-6ad75562b7e4')
+    def test_update_volume_type(self):
+        self._update_volume_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('fda21e7e-9292-49b8-9754-f3c25b8e5f57')
+    def test_create_or_update_extra_specs_for_volume_type(self):
+        self._create_or_update_extra_specs_for_volume_type(
+            expected_status=exceptions.Forbidden
+        )
+
+    @decorators.skip_because(bug='2018467')
+    @decorators.idempotent_id('82fd0d34-17b3-4f45-bd2e-728c9a8bff8c')
+    def test_list_all_extra_specs_for_volume_type(self):
+        self._list_all_extra_specs_for_volume_type(
+            expected_status=200
+        )
+
+    @decorators.skip_because(bug='2018467')
+    @decorators.idempotent_id('67aa0b40-7c0a-4ae7-8682-fb4f20abd390')
+    def test_show_extra_spec_for_volume_type(self):
+        self._show_extra_spec_for_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('65470a71-254d-4152-bdaa-6b7f43e9c74f')
+    def test_update_extra_spec_for_volume_type(self):
+        self._update_extra_spec_for_volume_type(
+            expected_status=exceptions.Forbidden
+        )
+
+    @decorators.idempotent_id('3695be33-bd22-4090-8252-9c42eb7eeef6')
+    def test_delete_extra_spec_for_volume_type(self):
+        self._delete_extra_spec_for_volume_type(
+            expected_status=exceptions.Forbidden
+        )
+
+    @decorators.idempotent_id('319f3ca1-bdd7-433c-9bed-03c7b093e7a2')
+    def test_show_volume_type_detail(self):
+        self._show_volume_type_detail(expected_status=200)
+
+    @decorators.skip_because(bug='2016402')
+    @decorators.idempotent_id('2e990c61-a2ea-4a01-a2dc-1f483c934e8d')
+    def test_show_default_volume_type(self):
+        self._show_default_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('6847c211-647b-4d02-910c-773e76b99fcd')
+    def test_delete_volume_type(self):
+        self._delete_volume_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('308f80c9-6342-45a1-8e6e-9e400b510013')
+    def test_list_volume_types(self):
+        self._list_volume_types(expected_status=200)
+
+    @decorators.idempotent_id('81cebbb8-fa0d-4bd8-a433-e43c7b187456')
+    def test_create_volume_type(self):
+        self._create_volume_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('7c84b013-c5a8-434f-8ea7-23c5b2d46d5e')
+    def test_show_encryption_type(self):
+        self._show_encryption_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('387974ce-3544-48e3-81c0-3f86a5b60b93')
+    def test_show_encryption_spec_item(self):
+        self._show_encryption_spec_item(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('c0163522-524f-4dfb-a3d4-6648f58ce99c')
+    def test_delete_encryption_type(self):
+        self._delete_encryption_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('65d86181-905a-4aa6-a9e5-672415d819a0')
+    def test_create_encryption_type(self):
+        self._create_encryption_type(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('2633f1d3-e648-4d12-86b9-e7f72b41ec68')
+    def test_update_encryption_type(self):
+        self._update_encryption_type(expected_status=exceptions.Forbidden)
+
+
+class VolumeTypesAdminTests(RbacV3VolumeTypesTests):
+    """Test Volume types using 'admin' user"""
+    credentials = ['project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_admin.volume_types_client_latest
+        cls.encryption_types_client = \
+            cls.os_project_admin.encryption_types_client_latest
+
+    @decorators.idempotent_id('77d065ef-ffdd-4749-b326-d64fbf5d0432')
+    def test_update_volume_type(self):
+        self._update_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('422271a7-0128-4fd6-9f60-aeb4a1ce16ea')
+    def test_create_or_update_extra_specs_for_volume_type(self):
+        self._create_or_update_extra_specs_for_volume_type(
+            expected_status=200
+        )
+
+    @decorators.idempotent_id('5c491d13-df15-4721-812e-2ed473b86a12')
+    def test_list_all_extra_specs_for_volume_type(self):
+        self._list_all_extra_specs_for_volume_type(
+            expected_status=200
+        )
+
+    @decorators.skip_because(bug='2018467')
+    @decorators.idempotent_id('a2cca7b6-0af9-47e5-b8c1-4e0f01822d4e')
+    def test_show_extra_spec_for_volume_type(self):
+        self._show_extra_spec_for_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('d0ff17d3-2c47-485f-b2f1-d53ec32c32e2')
+    def test_update_extra_spec_for_volume_type(self):
+        self._update_extra_spec_for_volume_type(
+            expected_status=200
+        )
+
+    @decorators.idempotent_id('4661cc2f-8727-4998-a427-8cb1d512b68a')
+    def test_delete_extra_spec_for_volume_type(self):
+        self._delete_extra_spec_for_volume_type(
+            expected_status=202
+        )
+
+    @decorators.idempotent_id('7f794e33-b5cf-4172-b39e-a56cd9c18a2e')
+    def test_show_volume_type_detail(self):
+        self._show_volume_type_detail(expected_status=200)
+
+    @decorators.skip_because(bug='2016402')
+    @decorators.idempotent_id('93886ad8-5cd0-4def-8b0e-40418e55050d')
+    def test_show_default_volume_type(self):
+        self._show_default_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('7486259d-5c40-4fb3-8a95-491c45a0a872')
+    def test_delete_volume_type(self):
+        self._delete_volume_type(expected_status=202)
+
+    @decorators.idempotent_id('e075e8ff-bb05-4c84-b2ab-0205ef3e8dbd')
+    def test_list_volume_types(self):
+        self._list_volume_types(expected_status=200)
+
+    @decorators.idempotent_id('57384db2-9408-4a31-8c15-022eea5f9b76')
+    def test_create_volume_type(self):
+        self._create_volume_type(expected_status=200)
+
+    @decorators.idempotent_id('46fc49a3-f76f-4c22-ac83-8d1665437810')
+    def test_show_encryption_type(self):
+        self._show_encryption_type(expected_status=200)
+
+    @decorators.idempotent_id('4ff57649-bfe1-48f4-aaac-4577affba8d7')
+    def test_show_encryption_spec_item(self):
+        self._show_encryption_spec_item(expected_status=200)
+
+    @decorators.idempotent_id('e622af7d-a412-4903-9256-256d8e3cc560')
+    def test_delete_encryption_type(self):
+        self._delete_encryption_type(expected_status=202)
+
+    @decorators.idempotent_id('e7c4e925-6ce6-439b-8be8-6df4cbc32cdc')
+    def test_create_encryption_type(self):
+        self._create_encryption_type(expected_status=200)
+
+    @decorators.idempotent_id('90beb71d-93fa-4252-8566-192bdd517715')
+    def test_update_encryption_type(self):
+        self._update_encryption_type(expected_status=200)
diff --git a/cinder_tempest_plugin/rbac/v3/test_volumes.py b/cinder_tempest_plugin/rbac/v3/test_volumes.py
new file mode 100644
index 0000000..517e846
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_volumes.py
@@ -0,0 +1,166 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+CONF = config.CONF
+
+
+class VolumeV3RbacVolumesTests(rbac_base.VolumeV3RbacBaseTests):
+
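+    # show_volume_summary requires at least microversion 3.12.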
+    min_microversion = '3.12'
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.vol_other_client = cls.os_project_admin.volumes_client_latest
+
+    def _create_volume(self, expected_status, **kwargs):
+        """Test create_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        kwargs['size'] = CONF.volume.volume_size
+        self.do_request(
+            method='create_volume', expected_status=expected_status, **kwargs
+        )
+
+    def _show_volume(self, expected_status):
+        """Test show_volume operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='show_volume', volume_id=volume_id,
+            expected_status=expected_status
+        )
+
+    def _list_volumes(self, expected_status):
+        """Test list_volumes operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        self.create_volume(client=self.vol_other_client)
+        self.do_request(method='list_volumes', expected_status=expected_status)
+
+    def _list_volumes_detail(self, expected_status):
+        """Test list_volumes details operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='list_volumes', detail=True, expected_status=expected_status
+        )
+
+    def _show_volume_summary(self, expected_status):
+        """Test show_volume_summary operation
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='show_volume_summary', expected_status=expected_status
+        )
+
+    def _update_volume(self, expected_status):
+        """Test update_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        new_desc = self.__class__.__name__ + '-update_test'
+        self.do_request(
+            method='update_volume', volume_id=volume_id, description=new_desc,
+            expected_status=expected_status
+        )
+
+    def _set_bootable_volume(self, expected_status):
+        """Test set_bootable_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='set_bootable_volume', volume_id=volume_id,
+            bootable=True, expected_status=expected_status
+        )
+
+    def _delete_volume(self, expected_status):
+        """Test delete_volume operation.
+
+        Args:
+            expected_status: The expected HTTP response code
+        """
+        volume_id = self.create_volume(client=self.vol_other_client)
+        self.do_request(
+            method='delete_volume', volume_id=volume_id,
+            expected_status=expected_status
+        )
+
+
+class ProjectReaderTests(VolumeV3RbacVolumesTests):
+
+    credentials = ['project_reader', 'project_admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.client = cls.os_project_reader.volumes_client_latest
+
+    @decorators.skip_because(bug="2020113")
+    @decorators.idempotent_id('3d87f960-6210-45f5-b70b-679d67a4e17e')
+    def test_create_volume(self):
+        self._create_volume(expected_status=exceptions.Forbidden)
+
+    @decorators.idempotent_id('9b2667f2-744e-4d1f-8c39-17060010f19f')
+    def test_show_volume(self):
+        self._show_volume(expected_status=200)
+
+    @decorators.idempotent_id('2f4da8f9-cdc5-4a6e-9143-8237634a629c')
+    def test_list_volumes(self):
+        self._list_volumes(expected_status=200)
+
+    @decorators.idempotent_id('b11e59cd-d1dd-43e4-9676-22ab394f5d18')
+    def test_list_volumes_detail(self):
+        self._list_volumes_detail(expected_status=200)
+
+    @decorators.idempotent_id('ef347930-54dc-432f-b742-0a060fc37ae8')
+    def test_show_volume_summary(self):
+        self._show_volume_summary(expected_status=200)
+
+    @decorators.skip_because(bug="2020113")
+    @decorators.idempotent_id('cda92972-7213-4fa0-bc14-ab012dc95931')
+    def test_update_volume(self):
+        self._update_volume(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2020113")
+    @decorators.idempotent_id('9970b57d-8d5d-460e-931b-28a112df81e0')
+    def test_set_bootable_volume(self):
+        self._set_bootable_volume(expected_status=exceptions.Forbidden)
+
+    @decorators.skip_because(bug="2020113")
+    @decorators.idempotent_id('4fd4dce8-ed8a-4f05-8aac-da99858b563d')
+    def test_delete_volume(self):
+        self._delete_volume(expected_status=exceptions.Forbidden)
diff --git a/cinder_tempest_plugin/scenario/manager.py b/cinder_tempest_plugin/scenario/manager.py
index 3b25bb1..8598ade 100644
--- a/cinder_tempest_plugin/scenario/manager.py
+++ b/cinder_tempest_plugin/scenario/manager.py
@@ -13,6 +13,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import contextlib
+
 from oslo_log import log
 
 from tempest.common import waiters
@@ -55,20 +57,25 @@
                        if item not in disks_list_before_attach][0]
         return volume_name
 
+    @contextlib.contextmanager
+    def mount_dev_path(self, ssh_client, dev_name, mount_path):
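+        # Mount /dev/<dev_name> on mount_path for the duration of the
+        # context; when dev_name is None, no mount or umount is performed.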
+        if dev_name is not None:
+            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
+                                                               mount_path))
+            yield
+            ssh_client.exec_command('sudo umount %s' % mount_path)
+        else:
+            yield
+
     def _get_file_md5(self, ip_address, filename, dev_name=None,
                       mount_path='/mnt', private_key=None, server=None):
 
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key,
                                             server=server)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
-
-        md5_sum = ssh_client.exec_command(
-            'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
+        with self.mount_dev_path(ssh_client, dev_name, mount_path):
+            md5_sum = ssh_client.exec_command(
+                'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
         return md5_sum
 
     def _count_files(self, ip_address, dev_name=None, mount_path='/mnt',
@@ -76,12 +83,9 @@
         ssh_client = self.get_remote_client(ip_address,
                                             private_key=private_key,
                                             server=server)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
-        count = ssh_client.exec_command('sudo ls -l %s | wc -l' % mount_path)
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
+        with self.mount_dev_path(ssh_client, dev_name, mount_path):
+            count = ssh_client.exec_command(
+                'sudo ls -l %s | wc -l' % mount_path)
         # We subtract 2 from the count since `wc -l` also includes the count
         # of new line character and while creating the filesystem, a
         # lost+found folder is also created
@@ -100,17 +104,13 @@
                                             private_key=private_key,
                                             server=server)
 
-        if dev_name is not None:
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
-        ssh_client.exec_command(
-            'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
-            (mount_path, filename))
-        md5 = ssh_client.exec_command(
-            'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
-        ssh_client.exec_command('sudo sync')
-        if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
+        with self.mount_dev_path(ssh_client, dev_name, mount_path):
+            ssh_client.exec_command(
+                'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
+                (mount_path, filename))
+            md5 = ssh_client.exec_command(
+                'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
+            ssh_client.exec_command('sudo sync')
         return md5
 
     def get_md5_from_file(self, instance, instance_ip, filename,
@@ -125,6 +125,40 @@
                                   server=instance)
         return count, md5_sum
 
+    def write_data_to_device(self, ip_address, out_dev, in_dev='/dev/urandom',
+                             bs=1024, count=100, private_key=None,
+                             server=None, sha_sum=False):
+        ssh_client = self.get_remote_client(
+            ip_address, private_key=private_key, server=server)
+
+        # Write data to device
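+        # (random data by default) and then read it back with a second dd,
+        # so the command output reflects what actually landed on the device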
+        write_command = (
+            'sudo dd bs=%(bs)s count=%(count)s if=%(in_dev)s of=%(out_dev)s '
+            '&& sudo dd bs=%(bs)s count=%(count)s if=%(out_dev)s' %
+            {'bs': str(bs), 'count': str(count), 'in_dev': in_dev,
+             'out_dev': out_dev})
+        if sha_sum:
+            # Return the sha1sum of the data instead of the raw device data
+            write_command += ' | sha1sum | head -c 40'
+        data = ssh_client.exec_command(write_command)
+
+        return data
+
+    def read_data_from_device(self, ip_address, in_dev, bs=1024, count=100,
+                              private_key=None, server=None, sha_sum=False):
+        ssh_client = self.get_remote_client(
+            ip_address, private_key=private_key, server=server)
+
+        # Read data from device
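+        # using the same bs/count that were used for the write, so the
+        # output (or its sha1sum) can be compared with write_data_to_device()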
+        read_command = ('sudo dd bs=%(bs)s count=%(count)s if=%(in_dev)s' %
+                        {'bs': bs, 'count': count, 'in_dev': in_dev})
+        if sha_sum:
+            # Return the sha1sum of the data instead of the raw device data
+            read_command += ' | sha1sum | head -c 40'
+        data = ssh_client.exec_command(read_command)
+
+        return data
+
     def _attach_and_get_volume_device_name(self, server, volume, instance_ip,
                                            private_key):
         ssh_client = self.get_remote_client(
diff --git a/cinder_tempest_plugin/scenario/test_encrypted_volume_transfer.py b/cinder_tempest_plugin/scenario/test_encrypted_volume_transfer.py
new file mode 100644
index 0000000..7232433
--- /dev/null
+++ b/cinder_tempest_plugin/scenario/test_encrypted_volume_transfer.py
@@ -0,0 +1,185 @@
+# Copyright 2022 Red Hat, Inc.
+# All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class TransferEncryptedVolumeTest(manager.EncryptionScenarioTest):
+
+    volume_min_microversion = '3.70'
+    volume_max_microversion = 'latest'
+
+    credentials = ['primary', 'alt', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(TransferEncryptedVolumeTest, cls).setup_clients()
+
+        # We need the "mv355" volume transfers client
+        cls.client = cls.os_primary.volume_transfers_mv355_client_latest
+        cls.alt_client = cls.os_alt.volume_transfers_mv355_client_latest
+        cls.alt_volumes_client = cls.os_alt.volumes_client_latest
+
+    @classmethod
+    def skip_checks(cls):
+        super(TransferEncryptedVolumeTest, cls).skip_checks()
+        if not CONF.service_available.barbican:
+            raise cls.skipException('Barbican is required')
+
+    def setUp(self):
+        super(TransferEncryptedVolumeTest, self).setUp()
+        self.keypair = self.create_keypair()
+        self.security_group = self.create_security_group()
+
+    def _create_encrypted_volume_from_image(self):
+        volume_type = self.create_volume_type()
+        self.create_encryption_type(type_id=volume_type['id'],
+                                    provider='luks',
+                                    key_size=256,
+                                    cipher='aes-xts-plain64',
+                                    control_location='front-end')
+        return self.create_volume_from_image(volume_type=volume_type['id'])
+
+    def _create_or_get_timestamp(self, volume, timestamp_fn):
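+        # Boot a server from the volume, apply timestamp_fn (either
+        # create_timestamp or get_timestamp) to it, then delete the server
+        # so the volume returns to 'available' and can be transferred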
+        server = self.boot_instance_from_resource(
+            source_id=volume['id'],
+            source_type='volume',
+            keypair=self.keypair,
+            security_group=self.security_group)
+        server_ip = self.get_server_ip(server)
+        timestamp = timestamp_fn(server_ip,
+                                 private_key=self.keypair['private_key'],
+                                 server=server)
+        self.servers_client.delete_server(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
+        return timestamp
+
+    def _create_transfer(self, volume, transfer_client, volumes_client):
+        body = transfer_client.create_volume_transfer(volume_id=volume['id'])
+        transfer = body['transfer']
+        waiters.wait_for_volume_resource_status(volumes_client,
+                                                volume['id'],
+                                                'awaiting-transfer')
+        return transfer
+
+    def _accept_transfer(self, transfer, transfer_client, volumes_client):
+        _ = transfer_client.accept_volume_transfer(
+            transfer['id'], auth_key=transfer['auth_key'])
+        waiters.wait_for_volume_resource_status(volumes_client,
+                                                transfer['volume_id'],
+                                                'available')
+
+    def _delete_transfer(self, transfer, transfer_client, volumes_client):
+        _ = transfer_client.delete_volume_transfer(transfer['id'])
+        waiters.wait_for_volume_resource_status(volumes_client,
+                                                transfer['volume_id'],
+                                                'available')
+
+    @decorators.idempotent_id('a694dc4d-d11b-45cb-b268-62e76cc1b4f4')
+    @utils.services('compute', 'volume', 'image', 'network')
+    def test_create_accept_volume_transfer(self):
+        """Verify the ability to transfer an encrypted volume:
+
+        * Create an encrypted volume from image
+        * Boot an instance from the volume and write a timestamp
+        * Transfer the volume to another project, then transfer it back
+          again to the original project (see comments in the code for why
+          this is done).
+        * Boot another instance from the volume and read the timestamp
+        * Verify the timestamps match, and the volume has a new
+          encryption_key_id.
+        """
+
+        # Create a bootable encrypted volume.
+        volume = self._create_encrypted_volume_from_image()
+
+        # Create an instance from the volume and write a timestamp.
+        timestamp_1 = self._create_or_get_timestamp(volume,
+                                                    self.create_timestamp)
+
+        # Transfer the volume to another project.
+        transfer = self._create_transfer(volume,
+                                         self.client,
+                                         self.volumes_client)
+        self._accept_transfer(transfer,
+                              self.alt_client,
+                              self.alt_volumes_client)
+
+        # Transfer the volume back to the original project. This is done
+        # only because it's awkward in tempest to boot an instance and
+        # access it (to read the timestamp) in another project without
+        # setting up another security group and group rules.
+        transfer = self._create_transfer(volume,
+                                         self.alt_client,
+                                         self.alt_volumes_client)
+        self._accept_transfer(transfer, self.client, self.volumes_client)
+
+        # Create another instance from the volume and read the timestamp.
+        timestamp_2 = self._create_or_get_timestamp(volume,
+                                                    self.get_timestamp)
+
+        self.assertEqual(timestamp_1, timestamp_2)
+
+        # Verify the volume has a new encryption_key_id.
+        encryption_key_id_1 = volume['encryption_key_id']
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
+        encryption_key_id_2 = volume['encryption_key_id']
+
+        self.assertNotEqual(encryption_key_id_1, encryption_key_id_2)
+
+    @decorators.idempotent_id('00c04d27-b3c6-454c-a0b4-223a195c4a89')
+    @utils.services('compute', 'volume', 'image', 'network')
+    def test_create_delete_volume_transfer(self):
+        """Verify the ability to cancel an encrypted volume transfer:
+
+        * Create an encrypted volume from image
+        * Boot an instance from the volume and write a timestamp
+        * Create and delete a volume transfer
+        * Boot another instance from the volume and read the timestamp
+        * Verify the timestamps match, and the volume has a new
+          encryption_key_id.
+        """
+
+        # Create a bootable encrypted volume.
+        volume = self._create_encrypted_volume_from_image()
+
+        # Create an instance from the volume and write a timestamp.
+        timestamp_1 = self._create_or_get_timestamp(volume,
+                                                    self.create_timestamp)
+
+        # Create and then delete a transfer of the volume
+        transfer = self._create_transfer(volume,
+                                         self.client,
+                                         self.volumes_client)
+        self._delete_transfer(transfer, self.client, self.volumes_client)
+
+        # Create another instance from the volume and read the timestamp.
+        timestamp_2 = self._create_or_get_timestamp(volume,
+                                                    self.get_timestamp)
+
+        self.assertEqual(timestamp_1, timestamp_2)
+
+        # Verify the volume has a new encryption_key_id.
+        encryption_key_id_1 = volume['encryption_key_id']
+        volume = self.volumes_client.show_volume(volume['id'])['volume']
+        encryption_key_id_2 = volume['encryption_key_id']
+
+        self.assertNotEqual(encryption_key_id_1, encryption_key_id_2)
diff --git a/cinder_tempest_plugin/scenario/test_snapshots.py b/cinder_tempest_plugin/scenario/test_snapshots.py
index 5a9611f..f376954 100644
--- a/cinder_tempest_plugin/scenario/test_snapshots.py
+++ b/cinder_tempest_plugin/scenario/test_snapshots.py
@@ -23,7 +23,14 @@
 
     def setUp(self):
         super(SnapshotDataIntegrityTests, self).setUp()
-        self.keypair = self.create_keypair()
+        self.validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        # NOTE(danms): If validation is enabled, we will have a keypair to use,
+        # otherwise we need to create our own.
+        if 'keypair' in self.validation_resources:
+            self.keypair = self.validation_resources['keypair']
+        else:
+            self.keypair = self.create_keypair()
         self.security_group = self.create_security_group()
 
     @decorators.idempotent_id('ff10644e-5a70-4a9f-9801-8204bb81fb61')
@@ -36,7 +43,7 @@
         1) Create an instance with ephemeral disk
         2) Create a volume, attach it to the instance and create a filesystem
            on it and mount it
-        3) Mount the volume, create a file and write data into it, Unmount it
+        3) Create a file and write data into it, then unmount the volume
         4) create snapshot
         5) repeat 3 and 4 two more times (simply creating 3 snapshots)
 
@@ -48,6 +55,9 @@
         # Create an instance
         server = self.create_server(
             key_name=self.keypair['name'],
+            validatable=True,
+            validation_resources=self.validation_resources,
+            wait_until='SSHABLE',
             security_groups=[{'name': self.security_group['name']}])
 
         # Create an empty volume
@@ -93,41 +103,21 @@
         # Detach the volume
         self.nova_volume_detach(server, volume)
 
-        # Create volume from snapshot, attach it to instance and check file
-        # and contents for snap1
-        volume_snap_1 = self.create_volume(snapshot_id=snapshot1['id'])
-        volume_device_name, __ = self._attach_and_get_volume_device_name(
-            server, volume_snap_1, instance_ip, self.keypair['private_key'])
-        count_snap_1, md5_file_1 = self.get_md5_from_file(
-            server, instance_ip, 'file1', dev_name=volume_device_name)
-        # Detach the volume
-        self.nova_volume_detach(server, volume_snap_1)
+        snap_map = {1: snapshot1, 2: snapshot2, 3: snapshot3}
+        file_map = {1: file1_md5, 2: file2_md5, 3: file3_md5}
 
-        self.assertEqual(count_snap_1, 1)
-        self.assertEqual(file1_md5, md5_file_1)
+        # Loop over the three snapshots and check the data integrity of each
+        for i in range(1, 4):
+            # Create a volume from the snapshot, attach it to the instance
+            # and check the file count and contents for this snapshot
+            volume_snap = self.create_volume(snapshot_id=snap_map[i]['id'])
+            volume_device_name, __ = self._attach_and_get_volume_device_name(
+                server, volume_snap, instance_ip, self.keypair['private_key'])
+            count_snap, md5_file = self.get_md5_from_file(
+                server, instance_ip, 'file' + str(i),
+                dev_name=volume_device_name)
+            # Detach the volume
+            self.nova_volume_detach(server, volume_snap)
 
-        # Create volume from snapshot, attach it to instance and check file
-        # and contents for snap2
-        volume_snap_2 = self.create_volume(snapshot_id=snapshot2['id'])
-        volume_device_name, __ = self._attach_and_get_volume_device_name(
-            server, volume_snap_2, instance_ip, self.keypair['private_key'])
-        count_snap_2, md5_file_2 = self.get_md5_from_file(
-            server, instance_ip, 'file2', dev_name=volume_device_name)
-        # Detach the volume
-        self.nova_volume_detach(server, volume_snap_2)
-
-        self.assertEqual(count_snap_2, 2)
-        self.assertEqual(file2_md5, md5_file_2)
-
-        # Create volume from snapshot, attach it to instance and check file
-        # and contents for snap3
-        volume_snap_3 = self.create_volume(snapshot_id=snapshot3['id'])
-        volume_device_name, __ = self._attach_and_get_volume_device_name(
-            server, volume_snap_3, instance_ip, self.keypair['private_key'])
-        count_snap_3, md5_file_3 = self.get_md5_from_file(
-            server, instance_ip, 'file3', dev_name=volume_device_name)
-        # Detach the volume
-        self.nova_volume_detach(server, volume_snap_3)
-
-        self.assertEqual(count_snap_3, 3)
-        self.assertEqual(file3_md5, md5_file_3)
+            self.assertEqual(count_snap, i)
+            self.assertEqual(file_map[i], md5_file)
diff --git a/cinder_tempest_plugin/scenario/test_volume_encrypted.py b/cinder_tempest_plugin/scenario/test_volume_encrypted.py
index 69edfa6..69b0ab2 100644
--- a/cinder_tempest_plugin/scenario/test_volume_encrypted.py
+++ b/cinder_tempest_plugin/scenario/test_volume_encrypted.py
@@ -38,11 +38,6 @@
     def resource_cleanup(cls):
         super(TestEncryptedCinderVolumes, cls).resource_cleanup()
 
-    def launch_instance(self):
-        keypair = self.create_keypair()
-
-        return self.create_server(key_name=keypair['name'])
-
     def attach_detach_volume(self, server, volume):
         attached_volume = self.nova_volume_attach(server, volume)
         self.nova_volume_detach(server, attached_volume)
@@ -108,7 +103,11 @@
             self.volumes_client, volume_s['id'], 'available')
         volume_source = self.volumes_client.show_volume(
             volume_s['id'])['volume']
-        server = self.launch_instance()
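+        # wait_until='SSHABLE' relies on a validatable server, so set up
+        # the validation resources that let tempest reach the guest over SSH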
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        server = self.create_server(wait_until='SSHABLE',
+                                    validatable=True,
+                                    validation_resources=validation_resources)
         self.attach_detach_volume(server, volume_source)
 
     @decorators.idempotent_id('5bb622ab-5060-48a8-8840-d589a548b7e4')
@@ -122,9 +121,8 @@
         * Create an encrypted volume from image
         * Boot an instance from the volume
         * Write data to the volume
-        * Detach volume
-        * Create a clone from the first volume
-        * Create another encrypted volume from source_volumeid
+        * Destroy the instance
+        * Create a clone of the encrypted volume
         * Boot an instance from cloned volume
         * Verify the data
         """
diff --git a/cinder_tempest_plugin/scenario/test_volume_multiattach.py b/cinder_tempest_plugin/scenario/test_volume_multiattach.py
new file mode 100644
index 0000000..e04610f
--- /dev/null
+++ b/cinder_tempest_plugin/scenario/test_volume_multiattach.py
@@ -0,0 +1,152 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from cinder_tempest_plugin.scenario import manager
+from tempest.scenario import manager as tempest_manager
+
+CONF = config.CONF
+
+
+class VolumeMultiattachTests(manager.ScenarioTest,
+                             tempest_manager.EncryptionScenarioTest):
+
+    compute_min_microversion = '2.60'
+    compute_max_microversion = 'latest'
+
+    def setUp(self):
+        super(VolumeMultiattachTests, self).setUp()
+        self.validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        # NOTE(danms): If validation is enabled, we will have a keypair to use,
+        # otherwise we need to create our own.
+        if 'keypair' in self.validation_resources:
+            self.keypair = self.validation_resources['keypair']
+        else:
+            self.keypair = self.create_keypair()
+        self.security_group = self.create_security_group()
+
+    @classmethod
+    def skip_checks(cls):
+        super(VolumeMultiattachTests, cls).skip_checks()
+        if not CONF.compute_feature_enabled.volume_multiattach:
+            raise cls.skipException('Volume multi-attach is not available.')
+
+    def _verify_attachment(self, volume_id, server_id):
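+        # A multiattach volume reports every attached server in its
+        # 'attachments' list; check that server_id is among them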
+        volume = self.volumes_client.show_volume(volume_id)['volume']
+        server_ids = (
+            [attachment['server_id'] for attachment in volume['attachments']])
+        self.assertIn(server_id, server_ids)
+
+    @decorators.idempotent_id('e6604b85-5280-4f7e-90b5-186248fd3423')
+    def test_multiattach_data_integrity(self):
+
+        # Create an instance
+        server_1 = self.create_server(
+            key_name=self.keypair['name'],
+            wait_until='SSHABLE',
+            validatable=True,
+            validation_resources=self.validation_resources,
+            security_groups=[{'name': self.security_group['name']}])
+
+        # Create multiattach type
+        multiattach_vol_type = self.create_volume_type(
+            extra_specs={'multiattach': "<is> True"})
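+        # ('<is> True' is the extra-spec syntax Cinder expects for boolean
+        # values like multiattach)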
+
+        # Create a multiattach volume
+        volume = self.create_volume(volume_type=multiattach_vol_type['id'])
+
+        # Create encrypted volume
+        encrypted_volume = self.create_encrypted_volume(
+            'luks', volume_type='luks')
+
+        # Create a normal volume
+        simple_volume = self.create_volume()
+
+        # Attach the normal and encrypted volumes (they are not used in
+        # this test but are attached to emulate a real-world scenario
+        # where different types of volumes are attached to the server)
+        self.attach_volume(server_1, simple_volume)
+        self.attach_volume(server_1, encrypted_volume)
+
+        instance_ip = self.get_server_ip(server_1)
+
+        # Attach volume to instance and find its device name (e.g. /dev/vdb)
+        volume_device_name_inst_1, __ = (
+            self._attach_and_get_volume_device_name(
+                server_1, volume, instance_ip, self.keypair['private_key']))
+
+        out_device = '/dev/' + volume_device_name_inst_1
+
+        # This data is written from the first server and will be used to
+        # verify the data read back from the second server
+        device_data_inst_1 = self.write_data_to_device(
+            instance_ip, out_device, private_key=self.keypair['private_key'],
+            server=server_1, sha_sum=True)
+
+        # Create another instance
+        server_2 = self.create_server(
+            key_name=self.keypair['name'],
+            validatable=True,
+            validation_resources=self.validation_resources,
+            wait_until='SSHABLE',
+            security_groups=[{'name': self.security_group['name']}])
+
+        instance_2_ip = self.get_server_ip(server_2)
+
+        # Attach volume to instance and find its device name (e.g. /dev/vdc)
+        volume_device_name_inst_2, __ = (
+            self._attach_and_get_volume_device_name(
+                server_2, volume, instance_2_ip, self.keypair['private_key']))
+
+        in_device = '/dev/' + volume_device_name_inst_2
+
+        # Read data from volume device
+        device_data_inst_2 = self.read_data_from_device(
+            instance_2_ip, in_device, private_key=self.keypair['private_key'],
+            server=server_2, sha_sum=True)
+
+        self._verify_attachment(volume['id'], server_1['id'])
+        self._verify_attachment(volume['id'], server_2['id'])
+        self.assertEqual(device_data_inst_1, device_data_inst_2)
+
+    @decorators.idempotent_id('53514da8-f49c-4cda-8792-ff4a2fa69977')
+    def test_volume_multiattach_same_host_negative(self):
+        # Create an instance
+        server = self.create_server(
+            key_name=self.keypair['name'],
+            validatable=True,
+            validation_resources=self.validation_resources,
+            wait_until='SSHABLE',
+            security_groups=[{'name': self.security_group['name']}])
+
+        # Create multiattach type
+        multiattach_vol_type = self.create_volume_type(
+            extra_specs={'multiattach': "<is> True"})
+
+        # Create an empty volume
+        volume = self.create_volume(volume_type=multiattach_vol_type['id'])
+
+        # Attach volume to instance
+        attachment = self.attach_volume(server, volume)
+
+        self.assertEqual(server['id'], attachment['serverId'])
+
+        # Try attaching the volume to the same instance
+        self.assertRaises(lib_exc.BadRequest, self.attach_volume, server,
+                          volume)
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
new file mode 100644
index 0000000..bc1dc04
--- /dev/null
+++ b/playbooks/enable-fips.yaml
@@ -0,0 +1,3 @@
+- hosts: all
+  roles:
+    - enable-fips
diff --git a/playbooks/install-multipath.yaml b/playbooks/install-multipath.yaml
new file mode 100644
index 0000000..bc51810
--- /dev/null
+++ b/playbooks/install-multipath.yaml
@@ -0,0 +1,22 @@
+- hosts: tempest
+  vars:
+    mpath_package: "{{ 'device-mapper-multipath' if ansible_os_family == 'RedHat' else 'multipath-tools' }}"
+  tasks:
+    - name: Install the multipath package
+      package:
+        name: "{{ mpath_package }}"
+        state: present
+      become: yes
+
+    - name: Create the multipath configuration
+      command: mpathconf --enable --with_multipathd y --user_friendly_names n --find_multipaths y
+      args:
+        creates: /etc/multipath.conf
+      become: yes
+
+    - name: Start the multipath daemon and enable it on boot
+      service:
+        name: multipathd
+        state: started
+        enabled: yes
+      become: yes
diff --git a/requirements.txt b/requirements.txt
index 4d75108..c25d1c5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,4 @@
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 oslo.config>=5.1.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-tempest>=27.0.0 # Apache-2.0
+tempest>=34.2.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 3b246b5..f224c5c 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,13 +1,12 @@
 [metadata]
 name = cinder-tempest-plugin
-summary = Tempest plugin tests for Cinder.
-description_file =
-    README.rst
+description = Tempest plugin tests for Cinder.
+long_description = file: README.rst
 author = OpenStack
 author_email = openstack-discuss@lists.openstack.org
-home_page = http://www.openstack.org/
+url = http://www.openstack.org/
 python_requires = >=3.6
-classifier =
+classifiers =
     Environment :: OpenStack
     Intended Audience :: Information Technology
     Intended Audience :: System Administrators
@@ -20,6 +19,7 @@
     Programming Language :: Python :: 3.7
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
+    Programming Language :: Python :: 3.10
 
 [files]
 packages =
diff --git a/tox.ini b/tox.ini
index c9c91ad..8844306 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,9 @@
 [tox]
-minversion = 3.18.0
+minversion = 4.0.0
+# specify virtualenv here to keep local runs consistent with the
+# gate (it sets the versions of pip, setuptools, and wheel)
+requires = virtualenv>=20.17.1
 envlist = pep8
-skipsdist = True
 # this allows tox to infer the base python from the environment name
 # and override any basepython configured in this file
 ignore_basepython_conflict=true
@@ -20,7 +22,12 @@
 commands = stestr run --slowest {posargs}
 
 [testenv:pep8]
-commands = flake8 {posargs}
+commands =
+    flake8 {posargs}
+    check-uuid --package cinder_tempest_plugin
+
+[testenv:uuidgen]
+commands = check-uuid --fix --package cinder_tempest_plugin
 
 [testenv:venv]
 commands = {posargs}