Merge "Remove all usage of six library"
diff --git a/.gitignore b/.gitignore
index 963e589..3c71a79 100644
--- a/.gitignore
+++ b/.gitignore
@@ -27,7 +27,7 @@
 !.coveragerc
 .tox
 nosetests.xml
-.testrepository
+.stestr
 .venv
 
 # Translations
diff --git a/.stestr.conf b/.stestr.conf
new file mode 100644
index 0000000..4dedb28
--- /dev/null
+++ b/.stestr.conf
@@ -0,0 +1,3 @@
+[DEFAULT]
+test_path=${OS_TEST_PATH:-./cinder_tempest_plugin}
+top_dir=./
diff --git a/.testr.conf b/.testr.conf
deleted file mode 100644
index 6d83b3c..0000000
--- a/.testr.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-[DEFAULT]
-test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
-             OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
-             OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
-             ${PYTHON:-python} -m subunit.run discover -t ./ . $LISTOPT $IDOPTION
-test_id_option=--load-list $IDFILE
-test_list_option=--list
diff --git a/.zuul.yaml b/.zuul.yaml
index 2a04353..0b120b2 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -5,49 +5,70 @@
     check:
       jobs:
         - cinder-tempest-plugin-lvm-lio-barbican
-        - cinder-tempest-plugin-lvm-lio-barbican-centos-8:
+        - cinder-tempest-plugin-lvm-lio-barbican-centos-8-stream:
             voting: false
         - cinder-tempest-plugin-lvm-tgt-barbican
+        - nova-ceph-multistore:
+            voting: false
         - cinder-tempest-plugin-cbak-ceph
+        - cinder-tempest-plugin-cbak-s3
+        - cinder-tempest-plugin-basic-xena
+        - cinder-tempest-plugin-basic-wallaby
+        - cinder-tempest-plugin-basic-victoria
         - cinder-tempest-plugin-basic-ussuri
-        - cinder-tempest-plugin-basic-train
-        - cinder-tempest-plugin-basic-stein
+        # Set this job to voting once we have some actual tests to run
+        - cinder-tempest-plugin-protection-functional:
+            voting: false
     gate:
       jobs:
         - cinder-tempest-plugin-lvm-lio-barbican
         - cinder-tempest-plugin-lvm-tgt-barbican
         - cinder-tempest-plugin-cbak-ceph
+    experimental:
+      jobs:
+        - cinder-tempest-plugin-cbak-ceph-xena
+        - cinder-tempest-plugin-cbak-ceph-wallaby
+        - cinder-tempest-plugin-cbak-ceph-victoria
+        - cinder-tempest-plugin-cbak-ceph-ussuri
 
 - job:
-    name: cinder-tempest-plugin-lvm-barbican-base
+    name: cinder-tempest-plugin-protection-functional
+    parent: devstack-tempest
+    required-projects:
+      - opendev.org/openstack/cinder-tempest-plugin
+      - opendev.org/openstack/cinder
+    vars:
+      tox_envlist: all
+      tempest_test_regex: 'cinder_tempest_plugin.rbac'
+      devstack_local_conf:
+        test-config:
+          $CINDER_CONF:
+            oslo_policy:
+              enforce_new_defaults: True
+          $TEMPEST_CONFIG:
+            enforce_scope:
+              cinder: True
+      tempest_plugins:
+        - cinder-tempest-plugin
+
+- job:
+    name: cinder-tempest-plugin-lvm-barbican-base-abstract
     description: |
       This is a base job for lvm with lio & tgt targets
     parent: devstack-tempest
-    # TODO(gmann): Remove the below nodeset setting to Bionic once
-    # https://storyboard.openstack.org/#!/story/2007732 is fixed
-    # Once nodeset is removed form here then devstack-tempest job
-    # will automatically run this job on Ubuntu Focal nodeset from
-    # Victoria gate onwards.
-    nodeset: openstack-single-node-bionic
+    abstract: true
     timeout: 10800
-    roles:
-      - zuul: opendev.org/openstack/cinderlib
     required-projects:
       - opendev.org/openstack/barbican
       - opendev.org/openstack/tempest
       - opendev.org/openstack/cinder-tempest-plugin
       - opendev.org/openstack/cinder
-      - opendev.org/openstack/cinderlib
-    run: playbooks/tempest-and-cinderlib-run.yaml
-    # Required to collect the tox-based logs of the cinderlib functional tests
-    post-run: playbooks/post-cinderlib.yaml
     host-vars:
       controller:
         devstack_plugins:
           barbican: https://opendev.org/openstack/barbican
     vars:
       tempest_test_regex: '(^tempest\.(api|scenario)|(^cinder_tempest_plugin))'
-      tempest_test_blacklist: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-blacklist.txt'
       tox_envlist: all
       devstack_localrc:
         CINDER_LVM_TYPE: thin
@@ -55,27 +76,80 @@
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
+            auth:
+              # FIXME: 'creator' should be re-added by the barbican devstack plugin
+              # but the value below overrides everything.
+              tempest_roles: member,creator
             volume-feature-enabled:
               volume_revert: True
       devstack_services:
         barbican: true
       tempest_plugins:
         - cinder-tempest-plugin
-      fetch_subunit_output_additional_dirs:
-        - "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}"
     irrelevant-files:
       - ^.*\.rst$
       - ^doc/.*$
       - ^releasenotes/.*$
 
 - job:
+    name: cinder-tempest-plugin-lvm-barbican-base
+    description: |
+      This is a base job for lvm with lio & tgt targets
+      with cinderlib tests.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+    roles:
+      - zuul: opendev.org/openstack/cinderlib
+    required-projects:
+      - opendev.org/openstack/cinderlib
+    run: playbooks/tempest-and-cinderlib-run.yaml
+    # Required to collect the tox-based logs of the cinderlib functional tests
+    post-run: playbooks/post-cinderlib.yaml
+    vars:
+      fetch_subunit_output_additional_dirs:
+        - "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}"
+      tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+
+- job:
+    name: cinder-tempest-plugin-lvm-barbican-base
+    description: |
+      This is a base job for lvm with lio & tgt targets
+      with cinderlib tests, for stable/train testing.
+    branches: stable/train
+    parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+    roles:
+      - zuul: opendev.org/openstack/cinderlib
+    required-projects:
+      - opendev.org/openstack/cinderlib
+      - name: opendev.org/openstack/cinder-tempest-plugin
+        override-checkout: 1.3.0
+    run: playbooks/tempest-and-cinderlib-run.yaml
+    # Required to collect the tox-based logs of the cinderlib functional tests
+    post-run: playbooks/post-cinderlib.yaml
+    vars:
+      fetch_subunit_output_additional_dirs:
+        - "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}"
+      tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+
+- job:
+    name: cinder-tempest-plugin-lvm-barbican-base
+    description: |
+      This is a base job for lvm with lio & tgt targets
+    branches: ^(?=stable/(ocata|pike|queens|rocky|stein)).*$
+    parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+    required-projects:
+      - name: opendev.org/openstack/cinder-tempest-plugin
+        override-checkout: stein-last
+    vars:
+      tempest_test_blacklist: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-blacklist.txt'
+
+- job:
     name: cinder-tempest-plugin-cbak-ceph
     parent: devstack-plugin-ceph-tempest-py3
     description: |
       Integration tests that run with the ceph devstack plugin, py3,
       and the backup service enabled.
     vars:
-      tempest_black_regex: '(VolumesBackupsTest.test_bootable_volume_backup_and_restore|TestVolumeBackupRestore.test_volume_backup_restore)'
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -84,13 +158,36 @@
       devstack_services:
         c-bak: true
 
+- job:
+    name: cinder-tempest-plugin-cbak-ceph-xena
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/xena
+
+- job:
+    name: cinder-tempest-plugin-cbak-ceph-wallaby
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/wallaby
+
+- job:
+    name: cinder-tempest-plugin-cbak-ceph-victoria
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/victoria
+
+- job:
+    name: cinder-tempest-plugin-cbak-ceph-ussuri
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-bionic
+    override-checkout: stable/ussuri
+
 # variant for pre-Ussuri branches (no volume revert for Ceph),
 # should this job be used on those branches
 - job:
     name: cinder-tempest-plugin-cbak-ceph
     branches: ^(?=stable/(ocata|pike|queens|rocky|stein|train)).*$
     vars:
-      tempest_black_regex: ''
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -108,9 +205,9 @@
         CINDER_ISCSI_HELPER: lioadm
 
 - job:
-    name: cinder-tempest-plugin-lvm-lio-barbican-centos-8
+    name: cinder-tempest-plugin-lvm-lio-barbican-centos-8-stream
     parent: cinder-tempest-plugin-lvm-lio-barbican
-    nodeset: devstack-single-node-centos-8
+    nodeset: devstack-single-node-centos-8-stream
     description: |
       This job configures Cinder with LVM, LIO, barbican and
       runs tempest tests and cinderlib tests on CentOS 8 Stream.
@@ -121,8 +218,26 @@
       This job configures Cinder with LVM, tgt, barbican and
       runs tempest tests and cinderlib tests.
     parent: cinder-tempest-plugin-lvm-barbican-base
+    vars:
+      devstack_localrc:
+        CINDER_ISCSI_HELPER: tgtadm
 
 - job:
+    name: cinder-tempest-plugin-cbak-s3
+    parent: cinder-tempest-plugin-basic
+    description: |
+      Integration tests that run with the S3 backup driver, backed
+      by the Swift S3 API.
+    vars:
+      devstack_localrc:
+        CINDER_BACKUP_DRIVER: 's3_swift'
+      devstack_services:
+        c-bak: true
+        s3api: true
+        # Workaround: TLS proxy seems to cause S3 signature mismatch.
+        tls-proxy: false
+      tempest_test_regex: '(test_volume_backup|test_volumes_backup|test_snapshot_backup)'
+
+- job:
     name: cinder-tempest-plugin-basic
     parent: devstack-tempest
     description: |
@@ -143,25 +258,25 @@
       - ^releasenotes/.*$
 
 - job:
+    name: cinder-tempest-plugin-basic-xena
+    parent: cinder-tempest-plugin-basic
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/xena
+
+- job:
+    name: cinder-tempest-plugin-basic-wallaby
+    parent: cinder-tempest-plugin-basic
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/wallaby
+
+- job:
+    name: cinder-tempest-plugin-basic-victoria
+    parent: cinder-tempest-plugin-basic
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/victoria
+
+- job:
     name: cinder-tempest-plugin-basic-ussuri
     parent: cinder-tempest-plugin-basic
     nodeset: openstack-single-node-bionic
     override-checkout: stable/ussuri
-
-- job:
-    name: cinder-tempest-plugin-basic-train
-    parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/train
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: True
-
-- job:
-    name: cinder-tempest-plugin-basic-stein
-    parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/stein
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: True
diff --git a/README.rst b/README.rst
index 79151cd..3fd608a 100644
--- a/README.rst
+++ b/README.rst
@@ -24,7 +24,6 @@
 ::
 
     [[local|localrc]]
-    VIRT_DRIVER=libvirt
     ADMIN_PASSWORD=secret
     SERVICE_TOKEN=$ADMIN_PASSWORD
     MYSQL_PASSWORD=$ADMIN_PASSWORD
@@ -35,8 +34,8 @@
     SYSLOG=False
     LOG_COLOR=False
     RECLONE=yes
-    ENABLED_SERVICES=c-api,c-sch,c-vol,cinder,dstat,g-api,g-reg,key,mysql,
-                     n-api,n-cond,n-cpu,n-crt,n-net,n-sch,rabbit,tempest
+    ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,g-reg,key
+    ENABLED_SERVICES+=,mysql,n-api,n-cond,n-cpu,n-crt,n-sch,rabbit,tempest,placement-api
     CINDER_ENABLED_BACKENDS=lvmdriver-1
     CINDER_DEFAULT_VOLUME_TYPE=lvmdriver-1
     CINDER_VOLUME_CLEAR=none
diff --git a/cinder_tempest_plugin/api/volume/admin/test_consistencygroups.py b/cinder_tempest_plugin/api/volume/admin/test_consistencygroups.py
index 15d19dc..7dff494 100644
--- a/cinder_tempest_plugin/api/volume/admin/test_consistencygroups.py
+++ b/cinder_tempest_plugin/api/volume/admin/test_consistencygroups.py
@@ -78,20 +78,20 @@
             self.consistencygroups_adm_client.create_consistencygroup)
         cg = create_consistencygroup(volume_type['id'],
                                      name=cg_name)['consistencygroup']
+        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
+            cg['id'], 'available')
+        self.assertEqual(cg_name, cg['name'])
+
+        # Create volume
         vol_name = data_utils.rand_name("volume")
         params = {'name': vol_name,
                   'volume_type': volume_type['id'],
                   'consistencygroup_id': cg['id'],
                   'size': CONF.volume.volume_size}
 
-        # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
-
         waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                                 volume['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
-            cg['id'], 'available')
-        self.assertEqual(cg_name, cg['name'])
 
         # Get a given CG
         cg = self.consistencygroups_adm_client.show_consistencygroup(
@@ -122,19 +122,19 @@
             self.consistencygroups_adm_client.create_consistencygroup)
         cg = create_consistencygroup(volume_type['id'],
                                      name=cg_name)['consistencygroup']
+        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
+            cg['id'], 'available')
+        self.assertEqual(cg_name, cg['name'])
+
+        # Create volume
         vol_name = data_utils.rand_name("volume")
         params = {'name': vol_name,
                   'volume_type': volume_type['id'],
                   'consistencygroup_id': cg['id'],
                   'size': CONF.volume.volume_size}
-
-        # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
         waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                                 volume['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
-            cg['id'], 'available')
-        self.assertEqual(cg_name, cg['name'])
 
         # Create cgsnapshot
         cgsnapshot_name = data_utils.rand_name('cgsnapshot')
@@ -142,6 +142,9 @@
             self.consistencygroups_adm_client.create_cgsnapshot)
         cgsnapshot = create_cgsnapshot(cg['id'],
                                        name=cgsnapshot_name)['cgsnapshot']
+        self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
+            cgsnapshot['id'], 'available')
+        self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
         snapshots = self.os_admin.snapshots_v2_client.list_snapshots(
             detail=True)['snapshots']
         for snap in snapshots:
@@ -149,9 +152,6 @@
                 waiters.wait_for_volume_resource_status(
                     self.os_admin.snapshots_v2_client,
                     snap['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
-            cgsnapshot['id'], 'available')
-        self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
 
         # Get a given CG snapshot
         cgsnapshot = self.consistencygroups_adm_client.show_cgsnapshot(
@@ -182,19 +182,19 @@
             self.consistencygroups_adm_client.create_consistencygroup)
         cg = create_consistencygroup(volume_type['id'],
                                      name=cg_name)['consistencygroup']
+        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
+            cg['id'], 'available')
+        self.assertEqual(cg_name, cg['name'])
+
+        # Create volume
         vol_name = data_utils.rand_name("volume")
         params = {'name': vol_name,
                   'volume_type': volume_type['id'],
                   'consistencygroup_id': cg['id'],
                   'size': CONF.volume.volume_size}
-
-        # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
         waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                                 volume['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
-            cg['id'], 'available')
-        self.assertEqual(cg_name, cg['name'])
 
         # Create cgsnapshot
         cgsnapshot_name = data_utils.rand_name('cgsnapshot')
@@ -202,15 +202,15 @@
             self.consistencygroups_adm_client.create_cgsnapshot)
         cgsnapshot = create_cgsnapshot(cg['id'],
                                        name=cgsnapshot_name)['cgsnapshot']
+        self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
+            cgsnapshot['id'], 'available')
+        self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
         snapshots = self.snapshots_client.list_snapshots(
             detail=True)['snapshots']
         for snap in snapshots:
             if volume['id'] == snap['volume_id']:
                 waiters.wait_for_volume_resource_status(
                     self.os_admin.snapshots_v2_client, snap['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_cgsnapshot_status(
-            cgsnapshot['id'], 'available')
-        self.assertEqual(cgsnapshot_name, cgsnapshot['name'])
 
         # Create CG from CG snapshot
         cg_name2 = data_utils.rand_name('CG_from_snap')
@@ -218,15 +218,15 @@
             self.consistencygroups_adm_client.create_consistencygroup_from_src)
         cg2 = create_consistencygroup2(cgsnapshot_id=cgsnapshot['id'],
                                        name=cg_name2)['consistencygroup']
+        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
+            cg2['id'], 'available')
+        self.assertEqual(cg_name2, cg2['name'])
         vols = self.admin_volume_client.list_volumes(
             detail=True)['volumes']
         for vol in vols:
             if vol['consistencygroup_id'] == cg2['id']:
                 waiters.wait_for_volume_resource_status(
                     self.admin_volume_client, vol['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
-            cg2['id'], 'available')
-        self.assertEqual(cg_name2, cg2['name'])
 
         # Clean up
         self._delete_consistencygroup(cg2['id'])
@@ -247,19 +247,19 @@
             self.consistencygroups_adm_client.create_consistencygroup)
         cg = create_consistencygroup(volume_type['id'],
                                      name=cg_name)['consistencygroup']
+        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
+            cg['id'], 'available')
+        self.assertEqual(cg_name, cg['name'])
+
+        # Create volume
         vol_name = data_utils.rand_name("volume")
         params = {'name': vol_name,
                   'volume_type': volume_type['id'],
                   'consistencygroup_id': cg['id'],
                   'size': CONF.volume.volume_size}
-
-        # Create volume
         volume = self.admin_volume_client.create_volume(**params)['volume']
         waiters.wait_for_volume_resource_status(self.admin_volume_client,
                                                 volume['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
-            cg['id'], 'available')
-        self.assertEqual(cg_name, cg['name'])
 
         # Create CG from CG
         cg_name2 = data_utils.rand_name('CG_from_cg')
@@ -267,15 +267,15 @@
             self.consistencygroups_adm_client.create_consistencygroup_from_src)
         cg2 = create_consistencygroup2(source_cgid=cg['id'],
                                        name=cg_name2)['consistencygroup']
+        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
+            cg2['id'], 'available')
+        self.assertEqual(cg_name2, cg2['name'])
         vols = self.admin_volume_client.list_volumes(
             detail=True)['volumes']
         for vol in vols:
             if vol['consistencygroup_id'] == cg2['id']:
                 waiters.wait_for_volume_resource_status(
                     self.admin_volume_client, vol['id'], 'available')
-        self.consistencygroups_adm_client.wait_for_consistencygroup_status(
-            cg2['id'], 'available')
-        self.assertEqual(cg_name2, cg2['name'])
 
         # Clean up
         self._delete_consistencygroup(cg2['id'])
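
The reordering above waits for each consistency group (and each CG snapshot)
to reach 'available' immediately after creation, before any volumes or
dependent resources are created against it, so later steps never race a
group that is still building. As a rough sketch of what a status waiter such
as wait_for_consistencygroup_status does under the hood (names, interval and
timeout here are illustrative, not the actual tempest client code):

```python
import time


def wait_for_status(show_resource, resource_id, expected='available',
                    interval=2, timeout=300):
    """Poll a show() callable until the resource reaches `expected`.

    `show_resource` is any callable returning a dict with a 'status' key,
    e.g. lambda rid: client.show_consistencygroup(rid)['consistencygroup'].
    """
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = show_resource(resource_id)['status']
        if status == expected:
            return
        if status.startswith('error'):
            raise RuntimeError('%s went to status %s' % (resource_id, status))
        time.sleep(interval)
    raise TimeoutError('%s did not reach %s within %ss'
                       % (resource_id, expected, timeout))
```
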
diff --git a/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
index d1fa730..e5ded52 100644
--- a/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
+++ b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
@@ -13,7 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-from tempest.common import waiters
 from tempest import config
 from tempest.lib import decorators
 from tempest.lib import exceptions
@@ -41,19 +40,10 @@
     def test_backup_crossproject_admin_negative(self):
 
         # create vol as user
-        volume = self.volumes_client.create_volume(
-            size=CONF.volume.volume_size)['volume']
-        waiters.wait_for_volume_resource_status(
-            self.volumes_client,
-            volume['id'], 'available')
+        volume = self.create_volume(size=CONF.volume.volume_size)
 
         # create backup as user
-        backup = self.backups_client.create_backup(
-            volume_id=volume['id'])['backup']
-        waiters.wait_for_volume_resource_status(
-            self.backups_client,
-            backup['id'], 'available')
-
+        self.create_backup(volume_id=volume['id'])
+
         # try to create incremental backup as admin
         self.assertRaises(
             exceptions.BadRequest, self.admin_backups_client.create_backup,
@@ -63,18 +53,12 @@
     def test_backup_crossproject_user_negative(self):
 
         # create vol as user
-        volume = self.volumes_client.create_volume(
-            size=CONF.volume.volume_size)['volume']
-        waiters.wait_for_volume_resource_status(
-            self.volumes_client,
-            volume['id'], 'available')
+        volume = self.create_volume(size=CONF.volume.volume_size)
 
         # create backup as admin
-        backup = self.admin_backups_client.create_backup(
-            volume_id=volume['id'])['backup']
-        waiters.wait_for_volume_resource_status(
-            self.admin_backups_client,
-            backup['id'], 'available')
+        self.create_backup(volume_id=volume['id'],
+                           backup_client=self.admin_backups_client)
 
         # try to create incremental backup as user
         self.assertRaises(
@@ -85,25 +69,14 @@
     def test_incremental_backup_respective_parents(self):
 
         # create vol as user
-        volume = self.volumes_client.create_volume(
-            size=CONF.volume.volume_size)['volume']
-        waiters.wait_for_volume_resource_status(
-            self.volumes_client,
-            volume['id'], 'available')
+        volume = self.create_volume(size=CONF.volume.volume_size)
 
         # create backup as admin
-        backup_adm = self.admin_backups_client.create_backup(
-            volume_id=volume['id'])['backup']
-        waiters.wait_for_volume_resource_status(
-            self.admin_backups_client,
-            backup_adm['id'], 'available')
+        backup_adm = self.create_backup(
+            volume_id=volume['id'], backup_client=self.admin_backups_client)
 
         # create backup as user
-        backup_usr = self.backups_client.create_backup(
-            volume_id=volume['id'])['backup']
-        waiters.wait_for_volume_resource_status(
-            self.backups_client,
-            backup_usr['id'], 'available')
+        backup_usr = self.create_backup(volume_id=volume['id'])
 
         # refresh admin backup and assert no child backups
         backup_adm = self.admin_backups_client.show_backup(
@@ -111,11 +84,8 @@
         self.assertFalse(backup_adm['has_dependent_backups'])
 
         # create incremental backup as admin
-        backup_adm_inc = self.admin_backups_client.create_backup(
-            volume_id=volume['id'], incremental=True)['backup']
-        waiters.wait_for_volume_resource_status(
-            self.admin_backups_client,
-            backup_adm_inc['id'], 'available')
+        self.create_backup(volume_id=volume['id'], incremental=True,
+                           backup_client=self.admin_backups_client)
 
         # refresh user backup and assert no child backups
         backup_usr = self.backups_client.show_backup(
@@ -128,11 +98,8 @@
         self.assertTrue(backup_adm['has_dependent_backups'])
 
         # create incremental backup as user
-        backup_usr_inc = self.backups_client.create_backup(
-            volume_id=volume['id'], incremental=True)['backup']
-        waiters.wait_for_volume_resource_status(
-            self.backups_client,
-            backup_usr_inc['id'], 'available')
+        self.create_backup(volume_id=volume['id'],
+                           incremental=True)
 
         # refresh user backup and assert it has children
         backup_usr = self.backups_client.show_backup(
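
The backup tests above now delegate to the plugin's base-class helpers
(create_volume, create_backup) instead of open-coding the create call plus
waiters.wait_for_volume_resource_status. A minimal sketch of what such a
tempest-style helper is assumed to bundle together (creation, cleanup
registration and the wait); the exact base-class implementation may differ:

```python
from tempest.common import waiters
from tempest.lib.common.utils import test_utils


def create_backup(test, volume_id, backup_client=None, **kwargs):
    """Create a backup, register its cleanup and wait for 'available'."""
    client = backup_client or test.backups_client
    backup = client.create_backup(volume_id=volume_id, **kwargs)['backup']
    # Ignore a 404 on cleanup in case the backup is already gone.
    test.addCleanup(test_utils.call_and_ignore_notfound_exc,
                    client.delete_backup, backup['id'])
    waiters.wait_for_volume_resource_status(client, backup['id'], 'available')
    return backup
```
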
diff --git a/cinder_tempest_plugin/api/volume/test_volume_revert.py b/cinder_tempest_plugin/api/volume/test_volume_revert.py
index 7c5eed1..bf3d806 100644
--- a/cinder_tempest_plugin/api/volume/test_volume_revert.py
+++ b/cinder_tempest_plugin/api/volume/test_volume_revert.py
@@ -80,4 +80,4 @@
         # Destination volume smaller than source, API should block that
         self.assertRaises(exceptions.BadRequest,
                           self.volume_revert_client.revert_to_snapshot,
-                          self.volume, self.snapshot)
+                          self.volume, self.snapshot['id'])
diff --git a/cinder_tempest_plugin/api/volume/test_volume_unicode.py b/cinder_tempest_plugin/api/volume/test_volume_unicode.py
index 35d0a54..026271b 100644
--- a/cinder_tempest_plugin/api/volume/test_volume_unicode.py
+++ b/cinder_tempest_plugin/api/volume/test_volume_unicode.py
@@ -57,6 +57,7 @@
 
         return volume
 
+    @decorators.idempotent_id('2d7e2e49-150e-4849-a18e-79f9777c9a96')
     def test_create_delete_unicode_volume_name(self):
         """Create a volume with a unicode name and view it."""
 
@@ -68,15 +69,16 @@
     @testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
                           "Cinder volume snapshots are disabled")
     @decorators.related_bug('1393871')
+    @decorators.idempotent_id('332be44d-5418-4fb3-a8f0-a3587de6929f')
     def test_snapshot_create_volume_description_non_ascii_code(self):
         # Create a volume with non-ascii description
-        description = u'\u05e7\u05d9\u05d9\u05e4\u05e9'
+        description = '\u05e7\u05d9\u05d9\u05e4\u05e9'
         volume = self.create_volume(description=description)
         vol_info = self.volumes_client.show_volume(volume['id'])['volume']
         self.assertEqual(description, vol_info['description'])
 
         # Create a snapshot with different non-ascii description
-        description = u'\u4e2d\u56fd\u793e\u533a'
+        description = '\u4e2d\u56fd\u793e\u533a'
         snapshot = self.create_snapshot(volume['id'], description=description)
         snapshot_info = self.snapshots_client.show_snapshot(
             snapshot['id'])['snapshot']
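
Dropping the u'' prefixes above is the part of this change that matches the
commit subject: on Python 3 every str literal is already unicode, so the
prefix and six.text_type are redundant. For illustration:

```python
# Before, with six (Python 2/3 compatible):
#     import six
#     description = u'\u05e7\u05d9\u05d9\u05e4\u05e9'
#     assert isinstance(description, six.text_type)
#
# After, Python 3 only: str *is* the unicode type.
description = '\u05e7\u05d9\u05d9\u05e4\u05e9'
assert isinstance(description, str)
```
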
diff --git a/cinder_tempest_plugin/rbac/__init__.py b/cinder_tempest_plugin/rbac/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/__init__.py
diff --git a/cinder_tempest_plugin/rbac/v3/__init__.py b/cinder_tempest_plugin/rbac/v3/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/__init__.py
diff --git a/cinder_tempest_plugin/rbac/v3/base.py b/cinder_tempest_plugin/rbac/v3/base.py
new file mode 100644
index 0000000..d1a11e5
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/base.py
@@ -0,0 +1,42 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest import config
+
+CONF = config.CONF
+
+
+class VolumeV3RbacBaseTests(object):
+
+    identity_version = 'v3'
+
+    @classmethod
+    def skip_checks(cls):
+        super(VolumeV3RbacBaseTests, cls).skip_checks()
+        if not CONF.enforce_scope.cinder:
+            raise cls.skipException(
+                "Tempest is not configured to enforce_scope for cinder, "
+                "skipping RBAC tests. To enable these tests set "
+                "`tempest.conf [enforce_scope] cinder=True`."
+            )
+
+    def do_request(self, method, expected_status=200, client=None, **payload):
+        if not client:
+            client = self.client
+        if isinstance(expected_status, type(Exception)):
+            self.assertRaises(expected_status,
+                              getattr(client, method),
+                              **payload)
+        else:
+            response = getattr(client, method)(**payload)
+            self.assertEqual(response.response.status, expected_status)
+            return response
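
do_request lets each RBAC test express its expectation either as an HTTP
status code or as an exception class. A hypothetical caller (the client
method name matches the capability tests below; the host value is made up
for illustration):

```python
from tempest.lib import exceptions


class ExampleCapabilityTest(VolumeV3RbacBaseTests):  # sketch only

    def test_allowed(self):
        # Expecting success: asserts the response status is exactly 200.
        self.do_request('show_backend_capabilities', expected_status=200,
                        host='cinder-host@lvm#pool')

    def test_denied(self):
        # Expecting denial: asserts the client call raises Forbidden.
        self.do_request('show_backend_capabilities',
                        expected_status=exceptions.Forbidden,
                        host='cinder-host@lvm#pool')
```
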
diff --git a/cinder_tempest_plugin/rbac/v3/test_capabilities.py b/cinder_tempest_plugin/rbac/v3/test_capabilities.py
new file mode 100644
index 0000000..1fa542d
--- /dev/null
+++ b/cinder_tempest_plugin/rbac/v3/test_capabilities.py
@@ -0,0 +1,80 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import abc
+
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.api.volume import base
+from cinder_tempest_plugin.rbac.v3 import base as rbac_base
+
+
+class VolumeV3RbacCapabilityTests(rbac_base.VolumeV3RbacBaseTests,
+                                  metaclass=abc.ABCMeta):
+
+    @classmethod
+    def setup_clients(cls):
+        super().setup_clients()
+        cls.persona = getattr(cls, 'os_%s' % cls.credentials[0])
+        cls.client = cls.persona.volume_capabilities_client_latest
+        # NOTE(lbragstad): This admin_client will be more useful later when
+        # cinder supports system-scope and we need it for administrative
+        # operations. For now, keep os_project_admin as the admin client until
+        # we have system-scope.
+        admin_client = cls.os_project_admin
+        cls.admin_capabilities_client = (
+            admin_client.volume_capabilities_client_latest)
+        cls.admin_stats_client = (
+            admin_client.volume_scheduler_stats_client_latest)
+
+    @classmethod
+    def setup_credentials(cls):
+        super().setup_credentials()
+        cls.os_primary = getattr(cls, 'os_%s' % cls.credentials[0])
+
+    @abc.abstractmethod
+    def test_get_capabilities(self):
+        """Test volume_extension:capabilities policy.
+
+        This test must check:
+          * whether the persona can fetch capabilities for a host.
+
+        """
+        pass
+
+
+class ProjectAdminTests(VolumeV3RbacCapabilityTests, base.BaseVolumeTest):
+
+    credentials = ['project_admin', 'system_admin']
+
+    def test_get_capabilities(self):
+        pools = self.admin_stats_client.list_pools()['pools']
+        host_name = pools[0]['name']
+        self.do_request('show_backend_capabilities', expected_status=200,
+                        host=host_name)
+
+
+class ProjectMemberTests(ProjectAdminTests, base.BaseVolumeTest):
+
+    credentials = ['project_member', 'project_admin', 'system_admin']
+
+    def test_get_capabilities(self):
+        pools = self.admin_stats_client.list_pools()['pools']
+        host_name = pools[0]['name']
+        self.do_request('show_backend_capabilities',
+                        expected_status=exceptions.Forbidden,
+                        host=host_name)
+
+
+class ProjectReaderTests(ProjectMemberTests, base.BaseVolumeTest):
+
+    credentials = ['project_reader', 'project_admin', 'system_admin']
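
The persona wiring above is driven entirely by the credentials list:
setup_clients() resolves credentials[0] into an os_<persona> credential
manager via getattr, and each subclass inherits the test body while swapping
the persona and expected outcome. Illustratively (persona name hypothetical):

```python
class ProjectFooTests(ProjectMemberTests, base.BaseVolumeTest):
    # credentials[0] == 'project_foo', so setup_clients() resolves
    # cls.persona = getattr(cls, 'os_project_foo') and runs the inherited
    # test_get_capabilities with that persona's capabilities client.
    credentials = ['project_foo', 'project_admin', 'system_admin']
```
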
diff --git a/cinder_tempest_plugin/scenario/manager.py b/cinder_tempest_plugin/scenario/manager.py
new file mode 100644
index 0000000..3b25bb1
--- /dev/null
+++ b/cinder_tempest_plugin/scenario/manager.py
@@ -0,0 +1,214 @@
+# Copyright 2021 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+LOG = log.getLogger(__name__)
+
+
+class ScenarioTest(manager.ScenarioTest):
+
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(ScenarioTest, cls).setup_clients()
+        cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
+
+    def _attached_volume_name(
+            self, disks_list_before_attach, ip_address, private_key):
+        ssh = self.get_remote_client(ip_address, private_key=private_key)
+
+        def _wait_for_volume_available_on_system():
+            disks_list_after_attach = ssh.list_disks()
+            return len(disks_list_after_attach) > len(disks_list_before_attach)
+
+        if not test_utils.call_until_true(_wait_for_volume_available_on_system,
+                                          CONF.compute.build_timeout,
+                                          CONF.compute.build_interval):
+            raise lib_exc.TimeoutException
+
+        disks_list_after_attach = ssh.list_disks()
+        volume_name = [item for item in disks_list_after_attach
+                       if item not in disks_list_before_attach][0]
+        return volume_name
+
+    def _get_file_md5(self, ip_address, filename, dev_name=None,
+                      mount_path='/mnt', private_key=None, server=None):
+
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server)
+        if dev_name is not None:
+            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
+                                                               mount_path))
+
+        md5_sum = ssh_client.exec_command(
+            'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
+        if dev_name is not None:
+            ssh_client.exec_command('sudo umount %s' % mount_path)
+        return md5_sum
+
+    def _count_files(self, ip_address, dev_name=None, mount_path='/mnt',
+                     private_key=None, server=None):
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server)
+        if dev_name is not None:
+            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
+                                                               mount_path))
+        count = ssh_client.exec_command('sudo ls -l %s | wc -l' % mount_path)
+        if dev_name is not None:
+            ssh_client.exec_command('sudo umount %s' % mount_path)
+        # Subtract 2 from the count: the `ls -l` output starts with a
+        # 'total' line, and creating the filesystem also adds a
+        # lost+found directory.
+        return int(count) - 2
+
+    def _make_fs(self, ip_address, private_key, server, dev_name, fs='ext4'):
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server)
+
+        ssh_client.make_fs(dev_name, fs=fs)
+
+    def create_md5_new_file(self, ip_address, filename, dev_name=None,
+                            mount_path='/mnt', private_key=None, server=None):
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server)
+
+        if dev_name is not None:
+            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
+                                                               mount_path))
+        ssh_client.exec_command(
+            'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
+            (mount_path, filename))
+        md5 = ssh_client.exec_command(
+            'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
+        ssh_client.exec_command('sudo sync')
+        if dev_name is not None:
+            ssh_client.exec_command('sudo umount %s' % mount_path)
+        return md5
+
+    def get_md5_from_file(self, instance, instance_ip, filename,
+                          dev_name=None):
+
+        md5_sum = self._get_file_md5(instance_ip, filename=filename,
+                                     dev_name=dev_name,
+                                     private_key=self.keypair['private_key'],
+                                     server=instance)
+        count = self._count_files(instance_ip, dev_name=dev_name,
+                                  private_key=self.keypair['private_key'],
+                                  server=instance)
+        return count, md5_sum
+
+    def _attach_and_get_volume_device_name(self, server, volume, instance_ip,
+                                           private_key):
+        ssh_client = self.get_remote_client(
+            instance_ip, private_key=private_key,
+            server=server)
+        # List disks before volume attachment
+        disks_list_before_attach = ssh_client.list_disks()
+        # Attach volume
+        attachment = self.attach_volume(server, volume)
+        # Find the difference between disks before and after attachment that
+        # gives us the volume device name
+        volume_device_name = self._attached_volume_name(
+            disks_list_before_attach, instance_ip, private_key)
+        return volume_device_name, attachment
+
+    def create_volume_type(self, client=None, name=None, extra_specs=None):
+        if not client:
+            client = self.os_admin.volume_types_client_latest
+        if not name:
+            class_name = self.__class__.__name__
+            name = data_utils.rand_name(class_name + '-volume-type')
+        randomized_name = data_utils.rand_name('scenario-type-' + name)
+
+        LOG.debug("Creating a volume type: %s with extra_specs %s",
+                  randomized_name, extra_specs)
+        if extra_specs is None:
+            extra_specs = {}
+        volume_type = self.admin_volume_types_client.create_volume_type(
+            name=randomized_name, extra_specs=extra_specs)['volume_type']
+        self.addCleanup(self.cleanup_volume_type, volume_type)
+        return volume_type
+
+    def attach_volume(self, server, volume, device=None, tag=None):
+        """Attaches volume to server and waits for 'in-use' volume status.
+
+        The volume will be detached when the test tears down.
+
+        :param server: The server to which the volume will be attached.
+        :param volume: The volume to attach.
+        :param device: Optional mountpoint for the attached volume. Note that
+            this is not guaranteed for all hypervisors and is not recommended.
+        :param tag: Optional device role tag to apply to the volume.
+        """
+        attach_kwargs = dict(volumeId=volume['id'])
+        if device:
+            attach_kwargs['device'] = device
+        if tag:
+            attach_kwargs['tag'] = tag
+
+        attachment = self.servers_client.attach_volume(
+            server['id'], **attach_kwargs)['volumeAttachment']
+        # On teardown detach the volume and for multiattach volumes wait for
+        # the attachment to be removed. For non-multiattach volumes wait for
+        # the state of the volume to change to available. This is so we don't
+        # error out when trying to delete the volume during teardown.
+        if volume['multiattach']:
+            att = waiters.wait_for_volume_attachment_create(
+                self.volumes_client, volume['id'], server['id'])
+            self.addCleanup(waiters.wait_for_volume_attachment_remove,
+                            self.volumes_client, volume['id'],
+                            att['attachment_id'])
+        else:
+            self.addCleanup(waiters.wait_for_volume_resource_status,
+                            self.volumes_client, volume['id'], 'available')
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume['id'], 'in-use')
+        # Ignore 404s on detach in case the server is deleted or the volume
+        # is already detached.
+        self.addCleanup(self._detach_volume, server, volume)
+        return attachment
+
+    def _detach_volume(self, server, volume):
+        """Helper method to detach a volume.
+
+        Ignores 404 responses if the volume or server do not exist, or the
+        volume is already detached from the server.
+        """
+        try:
+            volume = self.volumes_client.show_volume(volume['id'])['volume']
+            # Check the status. You can only detach an in-use volume, otherwise
+            # the compute API will return a 400 response.
+            if volume['status'] == 'in-use':
+                self.servers_client.detach_volume(server['id'], volume['id'])
+        except lib_exc.NotFound:
+            # Ignore 404s on detach in case the server is deleted or the volume
+            # is already detached.
+            pass
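
The cleanup logic in attach_volume above relies on addCleanup running
callbacks in LIFO order: the detach (registered last) executes before the
wait for the volume to return to 'available'. A self-contained demonstration
of that ordering guarantee:

```python
import unittest


class CleanupOrderDemo(unittest.TestCase):

    def test_lifo_order(self):
        events = []
        # Registered first, runs last (like the 'available' wait above).
        self.addCleanup(events.append, 'wait-for-available')
        # Registered last, runs first (like the detach above).
        self.addCleanup(events.append, 'detach-volume')
        self.doCleanups()
        self.assertEqual(['detach-volume', 'wait-for-available'], events)


if __name__ == '__main__':
    unittest.main()
```
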
diff --git a/cinder_tempest_plugin/scenario/test_snapshots.py b/cinder_tempest_plugin/scenario/test_snapshots.py
new file mode 100644
index 0000000..5a9611f
--- /dev/null
+++ b/cinder_tempest_plugin/scenario/test_snapshots.py
@@ -0,0 +1,133 @@
+# Copyright 2020 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import utils
+from tempest.lib import decorators
+
+from cinder_tempest_plugin.scenario import manager
+
+
+class SnapshotDataIntegrityTests(manager.ScenarioTest):
+
+    def setUp(self):
+        super(SnapshotDataIntegrityTests, self).setUp()
+        self.keypair = self.create_keypair()
+        self.security_group = self.create_security_group()
+
+    @decorators.idempotent_id('ff10644e-5a70-4a9f-9801-8204bb81fb61')
+    @utils.services('compute', 'volume', 'image', 'network')
+    def test_snapshot_data_integrity(self):
+        """Check the data integrity after creating and restoring snapshots.
+
+        The procedure is as follows:
+
+        1) Create an instance with an ephemeral disk
+        2) Create a volume, attach it to the instance and create a
+           filesystem on it
+        3) Mount the volume, create a file and write data into it, unmount it
+        4) Create a snapshot
+        5) Repeat steps 3 and 4 two more times (creating 3 snapshots in all)
+
+        Now create a volume from each snapshot one by one, attach it to the
+        instance and check the number of files and the file contents at the
+        point each snapshot was created.
+        """
+
+        # Create an instance
+        server = self.create_server(
+            key_name=self.keypair['name'],
+            security_groups=[{'name': self.security_group['name']}])
+
+        # Create an empty volume
+        volume = self.create_volume()
+
+        instance_ip = self.get_server_ip(server)
+
+        # Attach volume to instance and find its device name (e.g. /dev/vdb)
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
+            server, volume, instance_ip, self.keypair['private_key'])
+
+        # Create filesystem on the volume
+        self._make_fs(instance_ip, self.keypair['private_key'], server,
+                      volume_device_name)
+
+        # Write data to volume
+        file1_md5 = self.create_md5_new_file(
+            instance_ip, dev_name=volume_device_name, filename="file1",
+            private_key=self.keypair['private_key'],
+            server=server)
+
+        # Create first snapshot
+        snapshot1 = self.create_volume_snapshot(volume['id'], force=True)
+
+        # Write data to volume
+        file2_md5 = self.create_md5_new_file(
+            instance_ip, dev_name=volume_device_name, filename="file2",
+            private_key=self.keypair['private_key'],
+            server=server)
+
+        # Create second snapshot
+        snapshot2 = self.create_volume_snapshot(volume['id'], force=True)
+
+        # Write data to volume
+        file3_md5 = self.create_md5_new_file(
+            instance_ip, dev_name=volume_device_name, filename="file3",
+            private_key=self.keypair['private_key'],
+            server=server)
+
+        # Create third snapshot
+        snapshot3 = self.create_volume_snapshot(volume['id'], force=True)
+
+        # Detach the volume
+        self.nova_volume_detach(server, volume)
+
+        # Create volume from snapshot, attach it to instance and check file
+        # and contents for snap1
+        volume_snap_1 = self.create_volume(snapshot_id=snapshot1['id'])
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
+            server, volume_snap_1, instance_ip, self.keypair['private_key'])
+        count_snap_1, md5_file_1 = self.get_md5_from_file(
+            server, instance_ip, 'file1', dev_name=volume_device_name)
+        # Detach the volume
+        self.nova_volume_detach(server, volume_snap_1)
+
+        self.assertEqual(count_snap_1, 1)
+        self.assertEqual(file1_md5, md5_file_1)
+
+        # Create volume from snapshot, attach it to instance and check file
+        # and contents for snap2
+        volume_snap_2 = self.create_volume(snapshot_id=snapshot2['id'])
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
+            server, volume_snap_2, instance_ip, self.keypair['private_key'])
+        count_snap_2, md5_file_2 = self.get_md5_from_file(
+            server, instance_ip, 'file2', dev_name=volume_device_name)
+        # Detach the volume
+        self.nova_volume_detach(server, volume_snap_2)
+
+        self.assertEqual(count_snap_2, 2)
+        self.assertEqual(file2_md5, md5_file_2)
+
+        # Create volume from snapshot, attach it to instance and check file
+        # and contents for snap3
+        volume_snap_3 = self.create_volume(snapshot_id=snapshot3['id'])
+        volume_device_name, __ = self._attach_and_get_volume_device_name(
+            server, volume_snap_3, instance_ip, self.keypair['private_key'])
+        count_snap_3, md5_file_3 = self.get_md5_from_file(
+            server, instance_ip, 'file3', dev_name=volume_device_name)
+        # Detach the volume
+        self.nova_volume_detach(server, volume_snap_3)
+
+        self.assertEqual(count_snap_3, 3)
+        self.assertEqual(file3_md5, md5_file_3)
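
The integrity check compares the digest printed by
`md5sum <file> | cut -c 1-32` on the guest against the digest recorded when
the file was written. A local Python equivalent of what that remote pipeline
produces:

```python
import hashlib


def md5_hex(path, chunk_size=1 << 20):
    """Return the 32-character hex MD5 digest of a file, i.e. the same
    value the test extracts from `md5sum <path> | cut -c 1-32` over SSH."""
    digest = hashlib.md5()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            digest.update(chunk)
    return digest.hexdigest()
```
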
diff --git a/cinder_tempest_plugin/scenario/test_volume_encrypted.py b/cinder_tempest_plugin/scenario/test_volume_encrypted.py
new file mode 100644
index 0000000..69edfa6
--- /dev/null
+++ b/cinder_tempest_plugin/scenario/test_volume_encrypted.py
@@ -0,0 +1,183 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from tempest.scenario import manager
+
+CONF = config.CONF
+
+
+class TestEncryptedCinderVolumes(manager.EncryptionScenarioTest,
+                                 manager.ScenarioTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(TestEncryptedCinderVolumes, cls).skip_checks()
+        if not CONF.compute_feature_enabled.attach_encrypted_volume:
+            raise cls.skipException('Encrypted volume attach is not supported')
+
+    @classmethod
+    def resource_setup(cls):
+        super(TestEncryptedCinderVolumes, cls).resource_setup()
+
+    @classmethod
+    def resource_cleanup(cls):
+        super(TestEncryptedCinderVolumes, cls).resource_cleanup()
+
+    def launch_instance(self):
+        keypair = self.create_keypair()
+
+        return self.create_server(key_name=keypair['name'])
+
+    def attach_detach_volume(self, server, volume):
+        attached_volume = self.nova_volume_attach(server, volume)
+        self.nova_volume_detach(server, attached_volume)
+
+    def _delete_server(self, server):
+        self.servers_client.delete_server(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
+
+    def create_encrypted_volume_from_image(self, encryption_provider,
+                                           volume_type='luks',
+                                           key_size=256,
+                                           cipher='aes-xts-plain64',
+                                           control_location='front-end',
+                                           **kwargs):
+        """Create an encrypted volume from an image.
+
+        :param encryption_provider: the encryption provider to use
+        :param volume_type: name of the volume type to create
+        :param key_size: encryption key size, in bits
+        :param cipher: encryption cipher
+        :param control_location: notional service where encryption is
+            performed
+        :param **kwargs: additional volume create parameters; 'image_id'
+            (CONF.compute.image_ref by default) and 'name'
+            ('$classname-volume-origin' by default) are popped from here
+        """
+        volume_type = self.create_volume_type(name=volume_type)
+        self.create_encryption_type(type_id=volume_type['id'],
+                                    provider=encryption_provider,
+                                    key_size=key_size,
+                                    cipher=cipher,
+                                    control_location=control_location)
+        image_id = kwargs.pop('image_id', CONF.compute.image_ref)
+        name = kwargs.pop('name', None)
+        if not name:
+            namestart = self.__class__.__name__ + '-volume-origin'
+            name = data_utils.rand_name(namestart)
+        return self.create_volume(volume_type=volume_type['name'],
+                                  name=name, imageRef=image_id,
+                                  **kwargs)
+
+    @decorators.idempotent_id('5bb622ab-5060-48a8-8840-d589a548b9e4')
+    @utils.services('volume')
+    @utils.services('compute')
+    def test_attach_cloned_encrypted_volume(self):
+        """This test case attempts to reproduce the following steps:
+
+        * Create an encrypted volume
+        * Create a clone from the volume
+        * Boot an instance and attach/detach the cloned volume
+        """
+
+        volume = self.create_encrypted_volume('luks', volume_type='luks')
+        kwargs = {
+            'display_name': data_utils.rand_name(self.__class__.__name__),
+            'source_volid': volume['id'],
+            'volume_type': volume['volume_type'],
+            'size': volume['size']
+        }
+        volume_s = self.volumes_client.create_volume(**kwargs)['volume']
+        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
+                        volume_s['id'])
+        self.addCleanup(self.volumes_client.delete_volume, volume_s['id'])
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, volume_s['id'], 'available')
+        volume_source = self.volumes_client.show_volume(
+            volume_s['id'])['volume']
+        server = self.launch_instance()
+        self.attach_detach_volume(server, volume_source)
+
+    @decorators.idempotent_id('5bb622ab-5060-48a8-8840-d589a548b7e4')
+    @utils.services('volume')
+    @utils.services('compute')
+    @utils.services('image')
+    def test_boot_cloned_encrypted_volume(self):
+        """This test case attempts to reproduce the following steps:
+
+        * Create an encrypted volume from an image
+        * Boot an instance from the volume
+        * Write data to the volume
+        * Delete the instance
+        * Create a clone of the first volume (via source_volid)
+        * Boot an instance from the cloned volume
+        * Verify the data
+        """
+
+        keypair = self.create_keypair()
+        security_group = self.create_security_group()
+
+        volume = self.create_encrypted_volume_from_image('luks')
+
+        # create an instance from volume
+        instance_1st = self.boot_instance_from_resource(
+            source_id=volume['id'],
+            source_type='volume',
+            keypair=keypair,
+            security_group=security_group)
+
+        # write content to volume on instance
+        ip_instance_1st = self.get_server_ip(instance_1st)
+        timestamp = self.create_timestamp(ip_instance_1st,
+                                          private_key=keypair['private_key'],
+                                          server=instance_1st)
+        # delete instance
+        self._delete_server(instance_1st)
+
+        # create clone
+        kwargs = {
+            'display_name': data_utils.rand_name(self.__class__.__name__),
+            'source_volid': volume['id'],
+            'volume_type': volume['volume_type'],
+            'size': volume['size']
+        }
+        volume_s = self.volumes_client.create_volume(**kwargs)['volume']
+
+        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
+                        volume_s['id'])
+        self.addCleanup(self.volumes_client.delete_volume, volume_s['id'])
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, volume_s['id'], 'available')
+
+        # create an instance from volume
+        instance_2nd = self.boot_instance_from_resource(
+            source_id=volume_s['id'],
+            source_type='volume',
+            keypair=keypair,
+            security_group=security_group)
+
+        # check the content of written file
+        ip_instance_2nd = self.get_server_ip(instance_2nd)
+        timestamp2 = self.get_timestamp(ip_instance_2nd,
+                                        private_key=keypair['private_key'],
+                                        server=instance_2nd)
+
+        self.assertEqual(timestamp, timestamp2)
+
+        # delete instance
+        self._delete_server(instance_2nd)
diff --git a/doc/requirements.txt b/doc/requirements.txt
new file mode 100644
index 0000000..d3348d6
--- /dev/null
+++ b/doc/requirements.txt
@@ -0,0 +1,2 @@
+sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
+openstackdocstheme>=1.18.1 # Apache-2.0
diff --git a/requirements.txt b/requirements.txt
index 56d2ec6..4d75108 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,4 @@
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 oslo.config>=5.1.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-tempest>=17.1.0 # Apache-2.0
+tempest>=27.0.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 9b05085..fb188e1 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,11 +1,11 @@
 [metadata]
 name = cinder-tempest-plugin
 summary = Tempest plugin tests for Cinder.
-description-file =
+description_file =
     README.rst
 author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = http://www.openstack.org/
+author_email = openstack-discuss@lists.openstack.org
+home_page = http://www.openstack.org/
 classifier =
     Environment :: OpenStack
     Intended Audience :: Information Technology
@@ -16,6 +16,8 @@
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.6
     Programming Language :: Python :: 3.7
+    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3.9
 
 [files]
 packages =
diff --git a/test-requirements.txt b/test-requirements.txt
index e0bd682..905ad51 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -6,8 +6,6 @@
 
 coverage!=4.4,>=4.0 # Apache-2.0
 python-subunit>=1.0.0 # Apache-2.0/BSD
-sphinx!=1.6.6,!=1.6.7,>=1.6.2 # BSD
 oslotest>=3.2.0 # Apache-2.0
-testrepository>=0.0.18 # Apache-2.0/BSD
+stestr>=1.0.0 # Apache-2.0
 testtools>=2.2.0 # MIT
-openstackdocstheme>=1.18.1 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index be122b4..c9c91ad 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,5 +1,5 @@
 [tox]
-minversion = 3.1.0
+minversion = 3.18.0
 envlist = pep8
 skipsdist = True
 # this allows tox to infer the base python from the environment name
@@ -12,9 +12,12 @@
 setenv =
    VIRTUAL_ENV={envdir}
    PYTHONWARNINGS=default::DeprecationWarning
+   OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true}
+   OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true}
+   OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true}
 deps = -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
        -r{toxinidir}/test-requirements.txt
-commands = python setup.py test --slowest --testr-args='{posargs}'
+commands = stestr run --slowest {posargs}
 
 [testenv:pep8]
 commands = flake8 {posargs}
@@ -26,7 +29,9 @@
 # E123, E125 skipped as they are invalid PEP-8.
 # W503 line break before binary operator
 # W504 line break after binary operator
+# H101 include name with TODO
+#  reason: no real benefit
 show-source = True
-ignore = E123,E125,W503,W504
+ignore = E123,E125,W503,W504,H101
 builtins = _
 exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build