Merge "Remove support for Python 3.8 and older"
diff --git a/.zuul.yaml b/.zuul.yaml
index c089396..766c0ec 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -5,15 +5,20 @@
     check:
       jobs:
         - cinder-tempest-plugin-lvm-multiattach
+        - cinder-tempest-plugin-lvm-concurrency-tests
         - cinder-tempest-plugin-lvm-lio-barbican
-        - cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream:
+        - cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream-py311:
+            voting: false
+        - cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream-2025-1:
             voting: false
         # FIXME: the tgt job is broken on jammy, and we may be removing tgt
         # support anyway.  So make it non-voting until we figure out what to
         # do about this, which should be at the March 2023 virtual PTG.
         - cinder-tempest-plugin-lvm-tgt-barbican:
             voting: false
-        - cinder-tempest-plugin-lvm-lio-barbican-fips:
+        - cinder-tempest-plugin-lvm-lio-barbican-fips-2025-1:
+            voting: false
+        - cinder-tempest-plugin-lvm-lio-barbican-fips-py311:
             voting: false
         - cinder-tempest-plugin-lvm-nvmet-barbican
         - nova-ceph-multistore:
@@ -23,10 +28,9 @@
         # As per the Tempest "Stable Branch Support Policy", Tempest will only
         # support the "Maintained" stable branches, so only jobs for the
         # current stable branches should be listed here.
+        - cinder-tempest-plugin-basic-2025-1
         - cinder-tempest-plugin-basic-2024-2
         - cinder-tempest-plugin-basic-2024-1
-        - cinder-tempest-plugin-basic-2023-2
-        - cinder-tempest-plugin-basic-2023-1
         - cinder-tempest-plugin-protection-functional
     gate:
       jobs:
@@ -37,10 +41,9 @@
         - cinder-tempest-plugin-cbak-ceph
     experimental:
       jobs:
+        - cinder-tempest-plugin-cbak-ceph-2025-1
         - cinder-tempest-plugin-cbak-ceph-2024-2
         - cinder-tempest-plugin-cbak-ceph-2024-1
-        - cinder-tempest-plugin-cbak-ceph-2023-2
-        - cinder-tempest-plugin-cbak-ceph-2023-1
 
 - job:
     name: cinder-tempest-plugin-protection-functional
@@ -94,6 +97,32 @@
     timeout: 10800
 
 - job:
+    name: cinder-tempest-plugin-lvm-concurrency-tests
+    description: |
+      This job runs Cinder concurrency scenario tests from the cinder-tempest-plugin.
+      These tests involve parallel operations on volumes (e.g., backup creation, attachment),
+      which can put stress on system resources.
+
+      To avoid hitting resource limits, `tempest_concurrency` is set to 1 so that the
+      tests themselves run serially, even though each test performs concurrent actions internally.
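+      The number of resources each test creates in parallel is controlled by the
+      `concurrent_resource_count` option (default: 5) that this plugin registers
+      in the `[volume]` config group.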
+    parent: devstack-tempest
+    required-projects:
+      - opendev.org/openstack/tempest
+      - opendev.org/openstack/cinder-tempest-plugin
+      - opendev.org/openstack/cinder
+    vars:
+      tempest_concurrency: 1
+      tox_envlist: all
+      tempest_test_regex: 'cinder_tempest_plugin.scenario.test_volume_concurrency'
+      tempest_plugins:
+        - cinder-tempest-plugin
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            volume-feature-enabled:
+              concurrency_tests: True
+
+- job:
     name: cinder-tempest-plugin-lvm-barbican-base-abstract
     description: |
       This is a base job for lvm with lio & tgt targets
@@ -130,6 +159,8 @@
               volume_revert: True
       devstack_services:
         barbican: true
+        # explicitly enable c-bak, as it may be disabled in the parent job
+        c-bak: true
       tempest_plugins:
         - cinder-tempest-plugin
     irrelevant-files:
@@ -255,7 +286,6 @@
       # bypasses nova's checks.  Until the nova team decides on a strategy to handle
       # this issue, we skip these tests.
       tempest_exclude_regex: (tempest.api.image.v2.test_images_formats.ImagesFormatTest.test_compute_rejects)
-      configure_swap_size: 4096
       devstack_local_conf:
         test-config:
           $TEMPEST_CONFIG:
@@ -268,6 +298,12 @@
     timeout: 10800
 
 - job:
+    name: cinder-tempest-plugin-cbak-ceph-2025-1
+    parent: cinder-tempest-plugin-cbak-ceph
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2025.1
+
+- job:
     name: cinder-tempest-plugin-cbak-ceph-2024-2
     parent: cinder-tempest-plugin-cbak-ceph
     nodeset: openstack-single-node-jammy
@@ -279,18 +315,6 @@
     nodeset: openstack-single-node-jammy
     override-checkout: stable/2024.1
 
-- job:
-    name: cinder-tempest-plugin-cbak-ceph-2023-2
-    parent: cinder-tempest-plugin-cbak-ceph
-    nodeset: openstack-single-node-jammy
-    override-checkout: stable/2023.2
-
-- job:
-    name: cinder-tempest-plugin-cbak-ceph-2023-1
-    parent: cinder-tempest-plugin-cbak-ceph
-    nodeset: openstack-single-node-jammy
-    override-checkout: stable/2023.1
-
 # variant for pre-Ussuri branches (no volume revert for Ceph),
 # should this job be used on those branches
 - job:
@@ -322,6 +346,22 @@
       runs tempest tests and cinderlib tests on CentOS Stream 9.
 
 - job:
+    name: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream-2025-1
+    parent: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream
+    override-checkout: stable/2025.1
+
+- job:
+    name: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream-py311
+    parent: cinder-tempest-plugin-lvm-lio-barbican
+    nodeset: devstack-single-node-centos-9-stream
+    description: |
+      This job configures Cinder with LVM, LIO, barbican and
+      runs tempest tests and cinderlib tests on CentOS Stream 9 with Python 3.11.
+    vars:
+      devstack_localrc:
+        PYTHON3_VERSION: 3.11
+
+- job:
     name: cinder-tempest-plugin-lvm-lio-barbican-fips
     parent: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream
     description: |
@@ -334,6 +374,26 @@
       tempest_exclude_regex: 'test_encrypted_cinder_volumes_cryptsetup'
 
 - job:
+    name: cinder-tempest-plugin-lvm-lio-barbican-fips-2025-1
+    parent: cinder-tempest-plugin-lvm-lio-barbican-fips
+    override-checkout: stable/2025.1
+
+- job:
+    name: cinder-tempest-plugin-lvm-lio-barbican-fips-py311
+    parent: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream-py311
+    nodeset: devstack-single-node-centos-9-stream
+    description: |
+      This job configures Cinder with LVM, LIO, barbican and
+      runs tempest tests and cinderlib tests on CentOS Stream 9
+      under FIPS mode with Python 3.11.
+    vars:
+      enable_fips: True
+      nslookup_target: 'opendev.org'
+      tempest_exclude_regex: 'test_encrypted_cinder_volumes_cryptsetup'
+      devstack_localrc:
+        PYTHON3_VERSION: 3.11
+
+- job:
     name: cinder-tempest-plugin-lvm-nvmet-barbican
     description: |
       This jobs configures Cinder with LVM, nvmet, barbican and
@@ -413,6 +473,12 @@
       - ^releasenotes/.*$
 
 - job:
+    name: cinder-tempest-plugin-basic-2025-1
+    parent: cinder-tempest-plugin-basic
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2025.1
+
+- job:
     name: cinder-tempest-plugin-basic-2024-2
     parent: cinder-tempest-plugin-basic
     nodeset: openstack-single-node-jammy
@@ -423,15 +489,3 @@
     parent: cinder-tempest-plugin-basic
     nodeset: openstack-single-node-jammy
     override-checkout: stable/2024.1
-
-- job:
-    name: cinder-tempest-plugin-basic-2023-2
-    parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-jammy
-    override-checkout: stable/2023.2
-
-- job:
-    name: cinder-tempest-plugin-basic-2023-1
-    parent: cinder-tempest-plugin-basic
-    nodeset: openstack-single-node-jammy
-    override-checkout: stable/2023.1
diff --git a/cinder_tempest_plugin/api/volume/base.py b/cinder_tempest_plugin/api/volume/base.py
index 1c64973..e049ff3 100644
--- a/cinder_tempest_plugin/api/volume/base.py
+++ b/cinder_tempest_plugin/api/volume/base.py
@@ -91,6 +91,10 @@
             name = data_utils.rand_name(cls.__name__ + '-Volume')
             kwargs['name'] = name
 
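+        # If a common compute/volume availability zone is configured, create
+        # the volume in that AZ by default.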
+        if CONF.compute.compute_volume_common_az:
+            kwargs.setdefault('availability_zone',
+                              CONF.compute.compute_volume_common_az)
+
         volume = cls.volumes_client.create_volume(**kwargs)['volume']
         cls.addClassResourceCleanup(
             cls.volumes_client.wait_for_resource_deletion, volume['id'])
diff --git a/cinder_tempest_plugin/api/volume/test_volume_dependency.py b/cinder_tempest_plugin/api/volume/test_volume_dependency.py
index 5ea067f..0b0cc8e 100644
--- a/cinder_tempest_plugin/api/volume/test_volume_dependency.py
+++ b/cinder_tempest_plugin/api/volume/test_volume_dependency.py
@@ -147,8 +147,9 @@
         images_client.delete_image(image_id)
         images_client.wait_for_resource_deletion(image_id)
 
-    @testtools.skipUnless(CONF.volume_feature_enabled.volume_image_dep_tests,
-                          reason='Volume/image dependency tests not enabled.')
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.enable_volume_image_dep_tests,
+        reason='Volume/image dependency tests not enabled.')
     @utils.services('image', 'volume')
     @decorators.idempotent_id('7a9fba78-2e4b-42b1-9898-bb4a60685320')
     def test_image_volume_dependencies_1(self):
@@ -174,8 +175,9 @@
 
         self.del_image(image['id'])
 
-    @testtools.skipUnless(CONF.volume_feature_enabled.volume_image_dep_tests,
-                          reason='Volume/image dependency tests not enabled.')
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.enable_volume_image_dep_tests,
+        reason='Volume/image dependency tests not enabled.')
     @utils.services('image', 'volume')
     @decorators.idempotent_id('0e20bd6e-440f-41d8-9b5d-fc047ac00423')
     def test_image_volume_dependencies_2(self):
@@ -210,8 +212,9 @@
 
         self.del_image(image['id'])
 
-    @testtools.skipUnless(CONF.volume_feature_enabled.volume_image_dep_tests,
-                          reason='Volume/image dependency tests not enabled.')
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.enable_volume_image_dep_tests,
+        reason='Volume/image dependency tests not enabled.')
     @decorators.idempotent_id('e6050452-06bd-4c7f-9912-45178c83e379')
     @utils.services('image', 'volume')
     def test_image_volume_dependencies_3(self):
diff --git a/cinder_tempest_plugin/common/__init__.py b/cinder_tempest_plugin/common/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/cinder_tempest_plugin/common/__init__.py
diff --git a/cinder_tempest_plugin/common/concurrency.py b/cinder_tempest_plugin/common/concurrency.py
new file mode 100644
index 0000000..0374b12
--- /dev/null
+++ b/cinder_tempest_plugin/common/concurrency.py
@@ -0,0 +1,55 @@
+# Copyright 2025 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+import multiprocessing
+
+from tempest import config
+
+CONF = config.CONF
+
+
+def run_concurrent_tasks(target, **kwargs):
+    """Run a target function concurrently using multiprocessing."""
+    manager = multiprocessing.Manager()
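+    # Manager-backed lists are shared across processes, so results and errors
+    # collected by the workers remain visible to the parent after join().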
+    resource_ids = manager.list()
+    # To capture exceptions
+    errors = manager.list()
+    resource_count = CONF.volume.concurrent_resource_count
+
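+    # Exceptions raised in a child process do not propagate to the parent,
+    # so the wrapper records them in the shared errors list for reporting.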
+    def wrapped_target(index, resource_ids, **kwargs):
+        try:
+            target(index, resource_ids, **kwargs)
+        except Exception as e:
+            errors.append(f"Worker {index} failed: {str(e)}")
+
+    processes = []
+    for i in range(resource_count):
+        p = multiprocessing.Process(
+            target=wrapped_target,
+            args=(i, resource_ids),
+            kwargs=kwargs
+        )
+        processes.append(p)
+        p.start()
+
+    for p in processes:
+        p.join()
+
+    if errors:
+        error_msg = "\n".join(errors)
+        raise RuntimeError(
+            f"One or more concurrent tasks failed:\n{error_msg}")
+
+    return list(resource_ids)
diff --git a/cinder_tempest_plugin/config.py b/cinder_tempest_plugin/config.py
index 53222b8..969451e 100644
--- a/cinder_tempest_plugin/config.py
+++ b/cinder_tempest_plugin/config.py
@@ -24,7 +24,16 @@
                 help='Enable to run Cinder volume revert tests'),
     cfg.BoolOpt('volume_image_dep_tests',
                 default=True,
-                help='Run tests for dependencies between images and volumes')
+                help='Run tests for dependencies between images and volumes',
+                deprecated_for_removal=True,
+                deprecated_reason='The `volume_image_dep_tests` option in '
+                'cinder-tempest-plugin is deprecated. Use the tempest '
+                'option '
+                '`CONF.volume_feature_enabled.enable_volume_image_dep_tests` '
+                'to enable the volume/image dependency tests instead.'),
+    cfg.BoolOpt('concurrency_tests',
+                default=False,
+                help='Enable or disable running concurrency tests.'),
 ]
 
 # The barbican service is discovered by config_tempest [1], and will appear
@@ -38,3 +47,9 @@
                 default=False,
                 help="Whether or not barbican is expected to be available"),
 ]
+
+concurrency_option = [
+    cfg.IntOpt('concurrent_resource_count',
+               default=5,
+               help='Number of resources to create concurrently.'),
+]
diff --git a/cinder_tempest_plugin/plugin.py b/cinder_tempest_plugin/plugin.py
index 79c835c..e9583cd 100644
--- a/cinder_tempest_plugin/plugin.py
+++ b/cinder_tempest_plugin/plugin.py
@@ -47,6 +47,9 @@
         config.register_opt_group(conf, config.volume_feature_group,
                                   project_config.cinder_option)
 
+        config.register_opt_group(conf, config.volume_group,
+                                  project_config.concurrency_option)
+
         # Define the 'barbican' service_available option, but only if the
         # barbican_tempest_plugin isn't present. It also defines the option,
         # and we need to avoid a duplicate option registration.
@@ -62,6 +65,7 @@
         """
         opt_lists = [
             (config.volume_feature_group.name, project_config.cinder_option),
+            (config.volume_group.name, project_config.concurrency_option),
         ]
 
         if 'barbican_tempest_plugin' not in sys.modules:
diff --git a/cinder_tempest_plugin/rbac/v3/test_volume_types.py b/cinder_tempest_plugin/rbac/v3/test_volume_types.py
index cdbc341..e4e161e 100644
--- a/cinder_tempest_plugin/rbac/v3/test_volume_types.py
+++ b/cinder_tempest_plugin/rbac/v3/test_volume_types.py
@@ -20,7 +20,24 @@
 class RbacV3VolumeTypesTests(rbac_base.VolumeV3RbacBaseTests):
 
     min_microversion = '3.3'
-    extra_spec_key = 'key1'
+    extra_specs = {
+        'key1': 'value1',
+        'multiattach': '<is> False',
+        'volume_backend_name': 'test-backend-name',
+        'RESKEY:availability_zones': 'test-az',
+        'replication_enabled': '<is> False'
+    }
+    extra_specs_keys = list(extra_specs.keys())
+    expected_extra_specs = {
+        "reader": [
+            'multiattach', 'RESKEY:availability_zones', 'replication_enabled'
+        ],
+        "member": [
+            'multiattach', 'RESKEY:availability_zones', 'replication_enabled'
+        ],
+        "admin": extra_specs_keys
+    }
+
     encryption_type_key_cipher = 'cipher'
     create_kwargs = {
         'provider': 'LuksEncryptor',
@@ -52,10 +69,9 @@
         # create a volume type
         if not name:
             name = data_utils.rand_name("volume-type")
-        extra_specs = {cls.extra_spec_key: 'value1'}
         params = {'name': name,
                   'description': "description",
-                  'extra_specs': extra_specs,
+                  'extra_specs': cls.extra_specs,
                   'os-volume-type-access:is_public': True}
         volume_type = cls.admin_types_client.create_volume_type(
             **params
@@ -66,6 +82,9 @@
             cls.encryption_type = \
                 cls.admin_encryption_types_client.create_encryption_type(
                     volume_type['id'], **cls.create_kwargs)['encryption']
+            # NOTE: strictly speaking, this is NOT a volume_type field;
+            # we save it for convenience in these tests
+            volume_type['encryption_id'] = cls.encryption_type['encryption_id']
 
         if cleanup:
             cls.addClassResourceCleanup(
@@ -74,6 +93,28 @@
 
         return volume_type
 
+    def _extra_specs_content_validator(self, client, extra_specs):
+        """Validation of volume type's extra specs content
+
+        Addition for feature:
+        https://specs.openstack.org/openstack/cinder-specs/specs/xena/
+        expose-cinder-user-visible-extra-specs-spec.html
+
+        This feature allows 'reader' and 'member' roles to see these
+        volume type extra specs:
+        'multiattach', 'RESKEY:availability_zones' and 'replication_enabled'
+
+        Args:
+            client: Client object to be used
+            extra_specs: extra_specs dict from response
+
+        Returns:
+            Boolean: True if the visible extra spec keys match those
+            expected for the caller's role, False otherwise
+        """
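+        # The credential user name is expected to end with the role name
+        # (e.g. '...-reader'); that suffix selects which extra spec keys the
+        # caller should be able to see.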
+        role = client.user.split('-')[-1]
+        return (sorted(list(extra_specs.keys())) ==
+                sorted(self.expected_extra_specs[role]))
+
     def _update_volume_type(self, expected_status):
         """Update volume type"""
         self.do_request(
@@ -112,30 +153,65 @@
             expected_status=expected_status,
             volume_type_id=self.volume_type['id']
         )['extra_specs']
-        self.assertIn(
-            self.extra_spec_key,
-            list(extra_specs.keys()),
-            message=f"Key '{self.extra_spec_key}' not found in extra_specs."
+        self.assertTrue(
+            self._extra_specs_content_validator(
+                client=self.client, extra_specs=extra_specs
+            )
         )
 
     def _show_extra_spec_for_volume_type(self, expected_status):
         """Show extra_spec for a volume type"""
-        self.do_request(
+
+        # Using the 'multiattach' extra spec because admin, member and reader
+        # roles should all be able to see it.
+        spec = self.do_request(
             method='show_volume_type_extra_specs',
             expected_status=expected_status,
             volume_type_id=self.volume_type['id'],
-            extra_specs_name=self.extra_spec_key
+            extra_specs_name='multiattach'
+        )
+        self.assertEqual(spec['multiattach'], self.extra_specs['multiattach'])
+
+        # Using the 'volume_backend_name' extra spec because only admin
+        # should be able to see it.
+        role = self.client.user.split('-')[-1]
+        # 'reader' and 'member' will get 404 (NotFound) if they try to show
+        # the extra spec 'volume_backend_name'
+        try:
+            spec = self.do_request(
+                method='show_volume_type_extra_specs',
+                expected_status=expected_status,
+                volume_type_id=self.volume_type['id'],
+                extra_specs_name='volume_backend_name'
+            )
+        except exceptions.NotFound:
+            # NotFound exception should be thrown for
+            # 'reader' and 'member' only
+            self.assertNotEqual(
+                role, 'admin',
+                "NotFound exception was thrown for admin"
+            )
+            return
+
+        # If no exception was thrown, check the content.
+        # Only admin should reach this point.
+        self.assertNotIn(
+            role, ['reader', 'member'],
+            "NotFound should be thrown for non-admin roles"
+        )
+        self.assertEqual(
+            spec['volume_backend_name'],
+            self.extra_specs['volume_backend_name']
         )
 
     def _update_extra_spec_for_volume_type(self, expected_status):
         """Update extra_spec for a volume type"""
-        spec_name = self.extra_spec_key
-        extra_spec = {spec_name: 'updated value'}
+        extra_spec = {'key1': 'key1 updated value'}
         self.do_request(
             method='update_volume_type_extra_specs',
             expected_status=expected_status,
             volume_type_id=self.volume_type['id'],
-            extra_spec_name=spec_name,
+            extra_spec_name='key1',
             extra_specs=extra_spec
         )
 
@@ -147,15 +223,20 @@
             method='delete_volume_type_extra_specs',
             expected_status=expected_status,
             volume_type_id=volume_type['id'],
-            extra_spec_name=self.extra_spec_key
+            extra_spec_name='key1'
         )
 
     def _show_volume_type_detail(self, expected_status):
         """Show volume type"""
-        self.do_request(
+        details = self.do_request(
             method='show_volume_type',
             expected_status=expected_status,
             volume_type_id=self.volume_type['id']
+        )['volume_type']
+        self.assertTrue(
+            self._extra_specs_content_validator(
+                client=self.client, extra_specs=details['extra_specs']
+            )
         )
 
     def _show_default_volume_type(self, expected_status):
@@ -181,10 +262,15 @@
 
     def _list_volume_types(self, expected_status):
         """List all volume types"""
-        self.do_request(
+        volume_types = self.do_request(
             method='list_volume_types',
             expected_status=expected_status
-        )
+        )['volume_types']
+        for volume_type in volume_types:
+            if volume_type['id'] == self.volume_type['id']:
+                self.assertTrue(self._extra_specs_content_validator(
+                    client=self.client,
+                    extra_specs=volume_type['extra_specs']))
 
     def _create_volume_type(self, expected_status):
         """Create a volume type"""
@@ -226,7 +312,8 @@
             method='delete_encryption_type',
             expected_status=expected_status,
             client=self.encryption_types_client,
-            volume_type_id=volume_type['id']
+            volume_type_id=volume_type['id'],
+            encryption_id=volume_type['encryption_id']
         )
 
     def _create_encryption_type(self, expected_status):
@@ -250,6 +337,7 @@
             expected_status=expected_status,
             client=self.encryption_types_client,
             volume_type_id=self.volume_type['id'],
+            encryption_id=self.volume_type['encryption_id'],
             **update_kwargs
         )
 
@@ -275,14 +363,12 @@
             expected_status=exceptions.Forbidden
         )
 
-    @decorators.skip_because(bug='2018467')
     @decorators.idempotent_id('9499752c-3b27-41a3-8f55-4bdba7297f92')
     def test_list_all_extra_specs_for_volume_type(self):
         self._list_all_extra_specs_for_volume_type(
             expected_status=200
         )
 
-    @decorators.skip_because(bug='2018467')
     @decorators.idempotent_id('a38f7248-3a5b-4e51-8e32-d2dcf9c771ea')
     def test_show_extra_spec_for_volume_type(self):
         self._show_extra_spec_for_volume_type(expected_status=200)
@@ -364,14 +450,12 @@
             expected_status=exceptions.Forbidden
         )
 
-    @decorators.skip_because(bug='2018467')
     @decorators.idempotent_id('82fd0d34-17b3-4f45-bd2e-728c9a8bff8c')
     def test_list_all_extra_specs_for_volume_type(self):
         self._list_all_extra_specs_for_volume_type(
             expected_status=200
         )
 
-    @decorators.skip_because(bug='2018467')
     @decorators.idempotent_id('67aa0b40-7c0a-4ae7-8682-fb4f20abd390')
     def test_show_extra_spec_for_volume_type(self):
         self._show_extra_spec_for_volume_type(expected_status=200)
@@ -457,7 +541,6 @@
             expected_status=200
         )
 
-    @decorators.skip_because(bug='2018467')
     @decorators.idempotent_id('a2cca7b6-0af9-47e5-b8c1-4e0f01822d4e')
     def test_show_extra_spec_for_volume_type(self):
         self._show_extra_spec_for_volume_type(expected_status=200)
diff --git a/cinder_tempest_plugin/scenario/test_snapshots.py b/cinder_tempest_plugin/scenario/test_snapshots.py
index 02cd6bd..7b8191b 100644
--- a/cinder_tempest_plugin/scenario/test_snapshots.py
+++ b/cinder_tempest_plugin/scenario/test_snapshots.py
@@ -130,8 +130,9 @@
 
 
 class SnapshotDependencyTests(manager.ScenarioTest):
-    @testtools.skipUnless(CONF.volume_feature_enabled.volume_image_dep_tests,
-                          'dependency tests not enabled')
+    @testtools.skipUnless(
+        CONF.volume_feature_enabled.enable_volume_image_dep_tests,
+        'dependency tests not enabled')
     @decorators.idempotent_id('e7028f52-f6d4-479c-8809-6f6cf96cfe0f')
     @utils.services('image', 'volume')
     def test_snapshot_removal(self):
diff --git a/cinder_tempest_plugin/scenario/test_volume_concurrency.py b/cinder_tempest_plugin/scenario/test_volume_concurrency.py
new file mode 100644
index 0000000..3a27174
--- /dev/null
+++ b/cinder_tempest_plugin/scenario/test_volume_concurrency.py
@@ -0,0 +1,170 @@
+# Copyright 2025 Red Hat, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS,
+#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#    See the License for the specific language governing permissions and
+#    limitations under the License.
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+from cinder_tempest_plugin.common import concurrency
+from cinder_tempest_plugin.scenario import manager
+
+CONF = config.CONF
+
+
+class ConcurrentVolumeActionsTest(manager.ScenarioTest):
+
+    @classmethod
+    def skip_checks(cls):
+        super(ConcurrentVolumeActionsTest, cls).skip_checks()
+        if not CONF.volume_feature_enabled.concurrency_tests:
+            raise cls.skipException(
+                "Concurrency tests are disabled.")
+
+    def _resource_create(self, index, resource_ids, create_func,
+                         resource_id_key='id', **kwargs):
+        """Generic resource creation logic.
+
+        Handles both single and indexed resource creation.
+        If any list-type arguments are passed (e.g., volume_ids),
+        they are indexed using `index`.
+        """
+
+        # Prepare arguments, indexing into lists if necessary
+        adjusted_kwargs = {}
+        for key, value in kwargs.items():
+            if isinstance(value, list):
+                # For list arguments, pick the value by index
+                adjusted_kwargs[key] = value[index]
+            else:
+                adjusted_kwargs[key] = value
+
+        resource = create_func(**adjusted_kwargs)
+        resource_ids.append(resource[resource_id_key])
+
+    def _attach_volume_action(self, index, resource_ids, server_id,
+                              volume_ids):
+        """Attach the given volume to the server."""
+        volume_id = volume_ids[index]
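+        # Each worker attaches the volume matching its index; device=None
+        # lets the compute service choose the device name.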
+        self.servers_client.attach_volume(
+            server_id, volumeId=volume_id, device=None)
+        waiters.wait_for_volume_resource_status(
+            self.volumes_client, volume_id, 'in-use')
+        resource_ids.append((server_id, volume_id))
+
+    def _cleanup_resources(self, resource_ids, delete_func, wait_func):
+        """Delete and wait for resource cleanup."""
+        for res_id in resource_ids:
+            delete_func(res_id)
+            wait_func(res_id)
+
+    @utils.services('volume')
+    @decorators.idempotent_id('ceb4f3c2-b2a4-48f9-82a8-3d32cdb5b375')
+    def test_create_volumes(self):
+        """Test parallel volume creation."""
+        volume_ids = concurrency.run_concurrent_tasks(
+            self._resource_create,
+            create_func=self.create_volume,
+        )
+
+        self._cleanup_resources(volume_ids,
+                                self.volumes_client.delete_volume,
+                                self.volumes_client.wait_for_resource_deletion)
+
+    @utils.services('volume')
+    @decorators.idempotent_id('6aa893a6-dfd0-4a0b-ae15-2fb24342e48d')
+    def test_create_snapshots(self):
+        """Test parallel snapshot creation from a single volume."""
+        volume = self.create_volume()
+
+        snapshot_ids = concurrency.run_concurrent_tasks(
+            self._resource_create,
+            create_func=self.create_volume_snapshot,
+            volume_id=volume['id']
+        )
+
+        self._cleanup_resources(
+            snapshot_ids,
+            self.snapshots_client.delete_snapshot,
+            self.snapshots_client.wait_for_resource_deletion)
+
+    @utils.services('compute', 'volume')
+    @decorators.idempotent_id('4c038386-00b0-4a6d-a612-48a4e0a96fa6')
+    def test_attach_volumes_to_server(self):
+        """Test parallel volume attachment to a server."""
+        server = self.create_server(wait_until='ACTIVE')
+        server_id = server['id']
+
+        volume_ids = concurrency.run_concurrent_tasks(
+            self._resource_create,
+            create_func=self.create_volume
+        )
+
+        attach_ids = concurrency.run_concurrent_tasks(
+            self._attach_volume_action,
+            server_id=server_id,
+            volume_ids=volume_ids
+        )
+
+        for server_id, volume_id in attach_ids:
+            self.servers_client.detach_volume(server_id, volume_id)
+            waiters.wait_for_volume_resource_status(self.volumes_client,
+                                                    volume_id, 'available')
+
+        self._cleanup_resources(volume_ids,
+                                self.volumes_client.delete_volume,
+                                self.volumes_client.wait_for_resource_deletion)
+
+    @utils.services('volume')
+    @decorators.idempotent_id('01f66de8-b217-4588-ab7f-e707d1931156')
+    def test_create_backups_and_restores(self):
+        """Test parallel backup creation and restore from multiple volumes."""
+
+        # Step 1: Create volumes in concurrency
+        volume_ids = concurrency.run_concurrent_tasks(
+            self._resource_create,
+            create_func=self.create_volume
+        )
+
+        # Step 2: Create backups in concurrency
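+        # volume_id is passed as a list, so _resource_create hands each
+        # worker its own volume, selected by the worker's index.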
+        backup_ids = concurrency.run_concurrent_tasks(
+            self._resource_create,
+            create_func=self.create_backup,
+            volume_id=volume_ids
+        )
+
+        # Step 3: Restore backups in concurrency
+        restored_vol_ids = concurrency.run_concurrent_tasks(
+            self._resource_create,
+            create_func=self.restore_backup,
+            resource_id_key='volume_id',
+            backup_id=backup_ids
+        )
+
+        # Step 4: Cleanup all resources
+        self._cleanup_resources(
+            backup_ids,
+            self.backups_client.delete_backup,
+            self.backups_client.wait_for_resource_deletion)
+
+        self._cleanup_resources(
+            volume_ids,
+            self.volumes_client.delete_volume,
+            self.volumes_client.wait_for_resource_deletion)
+
+        self._cleanup_resources(
+            restored_vol_ids,
+            self.volumes_client.delete_volume,
+            self.volumes_client.wait_for_resource_deletion)
diff --git a/releasenotes/notes/cinder-tempest-plugin-volume_image_dep_tests-ba46faab68dfb799.yaml b/releasenotes/notes/cinder-tempest-plugin-volume_image_dep_tests-ba46faab68dfb799.yaml
new file mode 100644
index 0000000..9390dd9
--- /dev/null
+++ b/releasenotes/notes/cinder-tempest-plugin-volume_image_dep_tests-ba46faab68dfb799.yaml
@@ -0,0 +1,6 @@
+deprecations:
+  - |
+    The cinder-tempest-plugin config option 'volume_image_dep_tests' is
+    deprecated. Use the tempest option
+    `CONF.volume_feature_enabled.enable_volume_image_dep_tests` to enable
+    the volume/image dependency tests instead.
diff --git a/requirements.txt b/requirements.txt
index c25d1c5..aa631de 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -5,4 +5,4 @@
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 oslo.config>=5.1.0 # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
-tempest>=34.2.0 # Apache-2.0
+tempest>=40.0.0 # Apache-2.0