Merge "Fix missing space in parameter description"
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
index 9c2d1ed..cea632b 100644
--- a/doc/source/stable_branch_support_policy.rst
+++ b/doc/source/stable_branch_support_policy.rst
@@ -23,7 +23,7 @@
 switch to running Tempest from a tag with support for the branch, or exclude
 a newly introduced test (if that is the cause of the issue). Tempest will not
 be creating stable branches to support *Extended Maintenance* phase branches, as
-the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
+the burden is on the *Extended Maintenance* phase branch maintainers, not the Tempest
 project, to support that branch.
 
 .. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index 968c821..162a111 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -12,7 +12,6 @@
 * 2024.1
 * 2023.2
 * 2023.1
-* Zed
 
 For older OpenStack Release:
 
diff --git a/releasenotes/notes/add-enable-volume-image-dep-tests-option-150b929d18da233f.yaml b/releasenotes/notes/add-enable-volume-image-dep-tests-option-150b929d18da233f.yaml
new file mode 100644
index 0000000..e78201e
--- /dev/null
+++ b/releasenotes/notes/add-enable-volume-image-dep-tests-option-150b929d18da233f.yaml
@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    Add new config option 'enable_volume_image_dep_tests' in section
+    [volume-feature-enabled] which should be used in
+    image<->volume<->snapshot dependency tests.
diff --git a/releasenotes/notes/add-placement-resource-provider-traits-api-calls-9f4b0455afec9afb.yaml b/releasenotes/notes/add-placement-resource-provider-traits-api-calls-9f4b0455afec9afb.yaml
new file mode 100644
index 0000000..1d1811c
--- /dev/null
+++ b/releasenotes/notes/add-placement-resource-provider-traits-api-calls-9f4b0455afec9afb.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Adds API calls for traits in ResourceProvidersClient.
diff --git a/releasenotes/notes/add-placement-traits-api-calls-087061f5455f0b12.yaml b/releasenotes/notes/add-placement-traits-api-calls-087061f5455f0b12.yaml
new file mode 100644
index 0000000..77d0b38
--- /dev/null
+++ b/releasenotes/notes/add-placement-traits-api-calls-087061f5455f0b12.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Adds API calls for traits in PlacementClient.
diff --git a/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
new file mode 100644
index 0000000..30a2278
--- /dev/null
+++ b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    A new config option in the ``volume_feature_enabled`` section,
+    ``volume_types_for_data_volume``, is added to allow the user to specify
+    which volume types can be used for data volumes in a new test
+    ``test_instances_with_cinder_volumes_on_all_compute_nodes``. By default,
+    this option is set to None.
diff --git a/releasenotes/notes/cleanup-attr-decorator-alias-78ce21eb20d87e01.yaml b/releasenotes/notes/cleanup-attr-decorator-alias-78ce21eb20d87e01.yaml
new file mode 100644
index 0000000..43091e1
--- /dev/null
+++ b/releasenotes/notes/cleanup-attr-decorator-alias-78ce21eb20d87e01.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    The ``attr`` decorator is no longer available in the ``tempest.test``
+    module. Use the ``tempest.lib.decorators`` module instead.
diff --git a/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml b/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml
new file mode 100644
index 0000000..48c1717
--- /dev/null
+++ b/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+  - |
+    The following deprecated alias methods of the ``ContainerClient`` class
+    have been removed.
+
+    - ``update_container_metadata``, replaced by ``create_update_or_delete_container_metadata``
+    - ``list_container_contents``, replaced by ``list_container_objects``
diff --git a/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml b/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml
new file mode 100644
index 0000000..d408538
--- /dev/null
+++ b/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+  - |
+    Default value of the ``[image-feature-enabled] image_import`` has been
+    changed from ``False`` to ``True``, and now the image import feature is
+    tested by default.
+
+deprecations:
+  - |
+    The ``[image-feature-enabled] image_import`` option has been deprecated.
+    The image import feature works in both standalone mode and WSGI mode since
+    Victoria and the image import feature can be always tested.
diff --git a/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml b/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml
new file mode 100644
index 0000000..2834876
--- /dev/null
+++ b/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+  - |
+    Default value of the ``[image-feature-enabled] os_glance_reserved`` has
+    been changed from ``False`` to ``True`` and now the reservation of
+    os_glance namespace is tested by default.
+
+deprecations:
+  - |
+    The ``[image-feature-enabled] os_glance_reserved`` option has been
+    deprecated because glance reserves the os_glance namespace since Wallaby.
diff --git a/releasenotes/notes/enable-neutron-by-default-57b87a20acc1ac47.yaml b/releasenotes/notes/enable-neutron-by-default-57b87a20acc1ac47.yaml
new file mode 100644
index 0000000..b8722ea
--- /dev/null
+++ b/releasenotes/notes/enable-neutron-by-default-57b87a20acc1ac47.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    Default value of the ``[service_available] neutron`` option has been
+    updated from ``False`` to ``True``.
+
+  - |
+    All tests which require network features are now skipped when
+    the ``[service_available] neutron`` option is set to ``False``.
diff --git a/releasenotes/notes/end-of-support-of-zed-43e2d5dd5608cb10.yaml b/releasenotes/notes/end-of-support-of-zed-43e2d5dd5608cb10.yaml
new file mode 100644
index 0000000..a0b3ac2
--- /dev/null
+++ b/releasenotes/notes/end-of-support-of-zed-43e2d5dd5608cb10.yaml
@@ -0,0 +1,12 @@
+---
+prelude: >
+    This is an intermediate release during the 2024.2 development cycle to
+    mark the end of support for Zed release in Tempest.
+    After this release, Tempest will support below OpenStack Releases:
+
+    * 2024.1
+    * 2023.2
+    * 2023.1
+
+    Current development of Tempest is for OpenStack 2024.2 development
+    cycle.
diff --git a/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml b/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml
new file mode 100644
index 0000000..26da18c
--- /dev/null
+++ b/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    The deprecated ``[compute-feature-enabled] xenapi_apis`` option has been
+    removed.
diff --git a/releasenotes/notes/resource-list-cbf9779e8b434654.yaml b/releasenotes/notes/resource-list-cbf9779e8b434654.yaml
new file mode 100644
index 0000000..bbd2f16
--- /dev/null
+++ b/releasenotes/notes/resource-list-cbf9779e8b434654.yaml
@@ -0,0 +1,11 @@
+---
+features:
+  - |
+    A new interface ``--resource-list`` has been introduced in the
+    ``tempest cleanup`` command to remove the resources created by
+    Tempest. A new config option in the default section, ``record_resources``,
+    is added to allow the recording of all resources created by Tempest.
+    A list of these resources will be saved in ``resource_list.json`` file,
+    which will be appended in case of multiple Tempest runs. This file
+    is intended to be used with the ``tempest cleanup`` command if it is
+    used with the newly added option ``--resource-list``.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index e3018b4..f36c837 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v39.0.0
    v38.0.0
    v37.0.0
    v36.0.0
diff --git a/releasenotes/source/v39.0.0.rst b/releasenotes/source/v39.0.0.rst
new file mode 100644
index 0000000..a971fbc
--- /dev/null
+++ b/releasenotes/source/v39.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v39.0.0 Release Notes
+=====================
+
+.. release-notes:: 39.0.0 Release Notes
+   :version: 39.0.0
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 04db849..c682641 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -81,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/victoria.
+   Upper constraints file to be used for stable branch till Wallaby.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 3d78557..29409c0 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/wallaby
+- name: Use stable branch upper-constraints till Wallaby
   set_fact:
     # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "stable/victoria", "stable/wallaby"]
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "unmaintained/victoria", "unmaintained/wallaby"]
 
 - name: Use Configured upper-constraints for non-master Tempest
   set_fact:
@@ -80,14 +80,14 @@
 
 - name: Tempest 26.1.0 workaround to fallback exclude-list to blacklist
   # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with
-  # stestr 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1
+  # stestr 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1
   # in stable/train|ussuri|victoria) which does not have new args exclude-list
   # so let's fallback to old arg if new arg is passed.
   set_fact:
     exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
   when:
     - tempest_test_exclude_list is defined
-    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
+    - target_branch in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
 
 # TODO(kopecmartin) remove this after all consumers of the role have switched
 # to tempest_exclude_regex option, until then it's kept here for the backward
@@ -105,11 +105,11 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch not in ["stable/train", "stable/ussuri", "stable/victoria"]
+    - target_branch not in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
 
 - name: Tempest 26.1.0 workaround to fallback exclude-regex to black-regex
   # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with stestr
-  # 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1 in
+  # 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1 in
   # stable/train|ussuri|victoria) which does not have new args exclude-list so
   # let's fallback to old arg if new arg is passed.
   set_fact:
@@ -117,7 +117,7 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
+    - target_branch in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
 
 - name: Run Tempest
   command: tox -e {{tox_envlist}} {{tox_extra_args}} -- \
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
index d43319c..255ca2d 100644
--- a/roles/tempest-cleanup/README.rst
+++ b/roles/tempest-cleanup/README.rst
@@ -47,6 +47,15 @@
    only resources with names that match the prefix. This option can be used
    together with dry_run.
 
+.. zuul:rolevar:: run_tempest_cleanup_resource_list
+   :default: false
+
+   When true, tempest cleanup will be called with '--resource-list' to delete
+   only resources listed in ./resource_list.json that is created if
+   record_resources config option in the default section of tempest.conf file
+   is enabled (set to True). The resource_list.json contains all resources
+   created by Tempest during a Tempest run.
+
 Role usage
 ----------
 
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
index 8060b29..1ec2f8c 100644
--- a/roles/tempest-cleanup/defaults/main.yaml
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -3,3 +3,4 @@
 dry_run: false
 run_tempest_fail_if_leaked_resources: false
 run_tempest_cleanup_prefix: false
+run_tempest_cleanup_resource_list: false
diff --git a/roles/tempest-cleanup/tasks/dry_run.yaml b/roles/tempest-cleanup/tasks/dry_run.yaml
index 07e1b63..8ae5183 100644
--- a/roles/tempest-cleanup/tasks/dry_run.yaml
+++ b/roles/tempest-cleanup/tasks/dry_run.yaml
@@ -5,7 +5,9 @@
   command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
   args:
     chdir: "{{ devstack_base_dir }}/tempest"
-  when: not run_tempest_cleanup_prefix
+  when:
+    - not run_tempest_cleanup_prefix
+    - run_tempest_cleanup_resource_list is not defined or not run_tempest_cleanup_resource_list
 
 - name: Run tempest cleanup dry-run with tempest prefix
   become: yes
@@ -13,4 +15,12 @@
   command: tox -evenv-tempest -- tempest cleanup --dry-run --debug --prefix tempest
   args:
     chdir: "{{ devstack_base_dir }}/tempest"
-  when: run_tempest_cleanup_prefix
\ No newline at end of file
+  when: run_tempest_cleanup_prefix
+
+- name: Run tempest cleanup with tempest resource list
+  become: yes
+  become_user: tempest
+  command: tox -evenv-tempest -- tempest cleanup --dry-run --debug --resource-list
+  args:
+    chdir: "{{ devstack_base_dir }}/tempest"
+  when: run_tempest_cleanup_resource_list
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
index 7ef4928..1e1c1a7 100644
--- a/roles/tempest-cleanup/tasks/main.yaml
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -27,7 +27,9 @@
       command: tox -evenv-tempest -- tempest cleanup --debug
       args:
         chdir: "{{ devstack_base_dir }}/tempest"
-      when: not run_tempest_cleanup_prefix
+      when:
+        - not run_tempest_cleanup_prefix
+        - run_tempest_cleanup_resource_list is not defined or not run_tempest_cleanup_resource_list
 
     - name: Run tempest cleanup with tempest prefix
       become: yes
@@ -37,6 +39,18 @@
         chdir: "{{ devstack_base_dir }}/tempest"
       when: run_tempest_cleanup_prefix
 
+    - name: Cat resource_list.json
+      command: cat "{{ devstack_base_dir }}/tempest/resource_list.json"
+      when: run_tempest_cleanup_resource_list
+
+    - name: Run tempest cleanup with tempest resource list
+      become: yes
+      become_user: tempest
+      command: tox -evenv-tempest -- tempest cleanup --debug --resource-list
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
+      when: run_tempest_cleanup_resource_list
+
 - when:
     - run_tempest_fail_if_leaked_resources
     - not init_saved_state
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
deleted file mode 100644
index 8fc155b..0000000
--- a/tempest/api/compute/admin/test_agents.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api.compute import base
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-CONF = config.CONF
-
-
-# TODO(stephenfin): Remove these tests once the nova Ussuri branch goes EOL
-class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Compute Agents API"""
-
-    @classmethod
-    def skip_checks(cls):
-        super(AgentsAdminTestJSON, cls).skip_checks()
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise cls.skipException('The os-agents API is not supported.')
-
-    @classmethod
-    def setup_clients(cls):
-        super(AgentsAdminTestJSON, cls).setup_clients()
-        cls.client = cls.os_admin.agents_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(AgentsAdminTestJSON, cls).resource_setup()
-        cls.params_agent = cls._param_helper(
-            hypervisor='common', os='linux', architecture='x86_64',
-            version='7.0', url='xxx://xxxx/xxx/xxx',
-            md5hash='add6bb58e139be103324d04d82d8f545')
-
-    @staticmethod
-    def _param_helper(**kwargs):
-        rand_key = 'architecture'
-        if rand_key in kwargs:
-            # NOTE: The rand_name is for avoiding agent conflicts.
-            # If you try to create an agent with the same hypervisor,
-            # os and architecture as an existing agent, Nova will return
-            # an HTTPConflict or HTTPServerError.
-            kwargs[rand_key] = data_utils.rand_name(
-                prefix=CONF.resource_name_prefix,
-                name=kwargs[rand_key])
-        return kwargs
-
-    @decorators.idempotent_id('1fc6bdc8-0b6d-4cc7-9f30-9b04fabe5b90')
-    def test_create_agent(self):
-        """Test creating a compute agent"""
-        params = self._param_helper(
-            hypervisor='kvm', os='win', architecture='x86',
-            version='7.0', url='xxx://xxxx/xxx/xxx',
-            md5hash='add6bb58e139be103324d04d82d8f545')
-        body = self.client.create_agent(**params)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        for expected_item, value in params.items():
-            self.assertEqual(value, body[expected_item])
-
-    @decorators.idempotent_id('dc9ffd51-1c50-4f0e-a820-ae6d2a568a9e')
-    def test_update_agent(self):
-        """Test updating a compute agent"""
-        # Create and update an agent.
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        agent_id = body['agent_id']
-        params = self._param_helper(
-            version='8.0', url='xxx://xxxx/xxx/xxx2',
-            md5hash='add6bb58e139be103324d04d82d8f547')
-        body = self.client.update_agent(agent_id, **params)['agent']
-        for expected_item, value in params.items():
-            self.assertEqual(value, body[expected_item])
-
-    @decorators.idempotent_id('470e0b89-386f-407b-91fd-819737d0b335')
-    def test_delete_agent(self):
-        """Test deleting a compute agent"""
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.client.delete_agent(body['agent_id'])
-
-        # Verify the list doesn't contain the deleted agent.
-        agents = self.client.list_agents()['agents']
-        self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
-                                               agents))
-
-    @decorators.idempotent_id('6a326c69-654b-438a-80a3-34bcc454e138')
-    def test_list_agents(self):
-        """Test listing compute agents"""
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        agents = self.client.list_agents()['agents']
-        self.assertNotEmpty(agents, 'Cannot get any agents.')
-        self.assertIn(body['agent_id'], map(lambda x: x['agent_id'], agents))
-
-    @decorators.idempotent_id('eabadde4-3cd7-4ec4-a4b5-5a936d2d4408')
-    def test_list_agents_with_filter(self):
-        """Test listing compute agents by the filter"""
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        params = self._param_helper(
-            hypervisor='xen', os='linux', architecture='x86',
-            version='7.0', url='xxx://xxxx/xxx/xxx1',
-            md5hash='add6bb58e139be103324d04d82d8f546')
-        agent_xen = self.client.create_agent(**params)['agent']
-        self.addCleanup(self.client.delete_agent, agent_xen['agent_id'])
-
-        agent_id_xen = agent_xen['agent_id']
-        agents = (self.client.list_agents(hypervisor=agent_xen['hypervisor'])
-                  ['agents'])
-        self.assertNotEmpty(agents, 'Cannot get any agents.')
-        self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
-        self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
-                                               agents))
-        for agent in agents:
-            self.assertEqual(agent_xen['hypervisor'], agent['hypervisor'])
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index be838fc..6c9aafb 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -207,15 +207,10 @@
         self.assertEqual(self.image_ref_alt, rebuilt_image_id)
 
     @decorators.idempotent_id('7a1323b4-a6a2-497a-96cb-76c07b945c71')
-    def test_reset_network_inject_network_info(self):
-        """Test resetting and injecting network info of a server"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'The resetNetwork server action is not supported.')
-
-        # Reset Network of a Server
+    def test_inject_network_info(self):
+        """Test injecting network info of a server"""
+        # Create a server
         server = self.create_test_server(wait_until='ACTIVE')
-        self.client.reset_network(server['id'])
         # Inject the Network Info into Server
         self.client.inject_network_info(server['id'])
 
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 013e7d8..b5ee9b1 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -24,7 +24,7 @@
 
 
 class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
-    """Test creating servers on mutiple nodes with scheduler_hints."""
+    """Test creating servers on multiple nodes with scheduler_hints."""
     @classmethod
     def resource_setup(cls):
         super(ServersOnMultiNodesTest, cls).resource_setup()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 2557e47..ed94af0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -410,7 +410,7 @@
         :param validatable: whether to the server needs to be
             validatable. When True, validation resources are acquired via
             the `get_class_validation_resources` helper.
-        :param kwargs: extra paramaters are passed through to the
+        :param kwargs: extra parameters are passed through to the
             `create_test_server` call.
         :return: the UUID of the created server.
         """
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 87cedae..d728853 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -71,7 +71,7 @@
             self.assertEqual(snapshot_name, image['name'])
         except lib_exceptions.TimeoutException as ex:
             # If timeout is reached, we don't need to check state,
-            # since, it wouldn't be a 'SAVING' state atleast and apart from
+            # since, it wouldn't be a 'SAVING' state at least and apart from
             # it, this testcase doesn't have scope for other state transition
             # Hence, skip the test.
             raise self.skipException("This test is skipped because " + str(ex))
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 275a26f..a245a8a 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -130,7 +130,7 @@
         except lib_exc.TimeoutException as ex:
             # Test cannot capture the image saving state.
             # If timeout is reached, we don't need to check state,
-            # since, it wouldn't be a 'SAVING' state atleast and apart from
+            # since, it wouldn't be a 'SAVING' state at least and apart from
             # it, this testcase doesn't have scope for other state transition
             # Hence, skip the test.
             raise self.skipException("This test is skipped because " + str(ex))
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 6664e15..b7db200 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -185,7 +185,7 @@
 
 
 class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
-    """Test creating server with FQDN hostname and verifying atrributes
+    """Test creating server with FQDN hostname and verifying attributes
 
     Starting Wallaby release, Nova sanitizes freeform characters in
     server hostname with dashes. This test verifies the same.
diff --git a/tempest/api/compute/servers/test_delete_server.py b/tempest/api/compute/servers/test_delete_server.py
index ee25a22..596d2bd 100644
--- a/tempest/api/compute/servers/test_delete_server.py
+++ b/tempest/api/compute/servers/test_delete_server.py
@@ -99,11 +99,14 @@
     def test_delete_server_while_in_verify_resize_state(self):
         """Test deleting a server while it's VM state is VERIFY_RESIZE"""
         server = self.create_test_server(wait_until='ACTIVE')
-        self.client.resize_server(server['id'], self.flavor_ref_alt)
-        waiters.wait_for_server_status(self.client, server['id'],
-                                       'VERIFY_RESIZE')
-        self.client.delete_server(server['id'])
-        waiters.wait_for_server_termination(self.client, server['id'])
+        body = self.client.resize_server(server['id'], self.flavor_ref_alt)
+        request_id = body.response['x-openstack-request-id']
+        waiters.wait_for_server_status(
+            self.client, server['id'], 'VERIFY_RESIZE', request_id=request_id)
+        body = self.client.delete_server(server['id'])
+        request_id = body.response['x-openstack-request-id']
+        waiters.wait_for_server_termination(
+            self.client, server['id'], request_id=request_id)
 
     @decorators.idempotent_id('d0f3f0d6-d9b6-4a32-8da4-23015dcab23c')
     @utils.services('volume')
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 3a970dd..d2e2935 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -40,7 +40,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
     def test_max_count_less_than_one(self):
-        """Test creating server with max_count < 1 shoudld fail"""
+        """Test creating server with max_count < 1 should fail"""
         invalid_max_count = 0
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           max_count=invalid_max_count)
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 9f93e76..5f35b15 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -27,13 +27,6 @@
     create_default_network = True
 
     @classmethod
-    def skip_checks(cls):
-        super(ServerMetadataTestJSON, cls).skip_checks()
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise cls.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
-    @classmethod
     def setup_clients(cls):
         super(ServerMetadataTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 655909c..2059dfa 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -14,13 +14,10 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
-CONF = config.CONF
-
 
 class ServerMetadataNegativeTestJSON(base.BaseV2ComputeTest):
     """Negative tests of server metadata"""
@@ -91,10 +88,6 @@
 
         Raise BadRequest if key in uri does not match the key passed in body.
         """
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'testkey': 'testvalue'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata_item,
@@ -104,10 +97,6 @@
     @decorators.idempotent_id('0df38c2a-3d4e-4db5-98d8-d4d9fa843a12')
     def test_set_metadata_non_existent_server(self):
         """Test setting metadata for a non existent server should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.NotFound,
@@ -119,10 +108,6 @@
     @decorators.idempotent_id('904b13dc-0ef2-4e4c-91cd-3b4a0f2f49d8')
     def test_update_metadata_non_existent_server(self):
         """Test updating metadata for a non existent server should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'key1': 'value1', 'key2': 'value2'}
         self.assertRaises(lib_exc.NotFound,
@@ -134,10 +119,6 @@
     @decorators.idempotent_id('a452f38c-05c2-4b47-bd44-a4f0bf5a5e48')
     def test_update_metadata_with_blank_key(self):
         """Test updating server metadata to blank key should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_server_metadata,
@@ -150,10 +131,6 @@
 
         Should not be able to delete metadata item from a non-existent server.
         """
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_server_metadata_item,
@@ -168,10 +145,6 @@
         A 403 Forbidden or 413 Overlimit (old behaviour) exception
         will be raised while exceeding metadata items limit for project.
         """
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
         quota_metadata = quota_set['metadata_items']
@@ -196,10 +169,6 @@
     @decorators.idempotent_id('96100343-7fa9-40d8-80fa-d29ef588ce1c')
     def test_set_server_metadata_blank_key(self):
         """Test setting server metadata with blank key should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
@@ -209,10 +178,6 @@
     @decorators.idempotent_id('64a91aee-9723-4863-be44-4c9d9f1e7d0e')
     def test_set_server_metadata_missing_metadata(self):
         """Test setting server metadata without metadata field should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 97c2774..d6c0324 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -234,7 +234,7 @@
         and virtio as the rescue disk.
         """
         # This test just check detach fail and does not
-        # perfom the detach operation but in cleanup from
+        # perform the detach operation but in cleanup from
         # self.attach_volume() it will try to detach the server
         # after unrescue the server. Due to that we need to make
         # server SSHable before it try to detach, more details are
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 955ba1c..fd05ec6 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -139,7 +139,7 @@
         """Test detaching volume from a rescued server should fail"""
         volume = self.create_volume()
         # This test just check detach fail and does not
-        # perfom the detach operation but in cleanup from
+        # perform the detach operation but in cleanup from
         # self.attach_volume() it will try to detach the server
         # after unrescue the server. Due to that we need to make
         # server SSHable before it try to detach, more details are
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 7ea8f09..e267b0f 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -465,6 +465,73 @@
         self._boot_from_multiattach_volume()
 
     @utils.services('image')
+    @decorators.idempotent_id('07eb6686-571c-45f0-9d96-446b120f1121')
+    def test_boot_with_multiattach_volume_direct_lun(self, boot=False):
+        image = self.images_client.show_image(CONF.compute.image_ref)
+        if image.get('hw_scsi_model') != 'virtio-scsi':
+            # NOTE(danms): Technically we don't need this to be virtio-scsi,
+            # but cirros (and other) test images won't see the device unless
+            # they have lsilogic drivers (which is the default). So use this
+            # as sort of the indication that the test should be enabled.
+            self.skipTest('hw_scsi_model=virtio-scsi not set on image')
+        if not CONF.validation.run_validation:
+            self.skipTest('validation is required for this test')
+
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+
+        volume = self._create_multiattach_volume(bootable=boot)
+        # Create an image-backed instance with the multi-attach volume as a
+        # block device with device_type=lun
+        bdm = [{'source_type': 'image',
+                'destination_type': 'local',
+                'uuid': CONF.compute.image_ref,
+                'boot_index': 0},
+               {'uuid': volume['id'],
+                'source_type': 'volume',
+                'destination_type': 'volume',
+                'device_type': 'lun',
+                'disk_bus': 'scsi'}]
+
+        if boot:
+            # If we're booting from it, we don't need the local-from-image
+            # disk, but we need the volume to have a boot_index
+            bdm.pop(0)
+            bdm[0]['boot_index'] = 0
+
+        server = self.create_test_server(
+            validatable=True,
+            validation_resources=validation_resources,
+            block_device_mapping_v2=bdm, wait_until='SSHABLE')
+
+        # Assert the volume is attached to the server.
+        attachments = self.servers_client.list_volume_attachments(
+            server['id'])['volumeAttachments']
+        self.assertEqual(1, len(attachments))
+        self.assertEqual(volume['id'], attachments[0]['volumeId'])
+
+        linux_client = remote_client.RemoteClient(
+            self.get_server_ip(server, validation_resources),
+            self.image_ssh_user,
+            self.image_ssh_password,
+            validation_resources['keypair']['private_key'],
+            server=server,
+            servers_client=self.servers_client)
+
+        # Assert the volume appears as a SCSI device
+        command = 'lsblk -S'
+        blks = linux_client.exec_command(command).strip()
+        self.assertIn('\nsda ', blks)
+
+        self.servers_client.delete_server(server['id'])
+        waiters.wait_for_server_termination(self.servers_client, server['id'])
+
+    @utils.services('image')
+    @decorators.idempotent_id('bfe61d6e-767a-4f93-9de8-054355536475')
+    def test_boot_from_multiattach_volume_direct_lun(self, boot=False):
+        self.test_boot_with_multiattach_volume_direct_lun(boot=True)
+
+    @utils.services('image')
     @decorators.idempotent_id('885ac48a-2d7a-40c5-ae8b-1993882d724c')
     @testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
                           'Snapshotting is not available.')
diff --git a/tempest/api/image/v2/test_images_dependency.py b/tempest/api/image/v2/test_images_dependency.py
new file mode 100644
index 0000000..326045b
--- /dev/null
+++ b/tempest/api/image/v2/test_images_dependency.py
@@ -0,0 +1,103 @@
+# Copyright 2024 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import io
+
+from oslo_log import log as logging
+
+from tempest.api.compute import base as compute_base
+from tempest.api.image import base as image_base
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.scenario import manager
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class ImageDependencyTests(image_base.BaseV2ImageTest,
+                           compute_base.BaseV2ComputeTest,
+                           manager.ScenarioTest):
+    """Test image, instance, and snapshot dependency.
+
+       The tests create an image and remove the base image that other
+       snapshots depended on. In OpenStack, images and snapshots should be
+       separate, but in some configurations like Glance with Ceph storage,
+       there were cases where images couldn't be removed.
+       This was fixed in glance store for RBD backend.
+
+       * Dependency scenarios:
+           - image -> instance -> snapshot dependency
+
+       NOTE: volume -> image dependencies tests are in cinder-tempest-plugin
+    """
+
+    @classmethod
+    def skip_checks(cls):
+        super(ImageDependencyTests, cls).skip_checks()
+        if not CONF.volume_feature_enabled.enable_volume_image_dep_tests:
+            skip_msg = (
+                "%s Volume/image dependency tests "
+                "not enabled" % (cls.__name__))
+            raise cls.skipException(skip_msg)
+
+    def _create_instance_snapshot(self):
+        """Create instance from image and then snapshot the instance."""
+        # Create image and store data to image
+        image_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix,
+            name='image-dependency-test')
+        image = self.create_image(name=image_name,
+                                  container_format='bare',
+                                  disk_format='raw',
+                                  visibility='private')
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+        waiters.wait_for_image_status(
+            self.client, image['id'], 'active')
+        # Create instance
+        instance = self.create_test_server(
+            name='instance-depend-image',
+            image_id=image['id'],
+            wait_until='ACTIVE')
+        LOG.info("Instance from image is created %s", instance)
+        instance_observed = \
+            self.servers_client.show_server(instance['id'])['server']
+        # Create instance snapshot
+        snapshot_instance = self.create_server_snapshot(
+            server=instance_observed)
+        LOG.info("Instance snapshot is created %s", snapshot_instance)
+        return image['id'], snapshot_instance['id']
+
+    @decorators.idempotent_id('d19b0731-e98e-4103-8b0e-02f651b8f586')
+    @utils.services('compute')
+    def test_nova_image_snapshot_dependency(self):
+        """Test with image > instance > snapshot dependency.
+
+        Create instance snapshot and check if we able to delete base
+        image
+
+        """
+        base_image_id, snapshot_image_id = self._create_instance_snapshot()
+        self.client.delete_image(base_image_id)
+        self.client.wait_for_resource_deletion(base_image_id)
+        images_list = self.client.list_images()['images']
+        fetched_images_id = [img['id'] for img in images_list]
+        self.assertNotIn(base_image_id, fetched_images_id)
+        self.assertIn(snapshot_image_id, fetched_images_id)
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index 5c28e96..01dda06 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -108,7 +108,7 @@
         # both cases, with and without that "active" attribute, we need to
         # removes that field from the allowed_address_pairs which are returned
         # by the Neutron server.
-        # We could make expected results of those tests to be dependend on the
+        # We could make expected results of those tests to be dependent on the
         # available Neutron's API extensions but in that case existing tests
         # may fail randomly as all tests are always using same IP addresses
         # thus allowed_address_pair may be active=True or active=False.
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index e39ad08..07f0903 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -129,7 +129,7 @@
         self.assertIsNone(updated_floating_ip['fixed_ip_address'])
         self.assertIsNone(updated_floating_ip['router_id'])
 
-        # Explicity test deletion of floating IP
+        # Explicitly test deletion of floating IP
         self.floating_ips_client.delete_floatingip(created_floating_ip['id'])
 
     @decorators.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index bd3e360..a0c6342 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -118,7 +118,7 @@
     @classmethod
     def skip_checks(cls):
         super(TagsExtTest, cls).skip_checks()
-        # Added condition to support backward compatiblity since
+        # Added condition to support backward compatibility since
         # tag-ext has been renamed to standard-attr-tag
         if not (utils.is_extension_enabled('tag-ext', 'network') or
                 utils.is_extension_enabled('standard-attr-tag', 'network')):
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index e2c9d54..2524def 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -142,7 +142,7 @@
         """Test container synchronization"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
-            # use rsplit with a maxsplit of 1 to ensure ipv6 adresses are
+            # use rsplit with a maxsplit of 1 to ensure ipv6 addresses are
             # handled properly as well
             client_proxy_ip = urlparse.urlparse(
                 cont_client.base_url).netloc.rsplit(':', 1)[0]
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 150677d..8cf44be 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -119,6 +119,13 @@
                         self.images_client.delete_image,
                         image_id)
         waiters.wait_for_image_status(self.images_client, image_id, 'active')
+        # This is required for the optimized upload volume path.
+        # New location APIs are async so we need to wait for the location
+        # import task to complete.
+        # This should work with old location API since we don't fail if there
+        # are no tasks for the image
+        waiters.wait_for_image_tasks_status(self.images_client,
+                                            image_id, 'success')
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 self.volume['id'], 'available')
 
diff --git a/tempest/clients.py b/tempest/clients.py
index 5338ed4..e432120 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -13,8 +13,13 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import os
+
+from oslo_concurrency import lockutils
+
 from tempest import config
 from tempest.lib import auth
+from tempest.lib.common.rest_client import RestClient
 from tempest.lib import exceptions as lib_exc
 from tempest.lib.services import clients
 
@@ -35,6 +40,11 @@
         super(Manager, self).__init__(
             credentials=credentials, identity_uri=identity_uri, scope=scope,
             region=CONF.identity.region)
+        if CONF.record_resources:
+            RestClient.lock_dir = os.path.join(
+                lockutils.get_lock_path(CONF),
+                'tempest-rec-rw-lock')
+            RestClient.record_resources = True
         # TODO(andreaf) When clients are initialised without the right
         # parameters available, the calls below will trigger a KeyError.
         # We should catch that and raise a better error.
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index 2a406de..8d06f93 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -87,6 +87,23 @@
   ``saved_state.json`` file will be ignored and cleanup will be done based on
   the passed prefix only.
 
+* ``--resource-list``: Allows the use of file ``./resource_list.json``, which
+  contains all resources created by Tempest during all Tempest runs, to
+  create another method for removing only resources created by Tempest.
+  List of these resources is created when config option ``record_resources``
+  in default section is set to true. After using this option for cleanup,
+  the deleted resources are removed from ``./resource_list.json``.
+
+  When this option is used, ``saved_state.json`` file is not needed (no
+  need to run with ``--init-saved-state`` first). If there is any
+  ``saved_state.json`` file present and you run the tempest cleanup with
+  ``--resource-list``, the ``saved_state.json`` file will be ignored and
+  cleanup will be done based on the ``resource_list.json`` only.
+
+  If you run tempest cleanup with both ``--prefix`` and ``--resource-list``,
+  the ``--resource-list`` option will be ignored and cleanup will be done
+  based on the ``--prefix`` option only.
+
 * ``--help``: Print the help text for the command and parameters.
 
 .. [1] The ``_projects_to_clean`` dictionary in ``dry_run.json`` lists the
@@ -122,6 +139,7 @@
 
 SAVED_STATE_JSON = "saved_state.json"
 DRY_RUN_JSON = "dry_run.json"
+RESOURCE_LIST_JSON = "resource_list.json"
 LOG = logging.getLogger(__name__)
 CONF = config.CONF
 
@@ -164,6 +182,7 @@
         self.admin_mgr = clients.Manager(
             credentials.get_configured_admin_credentials())
         self.dry_run_data = {}
+        self.resource_data = {}
         self.json_data = {}
 
         # available services
@@ -177,12 +196,20 @@
             self._init_state()
             return
 
-        self._load_json()
+        if parsed_args.prefix:
+            return
+
+        if parsed_args.resource_list:
+            self._load_resource_list()
+            return
+
+        self._load_saved_state()
 
     def _cleanup(self):
         LOG.info("Begin cleanup")
         is_dry_run = self.options.dry_run
         is_preserve = not self.options.delete_tempest_conf_objects
+        is_resource_list = self.options.resource_list
         is_save_state = False
         cleanup_prefix = self.options.prefix
 
@@ -194,8 +221,10 @@
         # they are in saved state json. Therefore is_preserve is False
         kwargs = {'data': self.dry_run_data,
                   'is_dry_run': is_dry_run,
+                  'resource_list_json': self.resource_data,
                   'saved_state_json': self.json_data,
                   'is_preserve': False,
+                  'is_resource_list': is_resource_list,
                   'is_save_state': is_save_state,
                   'prefix': cleanup_prefix}
         project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
@@ -208,8 +237,10 @@
 
         kwargs = {'data': self.dry_run_data,
                   'is_dry_run': is_dry_run,
+                  'resource_list_json': self.resource_data,
                   'saved_state_json': self.json_data,
                   'is_preserve': is_preserve,
+                  'is_resource_list': is_resource_list,
                   'is_save_state': is_save_state,
                   'prefix': cleanup_prefix,
                   'got_exceptions': self.GOT_EXCEPTIONS}
@@ -228,11 +259,17 @@
                 f.write(json.dumps(self.dry_run_data, sort_keys=True,
                                    indent=2, separators=(',', ': ')))
 
+        if is_resource_list:
+            LOG.info("Clearing 'resource_list.json' file.")
+            with open(RESOURCE_LIST_JSON, 'w') as f:
+                f.write('{}')
+
     def _clean_project(self, project):
         LOG.debug("Cleaning project:  %s ", project['name'])
         is_dry_run = self.options.dry_run
         dry_run_data = self.dry_run_data
         is_preserve = not self.options.delete_tempest_conf_objects
+        is_resource_list = self.options.resource_list
         project_id = project['id']
         project_name = project['name']
         project_data = None
@@ -244,7 +281,9 @@
         kwargs = {'data': project_data,
                   'is_dry_run': is_dry_run,
                   'saved_state_json': self.json_data,
+                  'resource_list_json': self.resource_data,
                   'is_preserve': is_preserve,
+                  'is_resource_list': is_resource_list,
                   'is_save_state': False,
                   'project_id': project_id,
                   'prefix': cleanup_prefix,
@@ -287,6 +326,19 @@
                             "ignored when --init-saved-state is used so that "
                             "it can capture the true init state - all "
                             "resources present at that moment.")
+        parser.add_argument('--resource-list', action="store_true",
+                            dest='resource_list', default=False,
+                            help="Runs tempest cleanup with generated "
+                            "JSON file: " + RESOURCE_LIST_JSON + " to "
+                            "erase resources created during Tempest run. "
+                            "NOTE: To create " + RESOURCE_LIST_JSON + " "
+                            "set config option record_resources under default "
+                            "section in tempest.conf file to true. This "
+                            "option will be ignored when --init-saved-state "
+                            "is used so that it can capture the true init "
+                            "state - all resources present at that moment. "
+                            "This option will be ignored if passed with "
+                            "--prefix.")
         return parser
 
     def get_description(self):
@@ -304,6 +356,7 @@
                   'is_dry_run': False,
                   'saved_state_json': data,
                   'is_preserve': False,
+                  'is_resource_list': False,
                   'is_save_state': True,
                   # must be None as we want to capture true init state
                   # (all resources present) thus no filtering based
@@ -326,15 +379,31 @@
             f.write(json.dumps(data, sort_keys=True,
                                indent=2, separators=(',', ': ')))
 
-    def _load_json(self, saved_state_json=SAVED_STATE_JSON):
+    def _load_resource_list(self, resource_list_json=RESOURCE_LIST_JSON):
+        try:
+            with open(resource_list_json, 'rb') as json_file:
+                self.resource_data = json.load(json_file)
+        except IOError as ex:
+            LOG.exception(
+                "Failed loading 'resource_list.json', please "
+                "be sure you created this file by setting config "
+                "option record_resources in default section to true "
+                "prior to running tempest. Exception: %s", ex)
+            sys.exit(ex)
+        except Exception as ex:
+            LOG.exception(
+                "Exception parsing 'resource_list.json' : %s", ex)
+            sys.exit(ex)
+
+    def _load_saved_state(self, saved_state_json=SAVED_STATE_JSON):
         try:
             with open(saved_state_json, 'rb') as json_file:
                 self.json_data = json.load(json_file)
-
         except IOError as ex:
-            LOG.exception("Failed loading saved state, please be sure you"
-                          " have first run cleanup with --init-saved-state "
-                          "flag prior to running tempest. Exception: %s", ex)
+            LOG.exception(
+                "Failed loading saved state, please be sure you"
+                " have first run cleanup with --init-saved-state "
+                "flag prior to running tempest. Exception: %s", ex)
             sys.exit(ex)
         except Exception as ex:
             LOG.exception("Exception parsing saved state json : %s", ex)
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index 8651ab0..b202940 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -120,6 +120,13 @@
                  if item['name'].startswith(self.prefix)]
         return items
 
+    def _filter_by_resource_list(self, item_list, attr):
+        if attr not in self.resource_list_json:
+            return []
+        items = [item for item in item_list if item['id']
+                 in self.resource_list_json[attr].keys()]
+        return items
+
     def _filter_out_ids_from_saved(self, item_list, attr):
         items = [item for item in item_list if item['id']
                  not in self.saved_state_json[attr].keys()]
@@ -166,8 +173,11 @@
     def list(self):
         client = self.client
         snaps = client.list_snapshots()['snapshots']
+
         if self.prefix:
             snaps = self._filter_by_prefix(snaps)
+        elif self.is_resource_list:
+            snaps = self._filter_by_resource_list(snaps, 'snapshots')
         elif not self.is_save_state:
             # recreate list removing saved snapshots
             snaps = self._filter_out_ids_from_saved(snaps, 'snapshots')
@@ -205,8 +215,11 @@
         client = self.client
         servers_body = client.list_servers()
         servers = servers_body['servers']
+
         if self.prefix:
             servers = self._filter_by_prefix(servers)
+        elif self.is_resource_list:
+            servers = self._filter_by_resource_list(servers, 'servers')
         elif not self.is_save_state:
             # recreate list removing saved servers
             servers = self._filter_out_ids_from_saved(servers, 'servers')
@@ -238,9 +251,12 @@
 
     def list(self):
         client = self.server_groups_client
-        sgs = client.list_server_groups()['server_groups']
+        sgs = client.list_server_groups(all_projects=True)['server_groups']
+
         if self.prefix:
             sgs = self._filter_by_prefix(sgs)
+        elif self.is_resource_list:
+            sgs = self._filter_by_resource_list(sgs, 'server_groups')
         elif not self.is_save_state:
             # recreate list removing saved server_groups
             sgs = self._filter_out_ids_from_saved(sgs, 'server_groups')
@@ -276,8 +292,13 @@
     def list(self):
         client = self.client
         keypairs = client.list_keypairs()['keypairs']
+
         if self.prefix:
             keypairs = self._filter_by_prefix(keypairs)
+        elif self.is_resource_list:
+            keypairs = [keypair for keypair in keypairs
+                        if keypair['keypair']['name']
+                        in self.resource_list_json['keypairs'].keys()]
         elif not self.is_save_state:
             # recreate list removing saved keypairs
             keypairs = [keypair for keypair in keypairs
@@ -317,8 +338,11 @@
     def list(self):
         client = self.client
         vols = client.list_volumes()['volumes']
+
         if self.prefix:
             vols = self._filter_by_prefix(vols)
+        elif self.is_resource_list:
+            vols = self._filter_by_resource_list(vols, 'volumes')
         elif not self.is_save_state:
             # recreate list removing saved volumes
             vols = self._filter_out_ids_from_saved(vols, 'volumes')
@@ -462,8 +486,11 @@
         client = self.networks_client
         networks = client.list_networks(**self.tenant_filter)
         networks = networks['networks']
+
         if self.prefix:
             networks = self._filter_by_prefix(networks)
+        elif self.is_resource_list:
+            networks = self._filter_by_resource_list(networks, 'networks')
         else:
             if not self.is_save_state:
                 # recreate list removing saved networks
@@ -500,15 +527,17 @@
 class NetworkFloatingIpService(BaseNetworkService):
 
     def list(self):
-        if self.prefix:
-            # this means we're cleaning resources based on a certain prefix,
-            # this resource doesn't have a name, therefore return empty list
-            return []
         client = self.floating_ips_client
         flips = client.list_floatingips(**self.tenant_filter)
         flips = flips['floatingips']
 
-        if not self.is_save_state:
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore return empty list
+            return []
+        elif self.is_resource_list:
+            flips = self._filter_by_resource_list(flips, 'floatingips')
+        elif not self.is_save_state:
             # recreate list removing saved flips
             flips = self._filter_out_ids_from_saved(flips, 'floatingips')
         LOG.debug("List count, %s Network Floating IPs", len(flips))
@@ -543,8 +572,11 @@
         client = self.routers_client
         routers = client.list_routers(**self.tenant_filter)
         routers = routers['routers']
+
         if self.prefix:
             routers = self._filter_by_prefix(routers)
+        elif self.is_resource_list:
+            routers = self._filter_by_resource_list(routers, 'routers')
         else:
             if not self.is_save_state:
                 # recreate list removing saved routers
@@ -592,16 +624,19 @@
 class NetworkMeteringLabelRuleService(NetworkService):
 
     def list(self):
-        if self.prefix:
-            # this means we're cleaning resources based on a certain prefix,
-            # this resource doesn't have a name, therefore return empty list
-            return []
         client = self.metering_label_rules_client
         rules = client.list_metering_label_rules()
         rules = rules['metering_label_rules']
         rules = self._filter_by_tenant_id(rules)
 
-        if not self.is_save_state:
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore return empty list
+            return []
+        elif self.is_resource_list:
+            rules = self._filter_by_resource_list(
+                rules, 'metering_label_rules')
+        elif not self.is_save_state:
             rules = self._filter_out_ids_from_saved(
                 rules, 'metering_label_rules')
             # recreate list removing saved rules
@@ -638,8 +673,12 @@
         labels = client.list_metering_labels()
         labels = labels['metering_labels']
         labels = self._filter_by_tenant_id(labels)
+
         if self.prefix:
             labels = self._filter_by_prefix(labels)
+        elif self.is_resource_list:
+            labels = self._filter_by_resource_list(
+                labels, 'metering_labels')
         elif not self.is_save_state:
             # recreate list removing saved labels
             labels = self._filter_out_ids_from_saved(
@@ -677,8 +716,11 @@
                  client.list_ports(**self.tenant_filter)['ports']
                  if port["device_owner"] == "" or
                  port["device_owner"].startswith("compute:")]
+
         if self.prefix:
             ports = self._filter_by_prefix(ports)
+        elif self.is_resource_list:
+            ports = self._filter_by_resource_list(ports, 'ports')
         else:
             if not self.is_save_state:
                 # recreate list removing saved ports
@@ -717,8 +759,12 @@
         secgroups = [secgroup for secgroup in
                      client.list_security_groups(**filter)['security_groups']
                      if secgroup['name'] != 'default']
+
         if self.prefix:
             secgroups = self._filter_by_prefix(secgroups)
+        elif self.is_resource_list:
+            secgroups = self._filter_by_resource_list(
+                secgroups, 'security_groups')
         else:
             if not self.is_save_state:
                 # recreate list removing saved security_groups
@@ -760,8 +806,11 @@
         client = self.subnets_client
         subnets = client.list_subnets(**self.tenant_filter)
         subnets = subnets['subnets']
+
         if self.prefix:
             subnets = self._filter_by_prefix(subnets)
+        elif self.is_resource_list:
+            subnets = self._filter_by_resource_list(subnets, 'subnets')
         else:
             if not self.is_save_state:
                 # recreate list removing saved subnets
@@ -797,8 +846,11 @@
     def list(self):
         client = self.subnetpools_client
         pools = client.list_subnetpools(**self.tenant_filter)['subnetpools']
+
         if self.prefix:
             pools = self._filter_by_prefix(pools)
+        elif self.is_resource_list:
+            pools = self._filter_by_resource_list(pools, 'subnetpools')
         else:
             if not self.is_save_state:
                 # recreate list removing saved subnet pools
@@ -838,13 +890,18 @@
         self.client = manager.regions_client
 
     def list(self):
+        client = self.client
+        regions = client.list_regions()
+
         if self.prefix:
             # this means we're cleaning resources based on a certain prefix,
             # this resource doesn't have a name, therefore return empty list
             return []
-        client = self.client
-        regions = client.list_regions()
-        if not self.is_save_state:
+        elif self.is_resource_list:
+            regions = self._filter_by_resource_list(
+                regions['regions'], 'regions')
+            return regions
+        elif not self.is_save_state:
             regions = self._filter_out_ids_from_saved(
                 regions['regions'], 'regions')
             LOG.debug("List count, %s Regions", len(regions))
@@ -884,8 +941,11 @@
     def list(self):
         client = self.client
         flavors = client.list_flavors({"is_public": None})['flavors']
+
         if self.prefix:
             flavors = self._filter_by_prefix(flavors)
+        elif self.is_resource_list:
+            flavors = self._filter_by_resource_list(flavors, 'flavors')
         else:
             if not self.is_save_state:
                 # recreate list removing saved flavors
@@ -932,8 +992,11 @@
             marker = urllib.parse_qs(parsed.query)['marker'][0]
             response = client.list_images(params={"marker": marker})
             images.extend(response['images'])
+
         if self.prefix:
             images = self._filter_by_prefix(images)
+        elif self.is_resource_list:
+            images = self._filter_by_resource_list(images, 'images')
         else:
             if not self.is_save_state:
                 images = self._filter_out_ids_from_saved(images, 'images')
@@ -974,6 +1037,8 @@
         users = self.client.list_users()['users']
         if self.prefix:
             users = self._filter_by_prefix(users)
+        elif self.is_resource_list:
+            users = self._filter_by_resource_list(users, 'users')
         else:
             if not self.is_save_state:
                 users = self._filter_out_ids_from_saved(users, 'users')
@@ -1015,8 +1080,11 @@
     def list(self):
         try:
             roles = self.client.list_roles()['roles']
+
             if self.prefix:
                 roles = self._filter_by_prefix(roles)
+            elif self.is_resource_list:
+                roles = self._filter_by_resource_list(roles, 'roles')
             elif not self.is_save_state:
                 # reconcile roles with saved state and never list admin role
                 roles = self._filter_out_ids_from_saved(roles, 'roles')
@@ -1056,8 +1124,11 @@
 
     def list(self):
         projects = self.client.list_projects()['projects']
+
         if self.prefix:
             projects = self._filter_by_prefix(projects)
+        elif self.is_resource_list:
+            projects = self._filter_by_resource_list(projects, 'projects')
         else:
             if not self.is_save_state:
                 projects = self._filter_out_ids_from_saved(
@@ -1099,8 +1170,11 @@
     def list(self):
         client = self.client
         domains = client.list_domains()['domains']
+
         if self.prefix:
             domains = self._filter_by_prefix(domains)
+        elif self.is_resource_list:
+            domains = self._filter_by_resource_list(domains, 'domains')
         elif not self.is_save_state:
             domains = self._filter_out_ids_from_saved(domains, 'domains')
         LOG.debug("List count, %s Domains after reconcile", len(domains))
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index a8aafe9..49fcaf2 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -424,7 +424,7 @@
 
 class _WebSocket(object):
     def __init__(self, client_socket, url):
-        """Contructor for the WebSocket wrapper to the socket."""
+        """Constructor for the WebSocket wrapper to the socket."""
         self._socket = client_socket
         # cached stream for early frames.
         self.cached_stream = b''
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index b0bf5b2..8d257b0 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -53,7 +53,7 @@
         # Check common headers for all HTTP methods.
         #
         # Please note that for 1xx and 204 responses Content-Length presence
-        # is not checked intensionally. According to RFC 7230 a server MUST
+        # is not checked intentionally. According to RFC 7230 a server MUST
         # NOT send the header in such responses. Thus, clients should not
         # depend on this header. However, the standard does not require them
         # to validate the server's behavior. We leverage that to not refuse
diff --git a/tempest/common/utils/__init__.py b/tempest/common/utils/__init__.py
index 0fa5ce4..0c510de 100644
--- a/tempest/common/utils/__init__.py
+++ b/tempest/common/utils/__init__.py
@@ -29,12 +29,7 @@
         'compute': CONF.service_available.nova,
         'image': CONF.service_available.glance,
         'volume': CONF.service_available.cinder,
-        # NOTE(masayukig): We have two network services which are neutron and
-        # nova-network. And we have no way to know whether nova-network is
-        # available or not. After the pending removal of nova-network from
-        # nova, we can treat the network/neutron case in the same manner as
-        # the other services.
-        'network': True,
+        'network': CONF.service_available.neutron,
         # NOTE(masayukig): Tempest tests always require the identity service.
         # So we should set this True here.
         'identity': True,
diff --git a/tempest/common/utils/linux/remote_client.py b/tempest/common/utils/linux/remote_client.py
index 0d93430..dd18190 100644
--- a/tempest/common/utils/linux/remote_client.py
+++ b/tempest/common/utils/linux/remote_client.py
@@ -182,6 +182,9 @@
     def umount(self, mount_path='/mnt'):
         self.exec_command('sudo umount %s' % mount_path)
 
+    def mkdir(self, dir_path):
+        self.exec_command('sudo mkdir -p %s' % dir_path)
+
     def make_fs(self, dev_name, fs='ext4'):
         cmd_mkfs = 'sudo mkfs -t %s /dev/%s' % (fs, dev_name)
         try:
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index ddc6047..e249f35 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -103,7 +103,8 @@
         old_task_state = task_state
 
 
-def wait_for_server_termination(client, server_id, ignore_error=False):
+def wait_for_server_termination(client, server_id, ignore_error=False,
+                                request_id=None):
     """Waits for server to reach termination."""
     try:
         body = client.show_server(server_id)['server']
@@ -126,9 +127,13 @@
                      '/'.join((server_status, str(task_state))),
                      time.time() - start_time)
         if server_status == 'ERROR' and not ignore_error:
-            raise lib_exc.DeleteErrorException(
-                "Server %s failed to delete and is in ERROR status" %
-                server_id)
+            details = ("Server %s failed to delete and is in ERROR status." %
+                       server_id)
+            if 'fault' in body:
+                details += ' Fault: %s.' % body['fault']
+            if request_id:
+                details += ' Server delete request ID: %s.' % request_id
+            raise lib_exc.DeleteErrorException(details, server_id=server_id)
 
         if server_status == 'SOFT_DELETED':
             # Soft-deleted instances need to be forcibly deleted to
@@ -606,7 +611,7 @@
     floating IPs.
     :param server: The server JSON dict on which to wait.
     :param floating_ip: The floating IP JSON dict on which to wait.
-    :param wait_for_disassociate: Boolean indiating whether to wait for
+    :param wait_for_disassociate: Boolean indicating whether to wait for
     disassociation instead of association.
     """
 
diff --git a/tempest/config.py b/tempest/config.py
index 8a532e9..8ddb5a4 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -595,18 +595,6 @@
                 help='Does the test environment support attaching a volume to '
                      'more than one instance? This depends on hypervisor and '
                      'volume backend/type and compute API version 2.60.'),
-    cfg.BoolOpt('xenapi_apis',
-                default=False,
-                help='Does the test environment support the XenAPI-specific '
-                     'APIs: os-agents, writeable server metadata and the '
-                     'resetNetwork server action? '
-                     'These were removed in Victoria alongside the XenAPI '
-                     'virt driver.',
-                deprecated_for_removal=True,
-                deprecated_reason="On Nova side, XenAPI virt driver and the "
-                                  "APIs that only worked with that driver "
-                                  "have been removed and there's nothing to "
-                                  "test after Ussuri."),
     cfg.BoolOpt('ide_bus',
                 default=True,
                 help='Does the test environment support attaching devices '
@@ -684,19 +672,19 @@
                                   'are current one. In future, Tempest will '
                                   'test v2 APIs only so this config option '
                                   'will be removed.'),
-    # Image import feature is setup in devstack victoria onwards.
-    # Once all stable branches setup the same via glance standalone
-    # mode or with uwsgi, we can remove this config option.
     cfg.BoolOpt('import_image',
-                default=False,
-                help="Is image import feature enabled"),
-    # NOTE(danms): Starting mid-Wallaby glance began enforcing the
-    # previously-informal requirement that os_glance_* properties are
-    # reserved for internal use. Thus, we can only run these checks
-    # if we know we are on a new enough glance.
+                default=True,
+                help="Is image import feature enabled",
+                deprecated_for_removal=True,
+                deprecated_reason='Issue with image import in WSGI mode was '
+                                  'fixed in Victoria, and this feature works '
+                                  'in any deployment architecture now.'),
     cfg.BoolOpt('os_glance_reserved',
-                default=False,
-                help="Should we check that os_glance namespace is reserved"),
+                default=True,
+                help="Should we check that os_glance namespace is reserved",
+                deprecated_for_removal=True,
+                deprecated_reason='os_glance namespace is always reserved '
+                                  'since Wallaby'),
     cfg.BoolOpt('manage_locations',
                 default=False,
                 help=('Is show_multiple_locations enabled in glance. '
@@ -1078,8 +1066,15 @@
                 default=True,
                 help='Does the cloud support extending the size of a volume '
                      'which has snapshot? Some drivers do not support this '
-                     'operation.')
-
+                     'operation.'),
+    cfg.StrOpt('volume_types_for_data_volume',
+               default=None,
+               help='Volume types used for data volumes. Multiple volume '
+                    'types can be assigned.'),
+    cfg.BoolOpt('enable_volume_image_dep_tests',
+                default=True,
+                help='Run tests for dependencies between images, volumes '
+                     'and instance snapshots')
 ]
 
 
@@ -1177,14 +1172,14 @@
     cfg.StrOpt('dhcp_client',
                default='udhcpc',
                choices=["udhcpc", "dhclient", "dhcpcd", ""],
-               help='DHCP client used by images to renew DCHP lease. '
+               help='DHCP client used by images to renew DHCP lease. '
                     'If left empty, update operation will be skipped. '
                     'Supported clients: "udhcpc", "dhclient", "dhcpcd"'),
     cfg.StrOpt('protocol',
                default='icmp',
                choices=('icmp', 'tcp', 'udp'),
                help='The protocol used in security groups tests to check '
-                    'connectivity.'),
+                    'connectivity.')
 ]
 
 
@@ -1196,7 +1191,7 @@
                 default=True,
                 help="Whether or not cinder is expected to be available"),
     cfg.BoolOpt('neutron',
-                default=False,
+                default=True,
                 help="Whether or not neutron is expected to be available"),
     cfg.BoolOpt('glance',
                 default=True,
@@ -1317,6 +1312,15 @@
                     "to cleanup only the resources that match the prefix. "
                     "Make sure this prefix does not match with the resource "
                     "name you do not want Tempest cleanup CLI to delete."),
+    cfg.BoolOpt('record_resources',
+                default=False,
+                help="Allows to record all resources created by Tempest. "
+                     "These resources are stored in file resource_list.json, "
+                     "which can be later used for resource deletion by "
+                     "command tempest cleanup. The resource_list.json file "
+                     "will be appended in case of multiple Tempest runs, "
+                     "so the file will contain a list of resources created "
+                     "during all Tempest runs."),
 ]
 
 _opts = [
diff --git a/tempest/lib/api_schema/response/compute/v2_80/__init__.py b/tempest/lib/api_schema/response/compute/v2_80/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_80/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_80/migrations.py b/tempest/lib/api_schema/response/compute/v2_80/migrations.py
new file mode 100644
index 0000000..f2fa008
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_80/migrations.py
@@ -0,0 +1,40 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_59 import migrations
+
+###########################################################################
+#
+# 2.80:
+#
+# The user_id and project_id values are now returned in the response body in
+# addition to the migration id for the following API responses:
+#
+# - GET /os-migrations
+#
+###########################################################################
+
+user_id = {'type': 'string'}
+project_id = {'type': 'string'}
+
+list_migrations = copy.deepcopy(migrations.list_migrations)
+
+list_migrations['response_body']['properties']['migrations']['items'][
+    'properties'].update({
+        'user_id': user_id,
+        'project_id': project_id
+    })
+
+list_migrations['response_body']['properties']['migrations']['items'][
+    'required'].extend(['user_id', 'project_id'])
diff --git a/tempest/lib/api_schema/response/compute/v2_89/__init__.py b/tempest/lib/api_schema/response/compute/v2_89/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_89/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_89/servers.py b/tempest/lib/api_schema/response/compute/v2_89/servers.py
new file mode 100644
index 0000000..debf0dc
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_89/servers.py
@@ -0,0 +1,84 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_79 import servers as servers279
+
+
+###########################################################################
+#
+# 2.89:
+#
+# The attachment_id and bdm_uuid parameters are now returned in the response
+# body of the following calls:
+#
+# - GET /servers/{server_id}/os-volume_attachments
+# - GET /servers/{server_id}/os-volume_attachments/{volume_id}
+# - POST /servers/{server_id}/os-volume_attachments
+###########################################################################
+
+attach_volume = copy.deepcopy(servers279.attach_volume)
+
+show_volume_attachment = copy.deepcopy(servers279.show_volume_attachment)
+
+list_volume_attachments = copy.deepcopy(servers279.list_volume_attachments)
+
+# Remove properties
+# 'id' is available until v2.88
+show_volume_attachment['response_body']['properties'][
+    'volumeAttachment']['properties'].pop('id')
+show_volume_attachment['response_body']['properties'][
+    'volumeAttachment']['required'].remove('id')
+list_volume_attachments['response_body']['properties'][
+    'volumeAttachments']['items']['properties'].pop('id')
+list_volume_attachments['response_body']['properties'][
+    'volumeAttachments']['items']['required'].remove('id')
+
+
+# Add new properties
+new_properties = {
+    'attachment_id': {'type': 'string', 'format': 'uuid'},
+    'bdm_uuid': {'type': 'string', 'format': 'uuid'}
+}
+
+show_volume_attachment['response_body']['properties'][
+    'volumeAttachment']['properties'].update(new_properties)
+show_volume_attachment['response_body']['properties'][
+    'volumeAttachment']['required'].extend(new_properties.keys())
+list_volume_attachments['response_body']['properties'][
+    'volumeAttachments']['items']['properties'].update(new_properties)
+list_volume_attachments['response_body']['properties'][
+    'volumeAttachments']['items']['required'].extend(new_properties.keys())
+
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.75 ***
+rebuild_server = copy.deepcopy(servers279.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+    servers279.rebuild_server_with_admin_pass)
+update_server = copy.deepcopy(servers279.update_server)
+get_server = copy.deepcopy(servers279.get_server)
+list_servers_detail = copy.deepcopy(servers279.list_servers_detail)
+list_servers = copy.deepcopy(servers279.list_servers)
+show_server_diagnostics = copy.deepcopy(servers279.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers279.get_remote_consoles)
+list_tags = copy.deepcopy(servers279.list_tags)
+update_all_tags = copy.deepcopy(servers279.update_all_tags)
+delete_all_tags = copy.deepcopy(servers279.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers279.check_tag_existence)
+update_tag = copy.deepcopy(servers279.update_tag)
+delete_tag = copy.deepcopy(servers279.delete_tag)
+show_instance_action = copy.deepcopy(servers279.show_instance_action)
+create_backup = copy.deepcopy(servers279.create_backup)
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
index 900e5ef..9b5dfda 100644
--- a/tempest/lib/api_schema/response/volume/volumes.py
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -236,7 +236,7 @@
     }
 }
 
-# TODO(zhufl): This is under discussion, so will be merged in a seperate patch.
+# TODO(zhufl): This is under discussion, so will be merged in a separate patch.
 # https://bugs.launchpad.net/cinder/+bug/1880566
 # upload_volume = {
 #     'status_code': [202],
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 466222d..af1112d 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -266,7 +266,7 @@
                   "groups! This is not valid according to the PEP8 "
                   "style guide. " % source_path)
 
-        # Divide grouped_imports into groupes based on PEP8 style guide
+        # Divide grouped_imports into groups based on PEP8 style guide
         pep8_groups = {}
         package_name = self.package.__name__.split(".")[0]
         for key in grouped_imports:
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 99647d4..6814373 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -51,7 +51,7 @@
     :param str identity_admin_role: The role name to use for admin
     :param list extra_roles: A list of strings for extra roles that should
                              be assigned to all created users
-    :param bool neutron_available: Whether we are running in an environemnt
+    :param bool neutron_available: Whether we are running in an environment
                                    with neutron
     :param bool create_networks: Whether dynamic project networks should be
                                  created or not
@@ -453,7 +453,7 @@
             # NOTE(gmann): For 'domain' and 'system' scoped token, there is no
             # project_id so we are skipping the network creation for both
             # scope.
-            # We need to create nework resource once per project.
+            # We need to create network resource once per project.
             if (not project_id and (not scope or scope == 'project')):
                 if (self.neutron_available and self.create_networks):
                     network, subnet, router = self._create_network_resources(
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index 6cf5b73..4f9e9ba 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -21,6 +21,7 @@
 import urllib
 import urllib3
 
+from fasteners import process_lock
 import jsonschema
 from oslo_log import log as logging
 from oslo_log import versionutils
@@ -78,6 +79,17 @@
     # The version of the API this client implements
     api_version = None
 
+    # Directory for storing read-write lock
+    lock_dir = None
+
+    # An interprocess lock used when the recording of all resources created by
+    # Tempest is allowed.
+    rec_rw_lock = None
+
+    # Variable mirrors value in config option 'record_resources' that allows
+    # the recording of all resources created by Tempest.
+    record_resources = False
+
     LOG = logging.getLogger(__name__)
 
     def __init__(self, auth_provider, service, region,
@@ -297,7 +309,13 @@
                  and the second the response body
         :rtype: tuple
         """
-        return self.request('POST', url, extra_headers, headers, body, chunked)
+        resp_header, resp_body = self.request(
+            'POST', url, extra_headers, headers, body, chunked)
+
+        if self.record_resources:
+            self.resource_record(resp_body)
+
+        return resp_header, resp_body
 
     def get(self, url, headers=None, extra_headers=False, chunked=False):
         """Send a HTTP GET request using keystone service catalog and auth
@@ -1006,6 +1024,66 @@
         """Returns the primary type of resource this client works with."""
         return 'resource'
 
+    def resource_update(self, data, res_type, res_dict):
+        """Updates resource_list.json file with current resource."""
+        if not isinstance(res_dict, dict):
+            return
+
+        if not res_type.endswith('s'):
+            res_type += 's'
+
+        if res_type not in data:
+            data[res_type] = {}
+
+        if 'uuid' in res_dict:
+            data[res_type].update(
+                {res_dict.get('uuid'): res_dict.get('name')})
+        elif 'id' in res_dict:
+            data[res_type].update(
+                {res_dict.get('id'): res_dict.get('name')})
+        elif 'name' in res_dict:
+            data[res_type].update({res_dict.get('name'): ""})
+
+        self.rec_rw_lock.acquire_write_lock()
+        with open("resource_list.json", 'w+') as f:
+            f.write(json.dumps(data, indent=2, separators=(',', ': ')))
+        self.rec_rw_lock.release_write_lock()
+
+    def resource_record(self, resp_dict):
+        """Records resources into resource_list.json file."""
+        if self.rec_rw_lock is None:
+            path = self.lock_dir
+            self.rec_rw_lock = (
+                process_lock.InterProcessReaderWriterLock(path)
+            )
+
+        self.rec_rw_lock.acquire_read_lock()
+        try:
+            with open('resource_list.json', 'rb') as f:
+                data = json.load(f)
+        except IOError:
+            data = {}
+        self.rec_rw_lock.release_read_lock()
+
+        try:
+            resp_dict = json.loads(resp_dict.decode('utf-8'))
+        except (AttributeError, TypeError, ValueError):
+            return
+
+        # check if response has any keys
+        if not resp_dict.keys():
+            return
+
+        resource_type = list(resp_dict.keys())[0]
+
+        resource_dict = resp_dict[resource_type]
+
+        if isinstance(resource_dict, list):
+            for resource in resource_dict:
+                self.resource_update(data, resource_type, resource)
+        else:
+            self.resource_update(data, resource_type, resource_dict)
+
     @classmethod
     def validate_response(cls, schema, resp, body):
         # Only check the response if the status code is a success code
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index 7d54c1a..144450b 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -198,7 +198,7 @@
     There are functions created as classmethod and the cleanup
     was managed by the class with addClassResourceCleanup,
     In case the function called from a class level (resource_setup) its ok
-    But when it is called from testcase level there is no reson to delete the
+    But when it is called from testcase level there is no reason to delete the
     resource when class tears down.
 
     The testcase results will not reflect the resources cleanup because test
diff --git a/tempest/lib/services/compute/migrations_client.py b/tempest/lib/services/compute/migrations_client.py
index 8a6e62a..d43fe83 100644
--- a/tempest/lib/services/compute/migrations_client.py
+++ b/tempest/lib/services/compute/migrations_client.py
@@ -21,6 +21,8 @@
     as schemav223
 from tempest.lib.api_schema.response.compute.v2_59 import migrations \
     as schemav259
+from tempest.lib.api_schema.response.compute.v2_80 import migrations \
+    as schemav280
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
 
@@ -29,7 +31,8 @@
     schema_versions_info = [
         {'min': None, 'max': '2.22', 'schema': schema},
         {'min': '2.23', 'max': '2.58', 'schema': schemav223},
-        {'min': '2.59', 'max': None, 'schema': schemav259}]
+        {'min': '2.59', 'max': '2.79', 'schema': schemav259},
+        {'min': '2.80', 'max': None, 'schema': schemav280}]
 
     def list_migrations(self, **params):
         """List all migrations.
diff --git a/tempest/lib/services/compute/server_groups_client.py b/tempest/lib/services/compute/server_groups_client.py
index 9895653..5c1e623 100644
--- a/tempest/lib/services/compute/server_groups_client.py
+++ b/tempest/lib/services/compute/server_groups_client.py
@@ -14,6 +14,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+from urllib import parse as urllib
+
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.api_schema.response.compute.v2_1 import server_groups \
@@ -55,9 +57,14 @@
         self.validate_response(schema.delete_server_group, resp, body)
         return rest_client.ResponseBody(resp, body)
 
-    def list_server_groups(self):
+    def list_server_groups(self, **params):
         """List the server-groups."""
-        resp, body = self.get("os-server-groups")
+
+        url = 'os-server-groups'
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+
+        resp, body = self.get(url)
         body = json.loads(body)
         schema = self.get_schema(self.schema_versions_info)
         self.validate_response(schema.list_server_groups, resp, body)
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 7e3b99f..1b93f91 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -43,6 +43,7 @@
 from tempest.lib.api_schema.response.compute.v2_75 import servers as schemav275
 from tempest.lib.api_schema.response.compute.v2_79 import servers as schemav279
 from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
+from tempest.lib.api_schema.response.compute.v2_89 import servers as schemav289
 from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
 from tempest.lib.common import rest_client
 from tempest.lib.services.compute import base_compute_client
@@ -73,7 +74,8 @@
         {'min': '2.71', 'max': '2.72', 'schema': schemav271},
         {'min': '2.73', 'max': '2.74', 'schema': schemav273},
         {'min': '2.75', 'max': '2.78', 'schema': schemav275},
-        {'min': '2.79', 'max': None, 'schema': schemav279}]
+        {'min': '2.79', 'max': '2.88', 'schema': schemav279},
+        {'min': '2.89', 'max': None, 'schema': schemav289}]
 
     def __init__(self, auth_provider, service, region,
                  enable_instance_password=True, **kwargs):
@@ -896,7 +898,11 @@
         API reference:
         https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
         """
-        if self.enable_instance_password:
+        api_version = self.get_headers().get(self.api_microversion_header_name)
+
+        if not api_version and self.enable_instance_password:
+            evacuate_schema = schema.evacuate_server_with_admin_pass
+        elif api_version < '2.14':
             evacuate_schema = schema.evacuate_server_with_admin_pass
         else:
             evacuate_schema = schema.evacuate_server
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index 0608d47..a6a1623 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -159,7 +159,7 @@
         """
         url = 'images/%s/file' % image_id
 
-        # We are going to do chunked transfert, so split the input data
+        # We are going to do chunked transfer, so split the input data
         # info fixed-sized chunks.
         headers = {'Content-Type': 'application/octet-stream'}
         data = iter(functools.partial(data.read, CHUNKSIZE), b'')
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index bdca0d0..47edf70 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -15,7 +15,6 @@
 
 from urllib import parse as urllib
 
-import debtcollector.moves
 from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
@@ -64,7 +63,7 @@
             delete_metadata=None,
             create_update_metadata_prefix='X-Container-Meta-',
             delete_metadata_prefix='X-Remove-Container-Meta-'):
-        """Creates, Updates or deletes an containter metadata entry.
+        """Creates, updates or deletes a container metadata entry.
 
         Container Metadata can be created, updated or deleted based on
         metadata header or value. For detailed info, please refer to the
@@ -85,11 +84,6 @@
         self.expected_success(204, resp.status)
         return resp, body
 
-    update_container_metadata = debtcollector.moves.moved_function(
-        create_update_or_delete_container_metadata,
-        'update_container_metadata', __name__,
-        version='Queens', removal_version='Rocky')
-
     def list_container_metadata(self, container_name):
         """List all container metadata."""
         url = str(container_name)
@@ -126,7 +120,3 @@
 
         self.expected_success([200, 204], resp.status)
         return resp, body
-
-    list_container_contents = debtcollector.moves.moved_function(
-        list_container_objects, 'list_container_contents', __name__,
-        version='Queens', removal_version='Rocky')
diff --git a/tempest/lib/services/placement/placement_client.py b/tempest/lib/services/placement/placement_client.py
index 216ac08..f272cbf 100644
--- a/tempest/lib/services/placement/placement_client.py
+++ b/tempest/lib/services/placement/placement_client.py
@@ -49,3 +49,39 @@
         self.expected_success(200, resp.status)
         body = json.loads(body)
         return rest_client.ResponseBody(resp, body)
+
+    def list_traits(self, **params):
+        """API ref https://docs.openstack.org/api-ref/placement/#traits
+        """
+        url = "/traits"
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def show_trait(self, name, **params):
+        url = "/traits"
+        if params:
+            url += '?%s' % urllib.urlencode(params)
+            resp, body = self.get(url)
+            body = json.loads(body)
+            self.expected_success(200, resp.status)
+            return rest_client.ResponseBody(resp, body)
+        url = f"{url}/{name}"
+        resp, _ = self.get(url)
+        self.expected_success(204, resp.status)
+        return resp.status
+
+    def create_trait(self, name, **params):
+        url = f"/traits/{name}"
+        json_body = json.dumps(params)
+        resp, _ = self.put(url, body=json_body)
+        return resp.status
+
+    def delete_trait(self, name):
+        url = f"/traits/{name}"
+        resp, _ = self.delete(url)
+        self.expected_success(204, resp.status)
+        return resp.status
diff --git a/tempest/lib/services/placement/resource_providers_client.py b/tempest/lib/services/placement/resource_providers_client.py
index 3214053..a336500 100644
--- a/tempest/lib/services/placement/resource_providers_client.py
+++ b/tempest/lib/services/placement/resource_providers_client.py
@@ -121,3 +121,29 @@
         resp, body = self.delete(url)
         self.expected_success(204, resp.status)
         return rest_client.ResponseBody(resp, body)
+
+    def list_resource_provider_traits(self, rp_uuid, **kwargs):
+        """https://docs.openstack.org/api-ref/placement/#resource-provider-traits
+        """
+        url = f"/resource_providers/{rp_uuid}/traits"
+        if kwargs:
+            url += '?%s' % urllib.urlencode(kwargs)
+        resp, body = self.get(url)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def update_resource_provider_traits(self, rp_uuid, **kwargs):
+        url = f"/resource_providers/{rp_uuid}/traits"
+        data = json.dumps(kwargs)
+        resp, body = self.put(url, data)
+        self.expected_success(200, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
+
+    def delete_resource_provider_traits(self, rp_uuid):
+        url = f"/resource_providers/{rp_uuid}/traits"
+        resp, body = self.delete(url)
+        self.expected_success(204, resp.status)
+        body = json.loads(body)
+        return rest_client.ResponseBody(resp, body)
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index c6f8973..95f3ffc 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -86,7 +86,7 @@
     def migrate_volume(self, volume_id, **kwargs):
         """Migrate a volume to a new backend
 
-        For a full list of available parameters please refer to the offical
+        For a full list of available parameters please refer to the official
         API reference:
 
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#migrate-a-volume
@@ -173,7 +173,7 @@
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         # TODO(zhufl): This is under discussion, so will be merged
-        # in a seperate patch.
+        # in a separate patch.
         # https://bugs.launchpad.net/cinder/+bug/1880566
         # self.validate_response(schema.upload_volume, resp, body)
         self.expected_success(202, resp.status)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 5f30909..714a7c7 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -751,6 +751,31 @@
 
         return rules
 
+    def create_and_add_security_group_to_server(self, server):
+        """Create a security group and add it to the server.
+
+        :param server: The server to add the security group to.
+        :raises lib_exc.TimeoutException: if the security group is not
+            visible on the server before the compute build timeout.
+        """
+
+        secgroup = self.create_security_group()
+        self.servers_client.add_security_group(server['id'],
+                                               name=secgroup['name'])
+        self.addCleanup(self.servers_client.remove_security_group,
+                        server['id'], name=secgroup['name'])
+
+        def wait_for_secgroup_add():
+            body = (self.servers_client.show_server(server['id'])
+                    ['server'])
+            return {'name': secgroup['name']} in body['security_groups']
+
+        if not test_utils.call_until_true(wait_for_secgroup_add,
+                                          CONF.compute.build_timeout,
+                                          CONF.compute.build_interval):
+            msg = ('Timed out waiting for adding security group %s to server '
+                   '%s' % (secgroup['id'], server['id']))
+            raise lib_exc.TimeoutException(msg)
+
     def get_remote_client(self, ip_address, username=None, private_key=None,
                           server=None):
         """Get a SSH client to a remote server
@@ -1177,6 +1202,15 @@
         self.assertIsNone(floating_ip['port_id'])
         return floating_ip
 
+    def create_file(self, ip_address, path, private_key=None, server=None,
+                    username=None):
+        """Create a file on a remote server"""
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server,
+                                            username=username)
+        ssh_client.exec_command('sudo mkdir -p %s' % path)
+
     def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                          private_key=None, server=None, username=None,
                          fs='vfat'):
@@ -1195,16 +1229,18 @@
         # dev_name to mount_path.
         target_dir = '/tmp'
         if dev_name is not None:
+            mount_path = os.path.join(mount_path, dev_name)
             ssh_client.make_fs(dev_name, fs=fs)
-            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
-                                                               mount_path))
+            ssh_client.mkdir(mount_path)
+            ssh_client.mount(dev_name, mount_path)
             target_dir = mount_path
+
         cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % target_dir
         ssh_client.exec_command(cmd_timestamp)
         timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
                                             % target_dir)
         if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
+            ssh_client.umount(mount_path)
         return timestamp
 
     def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
@@ -1232,12 +1268,14 @@
         # dev_name to mount_path.
         target_dir = '/tmp'
         if dev_name is not None:
+            mount_path = os.path.join(mount_path, dev_name)
+            ssh_client.mkdir(mount_path)
             ssh_client.mount(dev_name, mount_path)
             target_dir = mount_path
         timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
                                             % target_dir)
         if dev_name is not None:
-            ssh_client.exec_command('sudo umount %s' % mount_path)
+            ssh_client.umount(mount_path)
         return timestamp
 
     def get_server_ip(self, server, **kwargs):
diff --git a/tempest/scenario/test_instances_with_cinder_volumes.py b/tempest/scenario/test_instances_with_cinder_volumes.py
new file mode 100644
index 0000000..b9ac2c8
--- /dev/null
+++ b/tempest/scenario/test_instances_with_cinder_volumes.py
@@ -0,0 +1,218 @@
+# Copyright 2024 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from oslo_log import log as logging
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+from tempest.scenario import manager
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TestInstancesWithCinderVolumes(manager.ScenarioTest):
+    """This is a Cinder volumes test.
+
+    Tests are below:
+    * test_instances_with_cinder_volumes_on_all_compute_nodes
+    """
+
+    compute_min_microversion = '2.60'
+
+    @decorators.idempotent_id('d0e3c1a3-4b0a-4b0e-8b0a-4b0e8b0a4b0e')
+    @decorators.attr(type=['slow', 'multinode'])
+    @utils.services('compute', 'volume', 'image', 'network')
+    def test_instances_with_cinder_volumes_on_all_compute_nodes(self):
+        """Test instances with cinder volumes launches on all compute nodes
+
+        Steps:
+            1. Create an image
+            2. Create a keypair
+            3. Create a bootable volume from the image and of the given volume
+               type
+            4. Boot an instance from the bootable volume on each available
+               compute node, up to CONF.compute.min_compute_nodes
+            5. Create a volume using each volume_types_for_data_volume on all
+               available compute nodes, up to CONF.compute.min_compute_nodes.
+               Total number of volumes is equal to
+               compute nodes * len(volume_types_for_data_volume)
+            6. Attach volumes to the instances
+            7. Assign floating IP to all instances
+            8. Configure security group for ssh access to all instances
+            9. Confirm ssh access to all instances
+            10. Run write test to all volumes through ssh connection per
+                instance
+            11. Clean up the resources: instances, volumes, keypair and image
+        """
+        boot_volume_type = (CONF.volume.volume_type or
+                            self.create_volume_type()['name'])
+
+        # create an image
+        image = self.image_create()
+
+        # create keypair
+        keypair = self.create_keypair()
+
+        # check all available zones for booting instances
+        available_zone = \
+            self.os_admin.availability_zone_client.list_availability_zones(
+                detail=True)['availabilityZoneInfo']
+
+        hosts = []
+        for zone in available_zone:
+            if zone['zoneState']['available']:
+                for host in zone['hosts']:
+                    if 'nova-compute' in zone['hosts'][host] and \
+                        zone['hosts'][host]['nova-compute']['available'] and \
+                        not host.endswith('-ironic'):
+                        hosts.append({'zone': zone['zoneName'],
+                                      'host_name': host})
+
+        # fail if there are fewer hosts than the minimum number of instances
+        if len(hosts) < CONF.compute.min_compute_nodes:
+            raise exceptions.InvalidConfiguration(
+                "Host list %s is shorter than min_compute_nodes. " % hosts)
+
+        # get volume types
+        volume_types = []
+        if CONF.volume_feature_enabled.volume_types_for_data_volume:
+            types = CONF.volume_feature_enabled.volume_types_for_data_volume
+            volume_types = types.split(',')
+        else:
+            # no user specified volume types, create 2 default ones
+            volume_types.append(self.create_volume_type()['name'])
+            volume_types.append(self.create_volume_type()['name'])
+
+        hosts_to_boot_servers = hosts[:CONF.compute.min_compute_nodes]
+        LOG.debug("List of hosts selected to boot servers %s: ",
+                  hosts_to_boot_servers)
+
+        # create volumes up front so that we don't need to wait for each one
+        # to be created later, and save them in a list
+        created_volumes = []
+        for host in hosts_to_boot_servers:
+            for volume_type in volume_types:
+                created_volumes.append(
+                    self.create_volume(volume_type=volume_type,
+                                       wait_until=None)
+                )
+
+        bootable_volumes = []
+        for host in hosts_to_boot_servers:
+            # create boot volume from image and of the given volume type
+            bootable_volumes.append(
+                self.create_volume(
+                    imageRef=image, volume_type=boot_volume_type,
+                    wait_until=None)
+            )
+
+        # boot server
+        servers = []
+
+        for bootable_volume in bootable_volumes:
+
+            # wait for bootable volumes to become available
+            waiters.wait_for_volume_resource_status(
+                self.volumes_client, bootable_volume['id'], 'available')
+
+            # create an instance from bootable volume
+            server = self.boot_instance_from_resource(
+                source_id=bootable_volume['id'],
+                source_type='volume',
+                keypair=keypair,
+                wait_until=None
+            )
+            servers.append(server)
+
+        start = 0
+        end = len(volume_types)
+        for server in servers:
+            attached_volumes = []
+
+            # wait for server to become active
+            waiters.wait_for_server_status(self.servers_client,
+                                           server['id'], 'ACTIVE')
+
+            # attach volumes to the instances
+            for volume in created_volumes[start:end]:
+
+                # wait for volume to become available
+                waiters.wait_for_volume_resource_status(
+                    self.volumes_client, volume['id'], 'available')
+
+                attached_volume = self.nova_volume_attach(server, volume)
+                attached_volumes.append(attached_volume)
+                LOG.debug("Attached volume %s to server %s",
+                          attached_volume['id'], server['id'])
+
+            # assign floating ip
+            floating_ip = None
+            if (CONF.network_feature_enabled.floating_ips and
+                CONF.network.floating_network_name):
+                fip = self.create_floating_ip(server)
+                floating_ip = self.associate_floating_ip(
+                    fip, server)
+                ssh_ip = floating_ip['floating_ip_address']
+            else:
+                ssh_ip = self.get_server_ip(server)
+
+            # create security group
+            self.create_and_add_security_group_to_server(server)
+
+            # confirm ssh access
+            self.linux_client = self.get_remote_client(
+                ssh_ip, private_key=keypair['private_key'],
+                server=server
+            )
+
+            server_name = server['name'].split('-')[-1]
+
+            # run write test on all volumes
+            for volume in attached_volumes:
+
+                # volume['attachments'][0]['device'] is a path like
+                # /dev/vdb; strip the leading '/dev/' (first 5 chars)
+                # to get the bare device name
+                dev_name = volume['attachments'][0]['device'][5:]
+
+                mount_path = f"/mnt/{server_name}"
+
+                timestamp_before = self.create_timestamp(
+                    ssh_ip, private_key=keypair['private_key'], server=server,
+                    dev_name=dev_name, mount_path=mount_path,
+                )
+                timestamp_after = self.get_timestamp(
+                    ssh_ip, private_key=keypair['private_key'], server=server,
+                    dev_name=dev_name, mount_path=mount_path,
+                )
+                self.assertEqual(timestamp_before, timestamp_after)
+
+                # delete volume
+                self.nova_volume_detach(server, volume)
+                self.volumes_client.delete_volume(volume['id'])
+
+            if floating_ip:
+                # delete the floating IP, this should refresh the server
+                # addresses
+                self.disassociate_floating_ip(floating_ip)
+                waiters.wait_for_server_floating_ip(
+                    self.servers_client, server, floating_ip,
+                    wait_for_disassociate=True)
+
+            start += len(volume_types)
+            end += len(volume_types)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 6372c6b..543be31 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -19,9 +19,7 @@
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
-from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
-from tempest.lib import exceptions
 from tempest.scenario import manager
 
 CONF = config.CONF
@@ -73,25 +71,6 @@
         disks = self.linux_client.get_disks()
         self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
 
-    def create_and_add_security_group_to_server(self, server):
-        secgroup = self.create_security_group()
-        self.servers_client.add_security_group(server['id'],
-                                               name=secgroup['name'])
-        self.addCleanup(self.servers_client.remove_security_group,
-                        server['id'], name=secgroup['name'])
-
-        def wait_for_secgroup_add():
-            body = (self.servers_client.show_server(server['id'])
-                    ['server'])
-            return {'name': secgroup['name']} in body['security_groups']
-
-        if not test_utils.call_until_true(wait_for_secgroup_add,
-                                          CONF.compute.build_timeout,
-                                          CONF.compute.build_interval):
-            msg = ('Timed out waiting for adding security group %s to server '
-                   '%s' % (secgroup['id'], server['id']))
-            raise exceptions.TimeoutException(msg)
-
     @decorators.attr(type='slow')
     @decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
     @utils.services('compute', 'volume', 'image', 'network')
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 3a93f74..911ff42 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -20,6 +20,7 @@
 from tempest.common.utils import net_downtime
 from tempest.common import waiters
 from tempest import config
+from tempest.lib.common import api_version_request
 from tempest.lib import decorators
 from tempest.scenario import manager
 
@@ -193,8 +194,11 @@
         # check if microversion is less than 2.25 because of
         # disk_over_commit is depracted since compute api version 2.25
         # if min_microversion is None, it runs on version < 2.25
+        min_v = api_version_request.APIVersionRequest(
+            CONF.compute.min_microversion)
+        api_v = api_version_request.APIVersionRequest('2.25')
         if not migration and (CONF.compute.min_microversion is None or
-                              CONF.compute.min_microversion < '2.25'):
+                              min_v < api_v):
             migration_kwargs['disk_over_commit'] = False
 
         if dest_host:
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 7b819e0..fb68e46 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -179,8 +179,7 @@
     def _check_public_network_connectivity(
             self, should_connect=True, msg=None,
             should_check_floating_ip_status=True, mtu=None):
-        """Verifies connectivty to a VM via public network and floating IP
-
+        """Verifies connectivity to a VM via public network and floating IP
         and verifies floating IP has resource status is correct.
 
         :param should_connect: bool. determines if connectivity check is
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 92dbffb..e060b0f 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -137,7 +137,7 @@
 
         # Make sure the machine ssh-able before attaching the volume
         # Just a live machine is responding
-        # for device attache/detach as expected
+        # for device attach/detach as expected
         linux_client = self.get_remote_client(
             ip_for_snapshot, private_key=keypair['private_key'],
             server=server_from_snapshot)
diff --git a/tempest/test.py b/tempest/test.py
index a766367..85a6c36 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -31,7 +31,6 @@
 from tempest.lib.common import fixed_network
 from tempest.lib.common import profiler
 from tempest.lib.common import validation_resources as vr
-from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
 LOG = logging.getLogger(__name__)
@@ -39,11 +38,6 @@
 CONF = config.CONF
 
 
-attr = debtcollector.moves.moved_function(
-    decorators.attr, 'attr', __name__,
-    version='Pike', removal_version='?')
-
-
 at_exit_set = set()
 
 
@@ -646,7 +640,7 @@
         then be run.
 
         Cleanup functions are always called during the test class tearDown
-        fixture, even if an exception occured during setUp or tearDown.
+        fixture, even if an exception occurred during setUp or tearDown.
         """
         cls._class_cleanups.append((fn, arguments, keywordArguments))
 
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 1d69d9d..f2e809b 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -58,7 +58,7 @@
                 help="Whether or not my service is available")
 
             # Note: as long as the group is listed in get_opt_lists,
-            # it will be possible to access its optins in the plugin code
+            # it will be possible to access its options in the plugin code
             # via ("-" in the group name are replaces with "_"):
             #     CONF.my_service.<option_name>
             my_service_group = cfg.OptGroup(name="my-service",
diff --git a/tempest/tests/cmd/test_cleanup.py b/tempest/tests/cmd/test_cleanup.py
index 69e735b..3efc9bd 100644
--- a/tempest/tests/cmd/test_cleanup.py
+++ b/tempest/tests/cmd/test_cleanup.py
@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import json
 from unittest import mock
 
 from tempest.cmd import cleanup
@@ -20,12 +21,30 @@
 
 class TestTempestCleanup(base.TestCase):
 
-    def test_load_json(self):
+    def test_load_json_saved_state(self):
         # instantiate "empty" TempestCleanup
         c = cleanup.TempestCleanup(None, None, 'test')
         test_saved_json = 'tempest/tests/cmd/test_saved_state_json.json'
+        with open(test_saved_json, 'r') as f:
+            test_saved_json_content = json.load(f)
         # test if the file is loaded without any issues/exceptions
-        c._load_json(test_saved_json)
+        c.options = mock.Mock()
+        c.options.init_saved_state = True
+        c._load_saved_state(test_saved_json)
+        self.assertEqual(c.json_data, test_saved_json_content)
+
+    def test_load_json_resource_list(self):
+        # instantiate "empty" TempestCleanup
+        c = cleanup.TempestCleanup(None, None, 'test')
+        test_resource_list = 'tempest/tests/cmd/test_resource_list.json'
+        with open(test_resource_list, 'r') as f:
+            test_resource_list_content = json.load(f)
+        # test if the file is loaded without any issues/exceptions
+        c.options = mock.Mock()
+        c.options.init_saved_state = False
+        c.options.resource_list = True
+        c._load_resource_list(test_resource_list)
+        self.assertEqual(c.resource_data, test_resource_list_content)
 
     @mock.patch('tempest.cmd.cleanup.TempestCleanup.init')
     @mock.patch('tempest.cmd.cleanup.TempestCleanup._cleanup')
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 6b3b4b7..2557145 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -41,8 +41,10 @@
     def test_base_service_init(self):
         kwargs = {'data': {'data': 'test'},
                   'is_dry_run': False,
+                  'resource_list_json': {'resp': 'data'},
                   'saved_state_json': {'saved': 'data'},
                   'is_preserve': False,
+                  'is_resource_list': False,
                   'is_save_state': True,
                   'prefix': 'tempest',
                   'tenant_id': 'project_id',
@@ -50,8 +52,10 @@
         base = cleanup_service.BaseService(kwargs)
         self.assertEqual(base.data, kwargs['data'])
         self.assertFalse(base.is_dry_run)
+        self.assertEqual(base.resource_list_json, kwargs['resource_list_json'])
         self.assertEqual(base.saved_state_json, kwargs['saved_state_json'])
         self.assertFalse(base.is_preserve)
+        self.assertFalse(base.is_resource_list)
         self.assertTrue(base.is_save_state)
         self.assertEqual(base.tenant_filter['project_id'], kwargs['tenant_id'])
         self.assertEqual(base.got_exceptions, kwargs['got_exceptions'])
@@ -60,8 +64,10 @@
     def test_not_implemented_ex(self):
         kwargs = {'data': {'data': 'test'},
                   'is_dry_run': False,
+                  'resource_list_json': {'resp': 'data'},
                   'saved_state_json': {'saved': 'data'},
                   'is_preserve': False,
+                  'is_resource_list': False,
                   'is_save_state': False,
                   'prefix': 'tempest',
                   'tenant_id': 'project_id',
@@ -181,10 +187,20 @@
         "subnetpools": {'8acf64c1-43fc': 'saved-subnet-pool'},
         "regions": {'RegionOne': {}}
     }
+
+    resource_list = {
+        "keypairs": {'saved-key-pair': ""}
+    }
+
     # Mocked methods
     get_method = 'tempest.lib.common.rest_client.RestClient.get'
     delete_method = 'tempest.lib.common.rest_client.RestClient.delete'
     log_method = 'tempest.cmd.cleanup_service.LOG.exception'
+    filter_saved_state = 'tempest.cmd.cleanup_service.' \
+                         'BaseService._filter_out_ids_from_saved'
+    filter_resource_list = 'tempest.cmd.cleanup_service.' \
+                           'BaseService._filter_by_resource_list'
+    filter_prefix = 'tempest.cmd.cleanup_service.BaseService._filter_by_prefix'
     # Override parameters
     service_class = 'BaseService'
     response = None
@@ -192,17 +208,19 @@
 
     def _create_cmd_service(self, service_type, is_save_state=False,
                             is_preserve=False, is_dry_run=False,
-                            prefix=''):
+                            prefix='', is_resource_list=False):
         creds = fake_credentials.FakeKeystoneV3Credentials()
         os = clients.Manager(creds)
         return getattr(cleanup_service, service_type)(
             os,
+            is_resource_list=is_resource_list,
             is_save_state=is_save_state,
             is_preserve=is_preserve,
             is_dry_run=is_dry_run,
             prefix=prefix,
             project_id='b8e3ece07bb049138d224436756e3b57',
             data={},
+            resource_list_json=self.resource_list,
             saved_state_json=self.saved_state
             )
 
@@ -266,6 +284,38 @@
             self.assertNotIn(rsp['id'], self.conf_values.values())
             self.assertNotIn(rsp['name'], self.conf_values.values())
 
+    def _test_prefix_opt_precedence(self, delete_mock):
+        serv = self._create_cmd_service(
+            self.service_class, is_resource_list=True, prefix='tempest')
+        _, fixtures = self.run_function_with_mocks(
+            serv.run,
+            delete_mock
+        )
+
+        # Check that prefix was used for filtering
+        fixtures[2].mock.assert_called_once()
+
+        # Check that neither saved_state.json nor resource list was
+        # used for filtering
+        fixtures[0].mock.assert_not_called()
+        fixtures[1].mock.assert_not_called()
+
+    def _test_resource_list_opt_precedence(self, delete_mock):
+        serv = self._create_cmd_service(
+            self.service_class, is_resource_list=True)
+        _, fixtures = self.run_function_with_mocks(
+            serv.run,
+            delete_mock
+        )
+
+        # Check that resource list was used for filtering
+        fixtures[1].mock.assert_called_once()
+
+        # Check that neither saved_state.json nor prefix was
+        # used for filtering
+        fixtures[0].mock.assert_not_called()
+        fixtures[2].mock.assert_not_called()
+
 
 class TestSnapshotService(BaseCmdServiceTests):
 
@@ -320,6 +370,24 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestServerService(BaseCmdServiceTests):
 
@@ -378,6 +446,24 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestServerGroupService(BaseCmdServiceTests):
 
@@ -429,6 +515,26 @@
                                      (self.validate_response, 'validate', None)
                                      ])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.validate_response, 'validate', None),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.validate_response, 'validate', None),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestKeyPairService(BaseCmdServiceTests):
 
@@ -493,6 +599,33 @@
             (self.validate_response, 'validate', None)
         ])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.validate_response, 'validate', None),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.validate_response, 'validate', None),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        serv = self._create_cmd_service(
+            self.service_class, is_resource_list=True)
+
+        _, fixtures = self.run_function_with_mocks(
+            serv.delete,
+            delete_mock
+        )
+
+        # Check that prefix was not used for filtering
+        fixtures[0].mock.assert_not_called()
+
 
 class TestVolumeService(BaseCmdServiceTests):
 
@@ -542,6 +675,24 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestVolumeQuotaService(BaseCmdServiceTests):
 
@@ -761,6 +912,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkFloatingIpService(BaseCmdServiceTests):
 
@@ -823,6 +992,34 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        serv = self._create_cmd_service(
+            self.service_class, is_resource_list=True, prefix='tempest')
+        _, fixtures = self.run_function_with_mocks(
+            serv.run,
+            delete_mock
+        )
+
+        # cleanup returns []
+        fixtures[0].mock.assert_not_called()
+        fixtures[1].mock.assert_not_called()
+        fixtures[2].mock.assert_not_called()
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkRouterService(BaseCmdServiceTests):
 
@@ -937,6 +1134,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkMeteringLabelRuleService(BaseCmdServiceTests):
 
@@ -978,6 +1193,34 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        serv = self._create_cmd_service(
+            self.service_class, is_resource_list=True, prefix='tempest')
+        _, fixtures = self.run_function_with_mocks(
+            serv.run,
+            delete_mock
+        )
+
+        # cleanup returns []
+        fixtures[0].mock.assert_not_called()
+        fixtures[1].mock.assert_not_called()
+        fixtures[2].mock.assert_not_called()
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkMeteringLabelService(BaseCmdServiceTests):
 
@@ -1020,6 +1263,24 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkPortService(BaseCmdServiceTests):
 
@@ -1118,6 +1379,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkSecGroupService(BaseCmdServiceTests):
 
@@ -1196,6 +1475,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkSubnetService(BaseCmdServiceTests):
 
@@ -1272,6 +1569,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestNetworkSubnetPoolsService(BaseCmdServiceTests):
 
@@ -1340,6 +1655,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 # begin global services
 class TestRegionService(BaseCmdServiceTests):
@@ -1392,6 +1725,34 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        serv = self._create_cmd_service(
+            self.service_class, is_resource_list=True, prefix='tempest')
+        _, fixtures = self.run_function_with_mocks(
+            serv.run,
+            delete_mock
+        )
+
+        # cleanup returns []
+        fixtures[0].mock.assert_not_called()
+        fixtures[1].mock.assert_not_called()
+        fixtures[2].mock.assert_not_called()
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestDomainService(BaseCmdServiceTests):
 
@@ -1445,6 +1806,26 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None),
+                       (self.mock_update, 'update', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None),
+                       (self.mock_update, 'update', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestProjectsService(BaseCmdServiceTests):
 
@@ -1518,6 +1899,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestImagesService(BaseCmdServiceTests):
 
@@ -1597,6 +1996,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestFlavorService(BaseCmdServiceTests):
 
@@ -1670,6 +2087,24 @@
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestRoleService(BaseCmdServiceTests):
 
@@ -1716,6 +2151,24 @@
     def test_save_state(self):
         self._test_saved_state_true([(self.get_method, self.response, 200)])
 
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
+
 
 class TestUserService(BaseCmdServiceTests):
 
@@ -1782,3 +2235,21 @@
                 "password_expires_at": "1893-11-06T15:32:17.000000",
             })
         self._test_is_preserve_true([(self.get_method, self.response, 200)])
+
+    def test_prefix_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_prefix_opt_precedence(delete_mock)
+
+    def test_resource_list_opt_precedence(self):
+        delete_mock = [(self.filter_saved_state, [], None),
+                       (self.filter_resource_list, [], None),
+                       (self.filter_prefix, [], None),
+                       (self.get_method, self.response, 200),
+                       (self.delete_method, 'error', None),
+                       (self.log_method, 'exception', None)]
+        self._test_resource_list_opt_precedence(delete_mock)
diff --git a/tempest/tests/cmd/test_resource_list.json b/tempest/tests/cmd/test_resource_list.json
new file mode 100644
index 0000000..dfbc790
--- /dev/null
+++ b/tempest/tests/cmd/test_resource_list.json
@@ -0,0 +1,11 @@
+{
+  "project": {
+    "ce4e7edf051c439d8b81c4bfe581c5ef": "test"
+  },
+  "keypairs": {
+    "tempest-keypair-1215039183": ""
+  },
+  "users": {
+    "74463c83f9d640fe84c4376527ceff26": "test"
+  }
+}
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 8a1158d..154d8d1 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -252,7 +252,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_no_domain(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         expected_domain = 'my_domain'
         mock_auth_get_credentials.return_value = expected_result
@@ -272,7 +272,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_domain(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         expected_domain = 'my_domain'
         mock_auth_get_credentials.return_value = expected_result
@@ -291,7 +291,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_system(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         mock_auth_get_credentials.return_value = expected_result
         cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
diff --git a/tempest/tests/lib/common/test_rest_client.py b/tempest/tests/lib/common/test_rest_client.py
index 81a76e0..9bc6f60 100644
--- a/tempest/tests/lib/common/test_rest_client.py
+++ b/tempest/tests/lib/common/test_rest_client.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 import copy
+from unittest import mock
 
 import fixtures
 import jsonschema
@@ -749,6 +750,110 @@
                           expected_code, read_code)
 
 
+class TestRecordResources(BaseRestClientTestClass):
+
+    def setUp(self):
+        self.fake_http = fake_http.fake_httplib2()
+        super(TestRecordResources, self).setUp()
+
+    def _cleanup_test_resource_record(self):
+        # clear resource_list.json file
+        with open('resource_list.json', 'w') as f:
+            f.write('{}')
+
+    def test_post_record_resources(self):
+        self.rest_client.record_resources = True
+        __, return_dict = self.rest_client.post(self.url, {}, {})
+        self.assertEqual({}, return_dict['headers'])
+        self.assertEqual({}, return_dict['body'])
+
+    def test_resource_record_no_top_key(self):
+        test_body_no_key = b'{}'
+        self.rest_client.resource_record(test_body_no_key)
+
+    def test_resource_record_dict(self):
+        test_dict_body = b'{"project": {"id": "test-id", "name": ""}}\n'
+        self.rest_client.resource_record(test_dict_body)
+
+        with open('resource_list.json', 'r') as f:
+            content = f.read()
+            resource_list_content = json.loads(content)
+
+        test_resource_list = {
+            "projects": {"test-id": ""}
+        }
+        self.assertEqual(resource_list_content, test_resource_list)
+
+        # cleanup
+        self._cleanup_test_resource_record()
+
+    def test_resource_record_list(self):
+        test_list_body = '''{
+            "user": [
+                {
+                    "id": "test-uuid",
+                    "name": "test-name"
+                },
+                {
+                    "id": "test-uuid2",
+                    "name": "test-name2"
+                }
+            ]
+        }'''
+        test_list_body = test_list_body.encode('utf-8')
+        self.rest_client.resource_record(test_list_body)
+
+        with open('resource_list.json', 'r') as f:
+            content = f.read()
+            resource_list_content = json.loads(content)
+
+        test_resource_list = {
+            "users": {
+                "test-uuid": "test-name",
+                "test-uuid2": "test-name2"
+            }
+        }
+        self.assertEqual(resource_list_content, test_resource_list)
+
+        # cleanup
+        self._cleanup_test_resource_record()
+
+    def test_resource_update_id(self):
+        data = {}
+        res_dict = {'id': 'test-uuid', 'name': 'test-name'}
+
+        self.rest_client.rec_rw_lock = mock.MagicMock()
+        self.rest_client.resource_update(data, 'user', res_dict)
+        result = {'users': {'test-uuid': 'test-name'}}
+        self.assertEqual(data, result)
+
+    def test_resource_update_name(self):
+        data = {'keypairs': {}}
+        res_dict = {'name': 'test-keypair'}
+
+        self.rest_client.rec_rw_lock = mock.MagicMock()
+        self.rest_client.resource_update(data, 'keypair', res_dict)
+        result = {'keypairs': {'test-keypair': ""}}
+        self.assertEqual(data, result)
+
+    def test_resource_update_no_id(self):
+        data = {}
+        res_dict = {'type': 'test', 'description': 'example'}
+
+        self.rest_client.rec_rw_lock = mock.MagicMock()
+        self.rest_client.resource_update(data, 'projects', res_dict)
+        result = {'projects': {}}
+        self.assertEqual(data, result)
+
+    def test_resource_update_not_dict(self):
+        data = {}
+        res_dict = 'test-string'
+
+        self.rest_client.rec_rw_lock = mock.MagicMock()
+        self.rest_client.resource_update(data, 'user', res_dict)
+        self.assertEqual(data, {})
+
+
 class TestResponseBody(base.TestCase):
 
     def test_str(self):
diff --git a/tempest/tests/lib/common/utils/test_data_utils.py b/tempest/tests/lib/common/utils/test_data_utils.py
index a0267d0..06a7805 100644
--- a/tempest/tests/lib/common/utils/test_data_utils.py
+++ b/tempest/tests/lib/common/utils/test_data_utils.py
@@ -79,7 +79,7 @@
         self.assertEqual(len(actual), 3)
         self.assertRegex(actual, "[A-Za-z0-9~!@#%^&*_=+]{3}")
         actual2 = data_utils.rand_password(2)
-        # NOTE(masayukig): Originally, we checked that the acutal and actual2
+        # NOTE(masayukig): Originally, we checked that the actual and actual2
         # are different each other. But only 3 letters can be the same value
         # in a very rare case. So, we just check the length here, too,
         # just in case.
diff --git a/tempest/tests/lib/services/base.py b/tempest/tests/lib/services/base.py
index 924f9f2..fd4bc17 100644
--- a/tempest/tests/lib/services/base.py
+++ b/tempest/tests/lib/services/base.py
@@ -54,7 +54,7 @@
                  ``assert_called_once_with(foo='bar')`` is called.
                * If mock_args='foo' then ``assert_called_once_with('foo')``
                  is called.
-        :param resp_as_string: Whether response body is retruned as string.
+        :param resp_as_string: Whether response body is returned as string.
                This is for service client methods which return ResponseBodyData
                object.
         :param kwargs: kwargs that are passed to function.
diff --git a/tempest/tests/lib/services/placement/test_placement_client.py b/tempest/tests/lib/services/placement/test_placement_client.py
index 1396a85..bb57bb0 100644
--- a/tempest/tests/lib/services/placement/test_placement_client.py
+++ b/tempest/tests/lib/services/placement/test_placement_client.py
@@ -87,3 +87,77 @@
 
     def test_list_allocations_with_bytes_body(self):
         self._test_list_allocations(bytes_body=True)
+
+    FAKE_ALL_TRAITS = {
+        "traits": [
+            "CUSTOM_HW_FPGA_CLASS1",
+            "CUSTOM_HW_FPGA_CLASS2",
+            "CUSTOM_HW_FPGA_CLASS3"
+        ]
+    }
+
+    FAKE_ASSOCIATED_TRAITS = {
+        "traits": [
+            "CUSTOM_HW_FPGA_CLASS1",
+            "CUSTOM_HW_FPGA_CLASS2"
+        ]
+    }
+
+    def test_list_traits(self):
+        self.check_service_client_function(
+            self.client.list_traits,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ALL_TRAITS)
+
+        self.check_service_client_function(
+            self.client.list_traits,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ASSOCIATED_TRAITS,
+            **{
+                "associated": "true"
+            })
+
+        self.check_service_client_function(
+            self.client.list_traits,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ALL_TRAITS,
+            **{
+                "associated": "true",
+                "name": "startswith:CUSTOM_HW_FPGPA"
+            })
+
+    def test_show_traits(self):
+        self.check_service_client_function(
+            self.client.show_trait,
+            'tempest.lib.common.rest_client.RestClient.get',
+            204, status=204,
+            name="CUSTOM_HW_FPGA_CLASS1")
+
+        self.check_service_client_function(
+            self.client.show_trait,
+            'tempest.lib.common.rest_client.RestClient.get',
+            404, status=404,
+            # trait with this name does not exist
+            name="CUSTOM_HW_FPGA_CLASS4")
+
+    def test_create_traits(self):
+        self.check_service_client_function(
+            self.client.create_trait,
+            'tempest.lib.common.rest_client.RestClient.put',
+            204, status=204,
+            # try to create trait with existing name
+            name="CUSTOM_HW_FPGA_CLASS1")
+
+        self.check_service_client_function(
+            self.client.create_trait,
+            'tempest.lib.common.rest_client.RestClient.put',
+            201, status=201,
+            # create new trait
+            name="CUSTOM_HW_FPGA_CLASS4")
+
+    def test_delete_traits(self):
+        self.check_service_client_function(
+            self.client.delete_trait,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            204, status=204,
+            name="CUSTOM_HW_FPGA_CLASS1")
diff --git a/tempest/tests/lib/services/placement/test_resource_providers_client.py b/tempest/tests/lib/services/placement/test_resource_providers_client.py
index 2871395..399f323 100644
--- a/tempest/tests/lib/services/placement/test_resource_providers_client.py
+++ b/tempest/tests/lib/services/placement/test_resource_providers_client.py
@@ -204,3 +204,40 @@
 
     def test_show_resource_provider_usages_with_with_bytes_body(self):
         self._test_list_resource_provider_inventories(bytes_body=True)
+
+    FAKE_ALL_RESOURCE_PROVIDER_TRAITS = {
+        "resource_provider_generation": 0,
+        "traits": [
+            "CUSTOM_HW_FPGA_CLASS1",
+            "CUSTOM_HW_FPGA_CLASS2"
+        ]
+    }
+    FAKE_NEW_RESOURCE_PROVIDER_TRAITS = {
+        "resource_provider_generation": 1,
+        "traits": [
+            "CUSTOM_HW_FPGA_CLASS1",
+            "CUSTOM_HW_FPGA_CLASS2"
+        ]
+    }
+
+    def test_list_resource_provider_traits(self):
+        self.check_service_client_function(
+            self.client.list_resource_provider_traits,
+            'tempest.lib.common.rest_client.RestClient.get',
+            self.FAKE_ALL_RESOURCE_PROVIDER_TRAITS,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID)
+
+    def test_update_resource_provider_traits(self):
+        self.check_service_client_function(
+            self.client.update_resource_provider_traits,
+            'tempest.lib.common.rest_client.RestClient.put',
+            self.FAKE_NEW_RESOURCE_PROVIDER_TRAITS,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID,
+            **self.FAKE_NEW_RESOURCE_PROVIDER_TRAITS)
+
+    def test_delete_resource_provider_traits(self):
+        self.check_service_client_function(
+            self.client.delete_resource_provider_traits,
+            'tempest.lib.common.rest_client.RestClient.delete',
+            self.FAKE_ALL_RESOURCE_PROVIDER_TRAITS, status=204,
+            rp_uuid=self.FAKE_RESOURCE_PROVIDER_UUID)
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index 13870ba..0ba6ed3 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -162,7 +162,7 @@
 
         client = ssh.Client('localhost', 'root', timeout=timeout)
         # We need to mock LOG here because LOG.info() calls time.time()
-        # in order to preprend a timestamp.
+        # in order to prepend a timestamp.
         with mock.patch.object(ssh, 'LOG'):
             self.assertRaises(exceptions.SSHTimeout,
                               client._get_ssh_connection)
diff --git a/tox.ini b/tox.ini
index 51c38f2..e3c8fcf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -154,7 +154,7 @@
 sitepackages = {[tempestenv]sitepackages}
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
-# But exlcude the extra tests mentioned in tools/tempest-extra-tests-list.txt
+# But exclude the extra tests mentioned in tools/tempest-extra-tests-list.txt
 regex = '(^tempest\.scenario.*)|(^tempest\.serial_tests)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)'
 commands =
     find . -type f -name "*.pyc" -delete
@@ -197,7 +197,7 @@
 # tests listed in exclude-list file:
 commands =
     find . -type f -name "*.pyc" -delete
-    tempest run --regex {[testenv:integrated-compute]regex1} --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
+    tempest run --slowest --regex {[testenv:integrated-compute]regex1} --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
     tempest run --combine --serial --slowest --regex {[testenv:integrated-compute]regex2} --exclude-list ./tools/tempest-integrated-gate-compute-exclude-list.txt {posargs}
 
 [testenv:integrated-placement]
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 3b402c8..633f501 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -22,6 +22,8 @@
           $TEMPEST_CONFIG:
             compute:
               min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
+            service-clients:
+              http_timeout: 90
       test_results_stage_name: test_results
       zuul_copy_output:
         '/var/log/openvswitch': logs
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 596acb1..2fd6e36 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -17,6 +17,13 @@
         # TODO(gmann): Enable File injection tests once nova bug is fixed
         # https://bugs.launchpad.net/nova/+bug/1882421
         #   ENABLE_FILE_INJECTION: true
+      run_tempest_cleanup: true
+      run_tempest_cleanup_resource_list: true
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            DEFAULT:
+              record_resources: true
 
 - job:
     name: tempest-ipv6-only
@@ -24,7 +31,7 @@
     description: |
       Integration test of IPv6-only deployments. This job runs
       smoke and IPv6 relates tests only. Basic idea is to test
-      whether OpenStack Services listen on IPv6 addrress or not.
+      whether OpenStack Services listen on IPv6 address or not.
     timeout: 10800
     vars:
       tox_envlist: ipv6-only
@@ -37,6 +44,14 @@
       tools/tempest-extra-tests-list.txt.
     vars:
       tox_envlist: extra-tests
+      run_tempest_cleanup: true
+      run_tempest_cleanup_resource_list: true
+      run_tempest_dry_cleanup: true
+      devstack_local_conf:
+        test-config:
+          $TEMPEST_CONFIG:
+            DEFAULT:
+              record_resources: true
 
 - job:
     name: tempest-full-py3
@@ -73,7 +88,7 @@
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
-        # Enbale horizon so that we can run horizon test.
+        # Enable horizon so that we can run horizon test.
         horizon: true
 
 - job:
@@ -206,7 +221,7 @@
       tox_envlist: integrated-object-storage
       devstack_localrc:
         # NOTE(gmann): swift is not ready on python3 yet and devstack
-        # install it on python2.7 only. But settting the USE_PYTHON3
+        # install it on python2.7 only. But setting the USE_PYTHON3
         # for future once swift is ready on py3.
         USE_PYTHON3: true
 
@@ -228,7 +243,7 @@
     name: tempest-multinode-full-py3
     parent: tempest-multinode-full-base
     nodeset: openstack-two-node-jammy
-    # This job runs on ubuntu Jammy and after stable/zed.
+    # This job runs on Ubuntu Jammy and after unmaintained/zed.
     branches:
       regex: ^.*/(victoria|wallaby|xena|yoga|zed)$
       negate: true
@@ -374,15 +389,7 @@
       This job runs the Tempest tests with scope and new defaults enabled.
     vars:
       devstack_localrc:
-        # Enabeling the scope and new defaults for services.
-        # NOTE: (gmann) We need to keep keystone scope check disable as
-        # services (except ironic) does not support the system scope and
-        # they need keystone to continue working with project scope. Until
-        # Keystone policies are changed to work for both system as well as
-        # for project scoped, we need to keep scope check disable for
-        # keystone.
-        # Nova, Glance, and Neutron have enabled the new defaults and scope
-        # by default in devstack.
+        KEYSTONE_ENFORCE_SCOPE: true
         CINDER_ENFORCE_SCOPE: true
         PLACEMENT_ENFORCE_SCOPE: true
 
@@ -475,7 +482,7 @@
         # (on SLURP as well as non SLURP release) so we are adding grenade-skip-level-always
         # job in integrated gate and we do not need to update skip level job
         # here until Nova change the decision.
-        # This is added from 2023.2 relese cycle onwards so we need to use branch variant
+        # This is added from 2023.2 release cycle onwards so we need to use branch variant
         # to make sure we do not run this job on older than 2023.2 gate.
         - grenade-skip-level-always:
             branches:
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index e2505cb..c652fe0 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -39,7 +39,7 @@
         # those in respective stable branch gate.
         - tempest-full-2024-1:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-zed:
+        - tempest-full-2023-1:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
@@ -155,7 +155,7 @@
         - nova-live-migration:
             irrelevant-files: *tempest-irrelevant-files
         - ironic-tempest-bios-ipmi-direct-tinyipa:
-             irrelevant-files: *tempest-irrelevant-files
+            irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
         - nova-multi-cell
@@ -169,8 +169,6 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-all-rbac-old-defaults
         - tempest-full-parallel
-        - tempest-full-zed-extra-tests
-        - tempest-full-enforce-scope-new-defaults-zed
         - neutron-ovs-tempest-dvr-ha-multinode-full:
             irrelevant-files: *tempest-irrelevant-files
         - nova-tempest-v2-api:
@@ -192,15 +190,12 @@
         - tempest-full-2024-1
         - tempest-full-2023-2
         - tempest-full-2023-1
-        - tempest-full-zed
         - tempest-slow-2024-1
         - tempest-slow-2023-2
         - tempest-slow-2023-1
-        - tempest-slow-zed
         - tempest-full-2024-1-extra-tests
         - tempest-full-2023-2-extra-tests
         - tempest-full-2023-1-extra-tests
-        - tempest-full-zed-extra-tests
     periodic:
       jobs:
         - tempest-all
@@ -212,4 +207,3 @@
         - tempest-centos9-stream-fips
         - tempest-full-centos-9-stream
         - tempest-full-test-account-no-admin-py3
-        - tempest-full-enforce-scope-new-defaults-zed
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index a662685..9d69715 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -18,12 +18,6 @@
     override-checkout: stable/2023.1
 
 - job:
-    name: tempest-full-zed
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/zed
-
-- job:
     name: tempest-full-2024-1-extra-tests
     parent: tempest-extra-tests
     nodeset: openstack-single-node-jammy
@@ -42,12 +36,6 @@
     override-checkout: stable/2023.1
 
 - job:
-    name: tempest-full-zed-extra-tests
-    parent: tempest-extra-tests
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/zed
-
-- job:
     name: tempest-slow-2024-1
     parent: tempest-slow-py3
     nodeset: openstack-two-node-jammy
@@ -66,18 +54,6 @@
     override-checkout: stable/2023.1
 
 - job:
-    name: tempest-full-enforce-scope-new-defaults-zed
-    parent: tempest-full-enforce-scope-new-defaults
-    nodeset: openstack-single-node-focal
-    override-checkout: stable/zed
-
-- job:
-    name: tempest-slow-zed
-    parent: tempest-slow-py3
-    nodeset: openstack-two-node-focal
-    override-checkout: stable/zed
-
-- job:
     name: tempest-full-py3
     parent: devstack-tempest
     # This job version is to use the 'full' tox env which
@@ -103,14 +79,14 @@
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
-        # Enbale horizon so that we can run horizon test.
+        # Enable horizon so that we can run horizon test.
         horizon: true
 
 - job:
     name: tempest-multinode-full-py3
     parent: tempest-multinode-full
     nodeset: openstack-two-node-focal
-    # This job runs on Focal and supposed to run until stable/zed.
+    # This job runs on Focal and supposed to run until unmaintained/zed.
     branches:
       - ^.*/victoria
       - ^.*/wallaby
@@ -133,7 +109,7 @@
     name: tempest-multinode-full
     parent: tempest-multinode-full-base
     nodeset: openstack-two-node-focal
-    # This job runs on Focal and on python2. This is for stable/victoria to stable/zed.
+    # This job runs on Focal and on python2. This is for unmaintained/victoria to unmaintained/zed.
     branches:
       - ^.*/victoria
       - ^.*/wallaby