Merge "Add wait for location import task"
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
index 9c2d1ed..cea632b 100644
--- a/doc/source/stable_branch_support_policy.rst
+++ b/doc/source/stable_branch_support_policy.rst
@@ -23,7 +23,7 @@
 switch to running Tempest from a tag with support for the branch, or exclude
 a newly introduced test (if that is the cause of the issue). Tempest will not
 be creating stable branches to support *Extended Maintenance* phase branches, as
-the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
+the burden is on the *Extended Maintenance* phase branch maintainers, not the Tempest
 project, to support that branch.
 
 .. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index e97b6f4..968c821 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,6 +9,7 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* 2024.1
 * 2023.2
 * 2023.1
 * Zed
diff --git a/playbooks/devstack-tempest-ipv6.yaml b/playbooks/devstack-tempest-ipv6.yaml
index 568077e..89eec6d 100644
--- a/playbooks/devstack-tempest-ipv6.yaml
+++ b/playbooks/devstack-tempest-ipv6.yaml
@@ -17,6 +17,16 @@
     # fail early if anything missing the IPv6 settings or deployments.
     - devstack-ipv6-only-deployments-verification
   tasks:
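+    # Record the resources that already exist before Tempest runs so that a
+    # later cleanup or leak check can tell them apart from test leftovers.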
+    - name: Run tempest cleanup init-saved-state
+      include_role:
+        name: tempest-cleanup
+      vars:
+        init_saved_state: true
+      when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
+            (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
+            (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool) or
+            (run_tempest_cleanup_prefix is defined and run_tempest_cleanup_prefix | bool)
+
     - name: Run Tempest version <= 26.0.0
       include_role:
         name: run-tempest-26
@@ -30,3 +40,15 @@
       when:
         - zuul.branch is defined
         - zuul.branch not in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein"]
+
+    - name: Run tempest cleanup dry-run
+      include_role:
+        name: tempest-cleanup
+      vars:
+        dry_run: true
+      when: run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool
+
+    - name: Run tempest cleanup
+      include_role:
+        name: tempest-cleanup
+      when: run_tempest_cleanup is defined and run_tempest_cleanup | bool
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 6d70bc3..5fb1afc 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -32,7 +32,8 @@
         init_saved_state: true
       when: (run_tempest_dry_cleanup is defined and run_tempest_dry_cleanup | bool) or
             (run_tempest_cleanup is defined and run_tempest_cleanup | bool) or
-            (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool)
+            (run_tempest_fail_if_leaked_resources is defined and run_tempest_fail_if_leaked_resources | bool) or
+            (run_tempest_cleanup_prefix is defined and run_tempest_cleanup_prefix | bool)
 
     - name: Run Tempest version <= 26.0.0
       include_role:
diff --git a/releasenotes/notes/Allow-tempest-cleanup-delete-resources-based-on-prefix-96d9562f1f30e979.yaml b/releasenotes/notes/Allow-tempest-cleanup-delete-resources-based-on-prefix-96d9562f1f30e979.yaml
new file mode 100644
index 0000000..872f664
--- /dev/null
+++ b/releasenotes/notes/Allow-tempest-cleanup-delete-resources-based-on-prefix-96d9562f1f30e979.yaml
@@ -0,0 +1,10 @@
+---
+features:
+  - |
+    A new argument, ``--prefix``, has been added to the ``tempest cleanup``
+    tool, allowing users to delete only resources that match the prefix. When
+    this option is used, the ``saved_state.json`` file is not needed (no need
+    to run with ``--init-saved-state`` first). If there is one, it will be
+    ignored and the cleanup will be done based on the given prefix only.
+    Note that some resources are not named, thus they will not be deleted
+    when filtering based on the prefix.
diff --git a/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
new file mode 100644
index 0000000..30a2278
--- /dev/null
+++ b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    A new config option in the ``volume_feature_enabled`` section,
+    ``volume_types_for_data_volume``, is added to allow the user to specify
+    which volume types can be used for data volumes in a new test
+    ``test_instances_with_cinder_volumes_on_all_compute_nodes``. By default,
+    this option is set to None.
diff --git a/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml b/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml
new file mode 100644
index 0000000..48c1717
--- /dev/null
+++ b/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+  - |
+    The following deprecated alias methods of the ``ContainerClient`` class
+    have been removed.
+
+    - ``update_container_metadata``, replaced by ``create_update_or_delete_container_metadata``
+    - ``list_container_contents``, replaced by ``list_container_objects``
diff --git a/releasenotes/notes/cleanup-decorator-aliases-e940b6e114e6f481.yaml b/releasenotes/notes/cleanup-decorator-aliases-e940b6e114e6f481.yaml
new file mode 100644
index 0000000..fd4a546
--- /dev/null
+++ b/releasenotes/notes/cleanup-decorator-aliases-e940b6e114e6f481.yaml
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    The following decorators are no longer available in the ``tempest.test``
+    module. Use the ``tempest.common.utils`` module instead.
+
+    - ``services``
+    - ``requires_ext``
+    - ``is_extension_enabled``
diff --git a/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml b/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml
new file mode 100644
index 0000000..d408538
--- /dev/null
+++ b/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+  - |
+    The default value of the ``[image-feature-enabled] image_import`` option
+    has been changed from ``False`` to ``True``, and now the image import
+    feature is tested by default.
+
+deprecations:
+  - |
+    The ``[image-feature-enabled] image_import`` option has been deprecated.
+    The image import feature has worked in both standalone mode and WSGI mode
+    since Victoria, so the feature can always be tested.
diff --git a/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml b/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml
new file mode 100644
index 0000000..2834876
--- /dev/null
+++ b/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+  - |
+    The default value of the ``[image-feature-enabled] os_glance_reserved``
+    option has been changed from ``False`` to ``True`` and now the reservation
+    of the os_glance namespace is tested by default.
+
+deprecations:
+  - |
+    The ``[image-feature-enabled] os_glance_reserved`` option has been
+    deprecated because glance reserves the os_glance namespace since Wallaby.
diff --git a/releasenotes/notes/remove-dns_servers_option-f49fdb2b4eb50f8f.yaml b/releasenotes/notes/remove-dns_servers_option-f49fdb2b4eb50f8f.yaml
new file mode 100644
index 0000000..6be1db9
--- /dev/null
+++ b/releasenotes/notes/remove-dns_servers_option-f49fdb2b4eb50f8f.yaml
@@ -0,0 +1,4 @@
+---
+upgrade:
+  - |
+    The deprecated ``[network] dns_servers`` option has been removed.
diff --git a/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml b/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml
new file mode 100644
index 0000000..26da18c
--- /dev/null
+++ b/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+  - |
+    The deprecated ``[compute-feature-enabled] xenapi_apis`` option has been
+    removed.
diff --git a/releasenotes/notes/tempest-2024-1-release-d51f15c6bfe60b35.yaml b/releasenotes/notes/tempest-2024-1-release-d51f15c6bfe60b35.yaml
new file mode 100644
index 0000000..81d6a05
--- /dev/null
+++ b/releasenotes/notes/tempest-2024-1-release-d51f15c6bfe60b35.yaml
@@ -0,0 +1,17 @@
+---
+prelude: >
+    This release is to tag Tempest for OpenStack 2024.1 release.
+    This release marks the start of 2024.1 release support in Tempest.
+    After this release, Tempest will support the below OpenStack Releases:
+
+    * 2024.1
+    * 2023.2
+    * 2023.1
+    * Zed
+
+    Current development of Tempest is for the OpenStack 2024.2 development
+    cycle. Every Tempest commit is also tested against master during
+    the 2024.2 cycle. However, this does not necessarily mean that using
+    Tempest as of this tag will work against a 2024.2 (or future release)
+    cloud.
+    To be on the safe side, use this tag to test the OpenStack 2024.1 release.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 989d3b5..e3018b4 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,8 @@
    :maxdepth: 1
 
    unreleased
+   v38.0.0
+   v37.0.0
    v36.0.0
    v35.0.0
    v34.2.0
diff --git a/releasenotes/source/v37.0.0.rst b/releasenotes/source/v37.0.0.rst
new file mode 100644
index 0000000..72b8bc6
--- /dev/null
+++ b/releasenotes/source/v37.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v37.0.0 Release Notes
+=====================
+
+.. release-notes:: 37.0.0 Release Notes
+   :version: 37.0.0
diff --git a/releasenotes/source/v38.0.0.rst b/releasenotes/source/v38.0.0.rst
new file mode 100644
index 0000000..2664374
--- /dev/null
+++ b/releasenotes/source/v38.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v38.0.0 Release Notes
+=====================
+
+.. release-notes:: 38.0.0 Release Notes
+   :version: 38.0.0
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 04db849..c682641 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -81,7 +81,7 @@
 .. zuul:rolevar:: stable_constraints_file
    :default: ''
 
-   Upper constraints file to be used for stable branch till stable/victoria.
+   Upper constraints file to be used for stable branches till Wallaby.
 
 .. zuul:rolevar:: tempest_tox_environment
    :default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 3d78557..29409c0 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
     target_branch: "{{ zuul.override_checkout }}"
   when: zuul.override_checkout is defined
 
-- name: Use stable branch upper-constraints till stable/wallaby
+- name: Use stable branch upper-constraints till Wallaby
   set_fact:
     # TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
     tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
-  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "stable/victoria", "stable/wallaby"]
+  when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "unmaintained/victoria", "unmaintained/wallaby"]
 
 - name: Use Configured upper-constraints for non-master Tempest
   set_fact:
@@ -80,14 +80,14 @@
 
 - name: Tempest 26.1.0 workaround to fallback exclude-list to blacklist
   # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with
-  # stestr 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1
+  # stestr 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1
   # in stable/train|ussuri|victoria) which does not have new args exclude-list
   # so let's fallback to old arg if new arg is passed.
   set_fact:
     exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
   when:
     - tempest_test_exclude_list is defined
-    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
+    - target_branch in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
 
 # TODO(kopecmartin) remove this after all consumers of the role have switched
 # to tempest_exclude_regex option, until then it's kept here for the backward
@@ -105,11 +105,11 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch not in ["stable/train", "stable/ussuri", "stable/victoria"]
+    - target_branch not in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
 
 - name: Tempest 26.1.0 workaround to fallback exclude-regex to black-regex
   # NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with stestr
-  # 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1 in
+  # 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1 in
   # stable/train|ussuri|victoria) which does not have new args exclude-list so
   # let's fallback to old arg if new arg is passed.
   set_fact:
@@ -117,7 +117,7 @@
   when:
     - tempest_black_regex is not defined
     - tempest_exclude_regex is defined
-    - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
+    - target_branch in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
 
 - name: Run Tempest
   command: tox -e {{tox_envlist}} {{tox_extra_args}} -- \
diff --git a/roles/tempest-cleanup/README.rst b/roles/tempest-cleanup/README.rst
index d1fad90..d43319c 100644
--- a/roles/tempest-cleanup/README.rst
+++ b/roles/tempest-cleanup/README.rst
@@ -40,6 +40,12 @@
    some must have been leaked. This can be also used to verify that tempest
    cleanup was successful.
 
+.. zuul:rolevar:: run_tempest_cleanup_prefix
+   :default: false
+
+   When true, tempest cleanup will be called with '--prefix tempest' to delete
+   only resources with names that match the prefix. This option can be used
+   together with dry_run.
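+
+   For example, a job could enable prefix-based cleanup together with the
+   regular cleanup run roughly like this (a sketch; adapt to your job)::
+
+     vars:
+       run_tempest_cleanup: true
+       run_tempest_cleanup_prefix: true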
 
 Role usage
 ----------
diff --git a/roles/tempest-cleanup/defaults/main.yaml b/roles/tempest-cleanup/defaults/main.yaml
index ce78bdb..8060b29 100644
--- a/roles/tempest-cleanup/defaults/main.yaml
+++ b/roles/tempest-cleanup/defaults/main.yaml
@@ -2,3 +2,4 @@
 init_saved_state: false
 dry_run: false
 run_tempest_fail_if_leaked_resources: false
+run_tempest_cleanup_prefix: false
diff --git a/roles/tempest-cleanup/tasks/dry_run.yaml b/roles/tempest-cleanup/tasks/dry_run.yaml
index 46749ab..07e1b63 100644
--- a/roles/tempest-cleanup/tasks/dry_run.yaml
+++ b/roles/tempest-cleanup/tasks/dry_run.yaml
@@ -5,3 +5,12 @@
   command: tox -evenv-tempest -- tempest cleanup --dry-run --debug
   args:
     chdir: "{{ devstack_base_dir }}/tempest"
+  when: not run_tempest_cleanup_prefix
+
+- name: Run tempest cleanup dry-run with tempest prefix
+  become: yes
+  become_user: tempest
+  command: tox -evenv-tempest -- tempest cleanup --dry-run --debug --prefix tempest
+  args:
+    chdir: "{{ devstack_base_dir }}/tempest"
+  when: run_tempest_cleanup_prefix
\ No newline at end of file
diff --git a/roles/tempest-cleanup/tasks/main.yaml b/roles/tempest-cleanup/tasks/main.yaml
index c1d63f0..7ef4928 100644
--- a/roles/tempest-cleanup/tasks/main.yaml
+++ b/roles/tempest-cleanup/tasks/main.yaml
@@ -27,6 +27,15 @@
       command: tox -evenv-tempest -- tempest cleanup --debug
       args:
         chdir: "{{ devstack_base_dir }}/tempest"
+      when: not run_tempest_cleanup_prefix
+
+    - name: Run tempest cleanup with tempest prefix
+      become: yes
+      become_user: tempest
+      command: tox -evenv-tempest -- tempest cleanup --debug --prefix tempest
+      args:
+        chdir: "{{ devstack_base_dir }}/tempest"
+      when: run_tempest_cleanup_prefix
 
 - when:
     - run_tempest_fail_if_leaked_resources
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
deleted file mode 100644
index 8fc155b..0000000
--- a/tempest/api/compute/admin/test_agents.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2014 NEC Corporation.  All rights reserved.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License"); you may
-#    not use this file except in compliance with the License. You may obtain
-#    a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-#    License for the specific language governing permissions and limitations
-#    under the License.
-
-from tempest.api.compute import base
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-CONF = config.CONF
-
-
-# TODO(stephenfin): Remove these tests once the nova Ussuri branch goes EOL
-class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
-    """Tests Compute Agents API"""
-
-    @classmethod
-    def skip_checks(cls):
-        super(AgentsAdminTestJSON, cls).skip_checks()
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise cls.skipException('The os-agents API is not supported.')
-
-    @classmethod
-    def setup_clients(cls):
-        super(AgentsAdminTestJSON, cls).setup_clients()
-        cls.client = cls.os_admin.agents_client
-
-    @classmethod
-    def resource_setup(cls):
-        super(AgentsAdminTestJSON, cls).resource_setup()
-        cls.params_agent = cls._param_helper(
-            hypervisor='common', os='linux', architecture='x86_64',
-            version='7.0', url='xxx://xxxx/xxx/xxx',
-            md5hash='add6bb58e139be103324d04d82d8f545')
-
-    @staticmethod
-    def _param_helper(**kwargs):
-        rand_key = 'architecture'
-        if rand_key in kwargs:
-            # NOTE: The rand_name is for avoiding agent conflicts.
-            # If you try to create an agent with the same hypervisor,
-            # os and architecture as an existing agent, Nova will return
-            # an HTTPConflict or HTTPServerError.
-            kwargs[rand_key] = data_utils.rand_name(
-                prefix=CONF.resource_name_prefix,
-                name=kwargs[rand_key])
-        return kwargs
-
-    @decorators.idempotent_id('1fc6bdc8-0b6d-4cc7-9f30-9b04fabe5b90')
-    def test_create_agent(self):
-        """Test creating a compute agent"""
-        params = self._param_helper(
-            hypervisor='kvm', os='win', architecture='x86',
-            version='7.0', url='xxx://xxxx/xxx/xxx',
-            md5hash='add6bb58e139be103324d04d82d8f545')
-        body = self.client.create_agent(**params)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        for expected_item, value in params.items():
-            self.assertEqual(value, body[expected_item])
-
-    @decorators.idempotent_id('dc9ffd51-1c50-4f0e-a820-ae6d2a568a9e')
-    def test_update_agent(self):
-        """Test updating a compute agent"""
-        # Create and update an agent.
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        agent_id = body['agent_id']
-        params = self._param_helper(
-            version='8.0', url='xxx://xxxx/xxx/xxx2',
-            md5hash='add6bb58e139be103324d04d82d8f547')
-        body = self.client.update_agent(agent_id, **params)['agent']
-        for expected_item, value in params.items():
-            self.assertEqual(value, body[expected_item])
-
-    @decorators.idempotent_id('470e0b89-386f-407b-91fd-819737d0b335')
-    def test_delete_agent(self):
-        """Test deleting a compute agent"""
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.client.delete_agent(body['agent_id'])
-
-        # Verify the list doesn't contain the deleted agent.
-        agents = self.client.list_agents()['agents']
-        self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
-                                               agents))
-
-    @decorators.idempotent_id('6a326c69-654b-438a-80a3-34bcc454e138')
-    def test_list_agents(self):
-        """Test listing compute agents"""
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        agents = self.client.list_agents()['agents']
-        self.assertNotEmpty(agents, 'Cannot get any agents.')
-        self.assertIn(body['agent_id'], map(lambda x: x['agent_id'], agents))
-
-    @decorators.idempotent_id('eabadde4-3cd7-4ec4-a4b5-5a936d2d4408')
-    def test_list_agents_with_filter(self):
-        """Test listing compute agents by the filter"""
-        body = self.client.create_agent(**self.params_agent)['agent']
-        self.addCleanup(self.client.delete_agent, body['agent_id'])
-        params = self._param_helper(
-            hypervisor='xen', os='linux', architecture='x86',
-            version='7.0', url='xxx://xxxx/xxx/xxx1',
-            md5hash='add6bb58e139be103324d04d82d8f546')
-        agent_xen = self.client.create_agent(**params)['agent']
-        self.addCleanup(self.client.delete_agent, agent_xen['agent_id'])
-
-        agent_id_xen = agent_xen['agent_id']
-        agents = (self.client.list_agents(hypervisor=agent_xen['hypervisor'])
-                  ['agents'])
-        self.assertNotEmpty(agents, 'Cannot get any agents.')
-        self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
-        self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
-                                               agents))
-        for agent in agents:
-            self.assertEqual(agent_xen['hypervisor'], agent['hypervisor'])
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index be838fc..6c9aafb 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -207,15 +207,10 @@
         self.assertEqual(self.image_ref_alt, rebuilt_image_id)
 
     @decorators.idempotent_id('7a1323b4-a6a2-497a-96cb-76c07b945c71')
-    def test_reset_network_inject_network_info(self):
-        """Test resetting and injecting network info of a server"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'The resetNetwork server action is not supported.')
-
-        # Reset Network of a Server
+    def test_inject_network_info(self):
+        """Test injecting network info of a server"""
+        # Create a server
         server = self.create_test_server(wait_until='ACTIVE')
-        self.client.reset_network(server['id'])
         # Inject the Network Info into Server
         self.client.inject_network_info(server['id'])
 
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 013e7d8..b5ee9b1 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -24,7 +24,7 @@
 
 
 class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
-    """Test creating servers on mutiple nodes with scheduler_hints."""
+    """Test creating servers on multiple nodes with scheduler_hints."""
     @classmethod
     def resource_setup(cls):
         super(ServersOnMultiNodesTest, cls).resource_setup()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 2557e47..ed94af0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -410,7 +410,7 @@
         :param validatable: whether to the server needs to be
             validatable. When True, validation resources are acquired via
             the `get_class_validation_resources` helper.
-        :param kwargs: extra paramaters are passed through to the
+        :param kwargs: extra parameters are passed through to the
             `create_test_server` call.
         :return: the UUID of the created server.
         """
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 87cedae..d728853 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -71,7 +71,7 @@
             self.assertEqual(snapshot_name, image['name'])
         except lib_exceptions.TimeoutException as ex:
             # If timeout is reached, we don't need to check state,
-            # since, it wouldn't be a 'SAVING' state atleast and apart from
+            # since, it wouldn't be a 'SAVING' state at least and apart from
             # it, this testcase doesn't have scope for other state transition
             # Hence, skip the test.
             raise self.skipException("This test is skipped because " + str(ex))
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 275a26f..a245a8a 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -130,7 +130,7 @@
         except lib_exc.TimeoutException as ex:
             # Test cannot capture the image saving state.
             # If timeout is reached, we don't need to check state,
-            # since, it wouldn't be a 'SAVING' state atleast and apart from
+            # since, it wouldn't be a 'SAVING' state at least and apart from
             # it, this testcase doesn't have scope for other state transition
             # Hence, skip the test.
             raise self.skipException("This test is skipped because " + str(ex))
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 6664e15..b7db200 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -185,7 +185,7 @@
 
 
 class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
-    """Test creating server with FQDN hostname and verifying atrributes
+    """Test creating server with FQDN hostname and verifying attributes
 
     Starting Wallaby release, Nova sanitizes freeform characters in
     server hostname with dashes. This test verifies the same.
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 3a970dd..d2e2935 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -40,7 +40,7 @@
     @decorators.attr(type=['negative'])
     @decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
     def test_max_count_less_than_one(self):
-        """Test creating server with max_count < 1 shoudld fail"""
+        """Test creating server with max_count < 1 should fail"""
         invalid_max_count = 0
         self.assertRaises(lib_exc.BadRequest, self.create_test_server,
                           max_count=invalid_max_count)
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 9f93e76..5f35b15 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -27,13 +27,6 @@
     create_default_network = True
 
     @classmethod
-    def skip_checks(cls):
-        super(ServerMetadataTestJSON, cls).skip_checks()
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise cls.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
-    @classmethod
     def setup_clients(cls):
         super(ServerMetadataTestJSON, cls).setup_clients()
         cls.client = cls.servers_client
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 655909c..2059dfa 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -14,13 +14,10 @@
 #    under the License.
 
 from tempest.api.compute import base
-from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions as lib_exc
 
-CONF = config.CONF
-
 
 class ServerMetadataNegativeTestJSON(base.BaseV2ComputeTest):
     """Negative tests of server metadata"""
@@ -91,10 +88,6 @@
 
         Raise BadRequest if key in uri does not match the key passed in body.
         """
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'testkey': 'testvalue'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata_item,
@@ -104,10 +97,6 @@
     @decorators.idempotent_id('0df38c2a-3d4e-4db5-98d8-d4d9fa843a12')
     def test_set_metadata_non_existent_server(self):
         """Test setting metadata for a non existent server should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.NotFound,
@@ -119,10 +108,6 @@
     @decorators.idempotent_id('904b13dc-0ef2-4e4c-91cd-3b4a0f2f49d8')
     def test_update_metadata_non_existent_server(self):
         """Test updating metadata for a non existent server should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         non_existent_server_id = data_utils.rand_uuid()
         meta = {'key1': 'value1', 'key2': 'value2'}
         self.assertRaises(lib_exc.NotFound,
@@ -134,10 +119,6 @@
     @decorators.idempotent_id('a452f38c-05c2-4b47-bd44-a4f0bf5a5e48')
     def test_update_metadata_with_blank_key(self):
         """Test updating server metadata to blank key should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.update_server_metadata,
@@ -150,10 +131,6 @@
 
         Should not be able to delete metadata item from a non-existent server.
         """
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         non_existent_server_id = data_utils.rand_uuid()
         self.assertRaises(lib_exc.NotFound,
                           self.client.delete_server_metadata_item,
@@ -168,10 +145,6 @@
         A 403 Forbidden or 413 Overlimit (old behaviour) exception
         will be raised while exceeding metadata items limit for project.
         """
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         quota_set = self.quotas_client.show_quota_set(
             self.tenant_id)['quota_set']
         quota_metadata = quota_set['metadata_items']
@@ -196,10 +169,6 @@
     @decorators.idempotent_id('96100343-7fa9-40d8-80fa-d29ef588ce1c')
     def test_set_server_metadata_blank_key(self):
         """Test setting server metadata with blank key should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
@@ -209,10 +178,6 @@
     @decorators.idempotent_id('64a91aee-9723-4863-be44-4c9d9f1e7d0e')
     def test_set_server_metadata_missing_metadata(self):
         """Test setting server metadata without metadata field should fail"""
-        if not CONF.compute_feature_enabled.xenapi_apis:
-            raise self.skipException(
-                'Metadata is read-only on non-Xen-based deployments.')
-
         meta = {'meta1': 'data1'}
         self.assertRaises(lib_exc.BadRequest,
                           self.client.set_server_metadata,
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 97c2774..d6c0324 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -234,7 +234,7 @@
         and virtio as the rescue disk.
         """
         # This test just check detach fail and does not
-        # perfom the detach operation but in cleanup from
+        # perform the detach operation but in cleanup from
         # self.attach_volume() it will try to detach the server
         # after unrescue the server. Due to that we need to make
         # server SSHable before it try to detach, more details are
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 955ba1c..fd05ec6 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -139,7 +139,7 @@
         """Test detaching volume from a rescued server should fail"""
         volume = self.create_volume()
         # This test just check detach fail and does not
-        # perfom the detach operation but in cleanup from
+        # perform the detach operation but in cleanup from
         # self.attach_volume() it will try to detach the server
         # after unrescue the server. Due to that we need to make
         # server SSHable before it try to detach, more details are
diff --git a/tempest/api/image/v2/admin/test_image_caching.py b/tempest/api/image/v2/admin/test_image_caching.py
index 75369c9..333f946 100644
--- a/tempest/api/image/v2/admin/test_image_caching.py
+++ b/tempest/api/image/v2/admin/test_image_caching.py
@@ -37,13 +37,17 @@
         # NOTE(abhishekk): As caching is enabled instance boot or volume
         # boot or image download can also cache image, so we are going to
         # maintain our caching information to avoid disturbing other tests
-        self.cached_info = {}
+        self.cached_info = []
+        self.cached_info_remote = []
 
     def tearDown(self):
         # Delete all from cache/queue if we exit abruptly
         for image_id in self.cached_info:
-            self.os_admin.image_cache_client.cache_delete(
-                image_id)
+            self.os_admin.image_cache_client.cache_delete(image_id)
+
+        for image_id in self.cached_info_remote:
+            self.os_admin.image_cache_client.cache_delete(image_id)
+
         super(ImageCachingTest, self).tearDown()
 
     @classmethod
@@ -75,19 +79,13 @@
         image = self.client.show_image(image['id'])
         return image
 
-    def _assertCheckQueues(self, queued_images):
-        for image in self.cached_info:
-            if self.cached_info[image] == 'queued':
-                self.assertIn(image, queued_images)
-
-    def _assertCheckCache(self, cached_images):
+    def _assertCheckCache(self, cached_images, cached):
         cached_list = []
         for image in cached_images:
             cached_list.append(image['image_id'])
 
-        for image in self.cached_info:
-            if self.cached_info[image] == 'cached':
-                self.assertIn(image, cached_list)
+        for image in cached:
+            self.assertIn(image, cached_list)
 
     @decorators.idempotent_id('4bf6adba-2f9f-47e9-a6d5-37f21ad4387c')
     def test_image_caching_cycle(self):
@@ -97,10 +95,9 @@
         self.assertRaises(lib_exc.Forbidden,
                           self.os_primary.image_cache_client.list_cache)
 
-        # Check there is nothing is queued for cached by us
+        # Check there is nothing cached by us
         output = self.os_admin.image_cache_client.list_cache()
-        self._assertCheckQueues(output['queued_images'])
-        self._assertCheckCache(output['cached_images'])
+        self._assertCheckCache(output['cached_images'], self.cached_info)
 
         # Non-existing image should raise NotFound exception
         self.assertRaises(lib_exc.NotFound,
@@ -122,12 +119,6 @@
 
         # Queue image for caching
         self.os_admin.image_cache_client.cache_queue(image['id'])
-        self.cached_info[image['id']] = 'queued'
-        # Verify that we have 1 image for queueing and 0 for caching
-        output = self.os_admin.image_cache_client.list_cache()
-        self._assertCheckQueues(output['queued_images'])
-        self._assertCheckCache(output['cached_images'])
-
         # Wait for image caching
         LOG.info("Waiting for image %s to get cached", image['id'])
         caching = waiters.wait_for_caching(
@@ -135,10 +126,9 @@
             self.os_admin.image_cache_client,
             image['id'])
 
-        self.cached_info[image['id']] = 'cached'
-        # verify that we have image in cache and not in queued
-        self._assertCheckQueues(caching['queued_images'])
-        self._assertCheckCache(caching['cached_images'])
+        self.cached_info.append(image['id'])
+        # verify that we have image cached
+        self._assertCheckCache(caching['cached_images'], self.cached_info)
 
         # Verify that we can delete images from caching and queueing with
         # api call.
@@ -152,4 +142,78 @@
                           self.os_admin.image_cache_client.cache_clear,
                           target="invalid")
         # Remove all data from local information
-        self.cached_info = {}
+        self.cached_info = []
+
+    @decorators.idempotent_id('0a6b7e10-bc30-4a41-91ff-69fb4f5e65f2')
+    def test_remote_and_self_cache(self):
+        """Test image cache works with self and remote glance service"""
+        if not CONF.image.alternate_image_endpoint:
+            raise self.skipException('No image_remote service to test '
+                                     'against')
+
+        # Check there is nothing is cached by us on current and
+        # remote node
+        output = self.os_admin.image_cache_client.list_cache()
+        self._assertCheckCache(output['cached_images'], self.cached_info)
+
+        output = self.os_admin.cache_client_remote.list_cache()
+        self._assertCheckCache(output['cached_images'],
+                               self.cached_info_remote)
+
+        # Create one image
+        image = self.image_create_and_upload(name='first',
+                                             container_format='bare',
+                                             disk_format='raw',
+                                             visibility='private')
+        self.assertEqual('active', image['status'])
+
+        # Queue image for caching on local node
+        self.os_admin.image_cache_client.cache_queue(image['id'])
+        # Wait for image caching
+        LOG.info("Waiting for image %s to get cached", image['id'])
+        caching = waiters.wait_for_caching(
+            self.client,
+            self.os_admin.image_cache_client,
+            image['id'])
+        self.cached_info.append(image['id'])
+        # verify that we have image in cache on local node
+        self._assertCheckCache(caching['cached_images'], self.cached_info)
+        # verify that we don't have anything cached on remote node
+        output = self.os_admin.cache_client_remote.list_cache()
+        self._assertCheckCache(output['cached_images'],
+                               self.cached_info_remote)
+
+        # cache same image on remote node
+        self.os_admin.cache_client_remote.cache_queue(image['id'])
+        # Wait for image caching
+        LOG.info("Waiting for image %s to get cached", image['id'])
+        caching = waiters.wait_for_caching(
+            self.client,
+            self.os_admin.cache_client_remote,
+            image['id'])
+        self.cached_info_remote.append(image['id'])
+
+        # verify that we have image cached on remote node
+        output = self.os_admin.cache_client_remote.list_cache()
+        self._assertCheckCache(output['cached_images'],
+                               self.cached_info_remote)
+
+        # Verify that we can delete image from remote cache and it
+        # still present in local cache
+        self.os_admin.cache_client_remote.cache_clear()
+        output = self.os_admin.cache_client_remote.list_cache()
+        self.assertEqual(0, len(output['queued_images']))
+        self.assertEqual(0, len(output['cached_images']))
+
+        output = self.os_admin.image_cache_client.list_cache()
+        self._assertCheckCache(output['cached_images'], self.cached_info)
+
+        # Delete image from local cache as well
+        self.os_admin.image_cache_client.cache_clear()
+        output = self.os_admin.image_cache_client.list_cache()
+        self.assertEqual(0, len(output['queued_images']))
+        self.assertEqual(0, len(output['cached_images']))
+
+        # Remove all data from local and remote information
+        self.cached_info = []
+        self.cached_info_remote = []
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index 80c01a5..f0b891f 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -58,7 +58,10 @@
     def test_get_delete_deleted_image(self):
         """Get and delete the deleted image"""
         # create and delete image
-        image = self.client.create_image(name='test',
+        image_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix,
+            name="test")
+        image = self.client.create_image(name=image_name,
                                          container_format='bare',
                                          disk_format='raw')
         self.client.delete_image(image['id'])
@@ -111,7 +114,10 @@
     @decorators.idempotent_id('ab980a34-8410-40eb-872b-f264752f46e5')
     def test_delete_protected_image(self):
         """Create a protected image"""
-        image = self.create_image(protected=True)
+        image_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix,
+            name="test")
+        image = self.create_image(name=image_name, protected=True)
         self.addCleanup(self.client.update_image, image['id'],
                         [dict(replace="/protected", value=False)])
 
@@ -132,7 +138,10 @@
         if not CONF.image_feature_enabled.os_glance_reserved:
             raise self.skipException('os_glance_reserved is not enabled')
 
-        image = self.create_image(name='test',
+        image_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix,
+            name="test")
+        image = self.create_image(name=image_name,
                                   container_format='bare',
                                   disk_format='raw')
         self.assertRaises(lib_exc.Forbidden,
@@ -152,9 +161,12 @@
         if not CONF.image_feature_enabled.os_glance_reserved:
             raise self.skipException('os_glance_reserved is not enabled')
 
+        image_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix,
+            name="test")
         self.assertRaises(lib_exc.Forbidden,
                           self.create_image,
-                          name='test',
+                          name=image_name,
                           container_format='bare',
                           disk_format='raw',
                           os_glance_foo='bar')
@@ -195,7 +207,10 @@
         if 'web-download' not in self.available_import_methods:
             raise self.skipException('Server does not support '
                                      'web-download import method')
-        image = self.client.create_image(name='test',
+        image_name = data_utils.rand_name(
+            prefix=CONF.resource_name_prefix,
+            name="test")
+        image = self.client.create_image(name=image_name,
                                          container_format='bare',
                                          disk_format='raw')
         # Now try to get image details
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index 5c28e96..01dda06 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -108,7 +108,7 @@
         # both cases, with and without that "active" attribute, we need to
         # removes that field from the allowed_address_pairs which are returned
         # by the Neutron server.
-        # We could make expected results of those tests to be dependend on the
+        # We could make expected results of those tests to be dependent on the
         # available Neutron's API extensions but in that case existing tests
         # may fail randomly as all tests are always using same IP addresses
         # thus allowed_address_pair may be active=True or active=False.
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index e39ad08..07f0903 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -129,7 +129,7 @@
         self.assertIsNone(updated_floating_ip['fixed_ip_address'])
         self.assertIsNone(updated_floating_ip['router_id'])
 
-        # Explicity test deletion of floating IP
+        # Explicitly test deletion of floating IP
         self.floating_ips_client.delete_floatingip(created_floating_ip['id'])
 
     @decorators.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index bd3e360..a0c6342 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -118,7 +118,7 @@
     @classmethod
     def skip_checks(cls):
         super(TagsExtTest, cls).skip_checks()
-        # Added condition to support backward compatiblity since
+        # Added condition to support backward compatibility since
         # tag-ext has been renamed to standard-attr-tag
         if not (utils.is_extension_enabled('tag-ext', 'network') or
                 utils.is_extension_enabled('standard-attr-tag', 'network')):
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index e2c9d54..2524def 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -142,7 +142,7 @@
         """Test container synchronization"""
         def make_headers(cont, cont_client):
             # tell first container to synchronize to a second
-            # use rsplit with a maxsplit of 1 to ensure ipv6 adresses are
+            # use rsplit with a maxsplit of 1 to ensure ipv6 addresses are
             # handled properly as well
             client_proxy_ip = urlparse.urlparse(
                 cont_client.base_url).netloc.rsplit(':', 1)[0]
diff --git a/tempest/api/volume/test_volumes_backup.py b/tempest/api/volume/test_volumes_backup.py
index bfe962a..a3ba974 100644
--- a/tempest/api/volume/test_volumes_backup.py
+++ b/tempest/api/volume/test_volumes_backup.py
@@ -172,6 +172,52 @@
 
         self.assertTrue(restored_volume_info['bootable'])
 
+    @decorators.idempotent_id('f86eff09-2a6d-43c1-905e-8079e5754f1e')
+    @utils.services('compute')
+    @decorators.related_bug('1703011')
+    def test_volume_backup_incremental(self):
+        """Test create a backup when latest incremental backup is deleted"""
+        # Create a volume
+        volume = self.create_volume()
+
+        # Create a server
+        server = self.create_server(wait_until='SSHABLE')
+
+        # Attach volume to the server
+        self.attach_volume(server['id'], volume['id'])
+
+        # Create a backup to the attached volume
+        backup1 = self.create_backup(volume['id'], force=True)
+
+        # Validate backup details
+        backup_info = self.backups_client.show_backup(backup1['id'])['backup']
+        self.assertEqual(False, backup_info['has_dependent_backups'])
+        self.assertEqual(False, backup_info['is_incremental'])
+
+        # Create another incremental backup
+        backup2 = self.backups_client.create_backup(
+            volume_id=volume['id'], incremental=True, force=True)['backup']
+        waiters.wait_for_volume_resource_status(self.backups_client,
+                                                backup2['id'], 'available')
+
+        # Validate incremental backup details
+        backup2_info = self.backups_client.show_backup(backup2['id'])['backup']
+        self.assertEqual(True, backup2_info['is_incremental'])
+        self.assertEqual(False, backup2_info['has_dependent_backups'])
+
+        # Delete the last incremental backup that was created
+        self.backups_client.delete_backup(backup2['id'])
+        self.backups_client.wait_for_resource_deletion(backup2['id'])
+
+        # Create another incremental backup
+        backup3 = self.create_backup(
+            volume_id=volume['id'], incremental=True, force=True)
+
+        # Validate incremental backup details
+        backup3_info = self.backups_client.show_backup(backup3['id'])['backup']
+        self.assertEqual(True, backup3_info['is_incremental'])
+        self.assertEqual(False, backup3_info['has_dependent_backups'])
+
 
 class VolumesBackupsV39Test(base.BaseVolumeTest):
     """Test volumes backup with volume microversion greater than 3.8"""
diff --git a/tempest/clients.py b/tempest/clients.py
index 5b31cf8..5338ed4 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -104,6 +104,15 @@
                 service=CONF.image.alternate_image_endpoint,
                 endpoint_type=CONF.image.alternate_image_endpoint_type,
                 region=CONF.image.region)
+            # NOTE(abhishekk): If no alternate endpoint is configured,
+            # this client will work the same as the base
+            # self.image_cache_client. If your test needs to know if
+            # these are different, check the config option to see if
+            # the alternate_image_endpoint is set.
+            self.cache_client_remote = self.image_v2.ImageCacheClient(
+                service=CONF.image.alternate_image_endpoint,
+                endpoint_type=CONF.image.alternate_image_endpoint_type,
+                region=CONF.image.region)
 
     def _set_compute_clients(self):
         self.agents_client = self.compute.AgentsClient()
diff --git a/tempest/cmd/cleanup.py b/tempest/cmd/cleanup.py
index a8a344a..2a406de 100644
--- a/tempest/cmd/cleanup.py
+++ b/tempest/cmd/cleanup.py
@@ -28,6 +28,10 @@
 
 .. warning::
 
+    We advise not to run tempest cleanup on production environments.
+
+.. warning::
+
     If step 1 is skipped in the example below, the cleanup procedure
     may delete resources that existed in the cloud before the test run. This
     may cause an unwanted destruction of cloud resources, so use caution with
@@ -45,7 +49,10 @@
 * ``--init-saved-state``: Initializes the saved state of the OpenStack
   deployment and will output a ``saved_state.json`` file containing resources
   from your deployment that will be preserved from the cleanup command. This
-  should be done prior to running Tempest tests.
+  should be done prior to running Tempest tests. Note that if other users of
+  your cloud create resources after ``--init-saved-state`` has been run, those
+  resources will not be protected, as they will not be present in the
+  saved_state.json file.
 
 * ``--delete-tempest-conf-objects``: If option is present, then the command
   will delete the admin project in addition to the resources associated with
@@ -58,7 +65,27 @@
   global objects that will be removed (domains, flavors, images, roles,
   projects, and users). Once the cleanup command is executed (e.g. run without
   parameters), running it again with ``--dry-run`` should yield an empty
-  report.
+  report. We STRONGLY ENCOURAGE you to run ``tempest cleanup`` with
+  ``--dry-run`` first and then verify that the resources listed in the
+  ``dry_run.json`` file are meant to be deleted.
+
+* ``--prefix``: Only resources that match the prefix will be deleted. When this
+  option is used, ``saved_state.json`` file is not needed (no need to run with
+  ``--init-saved-state`` first).
+
+  All tempest resources are created with the prefix value from the config
+  option ``resource_name_prefix`` in tempest.conf. To clean up only the
+  resources created by tempest, you should use the prefix set in your
+  tempest.conf (the default value of ``resource_name_prefix`` is ``tempest``).
+
+  Note that some resources are not named, thus they will not be deleted when
+  filtering based on the prefix. This option will be ignored when
+  ``--init-saved-state`` is used so that it can capture the true init state -
+  all resources present at that moment. If there is any ``saved_state.json``
+  file present (e.g. if you ran the tempest cleanup with ``--init-saved-state``
+  before) and you run the tempest cleanup with ``--prefix``, the
+  ``saved_state.json`` file will be ignored and cleanup will be done based on
+  the passed prefix only.
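+
+  For example, assuming ``resource_name_prefix`` in tempest.conf is left at
+  its default value of ``tempest``, a prefix based cleanup could be previewed
+  and then executed as follows::
+
+    $ tempest cleanup --dry-run --prefix tempest
+    $ tempest cleanup --prefix tempest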
 
 * ``--help``: Print the help text for the command and parameters.
 
@@ -157,6 +184,7 @@
         is_dry_run = self.options.dry_run
         is_preserve = not self.options.delete_tempest_conf_objects
         is_save_state = False
+        cleanup_prefix = self.options.prefix
 
         if is_dry_run:
             self.dry_run_data["_projects_to_clean"] = {}
@@ -168,7 +196,8 @@
                   'is_dry_run': is_dry_run,
                   'saved_state_json': self.json_data,
                   'is_preserve': False,
-                  'is_save_state': is_save_state}
+                  'is_save_state': is_save_state,
+                  'prefix': cleanup_prefix}
         project_service = cleanup_service.ProjectService(admin_mgr, **kwargs)
         projects = project_service.list()
         LOG.info("Processing %s projects", len(projects))
@@ -182,6 +211,7 @@
                   'saved_state_json': self.json_data,
                   'is_preserve': is_preserve,
                   'is_save_state': is_save_state,
+                  'prefix': cleanup_prefix,
                   'got_exceptions': self.GOT_EXCEPTIONS}
         LOG.info("Processing global services")
         for service in self.global_services:
@@ -206,6 +236,7 @@
         project_id = project['id']
         project_name = project['name']
         project_data = None
+        cleanup_prefix = self.options.prefix
         if is_dry_run:
             project_data = dry_run_data["_projects_to_clean"][project_id] = {}
             project_data['name'] = project_name
@@ -216,6 +247,7 @@
                   'is_preserve': is_preserve,
                   'is_save_state': False,
                   'project_id': project_id,
+                  'prefix': cleanup_prefix,
                   'got_exceptions': self.GOT_EXCEPTIONS}
         for service in self.project_associated_services:
             svc = service(self.admin_mgr, **kwargs)
@@ -243,10 +275,26 @@
                             help="Generate JSON file:" + DRY_RUN_JSON +
                             ", that reports the objects that would have "
                             "been deleted had a full cleanup been run.")
+        parser.add_argument('--prefix', dest='prefix', default=None,
+                            help="Only resources that match the prefix will "
+                            "be deleted (resources in saved_state.json are "
+                            "not taken into account). All tempest resources "
+                            "are created with the prefix value set by "
+                            "resource_name_prefix in tempest.conf, default "
+                            "prefix is tempest. Note that some resources are "
+                            "not named, thus they will not be deleted when "
+                            "filtering based on the prefix. This opt will be "
+                            "ignored when --init-saved-state is used so that "
+                            "it can capture the true init state - all "
+                            "resources present at that moment.")
         return parser
 
     def get_description(self):
-        return 'Cleanup after tempest run'
+        return ('tempest cleanup tool, read the full documentation before '
+                'using this tool. We advise not to run it in production '
+                'environments. In environments where other users may also '
+                'create resources, we strongly advise using the --dry-run '
+                'argument first and verifying the content of dry_run.json.')
 
     def _init_state(self):
         LOG.info("Initializing saved state.")
@@ -257,6 +305,10 @@
                   'saved_state_json': data,
                   'is_preserve': False,
                   'is_save_state': True,
+                  # must be None as we want to capture true init state
+                  # (all resources present) thus no filtering based
+                  # on the prefix
+                  'prefix': None,
                   'got_exceptions': self.GOT_EXCEPTIONS}
         for service in self.global_services:
             svc = service(admin_mgr, **kwargs)
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index f2370f3..8651ab0 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -115,6 +115,16 @@
         return [item for item in item_list
                 if item['tenant_id'] == self.tenant_id]
 
+    def _filter_by_prefix(self, item_list):
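+        # keep only items whose 'name' starts with the configured prefix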
+        items = [item for item in item_list
+                 if item['name'].startswith(self.prefix)]
+        return items
+
+    def _filter_out_ids_from_saved(self, item_list, attr):
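+        # drop items whose ids are recorded in saved_state.json under 'attr'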
+        items = [item for item in item_list if item['id']
+                 not in self.saved_state_json[attr].keys()]
+        return items
+
     def list(self):
         pass
 
@@ -156,10 +166,11 @@
     def list(self):
         client = self.client
         snaps = client.list_snapshots()['snapshots']
-        if not self.is_save_state:
+        if self.prefix:
+            snaps = self._filter_by_prefix(snaps)
+        elif not self.is_save_state:
             # recreate list removing saved snapshots
-            snaps = [snap for snap in snaps if snap['id']
-                     not in self.saved_state_json['snapshots'].keys()]
+            snaps = self._filter_out_ids_from_saved(snaps, 'snapshots')
         LOG.debug("List count, %s Snapshots", len(snaps))
         return snaps
 
@@ -194,10 +205,11 @@
         client = self.client
         servers_body = client.list_servers()
         servers = servers_body['servers']
-        if not self.is_save_state:
+        if self.prefix:
+            servers = self._filter_by_prefix(servers)
+        elif not self.is_save_state:
             # recreate list removing saved servers
-            servers = [server for server in servers if server['id']
-                       not in self.saved_state_json['servers'].keys()]
+            servers = self._filter_out_ids_from_saved(servers, 'servers')
         LOG.debug("List count, %s Servers", len(servers))
         return servers
 
@@ -227,10 +239,11 @@
     def list(self):
         client = self.server_groups_client
         sgs = client.list_server_groups()['server_groups']
-        if not self.is_save_state:
+        if self.prefix:
+            sgs = self._filter_by_prefix(sgs)
+        elif not self.is_save_state:
             # recreate list removing saved server_groups
-            sgs = [sg for sg in sgs if sg['id']
-                   not in self.saved_state_json['server_groups'].keys()]
+            sgs = self._filter_out_ids_from_saved(sgs, 'server_groups')
         LOG.debug("List count, %s Server Groups", len(sgs))
         return sgs
 
@@ -263,7 +276,9 @@
     def list(self):
         client = self.client
         keypairs = client.list_keypairs()['keypairs']
-        if not self.is_save_state:
+        if self.prefix:
+            keypairs = self._filter_by_prefix(keypairs)
+        elif not self.is_save_state:
             # recreate list removing saved keypairs
             keypairs = [keypair for keypair in keypairs
                         if keypair['keypair']['name']
@@ -302,10 +317,11 @@
     def list(self):
         client = self.client
         vols = client.list_volumes()['volumes']
-        if not self.is_save_state:
+        if self.prefix:
+            vols = self._filter_by_prefix(vols)
+        elif not self.is_save_state:
             # recreate list removing saved volumes
-            vols = [vol for vol in vols if vol['id']
-                    not in self.saved_state_json['volumes'].keys()]
+            vols = self._filter_out_ids_from_saved(vols, 'volumes')
         LOG.debug("List count, %s Volumes", len(vols))
         return vols
 
@@ -336,6 +352,10 @@
         self.client = manager.volume_quotas_client_latest
 
     def delete(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore do nothing
+            return
         client = self.client
         try:
             LOG.debug("Deleting Volume Quotas for project with id %s",
@@ -346,6 +366,10 @@
                           self.project_id)
 
     def dry_run(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore do nothing
+            return
         quotas = self.client.show_quota_set(
             self.project_id, params={'usage': True})['quota_set']
         self.data['volume_quotas'] = quotas
@@ -358,6 +382,10 @@
         self.limits_client = manager.limits_client
 
     def delete(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore do nothing
+            return
         client = self.client
         try:
             LOG.debug("Deleting Nova Quotas for project with id %s",
@@ -368,6 +396,10 @@
                           self.project_id)
 
     def dry_run(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore do nothing
+            return
         client = self.limits_client
         quotas = client.show_limits()['limits']
         self.data['compute_quotas'] = quotas['absolute']
@@ -379,6 +411,10 @@
         self.client = manager.network_quotas_client
 
     def delete(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore do nothing
+            return
         client = self.client
         try:
             LOG.debug("Deleting Network Quotas for project with id %s",
@@ -389,6 +425,10 @@
                           self.project_id)
 
     def dry_run(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore do nothing
+            return
         resp = [quota for quota in self.client.list_quotas()['quotas']
                 if quota['project_id'] == self.project_id]
         self.data['network_quotas'] = resp
@@ -422,11 +462,13 @@
         client = self.networks_client
         networks = client.list_networks(**self.tenant_filter)
         networks = networks['networks']
-
-        if not self.is_save_state:
-            # recreate list removing saved networks
-            networks = [network for network in networks if network['id']
-                        not in self.saved_state_json['networks'].keys()]
+        if self.prefix:
+            networks = self._filter_by_prefix(networks)
+        else:
+            if not self.is_save_state:
+                # recreate list removing saved networks
+                networks = self._filter_out_ids_from_saved(
+                    networks, 'networks')
         # filter out networks declared in tempest.conf
         if self.is_preserve:
             networks = [network for network in networks
@@ -458,14 +500,17 @@
 class NetworkFloatingIpService(BaseNetworkService):
 
     def list(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore return empty list
+            return []
         client = self.floating_ips_client
         flips = client.list_floatingips(**self.tenant_filter)
         flips = flips['floatingips']
 
         if not self.is_save_state:
             # recreate list removing saved flips
-            flips = [flip for flip in flips if flip['id']
-                     not in self.saved_state_json['floatingips'].keys()]
+            flips = self._filter_out_ids_from_saved(flips, 'floatingips')
         LOG.debug("List count, %s Network Floating IPs", len(flips))
         return flips
 
@@ -498,15 +543,15 @@
         client = self.routers_client
         routers = client.list_routers(**self.tenant_filter)
         routers = routers['routers']
-
-        if not self.is_save_state:
-            # recreate list removing saved routers
-            routers = [router for router in routers if router['id']
-                       not in self.saved_state_json['routers'].keys()]
+        if self.prefix:
+            routers = self._filter_by_prefix(routers)
+        else:
+            if not self.is_save_state:
+                # recreate list removing saved routers
+                routers = self._filter_out_ids_from_saved(routers, 'routers')
         if self.is_preserve:
             routers = [router for router in routers
                        if router['id'] != CONF_PUB_ROUTER]
-
         LOG.debug("List count, %s Routers", len(routers))
         return routers
 
@@ -547,15 +592,19 @@
 class NetworkMeteringLabelRuleService(NetworkService):
 
     def list(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore return empty list
+            return []
         client = self.metering_label_rules_client
         rules = client.list_metering_label_rules()
         rules = rules['metering_label_rules']
         rules = self._filter_by_tenant_id(rules)
 
         if not self.is_save_state:
-            saved_rules = self.saved_state_json['metering_label_rules'].keys()
+            rules = self._filter_out_ids_from_saved(
+                rules, 'metering_label_rules')
             # recreate list removing saved rules
-            rules = [rule for rule in rules if rule['id'] not in saved_rules]
         LOG.debug("List count, %s Metering Label Rules", len(rules))
         return rules
 
@@ -589,11 +638,12 @@
         labels = client.list_metering_labels()
         labels = labels['metering_labels']
         labels = self._filter_by_tenant_id(labels)
-
-        if not self.is_save_state:
+        if self.prefix:
+            labels = self._filter_by_prefix(labels)
+        elif not self.is_save_state:
             # recreate list removing saved labels
-            labels = [label for label in labels if label['id']
-                      not in self.saved_state_json['metering_labels'].keys()]
+            labels = self._filter_out_ids_from_saved(
+                labels, 'metering_labels')
         LOG.debug("List count, %s Metering Labels", len(labels))
         return labels
 
@@ -627,14 +677,14 @@
                  client.list_ports(**self.tenant_filter)['ports']
                  if port["device_owner"] == "" or
                  port["device_owner"].startswith("compute:")]
-
-        if not self.is_save_state:
-            # recreate list removing saved ports
-            ports = [port for port in ports if port['id']
-                     not in self.saved_state_json['ports'].keys()]
+        if self.prefix:
+            ports = self._filter_by_prefix(ports)
+        else:
+            if not self.is_save_state:
+                # recreate list removing saved ports
+                ports = self._filter_out_ids_from_saved(ports, 'ports')
         if self.is_preserve:
             ports = self._filter_by_conf_networks(ports)
-
         LOG.debug("List count, %s Ports", len(ports))
         return ports
 
@@ -667,16 +717,18 @@
         secgroups = [secgroup for secgroup in
                      client.list_security_groups(**filter)['security_groups']
                      if secgroup['name'] != 'default']
-
-        if not self.is_save_state:
-            # recreate list removing saved security_groups
-            secgroups = [secgroup for secgroup in secgroups if secgroup['id']
-                         not in self.saved_state_json['security_groups'].keys()
-                         ]
+        if self.prefix:
+            secgroups = self._filter_by_prefix(secgroups)
+        else:
+            if not self.is_save_state:
+                # recreate list removing saved security_groups
+                secgroups = self._filter_out_ids_from_saved(
+                    secgroups, 'security_groups')
         if self.is_preserve:
-            secgroups = [secgroup for secgroup in secgroups
-                         if secgroup['security_group_rules'][0]['project_id']
-                         not in CONF_PROJECTS]
+            secgroups = [
+                secgroup for secgroup in secgroups
+                if secgroup['security_group_rules'][0]['project_id']
+                not in CONF_PROJECTS]
         LOG.debug("List count, %s security_groups", len(secgroups))
         return secgroups
 
@@ -708,10 +760,12 @@
         client = self.subnets_client
         subnets = client.list_subnets(**self.tenant_filter)
         subnets = subnets['subnets']
-        if not self.is_save_state:
-            # recreate list removing saved subnets
-            subnets = [subnet for subnet in subnets if subnet['id']
-                       not in self.saved_state_json['subnets'].keys()]
+        if self.prefix:
+            subnets = self._filter_by_prefix(subnets)
+        else:
+            if not self.is_save_state:
+                # recreate list removing saved subnets
+                subnets = self._filter_out_ids_from_saved(subnets, 'subnets')
         if self.is_preserve:
             subnets = self._filter_by_conf_networks(subnets)
         LOG.debug("List count, %s Subnets", len(subnets))
@@ -743,10 +797,12 @@
     def list(self):
         client = self.subnetpools_client
         pools = client.list_subnetpools(**self.tenant_filter)['subnetpools']
-        if not self.is_save_state:
-            # recreate list removing saved subnet pools
-            pools = [pool for pool in pools if pool['id']
-                     not in self.saved_state_json['subnetpools'].keys()]
+        if self.prefix:
+            pools = self._filter_by_prefix(pools)
+        else:
+            if not self.is_save_state:
+                # recreate list removing saved subnet pools
+                pools = self._filter_out_ids_from_saved(pools, 'subnetpools')
         if self.is_preserve:
             pools = [pool for pool in pools if pool['project_id']
                      not in CONF_PROJECTS]
@@ -782,11 +838,15 @@
         self.client = manager.regions_client
 
     def list(self):
+        if self.prefix:
+            # this means we're cleaning resources based on a certain prefix,
+            # this resource doesn't have a name, therefore return empty list
+            return []
         client = self.client
         regions = client.list_regions()
         if not self.is_save_state:
-            regions = [region for region in regions['regions'] if region['id']
-                       not in self.saved_state_json['regions'].keys()]
+            regions = self._filter_out_ids_from_saved(
+                regions['regions'], 'regions')
             LOG.debug("List count, %s Regions", len(regions))
             return regions
         else:
@@ -824,11 +884,12 @@
     def list(self):
         client = self.client
         flavors = client.list_flavors({"is_public": None})['flavors']
-        if not self.is_save_state:
-            # recreate list removing saved flavors
-            flavors = [flavor for flavor in flavors if flavor['id']
-                       not in self.saved_state_json['flavors'].keys()]
-
+        if self.prefix:
+            flavors = self._filter_by_prefix(flavors)
+        else:
+            if not self.is_save_state:
+                # recreate list removing saved flavors
+                flavors = self._filter_out_ids_from_saved(flavors, 'flavors')
         if self.is_preserve:
             flavors = [flavor for flavor in flavors
                        if flavor['id'] not in CONF_FLAVORS]
@@ -871,10 +932,11 @@
             marker = urllib.parse_qs(parsed.query)['marker'][0]
             response = client.list_images(params={"marker": marker})
             images.extend(response['images'])
-
-        if not self.is_save_state:
-            images = [image for image in images if image['id']
-                      not in self.saved_state_json['images'].keys()]
+        if self.prefix:
+            images = self._filter_by_prefix(images)
+        else:
+            if not self.is_save_state:
+                images = self._filter_out_ids_from_saved(images, 'images')
         if self.is_preserve:
             images = [image for image in images
                       if image['id'] not in CONF_IMAGES]
@@ -910,19 +972,17 @@
 
     def list(self):
         users = self.client.list_users()['users']
-
-        if not self.is_save_state:
-            users = [user for user in users if user['id']
-                     not in self.saved_state_json['users'].keys()]
-
+        if self.prefix:
+            users = self._filter_by_prefix(users)
+        else:
+            if not self.is_save_state:
+                users = self._filter_out_ids_from_saved(users, 'users')
         if self.is_preserve:
             users = [user for user in users if user['name']
                      not in CONF_USERS]
-
         elif not self.is_save_state:  # Never delete admin user
             users = [user for user in users if user['name'] !=
                      CONF.auth.admin_username]
-
         LOG.debug("List count, %s Users after reconcile", len(users))
         return users
 
@@ -955,13 +1015,14 @@
     def list(self):
         try:
             roles = self.client.list_roles()['roles']
-            # reconcile roles with saved state and never list admin role
-            if not self.is_save_state:
-                roles = [role for role in roles if
-                         (role['id'] not in
-                          self.saved_state_json['roles'].keys() and
-                          role['name'] != CONF.identity.admin_role)]
-                LOG.debug("List count, %s Roles after reconcile", len(roles))
+            if self.prefix:
+                roles = self._filter_by_prefix(roles)
+            elif not self.is_save_state:
+                # reconcile roles with saved state and never list admin role
+                roles = self._filter_out_ids_from_saved(roles, 'roles')
+                roles = [role for role in roles
+                         if role['name'] != CONF.identity.admin_role]
+            LOG.debug("List count, %s Roles after reconcile", len(roles))
             return roles
         except Exception:
             LOG.exception("Cannot retrieve Roles.")
@@ -995,18 +1056,17 @@
 
     def list(self):
         projects = self.client.list_projects()['projects']
-        if not self.is_save_state:
-            project_ids = self.saved_state_json['projects']
-            projects = [project
-                        for project in projects
-                        if (project['id'] not in project_ids and
-                            project['name'] != CONF.auth.admin_project_name)]
-
+        if self.prefix:
+            projects = self._filter_by_prefix(projects)
+        else:
+            if not self.is_save_state:
+                projects = self._filter_out_ids_from_saved(
+                    projects, 'projects')
+                projects = [project for project in projects
+                            if project['name'] != CONF.auth.admin_project_name]
         if self.is_preserve:
-            projects = [project
-                        for project in projects
+            projects = [project for project in projects
                         if project['name'] not in CONF_PROJECTS]
-
         LOG.debug("List count, %s Projects after reconcile", len(projects))
         return projects
 
@@ -1039,10 +1099,10 @@
     def list(self):
         client = self.client
         domains = client.list_domains()['domains']
-        if not self.is_save_state:
-            domains = [domain for domain in domains if domain['id']
-                       not in self.saved_state_json['domains'].keys()]
-
+        if self.prefix:
+            domains = self._filter_by_prefix(domains)
+        elif not self.is_save_state:
+            domains = self._filter_out_ids_from_saved(domains, 'domains')
         LOG.debug("List count, %s Domains after reconcile", len(domains))
         return domains
 
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index a8aafe9..49fcaf2 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -424,7 +424,7 @@
 
 class _WebSocket(object):
     def __init__(self, client_socket, url):
-        """Contructor for the WebSocket wrapper to the socket."""
+        """Constructor for the WebSocket wrapper to the socket."""
         self._socket = client_socket
         # cached stream for early frames.
         self.cached_stream = b''
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index b0bf5b2..8d257b0 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -53,7 +53,7 @@
         # Check common headers for all HTTP methods.
         #
         # Please note that for 1xx and 204 responses Content-Length presence
-        # is not checked intensionally. According to RFC 7230 a server MUST
+        # is not checked intentionally. According to RFC 7230 a server MUST
         # NOT send the header in such responses. Thus, clients should not
         # depend on this header. However, the standard does not require them
         # to validate the server's behavior. We leverage that to not refuse
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index ddc6047..d65b491 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -606,7 +606,7 @@
     floating IPs.
     :param server: The server JSON dict on which to wait.
     :param floating_ip: The floating IP JSON dict on which to wait.
-    :param wait_for_disassociate: Boolean indiating whether to wait for
+    :param wait_for_disassociate: Boolean indicating whether to wait for
     disassociation instead of association.
     """
 
diff --git a/tempest/config.py b/tempest/config.py
index 8ed1ff6..4a33bfb 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -595,18 +595,6 @@
                 help='Does the test environment support attaching a volume to '
                      'more than one instance? This depends on hypervisor and '
                      'volume backend/type and compute API version 2.60.'),
-    cfg.BoolOpt('xenapi_apis',
-                default=False,
-                help='Does the test environment support the XenAPI-specific '
-                     'APIs: os-agents, writeable server metadata and the '
-                     'resetNetwork server action? '
-                     'These were removed in Victoria alongside the XenAPI '
-                     'virt driver.',
-                deprecated_for_removal=True,
-                deprecated_reason="On Nova side, XenAPI virt driver and the "
-                                  "APIs that only worked with that driver "
-                                  "have been removed and there's nothing to "
-                                  "test after Ussuri."),
     cfg.BoolOpt('ide_bus',
                 default=True,
                 help='Does the test environment support attaching devices '
@@ -684,19 +672,19 @@
                                   'are current one. In future, Tempest will '
                                   'test v2 APIs only so this config option '
                                   'will be removed.'),
-    # Image import feature is setup in devstack victoria onwards.
-    # Once all stable branches setup the same via glance standalone
-    # mode or with uwsgi, we can remove this config option.
     cfg.BoolOpt('import_image',
-                default=False,
-                help="Is image import feature enabled"),
-    # NOTE(danms): Starting mid-Wallaby glance began enforcing the
-    # previously-informal requirement that os_glance_* properties are
-    # reserved for internal use. Thus, we can only run these checks
-    # if we know we are on a new enough glance.
+                default=True,
+                help="Is image import feature enabled",
+                deprecated_for_removal=True,
+                deprecated_reason='Issue with image import in WSGI mode was '
+                                  'fixed in Victoria, and this feature works '
+                                  'in any deployment architecture now.'),
     cfg.BoolOpt('os_glance_reserved',
-                default=False,
-                help="Should we check that os_glance namespace is reserved"),
+                default=True,
+                help="Should we check that os_glance namespace is reserved",
+                deprecated_for_removal=True,
+                deprecated_reason='os_glance namespace is always reserved '
+                                  'since Wallaby'),
     cfg.BoolOpt('manage_locations',
                 default=False,
                 help=('Is show_multiple_locations enabled in glance. '
@@ -766,13 +754,6 @@
                default=1,
                help="Time in seconds between network operation status "
                     "checks."),
-    cfg.ListOpt('dns_servers',
-                default=["8.8.8.8", "8.8.4.4"],
-                help="List of dns servers which should be used"
-                     " for subnet creation",
-                deprecated_for_removal=True,
-                deprecated_reason="This config option is no longer "
-                                  "used anywhere, so it can be removed."),
     cfg.StrOpt('port_vnic_type',
                choices=[None, 'normal', 'direct', 'macvtap', 'direct-physical',
                         'baremetal', 'virtio-forwarder'],
@@ -1085,7 +1066,11 @@
                 default=True,
                 help='Does the cloud support extending the size of a volume '
                      'which has snapshot? Some drivers do not support this '
-                     'operation.')
+                     'operation.'),
+    cfg.StrOpt('volume_types_for_data_volume',
+               default=None,
+               help='Volume types used for data volumes. Multiple volume '
+                    'types can be assigned as a comma-separated list.'),
 
 ]
 
@@ -1184,7 +1169,7 @@
     cfg.StrOpt('dhcp_client',
                default='udhcpc',
                choices=["udhcpc", "dhclient", "dhcpcd", ""],
-               help='DHCP client used by images to renew DCHP lease. '
+               help='DHCP client used by images to renew DHCP lease. '
                     'If left empty, update operation will be skipped. '
                     'Supported clients: "udhcpc", "dhclient", "dhcpcd"'),
     cfg.StrOpt('protocol',
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
index 900e5ef..9b5dfda 100644
--- a/tempest/lib/api_schema/response/volume/volumes.py
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -236,7 +236,7 @@
     }
 }
 
-# TODO(zhufl): This is under discussion, so will be merged in a seperate patch.
+# TODO(zhufl): This is under discussion, so will be merged in a separate patch.
 # https://bugs.launchpad.net/cinder/+bug/1880566
 # upload_volume = {
 #     'status_code': [202],
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 466222d..af1112d 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -266,7 +266,7 @@
                   "groups! This is not valid according to the PEP8 "
                   "style guide. " % source_path)
 
-        # Divide grouped_imports into groupes based on PEP8 style guide
+        # Divide grouped_imports into groups based on PEP8 style guide
         pep8_groups = {}
         package_name = self.package.__name__.split(".")[0]
         for key in grouped_imports:
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 99647d4..6814373 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -51,7 +51,7 @@
     :param str identity_admin_role: The role name to use for admin
     :param list extra_roles: A list of strings for extra roles that should
                              be assigned to all created users
-    :param bool neutron_available: Whether we are running in an environemnt
+    :param bool neutron_available: Whether we are running in an environment
                                    with neutron
     :param bool create_networks: Whether dynamic project networks should be
                                  created or not
@@ -453,7 +453,7 @@
             # NOTE(gmann): For 'domain' and 'system' scoped token, there is no
             # project_id so we are skipping the network creation for both
             # scope.
-            # We need to create nework resource once per project.
+            # We need to create network resource once per project.
             if (not project_id and (not scope or scope == 'project')):
                 if (self.neutron_available and self.create_networks):
                     network, subnet, router = self._create_network_resources(
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index 7d54c1a..144450b 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -198,7 +198,7 @@
     There are functions created as classmethod and the cleanup
     was managed by the class with addClassResourceCleanup,
     In case the function called from a class level (resource_setup) its ok
-    But when it is called from testcase level there is no reson to delete the
+    But when it is called from testcase level there is no reason to delete the
     resource when class tears down.
 
     The testcase results will not reflect the resources cleanup because test
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index 0608d47..a6a1623 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -159,7 +159,7 @@
         """
         url = 'images/%s/file' % image_id
 
-        # We are going to do chunked transfert, so split the input data
+        # We are going to do chunked transfer, so split the input data
         # info fixed-sized chunks.
         headers = {'Content-Type': 'application/octet-stream'}
         data = iter(functools.partial(data.read, CHUNKSIZE), b'')
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index bdca0d0..47edf70 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -15,7 +15,6 @@
 
 from urllib import parse as urllib
 
-import debtcollector.moves
 from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
@@ -64,7 +63,7 @@
             delete_metadata=None,
             create_update_metadata_prefix='X-Container-Meta-',
             delete_metadata_prefix='X-Remove-Container-Meta-'):
-        """Creates, Updates or deletes an containter metadata entry.
+        """Creates, Updates or deletes an container metadata entry.
 
         Container Metadata can be created, updated or deleted based on
         metadata header or value. For detailed info, please refer to the
@@ -85,11 +84,6 @@
         self.expected_success(204, resp.status)
         return resp, body
 
-    update_container_metadata = debtcollector.moves.moved_function(
-        create_update_or_delete_container_metadata,
-        'update_container_metadata', __name__,
-        version='Queens', removal_version='Rocky')
-
     def list_container_metadata(self, container_name):
         """List all container metadata."""
         url = str(container_name)
@@ -126,7 +120,3 @@
 
         self.expected_success([200, 204], resp.status)
         return resp, body
-
-    list_container_contents = debtcollector.moves.moved_function(
-        list_container_objects, 'list_container_contents', __name__,
-        version='Queens', removal_version='Rocky')
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index c6f8973..95f3ffc 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -86,7 +86,7 @@
     def migrate_volume(self, volume_id, **kwargs):
         """Migrate a volume to a new backend
 
-        For a full list of available parameters please refer to the offical
+        For a full list of available parameters please refer to the official
         API reference:
 
         https://docs.openstack.org/api-ref/block-storage/v3/index.html#migrate-a-volume
@@ -173,7 +173,7 @@
         resp, body = self.post(url, post_body)
         body = json.loads(body)
         # TODO(zhufl): This is under discussion, so will be merged
-        # in a seperate patch.
+        # in a separate patch.
         # https://bugs.launchpad.net/cinder/+bug/1880566
         # self.validate_response(schema.upload_volume, resp, body)
         self.expected_success(202, resp.status)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 5f30909..d51e7e5 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -751,6 +751,31 @@
 
         return rules
 
+    def create_and_add_security_group_to_server(self, server):
+        """Create a security group and add it to the server.
+
+        :param server: The server to add the security group to.
+        :return: None. The security group is added to the server and its
+                 removal is registered as a test cleanup.
+        """
+
+        secgroup = self.create_security_group()
+        self.servers_client.add_security_group(server['id'],
+                                               name=secgroup['name'])
+        self.addCleanup(self.servers_client.remove_security_group,
+                        server['id'], name=secgroup['name'])
+
+        def wait_for_secgroup_add():
+            body = (self.servers_client.show_server(server['id'])
+                    ['server'])
+            return {'name': secgroup['name']} in body['security_groups']
+
+        if not test_utils.call_until_true(wait_for_secgroup_add,
+                                          CONF.compute.build_timeout,
+                                          CONF.compute.build_interval):
+            msg = ('Timed out waiting for adding security group %s to server '
+                   '%s' % (secgroup['id'], server['id']))
+            raise lib_exc.TimeoutException(msg)
+
     def get_remote_client(self, ip_address, username=None, private_key=None,
                           server=None):
         """Get a SSH client to a remote server
@@ -1177,6 +1202,15 @@
         self.assertIsNone(floating_ip['port_id'])
         return floating_ip
 
+    def create_file(self, ip_address, path, private_key=None, server=None,
+                    username=None):
+        """Create a file on a remote server"""
+        ssh_client = self.get_remote_client(ip_address,
+                                            private_key=private_key,
+                                            server=server,
+                                            username=username)
+        ssh_client.exec_command('sudo mkdir -p %s' % path)
+
     def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
                          private_key=None, server=None, username=None,
                          fs='vfat'):
diff --git a/tempest/scenario/test_instances_with_cinder_volumes.py b/tempest/scenario/test_instances_with_cinder_volumes.py
new file mode 100644
index 0000000..5f33b49
--- /dev/null
+++ b/tempest/scenario/test_instances_with_cinder_volumes.py
@@ -0,0 +1,225 @@
+# Copyright 2024 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+from oslo_log import log as logging
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+from tempest.scenario import manager
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TestInstancesWithCinderVolumes(manager.ScenarioTest):
+    """This is cinder volumes test.
+
+    Tests are below:
+    * test_instances_with_cinder_volumes_on_all_compute_nodes
+    """
+
+    compute_min_microversion = '2.60'
+
+    @decorators.idempotent_id('d0e3c1a3-4b0a-4b0e-8b0a-4b0e8b0a4b0e')
+    @decorators.attr(type=['slow', 'multinode'])
+    @utils.services('compute', 'volume', 'image', 'network')
+    def test_instances_with_cinder_volumes_on_all_compute_nodes(self):
+        """Test instances with cinder volumes launches on all compute nodes
+
+        Steps:
+            1. Create an image
+            2. Create a keypair
+            3. Create a bootable volume from the image and of the given volume
+               type
+            4. Boot an instance from the bootable volume on each available
+               compute node, up to CONF.compute.min_compute_nodes
+            5. Create a volume of each type in volume_types_for_data_volume
+               for every available compute node, up to
+               CONF.compute.min_compute_nodes.
+               Total number of volumes is equal to
+               compute nodes * len(volume_types_for_data_volume)
+            6. Attach volumes to the instances
+            7. Assign floating IP to all instances
+            8. Configure security group for ssh access to all instances
+            9. Confirm ssh access to all instances
+            10. Run write test to all volumes through ssh connection per
+                instance
+            11. Clean up the resources: instances, volumes, keypair and image
+        """
+        boot_volume_type = (CONF.volume.volume_type or
+                            self.create_volume_type()['name'])
+
+        # create an image
+        image = self.image_create()
+
+        # create keypair
+        keypair = self.create_keypair()
+
+        # check all available zones for booting instances
+        available_zone = \
+            self.os_admin.availability_zone_client.list_availability_zones(
+                detail=True)['availabilityZoneInfo']
+
+        hosts = []
+        for zone in available_zone:
+            if zone['zoneState']['available']:
+                for host in zone['hosts']:
+                    if 'nova-compute' in zone['hosts'][host] and \
+                        zone['hosts'][host]['nova-compute']['available'] and \
+                        not host.endswith('-ironic'):
+                        hosts.append({'zone': zone['zoneName'],
+                                      'host_name': host})
+
+        # fail if there are fewer hosts than the minimal number of instances
+        if len(hosts) < CONF.compute.min_compute_nodes:
+            raise exceptions.InvalidConfiguration(
+                "Host list %s is shorter than min_compute_nodes. " % hosts)
+
+        # get volume types
+        volume_types = []
+        if CONF.volume_feature_enabled.volume_types_for_data_volume:
+            types = CONF.volume_feature_enabled.volume_types_for_data_volume
+            volume_types = types.split(',')
+        else:
+            # no user-specified volume types, create 2 default ones
+            volume_types.append(self.create_volume_type()['name'])
+            volume_types.append(self.create_volume_type()['name'])
+
+        hosts_to_boot_servers = hosts[:CONF.compute.min_compute_nodes]
+        LOG.debug("List of hosts selected to boot servers %s: ",
+                  hosts_to_boot_servers)
+
+        # create volumes so that we don't need to wait for them to be created
+        # and save them in a list
+        created_volumes = []
+        for host in hosts_to_boot_servers:
+            for volume_type in volume_types:
+                created_volumes.append(
+                    self.create_volume(volume_type=volume_type,
+                                       wait_until=None)
+                )
+
+        bootable_volumes = []
+        for host in hosts_to_boot_servers:
+            # create boot volume from image and of the given volume type
+            bootable_volumes.append(
+                self.create_volume(
+                    imageRef=image, volume_type=boot_volume_type,
+                    wait_until=None)
+            )
+
+        # boot server
+        servers = []
+
+        for bootable_volume in bootable_volumes:
+
+            # wait for bootable volumes to become available
+            waiters.wait_for_volume_resource_status(
+                self.volumes_client, bootable_volume['id'], 'available')
+
+            # create an instance from bootable volume
+            server = self.boot_instance_from_resource(
+                source_id=bootable_volume['id'],
+                source_type='volume',
+                keypair=keypair,
+                wait_until=None
+            )
+            servers.append(server)
+
+        start = 0
+        end = len(volume_types)
+        for server in servers:
+            attached_volumes = []
+
+            # wait for server to become active
+            waiters.wait_for_server_status(self.servers_client,
+                                           server['id'], 'ACTIVE')
+
+            # attach volumes to the instances
+            for volume in created_volumes[start:end]:
+
+                # wait for volume to become available
+                waiters.wait_for_volume_resource_status(
+                    self.volumes_client, volume['id'], 'available')
+
+                attached_volume = self.nova_volume_attach(server, volume)
+                attached_volumes.append(attached_volume)
+                LOG.debug("Attached volume %s to server %s",
+                          attached_volume['id'], server['id'])
+
+            # assign floating ip
+            floating_ip = None
+            if (CONF.network_feature_enabled.floating_ips and
+                CONF.network.floating_network_name):
+                fip = self.create_floating_ip(server)
+                floating_ip = self.associate_floating_ip(
+                    fip, server)
+                ssh_ip = floating_ip['floating_ip_address']
+            else:
+                ssh_ip = self.get_server_ip(server)
+
+            # create security group
+            self.create_and_add_security_group_to_server(server)
+
+            # confirm ssh access
+            self.linux_client = self.get_remote_client(
+                ssh_ip, private_key=keypair['private_key'],
+                server=server
+            )
+
+            # run write test on all volumes
+            for volume in attached_volumes:
+
+                waiters.wait_for_volume_resource_status(
+                    self.volumes_client, volume['id'], 'in-use')
+
+                # get the mount path
+                mount_path = f"/mnt/{volume['attachments'][0]['device'][5:]}"
+
+                # create file for mounting on server
+                self.create_file(ssh_ip, mount_path,
+                                 private_key=keypair['private_key'],
+                                 server=server)
+
+                # volume['attachments'][0]['device'] is like /dev/vdb; strip
+                # the leading /dev/ (first 5 chars) to get the dev name
+                timestamp_before = self.create_timestamp(
+                    ssh_ip, private_key=keypair['private_key'], server=server,
+                    dev_name=volume['attachments'][0]['device'][5:],
+                    mount_path=mount_path
+                )
+                timestamp_after = self.get_timestamp(
+                    ssh_ip, private_key=keypair['private_key'], server=server,
+                    dev_name=volume['attachments'][0]['device'][5:],
+                    mount_path=mount_path
+                )
+                self.assertEqual(timestamp_before, timestamp_after)
+
+                # delete volume
+                self.nova_volume_detach(server, volume)
+                self.volumes_client.delete_volume(volume['id'])
+
+            if floating_ip:
+                # delete the floating IP, this should refresh the server
+                # addresses
+                self.disassociate_floating_ip(floating_ip)
+                waiters.wait_for_server_floating_ip(
+                    self.servers_client, server, floating_ip,
+                    wait_for_disassociate=True)
+
+            start += len(volume_types)
+            end += len(volume_types)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 6372c6b..543be31 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -19,9 +19,7 @@
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
-from tempest.lib.common.utils import test_utils
 from tempest.lib import decorators
-from tempest.lib import exceptions
 from tempest.scenario import manager
 
 CONF = config.CONF
@@ -73,25 +71,6 @@
         disks = self.linux_client.get_disks()
         self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
 
-    def create_and_add_security_group_to_server(self, server):
-        secgroup = self.create_security_group()
-        self.servers_client.add_security_group(server['id'],
-                                               name=secgroup['name'])
-        self.addCleanup(self.servers_client.remove_security_group,
-                        server['id'], name=secgroup['name'])
-
-        def wait_for_secgroup_add():
-            body = (self.servers_client.show_server(server['id'])
-                    ['server'])
-            return {'name': secgroup['name']} in body['security_groups']
-
-        if not test_utils.call_until_true(wait_for_secgroup_add,
-                                          CONF.compute.build_timeout,
-                                          CONF.compute.build_interval):
-            msg = ('Timed out waiting for adding security group %s to server '
-                   '%s' % (secgroup['id'], server['id']))
-            raise exceptions.TimeoutException(msg)
-
     @decorators.attr(type='slow')
     @decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
     @utils.services('compute', 'volume', 'image', 'network')
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 3a93f74..911ff42 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -20,6 +20,7 @@
 from tempest.common.utils import net_downtime
 from tempest.common import waiters
 from tempest import config
+from tempest.lib.common import api_version_request
 from tempest.lib import decorators
 from tempest.scenario import manager
 
@@ -193,8 +194,11 @@
         # check if microversion is less than 2.25 because of
         # disk_over_commit is depracted since compute api version 2.25
         # if min_microversion is None, it runs on version < 2.25
+        min_v = api_version_request.APIVersionRequest(
+            CONF.compute.min_microversion)
+        api_v = api_version_request.APIVersionRequest('2.25')
         if not migration and (CONF.compute.min_microversion is None or
-                              CONF.compute.min_microversion < '2.25'):
+                              min_v < api_v):
             migration_kwargs['disk_over_commit'] = False
 
         if dest_host:
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 7b819e0..fb68e46 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -179,8 +179,7 @@
     def _check_public_network_connectivity(
             self, should_connect=True, msg=None,
             should_check_floating_ip_status=True, mtu=None):
-        """Verifies connectivty to a VM via public network and floating IP
-
+        """Verifies connectivity to a VM via public network and floating IP
         and verifies floating IP has resource status is correct.
 
         :param should_connect: bool. determines if connectivity check is
diff --git a/tempest/scenario/test_network_qos_placement.py b/tempest/scenario/test_network_qos_placement.py
index dbbc314..055dcb6 100644
--- a/tempest/scenario/test_network_qos_placement.py
+++ b/tempest/scenario/test_network_qos_placement.py
@@ -67,10 +67,10 @@
         cls.networks_client = cls.os_admin.networks_client
         cls.subnets_client = cls.os_admin.subnets_client
         cls.ports_client = cls.os_primary.ports_client
-        cls.routers_client = cls.os_adm.routers_client
+        cls.routers_client = cls.os_admin.routers_client
         cls.qos_client = cls.os_admin.qos_client
         cls.qos_min_bw_client = cls.os_admin.qos_min_bw_client
-        cls.flavors_client = cls.os_adm.flavors_client
+        cls.flavors_client = cls.os_admin.flavors_client
         cls.servers_client = cls.os_primary.servers_client
 
     def _create_flavor_to_resize_to(self):
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 92dbffb..e060b0f 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -137,7 +137,7 @@
 
         # Make sure the machine ssh-able before attaching the volume
         # Just a live machine is responding
-        # for device attache/detach as expected
+        # for device attach/detach as expected
         linux_client = self.get_remote_client(
             ip_for_snapshot, private_key=keypair['private_key'],
             server=server_from_snapshot)
diff --git a/tempest/test.py b/tempest/test.py
index 3360221..173bfab 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -26,7 +26,6 @@
 
 from tempest import clients
 from tempest.common import credentials_factory as credentials
-from tempest.common import utils
 from tempest import config
 from tempest.lib.common import api_microversion_fixture
 from tempest.lib.common import fixed_network
@@ -45,20 +44,6 @@
     version='Pike', removal_version='?')
 
 
-services = debtcollector.moves.moved_function(
-    utils.services, 'services', __name__,
-    version='Pike', removal_version='?')
-
-
-requires_ext = debtcollector.moves.moved_function(
-    utils.requires_ext, 'requires_ext', __name__,
-    version='Pike', removal_version='?')
-
-
-is_extension_enabled = debtcollector.moves.moved_function(
-    utils.is_extension_enabled, 'is_extension_enabled', __name__,
-    version='Pike', removal_version='?')
-
 at_exit_set = set()
 
 
@@ -661,7 +646,7 @@
         then be run.
 
         Cleanup functions are always called during the test class tearDown
-        fixture, even if an exception occured during setUp or tearDown.
+        fixture, even if an exception occurred during setUp or tearDown.
         """
         cls._class_cleanups.append((fn, arguments, keywordArguments))
 
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 1d69d9d..f2e809b 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -58,7 +58,7 @@
                 help="Whether or not my service is available")
 
             # Note: as long as the group is listed in get_opt_lists,
-            # it will be possible to access its optins in the plugin code
+            # it will be possible to access its options in the plugin code
             # via ("-" in the group name are replaces with "_"):
             #     CONF.my_service.<option_name>
             my_service_group = cfg.OptGroup(name="my-service",
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 2301be6..6b3b4b7 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -44,6 +44,7 @@
                   'saved_state_json': {'saved': 'data'},
                   'is_preserve': False,
                   'is_save_state': True,
+                  'prefix': 'tempest',
                   'tenant_id': 'project_id',
                   'got_exceptions': []}
         base = cleanup_service.BaseService(kwargs)
@@ -54,6 +55,7 @@
         self.assertTrue(base.is_save_state)
         self.assertEqual(base.tenant_filter['project_id'], kwargs['tenant_id'])
         self.assertEqual(base.got_exceptions, kwargs['got_exceptions'])
+        self.assertEqual(base.prefix, kwargs['prefix'])
 
     def test_not_implemented_ex(self):
         kwargs = {'data': {'data': 'test'},
@@ -61,6 +63,7 @@
                   'saved_state_json': {'saved': 'data'},
                   'is_preserve': False,
                   'is_save_state': False,
+                  'prefix': 'tempest',
                   'tenant_id': 'project_id',
                   'got_exceptions': []}
         base = self.TestException(kwargs)
@@ -188,7 +191,8 @@
     service_name = 'default'
 
     def _create_cmd_service(self, service_type, is_save_state=False,
-                            is_preserve=False, is_dry_run=False):
+                            is_preserve=False, is_dry_run=False,
+                            prefix=''):
         creds = fake_credentials.FakeKeystoneV3Credentials()
         os = clients.Manager(creds)
         return getattr(cleanup_service, service_type)(
@@ -196,6 +200,7 @@
             is_save_state=is_save_state,
             is_preserve=is_preserve,
             is_dry_run=is_dry_run,
+            prefix=prefix,
             project_id='b8e3ece07bb049138d224436756e3b57',
             data={},
             saved_state_json=self.saved_state
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 8a1158d..154d8d1 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -252,7 +252,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_no_domain(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         expected_domain = 'my_domain'
         mock_auth_get_credentials.return_value = expected_result
@@ -272,7 +272,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_domain(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         expected_domain = 'my_domain'
         mock_auth_get_credentials.return_value = expected_result
@@ -291,7 +291,7 @@
 
     @mock.patch('tempest.lib.auth.get_credentials')
     def test_get_credentials_v3_system(self, mock_auth_get_credentials):
-        expected_uri = 'https://v3.identity.exmaple.com'
+        expected_uri = 'https://v3.identity.example.com'
         expected_result = 'my_creds'
         mock_auth_get_credentials.return_value = expected_result
         cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
diff --git a/tempest/tests/lib/common/utils/test_data_utils.py b/tempest/tests/lib/common/utils/test_data_utils.py
index a0267d0..06a7805 100644
--- a/tempest/tests/lib/common/utils/test_data_utils.py
+++ b/tempest/tests/lib/common/utils/test_data_utils.py
@@ -79,7 +79,7 @@
         self.assertEqual(len(actual), 3)
         self.assertRegex(actual, "[A-Za-z0-9~!@#%^&*_=+]{3}")
         actual2 = data_utils.rand_password(2)
-        # NOTE(masayukig): Originally, we checked that the acutal and actual2
+        # NOTE(masayukig): Originally, we checked that the actual and actual2
         # are different each other. But only 3 letters can be the same value
         # in a very rare case. So, we just check the length here, too,
         # just in case.
diff --git a/tempest/tests/lib/services/base.py b/tempest/tests/lib/services/base.py
index 924f9f2..fd4bc17 100644
--- a/tempest/tests/lib/services/base.py
+++ b/tempest/tests/lib/services/base.py
@@ -54,7 +54,7 @@
                  ``assert_called_once_with(foo='bar')`` is called.
                * If mock_args='foo' then ``assert_called_once_with('foo')``
                  is called.
-        :param resp_as_string: Whether response body is retruned as string.
+        :param resp_as_string: Whether response body is returned as string.
                This is for service client methods which return ResponseBodyData
                object.
         :param kwargs: kwargs that are passed to function.
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index 13870ba..0ba6ed3 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -162,7 +162,7 @@
 
         client = ssh.Client('localhost', 'root', timeout=timeout)
         # We need to mock LOG here because LOG.info() calls time.time()
-        # in order to preprend a timestamp.
+        # in order to prepend a timestamp.
         with mock.patch.object(ssh, 'LOG'):
             self.assertRaises(exceptions.SSHTimeout,
                               client._get_ssh_connection)
diff --git a/tox.ini b/tox.ini
index 51c38f2..fcdf6ff 100644
--- a/tox.ini
+++ b/tox.ini
@@ -154,7 +154,7 @@
 sitepackages = {[tempestenv]sitepackages}
 setenv = {[tempestenv]setenv}
 deps = {[tempestenv]deps}
-# But exlcude the extra tests mentioned in tools/tempest-extra-tests-list.txt
+# But exclude the extra tests mentioned in tools/tempest-extra-tests-list.txt
 regex = '(^tempest\.scenario.*)|(^tempest\.serial_tests)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)'
 commands =
     find . -type f -name "*.pyc" -delete
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 0ac893a..3b402c8 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -24,6 +24,8 @@
               min_compute_nodes: "{{ groups['compute'] | default(['controller']) | length }}"
       test_results_stage_name: test_results
       zuul_copy_output:
+        '/var/log/openvswitch': logs
+        '/var/log/ovn': logs
         '{{ devstack_base_dir }}/tempest/etc/tempest.conf': logs
         '{{ devstack_base_dir }}/tempest/etc/accounts.yaml': logs
         '{{ devstack_base_dir }}/tempest/tempest.log': logs
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 970cf03..67a7bb1 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -24,38 +24,12 @@
     description: |
       Integration test of IPv6-only deployments. This job runs
       smoke and IPv6 related tests only. Basic idea is to test
-      whether OpenStack Services listen on IPv6 addrress or not.
+      whether OpenStack Services listen on IPv6 addresses or not.
     timeout: 10800
     vars:
       tox_envlist: ipv6-only
 
 - job:
-    name: tempest-full
-    parent: devstack-tempest
-    description: |
-      Base integration test with Neutron networking and py27.
-      This job is supposed to run until stable/train setup only.
-      If you are running it on stable/ussuri gate onwards for python2.7
-      coverage then you need to do override-checkout with any stable
-      branch less than or equal to stable/train.
-      Former names for this job where:
-        * legacy-tempest-dsvm-neutron-full
-        * gate-tempest-dsvm-neutron-full-ubuntu-xenial
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        ENABLE_FILE_INJECTION: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        USE_PYTHON3: False
-      devstack_services:
-        # NOTE(mriedem): Disable the cinder-backup service from tempest-full
-        # since tempest-full is in the integrated-gate project template but
-        # the backup tests do not really involve other services so they should
-        # be run in some more cinder-specific job, especially because the
-        # tests fail at a high rate (see bugs 1483434, 1813217, 1745168)
-        c-bak: false
-
-- job:
     name: tempest-extra-tests
     parent: tempest-full-py3
     description: |
@@ -74,7 +48,7 @@
     # this job definition is only for stable/xena onwards
     # and separate job definition until stable/wallaby
     branches:
-      regex: ^stable/(stein|train|ussuri|victoria|wallaby)$
+      regex: ^.*/(victoria|wallaby)$
       negate: true
     description: |
       Base integration test with Neutron networking, horizon, swift enable,
@@ -99,7 +73,7 @@
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
-        # Enbale horizon so that we can run horizon test.
+        # Enable horizon so that we can run horizon test.
         horizon: true
 
 - job:
@@ -108,7 +82,7 @@
     nodeset: devstack-single-node-centos-9-stream
     # centos-9-stream is supported from yoga release onwards
     branches:
-      regex: ^stable/(stein|train|ussuri|victoria|wallaby|xena)$
+      regex: ^.*/(victoria|wallaby|xena)$
       negate: true
     description: |
       Base integration test on CentOS 9 stream
@@ -169,7 +143,7 @@
     nodeset: devstack-single-node-centos-9-stream
     # centos-9-stream is supported from yoga release onwards
     branches:
-      regex: ^stable/(stein|train|ussuri|victoria|wallaby|xena)$
+      regex: ^.*/(victoria|wallaby|xena)$
       negate: true
     description: |
       This job runs integration tests for compute. This is
@@ -232,7 +206,7 @@
       tox_envlist: integrated-object-storage
       devstack_localrc:
         # NOTE(gmann): swift is not ready on python3 yet and devstack
-        # install it on python2.7 only. But settting the USE_PYTHON3
+        # install it on python2.7 only. But setting the USE_PYTHON3
         # for future once swift is ready on py3.
         USE_PYTHON3: true
 
@@ -256,7 +230,7 @@
     nodeset: openstack-two-node-jammy
     # This job runs on ubuntu Jammy and after stable/zed.
     branches:
-      regex: ^stable/(stein|train|ussuri|victoria|wallaby|xena|yoga|zed)$
+      regex: ^.*/(victoria|wallaby|xena|yoga|zed)$
       negate: true
     vars:
       # NOTE(gmann): Default concurrency is higher (number of cpu -2) which
@@ -321,7 +295,7 @@
     # till stable/wallaby, this job definition is only for stable/xena
     # onwards and separate job definition until stable/wallaby
     branches:
-      regex: ^stable/(stein|train|ussuri|victoria|wallaby)$
+      regex: ^.*/(victoria|wallaby)$
       negate: true
     vars:
       tox_envlist: slow
@@ -400,15 +374,7 @@
       This job runs the Tempest tests with scope and new defaults enabled.
     vars:
       devstack_localrc:
-        # Enabeling the scope and new defaults for services.
-        # NOTE: (gmann) We need to keep keystone scope check disable as
-        # services (except ironic) does not support the system scope and
-        # they need keystone to continue working with project scope. Until
-        # Keystone policies are changed to work for both system as well as
-        # for project scoped, we need to keep scope check disable for
-        # keystone.
-        # Nova, Glance, and Neutron have enabled the new defaults and scope
-        # by default in devstack.
+        KEYSTONE_ENFORCE_SCOPE: true
         CINDER_ENFORCE_SCOPE: true
         PLACEMENT_ENFORCE_SCOPE: true
 
@@ -442,11 +408,12 @@
             voting: false
             branches:
               - stable/2023.1
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -454,24 +421,25 @@
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
     gate:
       jobs:
         - grenade
         - tempest-integrated-networking
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
         # and job is broken up to wallaby branch due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
 
 - project-template:
@@ -499,39 +467,43 @@
         # (on SLURP as well as non SLURP release) so we are adding grenade-skip-level-always
         # job in integrated gate and we do not need to update skip level job
         # here until Nova change the decision.
-        # This is added from 2023.2 relese cycle onwards so we need to use branch variant
+        # This is added from 2023.2 release cycle onwards so we need to use branch variant
         # to make sure we do not run this job on older than 2023.2 gate.
         - grenade-skip-level-always:
             branches:
+              - ^.*/2023.2
+              - ^.*/2024.1
               - master
         - tempest-integrated-compute
         # centos-8-stream is tested from wallaby -> yoga branches
         - tempest-integrated-compute-centos-8-stream:
-            branches: ^stable/(wallaby|xena|yoga).*$
+            branches: ^.*/(wallaby|xena|yoga)$
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
         # and job is broken up to wallaby branch due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
     gate:
       jobs:
         - grenade-skip-level-always:
             branches:
+              - ^.*/2023.2
+              - ^.*/2024.1
               - master
         - tempest-integrated-compute
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
     periodic-weekly:
       jobs:
         # centos-9-stream is tested from zed release onwards
         - tempest-integrated-compute-centos-9-stream:
             branches:
-              regex: ^stable/(stein|train|ussuri|victoria|wallaby|xena|yoga)$
+              regex: ^.*/(victoria|wallaby|xena|yoga)$
               negate: true
 
 - project-template:
@@ -551,11 +523,12 @@
             voting: false
             branches:
               - stable/2023.1
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         - tempest-integrated-placement
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -563,24 +536,25 @@
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
     gate:
       jobs:
         - grenade
         - tempest-integrated-placement
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
         # and job is broken up to wallaby branch due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
 
 - project-template:
@@ -600,11 +574,12 @@
             voting: false
             branches:
               - stable/2023.1
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -612,16 +587,17 @@
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
     gate:
       jobs:
         - grenade
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -629,7 +605,7 @@
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
 
 - project-template:
@@ -642,11 +618,12 @@
     check:
       jobs:
         - grenade
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         - tempest-integrated-object-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -654,16 +631,17 @@
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
     gate:
       jobs:
         - grenade
-        # on master (SLURP 2024.1) grenade-skip-level which test stable/2023.1
-        # to stable/2024.1 upgrade is voting.
+        # On stable/2024.1 (SLURP), grenade-skip-level is voting; it tests
+        # the stable/2023.1 to stable/2024.1 upgrade and is supposed to run
+        # on SLURP releases only.
         - grenade-skip-level:
             branches:
-              - master
+              - ^.*/2024.1
         - tempest-integrated-object-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -671,5 +649,5 @@
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
             branches:
-              regex: ^stable/(ussuri|victoria|wallaby)$
+              regex: ^.*/(victoria|wallaby)$
               negate: true
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 7dc7115..e2505cb 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -37,16 +37,13 @@
         # if things are working in latest and oldest it will work in between
         # stable branches also. If anything is breaking we will be catching
         # those in respective stable branch gate.
-        - tempest-full-2023-2:
+        - tempest-full-2024-1:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-zed:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
-            # TODO(kopecmartin): non-voting until this is resolved:
-            # https://bugs.launchpad.net/tempest/+bug/2053026
-            voting: false
             irrelevant-files: &tempest-irrelevant-files-2
               - ^.*\.rst$
               - ^doc/.*$
@@ -114,7 +111,7 @@
             irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
-        - grenade-skip-level:
+        - grenade-skip-level-always:
             irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-tempest-dvr:
             voting: false
@@ -145,7 +142,7 @@
             irrelevant-files: *tempest-irrelevant-files
         - grenade:
             irrelevant-files: *tempest-irrelevant-files
-        - grenade-skip-level:
+        - grenade-skip-level-always:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *tempest-irrelevant-files-3
@@ -192,12 +189,15 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-2024-1
         - tempest-full-2023-2
         - tempest-full-2023-1
         - tempest-full-zed
+        - tempest-slow-2024-1
         - tempest-slow-2023-2
         - tempest-slow-2023-1
         - tempest-slow-zed
+        - tempest-full-2024-1-extra-tests
         - tempest-full-2023-2-extra-tests
         - tempest-full-2023-1-extra-tests
         - tempest-full-zed-extra-tests
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index c62209a..429c014 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,11 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-2024-1
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2024.1
+
+- job:
     name: tempest-full-2023-2
     parent: tempest-full-py3
     nodeset: openstack-single-node-jammy
@@ -18,6 +24,12 @@
     override-checkout: stable/zed
 
 - job:
+    name: tempest-full-2024-1-extra-tests
+    parent: tempest-extra-tests
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2024.1
+
+- job:
     name: tempest-full-2023-2-extra-tests
     parent: tempest-extra-tests
     nodeset: openstack-single-node-jammy
@@ -36,6 +48,12 @@
     override-checkout: stable/zed
 
 - job:
+    name: tempest-slow-2024-1
+    parent: tempest-slow-py3
+    nodeset: openstack-two-node-jammy
+    override-checkout: stable/2024.1
+
+- job:
     name: tempest-slow-2023-2
     parent: tempest-slow-py3
     nodeset: openstack-two-node-jammy
@@ -65,9 +83,8 @@
     # This job version is to use the 'full' tox env which
     # is available for stable/ussuri to stable/wallaby also.
     branches:
-      - stable/ussuri
-      - stable/victoria
-      - stable/wallaby
+      - ^.*/victoria
+      - ^.*/wallaby
     description: |
       Base integration test with Neutron networking, horizon, swift enable,
       and py3.
@@ -86,93 +103,20 @@
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron
       devstack_services:
-        # Enbale horizon so that we can run horizon test.
+        # Enable horizon so that we can run horizon test.
         horizon: true
 
 - job:
-    name: tempest-full-py3
-    parent: devstack-tempest
-    # This job version is with swift disabled on py3
-    # as swift was not ready on py3 until stable/train.
-    branches:
-      - stable/stein
-      - stable/train
-    description: |
-      Base integration test with Neutron networking, swift disabled, and py3.
-      Former names for this job where:
-        * legacy-tempest-dsvm-py35
-        * gate-tempest-dsvm-py35
-    required-projects:
-      - openstack/horizon
-    vars:
-      tox_envlist: full
-      devstack_localrc:
-        USE_PYTHON3: true
-        FORCE_CONFIG_DRIVE: true
-        ENABLE_VOLUME_MULTIATTACH: true
-        GLANCE_USE_IMPORT_WORKFLOW: True
-      devstack_plugins:
-        neutron: https://opendev.org/openstack/neutron
-      devstack_local_conf:
-        post-config:
-          "/$NEUTRON_CORE_PLUGIN_CONF":
-            ovs:
-              bridge_mappings: public:br-ex
-              resource_provider_bandwidths: br-ex:1000000:1000000
-        test-config:
-          $TEMPEST_CONFIG:
-            network-feature-enabled:
-              qos_placement_physnet: public
-      devstack_services:
-        # Enbale horizon so that we can run horizon test.
-        horizon: true
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        # NOTE(mriedem): Disable the cinder-backup service from
-        # tempest-full-py3 since tempest-full-py3 is in the integrated-gate-py3
-        # project template but the backup tests do not really involve other
-        # services so they should be run in some more cinder-specific job,
-        # especially because the tests fail at a high rate (see bugs 1483434,
-        # 1813217, 1745168)
-        c-bak: false
-        neutron-placement: true
-        neutron-qos: true
-
-- job:
-    name: tempest-multinode-full-py3
-    parent: tempest-multinode-full
-    nodeset: openstack-two-node-bionic
-    # This job runs on Bionic.
-    branches:
-      - stable/stein
-      - stable/train
-      - stable/ussuri
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-      devstack_plugins:
-        neutron: https://opendev.org/openstack/neutron
-      devstack_services:
-        neutron-trunk: true
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
     name: tempest-multinode-full-py3
     parent: tempest-multinode-full
     nodeset: openstack-two-node-focal
     # This job runs on Focal and supposed to run until stable/zed.
     branches:
-      - stable/victoria
-      - stable/wallaby
-      - stable/xena
-      - stable/yoga
-      - stable/zed
+      - ^.*/victoria
+      - ^.*/wallaby
+      - ^.*/xena
+      - ^.*/yoga
+      - ^.*/zed
     vars:
       devstack_localrc:
         USE_PYTHON3: true
@@ -191,11 +135,11 @@
     nodeset: openstack-two-node-focal
     # This job runs on Focal and on python2. This is for stable/victoria to stable/zed.
     branches:
-      - stable/victoria
-      - stable/wallaby
-      - stable/xena
-      - stable/yoga
-      - stable/zed
+      - ^.*/victoria
+      - ^.*/wallaby
+      - ^.*/xena
+      - ^.*/yoga
+      - ^.*/zed
     vars:
       devstack_localrc:
         USE_PYTHON3: False
@@ -205,56 +149,13 @@
           USE_PYTHON3: False
 
 - job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-bionic
-    # This job runs on Bionic and on python2. This is for stable/stein and stable/train.
-    # This job is prepared to make sure all stable branches from stable/stein till stable/train
-    # will keep running on bionic. This can be removed once stable/train is EOL.
-    branches:
-      - stable/stein
-      - stable/train
-      - stable/ussuri
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
-    name: tempest-slow-py3
-    parent: tempest-slow
-    # This job version is with swift disabled on py3
-    # as swift was not ready on py3 until stable/train.
-    branches:
-      - stable/stein
-      - stable/train
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: true
-      devstack_services:
-        s-account: false
-        s-container: false
-        s-object: false
-        s-proxy: false
-        # without Swift, c-bak cannot run (in the Gate at least)
-        c-bak: false
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: true
-
-- job:
     name: tempest-slow-py3
     parent: tempest-slow
     # This job version is to use the 'slow-serial' tox env for
     # the stable/ussuri to stable/wallaby testing.
     branches:
-      - stable/ussuri
-      - stable/victoria
-      - stable/wallaby
+      - ^.*/victoria
+      - ^.*/wallaby
     vars:
       tox_envlist: slow-serial
 
@@ -269,12 +170,9 @@
     # This job is not used after stable/xena and can be
     # removed once stable/xena is EOL.
     branches:
-      - stable/stein
-      - stable/train
-      - stable/ussuri
-      - stable/victoria
-      - stable/wallaby
-      - stable/xena
+      - ^.*/victoria
+      - ^.*/wallaby
+      - ^.*/xena
 
 - job:
     name: tempest-integrated-compute-centos-8-stream
@@ -283,9 +181,9 @@
     voting: false
     nodeset: devstack-single-node-centos-8-stream
     branches:
-      - stable/wallaby
-      - stable/xena
-      - stable/yoga
+      - ^.*/wallaby
+      - ^.*/xena
+      - ^.*/yoga
     description: |
       This job runs integration tests for compute. This is
       subset of 'tempest-full-py3' job and run Nova, Neutron, Cinder (except backup tests)
@@ -302,9 +200,9 @@
     # TODO(gmann): Make this job non voting until bug#1957941 if fixed.
     voting: false
     branches:
-      - stable/wallaby
-      - stable/xena
-      - stable/yoga
+      - ^.*/wallaby
+      - ^.*/xena
+      - ^.*/yoga
     nodeset: devstack-single-node-centos-8-stream
     description: |
       Base integration test with Neutron networking and py36 running
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 10490b4..296682e 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -58,6 +58,8 @@
       Base integration test with Neutron networking, IPv6 and py3.
     vars:
       tox_envlist: full
+      run_tempest_cleanup: true
+      run_tempest_cleanup_prefix: true
       devstack_localrc:
         USE_PYTHON3: true
         FORCE_CONFIG_DRIVE: true