Merge "Remove references to 'all-plugin' tox environment"
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 2eaf72f..315255d 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -207,21 +207,21 @@
 is ``test_path=./tempest/test_discover`` which will only run test discover on the
 Tempest suite.
 
-Alternatively, there are the py27 and py36 tox jobs which will run the unit
-tests with the corresponding version of python.
+Alternatively, there is the py39 tox job which will run the unit tests with
+the corresponding version of python.
 
 One common activity is to just run a single test, you can do this with tox
-simply by specifying to just run py27 or py36 tests against a single test::
+simply by specifying to just run py39 tests against a single test::
 
-    $ tox -e py36 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
+    $ tox -e py39 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
 
 Or all tests in the test_microversions.py file::
 
-    $ tox -e py36 -- -n tempest.tests.test_microversions
+    $ tox -e py39 -- -n tempest.tests.test_microversions
 
 You may also use regular expressions to run any matching tests::
 
-    $ tox -e py36 -- test_microversions
+    $ tox -e py39 -- test_microversions
 
 Additionally, when running a single test, or test-file, the ``-n/--no-discover``
 argument is no longer required, however it may perform faster if included.
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index f630578..3d221c9 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,10 +9,10 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* Zed
 * Yoga
 * Xena
 * Wallaby
-* Victoria
 
 For older OpenStack Release:
 
diff --git a/releasenotes/notes/add-server-external-events-client-c86b269b0091077b.yaml b/releasenotes/notes/add-server-external-events-client-c86b269b0091077b.yaml
new file mode 100644
index 0000000..2af8e95
--- /dev/null
+++ b/releasenotes/notes/add-server-external-events-client-c86b269b0091077b.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    The ``server_external_events`` tempest client for the compute
+    Server External Events API is implemented in this release.
diff --git a/releasenotes/notes/end-of-support-of-wallaby-455e4871ae4cb32e.yaml b/releasenotes/notes/end-of-support-of-wallaby-455e4871ae4cb32e.yaml
new file mode 100644
index 0000000..d5c2974
--- /dev/null
+++ b/releasenotes/notes/end-of-support-of-wallaby-455e4871ae4cb32e.yaml
@@ -0,0 +1,12 @@
+---
+prelude: |
+    This is an intermediate release during the 2023.1 development cycle to
+    mark the end of support for the EM Wallaby release in Tempest.
+    After this release, Tempest will support the below OpenStack Releases:
+
+    * Zed
+    * Yoga
+    * Xena
+
+    Current development of Tempest is for the OpenStack 2023.1 development
+    cycle.
diff --git a/releasenotes/notes/enforce_scope_placement-47a12c741e330f60.yaml b/releasenotes/notes/enforce_scope_placement-47a12c741e330f60.yaml
new file mode 100644
index 0000000..e5e602e
--- /dev/null
+++ b/releasenotes/notes/enforce_scope_placement-47a12c741e330f60.yaml
@@ -0,0 +1,4 @@
+---
+prelude: >
+    Add a ``placement`` option to the ``enforce_scope`` config group so that
+    we can switch the scope and new defaults enforcement for the placement service.
diff --git a/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml b/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml
new file mode 100644
index 0000000..c8a026e
--- /dev/null
+++ b/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Add a new config option
+    ``[image_feature_enabled]/manage_locations`` which enables
+    tests for the ``show_multiple_locations=True`` functionality in
+    glance. In order for this to work you must also have a store
+    capable of hosting images with an HTTP URI.
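
A test that exercises image locations can guard on the new option; a minimal
sketch (the class name is hypothetical, the pattern mirrors the
``ImageLocationsTest.skip_checks`` added later in this change)::

    from tempest import config
    from tempest.api.image import base

    CONF = config.CONF


    class LocationsSmokeTest(base.BaseV2ImageTest):
        @classmethod
        def skip_checks(cls):
            super(LocationsSmokeTest, cls).skip_checks()
            if not CONF.image_feature_enabled.manage_locations:
                raise cls.skipException('manage_locations is not enabled')
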
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index b36be01..ccd5fe1 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v33.0.0
    v32.0.0
    v31.1.0
    v31.0.0
diff --git a/releasenotes/source/v33.0.0.rst b/releasenotes/source/v33.0.0.rst
new file mode 100644
index 0000000..fe7bd7d
--- /dev/null
+++ b/releasenotes/source/v33.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v33.0.0 Release Notes
+=====================
+.. release-notes:: 33.0.0 Release Notes
+   :version: 33.0.0
diff --git a/requirements.txt b/requirements.txt
index c4c7fcc..a118856 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,3 +21,4 @@
 PrettyTable>=0.7.1 # BSD
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
+defusedxml>=0.7.1 # PSFL
diff --git a/setup.cfg b/setup.cfg
index a531eb4..beaf9b4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -17,6 +17,7 @@
     Programming Language :: Python :: 3
     Programming Language :: Python :: 3.8
     Programming Language :: Python :: 3.9
+    Programming Language :: Python :: 3.10
     Programming Language :: Python :: 3 :: Only
     Programming Language :: Python :: Implementation :: CPython
 
diff --git a/tempest/api/compute/admin/test_server_external_events.py b/tempest/api/compute/admin/test_server_external_events.py
new file mode 100644
index 0000000..1c5c295
--- /dev/null
+++ b/tempest/api/compute/admin/test_server_external_events.py
@@ -0,0 +1,37 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.api.compute import base
+from tempest.lib import decorators
+
+
+class ServerExternalEventsTest(base.BaseV2ComputeAdminTest):
+    """Test server external events test"""
+
+    @decorators.idempotent_id('6bbf4723-61d2-4372-af55-7ba27f1c9ba6')
+    def test_create_server_external_events(self):
+        """Test create a server and add some external events"""
+        server_id = self.create_test_server(wait_until='ACTIVE')['id']
+        events = [
+            {
+                "name": "network-changed",
+                "server_uuid": server_id,
+            }
+        ]
+        client = self.os_admin.server_external_events_client
+        events_resp = client.create_server_external_events(
+            events=events)['events'][0]
+        self.assertEqual(server_id, events_resp['server_uuid'])
+        self.assertEqual('network-changed', events_resp['name'])
+        self.assertEqual(200, events_resp['code'])
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 75df5ae..ea1cddc 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -698,6 +698,8 @@
             binary='nova-compute')['services']
         hosts = []
         for svc in svcs:
+            if svc['host'].endswith('-ironic'):
+                continue
             if svc['state'] == 'up' and svc['status'] == 'enabled':
                 if CONF.compute.compute_volume_common_az:
                     if svc['zone'] == CONF.compute.compute_volume_common_az:
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 91ce1f9..d47ffce 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -128,3 +128,27 @@
                                               wait_for_server=False)
         self.addCleanup(self.client.delete_image, image['id'])
         self.assertEqual(snapshot_name, image['name'])
+
+    @decorators.idempotent_id('f3cac456-e3fe-4183-a7a7-a59f7f017088')
+    def test_create_server_from_snapshot(self):
+        # Create one server normally
+        server = self.create_test_server(wait_until='ACTIVE')
+        self.addCleanup(self.servers_client.delete_server, server['id'])
+
+        # Snapshot it
+        snapshot_name = data_utils.rand_name('test-snap')
+        image = self.create_image_from_server(server['id'],
+                                              name=snapshot_name,
+                                              wait_until='ACTIVE',
+                                              wait_for_server=False)
+        self.addCleanup(self.client.delete_image, image['id'])
+
+        # Try to create another server from that snapshot
+        server2 = self.create_test_server(wait_until='ACTIVE',
+                                          image_id=image['id'])
+
+        # Delete server 2 before we finish otherwise we'll race with
+        # the cleanup which tries to delete the image before the
+        # server is gone.
+        self.servers_client.delete_server(server2['id'])
+        waiters.wait_for_server_termination(self.servers_client, server2['id'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index a69dbb3..e1e7fda 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -190,7 +190,7 @@
                 server, self.os_primary, tenant_network,
                 True, self.validation_resources, "SSHABLE", True)
         else:
-            waiters.wait_for_server_status(self.client, self.server['id'],
+            waiters.wait_for_server_status(self.client, server['id'],
                                            'ACTIVE')
 
         msg = ('Server was not rebuilt to the original image. '
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 7e647dd..d590668 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -363,10 +363,12 @@
 
         if all_stores:
             stores_list = ','.join([store['id']
-                                    for store in self.available_stores])
+                                    for store in self.available_stores
+                                    if store.get('read-only') != 'true'])
         else:
-            stores = [store['id'] for store in self.available_stores]
-            stores_list = stores[::len(stores) - 1]
+            stores = [store['id'] for store in self.available_stores
+                      if store.get('read-only') != 'true']
+            stores_list = stores[::max(1, len(stores) - 1)]
 
         return body, stores_list
 
@@ -765,3 +767,280 @@
         fetched_images = self.alt_img_client.list_images(params)['images']
         self.assertEqual(1, len(fetched_images))
         self.assertEqual(image['id'], fetched_images[0]['id'])
+
+
+class ImageLocationsTest(base.BaseV2ImageTest):
+    @classmethod
+    def skip_checks(cls):
+        super(ImageLocationsTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.manage_locations:
+            skip_msg = (
+                "%s skipped as show_multiple_locations is not available" % (
+                    cls.__name__))
+            raise cls.skipException(skip_msg)
+
+    @decorators.idempotent_id('58b0fadc-219d-40e1-b159-1c902cec323a')
+    def test_location_after_upload(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Now try uploading an image file
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+        waiters.wait_for_image_status(self.client, image['id'], 'active')
+
+        # Locations should now have one item
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Expected one location in %r' % image['locations'])
+
+        # NOTE(danms): If show_image_direct_url is enabled, then this
+        # will be present. If so, it should match the one location we set
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        return image
+
+    def _check_set_location(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Add a new location
+        new_loc = {'metadata': {'foo': 'bar'},
+                   'url': CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # The image should now be active, with one location that looks
+        # like we expect
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Image should have one location but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(new_loc['url'], image['locations'][0]['url'])
+        self.assertEqual('bar', image['locations'][0]['metadata'].get('foo'))
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        # If we added the location directly, the image goes straight
+        # to active and no hashing is done
+        self.assertEqual('active', image['status'])
+        self.assertIsNone(image['os_hash_algo'])
+        self.assertIsNone(image['os_hash_value'])
+
+        return image
+
+    @decorators.idempotent_id('37599b8a-d5c0-4590-aee5-73878502be15')
+    def test_set_location(self):
+        self._check_set_location()
+
+    def _check_set_multiple_locations(self):
+        image = self._check_set_location()
+
+        new_loc = {'metadata': {'speed': '88mph'},
+                   'url': '%s#new' % CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # The image should now have two locations and the last one
+        # (locations are ordered) should have the new URL.
+        image = self.client.show_image(image['id'])
+        self.assertEqual(2, len(image['locations']),
+                         'Image should have two locations but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(new_loc['url'], image['locations'][1]['url'])
+
+        # The image should still be active and still have no hashes
+        self.assertEqual('active', image['status'])
+        self.assertIsNone(image['os_hash_algo'])
+        self.assertIsNone(image['os_hash_value'])
+
+        # The direct_url should still match the first location
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        return image
+
+    @decorators.idempotent_id('bf6e0009-c039-4884-b498-db074caadb10')
+    def test_replace_location(self):
+        image = self._check_set_multiple_locations()
+        original_locs = image['locations']
+
+        # Replacing with the exact thing should work
+        self.client.update_image(image['id'], [
+            dict(replace='/locations', value=image['locations'])])
+
+        # Changing metadata on a location should work
+        original_locs[0]['metadata']['date'] = '2015-10-15'
+        self.client.update_image(image['id'], [
+            dict(replace='/locations', value=original_locs)])
+
+        # Deleting a location should not work
+        self.assertRaises(
+            lib_exc.BadRequest,
+            self.client.update_image,
+            image['id'], [
+                dict(replace='/locations', value=[original_locs[0]])])
+
+        # Replacing a location (with a different URL) should not work
+        new_loc = {'metadata': original_locs[1]['metadata'],
+                   'url': '%s#new3' % CONF.image.http_image}
+        self.assertRaises(
+            lib_exc.BadRequest,
+            self.client.update_image,
+            image['id'], [
+                dict(replace='/locations', value=[original_locs[0],
+                                                  new_loc])])
+
+        # Make sure the locations haven't changed with the above failures,
+        # but the metadata we updated should be changed.
+        image = self.client.show_image(image['id'])
+        self.assertEqual(2, len(image['locations']),
+                         'Image should have two locations but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(original_locs, image['locations'])
+
+    @decorators.idempotent_id('8a648de4-b745-4c28-a7b5-20de1c3da4d2')
+    def test_delete_locations(self):
+        image = self._check_set_multiple_locations()
+        expected_remaining_loc = image['locations'][1]
+
+        self.client.update_image(image['id'], [
+            dict(remove='/locations/0')])
+
+        # The image should now have only the one location we did not delete
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Image should have one location but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(expected_remaining_loc['url'],
+                         image['locations'][0]['url'])
+
+        # The direct_url should now be the last remaining location
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        # Removing the last location should be disallowed
+        self.assertRaises(lib_exc.Forbidden,
+                          self.client.update_image, image['id'], [
+                              dict(remove='/locations/0')])
+
+    @decorators.idempotent_id('a9a20396-8399-4b36-909d-564949be098f')
+    def test_set_location_bad_scheme(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Adding a new location using a scheme that is not allowed
+        # should result in an error
+        new_loc = {'metadata': {'foo': 'bar'},
+                   'url': 'gopher://info.cern.ch'}
+        self.assertRaises(lib_exc.BadRequest,
+                          self.client.update_image, image['id'], [
+                              dict(add='/locations/-', value=new_loc)])
+
+    def _check_set_location_with_hash(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Create a new location with validation data
+        new_loc = {'validation_data': {'checksum': '1' * 32,
+                                       'os_hash_value': 'deadbeef' * 16,
+                                       'os_hash_algo': 'sha512'},
+                   'metadata': {},
+                   'url': CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # Expect that all of our values ended up on the image
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']))
+        self.assertEqual('1' * 32, image['checksum'])
+        self.assertEqual('deadbeef' * 16, image['os_hash_value'])
+        self.assertEqual('sha512', image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
+        self.assertEqual('active', image['status'])
+
+        return image
+
+    @decorators.idempotent_id('42d6f7db-c9f5-4bae-9e15-a90262fe445a')
+    def test_set_location_with_hash(self):
+        self._check_set_location_with_hash()
+
+    @decorators.idempotent_id('304c8a19-aa86-47dd-a022-ec4c7f433f1b')
+    def test_set_location_with_hash_second_matching(self):
+        orig_image = self._check_set_location_with_hash()
+
+        new_loc = {
+            'validation_data': {'checksum': orig_image['checksum'],
+                                'os_hash_value': orig_image['os_hash_value'],
+                                'os_hash_algo': orig_image['os_hash_algo']},
+            'metadata': {},
+            'url': '%s#new' % CONF.image.http_image}
+        self.client.update_image(orig_image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # Setting the same exact values on a new location should work
+        image = self.client.show_image(orig_image['id'])
+        self.assertEqual(2, len(image['locations']))
+        self.assertEqual(orig_image['checksum'], image['checksum'])
+        self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
+        self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
+        self.assertNotIn('validation_data', image['locations'][1])
+
+    @decorators.idempotent_id('f3ce99c2-9ffb-4b9f-b2cb-876929382553')
+    def test_set_location_with_hash_not_matching(self):
+        orig_image = self._check_set_location_with_hash()
+        values = {
+            'checksum': '2' * 32,
+            'os_hash_value': 'beefdead' * 16,
+            'os_hash_algo': 'sha256',
+        }
+
+        # Try to set a new location with one each of the above
+        # substitutions
+        for k, v in values.items():
+            new_loc = {
+                'validation_data': {
+                    'checksum': orig_image['checksum'],
+                    'os_hash_value': orig_image['os_hash_value'],
+                    'os_hash_algo': orig_image['os_hash_algo']},
+                'metadata': {},
+                'url': '%s#new' % CONF.image.http_image}
+            new_loc['validation_data'][k] = v
+
+            # This should always fail due to the mismatch
+            self.assertRaises(lib_exc.Conflict,
+                              self.client.update_image,
+                              orig_image['id'], [
+                                  dict(add='/locations/-', value=new_loc)])
+
+        # Now try to add a new location with all of the substitutions,
+        # which should also fail
+        new_loc['validation_data'] = values
+        self.assertRaises(lib_exc.Conflict,
+                          self.client.update_image,
+                          orig_image['id'], [
+                              dict(add='/locations/-', value=new_loc)])
+
+        # Make sure nothing has changed on our image after all the
+        # above failures
+        image = self.client.show_image(orig_image['id'])
+        self.assertEqual(1, len(image['locations']))
+        self.assertEqual(orig_image['checksum'], image['checksum'])
+        self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
+        self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index a11bed8..7d5bd26 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -186,12 +186,10 @@
         """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(1024)
-        headers = {'Transfer-Encoding': 'chunked'}
         resp, _ = self.object_client.create_object(
             self.container_name,
             object_name,
             data=data_utils.chunkify(data, 512),
-            headers=headers,
             chunked=True)
 
         self.assertHeaders(resp, 'Object', 'PUT')
diff --git a/tempest/clients.py b/tempest/clients.py
index a65c43b..1aa34d0 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -144,6 +144,8 @@
         self.tenant_networks_client = self.compute.TenantNetworksClient()
         self.assisted_volume_snapshots_client = (
             self.compute.AssistedVolumeSnapshotsClient())
+        self.server_external_events_client = (
+            self.compute.ServerExternalEventsClient())
 
         # NOTE: The following client needs special timeout values because
         # the API is a proxy for the other component.
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 2c256a9..be8766d 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -291,11 +291,11 @@
     if multiple_create_request:
         # Get servers created which name match with name param.
         body_servers = clients.servers_client.list_servers()
-        servers = \
+        created_servers = \
             [s for s in body_servers['servers'] if s['name'].startswith(name)]
     else:
         body = rest_client.ResponseBody(body.response, body['server'])
-        servers = [body]
+        created_servers = [body]
 
     if wait_until:
 
@@ -307,11 +307,19 @@
             wait_until_extra = wait_until
             wait_until = 'ACTIVE'
 
-        for server in servers:
-            try:
-                waiters.wait_for_server_status(
+        servers = []
+        try:
+            # Wait for the servers to reach the requested state and populate
+            # the servers list with the full server responses so that the
+            # 'addresses' field is present, which is needed later when
+            # waiting for SSH.
+            for server in created_servers:
+                server = waiters.wait_for_server_status(
                     clients.servers_client, server['id'], wait_until,
                     request_id=request_id)
+                servers.append(server)
+
+            for server in servers:
                 if CONF.validation.run_validation and validatable:
                     if CONF.validation.connect_method == 'floating':
                         _setup_validation_fip(
@@ -322,31 +330,33 @@
                             server, clients, tenant_network,
                             validatable, validation_resources,
                             wait_until_extra, False)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                for server in created_servers:
+                    try:
+                        clients.servers_client.delete_server(
+                            server['id'])
+                    except Exception:
+                        LOG.exception('Deleting server %s failed',
+                                      server['id'])
+                for server in created_servers:
+                    # NOTE(artom) If the servers were booted with volumes
+                    # and with delete_on_termination=False we need to wait
+                    # for the servers to go away before proceeding with
+                    # cleanup, otherwise we'll attempt to delete the
+                    # volumes while they're still attached to servers that
+                    # are in the process of being deleted.
+                    try:
+                        waiters.wait_for_server_termination(
+                            clients.servers_client, server['id'])
+                    except Exception:
+                        LOG.exception('Server %s failed to delete in time',
+                                      server['id'])
+        if servers and not multiple_create_request:
+            body = rest_client.ResponseBody(body.response, servers[0])
+        return body, servers
 
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    for server in servers:
-                        try:
-                            clients.servers_client.delete_server(
-                                server['id'])
-                        except Exception:
-                            LOG.exception('Deleting server %s failed',
-                                          server['id'])
-                    for server in servers:
-                        # NOTE(artom) If the servers were booted with volumes
-                        # and with delete_on_termination=False we need to wait
-                        # for the servers to go away before proceeding with
-                        # cleanup, otherwise we'll attempt to delete the
-                        # volumes while they're still attached to servers that
-                        # are in the process of being deleted.
-                        try:
-                            waiters.wait_for_server_termination(
-                                clients.servers_client, server['id'])
-                        except Exception:
-                            LOG.exception('Server %s failed to delete in time',
-                                          server['id'])
-
-    return body, servers
+    return body, created_servers
 
 
 def shelve_server(servers_client, server_id, force_shelve_offload=False):
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index f207066..71599bd 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -49,19 +49,19 @@
         # between the UNKNOWN->ACTIVE transition.
         # TODO(afazekas): enumerate and validate the stable status set
         if status == 'BUILD' and server_status != 'UNKNOWN':
-            return
+            return body
         if server_status == status:
             if ready_wait:
                 if status == 'BUILD':
-                    return
+                    return body
                 # NOTE(afazekas): The instance is in "ready for action state"
                 # when no task in progress
                 if task_state is None:
                     # without state api extension 3 sec usually enough
                     time.sleep(CONF.compute.ready_wait)
-                    return
+                    return body
             else:
-                return
+                return body
 
         time.sleep(client.build_interval)
         body = client.show_server(server_id)['server']
@@ -233,6 +233,26 @@
 
     exc_cls = lib_exc.TimeoutException
     start = int(time.time())
+
+    # NOTE(danms): Don't wait for stores that are read-only as those
+    # will never complete
+    try:
+        store_info = client.info_stores()['stores']
+        stores = ','.join(sorted([
+            store['id'] for store in store_info
+            if store.get('read-only') != 'true' and
+            (not stores or store['id'] in stores.split(','))]))
+    except lib_exc.NotFound:
+        # If multi-store is not enabled, then we can not resolve which
+        # ones are read-only, and stores must have been passed as None
+        # anyway for us to succeed. If not, then we should raise right
+        # now and avoid waiting since we will never see the stores
+        # appear.
+        if stores is not None:
+            raise lib_exc.TimeoutException(
+                'Image service has no store support; '
+                'cowardly refusing to wait for them.')
+
     while int(time.time()) - start < client.build_timeout:
         image = client.show_image(image_id)
         if image['status'] == 'active' and (stores is None or
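
With the filtering above, a caller that passes both writable and read-only
stores only waits on the writable ones. A rough illustration of the
expression with hypothetical store data::

    store_info = [{'id': 'fast', 'read-only': 'true'},
                  {'id': 'cheap'}]
    stores = 'fast,cheap'
    stores = ','.join(sorted([
        store['id'] for store in store_info
        if store.get('read-only') != 'true' and
        (not stores or store['id'] in stores.split(','))]))
    # stores is now 'cheap'; the read-only 'fast' store is never waited on.
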
diff --git a/tempest/config.py b/tempest/config.py
index 39e7fb3..d91fca4 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -735,6 +735,11 @@
     cfg.BoolOpt('os_glance_reserved',
                 default=False,
                 help="Should we check that os_glance namespace is reserved"),
+    cfg.BoolOpt('manage_locations',
+                default=False,
+                help=('Whether show_multiple_locations is enabled in glance. '
+                      'Note that at least one http store must be enabled as '
+                      'well, because we use that location scheme to test.')),
 ]
 
 network_group = cfg.OptGroup(name='network',
@@ -1242,29 +1247,46 @@
 EnforceScopeGroup = [
     cfg.BoolOpt('nova',
                 default=False,
-                help='Does the compute service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'nova.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the compute service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when nova.conf: [oslo_policy]. '
+                     'enforce_new_defaults and nova.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in nova conf.'),
     cfg.BoolOpt('neutron',
                 default=False,
-                help='Does the network service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'neutron.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the network service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when neutron.conf: [oslo_policy]. '
+                     'enforce_new_defaults and neutron.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in neutron conf.'),
     cfg.BoolOpt('glance',
                 default=False,
-                help='Does the Image service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'glance.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the Image service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when glance.conf: [oslo_policy]. '
+                     'enforce_new_defaults and glance.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in glance conf.'),
     cfg.BoolOpt('cinder',
                 default=False,
-                help='Does the Volume service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'cinder.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the Volume service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when cinder.conf: [oslo_policy]. '
+                     'enforce_new_defaults and cinder.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in cinder conf.'),
     cfg.BoolOpt('keystone',
                 default=False,
-                help='Does the Identity service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'keystone.conf: [oslo_policy].enforce_scope option.'),
+                help='Does the Identity service API policies enforce scope '
+                     'and new defaults? This configuration value should be '
+                     'enabled when keystone.conf: [oslo_policy]. '
+                     'enforce_new_defaults and keystone.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in keystone conf.'),
+    cfg.BoolOpt('placement',
+                default=False,
+                help='Does the placement service API policies enforce scope '
+                     'and new defaults? This configuration value should be '
+                     'enabled when placement.conf: [oslo_policy]. '
+                     'enforce_new_defaults and placement.conf: [oslo_policy]. '
+                     'enforce_scope options are enabled in placement conf.'),
 ]
 
 debug_group = cfg.OptGroup(name="debug",
diff --git a/tempest/lib/api_schema/response/compute/v2_1/server_external_events.py b/tempest/lib/api_schema/response/compute/v2_1/server_external_events.py
new file mode 100644
index 0000000..2ab69e2
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_1/server_external_events.py
@@ -0,0 +1,55 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+create = {
+    'status_code': [200],
+    'response_body': {
+        'type': 'object',
+        'properties': {
+            'events': {
+                'type': 'array', 'minItems': 1,
+                'items': {
+                    'type': 'object',
+                    'properties': {
+                        'server_uuid': {
+                            'type': 'string', 'format': 'uuid'
+                        },
+                        'name': {
+                            'type': 'string',
+                            'enum': [
+                                'network-changed',
+                                'network-vif-plugged',
+                                'network-vif-unplugged',
+                                'network-vif-deleted'
+                            ],
+                        },
+                        'status': {
+                            'type': 'string',
+                            'enum': ['failed', 'completed', 'in-progress'],
+                        },
+                        'tag': {
+                            'type': 'string', 'maxLength': 255,
+                        },
+                        'code': {'type': 'integer'},
+                    },
+                    'required': [
+                        'server_uuid', 'name', 'code'],
+                    'additionalProperties': False,
+                },
+            },
+        },
+        'required': ['events'],
+        'additionalProperties': False,
+    }
+}
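
A response body along these lines (illustrative only; the values mirror the
fake response used by the new unit test later in this change) satisfies the
``create`` schema above::

    example_body = {
        'events': [{
            'server_uuid': 'ff1df7b2-6772-45fd-9326-c0a3b05591c2',
            'name': 'network-changed',
            'status': 'completed',
            'tag': 'foo',
            'code': 200,
        }]
    }
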
diff --git a/tempest/lib/services/compute/__init__.py b/tempest/lib/services/compute/__init__.py
index 8d07a45..da800af 100644
--- a/tempest/lib/services/compute/__init__.py
+++ b/tempest/lib/services/compute/__init__.py
@@ -52,6 +52,8 @@
     SecurityGroupRulesClient
 from tempest.lib.services.compute.security_groups_client import \
     SecurityGroupsClient
+from tempest.lib.services.compute.server_external_events_client \
+    import ServerExternalEventsClient
 from tempest.lib.services.compute.server_groups_client import \
     ServerGroupsClient
 from tempest.lib.services.compute.servers_client import ServersClient
@@ -75,6 +77,6 @@
            'MigrationsClient', 'NetworksClient', 'QuotaClassesClient',
            'QuotasClient', 'SecurityGroupDefaultRulesClient',
            'SecurityGroupRulesClient', 'SecurityGroupsClient',
-           'ServerGroupsClient', 'ServersClient', 'ServicesClient',
-           'SnapshotsClient', 'TenantNetworksClient', 'TenantUsagesClient',
-           'VersionsClient', 'VolumesClient']
+           'ServerExternalEventsClient', 'ServerGroupsClient', 'ServersClient',
+           'ServicesClient', 'SnapshotsClient', 'TenantNetworksClient',
+           'TenantUsagesClient', 'VersionsClient', 'VolumesClient']
diff --git a/tempest/lib/services/compute/server_external_events_client.py b/tempest/lib/services/compute/server_external_events_client.py
new file mode 100644
index 0000000..683dce1
--- /dev/null
+++ b/tempest/lib/services/compute/server_external_events_client.py
@@ -0,0 +1,36 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_serialization import jsonutils as json
+
+from tempest.lib.api_schema.response.compute.v2_1 import \
+    server_external_events as schema
+from tempest.lib.common import rest_client
+from tempest.lib.services.compute import base_compute_client
+
+
+class ServerExternalEventsClient(base_compute_client.BaseComputeClient):
+
+    def create_server_external_events(self, events):
+        """Create Server External Events.
+
+        For a full list of available parameters, please refer to the official
+        API reference:
+        https://docs.openstack.org/api-ref/compute/#run-events
+        """
+        post_body = json.dumps({'events': events})
+        resp, body = self.post("os-server-external-events", post_body)
+        body = json.loads(body)
+        self.validate_response(schema.create, resp, body)
+        return rest_client.ResponseBody(resp, body)
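
A minimal usage sketch, assuming an admin compute test context such as the
``ServerExternalEventsTest`` added above (``server_id`` is a placeholder for
an existing server)::

    # Send a 'network-changed' event through the client registered on the
    # admin service clients in tempest/clients.py.
    client = self.os_admin.server_external_events_client
    events = [{'name': 'network-changed', 'server_uuid': server_id}]
    created = client.create_server_external_events(events=events)['events'][0]
    # Each returned event carries a per-event result code; 200 means accepted.
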
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 52b2534..d7ce526 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 from urllib import parse as urllib
-from xml.etree import ElementTree as etree
 
+from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 6d07ec1..ee87726 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -14,9 +14,9 @@
 #    under the License.
 
 from urllib import parse as urllib
-from xml.etree import ElementTree as etree
 
 import debtcollector.moves
+from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 1d0ee77..71088a4 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -59,11 +59,19 @@
     def test_wait_for_image_imported_to_stores(self):
         self.client.show_image.return_value = ({'status': 'active',
                                                 'stores': 'fake_store'})
+        self.client.info_stores.return_value = {
+            'stores': [{'id': 'fake_store',
+                        'description': 'A writable store'},
+                       {'id': 'another_fake_store',
+                        'description': 'A read-only store',
+                        'read-only': 'true'}]
+        }
         start_time = int(time.time())
         waiters.wait_for_image_imported_to_stores(
-            self.client, 'fake_image_id', 'fake_store')
+            self.client, 'fake_image_id', 'fake_store,another_fake_store')
         end_time = int(time.time())
-        # Ensure waiter returns before build_timeout
+        # Ensure waiter returns before build_timeout, and did not wait
+        # for the read-only store
         self.assertLess((end_time - start_time), 10)
 
     def test_wait_for_image_imported_to_stores_failure(self):
@@ -95,6 +103,22 @@
                           waiters.wait_for_image_imported_to_stores,
                           client, 'fake_image_id', 'fake_store')
 
+    def test_wait_for_image_imported_to_stores_no_stores(self):
+        client = mock.MagicMock()
+        client.show_image.return_value = ({'status': 'active'})
+        client.info_stores.side_effect = lib_exc.NotFound
+        client.build_timeout = 2
+        start_time = time.time()
+        waiters.wait_for_image_imported_to_stores(
+            client, 'fake_image_id', None)
+        end_time = time.time()
+        self.assertLess(end_time - start_time, 10)
+
+        exc = self.assertRaises(lib_exc.TimeoutException,
+                                waiters.wait_for_image_imported_to_stores,
+                                client, 'fake_image_id', 'foo,bar')
+        self.assertIn('cowardly', str(exc))
+
     def test_wait_for_image_copied_to_stores(self):
         self.client.show_image.return_value = ({
             'status': 'active',
diff --git a/tempest/tests/lib/services/compute/test_server_external_events_client.py b/tempest/tests/lib/services/compute/test_server_external_events_client.py
new file mode 100644
index 0000000..63922b3
--- /dev/null
+++ b/tempest/tests/lib/services/compute/test_server_external_events_client.py
@@ -0,0 +1,56 @@
+# Copyright 2022 NEC Corporation.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from tempest.lib.services.compute import server_external_events_client
+from tempest.tests.lib import fake_auth_provider
+from tempest.tests.lib.services import base
+
+
+class TestServerExternalEventsClient(base.BaseServiceTest):
+
+    events = [
+        {
+            "code": 200,
+            "name": "network-changed",
+            "server_uuid": "ff1df7b2-6772-45fd-9326-c0a3b05591c2",
+            "status": "completed",
+            "tag": "foo"
+        }
+    ]
+
+    events_req = [
+        {
+            "name": "network-changed",
+            "server_uuid": "ff1df7b2-6772-45fd-9326-c0a3b05591c2",
+        }
+    ]
+
+    def setUp(self):
+        super(TestServerExternalEventsClient, self).setUp()
+        fake_auth = fake_auth_provider.FakeAuthProvider()
+        self.client = server_external_events_client.ServerExternalEventsClient(
+            fake_auth, 'compute', 'regionOne')
+
+    def _test_create_server_external_events(self, bytes_body=False):
+        expected = {"events": self.events}
+        self.check_service_client_function(
+            self.client.create_server_external_events,
+            'tempest.lib.common.rest_client.RestClient.post', expected,
+            bytes_body, events=self.events_req)
+
+    def test_create_server_external_events_str_body(self):
+        self._test_create_server_external_events(bytes_body=False)
+
+    def test_create_server_external_events_byte_body(self):
+        self._test_create_server_external_events(bytes_body=True)
diff --git a/tox.ini b/tox.ini
index 94eb4d9..c784293 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,6 @@
 [tox]
 envlist = pep8,py39,bashate,pip-check-reqs
 minversion = 3.18.0
-skipsdist = True
 ignore_basepython_conflict = True
 
 [tempestenv]
@@ -24,10 +23,25 @@
     OS_STDERR_CAPTURE=1
     OS_TEST_TIMEOUT=160
     PYTHONWARNINGS=default::DeprecationWarning,ignore::DeprecationWarning:distutils,ignore::DeprecationWarning:site
-passenv = OS_STDOUT_CAPTURE OS_STDERR_CAPTURE OS_TEST_TIMEOUT OS_TEST_LOCK_PATH TEMPEST_CONFIG TEMPEST_CONFIG_DIR http_proxy HTTP_PROXY https_proxy HTTPS_PROXY no_proxy NO_PROXY ZUUL_CACHE_DIR REQUIREMENTS_PIP_LOCATION GENERATE_TEMPEST_PLUGIN_LIST
+passenv =
+    OS_STDOUT_CAPTURE
+    OS_STDERR_CAPTURE
+    OS_TEST_TIMEOUT
+    OS_TEST_LOCK_PATH
+    TEMPEST_CONFIG
+    TEMPEST_CONFIG_DIR
+    http_proxy
+    HTTP_PROXY
+    https_proxy
+    HTTPS_PROXY
+    no_proxy
+    NO_PROXY
+    ZUUL_CACHE_DIR
+    REQUIREMENTS_PIP_LOCATION
+    GENERATE_TEMPEST_PLUGIN_LIST
 usedevelop = True
-install_command = pip install {opts} {packages}
-allowlist_externals = *
+allowlist_externals =
+    find
 deps =
     -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 4c08ad9..7d0246b 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -86,6 +86,15 @@
         # Enable horizon so that we can run horizon test.
         horizon: true
 
+# TODO(gmann): As per the 2023.1 testing runtime, we need to run at least
+# one job on Focal. This job can be removed once a future testing runtime
+# drops Ubuntu Focal testing.
+- job:
+    name: tempest-full-ubuntu-focal
+    description: This is the tempest-full python3 job on Ubuntu Focal (20.04)
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
+
 - job:
     name: tempest-full-centos-9-stream
     parent: tempest-full-py3
@@ -225,22 +234,11 @@
         TEMPEST_PLACEMENT_MIN_MICROVERSION: 'latest'
 
 - job:
-    name: tempest-multinode-full
-    parent: tempest-multinode-full-base
-    nodeset: openstack-two-node-focal
-    # This job runs on Focal from stable/victoria on.
-    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri)).*$
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-    group-vars:
-      subnode:
-        devstack_localrc:
-          USE_PYTHON3: False
-
-- job:
     name: tempest-multinode-full-py3
-    parent: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-jammy
+    # This job runs on Ubuntu Jammy on branches after stable/zed.
+    branches: ^(?!stable/(pike|queens|rocky|stein|train|ussuri|victoria|wallaby|xena|yoga|zed)).*$
     vars:
       devstack_localrc:
         USE_PYTHON3: true
@@ -346,6 +344,30 @@
         # ENABLE_FILE_INJECTION: true
         DATABASE_TYPE: postgresql
 
+- job:
+    name: tempest-full-enforce-scope-new-defaults
+    parent: tempest-full-py3
+    description: |
+      This job runs the Tempest tests with scope and new defaults enabled.
+    # TODO: remove this once https://review.opendev.org/c/openstack/neutron-lib/+/864213
+    # fix is released in neutron-lib
+    required-projects:
+      - openstack/neutron-lib
+      - openstack/neutron
+    vars:
+      devstack_localrc:
+        # Enabling the scope and new defaults for services.
+        # NOTE: (gmann) We need to keep the keystone scope check disabled as
+        # services (except ironic) do not support the system scope and
+        # they need keystone to continue working with project scope. Until
+        # Keystone policies are changed to work for both system as well as
+        # project scope, we need to keep the scope check disabled for
+        # keystone.
+        NOVA_ENFORCE_SCOPE: true
+        CINDER_ENFORCE_SCOPE: true
+        GLANCE_ENFORCE_SCOPE: true
+        NEUTRON_ENFORCE_SCOPE: true
+
 - project-template:
     name: integrated-gate-networking
     description: |
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 1432180..966cc9a 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -28,6 +28,8 @@
               - ^.mailmap$
         - tempest-full-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-ubuntu-focal:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3-ipv6:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
@@ -40,10 +42,6 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-wallaby-py3:
-            irrelevant-files: *tempest-irrelevant-files
-        - tempest-slow-wallaby:
-            irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
@@ -101,6 +99,8 @@
             irrelevant-files: *tempest-irrelevant-files
         - nova-live-migration:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-enforce-scope-new-defaults:
+            irrelevant-files: *tempest-irrelevant-files
         - devstack-plugin-ceph-tempest-py3:
             # TODO(kopecmartin): make it voting once the below bug is fixed
             # https://bugs.launchpad.net/devstack-plugin-ceph/+bug/1975648
@@ -136,6 +136,8 @@
         - openstack-tox-py310
         - tempest-slow-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-ubuntu-focal:
+            irrelevant-files: *tempest-irrelevant-files
         - neutron-ovs-grenade-multinode:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-py3:
@@ -144,6 +146,10 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *tempest-irrelevant-files-3
+        - tempest-multinode-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-enforce-scope-new-defaults:
+            irrelevant-files: *tempest-irrelevant-files
         #- devstack-plugin-ceph-tempest-py3:
         #    irrelevant-files: *tempest-irrelevant-files
         #- tempest-full-centos-9-stream:
@@ -152,6 +158,7 @@
             irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
+        - nova-multi-cell
         - tempest-with-latest-microversion
         - tempest-stestr-master
         - tempest-cinder-v2-api:
@@ -173,11 +180,9 @@
         - tempest-full-zed
         - tempest-full-yoga
         - tempest-full-xena
-        - tempest-full-wallaby-py3
         - tempest-slow-zed
         - tempest-slow-yoga
         - tempest-slow-xena
-        - tempest-slow-wallaby
     periodic:
       jobs:
         - tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 6d97fad..fb2300b 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -2,44 +2,40 @@
 - job:
     name: tempest-full-zed
     parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
     override-checkout: stable/zed
 
 - job:
     name: tempest-full-yoga
     parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
     override-checkout: stable/yoga
 
 - job:
     name: tempest-full-xena
     parent: tempest-full-py3
+    nodeset: openstack-single-node-focal
     override-checkout: stable/xena
 
 - job:
-    name: tempest-full-wallaby-py3
-    parent: tempest-full-py3
-    override-checkout: stable/wallaby
-
-- job:
     name: tempest-slow-zed
     parent: tempest-slow-py3
+    nodeset: openstack-two-node-focal
     override-checkout: stable/zed
 
 - job:
     name: tempest-slow-yoga
     parent: tempest-slow-py3
+    nodeset: openstack-two-node-focal
     override-checkout: stable/yoga
 
 - job:
     name: tempest-slow-xena
     parent: tempest-slow-py3
+    nodeset: openstack-two-node-focal
     override-checkout: stable/xena
 
 - job:
-    name: tempest-slow-wallaby
-    parent: tempest-slow-py3
-    override-checkout: stable/wallaby
-
-- job:
     name: tempest-full-py3
     parent: devstack-tempest
     # This job version is with swift disabled on py3
@@ -95,6 +91,69 @@
         neutron-qos: true
 
 - job:
+    name: tempest-multinode-full-py3
+    parent: tempest-multinode-full
+    nodeset: openstack-two-node-bionic
+    # This job runs on Bionic.
+    branches:
+      - stable/stein
+      - stable/train
+      - stable/ussuri
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-trunk: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-multinode-full-py3
+    parent: tempest-multinode-full
+    nodeset: openstack-two-node-focal
+    # This job runs on Focal and is supposed to run until stable/zed.
+    branches:
+      - stable/victoria
+      - stable/wallaby
+      - stable/xena
+      - stable/yoga
+      - stable/zed
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: true
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        neutron-trunk: true
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: true
+
+- job:
+    name: tempest-multinode-full
+    parent: tempest-multinode-full-base
+    nodeset: openstack-two-node-focal
+    # This job runs on Focal and on python2. This is for stable/victoria to stable/zed.
+    branches:
+      - stable/victoria
+      - stable/wallaby
+      - stable/xena
+      - stable/yoga
+      - stable/zed
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+    group-vars:
+      subnode:
+        devstack_localrc:
+          USE_PYTHON3: False
+
+- job:
     name: tempest-multinode-full
     parent: tempest-multinode-full-base
     nodeset: openstack-two-node-bionic
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 822feaa..ca9ba7f 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -73,7 +73,7 @@
     parent: tox
     description: |
       Run tempest plugin sanity check script using tox.
-    nodeset: ubuntu-focal
+    nodeset: ubuntu-jammy
     vars:
       tox_envlist: plugin-sanity-check
     timeout: 5000