Merge "Update "test_list_migrations_in_flavor_resize_situation" to indicate the failing request on timeout."
diff --git a/playbooks/enable-fips.yaml b/playbooks/enable-fips.yaml
deleted file mode 100644
index c8f042d..0000000
--- a/playbooks/enable-fips.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-- hosts: all
-  tasks:
-    - include_role:
-        name: enable-fips
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index ccd5fe1..882413f 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
    :maxdepth: 1
 
    unreleased
+   v34.0.0
    v33.0.0
    v32.0.0
    v31.1.0
diff --git a/releasenotes/source/v34.0.0.rst b/releasenotes/source/v34.0.0.rst
new file mode 100644
index 0000000..94d3b67
--- /dev/null
+++ b/releasenotes/source/v34.0.0.rst
@@ -0,0 +1,6 @@
+=====================
+v34.0.0 Release Notes
+=====================
+
+.. release-notes:: 34.0.0 Release Notes
+   :version: 34.0.0
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 7da87c7..36148c5 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -199,11 +199,12 @@
            "server1"
         8. Check "volume2" is attached to "server1".
         """
+        multiattach_vol_type = CONF.volume.volume_type_multiattach
         # Create two volumes.
         # NOTE(gmann): Volumes are created before server creation so that
-        # volumes cleanup can happen successfully irrespective of which volume
-        # is attached to server.
+        # volume cleanup can happen successfully irrespective of which volume
+        # is attached to the server.
-        volume1 = self.create_volume(multiattach=True)
+        volume1 = self.create_volume(volume_type=multiattach_vol_type)
         # Make volume1 read-only since you can't swap from a volume with
         # multiple read/write attachments, and you can't change the readonly
         # flag on an in-use volume so we have to do this before attaching
@@ -211,7 +212,7 @@
         # attach modes, then we can handle this differently.
         self.admin_volumes_client.update_volume_readonly(
             volume1['id'], readonly=True)
-        volume2 = self.create_volume(multiattach=True)
+        volume2 = self.create_volume(volume_type=multiattach_vol_type)
 
         # Create two servers and wait for them to be ACTIVE.
         validation_resources = self.get_class_validation_resources(
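For context on the multiattach hunks in this file and the ones further down: Cinder now expects multiattach to be requested via a multiattach-capable volume type rather than a boolean flag at volume create, so the type is selected through the new CONF.volume.volume_type_multiattach option. A minimal sketch of the deployment-side setup this assumes (the client handle and type name are illustrative; the multiattach extra-spec key is Cinder's standard one):

    # Sketch only: create a multiattach-capable volume type as admin and
    # point tempest.conf at it so tests can request that type.
    vol_type = admin_volume_types_client.create_volume_type(
        name='multiattach',
        extra_specs={'multiattach': '<is> True'})['volume_type']
    # tempest.conf:
    #   [volume]
    #   volume_type_multiattach = multiattach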
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 91ab09e..55c842f 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -115,9 +115,11 @@
         5. Check "vol1" is still attached to both servers
         6. Check "vol2" is not attached to any server
         """
+        multiattach_vol_type = CONF.volume.volume_type_multiattach
+
         # Create two multiattach capable volumes.
-        vol1 = self.create_volume(multiattach=True)
-        vol2 = self.create_volume(multiattach=True)
+        vol1 = self.create_volume(volume_type=multiattach_vol_type)
+        vol2 = self.create_volume(volume_type=multiattach_vol_type)
 
         # Create two instances.
         validation_resources = self.get_class_validation_resources(
diff --git a/tempest/api/compute/servers/test_create_server_multi_nic.py b/tempest/api/compute/servers/test_create_server_multi_nic.py
index bd3f58d..6ec058d 100644
--- a/tempest/api/compute/servers/test_create_server_multi_nic.py
+++ b/tempest/api/compute/servers/test_create_server_multi_nic.py
@@ -23,6 +23,28 @@
 CONF = config.CONF
 
 
+def get_subnets(count=2):
+    """Returns a list of requested subnets from project_network_cidr block.
+
+    Args:
+        count (int):    Number of subnets required.
+
+    Returns:
+        CIDRs as a list of strings
+            e.g. ['19.80.0.0/24', '19.86.0.0/24']
+    """
+    default_rtn = ['19.80.0.0/24', '19.86.0.0/24']
+    _net = netaddr.IPNetwork(CONF.network.project_network_cidr)
+
+    # Pick a prefix length that yields at least 'count' smaller subnets.
+    sub_prefix_len = (32 - _net.prefixlen) // count
+    if sub_prefix_len < 1:
+        return default_rtn
+
+    _new_cidr = _net.prefixlen + sub_prefix_len
+    return [str(net) for _, net in zip(range(count), _net.subnet(_new_cidr))]
+
+
 class ServersTestMultiNic(base.BaseV2ComputeTest):
     """Test multiple networks in servers"""
 
@@ -65,8 +87,9 @@
         The networks order given at the server creation is preserved within
         the server.
         """
-        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
-        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+        _cidrs = get_subnets()
+        net1 = self._create_net_subnet_ret_net_from_cidr(_cidrs[0])
+        net2 = self._create_net_subnet_ret_net_from_cidr(_cidrs[1])
 
         networks = [{'uuid': net1['network']['id']},
                     {'uuid': net2['network']['id']}]
@@ -86,14 +109,12 @@
                      ['addresses'])
 
-        # We can't predict the ip addresses assigned to the server on networks.
-        # Sometimes the assigned addresses are ['19.80.0.2', '19.86.0.2'], at
-        # other times ['19.80.0.3', '19.86.0.3']. So we check if the first
-        # address is in first network, similarly second address is in second
-        # network.
+        # We can't predict the IP addresses assigned to the server on the
+        # networks, so we check that the first address is in the first
+        # network and the second address is in the second network.
         addr = [addresses[net1['network']['name']][0]['addr'],
                 addresses[net2['network']['name']][0]['addr']]
-        networks = [netaddr.IPNetwork('19.80.0.0/24'),
-                    netaddr.IPNetwork('19.86.0.0/24')]
+        networks = [netaddr.IPNetwork(_cidrs[0]),
+                    netaddr.IPNetwork(_cidrs[1])]
         for address, network in zip(addr, networks):
             self.assertIn(address, network)
 
@@ -107,8 +128,9 @@
         """
         # Verify that server creation does not fail when more than one nic
         # is created on the same network.
-        net1 = self._create_net_subnet_ret_net_from_cidr('19.80.0.0/24')
-        net2 = self._create_net_subnet_ret_net_from_cidr('19.86.0.0/24')
+        _cidrs = get_subnets()
+        net1 = self._create_net_subnet_ret_net_from_cidr(_cidrs[0])
+        net2 = self._create_net_subnet_ret_net_from_cidr(_cidrs[1])
 
         networks = [{'uuid': net1['network']['id']},
                     {'uuid': net2['network']['id']},
@@ -124,8 +146,8 @@
         addr = [addresses[net1['network']['name']][0]['addr'],
                 addresses[net2['network']['name']][0]['addr'],
                 addresses[net1['network']['name']][1]['addr']]
-        networks = [netaddr.IPNetwork('19.80.0.0/24'),
-                    netaddr.IPNetwork('19.86.0.0/24'),
-                    netaddr.IPNetwork('19.80.0.0/24')]
+        networks = [netaddr.IPNetwork(_cidrs[0]),
+                    netaddr.IPNetwork(_cidrs[1]),
+                    netaddr.IPNetwork(_cidrs[0])]
         for address, network in zip(addr, networks):
             self.assertIn(address, network)
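A worked example of the new get_subnets() helper above, using an illustrative project_network_cidr value (the configured CIDR will vary per deployment):

    import netaddr

    # With project_network_cidr = '10.100.0.0/16' and count = 2:
    _net = netaddr.IPNetwork('10.100.0.0/16')
    sub_prefix_len = (32 - _net.prefixlen) // 2   # (32 - 16) // 2 = 8
    _new_cidr = _net.prefixlen + sub_prefix_len   # 24, i.e. /24 subnets
    subnets = [str(net) for _, net in zip(range(2), _net.subnet(_new_cidr))]
    # subnets == ['10.100.0.0/24', '10.100.1.0/24']

A /31 or /32 project network cannot be split in two (sub_prefix_len falls below 1), in which case the helper falls back to the hardcoded ['19.80.0.0/24', '19.86.0.0/24'] defaults these tests previously used.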
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 5380c67..7ea8f09 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -369,7 +369,9 @@
         kwargs = {}
         if bootable:
             kwargs['image_ref'] = CONF.compute.image_ref
-        return self.create_volume(multiattach=True, **kwargs)
+        multiattach_vol_type = CONF.volume.volume_type_multiattach
+        return self.create_volume(volume_type=multiattach_vol_type,
+                                  **kwargs)
 
     def _create_and_multiattach(self):
         """Creates two server instances and a volume and attaches to both.
diff --git a/tempest/cmd/account_generator.py b/tempest/cmd/account_generator.py
index ad0b547..f4f4b17 100755
--- a/tempest/cmd/account_generator.py
+++ b/tempest/cmd/account_generator.py
@@ -155,7 +155,7 @@
     # Create the list of resources to be provisioned for each process
     # NOTE(andreaf) get_credentials expects a string for types or a list for
     # roles. Adding all required inputs to the spec list.
-    spec = ['primary', 'alt']
+    spec = ['primary', 'alt', 'project_reader']
     if CONF.service_available.swift:
         spec.append([CONF.object_storage.operator_role])
         spec.append([CONF.object_storage.reseller_admin_role])
@@ -163,8 +163,13 @@
         spec.append('admin')
     resources = []
     for cred_type in spec:
+        scope = None
+        if "_" in cred_type:
+            scope = cred_type.split("_")[0]
+            cred_type = cred_type.split("_")[1:2]
+
         resources.append((cred_type, cred_provider.get_credentials(
-            credential_type=cred_type)))
+            credential_type=cred_type, scope=scope)))
     return resources
 
 
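For reference, the scope-splitting loop above leaves plain entries such as 'primary' unchanged (scope stays None), while 'project_reader' becomes scope='project' and cred_type=['reader']. The [1:2] slice deliberately keeps the result a list, matching the list form already used for role-based entries such as the Swift roles; for those list entries the '_' check is a list-membership test and stays False. A standalone sketch of the parsing:

    # Illustrative only: mirrors the parsing done in generate_resources().
    for cred_type in ['primary', 'alt', 'project_reader']:
        scope = None
        if "_" in cred_type:
            scope = cred_type.split("_")[0]        # 'project'
            cred_type = cred_type.split("_")[1:2]  # ['reader'], a list
        print(cred_type, scope)
    # -> primary None / alt None / ['reader'] project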
diff --git a/tempest/config.py b/tempest/config.py
index dfc0a8e..551578e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1015,6 +1015,10 @@
     cfg.StrOpt('volume_type',
                default='',
                help='Volume type to be used while creating volume.'),
+    cfg.StrOpt('volume_type_multiattach',
+               default='',
+               help='Multiattach volume type used when creating a '
+                    'multiattach volume.'),
     cfg.StrOpt('storage_protocol',
                default='iSCSI',
                help='Backend protocol to target when creating volume types'),
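Because the new option defaults to an empty string, consumers have to treat it as optional; with no type configured, volume creation falls through to Cinder's default volume type, which is typically not multiattach-capable. A hedged sketch of a skip_checks-style guard a test class might add (class name and message are illustrative):

    @classmethod
    def skip_checks(cls):
        super(ExampleMultiattachTest, cls).skip_checks()
        # Hypothetical guard: skip rather than fail when no multiattach
        # volume type has been configured for this deployment.
        if not CONF.volume.volume_type_multiattach:
            raise cls.skipException(
                'volume_type_multiattach is not configured')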
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index bf3f62f..db0aa5a 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -325,13 +325,15 @@
 
     def create_volume(self, size=None, name=None, snapshot_id=None,
                       imageRef=None, volume_type=None, wait_until='available',
-                      **kwargs):
+                      client=None, **kwargs):
         """Creates volume
 
-        This wrapper utility creates volume and waits for volume to be
-        in 'available' state by default. If wait_until is None, means no wait.
+        This wrapper utility creates a volume and, by default, waits for it
+        to reach 'available'. If wait_until is None, it does not wait.
         This method returns the volume's full representation by GET request.
         """
+        if client is None:
+            client = self.volumes_client
 
         if size is None:
             size = CONF.volume.volume_size
@@ -355,19 +357,20 @@
             kwargs.setdefault('availability_zone',
                               CONF.compute.compute_volume_common_az)
 
-        volume = self.volumes_client.create_volume(**kwargs)['volume']
+        volume = client.create_volume(**kwargs)['volume']
 
-        self.addCleanup(self.volumes_client.wait_for_resource_deletion,
+        self.addCleanup(client.wait_for_resource_deletion,
                         volume['id'])
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.volumes_client.delete_volume, volume['id'])
+                        client.delete_volume, volume['id'])
         self.assertEqual(name, volume['name'])
         if wait_until:
-            waiters.wait_for_volume_resource_status(self.volumes_client,
+            waiters.wait_for_volume_resource_status(client,
                                                     volume['id'], wait_until)
             # The volume retrieved on creation has a non-up-to-date status.
             # Retrieval after it becomes active ensures correct details.
-            volume = self.volumes_client.show_volume(volume['id'])['volume']
+            volume = client.show_volume(volume['id'])['volume']
+
         return volume
 
     def create_backup(self, volume_id, name=None, description=None,
@@ -858,32 +861,43 @@
                   image_name, server['name'])
         return snapshot_image
 
-    def nova_volume_attach(self, server, volume_to_attach, **kwargs):
+    def nova_volume_attach(self, server, volume_to_attach,
+                           volumes_client=None, servers_client=None,
+                           **kwargs):
         """Compute volume attach
 
-        This utility attaches volume from compute and waits for the
-        volume status to be 'in-use' state.
+        This utility attaches a volume to the server via the compute API
+        and waits for the volume status to reach 'in-use'.
         """
-        volume = self.servers_client.attach_volume(
+        if volumes_client is None:
+            volumes_client = self.volumes_client
+        if servers_client is None:
+            servers_client = self.servers_client
+
+        volume = servers_client.attach_volume(
             server['id'], volumeId=volume_to_attach['id'],
             **kwargs)['volumeAttachment']
         self.assertEqual(volume_to_attach['id'], volume['id'])
-        waiters.wait_for_volume_resource_status(self.volumes_client,
+        waiters.wait_for_volume_resource_status(volumes_client,
                                                 volume['id'], 'in-use')
         self.addCleanup(test_utils.call_and_ignore_notfound_exc,
-                        self.nova_volume_detach, server, volume)
+                        self.nova_volume_detach, server, volume,
+                        servers_client)
         # Return the updated volume after the attachment
-        return self.volumes_client.show_volume(volume['id'])['volume']
+        return volumes_client.show_volume(volume['id'])['volume']
 
-    def nova_volume_detach(self, server, volume):
+    def nova_volume_detach(self, server, volume, servers_client=None):
         """Compute volume detach
 
         This utility detaches the volume from the server and checks whether the
         volume attachment has been removed from Nova.
         """
-        self.servers_client.detach_volume(server['id'], volume['id'])
+        if servers_client is None:
+            servers_client = self.servers_client
+
+        servers_client.detach_volume(server['id'], volume['id'])
         waiters.wait_for_volume_attachment_remove_from_server(
-            self.servers_client, server['id'], volume['id'])
+            servers_client, server['id'], volume['id'])
 
     def ping_ip_address(self, ip_address, should_succeed=True,
                         ping_timeout=None, mtu=None, server=None):
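The new client, volumes_client and servers_client parameters let scenario tests drive these helpers with alternate credentials instead of the class defaults. A hedged usage sketch from inside a scenario test (the admin client attribute names are illustrative):

    # Sketch: run the volume lifecycle against admin clients.
    volume = self.create_volume(client=self.admin_volumes_client)
    attachment = self.nova_volume_attach(
        server, volume,
        volumes_client=self.admin_volumes_client,
        servers_client=self.admin_servers_client)
    self.nova_volume_detach(server, volume,
                            servers_client=self.admin_servers_client)

Note that nova_volume_attach registers its detach cleanup with the same servers_client it was given, so the attach and the automatic detach stay on matching credentials.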
diff --git a/tempest/tests/cmd/test_account_generator.py b/tempest/tests/cmd/test_account_generator.py
index 7d764be..9647467 100644
--- a/tempest/tests/cmd/test_account_generator.py
+++ b/tempest/tests/cmd/test_account_generator.py
@@ -153,13 +153,14 @@
         resources = account_generator.generate_resources(
             self.cred_provider, admin=False)
         resource_types = [k for k, _ in resources]
-        # No admin, no swift, expect two credentials only
-        self.assertEqual(2, len(resources))
-        # Ensure create_user was invoked twice (two distinct users)
-        self.assertEqual(2, self.user_create_fixture.mock.call_count)
+        # No admin, no swift, expect three credentials only
+        self.assertEqual(3, len(resources))
+        # Ensure create_user was invoked three times (three distinct users)
+        self.assertEqual(3, self.user_create_fixture.mock.call_count)
         self.assertIn('primary', resource_types)
         self.assertIn('alt', resource_types)
         self.assertNotIn('admin', resource_types)
+        self.assertIn(['reader'], resource_types)
         self.assertNotIn(['fake_operator'], resource_types)
         self.assertNotIn(['fake_reseller'], resource_types)
         self.assertNotIn(['fake_owner'], resource_types)
@@ -178,12 +179,13 @@
             self.cred_provider, admin=True)
         resource_types = [k for k, _ in resources]
-        # Admin, no swift, expect three credentials only
+        # Admin, no swift, expect four credentials only
-        self.assertEqual(3, len(resources))
-        # Ensure create_user was invoked 3 times (3 distinct users)
-        self.assertEqual(3, self.user_create_fixture.mock.call_count)
+        self.assertEqual(4, len(resources))
+        # Ensure create_user was invoked 4 times (4 distinct users)
+        self.assertEqual(4, self.user_create_fixture.mock.call_count)
         self.assertIn('primary', resource_types)
         self.assertIn('alt', resource_types)
         self.assertIn('admin', resource_types)
+        self.assertIn(['reader'], resource_types)
         self.assertNotIn(['fake_operator'], resource_types)
         self.assertNotIn(['fake_reseller'], resource_types)
         self.assertNotIn(['fake_owner'], resource_types)
@@ -201,13 +203,14 @@
         resources = account_generator.generate_resources(
             self.cred_provider, admin=True)
         resource_types = [k for k, _ in resources]
-        # all options on, expect five credentials
-        self.assertEqual(5, len(resources))
-        # Ensure create_user was invoked 5 times (5 distinct users)
-        self.assertEqual(5, self.user_create_fixture.mock.call_count)
+        # all options on, expect six credentials
+        self.assertEqual(6, len(resources))
+        # Ensure create_user was invoked 6 times (6 distinct users)
+        self.assertEqual(6, self.user_create_fixture.mock.call_count)
         self.assertIn('primary', resource_types)
         self.assertIn('alt', resource_types)
         self.assertIn('admin', resource_types)
+        self.assertIn(['reader'], resource_types)
         self.assertIn(['fake_operator'], resource_types)
         self.assertIn(['fake_reseller'], resource_types)
         for resource in resources:
@@ -224,13 +227,14 @@
         resources = account_generator.generate_resources(
             self.cred_provider, admin=False)
         resource_types = [k for k, _ in resources]
-        # No Admin, swift, expect four credentials only
-        self.assertEqual(4, len(resources))
-        # Ensure create_user was invoked 4 times (4 distinct users)
-        self.assertEqual(4, self.user_create_fixture.mock.call_count)
+        # No Admin, swift, expect five credentials only
+        self.assertEqual(5, len(resources))
+        # Ensure create_user was invoked 5 times (5 distinct users)
+        self.assertEqual(5, self.user_create_fixture.mock.call_count)
         self.assertIn('primary', resource_types)
         self.assertIn('alt', resource_types)
         self.assertNotIn('admin', resource_types)
+        self.assertIn(['reader'], resource_types)
         self.assertIn(['fake_operator'], resource_types)
         self.assertIn(['fake_reseller'], resource_types)
         self.assertNotIn(['fake_owner'], resource_types)
@@ -284,14 +288,14 @@
         # Ordered args in [0], keyword args in [1]
         accounts, f = yaml_dump_mock.call_args[0]
         self.assertEqual(handle, f)
-        self.assertEqual(5, len(accounts))
+        self.assertEqual(6, len(accounts))
         if self.domain_is_in:
             self.assertIn('domain_name', accounts[0].keys())
         else:
             self.assertNotIn('domain_name', accounts[0].keys())
         self.assertEqual(1, len([x for x in accounts if
                                  x.get('types') == ['admin']]))
-        self.assertEqual(2, len([x for x in accounts if 'roles' in x]))
+        self.assertEqual(3, len([x for x in accounts if 'roles' in x]))
         for account in accounts:
             self.assertIn('resources', account)
             self.assertIn('network', account.get('resources'))
@@ -315,14 +319,14 @@
         # Ordered args in [0], keyword args in [1]
         accounts, f = yaml_dump_mock.call_args[0]
         self.assertEqual(handle, f)
-        self.assertEqual(5, len(accounts))
+        self.assertEqual(6, len(accounts))
         if self.domain_is_in:
             self.assertIn('domain_name', accounts[0].keys())
         else:
             self.assertNotIn('domain_name', accounts[0].keys())
         self.assertEqual(1, len([x for x in accounts if
                                  x.get('types') == ['admin']]))
-        self.assertEqual(2, len([x for x in accounts if 'roles' in x]))
+        self.assertEqual(3, len([x for x in accounts if 'roles' in x]))
         for account in accounts:
             self.assertIn('resources', account)
             self.assertIn('network', account.get('resources'))
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 4f21956..233cb6c 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -62,7 +62,7 @@
 
 - job:
     name: tempest-extra-tests
-    parent: devstack-tempest
+    parent: tempest-full-py3
     description: |
       This job runs the extra tests mentioned in
       tools/tempest-extra-tests-list.txt.
@@ -342,11 +342,11 @@
     description: |
-      Integration testing for a FIPS enabled Centos 8 system
+      Integration testing for a FIPS-enabled CentOS 8 system
     nodeset: devstack-single-node-centos-8-stream
-    pre-run: playbooks/enable-fips.yaml
     vars:
       tox_envlist: full
       configure_swap_size: 4096
       nslookup_target: 'opendev.org'
+      enable_fips: True
 
 - job:
     name: tempest-centos9-stream-fips
@@ -355,11 +355,11 @@
-      Integration testing for a FIPS enabled Centos 9 system
+      Integration testing for a FIPS-enabled CentOS 9 system
     timeout: 10800
     nodeset: devstack-single-node-centos-9-stream
-    pre-run: playbooks/enable-fips.yaml
     vars:
       tox_envlist: full
       configure_swap_size: 4096
       nslookup_target: 'opendev.org'
+      enable_fips: True
 
 - job:
     name: tempest-pg-full
@@ -410,6 +410,8 @@
         - grenade
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -444,6 +446,17 @@
       jobs:
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
+        # NOTE(gmann): Nova decided to always run grenade skip level testing
+        # (on SLURP as well as non-SLURP releases), so we add the
+        # grenade-skip-level-always job to the integrated gate and do not
+        # need to update the skip level job here until Nova changes that
+        # decision. This is added from the 2023.2 release cycle onwards, so
+        # we use a branch variant to keep it off gates older than 2023.2.
+        - grenade-skip-level-always:
+            branches:
+              - master
         - tempest-integrated-compute
         # centos-8-stream is tested from wallaby -> yoga branches
         - tempest-integrated-compute-centos-8-stream:
@@ -456,6 +469,9 @@
             branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
+        - grenade-skip-level-always:
+            branches:
+              - master
         - tempest-integrated-compute
         - openstacksdk-functional-devstack:
             branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
@@ -477,6 +493,8 @@
         - grenade
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
         - tempest-integrated-placement
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
@@ -507,6 +525,8 @@
         - grenade
         - grenade-skip-level:
             voting: false
+            branches:
+              - stable/2023.1
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 3df61d8..be8442a 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -33,7 +33,12 @@
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-zed:
+        # NOTE(gmann): We test the latest and oldest supported stable
+        # branches in the Tempest master gate, on the assumption that if
+        # things work on the latest and oldest branches they will also
+        # work on the stable branches in between. Anything that breaks
+        # will be caught in the respective stable branch gate.
+        - tempest-full-2023-1:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
             irrelevant-files: *tempest-irrelevant-files
@@ -161,6 +166,7 @@
         - tempest-full-zed-extra-tests
         - tempest-full-yoga-extra-tests
         - tempest-full-xena-extra-tests
+        - tempest-full-enforce-scope-new-defaults-zed
         - neutron-ovs-tempest-dvr-ha-multinode-full:
             irrelevant-files: *tempest-irrelevant-files
         - nova-tempest-v2-api:
@@ -179,12 +185,15 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-2023-1
         - tempest-full-zed
         - tempest-full-yoga
         - tempest-full-xena
+        - tempest-slow-2023-1
         - tempest-slow-zed
         - tempest-slow-yoga
         - tempest-slow-xena
+        - tempest-full-2023-1-extra-tests
         - tempest-full-zed-extra-tests
         - tempest-full-yoga-extra-tests
         - tempest-full-xena-extra-tests
@@ -199,3 +208,4 @@
         - tempest-centos9-stream-fips
         - tempest-full-centos-9-stream
         - tempest-full-test-account-no-admin-py3
+        - tempest-full-enforce-scope-new-defaults-zed
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 8aeb748..c5fc063 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,11 @@
-# NOTE(gmann): This file includes all stable release jobs definition.
+# NOTE(gmann): This file includes all stable release job definitions.
 - job:
+    name: tempest-full-2023-1
+    parent: tempest-full-py3
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.1
+
+- job:
     name: tempest-full-zed
     parent: tempest-full-py3
     nodeset: openstack-single-node-focal
@@ -18,6 +24,12 @@
     override-checkout: stable/xena
 
 - job:
+    name: tempest-full-2023-1-extra-tests
+    parent: tempest-extra-tests
+    nodeset: openstack-single-node-jammy
+    override-checkout: stable/2023.1
+
+- job:
     name: tempest-full-zed-extra-tests
     parent: tempest-extra-tests
     nodeset: openstack-single-node-focal
@@ -36,6 +48,18 @@
     override-checkout: stable/xena
 
 - job:
+    name: tempest-slow-2023-1
+    parent: tempest-slow-py3
+    nodeset: openstack-two-node-jammy
+    override-checkout: stable/2023.1
+
+- job:
+    name: tempest-full-enforce-scope-new-defaults-zed
+    parent: tempest-full-enforce-scope-new-defaults
+    nodeset: openstack-single-node-focal
+    override-checkout: stable/zed
+
+- job:
     name: tempest-slow-zed
     parent: tempest-slow-py3
     nodeset: openstack-two-node-focal