Merge "Fix test_basic_metadata_definition_namespaces"
diff --git a/releasenotes/notes/end-of-support-for-ussuri-68583f47805eff02.yaml b/releasenotes/notes/end-of-support-for-ussuri-68583f47805eff02.yaml
new file mode 100644
index 0000000..1a750d9
--- /dev/null
+++ b/releasenotes/notes/end-of-support-for-ussuri-68583f47805eff02.yaml
@@ -0,0 +1,13 @@
+---
+prelude: |
+    This is an intermediate release during the Zed development cycle to
+    mark the end of support for the EM Ussuri release in Tempest.
+    After this release, Tempest will support the below OpenStack releases:
+
+    * Yoga
+    * Xena
+    * Wallaby
+    * Victoria
+
+    Current development of Tempest is for OpenStack Zed development
+    cycle.
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 549d4fb..99d8e2a 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -85,10 +85,14 @@
             hw_scsi_model='virtio-scsi',
             hw_disk_bus='scsi',
             hw_cdrom_bus='scsi')
-        server = self.create_test_server(image_id=custom_img,
-                                         config_drive=True,
-                                         wait_until='ACTIVE')
-
+        validation_resources = self.get_test_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            image_id=custom_img,
+            config_drive=True,
+            validatable=True,
+            validation_resources=validation_resources,
+            wait_until="SSHABLE")
         # NOTE(lyarwood): self.create_test_server delete the server
         # at class level cleanup so add server cleanup to ensure that
         # the instance is deleted first before created image. This
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index d099fce..7d29a4d 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -402,7 +402,7 @@
             config_drive=config_drive_enabled,
             name=data_utils.rand_name('device-tagging-server'),
             networks=[{'uuid': self.get_tenant_network()['id']}],
-            wait_until='ACTIVE')
+            wait_until='SSHABLE')
         self.addCleanup(self.delete_server, server['id'])
 
         # NOTE(mgoddard): Get detailed server to ensure addresses are present
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 354e3b9..716ecda 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import compute
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
@@ -112,7 +113,6 @@
 
 
 class BaseServerStableDeviceRescueTest(base.BaseV2ComputeTest):
-    create_default_network = True
 
     @classmethod
     def skip_checks(cls):
@@ -124,19 +124,31 @@
             msg = "Stable rescue not available."
             raise cls.skipException(msg)
 
+    @classmethod
+    def setup_credentials(cls):
+        cls.set_network_resources(network=True, subnet=True, router=True,
+                                  dhcp=True)
+        super(BaseServerStableDeviceRescueTest, cls).setup_credentials()
+
     def _create_server_and_rescue_image(self, hw_rescue_device=None,
                                         hw_rescue_bus=None,
-                                        block_device_mapping_v2=None):
-
-        server_id = self.create_test_server(
-            wait_until='ACTIVE')['id']
+                                        block_device_mapping_v2=None,
+                                        validatable=False,
+                                        validation_resources=None,
+                                        wait_until='ACTIVE'):
+        server = self.create_test_server(
+            wait_until=wait_until,
+            validatable=validatable,
+            validation_resources=validation_resources)
         image_id = self.create_image_from_server(
-            server_id, wait_until='ACTIVE')['id']
+            server['id'], wait_until='ACTIVE')['id']
 
         if block_device_mapping_v2:
-            server_id = self.create_test_server(
-                wait_until='ACTIVE',
-                block_device_mapping_v2=block_device_mapping_v2)['id']
+            server = self.create_test_server(
+                wait_until=wait_until,
+                validatable=validatable,
+                validation_resources=validation_resources,
+                block_device_mapping_v2=block_device_mapping_v2)
 
         if hw_rescue_bus:
             self.images_client.update_image(
@@ -146,16 +158,28 @@
             self.images_client.update_image(
                 image_id, [dict(add='/hw_rescue_device',
                                 value=hw_rescue_device)])
-        return server_id, image_id
+        return server, image_id
 
-    def _test_stable_device_rescue(self, server_id, rescue_image_id):
+    def _test_stable_device_rescue(
+            self, server, rescue_image_id,
+            validation_resources=None):
         self.servers_client.rescue_server(
-            server_id, rescue_image_ref=rescue_image_id)
+            server['id'], rescue_image_ref=rescue_image_id)
         waiters.wait_for_server_status(
-            self.servers_client, server_id, 'RESCUE')
-        self.servers_client.unrescue_server(server_id)
-        waiters.wait_for_server_status(
-            self.servers_client, server_id, 'ACTIVE')
+            self.servers_client, server['id'], 'RESCUE')
+        self.servers_client.unrescue_server(server['id'])
+        # NOTE(gmann) In next addCleanup, server unrescue is called before the
+        # detach volume is called in cleanup (added by self.attach_volume()
+        # method) so to make sure server is ready before detach operation, we
+        # need to perform ssh on it, more details are in bug#1960346.
+        if validation_resources and CONF.validation.run_validation:
+            tenant_network = self.get_tenant_network()
+            compute.wait_for_ssh_or_ping(
+                server, self.os_primary, tenant_network,
+                True, validation_resources, "SSHABLE", True)
+        else:
+            waiters.wait_for_server_status(
+                self.servers_client, server['id'], 'ACTIVE')
 
 
 class ServerStableDeviceRescueTestIDE(BaseServerStableDeviceRescueTest):
@@ -172,9 +196,9 @@
                       "Aarch64 does not support ide bus for cdrom")
     def test_stable_device_rescue_cdrom_ide(self):
         """Test rescuing server with cdrom and ide as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='cdrom', hw_rescue_bus='ide')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
 
 class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
@@ -183,23 +207,23 @@
     @decorators.idempotent_id('16865750-1417-4854-bcf7-496e6753c01e')
     def test_stable_device_rescue_disk_virtio(self):
         """Test rescuing server with disk and virtio as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='virtio')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.idempotent_id('12340157-6306-4745-bdda-cfa019908b48')
     def test_stable_device_rescue_disk_scsi(self):
         """Test rescuing server with disk and scsi as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='scsi')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.idempotent_id('647d04cf-ad35-4956-89ab-b05c5c16f30c')
     def test_stable_device_rescue_disk_usb(self):
         """Test rescuing server with disk and usb as the rescue disk"""
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='usb')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
     @utils.services('volume')
@@ -209,14 +233,25 @@
         Attach a volume to the server and then rescue the server with disk
         and virtio as the rescue disk.
         """
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
-            hw_rescue_device='disk', hw_rescue_bus='virtio')
-        server = self.servers_client.show_server(server_id)['server']
+        # This test just checks that the detach fails and does not
+        # perform the detach operation, but in cleanup from
+        # self.attach_volume() it will try to detach the volume from
+        # the server after unrescuing it. Due to that we need to make
+        # the server SSHable before it tries to detach; more details
+        # are in bug#1960346
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        server, rescue_image_id = self._create_server_and_rescue_image(
+            hw_rescue_device='disk', hw_rescue_bus='virtio', validatable=True,
+            validation_resources=validation_resources, wait_until="SSHABLE")
+        server = self.servers_client.show_server(server['id'])['server']
         volume = self.create_volume()
         self.attach_volume(server, volume)
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume['id'], 'in-use')
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(
+            server, rescue_image_id,
+            validation_resources=validation_resources)
 
 
 class ServerBootFromVolumeStableRescueTest(BaseServerStableDeviceRescueTest):
@@ -248,10 +283,10 @@
             "source_type": "blank",
             "volume_size": CONF.volume.volume_size,
             "destination_type": "volume"}]
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='virtio',
             block_device_mapping_v2=block_device_mapping_v2)
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
 
     @decorators.attr(type='slow')
     @decorators.idempotent_id('e4636333-c928-40fc-98b7-70a23eef4224')
@@ -267,7 +302,7 @@
             "volume_size": CONF.volume.volume_size,
             "uuid": CONF.compute.image_ref,
             "destination_type": "volume"}]
-        server_id, rescue_image_id = self._create_server_and_rescue_image(
+        server, rescue_image_id = self._create_server_and_rescue_image(
             hw_rescue_device='disk', hw_rescue_bus='virtio',
             block_device_mapping_v2=block_device_mapping_v2)
-        self._test_stable_device_rescue(server_id, rescue_image_id)
+        self._test_stable_device_rescue(server, rescue_image_id)
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 9bcf062..955ba1c 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -16,6 +16,7 @@
 import testtools
 
 from tempest.api.compute import base
+from tempest.common import compute
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
@@ -38,7 +39,8 @@
 
     @classmethod
     def setup_credentials(cls):
-        cls.set_network_resources(network=True, subnet=True, router=True)
+        cls.set_network_resources(network=True, subnet=True, router=True,
+                                  dhcp=True)
         super(ServerRescueNegativeTestJSON, cls).setup_credentials()
 
     @classmethod
@@ -136,21 +138,41 @@
     def test_rescued_vm_detach_volume(self):
         """Test detaching volume from a rescued server should fail"""
         volume = self.create_volume()
-
+        # This test just checks that the detach fails and does not
+        # perform the detach operation, but in cleanup from
+        # self.attach_volume() it will try to detach the volume from
+        # the server after unrescuing it. Due to that we need to make
+        # the server SSHable before it tries to detach; more details
+        # are in bug#1960346
+        validation_resources = self.get_class_validation_resources(
+            self.os_primary)
+        server = self.create_test_server(
+            adminPass=self.password,
+            wait_until="SSHABLE",
+            validatable=True,
+            validation_resources=validation_resources)
         # Attach the volume to the server
-        server = self.servers_client.show_server(self.server_id)['server']
         self.attach_volume(server, volume)
 
         # Rescue the server
-        self.servers_client.rescue_server(self.server_id,
+        self.servers_client.rescue_server(server['id'],
                                           adminPass=self.password)
         waiters.wait_for_server_status(self.servers_client,
-                                       self.server_id, 'RESCUE')
+                                       server['id'], 'RESCUE')
+        # NOTE(gmann) In next addCleanup, server unrescue is called before the
+        # detach volume is called in cleanup (added by self.attach_volume()
+        # method) so to make sure server is ready before detach operation, we
+        # need to perform ssh on it, more details are in bug#1960346.
+        if CONF.validation.run_validation:
+            tenant_network = self.get_tenant_network()
+            self.addCleanup(compute.wait_for_ssh_or_ping,
+                            server, self.os_primary, tenant_network,
+                            True, validation_resources, "SSHABLE", True)
         # addCleanup is a LIFO queue
-        self.addCleanup(self._unrescue, self.server_id)
+        self.addCleanup(self._unrescue, server['id'])
 
         # Detach the volume from the server expecting failure
         self.assertRaises(lib_exc.Conflict,
                           self.servers_client.detach_volume,
-                          self.server_id,
+                          server['id'],
                           volume['id'])
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 43e30ad..eb7e366 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -15,7 +15,7 @@
 
 import base64
 import socket
-import ssl
+from ssl import SSLContext as sslc
 import struct
 import textwrap
 from urllib import parse as urlparse
@@ -84,6 +84,73 @@
         raise lib_exc.InvalidConfiguration()
 
 
+def _setup_validation_fip(
+        server, clients, tenant_network, validation_resources):
+    if CONF.service_available.neutron:
+        ifaces = clients.interfaces_client.list_interfaces(server['id'])
+        validation_port = None
+        for iface in ifaces['interfaceAttachments']:
+            if iface['net_id'] == tenant_network['id']:
+                validation_port = iface['port_id']
+                break
+        if not validation_port:
+            # NOTE(artom) This will get caught by the catch-all clause in
+            # the wait_until loop below
+            raise ValueError('Unable to setup floating IP for validation: '
+                             'port not found on tenant network')
+        clients.floating_ips_client.update_floatingip(
+            validation_resources['floating_ip']['id'],
+            port_id=validation_port)
+    else:
+        fip_client = clients.compute_floating_ips_client
+        fip_client.associate_floating_ip_to_server(
+            floating_ip=validation_resources['floating_ip']['ip'],
+            server_id=server['id'])
+
+
+def wait_for_ssh_or_ping(server, clients, tenant_network,
+                         validatable, validation_resources, wait_until,
+                         set_floatingip):
+    """Wait for the server for SSH or Ping as requested.
+
+    :param server: The server dict as returned by the API
+    :param clients: Client manager which provides OpenStack Tempest clients.
+    :param tenant_network: Tenant network to be used for creating a server.
+    :param validatable: Whether the server will be pingable or sshable.
+    :param validation_resources: Resources created for the connection to the
+        server. Include a keypair, a security group and an IP.
+    :param wait_until: Server status to wait for the server to reach.
+        It can be PINGABLE and SSHABLE states when the server is both
+        validatable and has the required validation_resources provided.
+    :param set_floatingip: If FIP needs to be associated to server
+    """
+    if set_floatingip and CONF.validation.connect_method == 'floating':
+        _setup_validation_fip(
+            server, clients, tenant_network, validation_resources)
+
+    server_ip = get_server_ip(
+        server, validation_resources=validation_resources)
+    if wait_until == 'PINGABLE':
+        waiters.wait_for_ping(
+            server_ip,
+            clients.servers_client.build_timeout,
+            clients.servers_client.build_interval
+        )
+    if wait_until == 'SSHABLE':
+        pkey = validation_resources['keypair']['private_key']
+        ssh_client = remote_client.RemoteClient(
+            server_ip,
+            CONF.validation.image_ssh_user,
+            pkey=pkey,
+            server=server,
+            servers_client=clients.servers_client
+        )
+        waiters.wait_for_ssh(
+            ssh_client,
+            clients.servers_client.build_timeout
+        )
+
+
 def create_test_server(clients, validatable=False, validation_resources=None,
                        tenant_network=None, wait_until=None,
                        volume_backed=False, name=None, flavor=None,
@@ -237,28 +304,6 @@
         body = rest_client.ResponseBody(body.response, body['server'])
         servers = [body]
 
-    def _setup_validation_fip():
-        if CONF.service_available.neutron:
-            ifaces = clients.interfaces_client.list_interfaces(server['id'])
-            validation_port = None
-            for iface in ifaces['interfaceAttachments']:
-                if iface['net_id'] == tenant_network['id']:
-                    validation_port = iface['port_id']
-                    break
-            if not validation_port:
-                # NOTE(artom) This will get caught by the catch-all clause in
-                # the wait_until loop below
-                raise ValueError('Unable to setup floating IP for validation: '
-                                 'port not found on tenant network')
-            clients.floating_ips_client.update_floatingip(
-                validation_resources['floating_ip']['id'],
-                port_id=validation_port)
-        else:
-            fip_client = clients.compute_floating_ips_client
-            fip_client.associate_floating_ip_to_server(
-                floating_ip=validation_resources['floating_ip']['ip'],
-                server_id=servers[0]['id'])
-
     if wait_until:
 
         # NOTE(lyarwood): PINGABLE and SSHABLE both require the instance to
@@ -274,35 +319,16 @@
                 waiters.wait_for_server_status(
                     clients.servers_client, server['id'], wait_until,
                     request_id=request_id)
-
                 if CONF.validation.run_validation and validatable:
-
                     if CONF.validation.connect_method == 'floating':
-                        _setup_validation_fip()
-
-                    server_ip = get_server_ip(
-                        server, validation_resources=validation_resources)
-
-                    if wait_until_extra == 'PINGABLE':
-                        waiters.wait_for_ping(
-                            server_ip,
-                            clients.servers_client.build_timeout,
-                            clients.servers_client.build_interval
-                        )
-
-                    if wait_until_extra == 'SSHABLE':
-                        pkey = validation_resources['keypair']['private_key']
-                        ssh_client = remote_client.RemoteClient(
-                            server_ip,
-                            CONF.validation.image_ssh_user,
-                            pkey=pkey,
-                            server=server,
-                            servers_client=clients.servers_client
-                        )
-                        waiters.wait_for_ssh(
-                            ssh_client,
-                            clients.servers_client.build_timeout
-                        )
+                        _setup_validation_fip(
+                            server, clients, tenant_network,
+                            validation_resources)
+                    if wait_until_extra:
+                        wait_for_ssh_or_ping(
+                            server, clients, tenant_network,
+                            validatable, validation_resources,
+                            wait_until_extra, False)
 
             except Exception:
                 with excutils.save_and_reraise_exception():
@@ -369,7 +395,8 @@
         af, socktype, proto, _, sa = res
         client_socket = socket.socket(af, socktype, proto)
         if url.scheme == 'https':
-            client_socket = ssl.wrap_socket(client_socket)
+            client_socket = sslc().wrap_socket(client_socket,
+                                               server_hostname=url.hostname)
         client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
         try:
             client_socket.connect(sa)
diff --git a/tempest/config.py b/tempest/config.py
index 0f509fb..b4d4891 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -876,7 +876,7 @@
     cfg.StrOpt('qos_placement_physnet', default=None,
                help='Name of the physnet for placement based minimum '
                     'bandwidth allocation.'),
-    cfg.StrOpt('provider_net_base_segmentation_id', default=3000,
+    cfg.StrOpt('provider_net_base_segmentation_id', default='3000',
                help='Base segmentation ID to create provider networks. '
                     'This value will be increased in case of conflict.'),
     cfg.BoolOpt('qos_min_bw_and_pps', default=False,
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 7aa96b2..1d24bc1 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -641,7 +641,8 @@
 
     def create_loginable_secgroup_rule(self, security_group_rules_client=None,
                                        secgroup=None,
-                                       security_groups_client=None):
+                                       security_groups_client=None,
+                                       rulesets=None):
         """Create loginable security group rule by neutron clients by default.
 
         This function will create:
@@ -655,24 +656,26 @@
             security_group_rules_client = self.security_group_rules_client
         if security_groups_client is None:
             security_groups_client = self.security_groups_client
+        if rulesets is None:
+            rulesets = [
+                dict(
+                    # ssh
+                    protocol='tcp',
+                    port_range_min=22,
+                    port_range_max=22,
+                ),
+                dict(
+                    # ping
+                    protocol='icmp',
+                ),
+                dict(
+                    # ipv6-icmp for ping6
+                    protocol='icmp',
+                    ethertype='IPv6',
+                )
+            ]
+
         rules = []
-        rulesets = [
-            dict(
-                # ssh
-                protocol='tcp',
-                port_range_min=22,
-                port_range_max=22,
-            ),
-            dict(
-                # ping
-                protocol='icmp',
-            ),
-            dict(
-                # ipv6-icmp for ping6
-                protocol='icmp',
-                ethertype='IPv6',
-            )
-        ]
         sec_group_rules_client = security_group_rules_client
         for ruleset in rulesets:
             for r_direction in ['ingress', 'egress']:
diff --git a/tempest/scenario/test_compute_unified_limits.py b/tempest/scenario/test_compute_unified_limits.py
index bacf526..eda6d6f 100644
--- a/tempest/scenario/test_compute_unified_limits.py
+++ b/tempest/scenario/test_compute_unified_limits.py
@@ -13,8 +13,6 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
-import testtools
-
 from tempest.common import utils
 from tempest.common import waiters
 from tempest import config
@@ -25,8 +23,6 @@
 CONF = config.CONF
 
 
-@testtools.skipUnless(CONF.compute_feature_enabled.unified_limits,
-                      'Compute unified limits are not enabled')
 class ComputeProjectQuotaTest(manager.ScenarioTest):
     """The test base class for compute unified limits tests.
 
@@ -41,6 +37,12 @@
     force_tenant_isolation = True
 
     @classmethod
+    def skip_checks(cls):
+        super(ComputeProjectQuotaTest, cls).skip_checks()
+        if not CONF.compute_feature_enabled.unified_limits:
+            raise cls.skipException('Compute unified limits are not enabled.')
+
+    @classmethod
     def resource_setup(cls):
         super(ComputeProjectQuotaTest, cls).resource_setup()
 
@@ -67,8 +69,6 @@
             self.limit_ids[name], value)
 
 
-@testtools.skipUnless(CONF.compute_feature_enabled.unified_limits,
-                      'Compute unified limits are not enabled')
 class ServersQuotaTest(ComputeProjectQuotaTest):
 
     @classmethod
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index e62f24a..eb8b793 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -35,14 +35,14 @@
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-yoga:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-wallaby-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-victoria-py3:
             irrelevant-files: *tempest-irrelevant-files
-        - tempest-full-ussuri-py3:
-            irrelevant-files: *tempest-irrelevant-files
         - tempest-multinode-full-py3:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-tox-plugin-sanity-check:
@@ -145,6 +145,8 @@
             irrelevant-files: *tempest-irrelevant-files-3
         - devstack-plugin-ceph-tempest-py3:
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-centos-9-stream:
+            irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
         - tempest-with-latest-microversion
@@ -165,10 +167,10 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-yoga
         - tempest-full-xena
         - tempest-full-wallaby-py3
         - tempest-full-victoria-py3
-        - tempest-full-ussuri-py3
     periodic:
       jobs:
         - tempest-all
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 5cc0dd0..8086458 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,10 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-yoga
+    parent: tempest-full-py3
+    override-checkout: stable/yoga
+
+- job:
     name: tempest-full-xena
     parent: tempest-full-py3
     override-checkout: stable/xena
@@ -15,12 +20,6 @@
     override-checkout: stable/victoria
 
 - job:
-    name: tempest-full-ussuri-py3
-    parent: tempest-full-py3
-    nodeset: openstack-single-node-bionic
-    override-checkout: stable/ussuri
-
-- job:
     name: tempest-full-py3
     parent: devstack-tempest
     # This job version is with swift disabled on py3
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 7d28e5c..a4a4b67 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -85,7 +85,7 @@
 - job:
     name: tempest-full-centos-9-stream
     parent: tempest-full-py3-centos-8-stream
-    voting: false
+    voting: true
     nodeset: devstack-single-node-centos-9-stream
 
 - job: