Merge "Fix incorrect subnet_id for ipv6 member servers"
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index aea476e..174589c 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -21,6 +21,7 @@
 ADMIN_STATE_UP = 'admin_state_up'
 BYTES_IN = 'bytes_in'
 BYTES_OUT = 'bytes_out'
+CA_TLS_CONTAINER_REF = 'ca_tls_container_ref'
 CLIENT_AUTHENTICATION = 'client_authentication'
 CLIENT_AUTH_NONE = 'NONE'
 CLIENT_AUTH_OPTIONAL = 'OPTIONAL'
@@ -28,6 +29,7 @@
 CLIENT_CA_TLS_CONTAINER_REF = 'client_ca_tls_container_ref'
 CLIENT_CRL_CONTAINER_REF = 'client_crl_container_ref'
 CREATED_AT = 'created_at'
+CRL_CONTAINER_REF = 'crl_container_ref'
 DESCRIPTION = 'description'
 FLAVOR_DATA = 'flavor_data'
 FLAVOR_ID = 'flavor_id'
@@ -63,6 +65,7 @@
 TIMEOUT_MEMBER_CONNECT = 'timeout_member_connect'
 TIMEOUT_MEMBER_DATA = 'timeout_member_data'
 TIMEOUT_TCP_INSPECT = 'timeout_tcp_inspect'
+TLS_ENABLED = 'tls_enabled'
 DEFAULT_TLS_CONTAINER_REF = 'default_tls_container_ref'
 SNI_CONTAINER_REFS = 'sni_container_refs'
 DEFAULT_POOL_ID = 'default_pool_id'
diff --git a/octavia_tempest_plugin/contrib/test_server/test_server.go b/octavia_tempest_plugin/contrib/test_server/test_server.go
index fa8f8d7..1671b5c 100644
--- a/octavia_tempest_plugin/contrib/test_server/test_server.go
+++ b/octavia_tempest_plugin/contrib/test_server/test_server.go
@@ -171,11 +171,14 @@
 				tls.CurveP256},
 			PreferServerCipherSuites: true,
 			CipherSuites: []uint16{
+				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
 				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
 				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
 				tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
 				tls.TLS_RSA_WITH_AES_256_CBC_SHA,
 			},
+			NextProtos: []string{"h2", "http/1.1", "http/1.0"},
 		}
 	} else {
 		tlsConfig = &tls.Config{
@@ -186,6 +189,8 @@
 				tls.CurveP256},
 			PreferServerCipherSuites: true,
 			CipherSuites: []uint16{
+				tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
+				tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
 				tls.TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,
 				tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA,
 				tls.TLS_RSA_WITH_AES_256_GCM_SHA384,
@@ -200,8 +205,6 @@
 		Addr:      portStr,
 		Handler:   mux,
 		TLSConfig: tlsConfig,
-		TLSNextProto: make(map[string]func(*http.Server, *tls.Conn,
-			http.Handler), 0),
 	}
 	log.Fatal(srv.ListenAndServeTLS(serverCertPem, serverKeyPem))
 }
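
The Go change above does two things: it advertises ALPN protocols ("h2", "http/1.1", "http/1.0") through NextProtos, and it stops assigning an empty TLSNextProto map. In Go's net/http, a non-nil TLSNextProto disables the automatic HTTP/2 support, so deleting those two lines is what actually re-enables h2 on the test server. A minimal client-side probe of the negotiated protocol, as a sketch (HOST and PORT are placeholders; verification is disabled because the test server uses a self-signed certificate):

```python
import socket
import ssl

HOST, PORT = '203.0.113.10', 443  # placeholder member address and port

ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE  # self-signed test certificate
ctx.set_alpn_protocols(['h2', 'http/1.1'])

with socket.create_connection((HOST, PORT)) as sock:
    with ctx.wrap_socket(sock, server_hostname=HOST) as tls_sock:
        # Should print 'h2' now that the server lists it in NextProtos
        # and no longer zeroes out TLSNextProto.
        print(tls_sock.selected_alpn_protocol())
```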
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py b/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
index 98f4bfa..c19d97c 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
@@ -32,6 +32,8 @@
                     listener_id=Unset, name=Unset, description=Unset,
                     tags=Unset,
                     admin_state_up=Unset, session_persistence=Unset,
+                    ca_tls_container_ref=Unset, crl_container_ref=Unset,
+                    tls_enabled=Unset, tls_container_ref=Unset,
                     return_object_only=True):
         """Create a pool.
 
@@ -47,6 +49,18 @@
         :param session_persistence: A JSON object specifying the session
                                     persistence for the pool or null for no
                                     session persistence.
+        :param ca_tls_container_ref: The key manager ref for a secret
+                                     containing the PEM encoded CA certificate
+                                     to validate pool members against.
+        :param crl_container_ref: The key manager ref for a secret containing
+                                  the PEM encoded CRL to use when validating
+                                  pool members.
+        :param tls_enabled: A boolean, True when the pool should connect to
+                            members using TLS.
+        :param tls_container_ref: The key manager ref for a secret containing
+                                  a PKCS12 bundle with the client
+                                  authentication certificate and key used
+                                  when connecting to pool members over TLS.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
@@ -164,7 +178,9 @@
     @skip_if_not_implemented
     def update_pool(self, pool_id, lb_algorithm=Unset, name=Unset,
                     description=Unset, tags=Unset, admin_state_up=Unset,
-                    session_persistence=Unset, return_object_only=True):
+                    session_persistence=Unset, ca_tls_container_ref=Unset,
+                    crl_container_ref=Unset, tls_enabled=Unset,
+                    tls_container_ref=Unset, return_object_only=True):
         """Update a pool.
 
         :param pool_id: The pool ID to update.
@@ -177,6 +193,18 @@
         :param session_persistence: A JSON object specifying the session
                                     persistence for the pool or null for no
                                     session persistence.
+        :param ca_tls_container_ref: The key manager ref for a secret
+                                     containing the PEM encoded CA certificate
+                                     to validate pool members against.
+        :param crl_container_ref: The key manager ref for a secret containing
+                                  the PEM encoded CRL to use when validating
+                                  pool members.
+        :param tls_enabled: A boolean, True when the pool should connect to
+                            members using TLS.
+        :param tls_container_ref: The key manager ref for a secret containing
+                                  a PKCS12 bundle with the client
+                                  authentication certificate and key used
+                                  when connecting to pool members over TLS.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
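
The new kwargs follow the client's existing Unset pattern: anything left as Unset is simply omitted from the request body. A hedged usage sketch, assuming a configured mem_pool_client, a load balancer lb_id, and barbican secret refs like the ones created in the TLS scenario test below:

```python
# Illustrative only -- lb_id, pool_CA_ref, pool_CRL_ref and
# pool_client_ref are assumed to come from the test's resource setup.
pool = mem_pool_client.create_pool(
    name='lb_member_pool1-tls',
    protocol='HTTP',
    lb_algorithm='ROUND_ROBIN',
    loadbalancer_id=lb_id,
    tls_enabled=True,                   # re-encrypt traffic to members
    ca_tls_container_ref=pool_CA_ref,   # CA to validate members against
    crl_container_ref=pool_CRL_ref,     # revocation list for members
    tls_container_ref=pool_client_ref,  # PKCS12 client cert/key bundle
)
```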
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
index 83f2d50..e7dccb6 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -185,7 +185,8 @@
         ssh_key = cls._get_amphora_ssh_key()
         linux_client = remote_client.RemoteClient(
             amp['lb_network_ip'], CONF.load_balancer.amphora_ssh_user,
-            pkey=ssh_key)
+            pkey=ssh_key,
+            **cls.remote_client_args())
         linux_client.validate_authentication()
 
         # Allow logging from non-init namespaces
@@ -202,7 +203,8 @@
     def _has_vip_traffic(cls, ip_address, log_prefix):
         ssh_key = cls._get_amphora_ssh_key()
         linux_client = remote_client.RemoteClient(
-            ip_address, CONF.load_balancer.amphora_ssh_user, pkey=ssh_key)
+            ip_address, CONF.load_balancer.amphora_ssh_user, pkey=ssh_key,
+            **cls.remote_client_args())
         linux_client.validate_authentication()
 
         try:
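
Both call sites above splice in **cls.remote_client_args(), a helper added to test_base.py later in this change. Sketched standalone, the fallback pattern looks like this (the helper body mirrors the one in this diff; only the module-level framing is illustrative):

```python
from oslo_config import cfg
from tempest import config

CONF = config.CONF


def remote_client_args():
    """Return the extra RemoteClient kwargs this tempest supports."""
    args = {}
    try:
        # ssh_key_type only exists on newer tempest releases; older
        # ones (e.g. stable/train) raise NoSuchOptError on access.
        args['ssh_key_type'] = CONF.validation.ssh_key_type
    except cfg.NoSuchOptError:
        pass
    return args

# Usage, as in the hunks above:
#   remote_client.RemoteClient(ip, user, pkey=key, **remote_client_args())
```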
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 843388e..3f41892 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -99,6 +99,23 @@
         return new_cert, new_key, new_secret_ref
 
     @classmethod
+    def _load_pool_pki(cls):
+        # Create the pkcs12 bundle
+        pkcs12 = cert_utils.generate_pkcs12_bundle(cls.member_client_cert,
+                                                   cls.member_client_key)
+        LOG.debug('Pool client PKCS12 bundle: %s', base64.b64encode(pkcs12))
+
+        cls.pool_client_ref = cls._store_secret(cls.barbican_mgr, pkcs12)
+
+        cls.pool_CA_ref = cls._store_secret(
+            cls.barbican_mgr,
+            cls.member_ca_cert.public_bytes(serialization.Encoding.PEM))
+
+        cls.pool_CRL_ref = cls._store_secret(
+            cls.barbican_mgr,
+            cls.member_crl.public_bytes(serialization.Encoding.PEM))
+
+    @classmethod
     def resource_setup(cls):
         """Setup resources needed by the tests."""
         super(TLSWithBarbicanTest, cls).resource_setup()
@@ -174,6 +191,8 @@
             cls.barbican_mgr,
             cls.client_crl.public_bytes(serialization.Encoding.PEM))
 
+        cls._load_pool_pki()
+
         # Setup a load balancer for the tests to use
         lb_name = data_utils.rand_name("lb_member_lb1-tls")
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
@@ -1197,3 +1216,149 @@
     def test_http_1_1_tls_traffic(self):
         self._test_http_versions_tls_traffic(
             'HTTP/1.1', ['http/1.1', 'http/1.0'])
+
+    @decorators.idempotent_id('ee0faf71-d11e-4323-8673-e5e15779749b')
+    def test_pool_reencryption(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.8'):
+            raise self.skipException('Pool re-encryption is only available on '
+                                     'Octavia API version 2.8 or newer.')
+        pool_name = data_utils.rand_name("lb_member_pool1-tls-reencrypt")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: self.lb_algorithm,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.TLS_ENABLED: True
+        }
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        pool_id = pool[const.ID]
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-tls-reencrypt")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 443,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        self.mem_member_client.create_member(**member1_kwargs)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-tls-reencrypt")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 443,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        self.mem_member_client.create_member(**member2_kwargs)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-tls-reencrypt")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '84',
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_POOL_ID: pool_id,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Test with no CA validation
+        self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
+                                    protocol_port=84)
+
+        # Test with CA validation - invalid CA
+        pool_update_kwargs = {
+            const.CA_TLS_CONTAINER_REF: self.client_ca_cert_ref
+        }
+
+        self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        waiters.wait_for_status(
+            self.mem_pool_client.show_pool, pool_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        url = 'http://{0}:84'.format(self.lb_vip_address)
+        self.validate_URL_response(url, expected_status_code=503)
+
+        # Test with CA validation - valid CA
+        pool_update_kwargs = {
+            const.CA_TLS_CONTAINER_REF: self.pool_CA_ref
+        }
+
+        self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        waiters.wait_for_status(
+            self.mem_pool_client.show_pool, pool_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
+                                    protocol_port=84)
+
+        # Test with a CRL that revokes one of the webserver certificates
+        pool_update_kwargs = {
+            const.CRL_CONTAINER_REF: self.pool_CRL_ref
+        }
+
+        self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        waiters.wait_for_status(
+            self.mem_pool_client.show_pool, pool_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
+                                    protocol_port=84, traffic_member_count=1)
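
The new scenario walks the pool through four states: no CA validation, an invalid CA (traffic fails with HTTP 503), the correct CA (both members balanced again), and finally a CRL that revokes one webserver certificate (traffic continues on the single remaining member). The _load_pool_pki helper feeds this by storing three barbican secrets: a PKCS12 client bundle, the member CA certificate and the CRL. The plugin's cert_utils.generate_pkcs12_bundle is not shown in this diff; a plausible sketch of such a helper using the cryptography library's PKCS12 serializer (an assumption about the implementation, not a copy of it):

```python
from cryptography.hazmat.primitives.serialization import NoEncryption
from cryptography.hazmat.primitives.serialization.pkcs12 import (
    serialize_key_and_certificates)


def generate_pkcs12_bundle(client_cert, client_key):
    # Pack the member client certificate and key into one unencrypted
    # PKCS12 blob so barbican can store it as a single secret.
    return serialize_key_and_certificates(
        name=b'pool-client-cert',  # friendly name; illustrative
        key=client_key,            # cryptography private-key object
        cert=client_cert,          # cryptography x509.Certificate
        cas=None,                  # no intermediate chain in this test
        encryption_algorithm=NoEncryption())
```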
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index b0bd643..a18066a 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -15,12 +15,14 @@
 import ipaddress
 import os
 import random
+import re
 import shlex
 import string
 import subprocess
 import tempfile
 
 from cryptography.hazmat.primitives import serialization
+from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import uuidutils
 from tempest import config
@@ -582,6 +584,18 @@
 
 class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
     @classmethod
+    def remote_client_args(cls):
+        # Older tempest releases (for instance stable/train) don't support
+        # the ssh_key_type option; catch the NoSuchOptError and pass no
+        # extra argument in that case
+        args = {}
+        try:
+            args['ssh_key_type'] = CONF.validation.ssh_key_type
+        except cfg.NoSuchOptError:
+            pass
+        return args
+
+    @classmethod
     def resource_setup(cls):
         super(LoadBalancerBaseTestWithCompute, cls).resource_setup()
         # If validation is disabled in this cloud, we won't be able to
@@ -640,6 +654,19 @@
                 cls.lb_mem_SGr_client.delete_security_group_rule,
                 cls.lb_mem_SGr_client.show_security_group_rule,
                 SGr['id'])
+            # Create a security group rule to allow 443 (test webservers)
+            SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                direction='ingress',
+                security_group_id=cls.lb_member_sec_group['id'],
+                protocol='tcp',
+                ethertype='IPv4',
+                port_range_min=443,
+                port_range_max=443)['security_group_rule']
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_SGr_client.delete_security_group_rule,
+                cls.lb_mem_SGr_client.show_security_group_rule,
+                SGr['id'])
             # Create a security group rule to allow UDP 9999 (test webservers)
             # Port 9999 is used to illustrate health monitor ERRORs on closed
             # ports.
@@ -696,6 +723,19 @@
                     cls.lb_mem_SGr_client.delete_security_group_rule,
                     cls.lb_mem_SGr_client.show_security_group_rule,
                     SGr['id'])
+                # Create a security group rule to allow 443 (test webservers)
+                SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                    direction='ingress',
+                    security_group_id=cls.lb_member_sec_group['id'],
+                    protocol='tcp',
+                    ethertype='IPv6',
+                    port_range_min=443,
+                    port_range_max=443)['security_group_rule']
+                cls.addClassResourceCleanup(
+                    waiters.wait_for_not_found,
+                    cls.lb_mem_SGr_client.delete_security_group_rule,
+                    cls.lb_mem_SGr_client.show_security_group_rule,
+                    SGr['id'])
                 # Create a security group rule to allow 22 (ssh)
                 SGr = cls.lb_mem_SGr_client.create_security_group_rule(
                     direction='ingress',
@@ -950,25 +990,57 @@
         return webserver_details
 
     @classmethod
+    def _get_openssh_version(cls):
+        p = subprocess.Popen(["ssh", "-V"],
+                             stdout=subprocess.PIPE,
+                             stderr=subprocess.PIPE)
+        output = p.communicate()[1]
+
+        try:
+            m = re.match(r"OpenSSH_(\d+)\.(\d+)", output.decode('utf-8'))
+            version_maj = int(m.group(1))
+            version_min = int(m.group(2))
+            return version_maj, version_min
+        except Exception:
+            return None, None
+
+    @classmethod
+    def _need_scp_protocol(cls):
+        # When using OpenSSH >= 8.7, force the use of the legacy SCP
+        # protocol; the new default (SFTP protocol) doesn't work with
+        # cirros VMs.
+        ssh_version = cls._get_openssh_version()
+        LOG.debug("ssh_version = {}".format(ssh_version))
+        # Guard against unparsable versions: (None, None) means not >= 8.7
+        return ssh_version[0] is not None and ssh_version >= (8, 7)
+
+    @classmethod
     def _install_start_webserver(cls, ip_address, ssh_key, start_id,
                                  revoke_cert=False):
         local_file = CONF.load_balancer.test_server_path
 
         linux_client = remote_client.RemoteClient(
-            ip_address, CONF.validation.image_ssh_user, pkey=ssh_key)
+            ip_address, CONF.validation.image_ssh_user, pkey=ssh_key,
+            **cls.remote_client_args())
         linux_client.validate_authentication()
 
         with tempfile.NamedTemporaryFile() as key:
             key.write(ssh_key.encode('utf-8'))
             key.flush()
+            ssh_extra_args = (
+                "-o PubkeyAcceptedKeyTypes=+ssh-rsa")
+            if cls._need_scp_protocol():
+                ssh_extra_args += " -O"
             cmd = ("scp -v -o UserKnownHostsFile=/dev/null "
+                   "{7} "
                    "-o StrictHostKeyChecking=no "
                    "-o ConnectTimeout={0} -o ConnectionAttempts={1} "
                    "-i {2} {3} {4}@{5}:{6}").format(
                 CONF.load_balancer.scp_connection_timeout,
                 CONF.load_balancer.scp_connection_attempts,
                 key.name, local_file, CONF.validation.image_ssh_user,
-                ip_address, const.TEST_SERVER_BINARY)
+                ip_address, const.TEST_SERVER_BINARY,
+                ssh_extra_args)
             args = shlex.split(cmd)
             subprocess_args = {'stdout': subprocess.PIPE,
                                'stderr': subprocess.STDOUT,
@@ -1010,7 +1082,8 @@
     def _enable_ipv6_nic_webserver(cls, ip_address, ssh_key,
                                    ipv6_address, ipv6_prefix):
         linux_client = remote_client.RemoteClient(
-            ip_address, CONF.validation.image_ssh_user, pkey=ssh_key)
+            ip_address, CONF.validation.image_ssh_user, pkey=ssh_key,
+            **cls.remote_client_args())
         linux_client.validate_authentication()
 
         linux_client.exec_command('sudo ip address add {0}/{1} dev '
@@ -1125,14 +1198,20 @@
             subprocess_args = {'stdout': subprocess.PIPE,
                                'stderr': subprocess.STDOUT,
                                'cwd': None}
+            ssh_extra_args = (
+                "-o PubkeyAcceptedKeyTypes=+ssh-rsa")
+            if cls._need_scp_protocol():
+                ssh_extra_args += " -O"
             cmd = ("scp -v -o UserKnownHostsFile=/dev/null "
+                   "{9} "
                    "-o StrictHostKeyChecking=no "
                    "-o ConnectTimeout={0} -o ConnectionAttempts={1} "
                    "-i {2} {3} {4} {5} {6}@{7}:{8}").format(
                 CONF.load_balancer.scp_connection_timeout,
                 CONF.load_balancer.scp_connection_attempts,
                 ssh_key.name, cert_filename, key_filename, client_ca_filename,
-                CONF.validation.image_ssh_user, ip_address, const.DEV_SHM_PATH)
+                CONF.validation.image_ssh_user, ip_address, const.DEV_SHM_PATH,
+                ssh_extra_args)
             args = shlex.split(cmd)
             proc = subprocess.Popen(args, **subprocess_args)
             stdout, stderr = proc.communicate()
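
The scp changes above exist because newer OpenSSH builds default scp to the SFTP protocol, which the cirros test images cannot serve; the -O flag (added in OpenSSH 8.7) forces the legacy SCP protocol, and the version gate avoids passing -O to older clients that would reject it. A condensed, standalone version of the check (same logic as the helpers above, with the unparsable-version guard made explicit):

```python
import re
import subprocess


def openssh_version():
    # ssh prints its banner on stderr, e.g. "OpenSSH_8.9p1 Ubuntu ..."
    banner = subprocess.run(['ssh', '-V'], capture_output=True).stderr
    m = re.match(rb'OpenSSH_(\d+)\.(\d+)', banner)
    return (int(m.group(1)), int(m.group(2))) if m else None


version = openssh_version()
ssh_extra_args = '-o PubkeyAcceptedKeyTypes=+ssh-rsa'
if version is not None and version >= (8, 7):
    ssh_extra_args += ' -O'  # force the legacy SCP protocol
```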
diff --git a/releasenotes/notes/pool-tls-scenario-test-0eb88e731c595b67.yaml b/releasenotes/notes/pool-tls-scenario-test-0eb88e731c595b67.yaml
new file mode 100644
index 0000000..429874a
--- /dev/null
+++ b/releasenotes/notes/pool-tls-scenario-test-0eb88e731c595b67.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added scenario test coverage for pool re-encryption.
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index eb5a593..98bbd88 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -39,6 +39,26 @@
           - controller
 
 - nodeset:
+    name: octavia-single-node-centos-8-stream
+    nodes:
+      - name: controller
+        label: nested-virt-centos-8-stream
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: octavia-single-node-centos-9-stream
+    nodes:
+      - name: controller
+        label: nested-virt-centos-9-stream
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
     name: octavia-two-node
     nodes:
       - name: controller
@@ -510,6 +530,11 @@
         USE_PYTHON3: False
 
 - job:
+    name: octavia-v2-dsvm-noop-api-stable-xena
+    parent: octavia-v2-dsvm-noop-api
+    override-checkout: stable/xena
+
+- job:
     name: octavia-v2-dsvm-noop-api-stable-wallaby
     parent: octavia-v2-dsvm-noop-api
     override-checkout: stable/wallaby
@@ -526,12 +551,6 @@
     override-checkout: stable/ussuri
 
 - job:
-    name: octavia-v2-dsvm-noop-api-stable-train
-    parent: octavia-v2-dsvm-noop-api
-    nodeset: octavia-single-node-ubuntu-bionic
-    override-checkout: stable/train
-
-- job:
     name: octavia-v2-dsvm-scenario
     parent: octavia-dsvm-live-base
     vars:
@@ -586,6 +605,11 @@
         override-checkout: 2.30.0
 
 - job:
+    name: octavia-v2-dsvm-scenario-stable-xena
+    parent: octavia-v2-dsvm-scenario
+    override-checkout: stable/xena
+
+- job:
     name: octavia-v2-dsvm-scenario-stable-wallaby
     parent: octavia-v2-dsvm-scenario
     override-checkout: stable/wallaby
@@ -600,12 +624,6 @@
     nodeset: octavia-single-node-ubuntu-bionic
     override-checkout: stable/ussuri
 
-- job:
-    name: octavia-v2-dsvm-scenario-stable-train
-    parent: octavia-v2-dsvm-scenario
-    nodeset: octavia-single-node-ubuntu-bionic
-    override-checkout: stable/train
-
 # Legacy jobs for the transition to the act-stdby two node jobs
 - job:
     name: octavia-v2-dsvm-scenario-two-node
@@ -689,6 +707,26 @@
         OCTAVIA_AMP_IMAGE_SIZE: 3
 
 - job:
+    name: octavia-v2-dsvm-scenario-centos-8-stream
+    parent: octavia-v2-dsvm-scenario
+    nodeset: octavia-single-node-centos-8-stream
+    vars:
+      devstack_localrc:
+        OCTAVIA_AMP_BASE_OS: centos
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 8-stream
+        OCTAVIA_AMP_IMAGE_SIZE: 3
+
+- job:
+    name: octavia-v2-dsvm-scenario-centos-9-stream
+    parent: octavia-v2-dsvm-scenario
+    nodeset: octavia-single-node-centos-9-stream
+    vars:
+      devstack_localrc:
+        OCTAVIA_AMP_BASE_OS: centos
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 9-stream
+        OCTAVIA_AMP_IMAGE_SIZE: 3
+
+- job:
     name: octavia-v2-dsvm-scenario-ubuntu-focal
     parent: octavia-v2-dsvm-scenario
     vars:
@@ -722,6 +760,11 @@
       - ^octavia_tempest_plugin/tests/(?!barbican_scenario/|\w+\.py).*
 
 - job:
+    name: octavia-v2-dsvm-tls-barbican-stable-xena
+    parent: octavia-v2-dsvm-tls-barbican
+    override-checkout: stable/xena
+
+- job:
     name: octavia-v2-dsvm-tls-barbican-stable-wallaby
     parent: octavia-v2-dsvm-tls-barbican
     override-checkout: stable/wallaby
@@ -781,6 +824,11 @@
         override-checkout: 2.30.0
 
 - job:
+    name: octavia-v2-dsvm-spare-pool-stable-xena
+    parent: octavia-v2-dsvm-spare-pool
+    override-checkout: stable/xena
+
+- job:
     name: octavia-v2-dsvm-spare-pool-stable-wallaby
     parent: octavia-v2-dsvm-spare-pool
     override-checkout: stable/wallaby
@@ -797,12 +845,6 @@
     override-checkout: stable/ussuri
 
 - job:
-    name: octavia-v2-dsvm-spare-pool-stable-train
-    parent: octavia-v2-dsvm-spare-pool
-    nodeset: octavia-single-node-ubuntu-bionic
-    override-checkout: stable/train
-
-- job:
     name: octavia-v2-dsvm-cinder-amphora
     parent: octavia-v2-dsvm-scenario
     required-projects:
@@ -932,6 +974,11 @@
       tox_envlist: all
 
 - job:
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-xena
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    override-checkout: stable/xena
+
+- job:
     name: octavia-v2-act-stdby-dsvm-scenario-stable-wallaby
     parent: octavia-v2-act-stdby-dsvm-scenario
     override-checkout: stable/wallaby
@@ -947,12 +994,6 @@
     nodeset: octavia-single-node-ubuntu-bionic
     override-checkout: stable/ussuri
 
-- job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-train
-    parent: octavia-v2-act-stdby-dsvm-scenario
-    nodeset: octavia-single-node-ubuntu-bionic
-    override-checkout: stable/train
-
 ######### Third party jobs ##########
 
 - job:
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index 02d3ac7..49ba09d 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -9,47 +9,49 @@
     check:
       jobs:
         - octavia-v2-dsvm-noop-api
+        - octavia-v2-dsvm-noop-api-stable-xena
         - octavia-v2-dsvm-noop-api-stable-wallaby
         - octavia-v2-dsvm-noop-api-stable-victoria
         - octavia-v2-dsvm-noop-api-stable-ussuri
-        - octavia-v2-dsvm-noop-api-stable-train
         - octavia-v2-dsvm-noop-api-scoped-tokens
         - octavia-v2-dsvm-scenario
+        - octavia-v2-dsvm-scenario-stable-xena
         - octavia-v2-dsvm-scenario-stable-wallaby
         - octavia-v2-dsvm-scenario-stable-victoria
         - octavia-v2-dsvm-scenario-stable-ussuri
-        - octavia-v2-dsvm-scenario-stable-train
         - octavia-v2-dsvm-tls-barbican
+        - octavia-v2-dsvm-tls-barbican-stable-xena
         - octavia-v2-dsvm-tls-barbican-stable-wallaby
         - octavia-v2-dsvm-tls-barbican-stable-victoria
         - octavia-v2-dsvm-tls-barbican-stable-ussuri
-        - octavia-v2-dsvm-tls-barbican-stable-train
         - octavia-v2-dsvm-scenario-ipv6-only:
             voting: false
-        - octavia-v2-dsvm-scenario-centos-8:
+        - octavia-v2-dsvm-scenario-centos-8-stream:
+            voting: false
+        - octavia-v2-dsvm-scenario-centos-9-stream:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario-two-node:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario:
             voting: false
+        - octavia-v2-act-stdby-dsvm-scenario-stable-xena:
+            voting: false
         - octavia-v2-act-stdby-dsvm-scenario-stable-wallaby:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario-stable-victoria:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario-stable-ussuri:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-train:
-            voting: false
         - octavia-v2-dsvm-spare-pool:
             voting: false
+        - octavia-v2-dsvm-spare-pool-stable-xena:
+            voting: false
         - octavia-v2-dsvm-spare-pool-stable-wallaby:
             voting: false
         - octavia-v2-dsvm-spare-pool-stable-victoria:
             voting: false
         - octavia-v2-dsvm-spare-pool-stable-ussuri:
             voting: false
-        - octavia-v2-dsvm-spare-pool-stable-train:
-            voting: false
         - octavia-v2-dsvm-cinder-amphora:
             voting: false
         # Third party provider jobs
@@ -62,18 +64,18 @@
       queue: octavia
       jobs:
         - octavia-v2-dsvm-noop-api
+        - octavia-v2-dsvm-noop-api-stable-xena
         - octavia-v2-dsvm-noop-api-stable-wallaby
         - octavia-v2-dsvm-noop-api-stable-victoria
         - octavia-v2-dsvm-noop-api-stable-ussuri
-        - octavia-v2-dsvm-noop-api-stable-train
         - octavia-v2-dsvm-noop-api-scoped-tokens
         - octavia-v2-dsvm-scenario
+        - octavia-v2-dsvm-scenario-stable-xena
         - octavia-v2-dsvm-scenario-stable-wallaby
         - octavia-v2-dsvm-scenario-stable-victoria
         - octavia-v2-dsvm-scenario-stable-ussuri
-        - octavia-v2-dsvm-scenario-stable-train
         - octavia-v2-dsvm-tls-barbican
+        - octavia-v2-dsvm-tls-barbican-stable-xena
         - octavia-v2-dsvm-tls-barbican-stable-wallaby
         - octavia-v2-dsvm-tls-barbican-stable-victoria
         - octavia-v2-dsvm-tls-barbican-stable-ussuri
-        - octavia-v2-dsvm-tls-barbican-stable-train