Merge "Use TOX_CONSTRAINTS_FILE"
diff --git a/.gitignore b/.gitignore
index 78c2e67..59ed598 100644
--- a/.gitignore
+++ b/.gitignore
@@ -67,3 +67,6 @@
 # Others
 .stestr
 tempest.log
+
+# Git
+*.orig
diff --git a/README.rst b/README.rst
index 8b5bd3c..beb0f5a 100644
--- a/README.rst
+++ b/README.rst
@@ -30,20 +30,26 @@
 Installing
 ----------
 
-Clone this repository to the destination machine, and call from the repo::
+From the tempest directory, set up the tempest virtual environment for the
+Octavia tempest plugin::
 
-    $ pip install -e .
+    $ tox -e venv-tempest -- pip3 install -e <path to octavia-tempest-plugin>
+
+For example, when using a typical devstack setup::
+
+    $ cd /opt/stack/tempest
+    $ tox -e venv-tempest -- pip3 install -e /opt/stack/octavia-tempest-plugin
 
 Running the tests
 -----------------
 
 To run all the tests from this plugin, call from the tempest repo::
 
-    $ tox -e all-plugin -- octavia_tempest_plugin
+    $ tox -e all -- octavia_tempest_plugin
 
 To run a single test case, call with full path, for example::
 
-    $ tox -e all-plugin -- octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_traffic
+    $ tox -e all -- octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_traffic
 
 To retrieve a list of all tempest tests, run::
 
diff --git a/babel.cfg b/babel.cfg
deleted file mode 100644
index 15cd6cb..0000000
--- a/babel.cfg
+++ /dev/null
@@ -1,2 +0,0 @@
-[python: **.py]
-
diff --git a/doc/source/conf.py b/doc/source/conf.py
old mode 100755
new mode 100644
diff --git a/doc/source/configref.rst b/doc/source/configref.rst
index f9db042..4b58b12 100644
--- a/doc/source/configref.rst
+++ b/doc/source/configref.rst
@@ -19,8 +19,6 @@
 .. contents:: Table of Contents
     :depth: 2
 
-.. note:: Not all of these options are used by the Octavia tempest tests.
-
 .. show-options::
 
-    tempest.config
+    octavia-tempest-plugin
diff --git a/octavia_tempest_plugin/common/barbican_client_mgr.py b/octavia_tempest_plugin/common/barbican_client_mgr.py
index eba1715..3bd2807 100644
--- a/octavia_tempest_plugin/common/barbican_client_mgr.py
+++ b/octavia_tempest_plugin/common/barbican_client_mgr.py
@@ -73,7 +73,7 @@
         p12_secret.name = data_utils.rand_name("lb_member_barbican")
         p12_secret.payload = secret
         secret_ref = p12_secret.store()
-        LOG.debug('Secret {0} has ref {1}'.format(p12_secret.name, secret_ref))
+        LOG.debug('Secret %s has ref %s', p12_secret.name, secret_ref)
         return secret_ref
 
     def delete_secret(self, secret_ref):
@@ -84,5 +84,5 @@
                                                users=[user_id],
                                                project_access=True)
         acl_ref = acl_entity.submit()
-        LOG.debug('Secret ACL {0} added user {1}'.format(acl_ref, user_id))
+        LOG.debug('Secret ACL %s added user %s', acl_ref, user_id)
         return acl_ref
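
The two LOG.debug changes above switch from str.format() to oslo.log's lazy
%-interpolation, so the message is only formatted when DEBUG output is
actually emitted. A minimal sketch of the same pattern, assuming only that
oslo.log is installed::

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    # The arguments are interpolated into the message only if a handler at
    # DEBUG level actually emits the record, so disabled debug logging
    # costs no string formatting.
    LOG.debug('Secret %s has ref %s', 'lb_member_barbican', 'ref-1234')
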
diff --git a/octavia_tempest_plugin/common/cert_utils.py b/octavia_tempest_plugin/common/cert_utils.py
index 753da6b..34d0d7d 100644
--- a/octavia_tempest_plugin/common/cert_utils.py
+++ b/octavia_tempest_plugin/common/cert_utils.py
@@ -21,7 +21,6 @@
 from cryptography.hazmat.primitives.serialization import pkcs12
 from cryptography import x509
 from cryptography.x509.oid import NameOID
-import OpenSSL
 
 
 def generate_ca_cert_and_key():
@@ -176,38 +175,13 @@
 def generate_pkcs12_bundle(server_cert, server_key):
     """Creates a pkcs12 formated bundle.
 
-    Note: This uses pyOpenSSL as the cryptography package does not yet
-          support creating pkcs12 bundles. The currently un-released
-          2.5 version of cryptography supports reading pkcs12, but not
-          creation. This method should be updated to only use
-          cryptography once it supports creating pkcs12 bundles.
-
     :param server_cert: A cryptography certificate (x509) object.
     :param server_key: A cryptography key (x509) object.
     :returns: A pkcs12 bundle.
     """
-    # Use the PKCS12 serialization function from cryptography if it exists
-    # (>=3.0), otherwise use the pyOpenSSL module.
-    #
-    # The PKCS12 class of the pyOpenSSL module is not compliant with FIPS.
-    # It uses the SHA1 function [0] which is not allowed when generating
-    # digital signatures [1]
-    #
-    # [0] https://github.com/pyca/pyopenssl/blob/
-    #       65ca53a7a06a7c78c1749200a6b3a007e47d3214/src/OpenSSL/
-    #       crypto.py#L2748-L2749
-    # [1] https://nvlpubs.nist.gov/nistpubs/SpecialPublications/
-    #       NIST.SP.800-131Ar1.pdf
-    if hasattr(pkcs12, 'serialize_key_and_certificates'):
-        p12 = pkcs12.serialize_key_and_certificates(
-            b'', server_key, server_cert,
-            cas=None, encryption_algorithm=NoEncryption())
-    else:
-        p12 = OpenSSL.crypto.PKCS12()
-        p12.set_privatekey(
-            OpenSSL.crypto.PKey.from_cryptography_key(server_key))
-        p12.set_certificate(OpenSSL.crypto.X509.from_cryptography(server_cert))
-        p12 = p12.export()
+    p12 = pkcs12.serialize_key_and_certificates(
+        b'', server_key, server_cert,
+        cas=None, encryption_algorithm=NoEncryption())
     return p12
 
 
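With the pyOpenSSL fallback removed, generate_pkcs12_bundle() relies only on
cryptography's pkcs12.serialize_key_and_certificates(). A self-contained
sketch of that call, assuming a recent cryptography release (3.1 or newer)
and using a throwaway self-signed certificate rather than the plugin's own
helpers::

    import datetime

    from cryptography.hazmat.primitives import hashes
    from cryptography.hazmat.primitives.asymmetric import rsa
    from cryptography.hazmat.primitives.serialization import NoEncryption
    from cryptography.hazmat.primitives.serialization import pkcs12
    from cryptography import x509
    from cryptography.x509.oid import NameOID

    # Throwaway key and self-signed certificate for illustration only.
    key = rsa.generate_private_key(public_exponent=65537, key_size=2048)
    name = x509.Name(
        [x509.NameAttribute(NameOID.COMMON_NAME, u'www.example.com')])
    now = datetime.datetime.utcnow()
    cert = (
        x509.CertificateBuilder()
        .subject_name(name)
        .issuer_name(name)
        .public_key(key.public_key())
        .serial_number(x509.random_serial_number())
        .not_valid_before(now)
        .not_valid_after(now + datetime.timedelta(days=1))
        .sign(key, hashes.SHA256()))

    # The same call the simplified helper now makes unconditionally.
    p12 = pkcs12.serialize_key_and_certificates(
        b'', key, cert, cas=None, encryption_algorithm=NoEncryption())
    print('%d bytes of PKCS12 data' % len(p12))
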
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 174589c..f03da3e 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -45,6 +45,7 @@
 PROVIDER_NAME = 'provider_name'
 PROVISIONING_STATUS = 'provisioning_status'
 REQUEST_ERRORS = 'request_errors'
+TLS_CONTAINER_REF = 'tls_container_ref'
 TOTAL_CONNECTIONS = 'total_connections'
 UPDATED_AT = 'updated_at'
 VIP_ADDRESS = 'vip_address'
@@ -71,6 +72,9 @@
 DEFAULT_POOL_ID = 'default_pool_id'
 L7_POLICIES = 'l7_policies'
 ALPN_PROTOCOLS = 'alpn_protocols'
+HSTS_MAX_AGE = 'hsts_max_age'
+HSTS_INCLUDE_SUBDOMAINS = 'hsts_include_subdomains'
+HSTS_PRELOAD = 'hsts_preload'
 
 LB_ALGORITHM = 'lb_algorithm'
 LB_ALGORITHM_ROUND_ROBIN = 'ROUND_ROBIN'
@@ -111,6 +115,7 @@
 
 # Other constants
 ACTIVE = 'ACTIVE'
+PAUSED = 'PAUSED'
 PENDING_UPDATE = 'PENDING_UPDATE'
 ADMIN_STATE_UP_TRUE = 'true'
 ASC = 'asc'
@@ -125,11 +130,14 @@
 SINGLE = 'SINGLE'
 ACTIVE_STANDBY = 'ACTIVE_STANDBY'
 SUPPORTED_LB_TOPOLOGIES = (SINGLE, ACTIVE_STANDBY)
+BACKUP_TRUE = 'true'
 
 # Protocols
 HTTP = 'HTTP'
 HTTPS = 'HTTPS'
 PROXY = 'PROXY'
+PROMETHEUS = 'PROMETHEUS'
+SCTP = 'SCTP'
 TCP = 'TCP'
 TERMINATED_HTTPS = 'TERMINATED_HTTPS'
 UDP = 'UDP'
@@ -139,12 +147,18 @@
 POST = 'POST'
 PUT = 'PUT'
 DELETE = 'DELETE'
+HEAD = 'HEAD'
+OPTIONS = 'OPTIONS'
+PATCH = 'PATCH'
+CONNECT = 'CONNECT'
+TRACE = 'TRACE'
 
 # HM Types
 HEALTH_MONITOR_PING = 'PING'
 HEALTH_MONITOR_TCP = 'TCP'
 HEALTH_MONITOR_HTTP = 'HTTP'
 HEALTH_MONITOR_HTTPS = 'HTTPS'
+HEALTH_MONITOR_SCTP = 'SCTP'
 HEALTH_MONITOR_TLS_HELLO = 'TLS-HELLO'
 HEALTH_MONITOR_UDP_CONNECT = 'UDP-CONNECT'
 
@@ -231,11 +245,17 @@
 
 # Flavor capabilities
 LOADBALANCER_TOPOLOGY = 'loadbalancer_topology'
+COMPUTE_FLAVOR = 'compute_flavor'
 
 # Availability zone capabilities
 COMPUTE_ZONE = 'compute_zone'
 MANAGEMENT_NETWORK = 'management_network'
 
+# Compute flavors
+RAM = 'ram'
+VCPUS = 'vcpus'
+DISK = 'disk'
+
 # API valid fields
 SHOW_LOAD_BALANCER_RESPONSE_FIELDS = (
     ADMIN_STATE_UP, CREATED_AT, DESCRIPTION, FLAVOR_ID, ID, LISTENERS, NAME,
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 4d1543b..78f57ed 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -61,6 +61,9 @@
     cfg.StrOpt('octavia_svc_username', default='admin',
                help='The service_auth username the Octavia services are using'
                     'to access other OpenStack services.'),
+    cfg.BoolOpt('log_user_roles',
+                default=True,
+                help='Log the user roles at the start of every test.'),
     # load-balancer specific options
     cfg.FloatOpt('check_interval',
                  default=5,
@@ -102,7 +105,7 @@
     cfg.StrOpt('provider',
                default='octavia',
                help='The provider driver to use for the tests.'),
-    cfg.StrOpt('RBAC_test_type', default=const.ADVANCED,
+    cfg.StrOpt('RBAC_test_type', default=const.KEYSTONE_DEFAULT_ROLES,
                choices=[const.ADVANCED, const.KEYSTONE_DEFAULT_ROLES,
                         const.OWNERADMIN, const.NONE],
                help='Type of RBAC tests to run. "advanced" runs the octavia '
@@ -241,7 +244,11 @@
                 help='Does the load-balancer service API policies enforce '
                      'the new keystone default roles? This configuration '
                      'value should be same as octavia.conf: '
-                     '[oslo_policy].enforce_new_defaults option.'),
+                     '[oslo_policy].enforce_new_defaults option.',
+                deprecated_for_removal=True,
+                deprecated_reason='Consolidated into the RBAC_test_type '
+                                  'setting.',
+                deprecated_since='bobcat'),
 ]
 
 lb_feature_enabled_group = cfg.OptGroup(name='loadbalancer-feature-enabled',
@@ -285,6 +292,8 @@
                 help="Whether the log offload tests will run. These require "
                      "the tempest instance have access to the log files "
                      "specified in the tempest configuration."),
+    cfg.BoolOpt('prometheus_listener_enabled', default=True,
+                help="Whether the PROMETHEUS listener tests will run."),
 ]
 
 # Extending this enforce_scope group defined in tempest
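
The new log_user_roles option is presumably consumed by the test setup code;
a hedged sketch of how a helper might honor it, with the function below being
illustrative rather than part of the patch::

    from oslo_log import log as logging
    from tempest import config

    CONF = config.CONF
    LOG = logging.getLogger(__name__)


    def maybe_log_roles(roles):
        # Emit the role listing only when tempest.conf enables it via
        # [load_balancer] log_user_roles = True (the plugin default).
        if CONF.load_balancer.log_user_roles:
            LOG.info('User roles for this test: %s', roles)
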
diff --git a/octavia_tempest_plugin/hacking/checks.py b/octavia_tempest_plugin/hacking/checks.py
index eec7476..7008994 100644
--- a/octavia_tempest_plugin/hacking/checks.py
+++ b/octavia_tempest_plugin/hacking/checks.py
@@ -66,8 +66,6 @@
 no_eventlet_re = re.compile(r'(import|from)\s+[(]?eventlet')
 no_line_continuation_backslash_re = re.compile(r'.*(\\)\n')
 no_logging_re = re.compile(r'(import|from)\s+[(]?logging')
-import_mock_re = re.compile(r"\bimport[\s]+mock\b")
-import_from_mock_re = re.compile(r"\bfrom[\s]+mock[\s]+import\b")
 
 
 def _translation_checks_not_enforced(filename):
@@ -147,7 +145,7 @@
     O339
     """
     if logical_line.startswith('LOG.warn('):
-        yield(0, "O339:Use LOG.warning() rather than LOG.warn()")
+        yield (0, "O339:Use LOG.warning() rather than LOG.warn()")
 
 
 @core.flake8ext
@@ -260,18 +258,3 @@
     if no_logging_re.match(logical_line):
         msg = 'O348 Usage of Python logging module not allowed, use oslo_log'
         yield logical_line.index('logging'), msg
-
-
-@core.flake8ext
-def check_no_import_mock(logical_line):
-    """O349 - Test code must not import mock library.
-
-    :param logical_line: The logical line to check.
-    :returns: None if the logical line passes the check, otherwise a tuple
-              is yielded that contains the offending index in logical line
-              and a message describe the check validation failure.
-    """
-    if (import_mock_re.match(logical_line) or
-            import_from_mock_re.match(logical_line)):
-        msg = 'O349 Test code must not import mock library, use unittest.mock'
-        yield 0, msg
diff --git a/octavia_tempest_plugin/opts.py b/octavia_tempest_plugin/opts.py
new file mode 100644
index 0000000..2385035
--- /dev/null
+++ b/octavia_tempest_plugin/opts.py
@@ -0,0 +1,21 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy
+# of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import octavia_tempest_plugin.config
+
+
+def list_opts():
+    return [
+        ('load_balancer', octavia_tempest_plugin.config.OctaviaGroup),
+        ('loadbalancer-feature-enabled',
+         octavia_tempest_plugin.config.LBFeatureEnabledGroup),
+    ]
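
The new list_opts() hook follows the convention that oslo-config-generator
discovers through an oslo.config.opts entry point. A hedged sketch of
consuming it directly, assuming each tuple pairs a group name with an
iterable of oslo.config option objects::

    from octavia_tempest_plugin import opts

    for group, options in opts.list_opts():
        print('[%s]' % group)
        for opt in options:
            # Each opt is an oslo.config Opt, so name and help are available.
            print('# %s: %s' % (opt.name, opt.help))
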
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py b/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
index 1ee70f7..1c8e6e5 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
@@ -41,7 +41,8 @@
                         sni_container_refs=Unset, client_authentication=Unset,
                         client_ca_tls_container_ref=Unset,
                         client_crl_container_ref=Unset, allowed_cidrs=Unset,
-                        alpn_protocols=Unset,
+                        alpn_protocols=Unset, hsts_max_age=Unset,
+                        hsts_include_subdomains=Unset, hsts_preload=Unset,
                         return_object_only=True):
         """Create a listener.
 
@@ -92,6 +93,12 @@
         :param allowed_cidrs: A list of IPv4 or IPv6 CIDRs.
         :param alpn_protocols: A list of ALPN protocols for TERMINATED_HTTPS
                                listeners.
+        :param hsts_include_subdomains: Defines whether the
+            `include_subdomains` directive is used for HSTS or not.
+        :param hsts_max_age: Enables HTTP Strict Transport Security (HSTS)
+            and sets the `max_age` directive to the given value.
+        :param hsts_preload: Defines whether the `preload` directive
+            is used for HSTS or not.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
@@ -218,7 +225,8 @@
                         sni_container_refs=Unset, client_authentication=Unset,
                         client_ca_tls_container_ref=Unset,
                         client_crl_container_ref=Unset, allowed_cidrs=Unset,
-                        alpn_protocols=Unset,
+                        alpn_protocols=Unset, hsts_max_age=Unset,
+                        hsts_include_subdomains=Unset, hsts_preload=Unset,
                         return_object_only=True):
         """Update a listener.
 
@@ -267,6 +275,12 @@
         :param allowed_cidrs: A list of IPv4 or IPv6 CIDRs.
         :param alpn_protocols: A list of ALPN protocols for TERMINATED_HTTPS
                                listeners.
+        :param hsts_include_subdomains: Defines whether the
+            `include_subdomains` directive is used for HSTS or not.
+        :param hsts_max_age: Enables HTTP Strict Transport Security (HSTS)
+            and sets the `max_age` directive to the given value.
+        :param hsts_preload: Defines whether the `preload` directive
+            is used for HSTS or not.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
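
With the new keyword arguments, a test could create a TERMINATED_HTTPS
listener with HSTS enabled. A hedged sketch follows; the non-HSTS parameter
names and the placeholder arguments are assumptions based on the wider
listener client API, not part of this patch::

    def create_hsts_listener(listener_client, lb_id, tls_ref):
        # A one-year max_age enables HSTS; the two booleans add the
        # includeSubDomains and preload directives respectively.
        return listener_client.create_listener(
            protocol='TERMINATED_HTTPS',
            protocol_port=443,
            loadbalancer_id=lb_id,
            default_tls_container_ref=tls_ref,
            hsts_max_age=31536000,
            hsts_include_subdomains=True,
            hsts_preload=True)
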
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py b/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
index c19d97c..e93f33c 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/pool_client.py
@@ -34,7 +34,7 @@
                     admin_state_up=Unset, session_persistence=Unset,
                     ca_tls_container_ref=Unset, crl_container_ref=Unset,
                     tls_enabled=Unset, tls_container_ref=Unset,
-                    return_object_only=True):
+                    alpn_protocols=Unset, return_object_only=True):
         """Create a pool.
 
         :param protocol: The protocol for the resource.
@@ -61,6 +61,7 @@
                                   a PKCS12 bundle with the client
                                   authentication certificate and key used
                                   when connecting to pool members over TLS.
+        :param alpn_protocols: A list of ALPN protocols for TLS-enabled pools.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
@@ -180,7 +181,8 @@
                     description=Unset, tags=Unset, admin_state_up=Unset,
                     session_persistence=Unset, ca_tls_container_ref=Unset,
                     crl_container_ref=Unset, tls_enabled=Unset,
-                    tls_container_ref=Unset, return_object_only=True):
+                    tls_container_ref=Unset, alpn_protocols=Unset,
+                    return_object_only=True):
         """Update a pool.
 
         :param pool_id: The pool ID to update.
@@ -205,6 +207,7 @@
                                   a PKCS12 bundle with the client
                                   authentication certificate and key used
                                   when connecting to pool members over TLS.
+        :param alpn_protocols: A list of ALPN protocols for TLS-enabled pools.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
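
Similarly, the pool client now accepts alpn_protocols for TLS-enabled pools.
A hedged sketch; parameter names other than tls_enabled and alpn_protocols
are assumptions based on the wider pool client API::

    def create_alpn_pool(pool_client, lb_id):
        # Re-encrypting pool that offers HTTP/2 and HTTP/1.1 to members
        # during the backend TLS handshake.
        return pool_client.create_pool(
            protocol='HTTPS',
            lb_algorithm='ROUND_ROBIN',
            loadbalancer_id=lb_id,
            tls_enabled=True,
            alpn_protocols=['h2', 'http/1.1'])
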
diff --git a/octavia_tempest_plugin/tests/RBAC_tests.py b/octavia_tempest_plugin/tests/RBAC_tests.py
index d31d506..8fc356c 100644
--- a/octavia_tempest_plugin/tests/RBAC_tests.py
+++ b/octavia_tempest_plugin/tests/RBAC_tests.py
@@ -59,17 +59,10 @@
             try:
                 cred_obj = getattr(self, cred)
             except AttributeError:
-                # TODO(johnsom) Remove once scoped tokens is the default.
-                if ((cred == 'os_system_admin' or cred == 'os_system_reader')
-                        and not CONF.enforce_scope.octavia):
-                    LOG.info('Skipping %s allowed RBAC test because '
-                             'enforce_scope.octavia is not True', cred)
-                    continue
-                else:
-                    self.fail('Credential {} "expected_allowed" for RBAC '
-                              'testing was not created by tempest '
-                              'credentials setup. This is likely a bug in the '
-                              'test.'.format(cred))
+                self.fail('Credential {} "expected_allowed" for RBAC '
+                          'testing was not created by tempest '
+                          'credentials setup. This is likely a bug in the '
+                          'test.'.format(cred))
             method = self._get_client_method(cred_obj, client_str, method_str)
             try:
                 method(*args, **kwargs)
@@ -155,16 +148,6 @@
         """
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
-            allowed_list.append('os_admin')
 
         # #### Test that disallowed credentials cannot access the API.
         self._check_disallowed(client_str, method_str, allowed_list,
@@ -193,6 +176,8 @@
                               correct scope for access is denied.
         :returns: None on success
         """
+        if CONF.load_balancer.RBAC_test_type == constants.NONE:
+            return
         self._list_get_RBAC_enforcement(client_str, method_str,
                                         expected_allowed, *args, **kwargs)
 
@@ -215,6 +200,8 @@
                               correct scope for access is denied.
         :returns: None on success
         """
+        if CONF.load_balancer.RBAC_test_type == constants.NONE:
+            return
         self._list_get_RBAC_enforcement(client_str, method_str,
                                         expected_allowed, *args, **kwargs)
 
@@ -244,16 +231,6 @@
         """
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
-            allowed_list.append('os_admin')
 
         # #### Test that disallowed credentials cannot access the API.
         self._check_disallowed(client_str, method_str, allowed_list,
@@ -283,6 +260,8 @@
                               correct scope for access is denied.
         :returns: None on success
         """
+        if CONF.load_balancer.RBAC_test_type == constants.NONE:
+            return
         self._CUD_RBAC_enforcement(client_str, method_str, expected_allowed,
                                    status_method, obj_id, *args, **kwargs)
 
@@ -310,6 +289,8 @@
                               correct scope for access is denied.
         :returns: None on success
         """
+        if CONF.load_balancer.RBAC_test_type == constants.NONE:
+            return
         self._CUD_RBAC_enforcement(client_str, method_str, expected_allowed,
                                    status_method, obj_id, *args, **kwargs)
 
@@ -337,6 +318,8 @@
                               correct scope for access is denied.
         :returns: None on success
         """
+        if CONF.load_balancer.RBAC_test_type == constants.NONE:
+            return
         self._CUD_RBAC_enforcement(client_str, method_str, expected_allowed,
                                    status_method, obj_id, *args, **kwargs)
 
@@ -369,34 +352,19 @@
                               correct scope for access is denied.
         :returns: None on success
         """
+        if CONF.load_balancer.RBAC_test_type == constants.NONE:
+            return
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
-            allowed_list.append('os_admin')
 
         for cred in allowed_list:
             try:
                 cred_obj = getattr(self, cred)
             except AttributeError:
-                # TODO(johnsom) Remove once scoped tokens is the default.
-                if ((cred == 'os_system_admin' or cred == 'os_system_reader')
-                        and not CONF.enforce_scope.octavia):
-                    LOG.info('Skipping %s allowed RBAC test because '
-                             'enforce_scope.octavia is not True', cred)
-                    continue
-                else:
-                    self.fail('Credential {} "expected_allowed" for RBAC '
-                              'testing was not created by tempest '
-                              'credentials setup. This is likely a bug in the '
-                              'test.'.format(cred))
+                self.fail('Credential {} "expected_allowed" for RBAC '
+                          'testing was not created by tempest '
+                          'credentials setup. This is likely a bug in the '
+                          'test.'.format(cred))
             method = self._get_client_method(cred_obj, client_str, method_str)
             try:
                 result = method(*args, **kwargs)
@@ -437,34 +405,19 @@
                               correct scope for access is denied.
         :returns: None on success
         """
+        if CONF.load_balancer.RBAC_test_type == constants.NONE:
+            return
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
-            allowed_list.append('os_admin')
 
         for cred in allowed_list:
             try:
                 cred_obj = getattr(self, cred)
             except AttributeError:
-                # TODO(johnsom) Remove once scoped tokens is the default.
-                if ((cred == 'os_system_admin' or cred == 'os_system_reader')
-                        and not CONF.enforce_scope.octavia):
-                    LOG.info('Skipping %s allowed RBAC test because '
-                             'enforce_scope.octavia is not True', cred)
-                    continue
-                else:
-                    self.fail('Credential {} "expected_allowed" for RBAC '
-                              'testing was not created by tempest '
-                              'credentials setup. This is likely a bug in the '
-                              'test.'.format(cred))
+                self.fail('Credential {} "expected_allowed" for RBAC '
+                          'testing was not created by tempest '
+                          'credentials setup. This is likely a bug in the '
+                          'test.'.format(cred))
             method = self._get_client_method(cred_obj, client_str, method_str)
             try:
                 result = method(*args, **kwargs)
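
Every RBAC check in this module now returns immediately when RBAC testing is
disabled, instead of relying on an empty expected_allowed list in each test.
A condensed sketch of that guard, mirroring the pattern added above::

    from tempest import config

    from octavia_tempest_plugin.common import constants

    CONF = config.CONF


    def check_example_RBAC_enforcement(client_str, method_str,
                                       expected_allowed, *args, **kwargs):
        # Skip enforcement entirely when tempest.conf sets
        # [load_balancer] RBAC_test_type = none.
        if CONF.load_balancer.RBAC_test_type == constants.NONE:
            return
        # ... otherwise run the disallowed/allowed credential checks ...
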
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
index 11b0cec..66cb59b 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
@@ -24,6 +24,7 @@
 from tempest.lib import exceptions
 
 from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.services.load_balancer import v2
 from octavia_tempest_plugin.tests import test_base
 from octavia_tempest_plugin.tests import waiters
 
@@ -35,6 +36,7 @@
     CONF.validation.run_validation,
     'Active-Standby tests will not work without run_validation enabled.')
 class ActiveStandbyScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
+    mem_listener_client: v2.ListenerClient
 
     @classmethod
     def resource_setup(cls):
@@ -67,7 +69,7 @@
                 floating_network_id=CONF.network.public_network_id,
                 port_id=port_id)
             floating_ip = result['floatingip']
-            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            LOG.info('lb1_floating_ip: %s', floating_ip)
             cls.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 cls.lb_mem_float_ip_client.delete_floatingip,
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
index e7dccb6..ce2c94f 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -76,7 +76,7 @@
                 floating_network_id=CONF.network.public_network_id,
                 port_id=port_id)
             floating_ip = result['floatingip']
-            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            LOG.info('lb1_floating_ip: %s', floating_ip)
             cls.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 cls.lb_mem_float_ip_client.delete_floatingip,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index 180e4f3..06f93ac 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -90,16 +90,9 @@
                                 CONF.load_balancer.lb_build_timeout)
 
         # Test RBAC for list amphorae
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_list_RBAC_enforcement(
-                'AmphoraClient', 'list_amphorae', expected_allowed)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_list_RBAC_enforcement(
+            'AmphoraClient', 'list_amphorae', expected_allowed)
 
         # Get an actual list of the amphorae
         amphorae = self.lb_admin_amphora_client.list_amphorae()
@@ -178,17 +171,10 @@
         amphora_1 = amphorae[0]
 
         # Test RBAC for update an amphora
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'AmphoraClient', 'update_amphora_config', expected_allowed,
-                None, None, amphora_1[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_update_RBAC_enforcement(
+            'AmphoraClient', 'update_amphora_config', expected_allowed,
+            None, None, amphora_1[const.ID])
 
         self.lb_admin_amphora_client.update_amphora_config(amphora_1[const.ID])
 
@@ -213,17 +199,10 @@
         amphora_1 = amphorae[0]
 
         # Test RBAC for failover an amphora
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'AmphoraClient', 'amphora_failover', expected_allowed,
-                None, None, amphora_1[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_update_RBAC_enforcement(
+            'AmphoraClient', 'amphora_failover', expected_allowed,
+            None, None, amphora_1[const.ID])
 
         self.lb_admin_amphora_client.amphora_failover(amphora_1[const.ID])
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
index fa7b6a4..adcb75a 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
@@ -105,17 +105,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # create an availability zone.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'AvailabilityZoneClient', 'create_availability_zone',
-                expected_allowed, **availability_zone_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_create_RBAC_enforcement(
+            'AvailabilityZoneClient', 'create_availability_zone',
+            expected_allowed, **availability_zone_kwargs)
 
         # Happy path
         availability_zone = (
@@ -232,13 +225,13 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = [
-                'os_system_admin', 'os_system_reader', 'os_roles_lb_admin',
+                'os_admin', 'os_roles_lb_admin',
                 'os_roles_lb_observer', 'os_roles_lb_global_observer',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -385,13 +378,13 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = [
-                'os_system_admin', 'os_system_reader', 'os_roles_lb_admin',
+                'os_admin', 'os_roles_lb_admin',
                 'os_roles_lb_observer', 'os_roles_lb_global_observer',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -454,18 +447,11 @@
 
         # Test that a user without the load balancer role cannot
         # update availability zone details.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'AvailabilityZoneClient', 'update_availability_zone',
-                expected_allowed, None, None, availability_zone[const.NAME],
-                **availability_zone_updated_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_update_RBAC_enforcement(
+            'AvailabilityZoneClient', 'update_availability_zone',
+            expected_allowed, None, None, availability_zone[const.NAME],
+            **availability_zone_updated_kwargs)
 
         updated_availability_zone = (
             self.lb_admin_availability_zone_client.update_availability_zone(
@@ -531,17 +517,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # delete an availability zone.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'AvailabilityZoneClient', 'delete_availability_zone',
-                expected_allowed, None, None, availability_zone[const.NAME])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_delete_RBAC_enforcement(
+            'AvailabilityZoneClient', 'delete_availability_zone',
+            expected_allowed, None, None, availability_zone[const.NAME])
 
         # Happy path
         self.lb_admin_availability_zone_client.delete_availability_zone(
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
index d3833f6..7bcb4d7 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
@@ -44,18 +44,11 @@
 
         # Test that a user without the load balancer admin role cannot
         # list provider availability zone capabilities.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_list_RBAC_enforcement(
-                'AvailabilityZoneCapabilitiesClient',
-                'list_availability_zone_capabilities', expected_allowed,
-                CONF.load_balancer.provider)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_list_RBAC_enforcement(
+            'AvailabilityZoneCapabilitiesClient',
+            'list_availability_zone_capabilities', expected_allowed,
+            CONF.load_balancer.provider)
 
         # Check for an expected availability zone capability for the
         # configured provider
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
index 456a01e..78e12ed 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
@@ -76,18 +76,11 @@
 
         # Test that a user without the load balancer admin role cannot
         # create an availability zone profile.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'AvailabilityZoneProfileClient',
-                'create_availability_zone_profile',
-                expected_allowed, **availability_zone_profile_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_create_RBAC_enforcement(
+            'AvailabilityZoneProfileClient',
+            'create_availability_zone_profile',
+            expected_allowed, **availability_zone_profile_kwargs)
 
         # Happy path
         availability_zone_profile = (
@@ -231,17 +224,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # list availability zone profiles.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_list_RBAC_enforcement(
-                'AvailabilityZoneProfileClient',
-                'list_availability_zone_profiles', expected_allowed)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_list_RBAC_enforcement(
+            'AvailabilityZoneProfileClient',
+            'list_availability_zone_profiles', expected_allowed)
 
         # Check the default sort order (by ID)
         profiles = (self.lb_admin_availability_zone_profile_client
@@ -392,18 +378,11 @@
 
         # Test that a user without the load balancer admin role cannot
         # show an availability zone profile
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'AvailabilityZoneProfileClient',
-                'show_availability_zone_profile', expected_allowed,
-                availability_zone_profile[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_show_RBAC_enforcement(
+            'AvailabilityZoneProfileClient',
+            'show_availability_zone_profile', expected_allowed,
+            availability_zone_profile[const.ID])
 
         result = (
             self.lb_admin_availability_zone_profile_client
@@ -494,19 +473,12 @@
 
         # Test that a user without the load balancer admin role cannot
         # update an availability zone profile.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'AvailabilityZoneProfileClient',
-                'update_availability_zone_profile', expected_allowed,
-                None, None, availability_zone_profile[const.ID],
-                **availability_zone_profile_updated_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_update_RBAC_enforcement(
+            'AvailabilityZoneProfileClient',
+            'update_availability_zone_profile', expected_allowed,
+            None, None, availability_zone_profile[const.ID],
+            **availability_zone_profile_updated_kwargs)
 
         result = (
             self.lb_admin_availability_zone_profile_client
@@ -576,18 +548,11 @@
 
         # Test that a user without the load balancer admin role cannot
         # delete an availability zone profile.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'AvailabilityZoneProfileClient',
-                'delete_availability_zone_profile', expected_allowed,
-                None, None, availability_zone_profile[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_delete_RBAC_enforcement(
+            'AvailabilityZoneProfileClient',
+            'delete_availability_zone_profile', expected_allowed,
+            None, None, availability_zone_profile[const.ID])
 
         # Happy path
         (self.lb_admin_availability_zone_profile_client
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor.py b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
index b5b4254..53503bd 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
@@ -88,17 +88,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # create a flavor.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'FlavorClient', 'create_flavor',
-                expected_allowed, None, None, **flavor_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_create_RBAC_enforcement(
+            'FlavorClient', 'create_flavor',
+            expected_allowed, None, None, **flavor_kwargs)
 
         # Happy path
         flavor = self.lb_admin_flavor_client.create_flavor(**flavor_kwargs)
@@ -198,13 +191,13 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = [
-                'os_system_admin', 'os_system_reader', 'os_roles_lb_admin',
+                'os_admin', 'os_roles_lb_admin',
                 'os_roles_lb_observer', 'os_roles_lb_global_observer',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -326,13 +319,13 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = [
-                'os_system_admin', 'os_system_reader', 'os_roles_lb_admin',
+                'os_admin', 'os_roles_lb_admin',
                 'os_roles_lb_observer', 'os_roles_lb_global_observer',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -390,17 +383,10 @@
 
         # Test that a user without the load balancer role cannot
         # update flavor details.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'FlavorClient', 'update_flavor', expected_allowed, None, None,
-                flavor[const.ID], **flavor_updated_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_update_RBAC_enforcement(
+            'FlavorClient', 'update_flavor', expected_allowed, None, None,
+            flavor[const.ID], **flavor_updated_kwargs)
 
         updated_flavor = self.lb_admin_flavor_client.update_flavor(
             flavor[const.ID], **flavor_updated_kwargs)
@@ -454,17 +440,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # delete a flavor.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'FlavorClient', 'delete_flavor', expected_allowed,
-                None, None, flavor[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_delete_RBAC_enforcement(
+            'FlavorClient', 'delete_flavor', expected_allowed,
+            None, None, flavor[const.ID])
 
         # Happy path
         self.lb_admin_flavor_client.delete_flavor(flavor[const.ID])
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
index 884f656..cd2770c 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
@@ -42,18 +42,11 @@
 
         # Test that a user without the load balancer admin role cannot
         # list provider flavor capabilities.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_list_RBAC_enforcement(
-                'FlavorCapabilitiesClient',
-                'list_flavor_capabilities', expected_allowed,
-                CONF.load_balancer.provider)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_list_RBAC_enforcement(
+            'FlavorCapabilitiesClient',
+            'list_flavor_capabilities', expected_allowed,
+            CONF.load_balancer.provider)
 
         # Check for an expected flavor capability for the configured provider
         admin_capabilities_client = self.lb_admin_flavor_capabilities_client
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
index 39f3338..0b6a1ee 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
@@ -60,17 +60,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # create a flavor profile
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'FlavorProfileClient', 'create_flavor_profile',
-                expected_allowed, None, None, **flavor_profile_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_create_RBAC_enforcement(
+            'FlavorProfileClient', 'create_flavor_profile',
+            expected_allowed, None, None, **flavor_profile_kwargs)
 
         # Happy path
         flavor_profile = (
@@ -180,17 +173,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # list flavor profiles.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_list_RBAC_enforcement(
-                'FlavorProfileClient', 'list_flavor_profiles',
-                expected_allowed)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_list_RBAC_enforcement(
+            'FlavorProfileClient', 'list_flavor_profiles',
+            expected_allowed)
 
         # Check the default sort order (by ID)
         profiles = self.lb_admin_flavor_profile_client.list_flavor_profiles()
@@ -309,17 +295,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # show a flavor profile.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'FlavorProfileClient', 'show_flavor_profile',
-                expected_allowed, flavor_profile[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_show_RBAC_enforcement(
+            'FlavorProfileClient', 'show_flavor_profile',
+            expected_allowed, flavor_profile[const.ID])
 
         result = (
             self.lb_admin_flavor_profile_client.show_flavor_profile(
@@ -387,18 +366,11 @@
 
         # Test that a user without the load balancer admin role cannot
         # update a flavor profile.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'FlavorProfileClient', 'update_flavor_profile',
-                expected_allowed, None, None, flavor_profile[const.ID],
-                **flavor_profile_updated_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_update_RBAC_enforcement(
+            'FlavorProfileClient', 'update_flavor_profile',
+            expected_allowed, None, None, flavor_profile[const.ID],
+            **flavor_profile_updated_kwargs)
 
         result = self.lb_admin_flavor_profile_client.update_flavor_profile(
             flavor_profile[const.ID], **flavor_profile_updated_kwargs)
@@ -454,17 +426,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # delete a flavor profile
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'FlavorProfileClient', 'delete_flavor_profile',
-                expected_allowed, None, None, flavor_profile[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_delete_RBAC_enforcement(
+            'FlavorProfileClient', 'delete_flavor_profile',
+            expected_allowed, None, None, flavor_profile[const.ID])
 
         # Happy path
         self.lb_admin_flavor_profile_client.delete_flavor_profile(
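
The flavor-profile hunks above collapse the per-RBAC_test_type branches into a single expected_allowed list handed straight to the check_*_RBAC_enforcement helpers. Those helpers live in test_base.py rather than in this patch; purely as an illustration of what such a check amounts to, a minimal stand-in might look like the following (the helper name and the clients_by_cred mapping are assumptions, and the real helpers take additional arguments such as status_method and obj_id):

    from tempest.lib import exceptions

    # Illustrative stand-in for the check_*_RBAC_enforcement helpers; the
    # helper name and the clients_by_cred mapping are assumptions, and the
    # real helpers (in octavia_tempest_plugin/tests/test_base.py) take
    # extra arguments such as status_method and obj_id.
    def assert_rbac_enforced(test, clients_by_cred, method_name,
                             expected_allowed, *args, **kwargs):
        for cred_name, client in clients_by_cred.items():
            method = getattr(client, method_name)
            if cred_name in expected_allowed:
                method(*args, **kwargs)      # allowed roles must succeed
            else:
                test.assertRaises(exceptions.Forbidden,
                                  method, *args, **kwargs)
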
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index a305ead..3713552 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -277,21 +277,13 @@
 
         # Test that a user without the loadbalancer role cannot
         # create a healthmonitor
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'HealthMonitorClient', 'create_healthmonitor',
-                expected_allowed,
-                status_method=self.mem_lb_client.show_loadbalancer,
-                obj_id=self.lb_id, **hm_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_create_RBAC_enforcement(
+            'HealthMonitorClient', 'create_healthmonitor',
+            expected_allowed,
+            status_method=self.mem_lb_client.show_loadbalancer,
+            obj_id=self.lb_id, **hm_kwargs)
 
         hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
         self.addCleanup(
@@ -724,8 +716,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -739,11 +731,11 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_member',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member',
                                 'os_roles_lb_global_observer']
         if expected_allowed:
             self.check_list_IDs_RBAC_enforcement(
@@ -763,13 +755,13 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -1193,11 +1185,10 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
         if expected_allowed:
@@ -1470,20 +1461,12 @@
 
         # Test that a user, without the loadbalancer member role, cannot
         # update this healthmonitor.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'HealthMonitorClient', 'update_healthmonitor',
-                expected_allowed, None, None, hm[const.ID],
-                admin_state_up=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'HealthMonitorClient', 'update_healthmonitor',
+            expected_allowed, None, None, hm[const.ID],
+            admin_state_up=True)
 
         # Assert we didn't go into PENDING_*
         hm_check = self.mem_healthmonitor_client.show_healthmonitor(
@@ -1773,19 +1756,11 @@
 
         # Test that a user without the loadbalancer role cannot delete this
         # healthmonitor.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'HealthMonitorClient', 'delete_healthmonitor',
-                expected_allowed, None, None, hm[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_delete_RBAC_enforcement(
+            'HealthMonitorClient', 'delete_healthmonitor',
+            expected_allowed, None, None, hm[const.ID])
 
         self.mem_healthmonitor_client.delete_healthmonitor(hm[const.ID])
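
The health monitor list and show checks above still branch on CONF.load_balancer.RBAC_test_type, which comes from this plugin's tempest configuration. Assuming the option accepts values named like the constants compared against here (advanced, owner_or_admin, keystone_default_roles), a tempest.conf stanza selecting one of these modes would look roughly like:

    [load_balancer]
    # Selects which expected_allowed role lists the RBAC checks use; the
    # other modes exercised above are advanced and owner_or_admin.
    RBAC_test_type = keystone_default_roles
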
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index e7ed5a6..868950f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -62,6 +62,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -79,6 +84,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -134,24 +144,21 @@
 
         # Test that a user without the load balancer role cannot
         # create a l7policy
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'L7PolicyClient', 'create_l7policy',
-                expected_allowed,
-                status_method=self.mem_lb_client.show_loadbalancer,
-                obj_id=self.lb_id, **l7policy_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_create_RBAC_enforcement(
+            'L7PolicyClient', 'create_l7policy',
+            expected_allowed,
+            status_method=self.mem_lb_client.show_loadbalancer,
+            obj_id=self.lb_id, **l7policy_kwargs)
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -365,8 +372,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -380,11 +387,11 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_member',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member',
                                 'os_roles_lb_global_observer']
         if expected_allowed:
             self.check_list_IDs_RBAC_enforcement(
@@ -406,13 +413,13 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -609,6 +616,11 @@
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -647,22 +659,16 @@
 
         # Test that the appropriate users can see or not see the L7 policies
         # based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'L7PolicyClient', 'show_l7policy',
-                expected_allowed, l7policy[const.ID])
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'L7PolicyClient', 'show_l7policy',
+            expected_allowed, l7policy[const.ID])
 
     @decorators.idempotent_id('08f73b22-550b-4e5a-b3d6-2ec03251ca13')
     def test_l7policy_update(self):
@@ -720,6 +726,11 @@
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -756,20 +767,12 @@
 
         # Test that a user, without the loadbalancer member role, cannot
         # update this L7 policy.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'L7PolicyClient', 'update_l7policy',
-                expected_allowed, None, None, l7policy[const.ID],
-                admin_state_up=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'L7PolicyClient', 'update_l7policy',
+            expected_allowed, None, None, l7policy[const.ID],
+            admin_state_up=True)
 
         # Assert we didn't go into PENDING_*
         l7policy_check = self.mem_l7policy_client.show_l7policy(
@@ -858,6 +861,11 @@
         }
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -867,19 +875,11 @@
 
         # Test that a user without the loadbalancer role cannot delete this
         # L7 policy.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'L7PolicyClient', 'delete_l7policy',
-                expected_allowed, None, None, l7policy[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_delete_RBAC_enforcement(
+            'L7PolicyClient', 'delete_l7policy',
+            expected_allowed, None, None, l7policy[const.ID])
 
         self.mem_l7policy_client.delete_l7policy(l7policy[const.ID])
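
The addClassResourceCleanup calls added throughout this file register the created listener, pool and L7 policies for deletion at class teardown, so a failed assertion no longer leaks the resource. The pattern, mirroring the calls above (cleanup_l7policy is assumed to tolerate objects the test already deleted itself):

    # Register the cleanup immediately after the create call; cleanups
    # registered this way run during the class-level resource_cleanup().
    l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
    self.addClassResourceCleanup(
        self.mem_l7policy_client.cleanup_l7policy,
        l7policy[const.ID],
        lb_client=self.mem_lb_client, lb_id=self.lb_id)
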
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
index 5cb85c4..0e9bb15 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
@@ -142,21 +142,13 @@
 
         # Test that a user without the loadbalancer role cannot
         # create an L7 rule.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'L7RuleClient', 'create_l7rule',
-                expected_allowed,
-                status_method=self.mem_lb_client.show_loadbalancer,
-                obj_id=self.lb_id, **l7rule_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_create_RBAC_enforcement(
+            'L7RuleClient', 'create_l7rule',
+            expected_allowed,
+            status_method=self.mem_lb_client.show_loadbalancer,
+            obj_id=self.lb_id, **l7rule_kwargs)
 
         l7rule = self.mem_l7rule_client.create_l7rule(**l7rule_kwargs)
         self.addClassResourceCleanup(
@@ -357,11 +349,11 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_member',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member',
                                 'os_roles_lb_global_observer']
         if expected_allowed:
             self.check_list_IDs_RBAC_enforcement(
@@ -380,11 +372,10 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
         if expected_allowed:
@@ -561,23 +552,17 @@
 
         # Test that the appropriate users can see or not see the L7 rule
         # based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'L7RuleClient', 'show_l7rule',
-                expected_allowed, l7rule[const.ID],
-                l7policy_id=self.l7policy_id)
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'L7RuleClient', 'show_l7rule',
+            expected_allowed, l7rule[const.ID],
+            l7policy_id=self.l7policy_id)
 
     @decorators.idempotent_id('f8cee23b-89b6-4f3a-a842-1463daf42cf7')
     def test_l7rule_update(self):
@@ -649,20 +634,12 @@
 
         # Test that a user, without the loadbalancer member role, cannot
         # update this L7 rule.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'L7RuleClient', 'update_l7rule',
-                expected_allowed, None, None, l7rule[const.ID],
-                l7policy_id=self.l7policy_id, admin_state_up=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'L7RuleClient', 'update_l7rule',
+            expected_allowed, None, None, l7rule[const.ID],
+            l7policy_id=self.l7policy_id, admin_state_up=True)
 
         # Assert we didn't go into PENDING_*
         l7rule_check = self.mem_l7rule_client.show_l7rule(
@@ -751,20 +728,12 @@
 
         # Test that a user without the loadbalancer role cannot delete this
         # L7 rule.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'L7RuleClient', 'delete_l7rule',
-                expected_allowed, None, None, l7rule[const.ID],
-                l7policy_id=self.l7policy_id)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_delete_RBAC_enforcement(
+            'L7RuleClient', 'delete_l7rule',
+            expected_allowed, None, None, l7rule[const.ID],
+            l7policy_id=self.l7policy_id)
 
         self.mem_l7rule_client.delete_l7rule(l7rule[const.ID],
                                              l7policy_id=self.l7policy_id)
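
Every create and update above is followed by waiters.wait_for_status() polling the parent load balancer back to ACTIVE before the test continues. The real waiter in octavia_tempest_plugin/tests/waiters.py handles more cases (for example ERROR states); the following is only a rough sketch of the semantics the call sites rely on:

    import time

    # Rough sketch of the polling behind wait_for_status(); not the
    # plugin's implementation.
    def wait_for_status(show_func, obj_id, status_key, expected_status,
                        check_interval, check_timeout):
        start = time.time()
        while time.time() - start < check_timeout:
            obj = show_func(obj_id)
            if obj[status_key] == expected_status:
                return obj
            time.sleep(check_interval)
        raise TimeoutError(
            f'{obj_id} did not reach {status_key}={expected_status} '
            f'within {check_timeout}s')
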
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 1c2fa75..46735a5 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -12,31 +12,205 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import base64
 import time
 from uuid import UUID
 
+from cryptography.hazmat.primitives import serialization
+
 from dateutil import parser
+from oslo_log import log as logging
 from oslo_utils import strutils
+from oslo_utils import uuidutils
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions
+import testtools
 
+from octavia_tempest_plugin.common import barbican_client_mgr
+from octavia_tempest_plugin.common import cert_utils
 from octavia_tempest_plugin.common import constants as const
 from octavia_tempest_plugin.tests import test_base
 from octavia_tempest_plugin.tests import waiters
 
 CONF = config.CONF
+LOG = logging.getLogger(__name__)
 
 
 class ListenerAPITest(test_base.LoadBalancerBaseTest):
     """Test the listener object API."""
 
     @classmethod
+    def _store_secret(cls, barbican_mgr, secret):
+        new_secret_ref = barbican_mgr.store_secret(secret)
+        cls.addClassResourceCleanup(barbican_mgr.delete_secret,
+                                    new_secret_ref)
+
+        # Set the barbican ACL if the Octavia API version doesn't do it
+        # automatically.
+        if not cls.mem_lb_client.is_version_supported(
+                cls.api_version, '2.1'):
+            user_list = cls.os_admin.users_v3_client.list_users(
+                name=CONF.load_balancer.octavia_svc_username)
+            msg = 'Only one user named "{0}" should exist, {1} found.'.format(
+                CONF.load_balancer.octavia_svc_username,
+                len(user_list['users']))
+            assert 1 == len(user_list['users']), msg
+            barbican_mgr.add_acl(new_secret_ref, user_list['users'][0]['id'])
+        return new_secret_ref
+
+    @classmethod
+    def _generate_load_certificate(cls, barbican_mgr, ca_cert, ca_key, name):
+        new_cert, new_key = cert_utils.generate_server_cert_and_key(
+            ca_cert, ca_key, name)
+
+        LOG.debug('%s Cert: %s', name, new_cert.public_bytes(
+            serialization.Encoding.PEM))
+        LOG.debug('%s private Key: %s', name, new_key.private_bytes(
+            encoding=serialization.Encoding.PEM,
+            format=serialization.PrivateFormat.TraditionalOpenSSL,
+            encryption_algorithm=serialization.NoEncryption()))
+        new_public_key = new_key.public_key()
+        LOG.debug('%s public Key: %s', name, new_public_key.public_bytes(
+            encoding=serialization.Encoding.PEM,
+            format=serialization.PublicFormat.SubjectPublicKeyInfo))
+
+        # Create the pkcs12 bundle
+        pkcs12 = cert_utils.generate_pkcs12_bundle(new_cert, new_key)
+        LOG.debug('%s PKCS12 bundle: %s', name, base64.b64encode(pkcs12))
+
+        new_secret_ref = cls._store_secret(barbican_mgr, pkcs12)
+
+        return new_cert, new_key, new_secret_ref
+
+    @classmethod
+    def _load_pool_pki(cls):
+        # Create the member client authentication CA
+        cls.member_client_ca_cert, member_client_ca_key = (
+            cert_utils.generate_ca_cert_and_key())
+
+        # Create client cert and key
+        cls.member_client_cn = uuidutils.generate_uuid()
+        cls.member_client_cert, cls.member_client_key = (
+            cert_utils.generate_client_cert_and_key(
+                cls.member_client_ca_cert, member_client_ca_key,
+                cls.member_client_cn))
+
+        # Create the pkcs12 bundle
+        pkcs12 = cert_utils.generate_pkcs12_bundle(cls.member_client_cert,
+                                                   cls.member_client_key)
+        LOG.debug('Pool client PKCS12 bundle: %s', base64.b64encode(pkcs12))
+
+        cls.pool_client_ref = cls._store_secret(cls.barbican_mgr, pkcs12)
+
+        cls.member_ca_cert, cls.member_ca_key = (
+            cert_utils.generate_ca_cert_and_key())
+
+        cert, key = cert_utils.generate_server_cert_and_key(
+            cls.member_ca_cert, cls.member_ca_key, cls.server_uuid)
+
+        cls.pool_CA_ref = cls._store_secret(
+            cls.barbican_mgr,
+            cls.member_ca_cert.public_bytes(serialization.Encoding.PEM))
+
+        cls.member_crl = cert_utils.generate_certificate_revocation_list(
+            cls.member_ca_cert, cls.member_ca_key, cert)
+
+        cls.pool_CRL_ref = cls._store_secret(
+            cls.barbican_mgr,
+            cls.member_crl.public_bytes(serialization.Encoding.PEM))
+
+    @classmethod
+    def should_apply_terminated_https(cls, protocol=None):
+        if protocol and protocol != const.TERMINATED_HTTPS:
+            return False
+        return CONF.load_balancer.test_with_noop or getattr(
+            CONF.service_available, 'barbican', False)
+
+    @classmethod
     def resource_setup(cls):
         """Setup resources needed by the tests."""
         super(ListenerAPITest, cls).resource_setup()
 
+        if CONF.load_balancer.test_with_noop:
+            cls.server_secret_ref = uuidutils.generate_uuid()
+            cls.SNI1_secret_ref = uuidutils.generate_uuid()
+            cls.SNI2_secret_ref = uuidutils.generate_uuid()
+        elif getattr(CONF.service_available, 'barbican', False):
+            # Create a CA self-signed cert and key
+            cls.ca_cert, ca_key = cert_utils.generate_ca_cert_and_key()
+
+            LOG.debug('CA Cert: %s', cls.ca_cert.public_bytes(
+                serialization.Encoding.PEM))
+            LOG.debug('CA private Key: %s', ca_key.private_bytes(
+                encoding=serialization.Encoding.PEM,
+                format=serialization.PrivateFormat.TraditionalOpenSSL,
+                encryption_algorithm=serialization.NoEncryption()))
+            LOG.debug('CA public Key: %s', ca_key.public_key().public_bytes(
+                encoding=serialization.Encoding.PEM,
+                format=serialization.PublicFormat.SubjectPublicKeyInfo))
+
+            # Load the secret into the barbican service under the
+            # os_roles_lb_member tenant
+            cls.barbican_mgr = barbican_client_mgr.BarbicanClientManager(
+                cls.os_roles_lb_member)
+
+            # Create a server cert and key
+            # This will be used as the "default certificate" in SNI tests.
+            cls.server_uuid = uuidutils.generate_uuid()
+            LOG.debug('Server (default) UUID: %s', cls.server_uuid)
+
+            server_cert, server_key, cls.server_secret_ref = (
+                cls._generate_load_certificate(cls.barbican_mgr, cls.ca_cert,
+                                               ca_key, cls.server_uuid))
+
+            # Create the SNI1 cert and key
+            cls.SNI1_uuid = uuidutils.generate_uuid()
+            LOG.debug('SNI1 UUID: %s', cls.SNI1_uuid)
+
+            SNI1_cert, SNI1_key, cls.SNI1_secret_ref = (
+                cls._generate_load_certificate(cls.barbican_mgr, cls.ca_cert,
+                                               ca_key, cls.SNI1_uuid))
+
+            # Create the SNI2 cert and key
+            cls.SNI2_uuid = uuidutils.generate_uuid()
+            LOG.debug('SNI2 UUID: %s', cls.SNI2_uuid)
+
+            SNI2_cert, SNI2_key, cls.SNI2_secret_ref = (
+                cls._generate_load_certificate(cls.barbican_mgr, cls.ca_cert,
+                                               ca_key, cls.SNI2_uuid))
+
+            # Create the client authentication CA
+            cls.client_ca_cert, client_ca_key = (
+                cert_utils.generate_ca_cert_and_key())
+
+            cls.client_ca_cert_ref = cls._store_secret(
+                cls.barbican_mgr,
+                cls.client_ca_cert.public_bytes(serialization.Encoding.PEM))
+
+            # Create client cert and key
+            cls.client_cn = uuidutils.generate_uuid()
+            cls.client_cert, cls.client_key = (
+                cert_utils.generate_client_cert_and_key(
+                    cls.client_ca_cert, client_ca_key, cls.client_cn))
+
+            # Create revoked client cert and key
+            cls.revoked_client_cn = uuidutils.generate_uuid()
+            cls.revoked_client_cert, cls.revoked_client_key = (
+                cert_utils.generate_client_cert_and_key(
+                    cls.client_ca_cert, client_ca_key, cls.revoked_client_cn))
+
+            # Create certificate revocation list and revoke cert
+            cls.client_crl = cert_utils.generate_certificate_revocation_list(
+                cls.client_ca_cert, client_ca_key, cls.revoked_client_cert)
+
+            cls.client_crl_ref = cls._store_secret(
+                cls.barbican_mgr,
+                cls.client_crl.public_bytes(serialization.Encoding.PEM))
+
+            cls._load_pool_pki()
+
         lb_name = data_utils.rand_name("lb_member_lb1_listener")
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
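
resource_setup above generates a CA plus server and SNI certificates, wraps each server certificate and key into a PKCS12 bundle with cert_utils.generate_pkcs12_bundle, and stores the result in barbican. For orientation only, such a bundle can be produced with the cryptography library roughly as follows (a sketch; the cert_utils helper may differ in details):

    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.serialization import pkcs12

    # Sketch: build an unencrypted PKCS12 blob from a certificate/key pair.
    def pkcs12_bundle(server_cert, server_key):
        return pkcs12.serialize_key_and_certificates(
            name=None,                 # optional friendly name (bytes)
            key=server_key,
            cert=server_cert,
            cas=None,                  # no intermediate chain
            encryption_algorithm=serialization.NoEncryption())
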
@@ -79,15 +253,37 @@
     def test_tcp_listener_create(self):
         self._test_listener_create(const.TCP, 8002)
 
+    @decorators.idempotent_id('1a6ba0d0-f309-4088-a686-dda0e9ab7e43')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_create(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_create(const.PROMETHEUS, 8090)
+
+    @decorators.idempotent_id('df9861c5-4a2a-4122-8d8f-5556156e343e')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_terminated_https_listener_create(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or the noop mode.')
+        self._test_listener_create(const.TERMINATED_HTTPS, 8095)
+
     @decorators.idempotent_id('7b53f336-47bc-45ae-bbd7-4342ef0673fc')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_create(self):
         self._test_listener_create(const.UDP, 8003)
 
+    @decorators.idempotent_id('d6d36c32-27ff-4977-9d21-fd71a14e3b20')
+    def test_sctp_listener_create(self):
+        self._test_listener_create(const.SCTP, 8004)
+
     def _test_listener_create(self, protocol, protocol_port):
         """Tests listener create and basic show APIs.
 
@@ -97,8 +293,12 @@
         * Show listener details.
         * Validate the show reflects the requested values.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-create")
         listener_description = data_utils.arbitrary_string(size=255)
+        hsts_supported = self.mem_listener_client.is_version_supported(
+            self.api_version, '2.27') and protocol == const.TERMINATED_HTTPS
 
         listener_kwargs = {
             const.NAME: listener_name,
@@ -112,10 +312,6 @@
             # but this will allow us to test that the field isn't mandatory,
             # as well as not conflate pool failures with listener test failures
             # const.DEFAULT_POOL_ID: self.pool_id,
-
-            # TODO(rm_work): need to add TLS related stuff
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
         }
         if protocol == const.HTTP:
             listener_kwargs[const.INSERT_HEADERS] = {
@@ -123,6 +319,15 @@
                 const.X_FORWARDED_PORT: "true",
                 const.X_FORWARDED_PROTO: "true",
             }
+
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_kwargs.update({
@@ -148,29 +353,30 @@
                 exceptions.BadRequest,
                 self.mem_listener_client.create_listener,
                 **listener_kwargs)
-
             listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
 
+        if hsts_supported:
+            listener_kwargs[const.HSTS_PRELOAD] = True
+            listener_kwargs[const.HSTS_MAX_AGE] = 10000
+            listener_kwargs[const.HSTS_INCLUDE_SUBDOMAINS] = True
+
         # Test that a user without the loadbalancer role cannot
         # create a listener.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'ListenerClient', 'create_listener',
-                expected_allowed,
-                status_method=self.mem_lb_client.show_loadbalancer,
-                obj_id=self.lb_id, **listener_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_create_RBAC_enforcement(
+            'ListenerClient', 'create_listener',
+            expected_allowed,
+            status_method=self.mem_lb_client.show_loadbalancer,
+            obj_id=self.lb_id, **listener_kwargs)
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -202,6 +408,11 @@
             equal_items.append(const.TIMEOUT_MEMBER_DATA)
             equal_items.append(const.TIMEOUT_TCP_INSPECT)
 
+        if hsts_supported:
+            equal_items.append(const.HSTS_PRELOAD)
+            equal_items.append(const.HSTS_MAX_AGE)
+            equal_items.append(const.HSTS_INCLUDE_SUBDOMAINS)
+
         for item in equal_items:
             self.assertEqual(listener_kwargs[item], listener[item])
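
Several listener features above are gated on is_version_supported() checks against the Octavia API version that introduced them (for example 2.1, 2.5 for tags, 2.25 for PROMETHEUS listeners and 2.27 for the HSTS fields). A minimal sketch of such a check, assuming plain dotted numeric versions (the plugin's helper may differ):

    # Minimal sketch of an "is the API at least version X.Y" comparison.
    def is_version_supported(api_version, required_version):
        current = tuple(int(part) for part in api_version.split('.'))
        required = tuple(int(part) for part in required_version.split('.'))
        return current >= required
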
 
@@ -223,6 +434,14 @@
             self.assertTrue(strutils.bool_from_string(
                 insert_headers[const.X_FORWARDED_PROTO]))
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            self.assertEqual(self.server_secret_ref,
+                             listener[const.DEFAULT_TLS_CONTAINER_REF])
+            self.assertEqual(sorted([self.SNI1_secret_ref,
+                                     self.SNI2_secret_ref]),
+                             sorted(listener[const.SNI_CONTAINER_REFS]))
+
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.5'):
             self.assertCountEqual(listener_kwargs[const.TAGS],
@@ -233,27 +452,60 @@
             self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
 
     @decorators.idempotent_id('cceac303-4db5-4d5a-9f6e-ff33780a5f29')
-    def test_http_udp_tcp_listener_create_on_same_port(self):
+    def test_http_udp_sctp_tcp_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.HTTP, const.UDP,
+                                                const.SCTP,
                                                 const.TCP, 8010)
 
     @decorators.idempotent_id('930338b8-3029-48a6-89b2-8b062060fe61')
-    def test_http_udp_https_listener_create_on_same_port(self):
+    def test_http_udp_sctp_https_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.HTTP, const.UDP,
+                                                const.SCTP,
                                                 const.HTTPS, 8011)
 
     @decorators.idempotent_id('01a21892-008a-4327-b4fd-fbf194ecb1a5')
-    def test_tcp_udp_http_listener_create_on_same_port(self):
+    def test_tcp_udp_sctp_http_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.TCP, const.UDP,
+                                                const.SCTP,
                                                 const.HTTP, 8012)
 
     @decorators.idempotent_id('5da764a4-c03a-46ed-848b-98b9d9fa9089')
-    def test_tcp_udp_https_listener_create_on_same_port(self):
+    def test_tcp_udp_sctp_https_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.TCP, const.UDP,
+                                                const.SCTP,
                                                 const.HTTPS, 8013)
 
+    @decorators.idempotent_id('128dabd0-3a9b-4c11-9ef5-8d189a290f17')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_http_udp_sctp_terminated_https_listener_create_on_same_port(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or the noop mode.')
+        self._test_listener_create_on_same_port(const.HTTP, const.UDP,
+                                                const.SCTP,
+                                                const.TERMINATED_HTTPS, 8014)
+
+    @decorators.idempotent_id('21da2598-c79e-4548-8fe0-b47749027010')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_tcp_udp_sctp_terminated_https_listener_create_on_same_port(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or the noop mode.')
+        self._test_listener_create_on_same_port(const.TCP, const.UDP,
+                                                const.SCTP,
+                                                const.TERMINATED_HTTPS, 8015)
+
     def _test_listener_create_on_same_port(self, protocol1, protocol2,
-                                           protocol3, protocol_port):
+                                           protocol3, protocol4,
+                                           protocol_port):
         """Tests listener creation on same port number.
 
         * Create a first listener.
@@ -261,10 +513,25 @@
           protocol.
         * Create a second listener with the same parameters and ensure that
           an error is triggered.
-        * Create a third listener with another protocol over TCP, and ensure
+        * Create a third listener on an existing port, but with a different
+          protocol.
+        * Create a fourth listener with another protocol over TCP, and ensure
           that it fails.
         """
 
+        skip_protocol1 = (
+            not self._validate_listener_protocol(protocol1,
+                                                 raise_if_unsupported=False))
+        skip_protocol2 = (
+            not self._validate_listener_protocol(protocol2,
+                                                 raise_if_unsupported=False))
+        skip_protocol3 = (
+            not self._validate_listener_protocol(protocol3,
+                                                 raise_if_unsupported=False))
+        skip_protocol4 = (
+            not self._validate_listener_protocol(protocol4,
+                                                 raise_if_unsupported=False))
+
         # Using listeners on the same port for TCP and UDP was not supported
         # before Train. Use 2.11 API version as reference to detect previous
         # releases and skip the test.
@@ -274,92 +541,139 @@
                                      'is only available on Octavia API '
                                      'version 2.11 or newer.')
 
-        listener_name = data_utils.rand_name("lb_member_listener1-create")
+        if not skip_protocol1:
+            listener_name = data_utils.rand_name("lb_member_listener1-create")
 
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol1,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200
-        }
+            listener_kwargs = {
+                const.NAME: listener_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol1,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200
+            }
 
-        try:
-            self.mem_listener_client.create_listener(**listener_kwargs)
-        except exceptions.BadRequest as e:
-            faultstring = e.resp_body.get('faultstring', '')
-            if ("Invalid input for field/attribute protocol." in faultstring
-                    and "Value should be one of:" in faultstring):
-                raise self.skipException("Skipping unsupported protocol")
-            raise e
+            try:
+                self.mem_listener_client.create_listener(**listener_kwargs)
+            except exceptions.BadRequest as e:
+                fs = e.resp_body.get('faultstring', '')
+                if ("Invalid input for field/attribute protocol." in fs
+                        and "Value should be one of:" in fs):
+                    LOG.info("Skipping unsupported protocol: %s",
+                             listener_kwargs[const.PROTOCOL])
+                else:
+                    raise e
+            else:
+                waiters.wait_for_status(
+                    self.mem_lb_client.show_loadbalancer, self.lb_id,
+                    const.PROVISIONING_STATUS, const.ACTIVE,
+                    CONF.load_balancer.build_interval,
+                    CONF.load_balancer.build_timeout)
 
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
+        if not skip_protocol2:
+            # Create a listener on the same port, but with a different protocol
+            listener2_name = data_utils.rand_name("lb_member_listener2-create")
 
-        # Create a listener on the same port, but with a different protocol
-        listener2_name = data_utils.rand_name("lb_member_listener2-create")
+            listener2_kwargs = {
+                const.NAME: listener2_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol2,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
 
-        listener2_kwargs = {
-            const.NAME: listener2_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol2,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-        }
+            try:
+                self.mem_listener_client.create_listener(**listener2_kwargs)
+            except exceptions.BadRequest as e:
+                fs = e.resp_body.get('faultstring', '')
+                if ("Invalid input for field/attribute protocol." in fs
+                        and "Value should be one of:" in fs):
+                    LOG.info("Skipping unsupported protocol: %s",
+                             listener_kwargs[const.PROTOCOL])
+                else:
+                    raise e
+            else:
+                waiters.wait_for_status(
+                    self.mem_lb_client.show_loadbalancer, self.lb_id,
+                    const.PROVISIONING_STATUS, const.ACTIVE,
+                    CONF.load_balancer.build_interval,
+                    CONF.load_balancer.build_timeout)
 
-        try:
-            self.mem_listener_client.create_listener(**listener2_kwargs)
-        except exceptions.BadRequest as e:
-            faultstring = e.resp_body.get('faultstring', '')
-            if ("Invalid input for field/attribute protocol." in faultstring
-                    and "Value should be one of:" in faultstring):
-                raise self.skipException("Skipping unsupported protocol")
-            raise e
+        if not skip_protocol1:
+            # Create a listener on the same port, with an already used protocol
+            listener3_name = data_utils.rand_name("lb_member_listener3-create")
 
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
+            listener3_kwargs = {
+                const.NAME: listener3_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol1,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
 
-        # Create a listener on the same port, with an already used protocol
-        listener3_name = data_utils.rand_name("lb_member_listener3-create")
+            self.assertRaises(
+                exceptions.Conflict,
+                self.mem_listener_client.create_listener,
+                **listener3_kwargs)
 
-        listener3_kwargs = {
-            const.NAME: listener3_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol1,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-        }
+        if not skip_protocol3:
+            # Create a listener on the same port, with a different protocol
+            listener4_name = data_utils.rand_name("lb_member_listener4-create")
 
-        self.assertRaises(
-            exceptions.Conflict,
-            self.mem_listener_client.create_listener,
-            **listener3_kwargs)
+            listener4_kwargs = {
+                const.NAME: listener4_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol3,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
 
-        # Create a listener on the same port, with another protocol over TCP
-        listener4_name = data_utils.rand_name("lb_member_listener4-create")
+            try:
+                self.mem_listener_client.create_listener(**listener4_kwargs)
+            except exceptions.BadRequest as e:
+                fs = e.resp_body.get('faultstring', '')
+                if ("Invalid input for field/attribute protocol." in fs
+                        and "Value should be one of:" in fs):
+                    LOG.info("Skipping unsupported protocol: %s",
+                             listener4_kwargs[const.PROTOCOL])
+                else:
+                    raise e
+            else:
+                waiters.wait_for_status(
+                    self.mem_lb_client.show_loadbalancer, self.lb_id,
+                    const.PROVISIONING_STATUS, const.ACTIVE,
+                    CONF.load_balancer.build_interval,
+                    CONF.load_balancer.build_timeout)
 
-        listener4_kwargs = {
-            const.NAME: listener4_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol3,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-        }
+        if not skip_protocol4:
+            # Create a listener on the same port, with another protocol over
+            # TCP
+            listener5_name = data_utils.rand_name("lb_member_listener5-create")
 
-        self.assertRaises(
-            exceptions.Conflict,
-            self.mem_listener_client.create_listener,
-            **listener4_kwargs)
+            listener5_kwargs = {
+                const.NAME: listener5_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol4,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
+
+            # Add terminated_https args
+            if self.should_apply_terminated_https(protocol=protocol4):
+                listener5_kwargs.update({
+                    const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                    const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                               self.SNI2_secret_ref],
+                })
+
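+            # Reusing the port with another TCP-based protocol is expected
+            # to be rejected with a 409 Conflict.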
+            self.assertRaises(
+                exceptions.Conflict,
+                self.mem_listener_client.create_listener,
+                **listener5_kwargs)
 
     @decorators.idempotent_id('78ba6eb0-178c-477e-9156-b6775ca7b271')
     def test_http_listener_list(self):
@@ -369,19 +683,41 @@
     def test_https_listener_list(self):
         self._test_listener_list(const.HTTPS, 8030)
 
+    @decorators.idempotent_id('5473e071-8277-4ac5-9277-01ecaf46e274')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_list(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_list(const.PROMETHEUS, 8091)
+
     @decorators.idempotent_id('1cd476e2-7788-415e-bcaf-c377acfc9794')
     def test_tcp_listener_list(self):
         self._test_listener_list(const.TCP, 8030)
 
     @decorators.idempotent_id('c08fb77e-b317-4d6f-b430-91f5b27ebac6')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_list(self):
         self._test_listener_list(const.UDP, 8040)
 
+    @decorators.idempotent_id('0abc3998-aacd-4edd-88f5-c5c35557646f')
+    def test_sctp_listener_list(self):
+        self._test_listener_list(const.SCTP, 8041)
+
+    @decorators.idempotent_id('aed69f58-fe69-401d-bf07-37b0d6d8437f')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_terminated_https_listener_list(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or running in noop.')
+        self._test_listener_list(const.TERMINATED_HTTPS, 8042)
+
     def _test_listener_list(self, protocol, protocol_port_base):
         """Tests listener list API and field filtering.
 
@@ -399,6 +735,8 @@
         # IDs of listeners created in the test
         test_ids = []
 
+        self._validate_listener_protocol(protocol)
+
         lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
         lb = self.mem_lb_client.create_loadbalancer(
             name=lb_name, provider=CONF.load_balancer.provider,
@@ -431,6 +769,14 @@
                               "Marketing", "Creativity"]
             listener1_kwargs.update({const.TAGS: listener1_tags})
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener1_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
         listener1 = self.mem_listener_client.create_listener(
             **listener1_kwargs)
         self.addCleanup(
@@ -470,6 +816,14 @@
                               "Soft_skills", "Creativity"]
             listener2_kwargs.update({const.TAGS: listener2_tags})
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener2_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
         listener2 = self.mem_listener_client.create_listener(
             **listener2_kwargs)
         self.addCleanup(
@@ -509,6 +863,14 @@
                               "Communication", "Creativity"]
             listener3_kwargs.update({const.TAGS: listener3_tags})
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener3_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
         listener3 = self.mem_listener_client.create_listener(
             **listener3_kwargs)
         self.addCleanup(
@@ -551,8 +913,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -566,11 +928,11 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_member',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member',
                                 'os_roles_lb_global_observer']
         if expected_allowed:
             self.check_list_IDs_RBAC_enforcement(
@@ -591,13 +953,13 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -650,6 +1012,11 @@
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.12'):
             show_listener_response_fields.append('allowed_cidrs')
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.27'):
+            show_listener_response_fields.append(const.HSTS_PRELOAD)
+            show_listener_response_fields.append(const.HSTS_MAX_AGE)
+            show_listener_response_fields.append(const.HSTS_INCLUDE_SUBDOMAINS)
         for field in show_listener_response_fields:
             if field in (const.DEFAULT_POOL_ID, const.L7_POLICIES):
                 continue
@@ -735,19 +1102,41 @@
     def test_https_listener_show(self):
         self._test_listener_show(const.HTTPS, 8051)
 
+    @decorators.idempotent_id('b851b754-4333-4115-9063-a9fce44c2e46')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_show(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_show(const.PROMETHEUS, 8092)
+
     @decorators.idempotent_id('1fcbbee2-b697-4890-b6bf-d308ac1c94cd')
     def test_tcp_listener_show(self):
         self._test_listener_show(const.TCP, 8052)
 
     @decorators.idempotent_id('1dea3a6b-c95b-4e91-b591-1aa9cbcd0d1d')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_show(self):
         self._test_listener_show(const.UDP, 8053)
 
+    @decorators.idempotent_id('10992529-1d0a-47a3-855c-3dbcd868db4e')
+    def test_sctp_listener_show(self):
+        self._test_listener_show(const.SCTP, 8054)
+
+    @decorators.idempotent_id('2c2e7146-0efc-44b6-8401-f1c69c2422fe')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_terminated_https_listener_show(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or running in noop.')
+        self._test_listener_show(const.TERMINATED_HTTPS, 8055)
+
     def _test_listener_show(self, protocol, protocol_port):
         """Tests listener show API.
 
@@ -756,8 +1145,12 @@
         * Validate the show reflects the requested values.
         * Validates that other accounts cannot see the listener.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-show")
         listener_description = data_utils.arbitrary_string(size=255)
+        hsts_supported = self.mem_listener_client.is_version_supported(
+            self.api_version, '2.27') and protocol == const.TERMINATED_HTTPS
 
         listener_kwargs = {
             const.NAME: listener_name,
@@ -767,10 +1160,7 @@
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
         }
         if protocol == const.HTTP:
             listener_kwargs[const.INSERT_HEADERS] = {
@@ -779,6 +1169,19 @@
                 const.X_FORWARDED_PROTO: "true",
             }
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
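+        # HSTS options are only set for TERMINATED_HTTPS listeners when the
+        # API supports version 2.27 or newer (see hsts_supported above).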
+        if hsts_supported:
+            listener_kwargs[const.HSTS_PRELOAD] = True
+            listener_kwargs[const.HSTS_MAX_AGE] = 10000
+            listener_kwargs[const.HSTS_INCLUDE_SUBDOMAINS] = True
+
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_kwargs.update({
@@ -801,6 +1204,11 @@
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -848,6 +1256,14 @@
             self.assertTrue(strutils.bool_from_string(
                 insert_headers[const.X_FORWARDED_PROTO]))
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            self.assertEqual(self.server_secret_ref,
+                             listener[const.DEFAULT_TLS_CONTAINER_REF])
+            self.assertEqual(sorted([self.SNI1_secret_ref,
+                                     self.SNI2_secret_ref]),
+                             sorted(listener[const.SNI_CONTAINER_REFS]))
+
         parser.parse(listener[const.CREATED_AT])
         parser.parse(listener[const.UPDATED_AT])
         UUID(listener[const.ID])
@@ -861,24 +1277,23 @@
                 self.api_version, '2.12'):
             self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
 
+        if hsts_supported:
+            self.assertTrue(listener[const.HSTS_PRELOAD])
+            self.assertEqual(10000, listener[const.HSTS_MAX_AGE])
+            self.assertTrue(listener[const.HSTS_INCLUDE_SUBDOMAINS])
+
         # Test that the appropriate users can see or not see the listener
         # based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'ListenerClient', 'show_listener',
-                expected_allowed, listener[const.ID])
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'ListenerClient', 'show_listener',
+            expected_allowed, listener[const.ID])
 
     @decorators.idempotent_id('aaae0298-5778-4c7e-a27a-01549a71b319')
     def test_http_listener_update(self):
@@ -888,19 +1303,41 @@
     def test_https_listener_update(self):
         self._test_listener_update(const.HTTPS, 8061)
 
+    @decorators.idempotent_id('cbba6bf8-9184-4da5-95e9-5efe1f89ddf0')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_update(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_update(const.PROMETHEUS, 8093)
+
     @decorators.idempotent_id('8d933121-db03-4ccc-8b77-4e879064a9ba')
     def test_tcp_listener_update(self):
         self._test_listener_update(const.TCP, 8062)
 
     @decorators.idempotent_id('fd02dbfd-39ce-41c2-b181-54fc7ad91707')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_update(self):
         self._test_listener_update(const.UDP, 8063)
 
+    @decorators.idempotent_id('c590b485-4e08-4e49-b384-2282b3f6f1b9')
+    def test_sctp_listener_update(self):
+        self._test_listener_update(const.SCTP, 8064)
+
+    @decorators.idempotent_id('2ae08e10-fbf8-46d8-a073-15f90454d718')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_terminated_https_listener_update(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or running in noop.')
+        self._test_listener_update(const.TERMINATED_HTTPS, 8065)
+
     def _test_listener_update(self, protocol, protocol_port):
         """Tests listener update and show APIs.
 
@@ -912,8 +1349,12 @@
         * Show listener details.
         * Validate the show reflects the updated values.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-update")
         listener_description = data_utils.arbitrary_string(size=255)
+        hsts_supported = self.mem_listener_client.is_version_supported(
+            self.api_version, '2.27') and protocol == const.TERMINATED_HTTPS
 
         listener_kwargs = {
             const.NAME: listener_name,
@@ -923,10 +1364,7 @@
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
         }
         if protocol == const.HTTP:
             listener_kwargs[const.INSERT_HEADERS] = {
@@ -935,6 +1373,14 @@
                 const.X_FORWARDED_PROTO: "true"
             }
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_kwargs.update({
@@ -957,6 +1403,11 @@
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -988,6 +1439,13 @@
                 insert_headers[const.X_FORWARDED_PORT]))
             self.assertTrue(strutils.bool_from_string(
                 insert_headers[const.X_FORWARDED_PROTO]))
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            self.assertEqual(self.server_secret_ref,
+                             listener[const.DEFAULT_TLS_CONTAINER_REF])
+            self.assertEqual(sorted([self.SNI1_secret_ref,
+                                     self.SNI2_secret_ref]),
+                             sorted(listener[const.SNI_CONTAINER_REFS]))
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
@@ -1006,22 +1464,14 @@
 
         # Test that a user without the loadbalancer role cannot
         # update a listener.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'ListenerClient', 'update_listener',
-                expected_allowed,
-                status_method=self.mem_listener_client.show_listener,
-                obj_id=listener[const.ID], listener_id=listener[const.ID],
-                admin_state_up=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'ListenerClient', 'update_listener',
+            expected_allowed,
+            status_method=self.mem_listener_client.show_listener,
+            obj_id=listener[const.ID], listener_id=listener[const.ID],
+            admin_state_up=True)
 
         new_name = data_utils.rand_name("lb_member_listener1-UPDATED")
         new_description = data_utils.arbitrary_string(size=255,
@@ -1033,8 +1483,6 @@
             const.CONNECTION_LIMIT: 400,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
-            # const.DEFAULT_TLS_CONTAINER_REF: '',
-            # const.SNI_CONTAINER_REFS: [],
         }
         if protocol == const.HTTP:
             listener_update_kwargs[const.INSERT_HEADERS] = {
@@ -1042,6 +1490,13 @@
                 const.X_FORWARDED_PORT: "false",
                 const.X_FORWARDED_PROTO: "false"
             }
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener_update_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.SNI2_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.server_secret_ref],
+            })
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             listener_update_kwargs.update({
@@ -1073,6 +1528,11 @@
                 new_cidrs = ['2001:db8::/64']
             listener_update_kwargs.update({const.ALLOWED_CIDRS: new_cidrs})
 
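+        # Disable the HSTS options as part of the update (TERMINATED_HTTPS
+        # listeners on Octavia API 2.27 or newer only).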
+        if hsts_supported:
+            listener_update_kwargs[const.HSTS_PRELOAD] = False
+            listener_update_kwargs[const.HSTS_MAX_AGE] = 0
+            listener_update_kwargs[const.HSTS_INCLUDE_SUBDOMAINS] = False
+
         listener = self.mem_listener_client.update_listener(
             listener[const.ID], **listener_update_kwargs)
 
@@ -1112,6 +1572,13 @@
                 insert_headers[const.X_FORWARDED_PORT]))
             self.assertFalse(strutils.bool_from_string(
                 insert_headers[const.X_FORWARDED_PROTO]))
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            self.assertEqual(self.SNI2_secret_ref,
+                             listener[const.DEFAULT_TLS_CONTAINER_REF])
+            self.assertEqual(sorted([self.SNI1_secret_ref,
+                                     self.server_secret_ref]),
+                             sorted(listener[const.SNI_CONTAINER_REFS]))
         if self.mem_listener_client.is_version_supported(
                 self.api_version, '2.1'):
             self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
@@ -1131,6 +1598,11 @@
                 expected_cidrs = ['2001:db8::/64']
             self.assertEqual(expected_cidrs, listener[const.ALLOWED_CIDRS])
 
+        if hsts_supported:
+            self.assertFalse(listener[const.HSTS_PRELOAD])
+            self.assertEqual(0, listener[const.HSTS_MAX_AGE])
+            self.assertFalse(listener[const.HSTS_INCLUDE_SUBDOMAINS])
+
     @decorators.idempotent_id('16f11c82-f069-4592-8954-81b35a98e3b7')
     def test_http_listener_delete(self):
         self._test_listener_delete(const.HTTP, 8070)
@@ -1139,19 +1611,41 @@
     def test_https_listener_delete(self):
         self._test_listener_delete(const.HTTPS, 8071)
 
+    @decorators.idempotent_id('322a6372-6b56-4a3c-87e3-dd82074bc83e')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_delete(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_delete(const.PROMETHEUS, 8094)
+
     @decorators.idempotent_id('f5ca019d-2b33-48f9-9c2d-2ec169b423ca')
     def test_tcp_listener_delete(self):
         self._test_listener_delete(const.TCP, 8072)
 
     @decorators.idempotent_id('86bd9717-e3e9-41e3-86c4-888c64455926')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_delete(self):
         self._test_listener_delete(const.UDP, 8073)
 
+    @decorators.idempotent_id('0de6f1ad-58ae-4b31-86b6-b440fce70244')
+    def test_sctp_listener_delete(self):
+        self._test_listener_delete(const.SCTP, 8074)
+
+    @decorators.idempotent_id('ef357dcc-c9a0-40fe-a15c-b368f15d7187')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_terminated_https_listener_delete(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or running in noop.')
+        self._test_listener_delete(const.TERMINATED_HTTPS, 8075)
+
     def _test_listener_delete(self, protocol, protocol_port):
         """Tests listener create and delete APIs.
 
@@ -1160,6 +1654,8 @@
         * Deletes the listener.
         * Validates the listener is in the DELETED state.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-delete")
 
         listener_kwargs = {
@@ -1168,8 +1664,22 @@
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
         }
+
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -1179,21 +1689,13 @@
 
         # Test that a user without the loadbalancer role cannot
         # delete a listener.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'ListenerClient', 'delete_listener',
-                expected_allowed,
-                status_method=self.mem_listener_client.show_listener,
-                obj_id=listener[const.ID], listener_id=listener[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'ListenerClient', 'delete_listener',
+            expected_allowed,
+            status_method=self.mem_listener_client.show_listener,
+            obj_id=listener[const.ID], listener_id=listener[const.ID])
 
         self.mem_listener_client.delete_listener(listener[const.ID])
 
@@ -1223,14 +1725,25 @@
         self._test_listener_show_stats(const.TCP, 8082)
 
     @decorators.idempotent_id('a4c1f199-923b-41e4-a134-c91e590e20c4')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_show_stats(self):
         self._test_listener_show_stats(const.UDP, 8083)
 
+    @decorators.idempotent_id('7f6d3906-529c-4b99-8376-b836059df220')
+    def test_sctp_listener_show_stats(self):
+        self._test_listener_show_stats(const.SCTP, 8084)
+
+    @decorators.idempotent_id('c39c996f-9633-4d81-a5f1-e94643f0c650')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.terminated_tls_enabled,
+        '[loadbalancer-feature-enabled] "terminated_tls_enabled" is '
+        'False in the tempest configuration. TLS tests will be skipped.')
+    def test_terminated_https_listener_show_stats(self):
+        if not self.should_apply_terminated_https():
+            raise self.skipException(
+                f'Listener API tests with {const.TERMINATED_HTTPS} protocol'
+                ' require either the barbican service or running in noop.')
+        self._test_listener_show_stats(const.TERMINATED_HTTPS, 8085)
+
     def _test_listener_show_stats(self, protocol, protocol_port):
         """Tests listener show statistics API.
 
@@ -1240,6 +1753,8 @@
         * Show listener statistics.
         * Validate the show reflects the expected values.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-stats")
         listener_description = data_utils.arbitrary_string(size=255)
 
@@ -1253,6 +1768,14 @@
             const.CONNECTION_LIMIT: 200,
         }
 
+        # Add terminated_https args
+        if self.should_apply_terminated_https(protocol=protocol):
+            listener_kwargs.update({
+                const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+                const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+                                           self.SNI2_secret_ref],
+            })
+
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.addCleanup(
             self.mem_listener_client.cleanup_listener,
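
The listener hunks above repeatedly gate the TLS-specific kwargs behind
``should_apply_terminated_https``, whose definition is outside this diff. A
minimal, self-contained sketch of the gating logic it implies is shown below;
the function signature and flag names are illustrative assumptions, not the
plugin's actual API::

    TERMINATED_HTTPS = 'TERMINATED_HTTPS'

    def should_apply_terminated_https(protocol=None, barbican_available=False,
                                      noop_provider=False):
        # TLS container references only make sense on TERMINATED_HTTPS
        # listeners, and only when barbican can store the certificates or a
        # noop provider will ignore them anyway.
        if protocol is not None and protocol != TERMINATED_HTTPS:
            return False
        return barbican_available or noop_provider

    # A TCP listener never gets DEFAULT_TLS_CONTAINER_REF, even with barbican.
    assert not should_apply_terminated_https(protocol='TCP',
                                             barbican_available=True)
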
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 7ade642..5159dc4 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -84,20 +84,11 @@
         lb_kwargs_with_project_id = copy.deepcopy(lb_kwargs)
         lb_kwargs_with_project_id[const.PROJECT_ID] = (
             self.os_roles_lb_member.credentials.project_id)
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
-                                'os_roles_lb_member', 'os_roles_lb_member2']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin',
-                                'os_roles_lb_member', 'os_roles_lb_member2']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member', 'os_roles_lb_member2']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'LoadbalancerClient', 'create_loadbalancer',
-                expected_allowed, None, None, **lb_kwargs_with_project_id)
+        expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                            'os_roles_lb_member', 'os_roles_lb_member2']
+        self.check_create_RBAC_enforcement(
+            'LoadbalancerClient', 'create_loadbalancer',
+            expected_allowed, None, None, **lb_kwargs_with_project_id)
 
         lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
 
@@ -188,19 +179,11 @@
 
         # Test that a user without the loadbalancer role cannot delete this
         # load balancer.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'LoadbalancerClient', 'delete_loadbalancer',
-                expected_allowed, None, None, lb[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_delete_RBAC_enforcement(
+            'LoadbalancerClient', 'delete_loadbalancer',
+            expected_allowed, None, None, lb[const.ID])
 
         self.mem_lb_client.delete_loadbalancer(lb[const.ID])
 
@@ -237,19 +220,11 @@
 
         # Test that a user without the loadbalancer role cannot delete this
         # load balancer.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'LoadbalancerClient', 'delete_loadbalancer',
-                expected_allowed, None, None, lb[const.ID], cascade=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_delete_RBAC_enforcement(
+            'LoadbalancerClient', 'delete_loadbalancer',
+            expected_allowed, None, None, lb[const.ID], cascade=True)
 
         self.mem_lb_client.delete_loadbalancer(lb[const.ID], cascade=True)
 
@@ -418,8 +393,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -433,11 +408,11 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_member',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member',
                                 'os_roles_lb_global_observer']
         if expected_allowed:
             self.check_list_IDs_RBAC_enforcement(
@@ -457,13 +432,13 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -630,22 +605,16 @@
 
         # Test that the appropriate users can see or not see the load
         # balancer based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'LoadbalancerClient', 'show_loadbalancer',
-                expected_allowed, lb[const.ID])
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'LoadbalancerClient', 'show_loadbalancer',
+            expected_allowed, lb[const.ID])
 
         # Attempt to clean up so that one full test run doesn't start 10+
         # amps before the cleanup phase fires
@@ -734,20 +703,12 @@
 
         # Test that a user, without the loadbalancer member role, cannot
         # update this load balancer.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'LoadbalancerClient', 'update_loadbalancer',
-                expected_allowed, None, None, lb[const.ID],
-                admin_state_up=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'LoadbalancerClient', 'update_loadbalancer',
+            expected_allowed, None, None, lb[const.ID],
+            admin_state_up=True)
 
         # Assert we didn't go into PENDING_*
         lb_check = self.mem_lb_client.show_loadbalancer(lb[const.ID])
@@ -826,22 +787,16 @@
 
         # Test that the appropriate users can see or not see the load
         # balancer stats based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'LoadbalancerClient', 'get_loadbalancer_stats',
-                expected_allowed, lb[const.ID])
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'LoadbalancerClient', 'get_loadbalancer_stats',
+            expected_allowed, lb[const.ID])
 
         stats = self.mem_lb_client.get_loadbalancer_stats(lb[const.ID])
 
@@ -897,22 +852,16 @@
 
         # Test that the appropriate users can see or not see the load
         # balancer status based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'LoadbalancerClient', 'get_loadbalancer_status',
-                expected_allowed, lb[const.ID])
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'LoadbalancerClient', 'get_loadbalancer_status',
+            expected_allowed, lb[const.ID])
 
         status = self.mem_lb_client.get_loadbalancer_status(lb[const.ID])
 
@@ -974,17 +923,10 @@
 
         # Test that a user without the load balancer admin role cannot
         # failover a load balancer.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'LoadbalancerClient', 'failover_loadbalancer',
-                expected_allowed, None, None, lb[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin']
+        self.check_update_RBAC_enforcement(
+            'LoadbalancerClient', 'failover_loadbalancer',
+            expected_allowed, None, None, lb[const.ID])
 
         # Assert we didn't go into PENDING_*
         lb = self.mem_lb_client.show_loadbalancer(lb[const.ID])
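
The RBAC changes in this file follow a single pattern: the old
``os_system_admin``/``os_system_reader`` credentials are replaced by
``os_admin`` and ``os_roles_lb_admin``, and the write-style checks (create,
update, delete, failover) no longer branch on ``RBAC_test_type`` at all. A
small sketch of the resulting read-style lists, using a plain string in place
of the tempest config value (an assumption made only to keep the example
self-contained), could look like::

    ADVANCED = 'advanced'

    def expected_allowed_for_show(rbac_test_type):
        # Read-style checks add the global observer role only under the
        # "advanced" RBAC profile; every other profile shares the same
        # admin/member list.
        allowed = ['os_admin', 'os_roles_lb_admin', 'os_roles_lb_member']
        if rbac_test_type == ADVANCED:
            allowed.insert(2, 'os_roles_lb_global_observer')
        return allowed

    # Matches the lists passed to check_show_RBAC_enforcement for
    # show_loadbalancer, get_loadbalancer_stats and get_loadbalancer_status.
    print(expected_allowed_for_show('advanced'))
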
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index aa7cf25..fa35b03 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -29,6 +29,7 @@
 
 CONF = config.CONF
 
+
 # Member port numbers need to be unique on the shared pools so generate them
 @misc.singleton
 class MemberPort(object):
@@ -104,6 +105,11 @@
         cls.current_listener_port += 1
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -133,6 +139,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -141,6 +152,19 @@
         cls.listener_pool_cache[listener_pool_key] = pool[const.ID]
         return pool[const.ID]
 
+
+class MemberAPITest1(MemberAPITest):
+    @decorators.idempotent_id('c1e029b0-b6d6-4fa6-8ccb-5c3f3aa293b0')
+    def test_ipv4_HTTP_LC_backup_member_create(self):
+        if not self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('Backup member support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        pool_id = self._listener_pool_create(
+            listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+            algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+        self._test_member_create(4, pool_id, backup_member=True)
+
     @decorators.idempotent_id('0684575a-0970-4fa8-8006-10c2b39c5f2b')
     def test_ipv4_HTTP_LC_alt_monitor_member_create(self):
         pool_id = self._listener_pool_create(
@@ -499,6 +523,17 @@
             algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
         self._test_member_create(6, pool_id)
 
+    @decorators.idempotent_id('b1994c5d-74b8-44be-b9e5-5e18e9219b61')
+    def test_ipv6_HTTP_LC_backup_member_create(self):
+        if not self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('Backup member support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        pool_id = self._listener_pool_create(
+            listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+            algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+        self._test_member_create(6, pool_id, backup_member=True)
+
     @decorators.idempotent_id('6056724b-d046-497a-ae31-c02af67d4fbb')
     def test_ipv6_HTTPS_LC_alt_monitor_member_create(self):
         pool_id = self._listener_pool_create(
@@ -830,12 +865,12 @@
         self._test_member_create(6, pool_id)
 
     def _test_member_create(self, ip_version, pool_id,
-                            alternate_monitor=False):
+                            alternate_monitor=False, backup_member=False):
         """Tests member create and basic show APIs.
 
         * Tests that users without the loadbalancer member role cannot
           create members.
-        * Create a fully populated member.
+        * Create a fully populated member or backup member.
         * If the driver doesn't support Monitors, allow creating without a monitor
         * Show member details.
         * Validate the show reflects the requested values.
@@ -870,7 +905,7 @@
         if self.mem_member_client.is_version_supported(
                 self.api_version, '2.1'):
             member_kwargs.update({
-                const.BACKUP: False,
+                const.BACKUP: backup_member,
             })
 
         if self.mem_member_client.is_version_supported(
@@ -895,24 +930,21 @@
 
         # Test that a user without the loadbalancer role cannot
         # create a member.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'MemberClient', 'create_member',
-                expected_allowed,
-                status_method=self.mem_lb_client.show_loadbalancer,
-                obj_id=self.lb_id, **member_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_create_RBAC_enforcement(
+            'MemberClient', 'create_member',
+            expected_allowed,
+            status_method=self.mem_lb_client.show_loadbalancer,
+            obj_id=self.lb_id, **member_kwargs)
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -953,6 +985,17 @@
         for item in equal_items:
             self.assertEqual(member_kwargs[item], member[item])
 
+    @decorators.skip_because(bug='2045803')
+    @decorators.idempotent_id('b982188a-d55f-438a-a1b2-224f0ec8ff12')
+    def test_HTTP_LC_backup_member_list(self):
+        if not self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('Backup member support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        self._test_member_list(const.HTTP,
+                               const.LB_ALGORITHM_LEAST_CONNECTIONS,
+                               backup_member=True)
+
     @decorators.idempotent_id('fcc5c6cd-d1c2-4a49-8d26-2268608e59a6')
     def test_HTTP_LC_member_list(self):
         self._test_member_list(const.HTTP,
@@ -1053,11 +1096,11 @@
         self._test_member_list(const.UDP,
                                const.LB_ALGORITHM_SOURCE_IP_PORT)
 
-    def _test_member_list(self, pool_protocol, algorithm):
+    def _test_member_list(self, pool_protocol, algorithm, backup_member=False):
         """Tests member list API and field filtering.
 
         * Create a clean pool.
-        * Create three members.
+        * Create three members (one backup member if backup_member is True).
         * Validates that other accounts cannot list the members.
         * List the members using the default sort order.
         * List the members using descending sort order.
@@ -1122,6 +1165,9 @@
             const.PROTOCOL_PORT: 101,
         }
 
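+        # Only member 1 is flagged as a backup, so the backup filter check
+        # below should match exactly one member.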
+        if backup_member:
+            member1_kwargs[const.BACKUP] = True
+
         if self.mem_member_client.is_version_supported(
                 self.api_version, '2.5'):
             member1_tags = ["English", "Mathematics",
@@ -1231,11 +1277,11 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_member',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member',
                                 'os_roles_lb_global_observer']
         if expected_allowed:
             self.check_list_IDs_RBAC_enforcement(
@@ -1253,11 +1299,10 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
         if expected_allowed:
@@ -1333,6 +1378,17 @@
         self.assertEqual(member2[const.PROTOCOL_PORT],
                          members[0][const.PROTOCOL_PORT])
 
+        # Test filtering using the backup flag
+        if backup_member:
+            members = self.mem_member_client.list_members(
+                pool_id,
+                query_params='{backup}={backup_value}'.format(
+                    backup=const.BACKUP,
+                    backup_value=const.BACKUP_TRUE))
+            self.assertEqual(1, len(members))
+            self.assertEqual(member1_name, members[0][const.NAME])
+            self.assertTrue(members[0][const.BACKUP])
+
         # Test combined params
         members = self.mem_member_client.list_members(
             pool_id,
@@ -1375,6 +1431,19 @@
             self.assertTrue(not any(["" in member[const.TAGS]
                                      for member in list_of_members]))
 
+
+class MemberAPITest2(MemberAPITest):
+    @decorators.idempotent_id('048f4b15-1cb4-49ac-82d6-b2ac7fe9d03b')
+    def test_HTTP_LC_backup_member_show(self):
+        if not self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('Backup member support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        pool_id = self._listener_pool_create(
+            listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+            algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+        self._test_member_show(pool_id, backup_member=True)
+
     @decorators.idempotent_id('2674b363-7922-494a-b121-cf415dbbb716')
     def test_HTTP_LC_alt_monitor_member_show(self):
         pool_id = self._listener_pool_create(
@@ -1719,7 +1788,8 @@
             algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
         self._test_member_show(pool_id)
 
-    def _test_member_show(self, pool_id, alternate_monitor=False):
+    def _test_member_show(self, pool_id, alternate_monitor=False,
+                          backup_member=False):
         """Tests member show API.
 
         * Create a fully populated member.
@@ -1743,7 +1813,7 @@
         if self.mem_member_client.is_version_supported(
                 self.api_version, '2.1'):
             member_kwargs.update({
-                const.BACKUP: False,
+                const.BACKUP: backup_member,
             })
         if self.lb_member_vip_subnet:
             member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
@@ -1751,6 +1821,11 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1789,23 +1864,27 @@
 
         # Test that the appropriate users can see or not see the member
         # based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'MemberClient', 'show_member',
-                expected_allowed, member[const.ID],
-                pool_id=pool_id)
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'MemberClient', 'show_member', expected_allowed, member[const.ID],
+            pool_id=pool_id)
+
+    @decorators.idempotent_id('592c19c3-1e0d-4d6d-b2ff-0d39d8654c99')
+    def test_HTTP_LC_backup_member_update(self):
+        if not self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('Backup member support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        pool_id = self._listener_pool_create(
+            listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+            algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+        self._test_member_update(pool_id, backup_member=True)
 
     @decorators.idempotent_id('65680d48-1d49-4959-a7d1-677797e54f6b')
     def test_HTTP_LC_alt_monitor_member_update(self):
@@ -2151,7 +2230,8 @@
             algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
         self._test_member_update(pool_id)
 
-    def _test_member_update(self, pool_id, alternate_monitor=False):
+    def _test_member_update(self, pool_id, alternate_monitor=False,
+                            backup_member=False):
         """Tests member show API and field filtering.
 
         * Create a fully populated member.
@@ -2178,7 +2258,7 @@
         if self.mem_member_client.is_version_supported(
                 self.api_version, '2.1'):
             member_kwargs.update({
-                const.BACKUP: False,
+                const.BACKUP: backup_member,
             })
 
         if self.mem_member_client.is_version_supported(
@@ -2194,6 +2274,11 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -2246,20 +2331,12 @@
 
         # Test that a user, without the loadbalancer member role, cannot
         # update this member.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'MemberClient', 'update_member',
-                expected_allowed, None, None, member[const.ID],
-                pool_id=pool_id, admin_state_up=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'MemberClient', 'update_member',
+            expected_allowed, None, None, member[const.ID],
+            pool_id=pool_id, admin_state_up=True)
 
         # Assert we didn't go into PENDING_*
         member_check = self.mem_member_client.show_member(
@@ -2636,6 +2713,11 @@
                 const.ID]
         member1 = self.mem_member_client.create_member(**member1_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id,
                                 const.PROVISIONING_STATUS,
@@ -2668,6 +2750,11 @@
 
         member2 = self.mem_member_client.create_member(**member2_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id,
                                 const.PROVISIONING_STATUS,
@@ -2704,20 +2791,12 @@
 
         # Test that a user, without the loadbalancer member role, cannot
         # batch update this member.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'MemberClient', 'update_members',
-                expected_allowed, None, None,
-                pool_id=pool_id, members_list=batch_update_list)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'MemberClient', 'update_members',
+            expected_allowed, None, None,
+            pool_id=pool_id, members_list=batch_update_list)
 
         # Assert we didn't go into PENDING_*
         member_check = self.mem_member_client.show_member(
@@ -2751,6 +2830,17 @@
         self.assertEqual(member2_name_update, members[0][const.NAME])
         self.assertEqual(member3_name, members[1][const.NAME])
 
+    @decorators.idempotent_id('eab8f0dc-0959-4b50-aea2-2f2319305d15')
+    def test_HTTP_LC_backup_member_delete(self):
+        if not self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('Backup member support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        pool_id = self._listener_pool_create(
+            listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+            algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+        self._test_member_delete(pool_id, backup_member=True)
+
     @decorators.idempotent_id('8b6574a3-17e8-4950-b24e-66d0c28960d3')
     def test_HTTP_LC_member_delete(self):
         pool_id = self._listener_pool_create(
@@ -2923,7 +3013,7 @@
             algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
         self._test_member_delete(pool_id)
 
-    def _test_member_delete(self, pool_id):
+    def _test_member_delete(self, pool_id, backup_member=False):
         """Tests member create and delete APIs.
 
         * Creates a member.
@@ -2938,8 +3028,20 @@
             const.ADDRESS: '192.0.2.1',
             const.PROTOCOL_PORT: self.member_port.increment(),
         }
+
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member_kwargs.update({
+                const.BACKUP: backup_member,
+            })
+
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -2949,20 +3051,11 @@
 
         # Test that a user without the loadbalancer role cannot delete this
         # member.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'MemberClient', 'delete_member',
-                expected_allowed, None, None, member[const.ID],
-                pool_id=pool_id)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_delete_RBAC_enforcement(
+            'MemberClient', 'delete_member', expected_allowed, None, None,
+            member[const.ID], pool_id=pool_id)
 
         self.mem_member_client.delete_member(member[const.ID],
                                              pool_id=pool_id)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index ba31a8e..2f62376 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -359,6 +359,11 @@
             listener = self.mem_listener_client.create_listener(
                 **listener_kwargs)
 
+            self.addClassResourceCleanup(
+                self.mem_listener_client.cleanup_listener,
+                listener[const.ID],
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
             waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     self.lb_id, const.PROVISIONING_STATUS,
                                     const.ACTIVE,
@@ -403,21 +408,13 @@
 
         # Test that a user without the loadbalancer role cannot
         # create a pool.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_create_RBAC_enforcement(
-                'PoolClient', 'create_pool',
-                expected_allowed,
-                status_method=self.mem_lb_client.show_loadbalancer,
-                obj_id=self.lb_id, **pool_kwargs)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_create_RBAC_enforcement(
+            'PoolClient', 'create_pool',
+            expected_allowed,
+            status_method=self.mem_lb_client.show_loadbalancer,
+            obj_id=self.lb_id, **pool_kwargs)
 
         # This is a special case as the reference driver does not support
         # SOURCE-IP-PORT. Since it runs with not_implemented_is_error, we must
@@ -434,6 +431,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -584,7 +586,6 @@
     def _test_pool_list(self, pool_protocol, algorithm):
         """Tests pool list API and field filtering.
 
-        * Create a clean loadbalancer.
         * Create three pools.
         * Validates that other accounts cannot list the pools.
         * List the pools using the default sort order.
@@ -605,14 +606,7 @@
                 'Skipping this test as load balancing algorithm '
                 'SOURCE_IP_PORT requires API version 2.13 or newer.')
 
-        lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
-        lb_id = lb[const.ID]
-        self.addCleanup(
-            self.mem_lb_client.cleanup_loadbalancer,
-            lb_id)
+        lb_id = self.lb_id
 
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 lb_id,
@@ -755,31 +749,31 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
-        if expected_allowed:
-            self.check_list_RBAC_enforcement_count(
-                'PoolClient', 'list_pools', expected_allowed, 0,
-                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
+#        if expected_allowed:
+#            self.check_list_RBAC_enforcement_count(
+#                'PoolClient', 'list_pools', expected_allowed, 0,
+#                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
 
         # Test credentials that should see these pools can see them.
         expected_allowed = []
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_member',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member',
                                 'os_roles_lb_global_observer']
-        if expected_allowed:
-            self.check_list_IDs_RBAC_enforcement(
-                'PoolClient', 'list_pools', expected_allowed, test_ids,
-                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
+#        if expected_allowed:
+#            self.check_list_IDs_RBAC_enforcement(
+#                'PoolClient', 'list_pools', expected_allowed, test_ids,
+#                query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
 
         # Test that users without the lb member role cannot list pools.
         # Note: non-owners can still call this API, they will just get the list
@@ -794,13 +788,13 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if expected_allowed:
@@ -1080,6 +1074,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1124,22 +1123,16 @@
 
         # Test that the appropriate users can see or not see the pool
         # based on the API RBAC.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member']
-        if expected_allowed:
-            self.check_show_RBAC_enforcement(
-                'PoolClient', 'show_pool',
-                expected_allowed, pool[const.ID])
+        else:
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
+        self.check_show_RBAC_enforcement(
+            'PoolClient', 'show_pool',
+            expected_allowed, pool[const.ID])
 
     @decorators.idempotent_id('d73755fe-ba3a-4248-9543-8e167a5aa7f4')
     def test_HTTP_LC_pool_update(self):
@@ -1315,6 +1308,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1359,20 +1357,12 @@
 
         # Test that a user, without the loadbalancer member role, cannot
         # update this pool.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_update_RBAC_enforcement(
-                'PoolClient', 'update_pool',
-                expected_allowed, None, None, pool[const.ID],
-                admin_state_up=True)
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_update_RBAC_enforcement(
+            'PoolClient', 'update_pool',
+            expected_allowed, None, None, pool[const.ID],
+            admin_state_up=True)
 
         # Assert we didn't go into PENDING_*
         pool_check = self.mem_pool_client.show_pool(
@@ -1647,6 +1637,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -1656,19 +1651,11 @@
 
         # Test that a user without the loadbalancer role cannot delete this
         # pool.
-        expected_allowed = []
-        if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
-            expected_allowed = ['os_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
-        if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
-                                'os_roles_lb_member']
-        if expected_allowed:
-            self.check_delete_RBAC_enforcement(
-                'PoolClient', 'delete_pool',
-                expected_allowed, None, None, pool[const.ID])
+        expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                            'os_roles_lb_member']
+        self.check_delete_RBAC_enforcement(
+            'PoolClient', 'delete_pool',
+            expected_allowed, None, None, pool[const.ID])
 
         self.mem_pool_client.delete_pool(pool[const.ID])
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_provider.py b/octavia_tempest_plugin/tests/api/v2/test_provider.py
index 9a9dd28..b6bf12a 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_provider.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_provider.py
@@ -49,15 +49,16 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
-                                'os_system_reader', 'os_roles_lb_observer',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = [
-                'os_system_admin', 'os_system_reader', 'os_roles_lb_observer',
-                'os_roles_lb_global_observer', 'os_roles_lb_admin',
-                'os_roles_lb_member', 'os_roles_lb_member2']
+                'os_admin', 'os_roles_lb_observer',
+                'os_roles_lb_global_observer',
+                'os_roles_lb_admin', 'os_roles_lb_member',
+                'os_roles_lb_member2']
         if expected_allowed:
             self.check_list_RBAC_enforcement(
                 'ProviderClient', 'list_providers', expected_allowed)
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 9664c57..7938c31 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -28,6 +28,7 @@
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
+import testtools
 
 from octavia_tempest_plugin.common import barbican_client_mgr
 from octavia_tempest_plugin.common import cert_utils
@@ -219,7 +220,7 @@
                 floating_network_id=CONF.network.public_network_id,
                 port_id=port_id)
             floating_ip = result['floatingip']
-            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            LOG.info('lb1_floating_ip: %s', floating_ip)
             cls.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 cls.lb_mem_float_ip_client.delete_floatingip,
@@ -239,6 +240,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -257,7 +263,13 @@
         if cls.lb_member_1_subnet:
             member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]
 
-        cls.mem_member_client.create_member(**member1_kwargs)
+        member1 = cls.mem_member_client.create_member(**member1_kwargs)
+
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member1[const.ID], cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(
             cls.mem_lb_client.show_loadbalancer, cls.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -276,7 +288,13 @@
         if cls.lb_member_2_subnet:
             member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]
 
-        cls.mem_member_client.create_member(**member2_kwargs)
+        member2 = cls.mem_member_client.create_member(**member2_kwargs)
+
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member2[const.ID], cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(
             cls.mem_lb_client.show_loadbalancer, cls.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -331,9 +349,9 @@
                 # Make sure the certificate is the one we generated
                 self.assertEqual(expected_cn, received_cn, message=msg)
             else:
-                LOG.error('Certificate with CN: {0} failed validation with '
-                          'OpenSSL verify errno {1}'.format(
-                              x509.get_subject().commonName, errno))
+                LOG.error('Certificate with CN: %s failed validation with '
+                          'OpenSSL verify errno %s',
+                          x509.get_subject().commonName, errno)
                 return False
             return True
 
@@ -469,9 +487,9 @@
                 # Make sure the certificate is the one we generated
                 self.assertEqual(expected_cn, received_cn, message=msg)
             else:
-                LOG.error('Certificate with CN: {0} failed validation with '
-                          'OpenSSL verify errno {1}'.format(
-                              x509.get_subject().commonName, errno))
+                LOG.error('Certificate with CN: %s failed validation with '
+                          'OpenSSL verify errno %s',
+                          x509.get_subject().commonName, errno)
                 return False
             return True
 
@@ -642,9 +660,9 @@
                 # Make sure the certificate is the one we generated
                 self.assertEqual(expected_cn, received_cn, message=msg)
             else:
-                LOG.error('Certificate with CN: {0} failed validation with '
-                          'OpenSSL verify errno {1}'.format(
-                              x509.get_subject().commonName, errno))
+                LOG.error('Certificate with CN: %s failed validation with '
+                          'OpenSSL verify errno %s',
+                          x509.get_subject().commonName, errno)
                 return False
             return True
 
@@ -1204,7 +1222,8 @@
 
         self.assertEqual(expected_proto, selected_proto)
 
-    def _test_http_versions_tls_traffic(self, http_version, alpn_protos):
+    def _test_http_versions_tls_traffic(self, http_version, alpn_protos,
+                                        hsts: bool = False):
         if not self.mem_listener_client.is_version_supported(
                 self.api_version, '2.20'):
             raise self.skipException('ALPN protocols are only available on '
@@ -1219,6 +1238,12 @@
             const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
             const.ALPN_PROTOCOLS: alpn_protos,
         }
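+        # HSTS settings are only accepted by Octavia API 2.27 and newer; a
+        # None max-age leaves HSTS disabled on the listener.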
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.27'):
+            listener_kwargs[const.HSTS_MAX_AGE] = 100 if hsts else None
+            listener_kwargs[const.HSTS_INCLUDE_SUBDOMAINS] = hsts
+            listener_kwargs[const.HSTS_PRELOAD] = hsts
+
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.listener_id = listener[const.ID]
         self.addCleanup(
@@ -1231,15 +1256,26 @@
                                 const.ACTIVE,
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
-
         context = ssl.create_default_context(cadata=self.ca_cert.public_bytes(
             serialization.Encoding.PEM).decode('utf-8'))
         context.check_hostname = False
 
         url = 'https://%s:%s' % (self.lb_vip_address, 443)
-        client = httpx.Client(http2=(http_version == 'HTTP/2'), verify=context)
+        client_kwargs = {
+            'verify': context
+        }
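+        # When testing HTTP/2, disable HTTP/1.1 in the httpx client so the
+        # connection cannot silently fall back to http/1.1.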
+        if http_version == 'HTTP/2':
+            client_kwargs['http1'] = False
+            client_kwargs['http2'] = True
+        client = httpx.Client(**client_kwargs)
         r = client.get(url)
         self.assertEqual(http_version, r.http_version)
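+        # With HSTS enabled the listener must return the configured
+        # Strict-Transport-Security header, otherwise it must be absent.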
+        if hsts:
+            self.assertIn('strict-transport-security', r.headers)
+            self.assertEqual('max-age=100; includeSubDomains; preload;',
+                             r.headers['strict-transport-security'])
+        else:
+            self.assertNotIn('strict-transport-security', r.headers)
 
     @decorators.idempotent_id('9965828d-24af-4fa0-91ae-21c6bc47ab4c')
     def test_http_2_tls_traffic(self):
@@ -1250,6 +1286,15 @@
         self._test_http_versions_tls_traffic(
             'HTTP/1.1', ['http/1.1', 'http/1.0'])
 
+    @decorators.idempotent_id('7436c6b7-44be-4544-a40b-31d2b7b2ad0b')
+    def test_http_1_1_tls_hsts_traffic(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.27'):
+            raise self.skipException('HSTS is only available on '
+                                     'Octavia API version 2.27 or newer.')
+        self._test_http_versions_tls_traffic(
+            'HTTP/1.1', ['http/1.1', 'http/1.0'], hsts=True)
+
     @decorators.idempotent_id('ee0faf71-d11e-4323-8673-e5e15779749b')
     def test_pool_reencryption(self):
         if not self.mem_listener_client.is_version_supported(
@@ -1267,6 +1312,11 @@
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
         pool_id = pool[const.ID]
 
+        self.addClassResourceCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -1285,7 +1335,13 @@
         if self.lb_member_1_subnet:
             member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
 
-        self.mem_member_client.create_member(**member1_kwargs)
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1304,7 +1360,13 @@
         if self.lb_member_2_subnet:
             member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
 
-        self.mem_member_client.create_member(**member2_kwargs)
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1323,6 +1385,11 @@
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.listener_id = listener[const.ID]
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -1395,3 +1462,413 @@
 
         self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
                                     protocol_port=84, traffic_member_count=1)
+
+    @decorators.idempotent_id('11b67c96-a553-4b47-9fc6-4c3d7a2a10ce')
+    def test_pool_reencryption_client_authentication(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.8'):
+            raise self.skipException('Pool re-encryption is only available on '
+                                     'Octavia API version 2.8 or newer.')
+        pool_name = data_utils.rand_name("lb_member_pool1-tls-client-auth")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: self.lb_algorithm,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.TLS_ENABLED: True
+        }
+        # Specify an http/1.x alpn to work around HTTP healthchecks
+        # on older haproxy versions when alpn includes h2
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.24'):
+            pool_kwargs[const.ALPN_PROTOCOLS] = ['http/1.0', 'http/1.1']
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        pool_id = pool[const.ID]
+
+        self.addClassResourceCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
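+        # An HTTPS health monitor is used so the member status reflects
+        # whether the backend TLS handshake (including client auth) succeeds.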
+        hm_name = data_utils.rand_name("lb_member_hm1-tls-client-auth")
+        hm_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTPS,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
+            const.EXPECTED_CODES: '200',
+            const.DELAY: 1,
+            const.TIMEOUT: 1,
+            const.MAX_RETRIES: 1,
+            const.MAX_RETRIES_DOWN: 1,
+            const.ADMIN_STATE_UP: True,
+        }
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name(
+            "lb_member_member1-tls-client-auth")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 9443,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name(
+            "lb_member_member2-tls-client-auth")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 9443,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-tls-client-auth")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '85',
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_POOL_ID: pool_id,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Without a client certificate configured on the pool, backend
+        # re-encryption fails and the listener should return 503 (no members).
+        url = 'http://{0}:85'.format(self.lb_vip_address)
+        self.validate_URL_response(url, expected_status_code=503)
+
+        # Test with client certificates
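+        # Attaching the client certificate container to the pool lets the
+        # load balancer present a client certificate when re-encrypting
+        # traffic to the backend members.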
+        pool_update_kwargs = {
+            const.TLS_CONTAINER_REF: self.pool_client_ref
+        }
+
+        self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        waiters.wait_for_status(
+            self.mem_pool_client.show_pool, pool_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Make sure the health monitor has brought the members up after the
+        # pool update.
+        waiters.wait_for_status(
+            self.mem_member_client.show_member, member1[const.ID],
+            const.OPERATING_STATUS, const.ONLINE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout, error_ok=True, pool_id=pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member, member2[const.ID],
+            const.OPERATING_STATUS, const.ONLINE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout, error_ok=True, pool_id=pool_id)
+
+        self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
+                                    protocol_port=85)
+
+    @decorators.idempotent_id('d3e4c5fe-1726-49e4-b0b0-7a5a47749fc9')
+    def test_basic_h2_listener_http_listener_pool_reencryption(self):
+        """Test both h2 and HTTP traffic on the same load balancer.
+
+        In this test we deploy the following Octavia resources:
+            HTTPS_TERMINATED listener with h2 alpn protocols
+            HTTP listener
+            HTTP pool with both h2 alpn protocols and backend re-encryption
+
+        we send both h2 and http traffic from a client to the load balancer vip
+        and we make sure h2 traffic was negotiated when it was sent on 443 port
+        :raises self.skipException: ALPN support for pools not available prior
+        to v2.24.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.24'):
+            raise self.skipException('ALPN protocols are only available on '
+                                     'pools in Octavia API version 2.24 or'
+                                     ' newer.')
+        pool_name = data_utils.rand_name("lb_member_pool1-tls-alpn")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: self.lb_algorithm,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.TLS_ENABLED: True,
+            const.ALPN_PROTOCOLS: ['h2', 'http/1.1'],
+        }
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        pool_id = pool[const.ID]
+
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-tls-reencrypt")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 443,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client,
+            lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-tls-reencrypt")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 443,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client,
+            lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-tls-terminated-alpn")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.TERMINATED_HTTPS,
+            const.PROTOCOL_PORT: '443',
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_POOL_ID: pool_id,
+            const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+            const.ALPN_PROTOCOLS: ['h2', 'http/1.1']
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client,
+            lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-http")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: 80,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_POOL_ID: pool_id,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
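+        # Open a raw TLS connection offering h2 and http/1.1 and confirm the
+        # TERMINATED_HTTPS listener negotiates h2 via ALPN.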
+        context = ssl.SSLContext(ssl.PROTOCOL_TLS)
+        context.set_alpn_protocols(['h2', 'http/1.1'])
+        s = socket.socket()
+        ssl_sock = context.wrap_socket(s)
+        ssl_sock.connect((self.lb_vip_address, 443))
+        selected_proto = ssl_sock.selected_alpn_protocol()
+        self.assertEqual('h2', selected_proto)
+
+        # Test HTTPS listener load balancing.
+        self.check_members_balanced(self.lb_vip_address, protocol=const.HTTPS,
+                                    HTTPS_verify=False, protocol_port=443)
+
+        # Test HTTP listener load balancing.
+        self.check_members_balanced(self.lb_vip_address)
+
+    @decorators.idempotent_id('7d9dcae6-3e2c-4eae-9bfb-1ef0d00aa530')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_tls_prometheus_client_auth_mandatory(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('Prometheus listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        LISTENER1_TCP_PORT = '9443'
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-prometheus-client-auth-mand")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.PROMETHEUS,
+            const.PROTOCOL_PORT: LISTENER1_TCP_PORT,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+            const.CLIENT_AUTHENTICATION: const.CLIENT_AUTH_MANDATORY,
+            const.CLIENT_CA_TLS_CONTAINER_REF: self.client_ca_cert_ref,
+            const.CLIENT_CRL_CONTAINER_REF: self.client_crl_ref,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Test that no client certificate fails to connect
+        self.assertRaises(
+            requests.exceptions.SSLError,
+            requests.get,
+            'https://{0}:{1}'.format(self.lb_vip_address, LISTENER1_TCP_PORT),
+            timeout=12, verify=False)
+
+        # Test that a revoked client certificate fails to connect
+        with tempfile.NamedTemporaryFile(buffering=0) as cert_file:
+            cert_file.write(self.revoked_client_cert.public_bytes(
+                serialization.Encoding.PEM))
+            with tempfile.NamedTemporaryFile(buffering=0) as key_file:
+                key_file.write(self.revoked_client_key.private_bytes(
+                    serialization.Encoding.PEM,
+                    serialization.PrivateFormat.TraditionalOpenSSL,
+                    serialization.NoEncryption()))
+                self.assertRaises(
+                    requests.exceptions.SSLError, requests.get,
+                    'https://{0}:{1}'.format(self.lb_vip_address,
+                                             LISTENER1_TCP_PORT),
+                    timeout=12, verify=False, cert=(cert_file.name,
+                                                    key_file.name))
+
+        # Test that a valid client certificate can connect
+        with tempfile.NamedTemporaryFile(buffering=0) as cert_file:
+            cert_file.write(self.client_cert.public_bytes(
+                serialization.Encoding.PEM))
+            with tempfile.NamedTemporaryFile(buffering=0) as key_file:
+                key_file.write(self.client_key.private_bytes(
+                    serialization.Encoding.PEM,
+                    serialization.PrivateFormat.TraditionalOpenSSL,
+                    serialization.NoEncryption()))
+                response = requests.get(
+                    'https://{0}:{1}'.format(self.lb_vip_address,
+                                             LISTENER1_TCP_PORT),
+                    timeout=12, verify=False, cert=(cert_file.name,
+                                                    key_file.name))
+                self.assertEqual(200, response.status_code)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
index 8075a35..01548c2 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -269,6 +269,11 @@
         }
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -295,6 +300,11 @@
 
         hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index 69c1f2b..d4eaa86 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -104,6 +104,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_ids[protocol] = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -120,6 +125,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_ids[protocol] = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
index acbd094..4c84ba3 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
@@ -61,6 +61,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -77,6 +82,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
index d5683ef..edbdc33 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
@@ -61,6 +61,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -76,6 +81,11 @@
         l7policy = cls.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
         cls.l7policy_id = l7policy[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_l7policy_client.cleanup_l7policy,
+            cls.l7policy_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index a720d26..be997aa 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -76,6 +76,11 @@
         pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
         pool1_id = pool1[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool1_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -92,6 +97,11 @@
         pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
         pool2_id = pool2[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool2_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -115,11 +125,6 @@
         self._test_listener_CRUD(const.TCP, pool1, pool2)
 
     @decorators.idempotent_id('27a2ba7d-6147-46e4-886a-47c1ba63bf89')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_least_connections_listener_CRUD(self):
         pool1, pool2 = self._create_pools(const.UDP,
                                           const.LB_ALGORITHM_LEAST_CONNECTIONS)
@@ -138,11 +143,6 @@
         self._test_listener_CRUD(const.TCP, pool1, pool2)
 
     @decorators.idempotent_id('dd913f74-c6a6-4998-9bed-095babb9cb47')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_round_robin_listener_CRUD(self):
         pool1, pool2 = self._create_pools(const.UDP,
                                           const.LB_ALGORITHM_ROUND_ROBIN)
@@ -161,11 +161,6 @@
         self._test_listener_CRUD(const.TCP, pool1, pool2)
 
     @decorators.idempotent_id('7830aba8-12ca-40d9-9d9b-a63f7a43b287')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_source_ip_listener_CRUD(self):
         pool1, pool2 = self._create_pools(const.UDP,
                                           const.LB_ALGORITHM_SOURCE_IP)
@@ -200,11 +195,6 @@
             raise testtools.TestCase.skipException(message)
 
     @decorators.idempotent_id('3f9a2de9-5012-437d-a907-a25e1f68ccfb')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_source_ip_port_listener_CRUD(self):
         try:
             pool1, pool2 = self._create_pools(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
index 6c52f84..e50d243 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
@@ -22,6 +22,7 @@
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
+from tempest.lib import exceptions
 
 from octavia_tempest_plugin.common import constants as const
 from octavia_tempest_plugin.tests import test_base
@@ -38,14 +39,17 @@
         """Setup resources needed by the tests."""
         super(LoadBalancerScenarioTest, cls).resource_setup()
 
+        cls.flavor_id = cls._flavor_create({
+            const.LOADBALANCER_TOPOLOGY:
+            CONF.load_balancer.loadbalancer_topology})
+
+    @classmethod
+    def _flavor_create(cls, flavor_dict, skip_on_not_implemented=False):
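+        # When the API supports flavors (v2.6+), create a flavor profile and
+        # flavor from the given flavor dict and return the new flavor ID
+        # (None when flavors are unsupported or not implemented by the
+        # provider). Optionally skip the test when the provider driver does
+        # not implement flavors.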
         if cls.lb_admin_flavor_profile_client.is_version_supported(
                 cls.api_version, '2.6'):
-
             # Create a shared flavor profile
             flavor_profile_name = data_utils.rand_name("lb_scenario-setup")
-            flavor_data = {const.LOADBALANCER_TOPOLOGY:
-                           CONF.load_balancer.loadbalancer_topology}
-            flavor_data_json = jsonutils.dumps(flavor_data)
+            flavor_data_json = jsonutils.dumps(flavor_dict)
 
             flavor_profile_kwargs = {
                 const.NAME: flavor_profile_name,
@@ -54,12 +58,12 @@
             }
 
             try:
-                cls.flavor_profile = (
+                flavor_profile = (
                     cls.lb_admin_flavor_profile_client.create_flavor_profile(
                         **flavor_profile_kwargs))
                 cls.addClassResourceCleanup(
                     cls.lb_admin_flavor_profile_client.cleanup_flavor_profile,
-                    cls.flavor_profile[const.ID])
+                    flavor_profile[const.ID])
 
                 flavor_name = data_utils.rand_name("lb_scenario-setup")
                 flavor_description = data_utils.arbitrary_string(size=255)
@@ -68,20 +72,21 @@
                     const.NAME: flavor_name,
                     const.DESCRIPTION: flavor_description,
                     const.ENABLED: True,
-                    const.FLAVOR_PROFILE_ID: cls.flavor_profile[const.ID]}
+                    const.FLAVOR_PROFILE_ID: flavor_profile[const.ID]}
 
-                cls.flavor = cls.lb_admin_flavor_client.create_flavor(
+                flavor = cls.lb_admin_flavor_client.create_flavor(
                     **flavor_kwargs)
                 cls.addClassResourceCleanup(
                     cls.lb_admin_flavor_client.cleanup_a_flavor,
-                    cls.flavor[const.ID])
-                cls.flavor_id = cls.flavor[const.ID]
-            except testtools.TestCase.skipException:
-                LOG.debug("Provider driver %s doesn't support flavors.",
-                          CONF.load_balancer.provider)
-                cls.flavor_profile = None
-                cls.flavor_id = None
-                cls.flavor = None
+                    flavor[const.ID])
+                return flavor[const.ID]
+            except (testtools.TestCase.skipException,
+                    exceptions.NotImplemented):
+                msg = (f"Provider driver {CONF.load_balancer.provider} "
+                       "doesn't support flavors.")
+                LOG.debug(msg)
+                if skip_on_not_implemented:
+                    raise cls.skipException(msg)
 
     @decorators.idempotent_id('a5e2e120-4f7e-4c8b-8aac-cf09cb56711c')
     def test_load_balancer_ipv4_CRUD(self):
@@ -93,7 +98,20 @@
     def test_load_balancer_ipv6_CRUD(self):
         self._test_load_balancer_CRUD(6)
 
-    def _test_load_balancer_CRUD(self, ip_version):
+    @decorators.idempotent_id('c9d8b6dd-ef29-40d8-b329-86d31857df3f')
+    def test_load_balancer_ipv4_CRUD_with_compute_flavor(self):
+        self._test_load_balancer_CRUD(4,
+                                      use_custom_compute_flavor=True)
+
+    @decorators.idempotent_id('2f1c2bdc-0df9-4c1e-be83-910fcd5af8f2')
+    @testtools.skipUnless(CONF.load_balancer.test_with_ipv6,
+                          'IPv6 testing is disabled')
+    def test_load_balancer_ipv6_CRUD_with_compute_flavor(self):
+        self._test_load_balancer_CRUD(6,
+                                      use_custom_compute_flavor=True)
+
+    def _test_load_balancer_CRUD(self, ip_version,
+                                 use_custom_compute_flavor=False):
         """Tests load balancer create, read, update, delete
 
         * Create a fully populated load balancer.
@@ -101,6 +119,33 @@
         * Update the load balancer.
         * Delete the load balancer.
         """
+        if use_custom_compute_flavor:
+            if not self.lb_admin_flavor_profile_client.is_version_supported(
+                    self.api_version, '2.6'):
+                raise self.skipException(
+                    'Flavors and flavor profiles are only supported in '
+                    'Octavia API version 2.6 or newer.')
+
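+            # Create a dedicated Nova compute flavor and reference it from an
+            # Octavia flavor profile via the compute_flavor capability key.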
+            compute_flavor_kwargs = {
+                const.NAME: data_utils.rand_name("lb_scenario_alt_amp_flavor"),
+                const.RAM: 2048,
+                const.VCPUS: 1,
+                const.DISK: 4,
+            }
+
+            compute_flavor = (
+                self.os_admin_compute_flavors_client.create_flavor(
+                    **compute_flavor_kwargs)['flavor'])
+            self.addCleanup(
+                self.os_admin_compute_flavors_client.delete_flavor,
+                compute_flavor[const.ID])
+
+            flavor_id = self._flavor_create({
+                const.COMPUTE_FLAVOR: compute_flavor[const.ID]
+            }, skip_on_not_implemented=True)
+        else:
+            flavor_id = self.flavor_id
+
         lb_name = data_utils.rand_name("lb_member_lb1-CRUD")
         lb_description = data_utils.arbitrary_string(size=255)
 
@@ -110,8 +155,8 @@
                      const.NAME: lb_name}
 
         if self.lb_admin_flavor_profile_client.is_version_supported(
-                self.api_version, '2.6') and self.flavor_id:
-            lb_kwargs[const.FLAVOR_ID] = self.flavor_id
+                self.api_version, '2.6') and flavor_id:
+            lb_kwargs[const.FLAVOR_ID] = flavor_id
 
         self._setup_lb_network_kwargs(lb_kwargs, ip_version)
 
@@ -147,6 +192,13 @@
             self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                              lb[const.VIP_SUBNET_ID])
 
+        if use_custom_compute_flavor:
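+            # Verify the amphora for this load balancer was booted with the
+            # custom compute flavor.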
+            amphorae = self.lb_admin_amphora_client.list_amphorae(
+                query_params=f'{const.LOADBALANCER_ID}={lb[const.ID]}')
+            amphora = amphorae[0]
+            self.assertEqual(compute_flavor[const.ID],
+                             amphora[const.COMPUTE_FLAVOR])
+
         # Load balancer update
         new_name = data_utils.rand_name("lb_member_lb1-update")
         new_description = data_utils.arbitrary_string(size=255,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index 6df4c9c..c53e1c2 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -359,6 +359,11 @@
                 **listener_kwargs)
             listener_id = listener[const.ID]
 
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener,
+                listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
             waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     self.lb_id, const.PROVISIONING_STATUS,
                                     const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index d4d43b5..db92352 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -79,7 +79,7 @@
                 floating_network_id=CONF.network.public_network_id,
                 port_id=port_id)
             floating_ip = result['floatingip']
-            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            LOG.info('lb1_floating_ip: %s', floating_ip)
             cls.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 cls.lb_mem_float_ip_client.delete_floatingip,
@@ -120,6 +120,11 @@
 
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -135,6 +140,11 @@
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -204,6 +214,55 @@
             traffic_member_count=traffic_member_count, source_port=source_port,
             delay=delay)
 
+    def _pool_add_healthmonitor(self, pool_id, protocol):
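+        # Create a health monitor suited to the pool protocol (HTTP, TCP or
+        # UDP) and wait for the load balancer and the health monitor to go
+        # ACTIVE before returning the health monitor.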
+        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
+        if protocol != const.HTTP:
+            if protocol == const.UDP:
+                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
+            elif protocol == const.TCP:
+                hm_type = const.HEALTH_MONITOR_TCP
+
+            hm_kwargs = {
+                const.POOL_ID: pool_id,
+                const.NAME: hm_name,
+                const.TYPE: hm_type,
+                const.DELAY: 3,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.ADMIN_STATE_UP: True,
+            }
+        else:
+            hm_kwargs = {
+                const.POOL_ID: pool_id,
+                const.NAME: hm_name,
+                const.TYPE: const.HEALTH_MONITOR_HTTP,
+                const.DELAY: 2,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.HTTP_METHOD: const.GET,
+                const.URL_PATH: '/',
+                const.EXPECTED_CODES: '200',
+                const.ADMIN_STATE_UP: True,
+            }
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        return hm
+
     @decorators.attr(type=['smoke', 'slow'])
     @testtools.skipIf(CONF.load_balancer.test_with_noop,
                       'Traffic tests will not work in noop mode.')
@@ -348,54 +407,7 @@
                                     protocol=protocol, persistent=persistent)
 
         # Create the healthmonitor
-        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
-        if protocol != const.HTTP:
-            if protocol == const.UDP:
-                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
-            elif protocol == const.TCP:
-                hm_type = const.HEALTH_MONITOR_TCP
-
-            hm_kwargs = {
-                const.POOL_ID: pool_id,
-                const.NAME: hm_name,
-                const.TYPE: hm_type,
-                const.DELAY: 3,
-                const.TIMEOUT: 2,
-                const.MAX_RETRIES: 2,
-                const.MAX_RETRIES_DOWN: 2,
-                const.ADMIN_STATE_UP: True,
-            }
-        else:
-            hm_kwargs = {
-                const.POOL_ID: pool_id,
-                const.NAME: hm_name,
-                const.TYPE: const.HEALTH_MONITOR_HTTP,
-                const.DELAY: 2,
-                const.TIMEOUT: 2,
-                const.MAX_RETRIES: 2,
-                const.MAX_RETRIES_DOWN: 2,
-                const.HTTP_METHOD: const.GET,
-                const.URL_PATH: '/',
-                const.EXPECTED_CODES: '200',
-                const.ADMIN_STATE_UP: True,
-            }
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
+        hm = self._pool_add_healthmonitor(pool_id, protocol)
 
         # Wait for members to adjust to the correct OPERATING_STATUS
         waiters.wait_for_status(
@@ -1339,7 +1351,7 @@
         test_url = 'http://{}:{}/request'.format(
             self.lb_vip_address, listener_port)
         data = self.validate_URL_response(test_url)
-        LOG.info('Received payload is: {}'.format(data))
+        LOG.info('Received payload is: %s', data)
 
         # Detect source IP that is used to create TCP socket toward LB_VIP.
         try:
@@ -1386,7 +1398,7 @@
 
         # Initiate HTTP traffic
         data = self.validate_URL_response(test_url)
-        LOG.info('Received payload is: {}'.format(data))
+        LOG.info('Received payload is: %s', data)
         expected_headers = {const.X_FORWARDED_PORT: '{}'.format(
             listener_port), const.X_FORWARDED_PROTO: const.HTTP.lower()}
         received_headers = _data_parser(data, expected_headers)
@@ -1402,3 +1414,386 @@
         self._test_basic_traffic(const.UDP, common_vip_port, pool_id_udp)
         self._test_basic_traffic(const.TCP, common_vip_port, pool_id_tcp,
                                  persistent=False)
+
+    @decorators.idempotent_id('c79f2cd0-0324-11eb-bc8e-74e5f9e2a801')
+    def test_udp_update_pool_healthmonitor_listener(self):
+        """Test scenario:
+
+        * Prerequisites:
+          Create a UDP listener, pool and health monitor, and validate
+          UDP traffic.
+        * Test scenario:
+          Update the pool algorithm to "source_ip" and start sending UDP
+          traffic.
+          Expected: UDP packets are successfully received from the LB VIP.
+        * Update the health monitor with "delay=5" and start sending UDP
+          traffic.
+          Expected: UDP packets are successfully received from the LB VIP.
+        * Update the listener with "connection-limit=300" and start sending
+          UDP traffic.
+          Expected: UDP packets are successfully received from the LB VIP.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        listener_port = 104
+        listener_id, pool_id = self._listener_pool_create(
+            const.UDP, listener_port)
+        healthmonitor_id = self._pool_add_healthmonitor(
+            pool_id, protocol=const.UDP)[const.ID]
+        self._test_basic_traffic(
+            const.UDP, listener_port, pool_id)
+
+        # Update LB pool
+        self.mem_pool_client.update_pool(
+            pool_id=pool_id, lb_algorithm=const.LB_ALGORITHM_SOURCE_IP)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to the LB VIP have failed')
+
+        # Update LB healthmonitor
+        self.mem_healthmonitor_client.update_healthmonitor(
+            healthmonitor_id=healthmonitor_id, delay=5)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to the LB VIP have failed')
+
+        # Update LB listener
+        listener_kwargs = {const.LISTENER_ID: listener_id,
+                           const.CONNECTION_LIMIT: 300}
+        self.mem_listener_client.update_listener(**listener_kwargs)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to the LB VIP have failed')
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('cd5aeefa-0e16-11eb-b8dc-74e5f9e2a801')
+    def test_hm_op_status_changed_as_expected_on_update(self):
+        """Test scenario:
+
+        * Create a TCP listener, pool and HTTP health monitor.
+        * Update the health monitor with various combinations of
+          HTTP method, expected HTTP status codes and backend URL path.
+          Note: see the "fault_cases" and "valid_cases" lists in the test
+          code.
+        * Validate that the members' operating status reaches the expected
+          state after each update.
+          Important: the operating status is expected to alternate between
+          ONLINE and ERROR across the updates; otherwise a potential bug
+          could be missed.
+        """
+        listener_port = 105
+        listener_id, pool_id = self._listener_pool_create(
+            const.TCP, listener_port)
+        hm_id = self._pool_add_healthmonitor(
+            pool_id, protocol=const.HTTP)[const.ID]
+        self._test_basic_traffic(
+            const.HTTP, listener_port, pool_id, persistent=False)
+        mb_ids = [mb[const.ID] for
+                  mb in self.mem_member_client.list_members(pool_id)]
+
+        # Create list of test cases to be covered in test
+        fault_cases = [
+            {'mthd': const.POST, 'code': '101-102', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=103'},
+            {'mthd': const.DELETE, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.PUT, 'code': '301-302', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=303'},
+            {'mthd': const.HEAD, 'code': '400-404', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=405'},
+            {'mthd': const.OPTIONS, 'code': '500-504', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=505'},
+            {'mthd': const.PATCH, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.CONNECT, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.TRACE, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'}]
+        valid_cases = [
+            {'mthd': const.GET, 'code': '101-102', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=102'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=202'},
+            {'mthd': const.GET, 'code': '301-302', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=302'},
+            {'mthd': const.GET, 'code': '400-404', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=404'},
+            {'mthd': const.GET, 'code': '500-504', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=504'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'}]
+        # Generate "flip_flop" using zip function, that will have
+        # the operation statuses changed on each subsequent test case.
+        # It means interleaved like: ERROR, ONLINE, ERROR, ONLINE...
+        flip_flop = [v for f in zip(valid_cases, fault_cases) for v in f]
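+        # For example, zip([v1, v2], [f1, f2]) yields (v1, f1), (v2, f2), so
+        # the flattened list alternates valid and fault cases.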
+
+        # For each test case, update the HM and validate that the members'
+        # operating status changes to the expected value.
+        for ff in flip_flop:
+            LOG.info('Testing case: %s', ff)
+            self.mem_healthmonitor_client.update_healthmonitor(
+                hm_id, expected_codes=ff['code'], http_method=ff['mthd'],
+                url_path=ff['url_path'])
+            waiters.wait_for_status(
+                self.mem_lb_client.show_loadbalancer, self.lb_id,
+                const.PROVISIONING_STATUS, const.ACTIVE,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout)
+            for mb_id in mb_ids:
+                waiters.wait_for_status(
+                    self.mem_member_client.show_member,
+                    mb_id, const.OPERATING_STATUS,
+                    ff['op_stat'],
+                    CONF.load_balancer.check_interval,
+                    CONF.load_balancer.check_timeout,
+                    error_ok=True, pool_id=pool_id)
+
+    @decorators.idempotent_id('05e99fb3-2b37-478e-889b-77f1c731a471')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_metrics_page(self):
+        """Tests PROMETHEUS listener create and metrics endpoint is available
+
+        * Create PROMETHEUS listener.
+        * Query the metrics endpoint on the load balancer.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+
+        # Listener create
+        listener_name = data_utils.rand_name("lb_member_prometheus_listener")
+        listener_description = data_utils.arbitrary_string(size=255)
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.DESCRIPTION: listener_description,
+            const.ADMIN_STATE_UP: True,
+            const.PROTOCOL: const.PROMETHEUS,
+            const.PROTOCOL_PORT: 8080,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.CONNECTION_LIMIT: 200,
+        }
+
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 1000,
+                const.TIMEOUT_MEMBER_CONNECT: 1000,
+                const.TIMEOUT_MEMBER_DATA: 1000,
+                const.TIMEOUT_TCP_INSPECT: 50,
+            })
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            listener_kwargs.update({const.ALLOWED_CIDRS: ['0.0.0.0/0']})
+
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        listener = waiters.wait_for_status(
+            self.mem_listener_client.show_listener,
+            listener[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Make a request to the Prometheus metrics endpoint
+        URL = 'http://{0}:{1}/metrics'.format(self.lb_vip_address, '8080')
+        self.validate_URL_response(URL, expected_status_code=200)
+
+    @decorators.idempotent_id('b2d5cefe-eac0-4eb3-b7c2-54f22578def9')
+    def test_backup_member(self):
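+        # Verify that the BACKUP member only receives traffic while the
+        # non-backup member is administratively disabled or deleted, and that
+        # traffic returns to the non-backup member once it is re-enabled.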
+        if not self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('Backup member support is only available '
+                                     'in Octavia API version 2.1 or newer')
+
+        _LISTENER_PORT = 106
+        # Create a unique listener and pool for this test
+        pool_id = self._listener_pool_create(const.HTTP, _LISTENER_PORT)[1]
+
+        # Create a health monitor on the pool
+        hm_name = data_utils.rand_name("lb_member_hm1-backup-not-active")
+        hm_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTP,
+            const.DELAY: 1,
+            const.TIMEOUT: 1,
+            const.MAX_RETRIES: 1,
+            const.MAX_RETRIES_DOWN: 1,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
+            const.EXPECTED_CODES: '200',
+            const.ADMIN_STATE_UP: True,
+        }
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-not-backup")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2 (Backup)
+        member2_name = data_utils.rand_name("lb_member_member2-backup")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 80,
+            const.BACKUP: True,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        url_for_tests = f'http://{self.lb_vip_address}:{_LISTENER_PORT}/'
+
+        # Send some requests and check that only member 1 is responding
+        self.assertConsistentResponse((200, self.webserver1_response),
+                                      url_for_tests)
+
+        # Disable member 1 and check that the backup member takes over
+        member_update_kwargs = {
+            const.POOL_ID: pool_id,
+            const.ADMIN_STATE_UP: False}
+
+        self.mem_member_client.update_member(
+            member1[const.ID], **member_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member1[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout,
+            pool_id=pool_id)
+
+        # Send some requests and check that only backup member 2 is responding
+        self.assertConsistentResponse((200, self.webserver2_response),
+                                      url_for_tests)
+
+        # Enable member 1 and check that member 1 traffic resumes
+        member_update_kwargs = {
+            const.POOL_ID: pool_id,
+            const.ADMIN_STATE_UP: True}
+
+        self.mem_member_client.update_member(
+            member1[const.ID], **member_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member1[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout,
+            pool_id=pool_id)
+
+        # Send some requests and check that only member 1 is responding
+        self.assertConsistentResponse((200, self.webserver1_response),
+                                      url_for_tests)
+
+        # Delete member 1 and check that backup member 2 is responding
+        self.mem_member_client.delete_member(
+            member1[const.ID],
+            pool_id=pool_id)
+
+        waiters.wait_for_deleted_status_or_not_found(
+            self.mem_member_client.show_member, member1[const.ID],
+            const.PROVISIONING_STATUS,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout,
+            pool_id=pool_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Send some requests and check that only backup member 2 is responding
+        self.assertConsistentResponse((200, self.webserver2_response),
+                                      url_for_tests)
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
index 0ccfe55..26866c3 100644
--- a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
@@ -82,7 +82,7 @@
                 floating_network_id=CONF.network.public_network_id,
                 port_id=port_id)
             floating_ip = result['floatingip']
-            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            LOG.info('lb1_floating_ip: %s', floating_ip)
             self.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 self.lb_mem_float_ip_client.delete_floatingip,
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index a18066a..3f10ad9 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -25,7 +25,9 @@
 from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import uuidutils
+from tempest import clients
 from tempest import config
+from tempest.lib import auth
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils.linux import remote_client
 from tempest.lib import exceptions
@@ -34,6 +36,7 @@
 
 from octavia_tempest_plugin.common import cert_utils
 from octavia_tempest_plugin.common import constants as const
+import octavia_tempest_plugin.services.load_balancer.v2 as lbv2
 from octavia_tempest_plugin.tests import RBAC_tests
 from octavia_tempest_plugin.tests import validators
 from octavia_tempest_plugin.tests import waiters
@@ -56,28 +59,25 @@
             'admin', 'primary', ['lb_admin', CONF.load_balancer.admin_role],
             ['lb_member', CONF.load_balancer.member_role],
             ['lb_member2', CONF.load_balancer.member_role]]
-    elif CONF.load_balancer.enforce_new_defaults:
+    elif CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
         credentials = [
-            'admin', 'primary', ['lb_admin', CONF.load_balancer.admin_role],
-            ['lb_observer', CONF.load_balancer.observer_role, 'reader'],
-            ['lb_global_observer', CONF.load_balancer.global_observer_role,
-             'reader'],
-            ['lb_member', CONF.load_balancer.member_role, 'member'],
-            ['lb_member2', CONF.load_balancer.member_role, 'member'],
-            ['lb_member_not_default_member', CONF.load_balancer.member_role]]
+            'admin', 'primary',
+            ['lb_admin', 'admin'],
+            ['lb_observer', 'reader'],
+            ['lb_global_observer', 'reader'],
+            ['lb_member', 'member'],
+            ['lb_member2', 'member']]
+        # Note: an additional non-member user is added in setup_credentials
     else:
         credentials = [
             'admin', 'primary', ['lb_admin', CONF.load_balancer.admin_role],
             ['lb_observer', CONF.load_balancer.observer_role, 'reader'],
             ['lb_global_observer', CONF.load_balancer.global_observer_role,
              'reader'],
-            ['lb_member', CONF.load_balancer.member_role],
-            ['lb_member2', CONF.load_balancer.member_role]]
-
-    # If scope enforcement is enabled, add in the system scope credentials.
-    # The project scope is already handled by the above credentials.
-    if CONF.enforce_scope.octavia:
-        credentials.extend(['system_admin', 'system_reader'])
+            # Note: Some projects (nova, for example) now require the
+            # 'member' role by default, so make sure our creds have this role
+            ['lb_member', CONF.load_balancer.member_role, 'member'],
+            ['lb_member2', CONF.load_balancer.member_role, 'member']]
 
     # A tuple of credentials that will be allocated by tempest using the
     # 'credentials' list above. These are used to build RBAC test lists.
@@ -130,15 +130,74 @@
             raise cls.skipException(msg)
 
     @classmethod
+    def _setup_new_user_role_client(cls, project_id, role_name):
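+        # Create a new keystone user, ensure the requested role exists
+        # (creating it if necessary), assign the role to the user on the
+        # given project, and return a clients.Manager authenticated as that
+        # user.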
+        user = {
+            'name': data_utils.rand_name('user'),
+            'password': data_utils.rand_password()
+        }
+        user_id = cls.os_admin.users_v3_client.create_user(
+            **user)['user']['id']
+        cls._created_users.append(user_id)
+        roles = cls.os_admin.roles_v3_client.list_roles(
+            name=role_name)['roles']
+        if len(roles) == 0:
+            role = {
+                'name': role_name
+            }
+            role_id = cls.os_admin.roles_v3_client.create_role(
+                **role)['role']['id']
+            cls._created_roles.append(role_id)
+        else:
+            role_id = roles[0]['id']
+        cls.os_admin.roles_v3_client.create_user_role_on_project(
+            project_id, user_id, role_id
+        )
+        creds = auth.KeystoneV3Credentials(
+            user_id=user_id,
+            password=user['password'],
+            project_id=project_id
+        )
+        auth_provider = clients.get_auth_provider(creds)
+        creds = auth_provider.fill_credentials()
+        return clients.Manager(credentials=creds)
+
+    @classmethod
     def setup_credentials(cls):
         """Setup test credentials and network resources."""
         # Do not auto create network resources
         cls.set_network_resources()
         super(LoadBalancerBaseTest, cls).setup_credentials()
 
+        cls._created_projects = []
+        cls._created_users = []
+        cls._created_roles = []
+
+        non_dyn_users = []
+
+        if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
+            # Create a non-member user for keystone_default_roles
+            # When using dynamic credentials, tempest cannot create a user
+            # without a role; it always adds at least the "member" role.
+            # We manually create the user with a temporary role instead.
+            project_id = cls.os_admin.projects_client.create_project(
+                data_utils.rand_name()
+            )['project']['id']
+            cls._created_projects.append(project_id)
+            cls.os_not_member = cls._setup_new_user_role_client(
+                project_id,
+                data_utils.rand_name('role'))
+            cls.allocated_creds.append('os_not_member')
+            non_dyn_users.append('not_member')
+
+        # Tests shall not mess with the list of allocated credentials
+        cls.allocated_credentials = tuple(cls.allocated_creds)
+
+        if not CONF.load_balancer.log_user_roles:
+            return
+
         # Log the user roles for this test run
         role_name_cache = {}
-        for cred in cls.credentials:
+        for cred in cls.credentials + non_dyn_users:
             user_roles = []
             if isinstance(cred, list):
                 user_name = cred[0]
@@ -147,7 +206,7 @@
                 user_name = cred
                 cred_obj = getattr(cls, 'os_' + cred)
             params = {'user.id': cred_obj.credentials.user_id,
-                      'project.id': cred_obj.credentials.project_id}
+                      'scope.project.id': cred_obj.credentials.project_id}
             roles = cls.os_admin.role_assignments_client.list_role_assignments(
                 **params)['role_assignments']
             for role in roles:
@@ -162,6 +221,16 @@
             LOG.info("User %s has roles: %s", user_name, user_roles)
 
     @classmethod
+    def clear_credentials(cls):
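+        # Remove the users, projects and roles that were created manually in
+        # setup_credentials before running the standard credential cleanup.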
+        for user_id in cls._created_users:
+            cls.os_admin.users_v3_client.delete_user(user_id)
+        for project_id in cls._created_projects:
+            cls.os_admin.projects_client.delete_project(project_id)
+        for role_id in cls._created_roles:
+            cls.os_admin.roles_v3_client.delete_role(role_id)
+        super().clear_credentials()
+
+    @classmethod
     def setup_clients(cls):
         """Setup client aliases."""
         super(LoadBalancerBaseTest, cls).setup_clients()
@@ -176,27 +245,29 @@
             cls.os_roles_lb_member.security_group_rules_client)
         cls.lb_mem_servers_client = cls.os_roles_lb_member.servers_client
         cls.lb_mem_subnet_client = cls.os_roles_lb_member.subnets_client
-        cls.mem_lb_client = (
+        cls.mem_lb_client: lbv2.LoadbalancerClient = (
             cls.os_roles_lb_member.load_balancer_v2.LoadbalancerClient())
-        cls.mem_listener_client = (
+        cls.mem_listener_client: lbv2.ListenerClient = (
             cls.os_roles_lb_member.load_balancer_v2.ListenerClient())
-        cls.mem_pool_client = (
+        cls.mem_pool_client: lbv2.PoolClient = (
             cls.os_roles_lb_member.load_balancer_v2.PoolClient())
-        cls.mem_member_client = (
+        cls.mem_member_client: lbv2.MemberClient = (
             cls.os_roles_lb_member.load_balancer_v2.MemberClient())
-        cls.mem_healthmonitor_client = (
+        cls.mem_healthmonitor_client: lbv2.HealthMonitorClient = (
             cls.os_roles_lb_member.load_balancer_v2.HealthMonitorClient())
-        cls.mem_l7policy_client = (
+        cls.mem_l7policy_client: lbv2.L7PolicyClient = (
             cls.os_roles_lb_member.load_balancer_v2.L7PolicyClient())
-        cls.mem_l7rule_client = (
+        cls.mem_l7rule_client: lbv2.L7RuleClient = (
             cls.os_roles_lb_member.load_balancer_v2.L7RuleClient())
-        cls.lb_admin_amphora_client = lb_admin_prefix.AmphoraClient()
-        cls.lb_admin_flavor_profile_client = (
+        cls.lb_admin_amphora_client: lbv2.AmphoraClient = (
+            lb_admin_prefix.AmphoraClient())
+        cls.lb_admin_flavor_profile_client: lbv2.FlavorProfileClient = (
             lb_admin_prefix.FlavorProfileClient())
-        cls.lb_admin_flavor_client = lb_admin_prefix.FlavorClient()
-        cls.mem_flavor_client = (
+        cls.lb_admin_flavor_client: lbv2.FlavorClient = (
+            lb_admin_prefix.FlavorClient())
+        cls.mem_flavor_client: lbv2.FlavorClient = (
             cls.os_roles_lb_member.load_balancer_v2.FlavorClient())
-        cls.mem_provider_client = (
+        cls.mem_provider_client: lbv2.ProviderClient = (
             cls.os_roles_lb_member.load_balancer_v2.ProviderClient())
         cls.os_admin_servers_client = cls.os_admin.servers_client
         cls.os_admin_routers_client = cls.os_admin.routers_client
@@ -211,6 +282,7 @@
             lb_admin_prefix.AvailabilityZoneClient())
         cls.mem_availability_zone_client = (
             cls.os_roles_lb_member.load_balancer_v2.AvailabilityZoneClient())
+        cls.os_admin_compute_flavors_client = cls.os_admin.flavors_client
 
     @classmethod
     def resource_setup(cls):
@@ -287,31 +359,31 @@
         else:
             cls._create_networks()
 
-        LOG.debug('Octavia Setup: lb_member_vip_net = {}'.format(
-            cls.lb_member_vip_net[const.ID]))
+        LOG.debug('Octavia Setup: lb_member_vip_net = %s',
+                  cls.lb_member_vip_net[const.ID])
         if cls.lb_member_vip_subnet:
-            LOG.debug('Octavia Setup: lb_member_vip_subnet = {}'.format(
-                cls.lb_member_vip_subnet[const.ID]))
-        LOG.debug('Octavia Setup: lb_member_1_net = {}'.format(
-            cls.lb_member_1_net[const.ID]))
+            LOG.debug('Octavia Setup: lb_member_vip_subnet = %s',
+                      cls.lb_member_vip_subnet[const.ID])
+        LOG.debug('Octavia Setup: lb_member_1_net = %s',
+                  cls.lb_member_1_net[const.ID])
         if cls.lb_member_1_subnet:
-            LOG.debug('Octavia Setup: lb_member_1_subnet = {}'.format(
-                cls.lb_member_1_subnet[const.ID]))
-        LOG.debug('Octavia Setup: lb_member_2_net = {}'.format(
-            cls.lb_member_2_net[const.ID]))
+            LOG.debug('Octavia Setup: lb_member_1_subnet = %s',
+                      cls.lb_member_1_subnet[const.ID])
+        LOG.debug('Octavia Setup: lb_member_2_net = %s',
+                  cls.lb_member_2_net[const.ID])
         if cls.lb_member_2_subnet:
-            LOG.debug('Octavia Setup: lb_member_2_subnet = {}'.format(
-                cls.lb_member_2_subnet[const.ID]))
+            LOG.debug('Octavia Setup: lb_member_2_subnet = %s',
+                      cls.lb_member_2_subnet[const.ID])
         if CONF.load_balancer.test_with_ipv6:
             if cls.lb_member_vip_ipv6_subnet:
-                LOG.debug('Octavia Setup: lb_member_vip_ipv6_subnet = '
-                          '{}'.format(cls.lb_member_vip_ipv6_subnet[const.ID]))
+                LOG.debug('Octavia Setup: lb_member_vip_ipv6_subnet = %s',
+                          cls.lb_member_vip_ipv6_subnet[const.ID])
             if cls.lb_member_1_ipv6_subnet:
-                LOG.debug('Octavia Setup: lb_member_1_ipv6_subnet = {}'.format(
-                    cls.lb_member_1_ipv6_subnet[const.ID]))
+                LOG.debug('Octavia Setup: lb_member_1_ipv6_subnet = %s',
+                          cls.lb_member_1_ipv6_subnet[const.ID])
             if cls.lb_member_2_ipv6_subnet:
-                LOG.debug('Octavia Setup: lb_member_2_ipv6_subnet = {}'.format(
-                    cls.lb_member_2_ipv6_subnet[const.ID]))
+                LOG.debug('Octavia Setup: lb_member_2_ipv6_subnet = %s',
+                          cls.lb_member_2_ipv6_subnet[const.ID])
 
     @classmethod
     # Neutron can be slow to clean up ports from the subnets/networks.
@@ -326,8 +398,7 @@
         try:
             cls.lb_mem_net_client.delete_network(net_id)
         except Exception:
-            LOG.error('Unable to delete network {}. Active ports:'.format(
-                net_id))
+            LOG.error('Unable to delete network %s. Active ports:', net_id)
             LOG.error(cls.lb_mem_ports_client.list_ports())
             raise
 
@@ -344,8 +415,7 @@
         try:
             cls.lb_mem_subnet_client.delete_subnet(subnet_id)
         except Exception:
-            LOG.error('Unable to delete subnet {}. Active ports:'.format(
-                subnet_id))
+            LOG.error('Unable to delete subnet %s. Active ports:', subnet_id)
             LOG.error(cls.lb_mem_ports_client.list_ports())
             raise
 
@@ -373,7 +443,7 @@
             network_kwargs['port_security_enabled'] = True
         result = cls.lb_mem_net_client.create_network(**network_kwargs)
         cls.lb_member_vip_net = result['network']
-        LOG.info('lb_member_vip_net: {}'.format(cls.lb_member_vip_net))
+        LOG.info('lb_member_vip_net: %s', cls.lb_member_vip_net)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls._logging_delete_network,
@@ -388,7 +458,7 @@
             'ip_version': 4}
         result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
         cls.lb_member_vip_subnet = result['subnet']
-        LOG.info('lb_member_vip_subnet: {}'.format(cls.lb_member_vip_subnet))
+        LOG.info('lb_member_vip_subnet: %s', cls.lb_member_vip_subnet)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls._logging_delete_subnet,
@@ -430,8 +500,8 @@
                 cls.lb_mem_subnet_client.show_subnet,
                 cls.lb_member_vip_ipv6_subnet['id'])
 
-            LOG.info('lb_member_vip_ipv6_subnet: {}'.format(
-                cls.lb_member_vip_ipv6_subnet))
+            LOG.info('lb_member_vip_ipv6_subnet: %s',
+                     cls.lb_member_vip_ipv6_subnet)
 
         # Create tenant member 1 network
         network_kwargs = {
@@ -443,7 +513,7 @@
                 network_kwargs['port_security_enabled'] = False
         result = cls.lb_mem_net_client.create_network(**network_kwargs)
         cls.lb_member_1_net = result['network']
-        LOG.info('lb_member_1_net: {}'.format(cls.lb_member_1_net))
+        LOG.info('lb_member_1_net: %s', cls.lb_member_1_net)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls._logging_delete_network,
@@ -458,7 +528,7 @@
             'ip_version': 4}
         result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
         cls.lb_member_1_subnet = result['subnet']
-        LOG.info('lb_member_1_subnet: {}'.format(cls.lb_member_1_subnet))
+        LOG.info('lb_member_1_subnet: %s', cls.lb_member_1_subnet)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls._logging_delete_subnet,
@@ -476,10 +546,10 @@
             cls.lb_member_1_subnet_prefix = (
                 CONF.load_balancer.member_1_ipv6_subnet_cidr.rpartition('/')[2]
                 )
-            assert(cls.lb_member_1_subnet_prefix.isdigit())
+            assert (cls.lb_member_1_subnet_prefix.isdigit())
             cls.lb_member_1_ipv6_subnet = result['subnet']
-            LOG.info('lb_member_1_ipv6_subnet: {}'.format(
-                cls.lb_member_1_ipv6_subnet))
+            LOG.info('lb_member_1_ipv6_subnet: %s',
+                     cls.lb_member_1_ipv6_subnet)
             cls.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 cls._logging_delete_subnet,
@@ -496,7 +566,7 @@
                 network_kwargs['port_security_enabled'] = False
         result = cls.lb_mem_net_client.create_network(**network_kwargs)
         cls.lb_member_2_net = result['network']
-        LOG.info('lb_member_2_net: {}'.format(cls.lb_member_2_net))
+        LOG.info('lb_member_2_net: %s', cls.lb_member_2_net)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls._logging_delete_network,
@@ -511,7 +581,7 @@
             'ip_version': 4}
         result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
         cls.lb_member_2_subnet = result['subnet']
-        LOG.info('lb_member_2_subnet: {}'.format(cls.lb_member_2_subnet))
+        LOG.info('lb_member_2_subnet: %s', cls.lb_member_2_subnet)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls._logging_delete_subnet,
@@ -529,10 +599,10 @@
             cls.lb_member_2_subnet_prefix = (
                 CONF.load_balancer.member_2_ipv6_subnet_cidr.rpartition('/')[2]
                 )
-            assert(cls.lb_member_2_subnet_prefix.isdigit())
+            assert (cls.lb_member_2_subnet_prefix.isdigit())
             cls.lb_member_2_ipv6_subnet = result['subnet']
-            LOG.info('lb_member_2_ipv6_subnet: {}'.format(
-                cls.lb_member_2_ipv6_subnet))
+            LOG.info('lb_member_2_ipv6_subnet: %s',
+                     cls.lb_member_2_ipv6_subnet)
             cls.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 cls._logging_delete_subnet,
@@ -581,6 +651,17 @@
             lb_kwargs[const.VIP_NETWORK_ID] = cls.lb_member_vip_net[const.ID]
             lb_kwargs[const.VIP_SUBNET_ID] = None
 
+    def _validate_listener_protocol(self, protocol, raise_if_unsupported=True):
+        if (protocol == const.SCTP and
+            not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.23')):
+            if raise_if_unsupported:
+                raise self.skipException('SCTP listener protocol '
+                                         'is only available on Octavia '
+                                         'API version 2.23 or newer.')
+            return False
+        return True
+
 
 class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
     @classmethod
@@ -608,7 +689,7 @@
         result = cls.lb_mem_keypairs_client.create_keypair(
             name=keypair_name)
         cls.lb_member_keypair = result['keypair']
-        LOG.info('lb_member_keypair: {}'.format(cls.lb_member_keypair))
+        LOG.info('lb_member_keypair: %s', cls.lb_member_keypair)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls.lb_mem_keypairs_client.delete_keypair,
@@ -667,6 +748,20 @@
                 cls.lb_mem_SGr_client.delete_security_group_rule,
                 cls.lb_mem_SGr_client.show_security_group_rule,
                 SGr['id'])
+            # Create a security group rule to allow 9443 (test webservers)
+            # Used in the pool backend encryption client authentication tests
+            SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                direction='ingress',
+                security_group_id=cls.lb_member_sec_group['id'],
+                protocol='tcp',
+                ethertype='IPv4',
+                port_range_min=9443,
+                port_range_max=9443)['security_group_rule']
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_SGr_client.delete_security_group_rule,
+                cls.lb_mem_SGr_client.show_security_group_rule,
+                SGr['id'])
             # Create a security group rule to allow UDP 9999 (test webservers)
             # Port 9999 is used to illustrate health monitor ERRORs on closed
             # ports.
@@ -736,6 +831,20 @@
                     cls.lb_mem_SGr_client.delete_security_group_rule,
                     cls.lb_mem_SGr_client.show_security_group_rule,
                     SGr['id'])
+                # Create a security group rule to allow 9443 (test webservers)
+                # Used in the pool encryption client authentication tests
+                SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                    direction='ingress',
+                    security_group_id=cls.lb_member_sec_group['id'],
+                    protocol='tcp',
+                    ethertype='IPv6',
+                    port_range_min=9443,
+                    port_range_max=9443)['security_group_rule']
+                cls.addClassResourceCleanup(
+                    waiters.wait_for_not_found,
+                    cls.lb_mem_SGr_client.delete_security_group_rule,
+                    cls.lb_mem_SGr_client.show_security_group_rule,
+                    SGr['id'])
                 # Create a security group rule to allow 22 (ssh)
                 SGr = cls.lb_mem_SGr_client.create_security_group_rule(
                     direction='ingress',
@@ -750,7 +859,7 @@
                     cls.lb_mem_SGr_client.show_security_group_rule,
                     SGr['id'])
 
-            LOG.info('lb_member_sec_group: {}'.format(cls.lb_member_sec_group))
+            LOG.info('lb_member_sec_group: %s', cls.lb_member_sec_group)
 
         # Setup backend member reencryption PKI
         cls._create_backend_reencryption_pki()
@@ -764,14 +873,12 @@
         cls.webserver1_ipv6 = server_details.get('ipv6_address')
         cls.webserver1_public_ip = server_details['public_ipv4_address']
 
-        LOG.debug('Octavia Setup: lb_member_webserver1 = {}'.format(
-            cls.lb_member_webserver1[const.ID]))
-        LOG.debug('Octavia Setup: webserver1_ip = {}'.format(
-            cls.webserver1_ip))
-        LOG.debug('Octavia Setup: webserver1_ipv6 = {}'.format(
-            cls.webserver1_ipv6))
-        LOG.debug('Octavia Setup: webserver1_public_ip = {}'.format(
-            cls.webserver1_public_ip))
+        LOG.debug('Octavia Setup: lb_member_webserver1 = %s',
+                  cls.lb_member_webserver1[const.ID])
+        LOG.debug('Octavia Setup: webserver1_ip = %s', cls.webserver1_ip)
+        LOG.debug('Octavia Setup: webserver1_ipv6 = %s', cls.webserver1_ipv6)
+        LOG.debug('Octavia Setup: webserver1_public_ip = %s',
+                  cls.webserver1_public_ip)
 
         # Create webserver 2 instance
         server_details = cls._create_webserver('lb_member_webserver2',
@@ -782,14 +889,12 @@
         cls.webserver2_ipv6 = server_details.get('ipv6_address')
         cls.webserver2_public_ip = server_details['public_ipv4_address']
 
-        LOG.debug('Octavia Setup: lb_member_webserver2 = {}'.format(
-            cls.lb_member_webserver2[const.ID]))
-        LOG.debug('Octavia Setup: webserver2_ip = {}'.format(
-            cls.webserver2_ip))
-        LOG.debug('Octavia Setup: webserver2_ipv6 = {}'.format(
-            cls.webserver2_ipv6))
-        LOG.debug('Octavia Setup: webserver2_public_ip = {}'.format(
-            cls.webserver2_public_ip))
+        LOG.debug('Octavia Setup: lb_member_webserver2 = %s',
+                  cls.lb_member_webserver2[const.ID])
+        LOG.debug('Octavia Setup: webserver2_ip = %s', cls.webserver2_ip)
+        LOG.debug('Octavia Setup: webserver2_ipv6 = %s', cls.webserver2_ipv6)
+        LOG.debug('Octavia Setup: webserver2_public_ip = %s',
+                  cls.webserver2_public_ip)
 
         if CONF.load_balancer.test_with_ipv6:
             # Enable the IPv6 nic in webserver 1
@@ -838,7 +943,7 @@
             external_gateway_info=dict(
                 network_id=CONF.network.public_network_id))
         cls.lb_member_router = result['router']
-        LOG.info('lb_member_router: {}'.format(cls.lb_member_router))
+        LOG.info('lb_member_router: %s', cls.lb_member_router)
         cls.addClassResourceCleanup(
             waiters.wait_for_not_found,
             cls.lb_mem_routers_client.delete_router,
@@ -953,7 +1058,7 @@
             CONF.load_balancer.build_timeout,
             root_tag='server')
         webserver_details = {'server': server}
-        LOG.info('Created server: {}'.format(server))
+        LOG.info('Created server: %s', server)
 
         addresses = server['addresses']
         if CONF.load_balancer.disable_boot_network:
@@ -975,7 +1080,7 @@
                 floating_network_id=CONF.network.public_network_id,
                 port_id=port_id)
             floating_ip = result['floatingip']
-            LOG.info('webserver1_floating_ip: {}'.format(floating_ip))
+            LOG.info('webserver1_floating_ip: %s', floating_ip)
             cls.addClassResourceCleanup(
                 waiters.wait_for_not_found,
                 cls.lb_mem_float_ip_client.delete_floatingip,
@@ -1010,7 +1115,7 @@
         # the new default (SFTP protocol) doesn't work with
         # cirros VMs.
         ssh_version = cls._get_openssh_version()
-        LOG.debug("ssh_version = {}".format(ssh_version))
+        LOG.debug("ssh_version = %s", ssh_version)
         return (ssh_version[0] > 8 or
                 (ssh_version[0] == 8 and ssh_version[1] >= 7))
 
diff --git a/octavia_tempest_plugin/tests/validators.py b/octavia_tempest_plugin/tests/validators.py
index 5972d54..5ff7bd5 100644
--- a/octavia_tempest_plugin/tests/validators.py
+++ b/octavia_tempest_plugin/tests/validators.py
@@ -105,7 +105,7 @@
                 return response_text
             except requests.exceptions.Timeout:
                 # Don't sleep as we have already waited the interval.
-                LOG.info('Request for {} timed out. Retrying.'.format(URL))
+                LOG.info('Request for %s timed out. Retrying.', URL)
             except (exceptions.InvalidHttpSuccessCode,
                     exceptions.InvalidHTTPResponseBody,
                     requests.exceptions.SSLError):
@@ -113,8 +113,8 @@
                     session.close()
                 raise
             except Exception as e:
-                LOG.info('Validate URL got exception: {0}. '
-                         'Retrying.'.format(e))
+                LOG.info('Validate URL got exception: %s. '
+                         'Retrying.', e)
                 time.sleep(request_interval)
         if requests_session is None:
             session.close()
@@ -412,7 +412,7 @@
                     return
             except Exception:
                 LOG.warning('Server is not passing initial traffic. Waiting.')
-                time.sleep(1)
+            time.sleep(request_interval)
 
         LOG.debug('Loadbalancer wait for load balancer response totals: %s',
                   response_counts)
@@ -421,3 +421,36 @@
                                                           protocol_port))
         LOG.error(message)
         raise Exception(message)
+
+    def make_udp_requests_with_retries(
+            self, vip_address, number_of_retries, dst_port,
+            src_port=None, socket_timeout=20):
+        """Send UDP packets using retries mechanism
+
+        The delivery of data to the destination cannot be guaranteed in UDP.
+        When a UDP packet is lost, we might want to check whether the cause
+        is a network issue or a problem on the server side, so we need to
+        send more packets before drawing a conclusion.
+
+        :param vip_address: LB VIP address
+        :param number_of_retries: integer number of retries
+        :param dst_port: UDP server destination port
+        :param src_port: UDP source port to bind for UDP connection
+        :param socket_timeout: UDP socket timeout
+        :return: None if all UDP retries failed, else the first successful
+                 response data from the UDP server.
+        """
+        retry_number = 0
+        received_data = None
+        while retry_number < number_of_retries:
+            LOG.info('make_udp_requests_with_retries attempt number: %s',
+                     retry_number)
+            retry_number += 1
+            try:
+                received_data = self.make_udp_request(
+                    vip_address, dst_port, timeout=socket_timeout,
+                    source_port=src_port)
+                break
+            except Exception as e:
+                LOG.warning('make_udp_request has failed with: %s', e)
+        return received_data
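
For context, a minimal usage sketch of the new ``make_udp_requests_with_retries`` helper (the VIP address, ports and retry count below are placeholders for illustration, not values from this change)::

    # Hypothetical call site inside a UDP traffic scenario test.
    received = self.make_udp_requests_with_retries(
        vip_address='203.0.113.10',
        number_of_retries=3,
        dst_port=8081,
        src_port=10000,
        socket_timeout=20)
    if received is None:
        # All retries failed; distinguish network loss from a backend
        # problem before failing the test.
        LOG.warning('All UDP retries against the VIP failed.')
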
diff --git a/octavia_tempest_plugin/tests/waiters.py b/octavia_tempest_plugin/tests/waiters.py
index fa6c112..820831a 100644
--- a/octavia_tempest_plugin/tests/waiters.py
+++ b/octavia_tempest_plugin/tests/waiters.py
@@ -48,8 +48,8 @@
     :returns: The object details from the show client.
     """
     start = int(time.time())
-    LOG.info('Waiting for {name} status to update to {status}'.format(
-        name=show_client.__name__, status=status))
+    LOG.info('Waiting for %s status to update to %s',
+             show_client.__name__, status)
     while True:
         if status == const.DELETED:
             try:
@@ -65,8 +65,8 @@
             object_details = response
 
         if object_details[status_key] == status:
-            LOG.info('{name}\'s status updated to {status}.'.format(
-                name=show_client.__name__, status=status))
+            LOG.info('%s\'s status updated to %s.',
+                     show_client.__name__, status)
             return object_details
         elif object_details[status_key] == 'ERROR' and not error_ok:
             message = ('{name} {field} updated to an invalid state of '
@@ -148,8 +148,8 @@
     :returns: None
     """
     start = int(time.time())
-    LOG.info('Waiting for {name} status to update to DELETED or be not '
-             'found(404)'.format(name=show_client.__name__))
+    LOG.info('Waiting for %s status to update to DELETED or be not found(404)',
+             show_client.__name__)
     while True:
         try:
             response = show_client(id, **kwargs)
@@ -162,8 +162,8 @@
             object_details = response
 
         if object_details[status_key] == const.DELETED:
-            LOG.info('{name}\'s status updated to DELETED.'.format(
-                name=show_client.__name__))
+            LOG.info('%s\'s status updated to DELETED.',
+                     show_client.__name__)
             return
         elif int(time.time()) - start >= check_timeout:
             message = (
diff --git a/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml b/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml
new file mode 100644
index 0000000..f88f51b
--- /dev/null
+++ b/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added API and scenario tests for PROMETHEUS listeners.
diff --git a/releasenotes/notes/Make-keystone-default-roles-tests-default-a65a62864b93a296.yaml b/releasenotes/notes/Make-keystone-default-roles-tests-default-a65a62864b93a296.yaml
new file mode 100644
index 0000000..d38181a
--- /dev/null
+++ b/releasenotes/notes/Make-keystone-default-roles-tests-default-a65a62864b93a296.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+  - |
+    The default tempest tests now expect the keystone default roles RBAC to
+    be enabled. The legacy behavior can be enabled by setting
+    "RBAC_test_type: advanced" in the [load_balancer] section of your
+    tempest.conf. This change also removes the ability to test system scoped
+    tokens with Octavia.
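
For clarity, the legacy behavior described in the note above corresponds to the following fragment in tempest.conf (shown only to illustrate the option named in the note)::

    [load_balancer]
    RBAC_test_type = advanced
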
diff --git a/releasenotes/notes/Make-sure-member-credentials-have-the-member-role-for-new-defaults-5fbc2e05768c04b9.yaml b/releasenotes/notes/Make-sure-member-credentials-have-the-member-role-for-new-defaults-5fbc2e05768c04b9.yaml
new file mode 100644
index 0000000..89fc5a6
--- /dev/null
+++ b/releasenotes/notes/Make-sure-member-credentials-have-the-member-role-for-new-defaults-5fbc2e05768c04b9.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Some services are now enabling the "new default roles", which means all
+    non-admin users must have the "member" or "reader" role. This fix updates
+    the test credentials to include these roles when not running in
+    admin-or-owner test mode.
diff --git a/releasenotes/notes/add-load-balancer-with-custom-flavor-9a27a1d4142a84d3.yaml b/releasenotes/notes/add-load-balancer-with-custom-flavor-9a27a1d4142a84d3.yaml
new file mode 100644
index 0000000..5d9837c
--- /dev/null
+++ b/releasenotes/notes/add-load-balancer-with-custom-flavor-9a27a1d4142a84d3.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added a load-balancer scenario test with a custom amphora compute flavor
+    (using Octavia flavor and flavor profile features).
diff --git a/releasenotes/notes/add-pool-client-auth-scenario-02abca554e60d3da.yaml b/releasenotes/notes/add-pool-client-auth-scenario-02abca554e60d3da.yaml
new file mode 100644
index 0000000..b14517f
--- /dev/null
+++ b/releasenotes/notes/add-pool-client-auth-scenario-02abca554e60d3da.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added scenario test coverage for pool client authentication.
diff --git a/releasenotes/notes/add-tls-terminated-listener-api-tests-2c4de76fe04b0409.yaml b/releasenotes/notes/add-tls-terminated-listener-api-tests-2c4de76fe04b0409.yaml
new file mode 100644
index 0000000..83ea11e
--- /dev/null
+++ b/releasenotes/notes/add-tls-terminated-listener-api-tests-2c4de76fe04b0409.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added HTTPS-terminated Listener API tests. Note: these tests require the noop_cert_manager when using the noop drivers.
diff --git a/releasenotes/notes/drop-python38-support-c5d27b136d2222b9.yaml b/releasenotes/notes/drop-python38-support-c5d27b136d2222b9.yaml
new file mode 100644
index 0000000..511fb7b
--- /dev/null
+++ b/releasenotes/notes/drop-python38-support-c5d27b136d2222b9.yaml
@@ -0,0 +1,6 @@
+---
+upgrade:
+  - |
+    Support for Python 3.8 has been dropped. The last release of
+    octavia-tempest-plugin to support Python 3.8 is 2.8.0.
+    The minimum version of Python now supported is Python 3.9.
diff --git a/releasenotes/notes/keystone-default-roles-changes-10733c184f0eebc3.yaml b/releasenotes/notes/keystone-default-roles-changes-10733c184f0eebc3.yaml
new file mode 100644
index 0000000..f3ebc26
--- /dev/null
+++ b/releasenotes/notes/keystone-default-roles-changes-10733c184f0eebc3.yaml
@@ -0,0 +1,9 @@
+---
+other:
+  - |
+    When using the ``keystone_default_roles`` RBAC tests, the
+    ``load-balancer_*`` roles are no longer used by tempest; it relies only on
+    the keystone ``admin``, ``member``, ``reader`` roles. The
+    ``[load_balancer].member_role``, ``[load_balancer].admin_role``,
+    ``[load_balancer].observer_role`` and
+    ``[load_balancer].global_observer_role`` settings are ignored.
diff --git a/releasenotes/notes/pools-service-client-alpn-support-7de3bcd3c901ff1a.yaml b/releasenotes/notes/pools-service-client-alpn-support-7de3bcd3c901ff1a.yaml
new file mode 100644
index 0000000..e7554e2
--- /dev/null
+++ b/releasenotes/notes/pools-service-client-alpn-support-7de3bcd3c901ff1a.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added support for ALPN selection in the pools service client.
diff --git a/requirements.txt b/requirements.txt
index 7b2f0a2..afa0565 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-cryptography>=2.1 # BSD/Apache-2.0
+cryptography>=3.0 # BSD/Apache-2.0
 python-dateutil>=2.5.3 # BSD
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 954fe98..ed187d4 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -6,7 +6,7 @@
 author = OpenStack
 author_email = openstack-discuss@lists.openstack.org
 home_page = https://docs.openstack.org/octavia-tempest-plugin/latest/
-python_requires = >=3.6
+python_requires = >=3.9
 classifier =
     Environment :: OpenStack
     Intended Audience :: Information Technology
@@ -17,37 +17,17 @@
     Programming Language :: Python :: Implementation :: CPython
     Programming Language :: Python :: 3 :: Only
     Programming Language :: Python :: 3
-    Programming Language :: Python :: 3.6
-    Programming Language :: Python :: 3.7
-    Programming Language :: Python :: 3.8
-
-[global]
-setup_hooks =
-    pbr.hooks.setup_hook
+    Programming Language :: Python :: 3.9
+    Programming Language :: Python :: 3.10
+    Programming Language :: Python :: 3.11
+    Programming Language :: Python :: 3.12
 
 [files]
 packages =
     octavia_tempest_plugin
 
-[compile_catalog]
-directory = octavia_tempest_plugin/locale
-domain = octavia_tempest_plugin
-
-[update_catalog]
-domain = octavia_tempest_plugin
-output_dir = octavia_tempest_plugin/locale
-input_file = octavia_tempest_plugin/locale/octavia_tempest_plugin.pot
-
-[extract_messages]
-keywords = _ gettext ngettext l_ lazy_gettext
-mapping_file = babel.cfg
-output_file = octavia_tempest_plugin/locale/octavia_tempest_plugin.pot
-
-[build_releasenotes]
-all_files = 1
-build-dir = releasenotes/build
-source-dir = releasenotes/source
-
 [entry_points]
 tempest.test_plugins =
     octavia-tempest-plugin = octavia_tempest_plugin.plugin:OctaviaTempestPlugin
+oslo.config.opts =
+    octavia-tempest-plugin = octavia_tempest_plugin.opts:list_opts
diff --git a/test-requirements.txt b/test-requirements.txt
index 2125ea0..bb4fcfe 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -2,7 +2,7 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-hacking>=3.0.1,<3.1.0;python_version>='3.5' # Apache-2.0
+hacking>=6.1.0,<6.2.0 # Apache-2.0
 
 coverage!=4.4,>=4.0 # Apache-2.0
 python-subunit>=1.0.0 # Apache-2.0/BSD
diff --git a/tox.ini b/tox.ini
index f00ef83..a4649bf 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,6 @@
 [tox]
-minversion = 3.1
+minversion = 3.18.0
 envlist = pep8
-skipsdist = True
 ignore_basepython_conflict = True
 
 [testenv]
@@ -32,7 +31,7 @@
 setenv =
   {[testenv]setenv}
   PYTHON=coverage run --source octavia_tempest_plugin --parallel-mode
-whitelist_externals =
+allowlist_externals =
   find
 commands =
   find octavia_tempest_plugin -type f -name "*.pyc" -delete
@@ -48,14 +47,14 @@
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/test-requirements.txt
     -r{toxinidir}/doc/requirements.txt
-whitelist_externals = rm
+allowlist_externals = rm
 commands =
   rm -rf doc/build
   sphinx-build -W -b html doc/source doc/build/html
 
 [testenv:pdf-docs]
 deps = {[testenv:docs]deps}
-whitelist_externals =
+allowlist_externals =
   make
   rm
 commands =
@@ -100,12 +99,11 @@
   O346 = checks:check_line_continuation_no_backslash
   O347 = checks:revert_must_have_kwargs
   O348 = checks:check_no_logging_imports
-  O349 = checks:check_no_import_mock
 paths =
   ./octavia_tempest_plugin/hacking
 
 [testenv:genconfig]
-whitelist_externals = mkdir
+allowlist_externals = mkdir
 commands =
          mkdir -p etc
          oslo-config-generator --output-file etc/octavia.tempest.conf.sample \
@@ -114,6 +112,6 @@
 [testenv:requirements]
 deps =
   -egit+https://opendev.org/openstack/requirements#egg=openstack-requirements
-whitelist_externals = sh
+allowlist_externals = sh
 commands =
     sh -c '{envdir}/src/openstack-requirements/playbooks/files/project-requirements-change.py --req {envdir}/src/openstack-requirements --local {toxinidir} master'
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index f4dd9db..ec8e774 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -19,10 +19,20 @@
           - controller
 
 - nodeset:
-    name: octavia-single-node-centos-7
+    name: octavia-single-node-ubuntu-jammy
     nodes:
       - name: controller
-        label: nested-virt-centos-7
+        label: nested-virt-ubuntu-jammy
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
+    name: octavia-single-node-ubuntu-noble
+    nodes:
+      - name: controller
+        label: nested-virt-ubuntu-noble
     groups:
       - name: tempest
         nodes:
@@ -59,12 +69,22 @@
           - controller
 
 - nodeset:
+    name: octavia-single-node-rockylinux-9
+    nodes:
+      - name: controller
+        label: nested-virt-rockylinux-9
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
     name: octavia-two-node
     nodes:
       - name: controller
-        label: nested-virt-ubuntu-focal
+        label: nested-virt-ubuntu-noble
       - name: controller2
-        label: nested-virt-ubuntu-focal
+        label: nested-virt-ubuntu-noble
     groups:
       - name: controller
         nodes:
@@ -209,7 +229,7 @@
 - job:
     name: octavia-dsvm-live-base
     parent: octavia-dsvm-base
-    nodeset: octavia-single-node-ubuntu-focal
+    nodeset: octavia-single-node-ubuntu-noble
     timeout: 9000
     required-projects:
       - openstack/diskimage-builder
@@ -245,7 +265,7 @@
 - job:
     name: octavia-dsvm-live-base-ipv6-only
     parent: octavia-dsvm-base-ipv6-only
-    nodeset: octavia-single-node-ubuntu-focal
+    nodeset: octavia-single-node-ubuntu-noble
     timeout: 9000
     required-projects:
       - openstack/diskimage-builder
@@ -385,16 +405,13 @@
           barbican: false
           dstat: true
           g-api: true
-          g-reg: true
           key: true
           memory_tracker: false
           mysql: true
           n-api: true
           n-api-meta: true
-          n-cauth: true
           n-cond: true
           n-cpu: true
-          n-obj: true
           n-sch: true
           neutron-qos: true
           octavia: true
@@ -494,6 +511,8 @@
               enabled: True
             audit_middleware_notifications:
               driver: log
+            certificates:
+              cert_manager: noop_cert_manager
       tempest_concurrency: 4
       tempest_test_regex: ^octavia_tempest_plugin.tests.api.v2
       tox_envlist: all
@@ -503,24 +522,22 @@
       - ^doc/.*$
       - ^etc/.*$
       - ^releasenotes/.*$
-      - ^octavia_tempest_plugin/tests/(?!api/|\w+\.py).*
+      - ^octavia_tempest_plugin/tests/act_stdby_scenario/.*
+      - ^octavia_tempest_plugin/tests/barbican_scenario/.*
+      - ^octavia_tempest_plugin/tests/scenario/.*
+      - ^octavia_tempest_plugin/tests/spare_pool_scenario/.*  # TODO: remove
 
 - job:
-    name: octavia-v2-dsvm-noop-api-scoped-tokens
+    name: octavia-v2-dsvm-noop-api-advanced-rbac
     parent: octavia-v2-dsvm-noop-api
     vars:
+      devstack_localrc:
+        OCTAVIA_USE_ADVANCED_RBAC: True
       devstack_local_conf:
-        post-config:
-          $OCTAVIA_CONF:
-            oslo_policy:
-              enforce_scope: True
-              enforce_new_defaults: True
         test-config:
           "$TEMPEST_CONFIG":
-            enforce_scope:
-              octavia: True
             load_balancer:
-              enforce_new_defaults: True
+              RBAC_test_type: advanced
 
 - job:
     name: octavia-v2-dsvm-noop-py2-api
@@ -530,22 +547,43 @@
         USE_PYTHON3: False
 
 - job:
-    name: octavia-v2-dsvm-noop-api-stable-yoga
+    name: octavia-v2-dsvm-noop-api-stable-2024-2
     parent: octavia-v2-dsvm-noop-api
-    override-checkout: stable/yoga
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
 
 - job:
-    name: octavia-v2-dsvm-noop-api-stable-xena
+    name: octavia-v2-dsvm-noop-api-stable-2024-1
     parent: octavia-v2-dsvm-noop-api
-    override-checkout: stable/xena
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.1
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
 
 - job:
-    name: octavia-v2-dsvm-noop-api-stable-wallaby
+    name: octavia-v2-dsvm-noop-api-stable-2023-2
     parent: octavia-v2-dsvm-noop-api
-    override-checkout: stable/wallaby
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
 
 - job:
-    name: octavia-v2-dsvm-scenario
+    name: octavia-v2-dsvm-scenario-base
     parent: octavia-dsvm-live-base
     vars:
       devstack_local_conf:
@@ -566,7 +604,36 @@
       - ^doc/.*$
       - ^etc/.*$
       - ^releasenotes/.*$
-      - ^octavia_tempest_plugin/tests/(?!scenario/|\w+\.py).*
+      - ^octavia_tempest_plugin/tests/act_stdby_scenario/.*
+      - ^octavia_tempest_plugin/tests/api/.*
+      - ^octavia_tempest_plugin/tests/barbican_scenario/.*
+      - ^octavia_tempest_plugin/tests/spare_pool_scenario/.*  # TODO: remove
+
+- job:
+    name: octavia-v2-dsvm-scenario
+    parent: octavia-v2-dsvm-scenario-base
+    branches:
+      regex: ^(unmaintained/|stable/(2023.2|2024.1|2024.2))
+      negate: true
+    nodeset: octavia-single-node-ubuntu-noble
+
+- job:
+    name: octavia-v2-dsvm-scenario
+    parent: octavia-v2-dsvm-scenario-base
+    branches: ^(stable/(2023.2|2024.1|2024.2))
+    nodeset: octavia-single-node-ubuntu-jammy
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops
+    parent: octavia-v2-dsvm-scenario
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops
+    parent: octavia-v2-dsvm-scenario
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
 
 - job:
     name: octavia-v2-dsvm-scenario-ipv6-only
@@ -586,7 +653,10 @@
       - ^doc/.*$
       - ^etc/.*$
       - ^releasenotes/.*$
-      - ^octavia_tempest_plugin/tests/(?!scenario/|\w+\.py).*
+      - ^octavia_tempest_plugin/tests/act_stdby_scenario/.*
+      - ^octavia_tempest_plugin/tests/api/.*
+      - ^octavia_tempest_plugin/tests/barbican_scenario/.*
+      - ^octavia_tempest_plugin/tests/spare_pool_scenario/.*  # TODO: remove
 
 - job:
     name: octavia-v2-dsvm-py2-scenario
@@ -599,19 +669,76 @@
         override-checkout: 2.30.0
 
 - job:
-    name: octavia-v2-dsvm-scenario-stable-yoga
+    name: octavia-v2-dsvm-scenario-stable-2024-2
     parent: octavia-v2-dsvm-scenario
-    override-checkout: stable/yoga
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
 
 - job:
-    name: octavia-v2-dsvm-scenario-stable-xena
-    parent: octavia-v2-dsvm-scenario
-    override-checkout: stable/xena
+    name: octavia-v2-dsvm-scenario-traffic-ops-stable-2024-2
+    parent: octavia-v2-dsvm-scenario-stable-2024-2
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
 
 - job:
-    name: octavia-v2-dsvm-scenario-stable-wallaby
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-stable-2024-2
+    parent: octavia-v2-dsvm-scenario-stable-2024-2
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
+    name: octavia-v2-dsvm-scenario-stable-2024-1
     parent: octavia-v2-dsvm-scenario
-    override-checkout: stable/wallaby
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.1
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops-stable-2024-1
+    parent: octavia-v2-dsvm-scenario-stable-2024-1
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-stable-2024-1
+    parent: octavia-v2-dsvm-scenario-stable-2024-1
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
+    name: octavia-v2-dsvm-scenario-stable-2023-2
+    parent: octavia-v2-dsvm-scenario
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops-stable-2023-2
+    parent: octavia-v2-dsvm-scenario-stable-2023-2
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-2
+    parent: octavia-v2-dsvm-scenario-stable-2023-2
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
 
 # Legacy jobs for the transition to the act-stdby two node jobs
 - job:
@@ -627,7 +754,10 @@
       - ^doc/.*$
       - ^etc/.*$
       - ^releasenotes/.*$
-      - ^octavia_tempest_plugin/tests/(?!scenario/|\w+\.py).*
+      - ^octavia_tempest_plugin/tests/act_stdby_scenario/.*
+      - ^octavia_tempest_plugin/tests/api/.*
+      - ^octavia_tempest_plugin/tests/barbican_scenario/.*
+      - ^octavia_tempest_plugin/tests/spare_pool_scenario/.*  # TODO: remove
 
 - job:
     name: octavia-v2-dsvm-py2-scenario-two-node
@@ -676,16 +806,6 @@
                 loadbalancer_topology: ACTIVE_STANDBY
 
 - job:
-    name: octavia-v2-dsvm-py2-scenario-centos-7
-    parent: octavia-v2-dsvm-py2-scenario
-    nodeset: octavia-single-node-centos-7
-    vars:
-      devstack_localrc:
-        OCTAVIA_AMP_BASE_OS: centos
-        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 7
-        OCTAVIA_AMP_IMAGE_SIZE: 3
-
-- job:
     name: octavia-v2-dsvm-scenario-centos-8
     parent: octavia-v2-dsvm-scenario
     nodeset: octavia-single-node-centos-8
@@ -723,15 +843,60 @@
               ssh_key_type: ecdsa
 
 - job:
-    name: octavia-v2-dsvm-scenario-ubuntu-focal
+    name: octavia-v2-dsvm-scenario-centos-9-stream-traffic-ops
+    parent: octavia-v2-dsvm-scenario-centos-9-stream
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-centos-9-stream-non-traffic-ops
+    parent: octavia-v2-dsvm-scenario-centos-9-stream
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
+    name: octavia-v2-dsvm-scenario-rockylinux-9
     parent: octavia-v2-dsvm-scenario
+    nodeset: octavia-single-node-rockylinux-9
+    vars:
+      devstack_localrc:
+        OCTAVIA_AMP_BASE_OS: rocky
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 9
+        OCTAVIA_AMP_IMAGE_SIZE: 3
+        OCTAVIA_SSH_KEY_TYPE: ecdsa
+        OCTAVIA_SSH_KEY_BITS: 256
+        # Temporary workaround to fix centos 9 stream and rocky, they don't
+        # work with the new default value of GLOBAL_VENV in devstack
+        GLOBAL_VENV: false
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            validation:
+              ssh_key_type: ecdsa
+
+- job:
+    name: octavia-v2-dsvm-scenario-rockylinux-9-traffic-ops
+    parent: octavia-v2-dsvm-scenario-rockylinux-9
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-rockylinux-9-non-traffic-ops
+    parent: octavia-v2-dsvm-scenario-rockylinux-9
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
+    name: octavia-v2-dsvm-scenario-ubuntu-jammy
+    parent: octavia-v2-dsvm-scenario
+    nodeset: octavia-single-node-ubuntu-jammy
     vars:
       devstack_localrc:
         OCTAVIA_AMP_BASE_OS: ubuntu
-        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: focal
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: jammy
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican
+    name: octavia-v2-dsvm-tls-barbican-base
     parent: octavia-v2-dsvm-scenario
     required-projects:
       - openstack/barbican
@@ -753,39 +918,60 @@
       - ^doc/.*$
       - ^etc/.*$
       - ^releasenotes/.*$
-      - ^octavia_tempest_plugin/tests/(?!barbican_scenario/|\w+\.py).*
+      - ^octavia_tempest_plugin/tests/act_stdby_scenario/.*
+      - ^octavia_tempest_plugin/tests/api/.*
+      - ^octavia_tempest_plugin/tests/scenario/.*
+      - ^octavia_tempest_plugin/tests/spare_pool_scenario/.*  # TODO: remove
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-yoga
-    parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/yoga
+    name: octavia-v2-dsvm-tls-barbican
+    parent: octavia-v2-dsvm-tls-barbican-base
+    branches:
+      regex: ^(unmaintained/|stable/(2023.2|2024.1|2024.2))
+      negate: true
+    nodeset: octavia-single-node-ubuntu-noble
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-xena
-    parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/xena
+    name: octavia-v2-dsvm-tls-barbican
+    parent: octavia-v2-dsvm-tls-barbican-base
+    branches: ^(stable/(2023.2|2024.1|2024.2))
+    nodeset: octavia-single-node-ubuntu-jammy
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-wallaby
+    name: octavia-v2-dsvm-tls-barbican-stable-2024-2
     parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/wallaby
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-victoria
+    name: octavia-v2-dsvm-tls-barbican-stable-2024-1
     parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/victoria
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.1
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-ussuri
+    name: octavia-v2-dsvm-tls-barbican-stable-2023-2
     parent: octavia-v2-dsvm-tls-barbican
-    nodeset: octavia-single-node-ubuntu-bionic
-    override-checkout: stable/ussuri
-
-- job:
-    name: octavia-v2-dsvm-tls-barbican-stable-train
-    parent: octavia-v2-dsvm-tls-barbican
-    nodeset: octavia-single-node-ubuntu-bionic
-    override-checkout: stable/train
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
 
 - job:
     name: octavia-v2-dsvm-spare-pool
@@ -813,11 +999,6 @@
         override-checkout: 2.30.0
 
 - job:
-    name: octavia-v2-dsvm-spare-pool-stable-wallaby
-    parent: octavia-v2-dsvm-spare-pool
-    override-checkout: stable/wallaby
-
-- job:
     name: octavia-v2-dsvm-cinder-amphora
     parent: octavia-v2-dsvm-scenario
     required-projects:
@@ -872,11 +1053,6 @@
     parent: octavia-v2-dsvm-scenario
 
 - job:
-    name: octavia-v2-dsvm-scenario-centos-7
-    parent: octavia-v2-dsvm-py2-scenario-centos-7
-    nodeset: octavia-single-node-centos-7
-
-- job:
     name: octavia-v2-act-stdby-iptables-dsvm-scenario
     parent: octavia-dsvm-live-base
     pre-run: playbooks/act_stby_iptables/pre.yaml
@@ -914,23 +1090,7 @@
         override-checkout: 2.30.0
 
 - job:
-    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7
-    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
-    nodeset: octavia-single-node-centos-7
-    vars:
-      devstack_localrc:
-        USE_PYTHON3: False
-        OCTAVIA_AMP_BASE_OS: centos
-        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 7
-        OCTAVIA_AMP_IMAGE_SIZE: 3
-      devstack_local_conf:
-        test-config:
-          "$TEMPEST_CONFIG":
-            load_balancer:
-              amphora_ssh_user: centos
-
-- job:
-    name: octavia-v2-act-stdby-dsvm-scenario
+    name: octavia-v2-act-stdby-dsvm-scenario-base
     parent: octavia-dsvm-live-base
     vars:
       devstack_local_conf:
@@ -947,19 +1107,74 @@
       tox_envlist: all
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-yoga
-    parent: octavia-v2-act-stdby-dsvm-scenario
-    override-checkout: stable/yoga
+    name: octavia-v2-act-stdby-dsvm-scenario
+    parent: octavia-v2-act-stdby-dsvm-scenario-base
+    branches:
+      regex: ^(unmaintained/|stable/(2023.2|2024.1|2024.2))
+      negate: true
+    nodeset: octavia-single-node-ubuntu-noble
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-xena
-    parent: octavia-v2-act-stdby-dsvm-scenario
-    override-checkout: stable/xena
+    name: octavia-v2-act-stdby-dsvm-scenario
+    parent: octavia-v2-act-stdby-dsvm-scenario-base
+    branches: ^(stable/(2023.2|2024.1|2024.2))
+    nodeset: octavia-single-node-ubuntu-jammy
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-wallaby
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-2024-2
     parent: octavia-v2-act-stdby-dsvm-scenario
-    override-checkout: stable/wallaby
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-2024-1
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2024.1
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-2023-2
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
+    vars:
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              RBAC_test_type: advanced
+
+# Temporary job, remove once it is no longer used in octavia gates
+- job:
+    name: octavia-v2-dsvm-noop-api-keystone-default-roles
+    parent: octavia-v2-dsvm-noop-api
+    vars:
+      devstack_localrc:
+        OCTAVIA_USE_KEYSTONE_DEFAULT_ROLES: True
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            oslo_policy:
+              enforce_scope: True
+              enforce_new_defaults: True
+        test-config:
+          "$TEMPEST_CONFIG":
+            enforce_scope:
+              octavia: True
+            load_balancer:
+              RBAC_test_type: keystone_default_roles
 
 ######### Third party jobs ##########
 
@@ -968,7 +1183,7 @@
     parent: ovn-octavia-provider-tempest-release
     description: Runs the neutron OVN provider driver for Octavia api test.
     voting: false
-    timeout: 5400
+    timeout: 7800
     attempts: 1
     tags: ovn-octavia-provider
     irrelevant-files:
@@ -997,7 +1212,7 @@
     parent: ovn-octavia-provider-tempest-release
     description: Runs the neutron OVN provider driver for Octavia scenario test.
     voting: false
-    timeout: 5400
+    timeout: 7800
     attempts: 1
     tags: ovn-octavia-provider
     irrelevant-files:
@@ -1014,6 +1229,7 @@
       - ^octavia/volume/.*$
       - ^octavia/tests/.*$
     vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2
       devstack_local_conf:
         test-config:
           "$TEMPEST_CONFIG":
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index 116da49..9034367 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -9,35 +9,41 @@
     check:
       jobs:
         - octavia-v2-dsvm-noop-api
-        - octavia-v2-dsvm-noop-api-stable-yoga
-        - octavia-v2-dsvm-noop-api-stable-xena
-        - octavia-v2-dsvm-noop-api-stable-wallaby
-        - octavia-v2-dsvm-noop-api-scoped-tokens
-        - octavia-v2-dsvm-scenario
-        - octavia-v2-dsvm-scenario-stable-yoga
-        - octavia-v2-dsvm-scenario-stable-xena
-        - octavia-v2-dsvm-scenario-stable-wallaby
+        - octavia-v2-dsvm-noop-api-stable-2024-2
+        - octavia-v2-dsvm-noop-api-stable-2024-1
+        - octavia-v2-dsvm-noop-api-stable-2023-2
+        - octavia-v2-dsvm-noop-api-advanced-rbac
+        - octavia-v2-dsvm-scenario-traffic-ops
+        - octavia-v2-dsvm-scenario-non-traffic-ops
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2024-2
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2024-2
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2024-1
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2024-1
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2023-2
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-2
         - octavia-v2-dsvm-tls-barbican
-        - octavia-v2-dsvm-tls-barbican-stable-yoga
-        - octavia-v2-dsvm-tls-barbican-stable-xena
-        - octavia-v2-dsvm-tls-barbican-stable-wallaby
+        - octavia-v2-dsvm-tls-barbican-stable-2024-2
+        - octavia-v2-dsvm-tls-barbican-stable-2024-1
+        - octavia-v2-dsvm-tls-barbican-stable-2023-2
         - octavia-v2-dsvm-scenario-ipv6-only:
             voting: false
-        - octavia-v2-dsvm-scenario-centos-8-stream:
+        - octavia-v2-dsvm-scenario-centos-9-stream-traffic-ops:
             voting: false
-        - octavia-v2-dsvm-scenario-centos-9-stream:
+        - octavia-v2-dsvm-scenario-centos-9-stream-non-traffic-ops:
+            voting: false
+        - octavia-v2-dsvm-scenario-rockylinux-9-traffic-ops:
+            voting: false
+        - octavia-v2-dsvm-scenario-rockylinux-9-non-traffic-ops:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario-two-node:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-yoga:
+        - octavia-v2-act-stdby-dsvm-scenario-stable-2024-2:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-xena:
+        - octavia-v2-act-stdby-dsvm-scenario-stable-2024-1:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-wallaby:
-            voting: false
-        - octavia-v2-dsvm-spare-pool-stable-wallaby:
+        - octavia-v2-act-stdby-dsvm-scenario-stable-2023-2:
             voting: false
         - octavia-v2-dsvm-cinder-amphora:
             voting: false
@@ -51,15 +57,19 @@
       fail-fast: true
       jobs:
         - octavia-v2-dsvm-noop-api
-        - octavia-v2-dsvm-noop-api-stable-yoga
-        - octavia-v2-dsvm-noop-api-stable-xena
-        - octavia-v2-dsvm-noop-api-stable-wallaby
-        - octavia-v2-dsvm-noop-api-scoped-tokens
-        - octavia-v2-dsvm-scenario
-        - octavia-v2-dsvm-scenario-stable-yoga
-        - octavia-v2-dsvm-scenario-stable-xena
-        - octavia-v2-dsvm-scenario-stable-wallaby
+        - octavia-v2-dsvm-noop-api-stable-2024-2
+        - octavia-v2-dsvm-noop-api-stable-2024-1
+        - octavia-v2-dsvm-noop-api-stable-2023-2
+        - octavia-v2-dsvm-noop-api-advanced-rbac
+        - octavia-v2-dsvm-scenario-traffic-ops
+        - octavia-v2-dsvm-scenario-non-traffic-ops
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2024-2
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2024-2
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2024-1
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2024-1
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2023-2
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-2
         - octavia-v2-dsvm-tls-barbican
-        - octavia-v2-dsvm-tls-barbican-stable-yoga
-        - octavia-v2-dsvm-tls-barbican-stable-xena
-        - octavia-v2-dsvm-tls-barbican-stable-wallaby
+        - octavia-v2-dsvm-tls-barbican-stable-2024-2
+        - octavia-v2-dsvm-tls-barbican-stable-2024-1
+        - octavia-v2-dsvm-tls-barbican-stable-2023-2