Merge "Add *.orig to .gitignore"
diff --git a/README.rst b/README.rst
index 8b5bd3c..beb0f5a 100644
--- a/README.rst
+++ b/README.rst
@@ -30,20 +30,26 @@
 Installing
 ----------
 
-Clone this repository to the destination machine, and call from the repo::
+From the tempest directory, set up the tempest virtual environment for the
+Octavia tempest plugin::
 
-    $ pip install -e .
+    $ tox -e venv-tempest -- pip3 install -e <path to octavia-tempest-plugin>
+
+For example, when using a typical devstack setup::
+
+    $ cd /opt/stack/tempest
+    $ tox -e venv-tempest -- pip3 install -e /opt/stack/octavia-tempest-plugin
 
 Running the tests
 -----------------
 
 To run all the tests from this plugin, call from the tempest repo::
 
-    $ tox -e all-plugin -- octavia_tempest_plugin
+    $ tox -e all -- octavia_tempest_plugin
 
 To run a single test case, call with the full path, for example::
 
-    $ tox -e all-plugin -- octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_traffic
+    $ tox -e all -- octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_traffic
 
 To retrieve a list of all tempest tests, run::
 
diff --git a/octavia_tempest_plugin/common/cert_utils.py b/octavia_tempest_plugin/common/cert_utils.py
index 753da6b..34d0d7d 100644
--- a/octavia_tempest_plugin/common/cert_utils.py
+++ b/octavia_tempest_plugin/common/cert_utils.py
@@ -21,7 +21,6 @@
 from cryptography.hazmat.primitives.serialization import pkcs12
 from cryptography import x509
 from cryptography.x509.oid import NameOID
-import OpenSSL
 
 
 def generate_ca_cert_and_key():
@@ -176,38 +175,13 @@
 def generate_pkcs12_bundle(server_cert, server_key):
     """Creates a pkcs12 formated bundle.
 
-    Note: This uses pyOpenSSL as the cryptography package does not yet
-          support creating pkcs12 bundles. The currently un-released
-          2.5 version of cryptography supports reading pkcs12, but not
-          creation. This method should be updated to only use
-          cryptography once it supports creating pkcs12 bundles.
-
     :param server_cert: A cryptography certificate (x509) object.
     :param server_key: A cryptography key (x509) object.
     :returns: A pkcs12 bundle.
     """
-    # Use the PKCS12 serialization function from cryptography if it exists
-    # (>=3.0), otherwise use the pyOpenSSL module.
-    #
-    # The PKCS12 class of the pyOpenSSL module is not compliant with FIPS.
-    # It uses the SHA1 function [0] which is not allowed when generating
-    # digital signatures [1]
-    #
-    # [0] https://github.com/pyca/pyopenssl/blob/
-    #       65ca53a7a06a7c78c1749200a6b3a007e47d3214/src/OpenSSL/
-    #       crypto.py#L2748-L2749
-    # [1] https://nvlpubs.nist.gov/nistpubs/SpecialPublications/
-    #       NIST.SP.800-131Ar1.pdf
-    if hasattr(pkcs12, 'serialize_key_and_certificates'):
-        p12 = pkcs12.serialize_key_and_certificates(
-            b'', server_key, server_cert,
-            cas=None, encryption_algorithm=NoEncryption())
-    else:
-        p12 = OpenSSL.crypto.PKCS12()
-        p12.set_privatekey(
-            OpenSSL.crypto.PKey.from_cryptography_key(server_key))
-        p12.set_certificate(OpenSSL.crypto.X509.from_cryptography(server_cert))
-        p12 = p12.export()
+    p12 = pkcs12.serialize_key_and_certificates(
+        b'', server_key, server_cert,
+        cas=None, encryption_algorithm=NoEncryption())
     return p12
 
 
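For reference, a minimal usage sketch of the helpers above, assuming
``generate_ca_cert_and_key`` returns a ``(cert, key)`` tuple and that a
``generate_server_cert_and_key(ca_cert, ca_key, server_uuid)`` helper exists
in this module (only the pkcs12 helper is visible in this hunk)::

    from cryptography.hazmat.primitives.serialization import pkcs12

    from octavia_tempest_plugin.common import cert_utils

    ca_cert, ca_key = cert_utils.generate_ca_cert_and_key()
    server_cert, server_key = cert_utils.generate_server_cert_and_key(
        ca_cert, ca_key, 'server-instance-1')  # assumed helper/signature
    bundle = cert_utils.generate_pkcs12_bundle(server_cert, server_key)

    # Round-trip with the same cryptography module; no passphrase,
    # matching the NoEncryption() used above.
    key, cert, extra = pkcs12.load_key_and_certificates(bundle, None)
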
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 174589c..e3bd83e 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -45,6 +45,7 @@
 PROVIDER_NAME = 'provider_name'
 PROVISIONING_STATUS = 'provisioning_status'
 REQUEST_ERRORS = 'request_errors'
+TLS_CONTAINER_REF = 'tls_container_ref'
 TOTAL_CONNECTIONS = 'total_connections'
 UPDATED_AT = 'updated_at'
 VIP_ADDRESS = 'vip_address'
@@ -130,6 +131,8 @@
 HTTP = 'HTTP'
 HTTPS = 'HTTPS'
 PROXY = 'PROXY'
+PROMETHEUS = 'PROMETHEUS'
+SCTP = 'SCTP'
 TCP = 'TCP'
 TERMINATED_HTTPS = 'TERMINATED_HTTPS'
 UDP = 'UDP'
@@ -139,12 +142,18 @@
 POST = 'POST'
 PUT = 'PUT'
 DELETE = 'DELETE'
+HEAD = 'HEAD'
+OPTIONS = 'OPTIONS'
+PATCH = 'PATCH'
+CONNECT = 'CONNECT'
+TRACE = 'TRACE'
 
 # HM Types
 HEALTH_MONITOR_PING = 'PING'
 HEALTH_MONITOR_TCP = 'TCP'
 HEALTH_MONITOR_HTTP = 'HTTP'
 HEALTH_MONITOR_HTTPS = 'HTTPS'
+HEALTH_MONITOR_SCTP = 'SCTP'
 HEALTH_MONITOR_TLS_HELLO = 'TLS-HELLO'
 HEALTH_MONITOR_UDP_CONNECT = 'UDP-CONNECT'
 
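These constants are consumed as request kwargs throughout the test suite; a
small illustration with the new PROMETHEUS protocol (``lb_id`` stands in for
an existing load balancer ID)::

    from octavia_tempest_plugin.common import constants as const

    listener_kwargs = {
        const.NAME: 'lb_member_listener1-prometheus',
        const.PROTOCOL: const.PROMETHEUS,  # added by this change
        const.PROTOCOL_PORT: 8090,
        const.LOADBALANCER_ID: lb_id,  # placeholder value
    }
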
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 4d1543b..9e259d3 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -61,6 +61,9 @@
     cfg.StrOpt('octavia_svc_username', default='admin',
                help='The service_auth username the Octavia services are using '
                     'to access other OpenStack services.'),
+    cfg.BoolOpt('log_user_roles',
+                default=True,
+                help='Log the user roles at the start of every test.'),
     # load-balancer specific options
     cfg.FloatOpt('check_interval',
                  default=5,
@@ -241,7 +244,11 @@
                 help='Do the load-balancer service API policies enforce '
                      'the new keystone default roles? This configuration '
                      'value should be the same as octavia.conf: '
-                     '[oslo_policy].enforce_new_defaults option.'),
+                     '[oslo_policy].enforce_new_defaults option.',
+                deprecated_for_removal=True,
+                deprecated_reason='Consolidated into the RBAC_test_type '
+                                  'setting.',
+                deprecated_since='bobcat'),
 ]
 
 lb_feature_enabled_group = cfg.OptGroup(name='loadbalancer-feature-enabled',
@@ -285,6 +292,8 @@
                 help="Whether the log offload tests will run. These require "
                      "the tempest instance have access to the log files "
                      "specified in the tempest configuration."),
+    cfg.BoolOpt('prometheus_listener_enabled', default=True,
+                help="Whether the PROMETHEUS listener tests will run."),
 ]
 
 # Extending this enforce_scope group defined in tempest
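An illustrative tempest.conf fragment for the options added above, using the
``load_balancer`` and ``loadbalancer-feature-enabled`` group names defined in
this module (the values shown are the defaults)::

    [load_balancer]
    log_user_roles = True

    [loadbalancer-feature-enabled]
    prometheus_listener_enabled = True
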
diff --git a/octavia_tempest_plugin/tests/RBAC_tests.py b/octavia_tempest_plugin/tests/RBAC_tests.py
index d31d506..8dae024 100644
--- a/octavia_tempest_plugin/tests/RBAC_tests.py
+++ b/octavia_tempest_plugin/tests/RBAC_tests.py
@@ -155,15 +155,14 @@
         """
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
+        # The legacy admin behavior changed during the sRBAC development;
+        # os_admin is still a valid admin [0].
+        # [0] https://governance.openstack.org/tc/goals/selected/
+        #       consistent-and-secure-rbac.html
+        #       #legacy-admin-continues-to-work-as-it-is
+        # TODO(gthiemonge) we may have to revisit it in the future if the
+        # legacy admin scope changes.
+        if 'os_system_admin' in expected_allowed:
             allowed_list.append('os_admin')
 
         # #### Test that disallowed credentials cannot access the API.
@@ -244,15 +243,14 @@
         """
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
+        # The legacy admin behavior changed during the sRBAC development;
+        # os_admin is still a valid admin [0].
+        # [0] https://governance.openstack.org/tc/goals/selected/
+        #       consistent-and-secure-rbac.html
+        #       #legacy-admin-continues-to-work-as-it-is
+        # TODO(gthiemonge) we may have to revisit it in the future if the
+        # legacy admin scope changes.
+        if 'os_system_admin' in expected_allowed:
             allowed_list.append('os_admin')
 
         # #### Test that disallowed credentials cannot access the API.
@@ -371,15 +369,14 @@
         """
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
+        # The legacy admin behavior changed during the sRBAC development;
+        # os_admin is still a valid admin [0].
+        # [0] https://governance.openstack.org/tc/goals/selected/
+        #       consistent-and-secure-rbac.html
+        #       #legacy-admin-continues-to-work-as-it-is
+        # TODO(gthiemonge) we may have to revisit it in the future if the
+        # legacy admin scope changes.
+        if 'os_system_admin' in expected_allowed:
             allowed_list.append('os_admin')
 
         for cred in allowed_list:
@@ -439,15 +436,14 @@
         """
 
         allowed_list = copy.deepcopy(expected_allowed)
-        # os_admin is a special case as it is valid with the old defaults,
-        # but will not be with the new defaults and/or token scoping.
-        # The old keystone role "admin" becomes project scoped "admin"
-        # instead of being a global admin.
-        # To keep the tests simple, handle that edge case here.
-        # TODO(johnsom) Once token scope is default, remove this.
-        if ('os_system_admin' in expected_allowed and
-                not CONF.load_balancer.enforce_new_defaults and
-                not CONF.enforce_scope.octavia):
+        # The legacy admin behavior changed during the sRBAC development;
+        # os_admin is still a valid admin [0].
+        # [0] https://governance.openstack.org/tc/goals/selected/
+        #       consistent-and-secure-rbac.html
+        #       #legacy-admin-continues-to-work-as-it-is
+        # TODO(gthiemonge) we may have to revisit it in the future if the
+        # legacy admin scope changes.
+        if 'os_system_admin' in expected_allowed:
             allowed_list.append('os_admin')
 
         for cred in allowed_list:
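All four hunks in this file apply the same adjustment; condensed into a
standalone sketch::

    import copy

    def expand_allowed(expected_allowed):
        # Per the consistent-and-secure-rbac goal, the legacy 'admin'
        # credential keeps working, so wherever os_system_admin is
        # expected to pass, the project-scoped os_admin must pass too.
        allowed_list = copy.deepcopy(expected_allowed)
        if 'os_system_admin' in expected_allowed:
            allowed_list.append('os_admin')
        return allowed_list

    assert expand_allowed(['os_system_admin']) == ['os_system_admin',
                                                   'os_admin']
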
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index 180e4f3..8d591cc 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -94,7 +94,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -182,7 +182,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -217,7 +217,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
index fa7b6a4..fe80084 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
@@ -109,7 +109,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -232,7 +232,7 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -385,7 +385,7 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -458,7 +458,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -535,7 +535,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
index d3833f6..6f67d8f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
@@ -48,7 +48,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
index 456a01e..6984420 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
@@ -80,7 +80,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -235,7 +235,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -396,7 +396,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -498,7 +498,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -580,7 +580,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor.py b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
index b5b4254..565ff99 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
@@ -92,7 +92,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -198,7 +198,7 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -326,7 +326,7 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -394,7 +394,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -458,7 +458,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
index 884f656..285df19 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
@@ -46,7 +46,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
index 39f3338..48ade67 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
@@ -64,7 +64,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -184,7 +184,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -313,7 +313,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -391,7 +391,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
@@ -458,7 +458,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index a305ead..64368aa 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -282,7 +282,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -724,8 +725,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -739,8 +740,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin', 'os_roles_lb_member',
@@ -763,7 +764,7 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -1193,8 +1194,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -1475,7 +1476,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -1778,7 +1780,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index e7ed5a6..47b2faa 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -62,6 +62,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -79,6 +84,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -139,7 +149,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -152,6 +163,11 @@
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -365,8 +381,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -380,8 +396,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin', 'os_roles_lb_member',
@@ -406,7 +422,7 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -609,6 +625,11 @@
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -652,8 +673,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -720,6 +741,11 @@
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -761,7 +787,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -858,6 +885,11 @@
         }
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_l7policy_client.cleanup_l7policy,
+            l7policy[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -872,7 +904,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
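
The cleanups added in this file all follow the same create-then-register
pattern: the cleanup is registered immediately after a successful create and
before any waiter, so the l7policy is removed at class teardown even if a
later wait or assertion fails. Schematically (using the client names from
above)::

    l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)

    # Register the cleanup first...
    self.addClassResourceCleanup(
        self.mem_l7policy_client.cleanup_l7policy,
        l7policy[const.ID],
        lb_client=self.mem_lb_client, lb_id=self.lb_id)

    # ...and only then block on the load balancer returning to ACTIVE.
    waiters.wait_for_status(
        self.mem_lb_client.show_loadbalancer, self.lb_id,
        const.PROVISIONING_STATUS, const.ACTIVE,
        CONF.load_balancer.build_interval,
        CONF.load_balancer.build_timeout)
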
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
index 5cb85c4..c0eb1d2 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
@@ -147,7 +147,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -357,8 +358,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin', 'os_roles_lb_member',
@@ -380,7 +381,7 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
@@ -566,8 +567,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -654,7 +655,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -756,7 +758,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 1c2fa75..b60fb20 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -16,17 +16,20 @@
 from uuid import UUID
 
 from dateutil import parser
+from oslo_log import log as logging
 from oslo_utils import strutils
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions
+import testtools
 
 from octavia_tempest_plugin.common import constants as const
 from octavia_tempest_plugin.tests import test_base
 from octavia_tempest_plugin.tests import waiters
 
 CONF = config.CONF
+LOG = logging.getLogger(__name__)
 
 
 class ListenerAPITest(test_base.LoadBalancerBaseTest):
@@ -79,15 +82,25 @@
     def test_tcp_listener_create(self):
         self._test_listener_create(const.TCP, 8002)
 
+    @decorators.idempotent_id('1a6ba0d0-f309-4088-a686-dda0e9ab7e43')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_create(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_create(const.PROMETHEUS, 8090)
+
     @decorators.idempotent_id('7b53f336-47bc-45ae-bbd7-4342ef0673fc')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_create(self):
         self._test_listener_create(const.UDP, 8003)
 
+    @decorators.idempotent_id('d6d36c32-27ff-4977-9d21-fd71a14e3b20')
+    def test_sctp_listener_create(self):
+        self._test_listener_create(const.SCTP, 8004)
+
     def _test_listener_create(self, protocol, protocol_port):
         """Tests listener create and basic show APIs.
 
@@ -97,6 +110,8 @@
         * Show listener details.
         * Validate the show reflects the requested values.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-create")
         listener_description = data_utils.arbitrary_string(size=255)
 
@@ -158,7 +173,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -171,6 +187,11 @@
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -233,27 +254,32 @@
             self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
 
     @decorators.idempotent_id('cceac303-4db5-4d5a-9f6e-ff33780a5f29')
-    def test_http_udp_tcp_listener_create_on_same_port(self):
+    def test_http_udp_sctp_tcp_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.HTTP, const.UDP,
+                                                const.SCTP,
                                                 const.TCP, 8010)
 
     @decorators.idempotent_id('930338b8-3029-48a6-89b2-8b062060fe61')
-    def test_http_udp_https_listener_create_on_same_port(self):
+    def test_http_udp_sctp_https_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.HTTP, const.UDP,
+                                                const.SCTP,
                                                 const.HTTPS, 8011)
 
     @decorators.idempotent_id('01a21892-008a-4327-b4fd-fbf194ecb1a5')
-    def test_tcp_udp_http_listener_create_on_same_port(self):
+    def test_tcp_udp_sctp_http_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.TCP, const.UDP,
+                                                const.SCTP,
                                                 const.HTTP, 8012)
 
     @decorators.idempotent_id('5da764a4-c03a-46ed-848b-98b9d9fa9089')
-    def test_tcp_udp_https_listener_create_on_same_port(self):
+    def test_tcp_udp_sctp_https_listener_create_on_same_port(self):
         self._test_listener_create_on_same_port(const.TCP, const.UDP,
+                                                const.SCTP,
                                                 const.HTTPS, 8013)
 
     def _test_listener_create_on_same_port(self, protocol1, protocol2,
-                                           protocol3, protocol_port):
+                                           protocol3, protocol4,
+                                           protocol_port):
         """Tests listener creation on same port number.
 
         * Create a first listener.
@@ -261,10 +287,25 @@
           protocol.
         * Create a second listener with the same parameters and ensure that
           an error is triggered.
-        * Create a third listener with another protocol over TCP, and ensure
+        * Create a third listener on an existing port, but with a different
+          protocol.
+        * Create a fourth listener with another protocol over TCP, and ensure
           that it fails.
         """
 
+        skip_protocol1 = (
+            not self._validate_listener_protocol(protocol1,
+                                                 raise_if_unsupported=False))
+        skip_protocol2 = (
+            not self._validate_listener_protocol(protocol2,
+                                                 raise_if_unsupported=False))
+        skip_protocol3 = (
+            not self._validate_listener_protocol(protocol3,
+                                                 raise_if_unsupported=False))
+        skip_protocol4 = (
+            not self._validate_listener_protocol(protocol4,
+                                                 raise_if_unsupported=False))
+
         # Using listeners on the same port for TCP and UDP was not supported
         # before Train. Use API version 2.11 as a reference to detect previous
         # releases and skip the test.
@@ -274,92 +315,131 @@
                                      'is only available on Octavia API '
                                      'version 2.11 or newer.')
 
-        listener_name = data_utils.rand_name("lb_member_listener1-create")
+        if not skip_protocol1:
+            listener_name = data_utils.rand_name("lb_member_listener1-create")
 
-        listener_kwargs = {
-            const.NAME: listener_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol1,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200
-        }
+            listener_kwargs = {
+                const.NAME: listener_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol1,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200
+            }
 
-        try:
-            self.mem_listener_client.create_listener(**listener_kwargs)
-        except exceptions.BadRequest as e:
-            faultstring = e.resp_body.get('faultstring', '')
-            if ("Invalid input for field/attribute protocol." in faultstring
-                    and "Value should be one of:" in faultstring):
-                raise self.skipException("Skipping unsupported protocol")
-            raise e
+            try:
+                self.mem_listener_client.create_listener(**listener_kwargs)
+            except exceptions.BadRequest as e:
+                fs = e.resp_body.get('faultstring', '')
+                if ("Invalid input for field/attribute protocol." in fs
+                        and "Value should be one of:" in fs):
+                    LOG.info("Skipping unsupported protocol: {}".format(
+                        listener_kwargs[const.PROTOCOL]))
+                else:
+                    raise e
+            else:
+                waiters.wait_for_status(
+                    self.mem_lb_client.show_loadbalancer, self.lb_id,
+                    const.PROVISIONING_STATUS, const.ACTIVE,
+                    CONF.load_balancer.build_interval,
+                    CONF.load_balancer.build_timeout)
 
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
+        if not skip_protocol2:
+            # Create a listener on the same port, but with a different protocol
+            listener2_name = data_utils.rand_name("lb_member_listener2-create")
 
-        # Create a listener on the same port, but with a different protocol
-        listener2_name = data_utils.rand_name("lb_member_listener2-create")
+            listener2_kwargs = {
+                const.NAME: listener2_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol2,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
 
-        listener2_kwargs = {
-            const.NAME: listener2_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol2,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-        }
+            try:
+                self.mem_listener_client.create_listener(**listener2_kwargs)
+            except exceptions.BadRequest as e:
+                fs = e.resp_body.get('faultstring', '')
+                if ("Invalid input for field/attribute protocol." in fs
+                        and "Value should be one of:" in fs):
+                    LOG.info("Skipping unsupported protocol: {}".format(
+                        listener2_kwargs[const.PROTOCOL]))
+                else:
+                    raise e
+            else:
+                waiters.wait_for_status(
+                    self.mem_lb_client.show_loadbalancer, self.lb_id,
+                    const.PROVISIONING_STATUS, const.ACTIVE,
+                    CONF.load_balancer.build_interval,
+                    CONF.load_balancer.build_timeout)
 
-        try:
-            self.mem_listener_client.create_listener(**listener2_kwargs)
-        except exceptions.BadRequest as e:
-            faultstring = e.resp_body.get('faultstring', '')
-            if ("Invalid input for field/attribute protocol." in faultstring
-                    and "Value should be one of:" in faultstring):
-                raise self.skipException("Skipping unsupported protocol")
-            raise e
+        if not skip_protocol1:
+            # Create a listener on the same port, with an already used protocol
+            listener3_name = data_utils.rand_name("lb_member_listener3-create")
 
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
+            listener3_kwargs = {
+                const.NAME: listener3_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol1,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
 
-        # Create a listener on the same port, with an already used protocol
-        listener3_name = data_utils.rand_name("lb_member_listener3-create")
+            self.assertRaises(
+                exceptions.Conflict,
+                self.mem_listener_client.create_listener,
+                **listener3_kwargs)
 
-        listener3_kwargs = {
-            const.NAME: listener3_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol1,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-        }
+        if not skip_protocol3:
+            # Create a listener on the same port, with a different protocol
+            listener4_name = data_utils.rand_name("lb_member_listener4-create")
 
-        self.assertRaises(
-            exceptions.Conflict,
-            self.mem_listener_client.create_listener,
-            **listener3_kwargs)
+            listener4_kwargs = {
+                const.NAME: listener4_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol3,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
 
-        # Create a listener on the same port, with another protocol over TCP
-        listener4_name = data_utils.rand_name("lb_member_listener4-create")
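+            # As with protocol2 above, tolerate a deployment that does not
+            # support protocol3.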
+            try:
+                self.mem_listener_client.create_listener(**listener4_kwargs)
+            except exceptions.BadRequest as e:
+                fs = e.resp_body.get('faultstring', '')
+                if ("Invalid input for field/attribute protocol." in fs
+                        and "Value should be one of:" in fs):
+                    LOG.info("Skipping unsupported protocol: {}".format(
+                        listener4_kwargs[const.PROTOCOL]))
+                else:
+                    raise
+            else:
+                waiters.wait_for_status(
+                    self.mem_lb_client.show_loadbalancer, self.lb_id,
+                    const.PROVISIONING_STATUS, const.ACTIVE,
+                    CONF.load_balancer.build_interval,
+                    CONF.load_balancer.build_timeout)
 
-        listener4_kwargs = {
-            const.NAME: listener4_name,
-            const.ADMIN_STATE_UP: True,
-            const.PROTOCOL: protocol3,
-            const.PROTOCOL_PORT: protocol_port,
-            const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
-        }
+        if not skip_protocol4:
+            # Create a listener on the same port, with another protocol over
+            # TCP
+            listener5_name = data_utils.rand_name("lb_member_listener5-create")
 
-        self.assertRaises(
-            exceptions.Conflict,
-            self.mem_listener_client.create_listener,
-            **listener4_kwargs)
+            listener5_kwargs = {
+                const.NAME: listener5_name,
+                const.ADMIN_STATE_UP: True,
+                const.PROTOCOL: protocol4,
+                const.PROTOCOL_PORT: protocol_port,
+                const.LOADBALANCER_ID: self.lb_id,
+                const.CONNECTION_LIMIT: 200,
+            }
+
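+            # protocol4 uses the same underlying transport as a listener
+            # already bound to this port, so creation must fail with
+            # 409 Conflict.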
+            self.assertRaises(
+                exceptions.Conflict,
+                self.mem_listener_client.create_listener,
+                **listener5_kwargs)
 
     @decorators.idempotent_id('78ba6eb0-178c-477e-9156-b6775ca7b271')
     def test_http_listener_list(self):
@@ -369,19 +449,29 @@
     def test_https_listener_list(self):
         self._test_listener_list(const.HTTPS, 8030)
 
+    @decorators.idempotent_id('5473e071-8277-4ac5-9277-01ecaf46e274')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_list(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_list(const.PROMETHEUS, 8091)
+
     @decorators.idempotent_id('1cd476e2-7788-415e-bcaf-c377acfc9794')
     def test_tcp_listener_list(self):
         self._test_listener_list(const.TCP, 8030)
 
     @decorators.idempotent_id('c08fb77e-b317-4d6f-b430-91f5b27ebac6')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_list(self):
         self._test_listener_list(const.UDP, 8040)
 
+    @decorators.idempotent_id('0abc3998-aacd-4edd-88f5-c5c35557646f')
+    def test_sctp_listener_list(self):
+        self._test_listener_list(const.SCTP, 8041)
+
     def _test_listener_list(self, protocol, protocol_port_base):
         """Tests listener list API and field filtering.
 
@@ -399,6 +489,8 @@
         # IDs of listeners created in the test
         test_ids = []
 
+        self._validate_listener_protocol(protocol)
+
         lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
         lb = self.mem_lb_client.create_loadbalancer(
             name=lb_name, provider=CONF.load_balancer.provider,
@@ -551,8 +643,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -566,8 +658,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin', 'os_roles_lb_member',
@@ -591,7 +683,7 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -735,19 +827,29 @@
     def test_https_listener_show(self):
         self._test_listener_show(const.HTTPS, 8051)
 
+    @decorators.idempotent_id('b851b754-4333-4115-9063-a9fce44c2e46')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_show(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_show(const.PROMETHEUS, 8092)
+
     @decorators.idempotent_id('1fcbbee2-b697-4890-b6bf-d308ac1c94cd')
     def test_tcp_listener_show(self):
         self._test_listener_show(const.TCP, 8052)
 
     @decorators.idempotent_id('1dea3a6b-c95b-4e91-b591-1aa9cbcd0d1d')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_show(self):
         self._test_listener_show(const.UDP, 8053)
 
+    @decorators.idempotent_id('10992529-1d0a-47a3-855c-3dbcd868db4e')
+    def test_sctp_listener_show(self):
+        self._test_listener_show(const.SCTP, 8054)
+
     def _test_listener_show(self, protocol, protocol_port):
         """Tests listener show API.
 
@@ -756,6 +858,8 @@
         * Validate the show reflects the requested values.
         * Validates that other accounts cannot see the listener.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-show")
         listener_description = data_utils.arbitrary_string(size=255)
 
@@ -801,6 +905,11 @@
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
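+        # Register cleanup right after creation so the listener is removed
+        # even if the test fails part-way through.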
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -868,8 +977,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -888,19 +997,29 @@
     def test_https_listener_update(self):
         self._test_listener_update(const.HTTPS, 8061)
 
+    @decorators.idempotent_id('cbba6bf8-9184-4da5-95e9-5efe1f89ddf0')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_update(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_update(const.PROMETHEUS, 8093)
+
     @decorators.idempotent_id('8d933121-db03-4ccc-8b77-4e879064a9ba')
     def test_tcp_listener_update(self):
         self._test_listener_update(const.TCP, 8062)
 
     @decorators.idempotent_id('fd02dbfd-39ce-41c2-b181-54fc7ad91707')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_update(self):
         self._test_listener_update(const.UDP, 8063)
 
+    @decorators.idempotent_id('c590b485-4e08-4e49-b384-2282b3f6f1b9')
+    def test_sctp_listener_update(self):
+        self._test_listener_update(const.SCTP, 8064)
+
     def _test_listener_update(self, protocol, protocol_port):
         """Tests listener update and show APIs.
 
@@ -912,6 +1031,8 @@
         * Show listener details.
         * Validate the show reflects the updated values.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-update")
         listener_description = data_utils.arbitrary_string(size=255)
 
@@ -957,6 +1078,11 @@
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1011,7 +1137,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -1139,19 +1266,29 @@
     def test_https_listener_delete(self):
         self._test_listener_delete(const.HTTPS, 8071)
 
+    @decorators.idempotent_id('322a6372-6b56-4a3c-87e3-dd82074bc83e')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_delete(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_delete(const.PROMETHEUS, 8094)
+
     @decorators.idempotent_id('f5ca019d-2b33-48f9-9c2d-2ec169b423ca')
     def test_tcp_listener_delete(self):
         self._test_listener_delete(const.TCP, 8072)
 
     @decorators.idempotent_id('86bd9717-e3e9-41e3-86c4-888c64455926')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_delete(self):
         self._test_listener_delete(const.UDP, 8073)
 
+    @decorators.idempotent_id('0de6f1ad-58ae-4b31-86b6-b440fce70244')
+    def test_sctp_listener_delete(self):
+        self._test_listener_delete(const.SCTP, 8074)
+
     def _test_listener_delete(self, protocol, protocol_port):
         """Tests listener create and delete APIs.
 
@@ -1160,6 +1297,8 @@
         * Deletes the listener.
         * Validates the listener is in the DELETED state.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-delete")
 
         listener_kwargs = {
@@ -1170,6 +1309,11 @@
         }
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -1184,7 +1328,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -1223,14 +1368,13 @@
         self._test_listener_show_stats(const.TCP, 8082)
 
     @decorators.idempotent_id('a4c1f199-923b-41e4-a134-c91e590e20c4')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_listener_show_stats(self):
         self._test_listener_show_stats(const.UDP, 8083)
 
+    @decorators.idempotent_id('7f6d3906-529c-4b99-8376-b836059df220')
+    def test_sctp_listener_show_stats(self):
+        self._test_listener_show_stats(const.SCTP, 8084)
+
     def _test_listener_show_stats(self, protocol, protocol_port):
         """Tests listener show statistics API.
 
@@ -1240,6 +1384,8 @@
         * Show listener statistics.
         * Validate the show reflects the expected values.
         """
+        self._validate_listener_protocol(protocol)
+
         listener_name = data_utils.rand_name("lb_member_listener1-stats")
         listener_description = data_utils.arbitrary_string(size=255)
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 7ade642..9035260 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -89,7 +89,7 @@
             expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
@@ -193,7 +193,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -242,7 +243,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -418,8 +420,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -433,8 +435,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin', 'os_roles_lb_member',
@@ -457,7 +459,7 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -635,8 +637,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -739,7 +741,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -831,8 +834,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -902,8 +905,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -978,7 +981,7 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin']
         if expected_allowed:
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index fee2893..8b25fc7 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -104,6 +104,11 @@
         cls.current_listener_port += 1
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -133,6 +138,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -902,7 +912,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -915,6 +926,11 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1233,8 +1249,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin', 'os_roles_lb_member',
@@ -1255,7 +1271,7 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
@@ -1755,6 +1771,11 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1798,8 +1819,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -2198,6 +2219,11 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -2255,7 +2281,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -2640,6 +2667,11 @@
                 const.ID]
         member1 = self.mem_member_client.create_member(**member1_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id,
                                 const.PROVISIONING_STATUS,
@@ -2672,6 +2704,11 @@
 
         member2 = self.mem_member_client.create_member(**member2_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id,
                                 const.PROVISIONING_STATUS,
@@ -2713,7 +2750,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -2944,6 +2982,11 @@
         }
         member = self.mem_member_client.create_member(**member_kwargs)
 
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -2958,7 +3001,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index ba31a8e..4e3e667 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -359,6 +359,11 @@
             listener = self.mem_listener_client.create_listener(
                 **listener_kwargs)
 
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener,
+                listener[const.ID],
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
             waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     self.lb_id, const.PROVISIONING_STATUS,
                                     const.ACTIVE,
@@ -408,7 +413,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -434,6 +440,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -584,7 +595,6 @@
     def _test_pool_list(self, pool_protocol, algorithm):
         """Tests pool list API and field filtering.
 
-        * Create a clean loadbalancer.
         * Create three pools.
         * Validates that other accounts cannot list the pools.
         * List the pools using the default sort order.
@@ -605,14 +615,7 @@
                 'Skipping this test as load balancing algorithm '
                 'SOURCE_IP_PORT requires API version 2.13 or newer.')
 
-        lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
-        lb_id = lb[const.ID]
-        self.addCleanup(
-            self.mem_lb_client.cleanup_loadbalancer,
-            lb_id)
+        lb_id = self.lb_id
 
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 lb_id,
@@ -755,8 +758,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_primary', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary',
-                                'os_roles_lb_member2', 'os_roles_lb_observer',
+            expected_allowed = ['os_primary', 'os_roles_lb_member2',
+                                'os_roles_lb_observer',
                                 'os_roles_lb_global_observer']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_roles_lb_observer', 'os_roles_lb_member2']
@@ -770,8 +773,8 @@
         if CONF.load_balancer.RBAC_test_type == const.OWNERADMIN:
             expected_allowed = ['os_admin', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin', 'os_roles_lb_member',
@@ -794,7 +797,7 @@
         #       a superscope of "project_reader". This means it can read
         #       objects in the "admin" credential's project.
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
@@ -1080,6 +1083,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1129,8 +1137,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_system_reader',
-                                'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_system_reader', 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_system_reader',
                                 'os_roles_lb_admin',
@@ -1315,6 +1323,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1364,7 +1377,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
@@ -1647,6 +1661,11 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
@@ -1661,7 +1680,8 @@
             expected_allowed = ['os_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_system_admin', 'os_roles_lb_member']
+            expected_allowed = ['os_admin', 'os_roles_lb_admin',
+                                'os_roles_lb_member']
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             expected_allowed = ['os_system_admin', 'os_roles_lb_admin',
                                 'os_roles_lb_member']
diff --git a/octavia_tempest_plugin/tests/api/v2/test_provider.py b/octavia_tempest_plugin/tests/api/v2/test_provider.py
index 9a9dd28..e47ae8e 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_provider.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_provider.py
@@ -49,7 +49,7 @@
                 'os_admin', 'os_primary', 'os_roles_lb_admin',
                 'os_roles_lb_member', 'os_roles_lb_member2']
         if CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
-            expected_allowed = ['os_admin', 'os_primary', 'os_system_admin',
+            expected_allowed = ['os_admin', 'os_primary', 'os_roles_lb_admin',
                                 'os_system_reader', 'os_roles_lb_observer',
                                 'os_roles_lb_global_observer',
                                 'os_roles_lb_member', 'os_roles_lb_member2']
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 9664c57..511c724 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -28,6 +28,7 @@
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
+import testtools
 
 from octavia_tempest_plugin.common import barbican_client_mgr
 from octavia_tempest_plugin.common import cert_utils
@@ -239,6 +240,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -257,7 +263,13 @@
         if cls.lb_member_1_subnet:
             member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]
 
-        cls.mem_member_client.create_member(**member1_kwargs)
+        member1 = cls.mem_member_client.create_member(**member1_kwargs)
+
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member1[const.ID], cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(
             cls.mem_lb_client.show_loadbalancer, cls.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -276,7 +288,13 @@
         if cls.lb_member_2_subnet:
             member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]
 
-        cls.mem_member_client.create_member(**member2_kwargs)
+        member2 = cls.mem_member_client.create_member(**member2_kwargs)
+
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member2[const.ID], cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(
             cls.mem_lb_client.show_loadbalancer, cls.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1267,6 +1285,11 @@
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
         pool_id = pool[const.ID]
 
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -1285,7 +1308,13 @@
         if self.lb_member_1_subnet:
             member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
 
-        self.mem_member_client.create_member(**member1_kwargs)
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1304,7 +1333,13 @@
         if self.lb_member_2_subnet:
             member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
 
-        self.mem_member_client.create_member(**member2_kwargs)
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1323,6 +1358,11 @@
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.listener_id = listener[const.ID]
 
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -1395,3 +1435,413 @@
 
         self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
                                     protocol_port=84, traffic_member_count=1)
+
+    @decorators.idempotent_id('11b67c96-a553-4b47-9fc6-4c3d7a2a10ce')
+    def test_pool_reencryption_client_authentication(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.8'):
+            raise self.skipException('Pool re-encryption is only available on '
+                                     'Octavia API version 2.8 or newer.')
+        pool_name = data_utils.rand_name("lb_member_pool1-tls-client-auth")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: self.lb_algorithm,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.TLS_ENABLED: True
+        }
+        # Specify an http/1.x ALPN to work around HTTP health checks
+        # failing on older haproxy versions when the ALPN list includes h2
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.24'):
+            pool_kwargs[const.ALPN_PROTOCOLS] = ['http/1.0', 'http/1.1']
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        pool_id = pool[const.ID]
+
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
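+        # Use an HTTPS health monitor with minimal delay/timeout so member
+        # status reacts quickly when the pool's client certificate changes.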
+        hm_name = data_utils.rand_name("lb_member_hm1-tls-client-auth")
+        hm_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: hm_name,
+            const.TYPE: const.HEALTH_MONITOR_HTTPS,
+            const.HTTP_METHOD: const.GET,
+            const.URL_PATH: '/',
+            const.EXPECTED_CODES: '200',
+            const.DELAY: 1,
+            const.TIMEOUT: 1,
+            const.MAX_RETRIES: 1,
+            const.MAX_RETRIES_DOWN: 1,
+            const.ADMIN_STATE_UP: True,
+        }
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name(
+            "lb_member_member1-tls-client-auth")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 9443,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name(
+            "lb_member_member2-tls-client-auth")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 9443,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-tls-client-auth")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '85',
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_POOL_ID: pool_id,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # With no client certificate configured on the pool, the members
+        # should fail their health checks and the load balancer should
+        # return a 503
+        url = 'http://{0}:85'.format(self.lb_vip_address)
+        self.validate_URL_response(url, expected_status_code=503)
+
+        # Test with client certificates
+        pool_update_kwargs = {
+            const.TLS_CONTAINER_REF: self.pool_client_ref
+        }
+
+        self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+        waiters.wait_for_status(
+            self.mem_pool_client.show_pool, pool_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Make sure the health monitor has brought the members up after
+        # the pool update.
+        waiters.wait_for_status(
+            self.mem_member_client.show_member, member1[const.ID],
+            const.OPERATING_STATUS, const.ONLINE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout, error_ok=True, pool_id=pool_id)
+        waiters.wait_for_status(
+            self.mem_member_client.show_member, member2[const.ID],
+            const.OPERATING_STATUS, const.ONLINE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout, error_ok=True, pool_id=pool_id)
+
+        self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
+                                    protocol_port=85)
+
+    @decorators.idempotent_id('d3e4c5fe-1726-49e4-b0b0-7a5a47749fc9')
+    def test_basic_h2_listener_http_listener_pool_reencryption(self):
+        """Test both h2 and HTTP traffic on the same load balancer.
+
+        In this test we deploy the following Octavia resources:
+            TERMINATED_HTTPS listener with h2 ALPN protocols
+            HTTP listener
+            HTTP pool with h2 and http/1.1 ALPN protocols and backend
+            re-encryption
+
+        We send both h2 and HTTP traffic from a client to the load balancer
+        VIP and verify that h2 was negotiated for the traffic sent to port
+        443.
+        :raises self.skipException: ALPN support for pools is not available
+        prior to v2.24.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.24'):
+            raise self.skipException('ALPN protocols are only available on '
+                                     'pools in Octavia API version 2.24 or'
+                                     ' newer.')
+        pool_name = data_utils.rand_name("lb_member_pool1-tls-alpn")
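+        # The pool is TLS_ENABLED with its own ALPN list so traffic is
+        # re-encrypted to the members, as described in the docstring.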
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: self.lb_algorithm,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.TLS_ENABLED: True,
+            const.ALPN_PROTOCOLS: ['h2', 'http/1.1'],
+        }
+
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        pool_id = pool[const.ID]
+
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-tls-reencrypt")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 443,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(**member1_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client,
+            lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-tls-reencrypt")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 443,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client,
+            lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-tls-terminated-alpn")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.TERMINATED_HTTPS,
+            const.PROTOCOL_PORT: '443',
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_POOL_ID: pool_id,
+            const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+            const.ALPN_PROTOCOLS: ['h2', 'http/1.1']
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client,
+            lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-http")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: 80,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_POOL_ID: pool_id,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Verify that ALPN negotiation on the TERMINATED_HTTPS listener
+        # selects h2.
+        context = ssl.SSLContext(ssl.PROTOCOL_TLS)
+        context.set_alpn_protocols(['h2', 'http/1.1'])
+        s = socket.socket()
+        ssl_sock = context.wrap_socket(s)
+        ssl_sock.connect((self.lb_vip_address, 443))
+        selected_proto = ssl_sock.selected_alpn_protocol()
+        self.assertEqual('h2', selected_proto)
+
+        # Test HTTPS listener load balancing.
+        self.check_members_balanced(self.lb_vip_address, protocol=const.HTTPS,
+                                    HTTPS_verify=False, protocol_port=443)
+
+        # Test HTTP listener load balancing.
+        self.check_members_balanced(self.lb_vip_address)
+
+    @decorators.idempotent_id('7d9dcae6-3e2c-4eae-9bfb-1ef0d00aa530')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_tls_prometheus_client_auth_mandatory(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('Prometheus listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        LISTENER1_TCP_PORT = '9443'
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-prometheus-client-auth-mand")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.PROMETHEUS,
+            const.PROTOCOL_PORT: LISTENER1_TCP_PORT,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+            const.CLIENT_AUTHENTICATION: const.CLIENT_AUTH_MANDATORY,
+            const.CLIENT_CA_TLS_CONTAINER_REF: self.client_ca_cert_ref,
+            const.CLIENT_CRL_CONTAINER_REF: self.client_crl_ref,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Test that no client certificate fails to connect
+        self.assertRaises(
+            requests.exceptions.SSLError,
+            requests.get,
+            'https://{0}:{1}'.format(self.lb_vip_address, LISTENER1_TCP_PORT),
+            timeout=12, verify=False)
+
+        # Test that a revoked client certificate fails to connect
+        with tempfile.NamedTemporaryFile(buffering=0) as cert_file:
+            cert_file.write(self.revoked_client_cert.public_bytes(
+                serialization.Encoding.PEM))
+            with tempfile.NamedTemporaryFile(buffering=0) as key_file:
+                key_file.write(self.revoked_client_key.private_bytes(
+                    serialization.Encoding.PEM,
+                    serialization.PrivateFormat.TraditionalOpenSSL,
+                    serialization.NoEncryption()))
+                self.assertRaises(
+                    requests.exceptions.SSLError, requests.get,
+                    'https://{0}:{1}'.format(self.lb_vip_address,
+                                             LISTENER1_TCP_PORT),
+                    timeout=12, verify=False, cert=(cert_file.name,
+                                                    key_file.name))
+
+        # Test that a valid client certificate can connect
+        with tempfile.NamedTemporaryFile(buffering=0) as cert_file:
+            cert_file.write(self.client_cert.public_bytes(
+                serialization.Encoding.PEM))
+            with tempfile.NamedTemporaryFile(buffering=0) as key_file:
+                key_file.write(self.client_key.private_bytes(
+                    serialization.Encoding.PEM,
+                    serialization.PrivateFormat.TraditionalOpenSSL,
+                    serialization.NoEncryption()))
+                response = requests.get(
+                    'https://{0}:{1}'.format(self.lb_vip_address,
+                                             LISTENER1_TCP_PORT),
+                    timeout=12, verify=False, cert=(cert_file.name,
+                                                    key_file.name))
+                self.assertEqual(200, response.status_code)
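Side note: the client-certificate pattern used in the test above can be
reproduced outside of tempest. A minimal sketch, assuming url points at a
listener configured with CLIENT_AUTH_MANDATORY and that cert_obj/key_obj are
cryptography objects like self.client_cert/self.client_key:

    import tempfile

    import requests
    from cryptography.hazmat.primitives import serialization

    def get_with_client_cert(url, cert_obj, key_obj):
        # buffering=0 makes each write() hit the file immediately, so
        # requests can read the PEM data without an explicit flush().
        with tempfile.NamedTemporaryFile(buffering=0) as cert_file, \
                tempfile.NamedTemporaryFile(buffering=0) as key_file:
            cert_file.write(cert_obj.public_bytes(
                serialization.Encoding.PEM))
            key_file.write(key_obj.private_bytes(
                serialization.Encoding.PEM,
                serialization.PrivateFormat.TraditionalOpenSSL,
                serialization.NoEncryption()))
            # verify=False because the test server certificate is
            # self-signed; a missing or revoked client cert raises SSLError.
            return requests.get(url, timeout=12, verify=False,
                                cert=(cert_file.name, key_file.name))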
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
index 8075a35..01548c2 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -269,6 +269,11 @@
         }
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -295,6 +300,11 @@
 
         hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
 
+        self.addClassResourceCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index 69c1f2b..d4eaa86 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -104,6 +104,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_ids[protocol] = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -120,6 +125,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_ids[protocol] = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
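The recurring pattern in these hunks is to register the cleanup immediately
after the create call, before waiting for the load balancer to go ACTIVE, so
the resource is released even if the wait times out. A condensed standalone
sketch of the same idea using stdlib unittest (tempest's
addClassResourceCleanup is the analogous hook):

    import unittest

    class CleanupPatternExample(unittest.TestCase):
        @classmethod
        def setUpClass(cls):
            cls.resource = {'id': 'fake-pool'}  # stand-in for create_pool()
            # Registered before any wait that may raise, so the resource is
            # cleaned up even when class-level setup aborts part-way.
            cls.addClassCleanup(cls.resource.clear)

        def test_resource_present(self):
            self.assertEqual('fake-pool', self.resource['id'])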
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
index acbd094..4c84ba3 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
@@ -61,6 +61,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -77,6 +82,11 @@
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
index d5683ef..edbdc33 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
@@ -61,6 +61,11 @@
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -76,6 +81,11 @@
         l7policy = cls.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
         cls.l7policy_id = l7policy[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_l7policy_client.cleanup_l7policy,
+            cls.l7policy_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index a720d26..be997aa 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -76,6 +76,11 @@
         pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
         pool1_id = pool1[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool1_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -92,6 +97,11 @@
         pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
         pool2_id = pool2[const.ID]
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool2_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -115,11 +125,6 @@
         self._test_listener_CRUD(const.TCP, pool1, pool2)
 
     @decorators.idempotent_id('27a2ba7d-6147-46e4-886a-47c1ba63bf89')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_least_connections_listener_CRUD(self):
         pool1, pool2 = self._create_pools(const.UDP,
                                           const.LB_ALGORITHM_LEAST_CONNECTIONS)
@@ -138,11 +143,6 @@
         self._test_listener_CRUD(const.TCP, pool1, pool2)
 
     @decorators.idempotent_id('dd913f74-c6a6-4998-9bed-095babb9cb47')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_round_robin_listener_CRUD(self):
         pool1, pool2 = self._create_pools(const.UDP,
                                           const.LB_ALGORITHM_ROUND_ROBIN)
@@ -161,11 +161,6 @@
         self._test_listener_CRUD(const.TCP, pool1, pool2)
 
     @decorators.idempotent_id('7830aba8-12ca-40d9-9d9b-a63f7a43b287')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_source_ip_listener_CRUD(self):
         pool1, pool2 = self._create_pools(const.UDP,
                                           const.LB_ALGORITHM_SOURCE_IP)
@@ -200,11 +195,6 @@
             raise testtools.TestCase.skipException(message)
 
     @decorators.idempotent_id('3f9a2de9-5012-437d-a907-a25e1f68ccfb')
-    # Skipping due to a status update bug in the amphora driver.
-    @decorators.skip_because(
-        bug='2007979',
-        bug_type='storyboard',
-        condition=CONF.load_balancer.provider in const.AMPHORA_PROVIDERS)
     def test_udp_source_ip_port_listener_CRUD(self):
         try:
             pool1, pool2 = self._create_pools(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index 6df4c9c..c53e1c2 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -359,6 +359,11 @@
                 **listener_kwargs)
             listener_id = listener[const.ID]
 
+            self.addCleanup(
+                self.mem_listener_client.cleanup_listener,
+                listener_id,
+                lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
             waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     self.lb_id, const.PROVISIONING_STATUS,
                                     const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index d4d43b5..a704b88 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -120,6 +120,11 @@
 
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
 
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -135,6 +140,11 @@
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
 
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            pool[const.ID],
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -204,6 +214,55 @@
             traffic_member_count=traffic_member_count, source_port=source_port,
             delay=delay)
 
+    def _pool_add_healthmonitor(self, pool_id, protocol):
+        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
+        if protocol != const.HTTP:
+            if protocol == const.UDP:
+                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
+            elif protocol == const.TCP:
+                hm_type = const.HEALTH_MONITOR_TCP
+
+            hm_kwargs = {
+                const.POOL_ID: pool_id,
+                const.NAME: hm_name,
+                const.TYPE: hm_type,
+                const.DELAY: 3,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.ADMIN_STATE_UP: True,
+            }
+        else:
+            hm_kwargs = {
+                const.POOL_ID: pool_id,
+                const.NAME: hm_name,
+                const.TYPE: const.HEALTH_MONITOR_HTTP,
+                const.DELAY: 2,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.HTTP_METHOD: const.GET,
+                const.URL_PATH: '/',
+                const.EXPECTED_CODES: '200',
+                const.ADMIN_STATE_UP: True,
+            }
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        return hm
+
     @decorators.attr(type=['smoke', 'slow'])
     @testtools.skipIf(CONF.load_balancer.test_with_noop,
                       'Traffic tests will not work in noop mode.')
@@ -348,54 +407,7 @@
                                     protocol=protocol, persistent=persistent)
 
         # Create the healthmonitor
-        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
-        if protocol != const.HTTP:
-            if protocol == const.UDP:
-                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
-            elif protocol == const.TCP:
-                hm_type = const.HEALTH_MONITOR_TCP
-
-            hm_kwargs = {
-                const.POOL_ID: pool_id,
-                const.NAME: hm_name,
-                const.TYPE: hm_type,
-                const.DELAY: 3,
-                const.TIMEOUT: 2,
-                const.MAX_RETRIES: 2,
-                const.MAX_RETRIES_DOWN: 2,
-                const.ADMIN_STATE_UP: True,
-            }
-        else:
-            hm_kwargs = {
-                const.POOL_ID: pool_id,
-                const.NAME: hm_name,
-                const.TYPE: const.HEALTH_MONITOR_HTTP,
-                const.DELAY: 2,
-                const.TIMEOUT: 2,
-                const.MAX_RETRIES: 2,
-                const.MAX_RETRIES_DOWN: 2,
-                const.HTTP_METHOD: const.GET,
-                const.URL_PATH: '/',
-                const.EXPECTED_CODES: '200',
-                const.ADMIN_STATE_UP: True,
-            }
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
+        hm = self._pool_add_healthmonitor(pool_id, protocol)
 
         # Wait for members to adjust to the correct OPERATING_STATUS
         waiters.wait_for_status(
@@ -1402,3 +1414,218 @@
         self._test_basic_traffic(const.UDP, common_vip_port, pool_id_udp)
         self._test_basic_traffic(const.TCP, common_vip_port, pool_id_tcp,
                                  persistent=False)
+
+    @decorators.idempotent_id('c79f2cd0-0324-11eb-bc8e-74e5f9e2a801')
+    def test_udp_update_pool_healthmonitor_listener(self):
+        """Test scenario:
+
+        * Prerequisites:
+          Create: UDP listener, pool, healthmonitor and validate UDP traffic.
+        * Test scenario:
+          Update pool algorithm to: "source_ip" and start sending UDP traffic.
+          Expected: successfully received UDP packets from LB VIP.
+        * Update healthmonitor with: "delay=5" and start sending UDP traffic.
+          Expected: successfully received UDP packets from LB VIP.
+        * Update listener with: "connection-limit=300" and start sending
+          UDP traffic.
+          Expected: successfully received UDP packets from LB VIP.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer.')
+        listener_port = 104
+        listener_id, pool_id = self._listener_pool_create(
+            const.UDP, listener_port)
+        healthmonitor_id = self._pool_add_healthmonitor(
+            pool_id, protocol=const.UDP)[const.ID]
+        self._test_basic_traffic(
+            const.UDP, listener_port, pool_id)
+
+        # Update LB pool
+        self.mem_pool_client.update_pool(
+            pool_id=pool_id, lb_algorithm=const.LB_ALGORITHM_SOURCE_IP)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to LB VIP have failed')
+
+        # Update LB healthmonitor
+        self.mem_healthmonitor_client.update_healthmonitor(
+            healthmonitor_id=healthmonitor_id, delay=5)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to LB VIP have failed')
+
+        # Update LB listener
+        listener_kwargs = {const.LISTENER_ID: listener_id,
+                           const.CONNECTION_LIMIT: 300}
+        self.mem_listener_client.update_listener(**listener_kwargs)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to LB VIP have failed')
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('cd5aeefa-0e16-11eb-b8dc-74e5f9e2a801')
+    def test_hm_op_status_changed_as_expected_on_update(self):
+        """Test scenario:
+
+        * Create HTTP listener, pool and HTTP health monitor.
+        * Update health monitor with various combinations of:
+          HTTP method, expected HTTP status codes and backend URL.
+          Note: see the "fault_cases" and "valid_cases" lists in the test's
+          code.
+        * Validate that the members' operating status reaches the
+          appropriate state after each update done within the test.
+          Important: the operating status is expected to alternate between
+          ONLINE and ERROR on each update, otherwise we may miss
+          a potential bug.
+        """
+        listener_port = 105
+        listener_id, pool_id = self._listener_pool_create(
+            const.TCP, listener_port)
+        hm_id = self._pool_add_healthmonitor(
+            pool_id, protocol=const.HTTP)[const.ID]
+        self._test_basic_traffic(
+            const.HTTP, listener_port, pool_id, persistent=False)
+        mb_ids = [mb[const.ID] for
+                  mb in self.mem_member_client.list_members(pool_id)]
+
+        # Create list of test cases to be covered in test
+        fault_cases = [
+            {'mthd': const.POST, 'code': '101-102', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=103'},
+            {'mthd': const.DELETE, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.PUT, 'code': '301-302', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=303'},
+            {'mthd': const.HEAD, 'code': '400-404', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=405'},
+            {'mthd': const.OPTIONS, 'code': '500-504', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=505'},
+            {'mthd': const.PATCH, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.CONNECT, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.TRACE, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'}]
+        valid_cases = [
+            {'mthd': const.GET, 'code': '101-102', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=102'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=202'},
+            {'mthd': const.GET, 'code': '301-302', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=302'},
+            {'mthd': const.GET, 'code': '400-404', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=404'},
+            {'mthd': const.GET, 'code': '500-504', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=504'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'}]
+        # Generate "flip_flop" using zip function, that will have
+        # the operation statuses changed on each subsequent test case.
+        # It means interleaved like: ERROR, ONLINE, ERROR, ONLINE...
+        flip_flop = [v for f in zip(valid_cases, fault_cases) for v in f]
+
+        # For each test case, update HM and validate that members'
+        # "Operation Status" is changed to expected value.
+        for ff in flip_flop:
+            LOG.info('Testing case: {}'.format(ff))
+            self.mem_healthmonitor_client.update_healthmonitor(
+                hm_id, expected_codes=ff['code'], http_method=ff['mthd'],
+                url_path=ff['url_path'])
+            waiters.wait_for_status(
+                self.mem_lb_client.show_loadbalancer, self.lb_id,
+                const.PROVISIONING_STATUS, const.ACTIVE,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout)
+            for mb_id in mb_ids:
+                waiters.wait_for_status(
+                    self.mem_member_client.show_member,
+                    mb_id, const.OPERATING_STATUS,
+                    ff['op_stat'],
+                    CONF.load_balancer.check_interval,
+                    CONF.load_balancer.check_timeout,
+                    error_ok=True, pool_id=pool_id)
+
+    @decorators.idempotent_id('05e99fb3-2b37-478e-889b-77f1c731a471')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_metrics_page(self):
+        """Tests PROMETHEUS listener create and metrics endpoint is available
+
+        * Create PROMETHEUS listener.
+        * Query the metrics endpoint on the load balancer.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+
+        # Listener create
+        listener_name = data_utils.rand_name("lb_member_prometheus_listener")
+        listener_description = data_utils.arbitrary_string(size=255)
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.DESCRIPTION: listener_description,
+            const.ADMIN_STATE_UP: True,
+            const.PROTOCOL: const.PROMETHEUS,
+            const.PROTOCOL_PORT: 8080,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.CONNECTION_LIMIT: 200,
+        }
+
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 1000,
+                const.TIMEOUT_MEMBER_CONNECT: 1000,
+                const.TIMEOUT_MEMBER_DATA: 1000,
+                const.TIMEOUT_TCP_INSPECT: 50,
+            })
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            listener_kwargs.update({const.ALLOWED_CIDRS: ['0.0.0.0/0']})
+
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        listener = waiters.wait_for_status(
+            self.mem_listener_client.show_listener,
+            listener[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Make a request to the stats page
+        URL = 'http://{0}:{1}/metrics'.format(self.lb_vip_address, '8080')
+        self.validate_URL_response(URL, expected_status_code=200)
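For clarity, the interleaving built in
test_hm_op_status_changed_as_expected_on_update can be checked in isolation;
a minimal sketch with placeholder strings standing in for the case dicts:

    # zip() pairs the i-th valid case with the i-th fault case, and the
    # nested comprehension flattens the pairs, so the expected operating
    # status alternates ONLINE, ERROR, ONLINE, ERROR, ...
    valid_cases = ['ONLINE-1', 'ONLINE-2']
    fault_cases = ['ERROR-1', 'ERROR-2']
    flip_flop = [v for f in zip(valid_cases, fault_cases) for v in f]
    assert flip_flop == ['ONLINE-1', 'ERROR-1', 'ONLINE-2', 'ERROR-2']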
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index a18066a..f08cec9 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -56,9 +56,10 @@
             'admin', 'primary', ['lb_admin', CONF.load_balancer.admin_role],
             ['lb_member', CONF.load_balancer.member_role],
             ['lb_member2', CONF.load_balancer.member_role]]
-    elif CONF.load_balancer.enforce_new_defaults:
+    elif CONF.load_balancer.RBAC_test_type == const.KEYSTONE_DEFAULT_ROLES:
         credentials = [
-            'admin', 'primary', ['lb_admin', CONF.load_balancer.admin_role],
+            'admin', 'primary',
+            ['lb_admin', CONF.load_balancer.admin_role, 'admin'],
             ['lb_observer', CONF.load_balancer.observer_role, 'reader'],
             ['lb_global_observer', CONF.load_balancer.global_observer_role,
              'reader'],
@@ -71,8 +72,10 @@
             ['lb_observer', CONF.load_balancer.observer_role, 'reader'],
             ['lb_global_observer', CONF.load_balancer.global_observer_role,
              'reader'],
-            ['lb_member', CONF.load_balancer.member_role],
-            ['lb_member2', CONF.load_balancer.member_role]]
+            # Note: Some projects are now requiring the 'member' role by
+            # default (nova for example) so make sure our creds have this role
+            ['lb_member', CONF.load_balancer.member_role, 'member'],
+            ['lb_member2', CONF.load_balancer.member_role, 'member']]
 
     # If scope enforcement is enabled, add in the system scope credentials.
     # The project scope is already handled by the above credentials.
@@ -136,6 +139,9 @@
         cls.set_network_resources()
         super(LoadBalancerBaseTest, cls).setup_credentials()
 
+        if not CONF.load_balancer.log_user_roles:
+            return
+
         # Log the user roles for this test run
         role_name_cache = {}
         for cred in cls.credentials:
@@ -581,6 +587,17 @@
             lb_kwargs[const.VIP_NETWORK_ID] = cls.lb_member_vip_net[const.ID]
             lb_kwargs[const.VIP_SUBNET_ID] = None
 
+    def _validate_listener_protocol(self, protocol, raise_if_unsupported=True):
+        if (protocol == const.SCTP and
+            not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.23')):
+            if raise_if_unsupported:
+                raise self.skipException('SCTP listener protocol '
+                                         'is only available on Octavia '
+                                         'API version 2.23 or newer.')
+            return False
+        return True
+
 
 class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
     @classmethod
@@ -667,6 +684,20 @@
                 cls.lb_mem_SGr_client.delete_security_group_rule,
                 cls.lb_mem_SGr_client.show_security_group_rule,
                 SGr['id'])
+            # Create a security group rule to allow 9443 (test webservers)
+            # Used in the pool backend encryption client authentication tests
+            SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                direction='ingress',
+                security_group_id=cls.lb_member_sec_group['id'],
+                protocol='tcp',
+                ethertype='IPv4',
+                port_range_min=9443,
+                port_range_max=9443)['security_group_rule']
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_SGr_client.delete_security_group_rule,
+                cls.lb_mem_SGr_client.show_security_group_rule,
+                SGr['id'])
             # Create a security group rule to allow UDP 9999 (test webservers)
             # Port 9999 is used to illustrate health monitor ERRORs on closed
             # ports.
@@ -736,6 +767,20 @@
                     cls.lb_mem_SGr_client.delete_security_group_rule,
                     cls.lb_mem_SGr_client.show_security_group_rule,
                     SGr['id'])
+                # Create a security group rule to allow 9443 (test webservers)
+                # Used in the pool encryption client authentication tests
+                SGr = cls.lb_mem_SGr_client.create_security_group_rule(
+                    direction='ingress',
+                    security_group_id=cls.lb_member_sec_group['id'],
+                    protocol='tcp',
+                    ethertype='IPv6',
+                    port_range_min=9443,
+                    port_range_max=9443)['security_group_rule']
+                cls.addClassResourceCleanup(
+                    waiters.wait_for_not_found,
+                    cls.lb_mem_SGr_client.delete_security_group_rule,
+                    cls.lb_mem_SGr_client.show_security_group_rule,
+                    SGr['id'])
                 # Create a security group rule to allow 22 (ssh)
                 SGr = cls.lb_mem_SGr_client.create_security_group_rule(
                     direction='ingress',
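The new _validate_listener_protocol helper gates SCTP tests on API
microversion 2.23. A small standalone sketch of the comparison semantics
assumed for is_version_supported (the real client compares microversions
against the range advertised by the API):

    # Assumed semantics: the current API microversion must be at least the
    # required one, compared component-wise as integers.
    def is_version_supported(api_version, required):
        return (tuple(int(p) for p in api_version.split('.')) >=
                tuple(int(p) for p in required.split('.')))

    assert is_version_supported('2.23', '2.23')
    assert is_version_supported('2.25', '2.23')
    assert not is_version_supported('2.12', '2.23')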
diff --git a/octavia_tempest_plugin/tests/validators.py b/octavia_tempest_plugin/tests/validators.py
index 5972d54..34e8148 100644
--- a/octavia_tempest_plugin/tests/validators.py
+++ b/octavia_tempest_plugin/tests/validators.py
@@ -412,7 +412,7 @@
                     return
             except Exception:
                 LOG.warning('Server is not passing initial traffic. Waiting.')
-                time.sleep(1)
+            time.sleep(request_interval)
 
         LOG.debug('Loadbalancer wait for load balancer response totals: %s',
                   response_counts)
@@ -421,3 +421,37 @@
                                                           protocol_port))
         LOG.error(message)
         raise Exception(message)
+
+    def make_udp_requests_with_retries(
+            self, vip_address, number_of_retries, dst_port,
+            src_port=None, socket_timeout=20):
+        """Send UDP packets using retries mechanism
+
+        The delivery of data to the destination cannot be guaranteed in UDP.
+        In case when UDP package is getting lost and we might want to check
+        what could be the reason for that (Network issues or Server Side),
+        well need to send more packets to get into the conclusion.
+
+        :param vip_address: LB VIP address
+        :param number_of_retries: integer number of retries
+        :param dst_port: UDP server destination port
+        :param src_port: UDP source port to bind for UDP connection
+        :param socket_timeout: UDP socket timeout
+        :return: None if all UDP retries failed, else the first successful
+                 response data from the UDP server.
+        """
+        retry_number = 0
+        received_data = None
+        while retry_number < number_of_retries:
+            LOG.info('make_udp_requests_with_retries attempt '
+                     'number: {}'.format(retry_number))
+            retry_number += 1
+            try:
+                received_data = self.make_udp_request(
+                    vip_address, dst_port, timeout=socket_timeout,
+                    source_port=src_port)
+                break
+            except Exception as e:
+                LOG.warning('make_udp_request has failed with: '
+                            '{}'.format(e))
+        return received_data
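A plain-socket sketch of the same retry idea, independent of the tempest
helpers (the b'ping' payload, buffer size and address family are illustrative
assumptions; the real helper delegates to make_udp_request instead):

    import socket

    def udp_request_with_retries(vip, dst_port, retries=3, timeout=20):
        # UDP gives no delivery guarantee, so several attempts help to
        # distinguish a transient network drop from an unresponsive server.
        for _attempt in range(retries):
            sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            sock.settimeout(timeout)
            try:
                sock.sendto(b'ping', (vip, dst_port))
                data, _addr = sock.recvfrom(4096)
                return data
            except OSError:  # socket.timeout is an OSError subclass
                continue
            finally:
                sock.close()
        return None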
diff --git a/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml b/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml
new file mode 100644
index 0000000..f88f51b
--- /dev/null
+++ b/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added API and scenario tests for PROMETHEUS listeners.
diff --git a/releasenotes/notes/Make-sure-member-credentials-have-the-member-role-for-new-defaults-5fbc2e05768c04b9.yaml b/releasenotes/notes/Make-sure-member-credentials-have-the-member-role-for-new-defaults-5fbc2e05768c04b9.yaml
new file mode 100644
index 0000000..89fc5a6
--- /dev/null
+++ b/releasenotes/notes/Make-sure-member-credentials-have-the-member-role-for-new-defaults-5fbc2e05768c04b9.yaml
@@ -0,0 +1,7 @@
+---
+fixes:
+  - |
+    Some services are now enabling the "new default roles" which means all
+    non-admin users must have the "member" or "reader" role. This fix updates
+    the test credentials to include these roles when not running in
+    admin-or-owner test mode.
diff --git a/releasenotes/notes/add-pool-client-auth-scenario-02abca554e60d3da.yaml b/releasenotes/notes/add-pool-client-auth-scenario-02abca554e60d3da.yaml
new file mode 100644
index 0000000..b14517f
--- /dev/null
+++ b/releasenotes/notes/add-pool-client-auth-scenario-02abca554e60d3da.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added scenario test coverage for pool client authentication.
diff --git a/requirements.txt b/requirements.txt
index 7b2f0a2..afa0565 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -2,7 +2,7 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-cryptography>=2.1 # BSD/Apache-2.0
+cryptography>=3.0 # BSD/Apache-2.0
 python-dateutil>=2.5.3 # BSD
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index ed7a3cd..29df1ca 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,6 @@
 [tox]
 minversion = 3.18.0
 envlist = pep8
-skipsdist = True
 ignore_basepython_conflict = True
 
 [testenv]
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index f4dd9db..303dc66 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -19,6 +19,16 @@
           - controller
 
 - nodeset:
+    name: octavia-single-node-ubuntu-jammy
+    nodes:
+      - name: controller
+        label: nested-virt-ubuntu-jammy
+    groups:
+      - name: tempest
+        nodes:
+          - controller
+
+- nodeset:
     name: octavia-single-node-centos-7
     nodes:
       - name: controller
@@ -62,9 +72,9 @@
     name: octavia-two-node
     nodes:
       - name: controller
-        label: nested-virt-ubuntu-focal
+        label: nested-virt-ubuntu-jammy
       - name: controller2
-        label: nested-virt-ubuntu-focal
+        label: nested-virt-ubuntu-jammy
     groups:
       - name: controller
         nodes:
@@ -209,7 +219,7 @@
 - job:
     name: octavia-dsvm-live-base
     parent: octavia-dsvm-base
-    nodeset: octavia-single-node-ubuntu-focal
+    nodeset: octavia-single-node-ubuntu-jammy
     timeout: 9000
     required-projects:
       - openstack/diskimage-builder
@@ -245,7 +255,7 @@
 - job:
     name: octavia-dsvm-live-base-ipv6-only
     parent: octavia-dsvm-base-ipv6-only
-    nodeset: octavia-single-node-ubuntu-focal
+    nodeset: octavia-single-node-ubuntu-jammy
     timeout: 9000
     required-projects:
       - openstack/diskimage-builder
@@ -506,21 +516,23 @@
       - ^octavia_tempest_plugin/tests/(?!api/|\w+\.py).*
 
 - job:
-    name: octavia-v2-dsvm-noop-api-scoped-tokens
+    name: octavia-v2-dsvm-noop-api-keystone-default-roles
     parent: octavia-v2-dsvm-noop-api
     vars:
+      devstack_localrc:
+        OCTAVIA_USE_KEYSTONE_DEFAULT_ROLES: True
       devstack_local_conf:
         post-config:
           $OCTAVIA_CONF:
             oslo_policy:
-              enforce_scope: True
+              enforce_scope: False
               enforce_new_defaults: True
         test-config:
           "$TEMPEST_CONFIG":
             enforce_scope:
-              octavia: True
+              octavia: False
             load_balancer:
-              enforce_new_defaults: True
+              RBAC_test_type: keystone_default_roles
 
 - job:
     name: octavia-v2-dsvm-noop-py2-api
@@ -530,22 +542,25 @@
         USE_PYTHON3: False
 
 - job:
-    name: octavia-v2-dsvm-noop-api-stable-yoga
+    name: octavia-v2-dsvm-noop-api-stable-2023-2
     parent: octavia-v2-dsvm-noop-api
-    override-checkout: stable/yoga
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
 
 - job:
-    name: octavia-v2-dsvm-noop-api-stable-xena
+    name: octavia-v2-dsvm-noop-api-stable-2023-1
     parent: octavia-v2-dsvm-noop-api
-    override-checkout: stable/xena
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.1
 
 - job:
-    name: octavia-v2-dsvm-noop-api-stable-wallaby
+    name: octavia-v2-dsvm-noop-api-stable-zed
     parent: octavia-v2-dsvm-noop-api
-    override-checkout: stable/wallaby
+    nodeset: octavia-single-node-ubuntu-focal
+    override-checkout: stable/zed
 
 - job:
-    name: octavia-v2-dsvm-scenario
+    name: octavia-v2-dsvm-scenario-base
     parent: octavia-dsvm-live-base
     vars:
       devstack_local_conf:
@@ -569,6 +584,38 @@
       - ^octavia_tempest_plugin/tests/(?!scenario/|\w+\.py).*
 
 - job:
+    name: octavia-v2-dsvm-scenario
+    parent: octavia-v2-dsvm-scenario-base
+    branches:
+      regex: ^(stable/(train|ussuri|victoria|wallaby|xena|yoga|zed))
+      negate: true
+    nodeset: octavia-single-node-ubuntu-jammy
+
+- job:
+    name: octavia-v2-dsvm-scenario
+    parent: octavia-v2-dsvm-scenario-base
+    branches: ^(stable/(victoria|wallaby|xena|yoga|zed))
+    nodeset: octavia-single-node-ubuntu-focal
+
+- job:
+    name: octavia-v2-dsvm-scenario
+    parent: octavia-v2-dsvm-scenario-base
+    branches: ^(stable/(train|ussuri))
+    nodeset: octavia-single-node-ubuntu-bionic
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops
+    parent: octavia-v2-dsvm-scenario
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops
+    parent: octavia-v2-dsvm-scenario
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
     name: octavia-v2-dsvm-scenario-ipv6-only
     parent: octavia-dsvm-live-base-ipv6-only
     vars:
@@ -599,19 +646,58 @@
         override-checkout: 2.30.0
 
 - job:
-    name: octavia-v2-dsvm-scenario-stable-yoga
+    name: octavia-v2-dsvm-scenario-stable-2023-2
     parent: octavia-v2-dsvm-scenario
-    override-checkout: stable/yoga
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
 
 - job:
-    name: octavia-v2-dsvm-scenario-stable-xena
-    parent: octavia-v2-dsvm-scenario
-    override-checkout: stable/xena
+    name: octavia-v2-dsvm-scenario-traffic-ops-stable-2023-2
+    parent: octavia-v2-dsvm-scenario-stable-2023-2
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
 
 - job:
-    name: octavia-v2-dsvm-scenario-stable-wallaby
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-2
+    parent: octavia-v2-dsvm-scenario-stable-2023-2
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
+    name: octavia-v2-dsvm-scenario-stable-2023-1
     parent: octavia-v2-dsvm-scenario
-    override-checkout: stable/wallaby
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.1
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops-stable-2023-1
+    parent: octavia-v2-dsvm-scenario-stable-2023-1
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-1
+    parent: octavia-v2-dsvm-scenario-stable-2023-1
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
+    name: octavia-v2-dsvm-scenario-stable-zed
+    parent: octavia-v2-dsvm-scenario
+    nodeset: octavia-single-node-ubuntu-focal
+    override-checkout: stable/zed
+
+- job:
+    name: octavia-v2-dsvm-scenario-traffic-ops-stable-zed
+    parent: octavia-v2-dsvm-scenario-stable-zed
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-non-traffic-ops-stable-zed
+    parent: octavia-v2-dsvm-scenario-stable-zed
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
 
 # Legacy jobs for the transition to the act-stdby two node jobs
 - job:
@@ -723,15 +809,27 @@
               ssh_key_type: ecdsa
 
 - job:
-    name: octavia-v2-dsvm-scenario-ubuntu-focal
+    name: octavia-v2-dsvm-scenario-centos-9-stream-traffic-ops
+    parent: octavia-v2-dsvm-scenario-centos-9-stream
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.*traffic_ops
+
+- job:
+    name: octavia-v2-dsvm-scenario-centos-9-stream-non-traffic-ops
+    parent: octavia-v2-dsvm-scenario-centos-9-stream
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2.(?!.*traffic_ops)
+
+- job:
+    name: octavia-v2-dsvm-scenario-ubuntu-jammy
     parent: octavia-v2-dsvm-scenario
     vars:
       devstack_localrc:
         OCTAVIA_AMP_BASE_OS: ubuntu
-        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: focal
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: jammy
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican
+    name: octavia-v2-dsvm-tls-barbican-base
     parent: octavia-v2-dsvm-scenario
     required-projects:
       - openstack/barbican
@@ -756,32 +854,51 @@
       - ^octavia_tempest_plugin/tests/(?!barbican_scenario/|\w+\.py).*
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-yoga
-    parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/yoga
+    name: octavia-v2-dsvm-tls-barbican
+    parent: octavia-v2-dsvm-tls-barbican-base
+    branches:
+      regex: ^(stable/(train|ussuri|victoria|wallaby|xena|yoga|zed))
+      negate: true
+    nodeset: octavia-single-node-ubuntu-jammy
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-xena
-    parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/xena
+    name: octavia-v2-dsvm-tls-barbican
+    parent: octavia-v2-dsvm-tls-barbican-base
+    branches: ^(stable/(victoria|wallaby|xena|yoga|zed))
+    nodeset: octavia-single-node-ubuntu-focal
 
 - job:
-    name: octavia-v2-dsvm-tls-barbican-stable-wallaby
-    parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/wallaby
-
-- job:
-    name: octavia-v2-dsvm-tls-barbican-stable-victoria
-    parent: octavia-v2-dsvm-tls-barbican
-    override-checkout: stable/victoria
-
-- job:
-    name: octavia-v2-dsvm-tls-barbican-stable-ussuri
-    parent: octavia-v2-dsvm-tls-barbican
+    name: octavia-v2-dsvm-tls-barbican
+    parent: octavia-v2-dsvm-tls-barbican-base
+    branches: ^(stable/(train|ussuri))
     nodeset: octavia-single-node-ubuntu-bionic
-    override-checkout: stable/ussuri
+    required-projects:
+      - openstack/barbican
+      - openstack/diskimage-builder
+      - openstack/python-barbicanclient
+      - name: openstack/barbican-tempest-plugin
+        override-checkout: 1.6.0
 
 - job:
+    name: octavia-v2-dsvm-tls-barbican-stable-2023-2
+    parent: octavia-v2-dsvm-tls-barbican
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
+
+- job:
+    name: octavia-v2-dsvm-tls-barbican-stable-2023-1
+    parent: octavia-v2-dsvm-tls-barbican
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.1
+
+- job:
+    name: octavia-v2-dsvm-tls-barbican-stable-zed
+    parent: octavia-v2-dsvm-tls-barbican
+    nodeset: octavia-single-node-ubuntu-focal
+    override-checkout: stable/zed
+
+# Still used by barbican
+- job:
     name: octavia-v2-dsvm-tls-barbican-stable-train
     parent: octavia-v2-dsvm-tls-barbican
     nodeset: octavia-single-node-ubuntu-bionic
@@ -815,6 +932,7 @@
 - job:
     name: octavia-v2-dsvm-spare-pool-stable-wallaby
     parent: octavia-v2-dsvm-spare-pool
+    nodeset: octavia-single-node-ubuntu-focal
     override-checkout: stable/wallaby
 
 - job:
@@ -930,7 +1048,7 @@
               amphora_ssh_user: centos
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario
+    name: octavia-v2-act-stdby-dsvm-scenario-base
     parent: octavia-dsvm-live-base
     vars:
       devstack_local_conf:
@@ -947,19 +1065,42 @@
       tox_envlist: all
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-yoga
-    parent: octavia-v2-act-stdby-dsvm-scenario
-    override-checkout: stable/yoga
+    name: octavia-v2-act-stdby-dsvm-scenario
+    parent: octavia-v2-act-stdby-dsvm-scenario-base
+    branches:
+      regex: ^(stable/(train|ussuri|victoria|wallaby|xena|yoga|zed))
+      negate: true
+    nodeset: octavia-single-node-ubuntu-jammy
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-xena
-    parent: octavia-v2-act-stdby-dsvm-scenario
-    override-checkout: stable/xena
+    name: octavia-v2-act-stdby-dsvm-scenario
+    parent: octavia-v2-act-stdby-dsvm-scenario-base
+    branches: ^(stable/(victoria|wallaby|xena|yoga|zed))
+    nodeset: octavia-single-node-ubuntu-focal
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-wallaby
+    name: octavia-v2-act-stdby-dsvm-scenario
+    parent: octavia-v2-act-stdby-dsvm-scenario-base
+    branches: ^(stable/(train|ussuri))
+    nodeset: octavia-single-node-ubuntu-bionic
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-2023-2
     parent: octavia-v2-act-stdby-dsvm-scenario
-    override-checkout: stable/wallaby
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.2
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-2023-1
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    nodeset: octavia-single-node-ubuntu-jammy
+    override-checkout: stable/2023.1
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-zed
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    nodeset: octavia-single-node-ubuntu-focal
+    override-checkout: stable/zed
 
 ######### Third party jobs ##########
 
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index 116da49..e052720 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -9,35 +9,37 @@
     check:
       jobs:
         - octavia-v2-dsvm-noop-api
-        - octavia-v2-dsvm-noop-api-stable-yoga
-        - octavia-v2-dsvm-noop-api-stable-xena
-        - octavia-v2-dsvm-noop-api-stable-wallaby
-        - octavia-v2-dsvm-noop-api-scoped-tokens
-        - octavia-v2-dsvm-scenario
-        - octavia-v2-dsvm-scenario-stable-yoga
-        - octavia-v2-dsvm-scenario-stable-xena
-        - octavia-v2-dsvm-scenario-stable-wallaby
+        - octavia-v2-dsvm-noop-api-stable-2023-2
+        - octavia-v2-dsvm-noop-api-stable-2023-1
+        - octavia-v2-dsvm-noop-api-stable-zed
+        - octavia-v2-dsvm-noop-api-keystone-default-roles
+        - octavia-v2-dsvm-scenario-traffic-ops
+        - octavia-v2-dsvm-scenario-non-traffic-ops
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2023-2
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-2
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2023-1
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-1
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-zed
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-zed
         - octavia-v2-dsvm-tls-barbican
-        - octavia-v2-dsvm-tls-barbican-stable-yoga
-        - octavia-v2-dsvm-tls-barbican-stable-xena
-        - octavia-v2-dsvm-tls-barbican-stable-wallaby
+        - octavia-v2-dsvm-tls-barbican-stable-2023-2
+        - octavia-v2-dsvm-tls-barbican-stable-2023-1
+        - octavia-v2-dsvm-tls-barbican-stable-zed
         - octavia-v2-dsvm-scenario-ipv6-only:
             voting: false
-        - octavia-v2-dsvm-scenario-centos-8-stream:
+        - octavia-v2-dsvm-scenario-centos-9-stream-traffic-ops:
             voting: false
-        - octavia-v2-dsvm-scenario-centos-9-stream:
+        - octavia-v2-dsvm-scenario-centos-9-stream-non-traffic-ops:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario-two-node:
             voting: false
         - octavia-v2-act-stdby-dsvm-scenario:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-yoga:
+        - octavia-v2-act-stdby-dsvm-scenario-stable-2023-2:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-xena:
+        - octavia-v2-act-stdby-dsvm-scenario-stable-2023-1:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-wallaby:
-            voting: false
-        - octavia-v2-dsvm-spare-pool-stable-wallaby:
+        - octavia-v2-act-stdby-dsvm-scenario-stable-zed:
             voting: false
         - octavia-v2-dsvm-cinder-amphora:
             voting: false
@@ -51,15 +53,19 @@
       fail-fast: true
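+      # fail-fast aborts the remaining gate jobs as soon as one fails.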
       jobs:
         - octavia-v2-dsvm-noop-api
-        - octavia-v2-dsvm-noop-api-stable-yoga
-        - octavia-v2-dsvm-noop-api-stable-xena
-        - octavia-v2-dsvm-noop-api-stable-wallaby
-        - octavia-v2-dsvm-noop-api-scoped-tokens
-        - octavia-v2-dsvm-scenario
-        - octavia-v2-dsvm-scenario-stable-yoga
-        - octavia-v2-dsvm-scenario-stable-xena
-        - octavia-v2-dsvm-scenario-stable-wallaby
+        - octavia-v2-dsvm-noop-api-stable-2023-2
+        - octavia-v2-dsvm-noop-api-stable-2023-1
+        - octavia-v2-dsvm-noop-api-stable-zed
+        - octavia-v2-dsvm-noop-api-keystone-default-roles
+        - octavia-v2-dsvm-scenario-traffic-ops
+        - octavia-v2-dsvm-scenario-non-traffic-ops
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2023-2
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-2
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-2023-1
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-2023-1
+        - octavia-v2-dsvm-scenario-traffic-ops-stable-zed
+        - octavia-v2-dsvm-scenario-non-traffic-ops-stable-zed
         - octavia-v2-dsvm-tls-barbican
-        - octavia-v2-dsvm-tls-barbican-stable-yoga
-        - octavia-v2-dsvm-tls-barbican-stable-xena
-        - octavia-v2-dsvm-tls-barbican-stable-wallaby
+        - octavia-v2-dsvm-tls-barbican-stable-2023-2
+        - octavia-v2-dsvm-tls-barbican-stable-2023-1
+        - octavia-v2-dsvm-tls-barbican-stable-zed