Merge "Remove skip flag for tests skipped due to inactive bug"
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 3ceea1c..8ef8d94 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -131,6 +131,7 @@
 HTTP = 'HTTP'
 HTTPS = 'HTTPS'
 PROXY = 'PROXY'
+PROMETHEUS = 'PROMETHEUS'
 TCP = 'TCP'
 TERMINATED_HTTPS = 'TERMINATED_HTTPS'
 UDP = 'UDP'
@@ -140,6 +141,11 @@
 POST = 'POST'
 PUT = 'PUT'
 DELETE = 'DELETE'
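+# Additional HTTP methods used by the health monitor scenario tests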
+HEAD = 'HEAD'
+OPTIONS = 'OPTIONS'
+PATCH = 'PATCH'
+CONNECT = 'CONNECT'
+TRACE = 'TRACE'
 
 # HM Types
 HEALTH_MONITOR_PING = 'PING'
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 4d1543b..502bdec 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -285,6 +285,8 @@
                 help="Whether the log offload tests will run. These require "
                      "the tempest instance have access to the log files "
                      "specified in the tempest configuration."),
+    cfg.BoolOpt('prometheus_listener_enabled', default=True,
+                help="Whether the PROMETHEUS listener tests will run."),
 ]
 
 # Extending this enforce_scope group defined in tempest
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 74581c2..625bcf7 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -21,6 +21,7 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 from tempest.lib import exceptions
+import testtools
 
 from octavia_tempest_plugin.common import constants as const
 from octavia_tempest_plugin.tests import test_base
@@ -79,6 +80,17 @@
     def test_tcp_listener_create(self):
         self._test_listener_create(const.TCP, 8002)
 
+    @decorators.idempotent_id('1a6ba0d0-f309-4088-a686-dda0e9ab7e43')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_create(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_create(const.PROMETHEUS, 8090)
+
     @decorators.idempotent_id('7b53f336-47bc-45ae-bbd7-4342ef0673fc')
     def test_udp_listener_create(self):
         self._test_listener_create(const.UDP, 8003)
@@ -364,6 +376,17 @@
     def test_https_listener_list(self):
         self._test_listener_list(const.HTTPS, 8030)
 
+    @decorators.idempotent_id('5473e071-8277-4ac5-9277-01ecaf46e274')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_list(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_list(const.PROMETHEUS, 8091)
+
     @decorators.idempotent_id('1cd476e2-7788-415e-bcaf-c377acfc9794')
     def test_tcp_listener_list(self):
         self._test_listener_list(const.TCP, 8030)
@@ -725,6 +748,17 @@
     def test_https_listener_show(self):
         self._test_listener_show(const.HTTPS, 8051)
 
+    @decorators.idempotent_id('b851b754-4333-4115-9063-a9fce44c2e46')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_show(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_show(const.PROMETHEUS, 8092)
+
     @decorators.idempotent_id('1fcbbee2-b697-4890-b6bf-d308ac1c94cd')
     def test_tcp_listener_show(self):
         self._test_listener_show(const.TCP, 8052)
@@ -873,6 +907,17 @@
     def test_https_listener_update(self):
         self._test_listener_update(const.HTTPS, 8061)
 
+    @decorators.idempotent_id('cbba6bf8-9184-4da5-95e9-5efe1f89ddf0')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_update(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_update(const.PROMETHEUS, 8093)
+
     @decorators.idempotent_id('8d933121-db03-4ccc-8b77-4e879064a9ba')
     def test_tcp_listener_update(self):
         self._test_listener_update(const.TCP, 8062)
@@ -1119,6 +1164,17 @@
     def test_https_listener_delete(self):
         self._test_listener_delete(const.HTTPS, 8071)
 
+    @decorators.idempotent_id('322a6372-6b56-4a3c-87e3-dd82074bc83e')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_delete(self):
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        self._test_listener_delete(const.PROMETHEUS, 8094)
+
     @decorators.idempotent_id('f5ca019d-2b33-48f9-9c2d-2ec169b423ca')
     def test_tcp_listener_delete(self):
         self._test_listener_delete(const.TCP, 8072)
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 3fdada4..a983e13 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -28,6 +28,7 @@
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
+import testtools
 
 from octavia_tempest_plugin.common import barbican_client_mgr
 from octavia_tempest_plugin.common import cert_utils
@@ -1539,3 +1540,77 @@
 
         self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
                                     protocol_port=85)
+
+    @decorators.idempotent_id('7d9dcae6-3e2c-4eae-9bfb-1ef0d00aa530')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_tls_prometheus_client_auth_mandatory(self):
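+        """Test mandatory client authentication on a PROMETHEUS listener
+
+        * Create a TLS-enabled PROMETHEUS listener with mandatory client
+          authentication, a client CA and a CRL.
+        * Verify a connection with no client certificate fails.
+        * Verify a connection with a revoked client certificate fails.
+        * Verify a connection with a valid client certificate succeeds.
+        """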
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+        LISTENER1_TCP_PORT = '9443'
+        listener_name = data_utils.rand_name(
+            "lb_member_listener1-prometheus-client-auth-mand")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.PROMETHEUS,
+            const.PROTOCOL_PORT: LISTENER1_TCP_PORT,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+            const.CLIENT_AUTHENTICATION: const.CLIENT_AUTH_MANDATORY,
+            const.CLIENT_CA_TLS_CONTAINER_REF: self.client_ca_cert_ref,
+            const.CLIENT_CRL_CONTAINER_REF: self.client_crl_ref,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Test that a connection with no client certificate fails
+        self.assertRaises(
+            requests.exceptions.SSLError,
+            requests.get,
+            'https://{0}:{1}'.format(self.lb_vip_address, LISTENER1_TCP_PORT),
+            timeout=12, verify=False)
+
+        # Test that a revoked client certificate fails to connect
+        with tempfile.NamedTemporaryFile(buffering=0) as cert_file:
+            cert_file.write(self.revoked_client_cert.public_bytes(
+                serialization.Encoding.PEM))
+            with tempfile.NamedTemporaryFile(buffering=0) as key_file:
+                key_file.write(self.revoked_client_key.private_bytes(
+                    serialization.Encoding.PEM,
+                    serialization.PrivateFormat.TraditionalOpenSSL,
+                    serialization.NoEncryption()))
+                self.assertRaises(
+                    requests.exceptions.SSLError, requests.get,
+                    'https://{0}:{1}'.format(self.lb_vip_address,
+                                             LISTENER1_TCP_PORT),
+                    timeout=12, verify=False, cert=(cert_file.name,
+                                                    key_file.name))
+
+        # Test that a valid client certificate can connect
+        with tempfile.NamedTemporaryFile(buffering=0) as cert_file:
+            cert_file.write(self.client_cert.public_bytes(
+                serialization.Encoding.PEM))
+            with tempfile.NamedTemporaryFile(buffering=0) as key_file:
+                key_file.write(self.client_key.private_bytes(
+                    serialization.Encoding.PEM,
+                    serialization.PrivateFormat.TraditionalOpenSSL,
+                    serialization.NoEncryption()))
+                response = requests.get(
+                    'https://{0}:{1}'.format(self.lb_vip_address,
+                                             LISTENER1_TCP_PORT),
+                    timeout=12, verify=False, cert=(cert_file.name,
+                                                    key_file.name))
+                self.assertEqual(200, response.status_code)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index d4d43b5..e8221fe 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -204,6 +204,55 @@
             traffic_member_count=traffic_member_count, source_port=source_port,
             delay=delay)
 
+    def _pool_add_healthmonitor(self, pool_id, protocol):
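+        """Create a health monitor on the given pool and wait for ACTIVE
+
+        HTTP pools get an HTTP GET health monitor expecting a 200 response;
+        UDP and TCP pools get UDP-CONNECT and TCP health monitors.
+
+        :param pool_id: The pool to attach the health monitor to.
+        :param protocol: The pool protocol (HTTP, TCP or UDP).
+        :return: The health monitor, once it is ACTIVE.
+        """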
+        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
+        if protocol != const.HTTP:
+            if protocol == const.UDP:
+                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
+            elif protocol == const.TCP:
+                hm_type = const.HEALTH_MONITOR_TCP
+
+            hm_kwargs = {
+                const.POOL_ID: pool_id,
+                const.NAME: hm_name,
+                const.TYPE: hm_type,
+                const.DELAY: 3,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.ADMIN_STATE_UP: True,
+            }
+        else:
+            hm_kwargs = {
+                const.POOL_ID: pool_id,
+                const.NAME: hm_name,
+                const.TYPE: const.HEALTH_MONITOR_HTTP,
+                const.DELAY: 2,
+                const.TIMEOUT: 2,
+                const.MAX_RETRIES: 2,
+                const.MAX_RETRIES_DOWN: 2,
+                const.HTTP_METHOD: const.GET,
+                const.URL_PATH: '/',
+                const.EXPECTED_CODES: '200',
+                const.ADMIN_STATE_UP: True,
+            }
+        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+        self.addCleanup(
+            self.mem_healthmonitor_client.cleanup_healthmonitor,
+            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        hm = waiters.wait_for_status(
+            self.mem_healthmonitor_client.show_healthmonitor,
+            hm[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        return hm
+
     @decorators.attr(type=['smoke', 'slow'])
     @testtools.skipIf(CONF.load_balancer.test_with_noop,
                       'Traffic tests will not work in noop mode.')
@@ -348,54 +397,7 @@
                                     protocol=protocol, persistent=persistent)
 
         # Create the healthmonitor
-        hm_name = data_utils.rand_name("lb_member_hm1-hm-traffic")
-        if protocol != const.HTTP:
-            if protocol == const.UDP:
-                hm_type = const.HEALTH_MONITOR_UDP_CONNECT
-            elif protocol == const.TCP:
-                hm_type = const.HEALTH_MONITOR_TCP
-
-            hm_kwargs = {
-                const.POOL_ID: pool_id,
-                const.NAME: hm_name,
-                const.TYPE: hm_type,
-                const.DELAY: 3,
-                const.TIMEOUT: 2,
-                const.MAX_RETRIES: 2,
-                const.MAX_RETRIES_DOWN: 2,
-                const.ADMIN_STATE_UP: True,
-            }
-        else:
-            hm_kwargs = {
-                const.POOL_ID: pool_id,
-                const.NAME: hm_name,
-                const.TYPE: const.HEALTH_MONITOR_HTTP,
-                const.DELAY: 2,
-                const.TIMEOUT: 2,
-                const.MAX_RETRIES: 2,
-                const.MAX_RETRIES_DOWN: 2,
-                const.HTTP_METHOD: const.GET,
-                const.URL_PATH: '/',
-                const.EXPECTED_CODES: '200',
-                const.ADMIN_STATE_UP: True,
-            }
-
-        hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
-        waiters.wait_for_status(
-            self.mem_lb_client.show_loadbalancer, self.lb_id,
-            const.PROVISIONING_STATUS, const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
-        hm = waiters.wait_for_status(
-            self.mem_healthmonitor_client.show_healthmonitor,
-            hm[const.ID], const.PROVISIONING_STATUS,
-            const.ACTIVE,
-            CONF.load_balancer.build_interval,
-            CONF.load_balancer.build_timeout)
+        hm = self._pool_add_healthmonitor(pool_id, protocol)
 
         # Wait for members to adjust to the correct OPERATING_STATUS
         waiters.wait_for_status(
@@ -1402,3 +1404,218 @@
         self._test_basic_traffic(const.UDP, common_vip_port, pool_id_udp)
         self._test_basic_traffic(const.TCP, common_vip_port, pool_id_tcp,
                                  persistent=False)
+
+    @decorators.idempotent_id('c79f2cd0-0324-11eb-bc8e-74e5f9e2a801')
+    def test_udp_update_pool_healthmonitor_listener(self):
+        """Test scenario:
+
+        * Prerequisites:
+          Create: UDP listener, pool, healtmonitor and validate UDP traffic.
+        * Test scenario:
+          Update pool algorithm to: "source_ip" and start sending UDP traffic.
+          Expected: successfully received UDP packages from LB VIP.
+        * Update healtmonitor with: "delay=20" and start sending UDP traffic.
+          Expected: successfully received UDP packages from LB VIP.
+        * Update listener with: "connection-limit=300" and start sending
+          UDP traffic.
+          Expected: successfully received UDP packages from LB VIP.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            raise self.skipException('UDP listener support is only available '
+                                     'in Octavia API version 2.1 or newer')
+        listener_port = 104
+        listener_id, pool_id = self._listener_pool_create(
+            const.UDP, listener_port)
+        healthmonitor_id = self._pool_add_healthmonitor(
+            pool_id, protocol=const.UDP)[const.ID]
+        self._test_basic_traffic(
+            const.UDP, listener_port, pool_id)
+
+        # Update LB pool
+        self.mem_pool_client.update_pool(
+            pool_id=pool_id, lb_algorithm=const.LB_ALGORITHM_SOURCE_IP)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to the LB VIP have failed')
+
+        # Update LB healthmonitor
+        self.mem_healthmonitor_client.update_healthmonitor(
+            healthmonitor_id=healthmonitor_id, delay=5)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to the LB VIP have failed')
+
+        # Update LB listener
+        listener_kwargs = {const.LISTENER_ID: listener_id,
+                           const.CONNECTION_LIMIT: 300}
+        self.mem_listener_client.update_listener(**listener_kwargs)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+        self.assertIsNotNone(self.make_udp_requests_with_retries(
+            vip_address=self.lb_vip_address, dst_port=listener_port,
+            number_of_retries=3),
+            'Failed - all UDP retries to the LB VIP have failed')
+
+    @decorators.attr(type='slow')
+    @decorators.idempotent_id('cd5aeefa-0e16-11eb-b8dc-74e5f9e2a801')
+    def test_hm_op_status_changed_as_expected_on_update(self):
+        """Test scenario:
+
+        * Create HTTP listener, pool and HTTP health monitor.
+        * Update health monitor with various combinations of:
+          HTTP method, expected HTTP status codes and backend URL.
+          Note: see "fault_cases" and "valid_cases" lists in test's code.
+        * Validate that members' operation status is getting into
+          appropriate state after each particular update done within the test.
+          Important: "operation status" value is expected to be changed from
+          ONLINE to ERROR after each update, otherwise we may miss
+          the potential bug.
+        """
+        listener_port = 105
+        listener_id, pool_id = self._listener_pool_create(
+            const.TCP, listener_port)
+        hm_id = self._pool_add_healthmonitor(
+            pool_id, protocol=const.HTTP)[const.ID]
+        self._test_basic_traffic(
+            const.HTTP, listener_port, pool_id, persistent=False)
+        mb_ids = [mb[const.ID] for
+                  mb in self.mem_member_client.list_members(pool_id)]
+
+        # Create list of test cases to be covered in test
+        fault_cases = [
+            {'mthd': const.POST, 'code': '101-102', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=103'},
+            {'mthd': const.DELETE, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.PUT, 'code': '301-302', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=303'},
+            {'mthd': const.HEAD, 'code': '400-404', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=405'},
+            {'mthd': const.OPTIONS, 'code': '500-504', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=505'},
+            {'mthd': const.PATCH, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.CONNECT, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'},
+            {'mthd': const.TRACE, 'code': '201-204', 'op_stat': const.ERROR,
+             'url_path': '/request?response_code=205'}]
+        valid_cases = [
+            {'mthd': const.GET, 'code': '101-102', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=102'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=202'},
+            {'mthd': const.GET, 'code': '301-302', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=302'},
+            {'mthd': const.GET, 'code': '400-404', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=404'},
+            {'mthd': const.GET, 'code': '500-504', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=504'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'},
+            {'mthd': const.GET, 'code': '201-204', 'op_stat': const.ONLINE,
+             'url_path': '/request?response_code=204'}]
+        # Generate "flip_flop" using zip function, that will have
+        # the operation statuses changed on each subsequent test case.
+        # It means interleaved like: ERROR, ONLINE, ERROR, ONLINE...
+        flip_flop = [v for f in zip(valid_cases, fault_cases) for v in f]
+
+        # For each test case, update HM and validate that members'
+        # "Operation Status" is changed to expected value.
+        for ff in flip_flop:
+            LOG.info('Testing case: {}'.format(ff))
+            self.mem_healthmonitor_client.update_healthmonitor(
+                hm_id, expected_codes=ff['code'], http_method=ff['mthd'],
+                url_path=ff['url_path'])
+            waiters.wait_for_status(
+                self.mem_lb_client.show_loadbalancer, self.lb_id,
+                const.PROVISIONING_STATUS, const.ACTIVE,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout)
+            for mb_id in mb_ids:
+                waiters.wait_for_status(
+                    self.mem_member_client.show_member,
+                    mb_id, const.OPERATING_STATUS,
+                    ff['op_stat'],
+                    CONF.load_balancer.check_interval,
+                    CONF.load_balancer.check_timeout,
+                    error_ok=True, pool_id=pool_id)
+
+    @decorators.idempotent_id('05e99fb3-2b37-478e-889b-77f1c731a471')
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.prometheus_listener_enabled,
+        'PROMETHEUS listener tests are disabled in the tempest configuration.')
+    def test_prometheus_listener_metrics_page(self):
+        """Tests PROMETHEUS listener create and metrics endpoint is available
+
+        * Create PROMETHEUS listener.
+        * Query the metrics endpoint on the load balancer.
+        """
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.25'):
+            raise self.skipException('PROMETHEUS listeners are only available '
+                                     'on Octavia API version 2.25 or newer.')
+
+        # Listener create
+        listener_name = data_utils.rand_name("lb_member_prometheus_listener")
+        listener_description = data_utils.arbitrary_string(size=255)
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.DESCRIPTION: listener_description,
+            const.ADMIN_STATE_UP: True,
+            const.PROTOCOL: const.PROMETHEUS,
+            const.PROTOCOL_PORT: 8080,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.CONNECTION_LIMIT: 200,
+        }
+
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 1000,
+                const.TIMEOUT_MEMBER_CONNECT: 1000,
+                const.TIMEOUT_MEMBER_DATA: 1000,
+                const.TIMEOUT_TCP_INSPECT: 50,
+            })
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            listener_kwargs.update({const.ALLOWED_CIDRS: ['0.0.0.0/0']})
+
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener[const.ID],
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+        listener = waiters.wait_for_status(
+            self.mem_listener_client.show_listener,
+            listener[const.ID], const.PROVISIONING_STATUS,
+            const.ACTIVE,
+            CONF.load_balancer.build_interval,
+            CONF.load_balancer.build_timeout)
+
+        # Make a request to the metrics endpoint
+        URL = 'http://{0}:{1}/metrics'.format(self.lb_vip_address, '8080')
+        self.validate_URL_response(URL, expected_status_code=200)
diff --git a/octavia_tempest_plugin/tests/validators.py b/octavia_tempest_plugin/tests/validators.py
index 5972d54..5afe28a 100644
--- a/octavia_tempest_plugin/tests/validators.py
+++ b/octavia_tempest_plugin/tests/validators.py
@@ -421,3 +421,37 @@
                                                           protocol_port))
         LOG.error(message)
         raise Exception(message)
+
+    def make_udp_requests_with_retries(
+            self, vip_address, number_of_retries, dst_port,
+            src_port=None, socket_timeout=20):
+        """Send UDP packets using retries mechanism
+
+        The delivery of data to the destination cannot be guaranteed in UDP.
+        In case when UDP package is getting lost and we might want to check
+        what could be the reason for that (Network issues or Server Side),
+        well need to send more packets to get into the conclusion.
+
+        :param vip_address: LB VIP address
+        :param number_of_retries: integer number of retries
+        :param dst_port: UDP server destination port
+        :param src_port: UDP source port to bind for UDP connection
+        :param socket_timeout: UDP socket timeout
+        :return: None if all UDP retries failed, else the first successful
+                 response data from the UDP server.
+        """
+        retry_number = 0
+        received_data = None
+        while retry_number < number_of_retries:
+            LOG.info('make_udp_requests_with_retries attempt '
+                     'number: {}'.format(retry_number))
+            retry_number += 1
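+            # Treat any exception from the UDP request as a failed attempt;
+            # log a warning and try again.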
+            try:
+                received_data = self.make_udp_request(
+                    vip_address, dst_port, timeout=socket_timeout,
+                    source_port=src_port)
+                break
+            except Exception as e:
+                LOG.warning('make_udp_request has failed with: '
+                            '{}'.format(e))
+        return received_data
diff --git a/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml b/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml
new file mode 100644
index 0000000..f88f51b
--- /dev/null
+++ b/releasenotes/notes/Add-PROMETHEUS-listener-API-and-scenario-tests-ccab4b09f6a64428.yaml
@@ -0,0 +1,4 @@
+---
+features:
+  - |
+    Added API and scenario tests for PROMETHEUS listeners.
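+    These tests can be disabled with the new ``prometheus_listener_enabled``
+    option.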
diff --git a/tox.ini b/tox.ini
index ed7a3cd..29df1ca 100644
--- a/tox.ini
+++ b/tox.ini
@@ -1,7 +1,6 @@
 [tox]
 minversion = 3.18.0
 envlist = pep8
-skipsdist = True
 ignore_basepython_conflict = True
 
 [testenv]
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index f4dd9db..bd90bea 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -733,12 +733,13 @@
 - job:
     name: octavia-v2-dsvm-tls-barbican
     parent: octavia-v2-dsvm-scenario
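+    # The stable/train and stable/ussuri branches use the variant of this
+    # job defined below, which pins barbican-tempest-plugin to 1.6.0.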
+    branches: ^(?!stable/(train|ussuri)).*$
     required-projects:
       - openstack/barbican
       - openstack/diskimage-builder
       - openstack/python-barbicanclient
       - openstack/barbican-tempest-plugin
-    vars:
+    vars: &octavia-v2-dsvm-tls-barbican-vars
       tempest_test_regex: ^octavia_tempest_plugin.tests.barbican_scenario.v2
       devstack_services:
         barbican: true
@@ -756,6 +757,18 @@
       - ^octavia_tempest_plugin/tests/(?!barbican_scenario/|\w+\.py).*
 
 - job:
+    name: octavia-v2-dsvm-tls-barbican
+    parent: octavia-v2-dsvm-scenario
+    branches: ^(stable/(train|ussuri)).*$
+    required-projects:
+      - openstack/barbican
+      - openstack/diskimage-builder
+      - openstack/python-barbicanclient
+      - name: openstack/barbican-tempest-plugin
+        override-checkout: 1.6.0
+    vars: *octavia-v2-dsvm-tls-barbican-vars
+
+- job:
     name: octavia-v2-dsvm-tls-barbican-stable-yoga
     parent: octavia-v2-dsvm-tls-barbican
     override-checkout: stable/yoga