Add tests for allowed CIDRs in listeners

This patch adds API and scenario tests for the allowed CIDRs support in
listeners introduced in API version 2.12 (included in the Train release).

Change-Id: Ibe677e046afc16f038ccacb10e5fe62802828581
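
For context, allowed_cidrs is a list-valued listener attribute that limits
which source addresses may connect through the VIP; with the amphora driver
this is typically enforced via the VIP port's security group (see the Neutron
note in the scenario tests below). A minimal sketch of a create request
carrying the new field (the endpoint payload shape and the placeholder values
are illustrative, not taken from this patch):

    # Hypothetical Octavia v2 request body for creating a listener that
    # only accepts clients from one IPv4 subnet.
    listener_body = {
        'listener': {
            'name': 'cidr-restricted-listener',
            'protocol': 'HTTP',
            'protocol_port': 8080,
            'loadbalancer_id': 'REPLACE-WITH-LB-UUID',
            'allowed_cidrs': ['192.0.2.0/24'],
        }
    }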
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 0e24283..16d6cdf 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -14,6 +14,7 @@
 
 # API field names
 ACTIVE_CONNECTIONS = 'active_connections'
+ALLOWED_CIDRS = 'allowed_cidrs'
 AVAILABILITY_ZONE = 'availability_zone'
 AVAILABILITY_ZONE_DATA = 'availability_zone_data'
 AVAILABILITY_ZONE_PROFILE_ID = 'availability_zone_profile_id'
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py b/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
index c059a84..a3f5958 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/listener_client.py
@@ -40,7 +40,7 @@
                         default_tls_container_ref=Unset,
                         sni_container_refs=Unset, client_authentication=Unset,
                         client_ca_tls_container_ref=Unset,
-                        client_crl_container_ref=Unset,
+                        client_crl_container_ref=Unset, allowed_cidrs=Unset,
                         return_object_only=True):
         """Create a listener.
 
@@ -88,6 +88,7 @@
                                          secret containing a PEM format CA
                                          revocation list file for
                                          TERMINATED_HTTPS listeners.
+        :param allowed_cidrs: A list of IPv4 or IPv6 CIDRs.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
@@ -213,7 +214,7 @@
                         default_tls_container_ref=Unset,
                         sni_container_refs=Unset, client_authentication=Unset,
                         client_ca_tls_container_ref=Unset,
-                        client_crl_container_ref=Unset,
+                        client_crl_container_ref=Unset, allowed_cidrs=Unset,
                         return_object_only=True):
         """Update a listener.
 
@@ -259,6 +260,7 @@
                                          secret containing a PEM format CA
                                          revocation list file for
                                          TERMINATED_HTTPS listeners.
+        :param allowed_cidrs: A list of IPv4 or IPv6 CIDRs.
         :param return_object_only: If True, the response returns the object
                                    inside the root tag. False returns the full
                                    response from the API.
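
A hedged usage sketch of the extended client methods, as the tests below
exercise them (the mem_listener_client fixture appears in the tests; the
pre-created lb_id and the literal values are assumptions):

    # Create a listener locked down to one IPv4 subnet, then widen it.
    listener = self.mem_listener_client.create_listener(
        name='listener1-cidrs', protocol='HTTP', protocol_port=8080,
        loadbalancer_id=lb_id,
        allowed_cidrs=['192.0.2.0/24'])
    self.mem_listener_client.update_listener(
        listener['id'],
        allowed_cidrs=['0.0.0.0/0'])  # allow any IPv4 client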
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index fb542ce..db98958 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -59,6 +59,10 @@
                                 CONF.load_balancer.lb_build_interval,
                                 CONF.load_balancer.lb_build_timeout)
 
+        cls.allowed_cidrs = ['192.0.1.0/24']
+        if CONF.load_balancer.test_with_ipv6:
+            cls.allowed_cidrs = ['2001:db8:a0b:12f0::/64']
+
     @decorators.idempotent_id('88d0ec83-7b08-48d9-96e2-0df1d2f8cd98')
     def test_listener_create(self):
         """Tests listener create and basic show APIs.
@@ -109,6 +113,18 @@
                 const.TAGS: listener_tags
             })
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            # Test that CIDR IP version matches VIP IP version
+            bad_cidrs = ['192.0.1.0/24', '2001:db8:a0b:12f0::/64']
+            listener_kwargs.update({const.ALLOWED_CIDRS: bad_cidrs})
+            self.assertRaises(
+                exceptions.BadRequest,
+                self.mem_listener_client.create_listener,
+                **listener_kwargs)
+
+            listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
+
         # Test that a user without the load balancer role cannot
         # create a listener
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
@@ -177,6 +193,10 @@
             self.assertCountEqual(listener_kwargs[const.TAGS],
                                   listener[const.TAGS])
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
+
     @decorators.idempotent_id('cceac303-4db5-4d5a-9f6e-ff33780a5f29')
     def test_listener_create_on_same_port(self):
         """Tests listener creation on same port number.
@@ -521,6 +541,9 @@
             show_listener_response_fields.append('timeout_member_connect')
             show_listener_response_fields.append('timeout_member_data')
             show_listener_response_fields.append('timeout_tcp_inspect')
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            show_listener_response_fields.append('allowed_cidrs')
         for field in show_listener_response_fields:
             if field in (const.DEFAULT_POOL_ID, const.L7_POLICIES):
                 continue
@@ -644,6 +667,10 @@
                 const.TAGS: listener_tags
             })
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
+
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.addClassResourceCleanup(
             self.mem_listener_client.cleanup_listener,
@@ -703,6 +730,10 @@
         else:
             self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
+
         # Test that a user with lb_admin role can see the listener
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
             listener_client = self.os_roles_lb_admin.listener_client
@@ -779,6 +810,10 @@
                 const.TAGS: listener_tags
             })
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
+
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.addClassResourceCleanup(
             self.mem_listener_client.cleanup_listener,
@@ -825,6 +860,10 @@
             self.assertCountEqual(listener_kwargs[const.TAGS],
                                   listener[const.TAGS])
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
+
         # Test that a user, without the load balancer member role, cannot
         # use this command
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
@@ -888,6 +927,21 @@
                 const.TAGS: listener_updated_tags
             })
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            # Test that CIDR IP version matches VIP IP version
+            bad_cidrs = ['192.0.2.0/24', '2001:db8::/6']
+            listener_update_kwargs.update({const.ALLOWED_CIDRS: bad_cidrs})
+            self.assertRaises(
+                exceptions.BadRequest,
+                self.mem_listener_client.update_listener,
+                listener[const.ID], **listener_update_kwargs)
+
+            new_cidrs = ['192.0.2.0/24']
+            if CONF.load_balancer.test_with_ipv6:
+                new_cidrs = ['2001:db8::/64']
+            listener_update_kwargs.update({const.ALLOWED_CIDRS: new_cidrs})
+
         listener = self.mem_listener_client.update_listener(
             listener[const.ID], **listener_update_kwargs)
 
@@ -936,6 +990,13 @@
             self.assertCountEqual(listener_update_kwargs[const.TAGS],
                                   listener[const.TAGS])
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            expected_cidrs = ['192.0.2.0/24']
+            if CONF.load_balancer.test_with_ipv6:
+                expected_cidrs = ['2001:db8::/64']
+            self.assertEqual(expected_cidrs, listener[const.ALLOWED_CIDRS])
+
     @decorators.idempotent_id('16f11c82-f069-4592-8954-81b35a98e3b7')
     def test_listener_delete(self):
         """Tests listener create and delete APIs.
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index b37ab57..fbfe930 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -12,6 +12,8 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import requests
+
 from tempest import config
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
@@ -290,3 +292,144 @@
                                      'in Octavia API version 2.1 or newer')
 
         self._test_ipv6_vip_ipv6_members_traffic(const.UDP, 8080)
+
+    @decorators.idempotent_id('84b23f68-4bc3-49e5-8372-60c25fe69613')
+    def test_listener_with_allowed_cidrs(self):
+        """Tests traffic through a loadbalancer with allowed CIDRs set.
+
+        * Set up listener with allowed CIDRs (allow all) on a loadbalancer.
+        * Set up pool on a loadbalancer.
+        * Set up members on a loadbalancer.
+        * Test traffic to ensure it is balanced properly.
+        * Update allowed CIDRs to restrict traffic to a small subnet.
+        * Assert loadbalancer does not respond to client requests.
+        """
+
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            raise self.skipException('Allowed CIDRs in listeners are only '
+                                     'available in Octavia API version 2.12 '
+                                     'or newer.')
+
+        listener_name = data_utils.rand_name("lb_member_listener2_cidrs")
+        listener_port = 8080
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: self.protocol,
+            const.PROTOCOL_PORT: listener_port,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.ALLOWED_CIDRS: ['::/0']
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        listener_id = listener[const.ID]
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool_name = data_utils.rand_name("lb_member_pool3_cidrs")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: self.protocol,
+            const.LB_ALGORITHM: self.lb_algorithm,
+            const.LISTENER_ID: listener_id,
+        }
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        pool_id = pool[const.ID]
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-cidrs-traffic")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(
+            **member1_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-cidrs-traffic")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Send some traffic
+        self.check_members_balanced(
+            self.lb_vip_address, protocol_port=listener_port)
+
+        listener_kwargs = {
+            const.LISTENER_ID: listener_id,
+            const.ALLOWED_CIDRS: ['2001:db8:a0b:12f0::/128']
+        }
+        self.mem_listener_client.update_listener(**listener_kwargs)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        url_for_vip = 'http://[{}]:{}/'.format(self.lb_vip_address,
+                                               listener_port)
+
+        # NOTE: Before we start with the consistent response check, we must
+        # wait until Neutron completes the SG update.
+        # See https://bugs.launchpad.net/neutron/+bug/1866353.
+        def expect_conn_error(url):
+            try:
+                requests.Session().get(url)
+            except requests.exceptions.ConnectionError:
+                return True
+            return False
+
+        waiters.wait_until_true(expect_conn_error, url=url_for_vip)
+
+        # Assert that the server is consistently unavailable
+        self.assertConsistentResponse(
+            (None, None), url_for_vip, repeat=3, conn_error=True)
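
The expect_conn_error closure treats a TCP-level failure as success: once
allowed_cidrs excludes the client, its packets are dropped at the VIP rather
than answered. A self-contained version of the same probe (the URL and the
timeout parameter are illustrative additions; a connect timeout also counts,
since requests.exceptions.ConnectTimeout subclasses ConnectionError):

    import requests

    def expect_conn_error(url, timeout=2):
        # True when the request never gets a connection, which is the
        # expected outcome for a client outside the allowed CIDRs.
        try:
            requests.Session().get(url, timeout=timeout)
        except requests.exceptions.ConnectionError:
            return True
        return False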
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index bb3df64..c056bd0 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -96,6 +96,10 @@
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
 
+        cls.allowed_cidrs = ['192.0.1.0/24']
+        if CONF.load_balancer.test_with_ipv6:
+            cls.allowed_cidrs = ['2001:db8:a0b:12f0::/64']
+
     @decorators.idempotent_id('4a874014-b7d1-49a4-ac9a-2400b3434700')
     def test_listener_CRUD(self):
         """Tests listener create, read, update, delete
@@ -134,6 +138,9 @@
                 const.TIMEOUT_MEMBER_DATA: 1000,
                 const.TIMEOUT_TCP_INSPECT: 50,
             })
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.addCleanup(
@@ -176,6 +183,9 @@
             self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
             self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
             self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
 
         # Listener update
         new_name = data_utils.rand_name("lb_member_listener1-update")
@@ -204,6 +214,13 @@
                 const.TIMEOUT_TCP_INSPECT: 100,
             })
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            new_cidrs = ['192.0.2.0/24']
+            if CONF.load_balancer.test_with_ipv6:
+                new_cidrs = ['2001:db8::/64']
+            listener_update_kwargs.update({const.ALLOWED_CIDRS: new_cidrs})
+
         listener = self.mem_listener_client.update_listener(
             listener[const.ID], **listener_update_kwargs)
 
@@ -249,6 +266,12 @@
             self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
             self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
             self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            expected_cidrs = ['192.0.2.0/24']
+            if CONF.load_balancer.test_with_ipv6:
+                expected_cidrs = ['2001:db8::/64']
+            self.assertEqual(expected_cidrs, listener[const.ALLOWED_CIDRS])
 
         # Listener delete
         waiters.wait_for_status(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index 5c831d8..7dd4a29 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -14,6 +14,7 @@
 
 import datetime
 import ipaddress
+import requests
 import shlex
 import testtools
 import time
@@ -940,3 +941,146 @@
         self.assertEqual(member_id, fields[13])  # member_id
         self.assertTrue(fields[14].isdigit())  # processing_time
         self.assertEqual('----', fields[15])  # term_state
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Traffic tests will not work in noop mode.')
+    @decorators.idempotent_id('13b0f2de-9934-457b-8be0-f1bffc6915a0')
+    def test_listener_with_allowed_cidrs(self):
+        """Tests traffic through a loadbalancer with allowed CIDRs set.
+
+        * Set up listener with allowed CIDRs (allow all) on a loadbalancer.
+        * Set up pool on a loadbalancer.
+        * Set up members on a loadbalancer.
+        * Test traffic to ensure it is balanced properly.
+        * Update allowed CIDRs to restrict traffic to a small subnet.
+        * Assert loadbalancer does not respond to client requests.
+        """
+
+        if not self.mem_listener_client.is_version_supported(
+                self.api_version, '2.12'):
+            raise self.skipException('Allowed CIDRs in listeners are only '
+                                     'available in Octavia API version 2.12 '
+                                     'or newer.')
+
+        listener_name = data_utils.rand_name("lb_member_listener2_cidrs")
+        listener_port = 8080
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: self.protocol,
+            const.PROTOCOL_PORT: listener_port,
+            const.LOADBALANCER_ID: self.lb_id,
+            const.ALLOWED_CIDRS: ['0.0.0.0/0']
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        listener_id = listener[const.ID]
+        self.addCleanup(
+            self.mem_listener_client.cleanup_listener,
+            listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool_name = data_utils.rand_name("lb_member_pool3_cidrs")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: self.protocol,
+            const.LB_ALGORITHM: self.lb_algorithm,
+            const.LISTENER_ID: listener_id,
+        }
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        pool_id = pool[const.ID]
+        self.addCleanup(
+            self.mem_pool_client.cleanup_pool,
+            pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-cidrs-traffic")
+        member1_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(
+            **member1_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-cidrs-traffic")
+        member2_kwargs = {
+            const.POOL_ID: pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(**member2_kwargs)
+        self.addCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Send some traffic
+        self.check_members_balanced(
+            self.lb_vip_address, protocol_port=listener_port)
+
+        listener_kwargs = {
+            const.LISTENER_ID: listener_id,
+            const.ALLOWED_CIDRS: ['192.0.1.0/32']
+        }
+        self.mem_listener_client.update_listener(**listener_kwargs)
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        url_for_vip = 'http://{}:{}/'.format(
+            self.lb_vip_address, listener_port)
+
+        # NOTE: Before we start with the consistent response check, we must
+        # wait until Neutron completes the SG update.
+        # See https://bugs.launchpad.net/neutron/+bug/1866353.
+        def expect_conn_error(url):
+            try:
+                requests.Session().get(url)
+            except requests.exceptions.ConnectionError:
+                return True
+            return False
+
+        waiters.wait_until_true(expect_conn_error, url=url_for_vip)
+
+        # Assert that the server is consistently unavailable
+        self.assertConsistentResponse(
+            (None, None), url_for_vip, repeat=3, conn_error=True)
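
Here the update swaps '0.0.0.0/0' for '192.0.1.0/32', a single-host CIDR that
cannot contain the test client, so the VIP must stop answering. A sanity
check of that assumption, sketched with the ipaddress stdlib (the client
address is illustrative):

    import ipaddress

    client = ipaddress.ip_address('203.0.113.10')
    restricted = ipaddress.ip_network('192.0.1.0/32')
    # False: the client falls outside the allowed range, so its
    # connections should now be dropped before reaching the listener.
    print(client in restricted)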
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 741bb1c..bd1a225 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -503,6 +503,8 @@
             if CONF.load_balancer.test_with_noop:
                 lb_kwargs[const.VIP_NETWORK_ID] = (
                     cls.lb_member_vip_net[const.ID])
+                if ip_version == 6:
+                    lb_kwargs[const.VIP_ADDRESS] = lb_vip_address
         else:
             lb_kwargs[const.VIP_NETWORK_ID] = cls.lb_member_vip_net[const.ID]
             lb_kwargs[const.VIP_SUBNET_ID] = None
@@ -1099,7 +1101,8 @@
             protocol_port=protocol_port)
 
     def assertConsistentResponse(self, response, url, method='GET', repeat=10,
-                                 redirect=False, timeout=2, **kwargs):
+                                 redirect=False, timeout=2,
+                                 conn_error=False, **kwargs):
         """Assert that a request to URL gets the expected response.
 
         :param response: Expected response in format (status_code, content).
@@ -1112,6 +1115,7 @@
         :param redirect: Is the request a redirect? If true, assume the passed
                          content should be the next URL in the chain.
         :param timeout: Optional seconds to wait for the server to send data.
+        :param conn_error: Optional. When True, expect a connection error
+                           instead of a response.
 
         :return: boolean success status
 
@@ -1121,6 +1125,13 @@
         response_code, response_content = response
 
         for i in range(0, repeat):
+            if conn_error:
+                self.assertRaises(
+                    requests.exceptions.ConnectionError, session.request,
+                    method, url, allow_redirects=not redirect, timeout=timeout,
+                    **kwargs)
+                continue
+
             req = session.request(method, url, allow_redirects=not redirect,
                                   timeout=timeout, **kwargs)
             if response_code:
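
A minimal standalone equivalent of the new conn_error branch (unittest-based
sketch; the URL is hypothetical): every attempt must fail at the connection
level, and the expected (status, content) tuple is ignored, which is why the
scenario tests pass (None, None).

    import unittest

    import requests

    class BlockedVipTest(unittest.TestCase):
        URL = 'http://192.0.2.1:8080/'  # illustrative blocked VIP

        def test_consistently_refused(self):
            # Mirrors assertConsistentResponse(..., repeat=3,
            # conn_error=True): three attempts, all refused.
            session = requests.Session()
            for _ in range(3):
                self.assertRaises(
                    requests.exceptions.ConnectionError,
                    session.get, self.URL, timeout=2)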
diff --git a/octavia_tempest_plugin/tests/waiters.py b/octavia_tempest_plugin/tests/waiters.py
index eb7410a..e0d9d2d 100644
--- a/octavia_tempest_plugin/tests/waiters.py
+++ b/octavia_tempest_plugin/tests/waiters.py
@@ -210,3 +210,26 @@
                            timeout=check_timeout))
             raise exceptions.TimeoutException(message)
         time.sleep(check_interval)
+
+
+def wait_until_true(func, timeout=60, sleep=1, **kwargs):
+    """Wait until callable predicate is evaluated as True
+
+    :param func: Callable deciding whether waiting should continue.
+    :param timeout: How long to wait for the predicate, in seconds.
+    :param sleep: Polling interval for results in seconds.
+    """
+    start = int(time.time())
+    while True:
+        try:
+            ret = func(**kwargs)
+            if ret:
+                return
+        except Exception as e:
+            LOG.error(e)
+
+        if int(time.time()) - start >= timeout:
+            message = ("Timed out after {timeout} seconds waiting for the "
+                       "predicate to return True".format(timeout=timeout))
+            raise exceptions.TimeoutException(message)
+        time.sleep(sleep)
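
A quick usage sketch of the new waiter, matching how the scenario tests call
it (the timeout and sleep values are illustrative; expect_conn_error and
url_for_vip are the probe and URL defined in those tests):

    # Poll up to 30 seconds, every 2 seconds, until the probe reports
    # that the VIP has stopped answering; kwargs are forwarded to func.
    waiters.wait_until_true(
        expect_conn_error, timeout=30, sleep=2, url=url_for_vip)

Exceptions raised by the predicate are logged and treated as "not yet true",
so a transiently failing probe does not abort the wait early; only the
timeout does, via exceptions.TimeoutException.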