Merge "Revert "Skip octavia tests with compute when using fixed networks"" into mcp/victoria
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 77d2f6e..b196f2a 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -139,6 +139,9 @@
                       'dict. Example: {"compute_zone": "The compute '
                       'availability zone."}'),
                 default={'compute_zone': 'The compute availability zone.'}),
+    cfg.IntOpt('listener_conn_limit',
+               default=200,
+               help='Defines the listener connection limit.'),
     # Networking
     cfg.BoolOpt('test_with_ipv6',
                 default=True,
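
The new listener_conn_limit option lands in the existing [load_balancer] option group, so the connection limit of 200 previously hard-coded across the tests below becomes configurable through tempest.conf. A minimal sketch of how a test reads it, assuming the standard tempest plugin config wiring (the override value shown is purely illustrative):

    # tempest.conf override (illustrative):
    #   [load_balancer]
    #   listener_conn_limit = 500
    from tempest import config

    CONF = config.CONF

    listener_kwargs = {
        # The tests refer to this key as const.CONNECTION_LIMIT.
        'connection_limit': CONF.load_balancer.listener_conn_limit,
    }
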
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
index 424d911..831582d 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
@@ -339,7 +339,8 @@
         self.expected_success(204, response.status)
         return response.status
 
-    def _cleanup_obj(self, obj_id, lb_client=None, lb_id=None, parent_id=None):
+    def _cleanup_obj(self, obj_id, lb_client=None, lb_id=None, parent_id=None,
+                     cascade=False):
         """Clean up an object (for use in tempest addClassResourceCleanup).
 
         We always need to wait for the parent LB to be in a mutable state
@@ -362,6 +363,8 @@
                           loadbalancer client already.
         :param lb_id: (Optional) The ID of the parent loadbalancer, if the main
                       obj_id is for a sub-object and not a loadbalancer.
+        :param cascade: (Optional) If True, delete all child objects of the
+                        object, provided the object supports cascade deletion.
         :return:
         """
         if parent_id:
@@ -399,8 +402,8 @@
             waiters.wait_for_status(wait_func, wait_id,
                                     const.PROVISIONING_STATUS,
                                     const.ACTIVE,
-                                    self.build_interval,
-                                    self.timeout)
+                                    CONF.load_balancer.check_interval,
+                                    CONF.load_balancer.check_timeout)
         except exceptions.UnexpectedResponseCode:
             # Status is ERROR, go ahead with deletion
             LOG.debug("Found %s %s in ERROR status, proceeding with cleanup.",
@@ -419,7 +422,10 @@
             LOG.error("Cleanup encountered an unknown exception while waiting "
                       "for %s %s: %s", wait_client.root_tag, wait_id, e)
 
-        uri = '{0}/{1}'.format(uri, obj_id)
+        if cascade:
+            uri = '{0}/{1}?cascade=true'.format(uri, obj_id)
+        else:
+            uri = '{0}/{1}'.format(uri, obj_id)
         LOG.info("Cleaning up %s %s...", self.root_tag, obj_id)
         return_status = test_utils.call_and_ignore_notfound_exc(
             self.delete, uri)
@@ -430,8 +436,8 @@
             waiters.wait_for_status(wait_func, wait_id,
                                     const.PROVISIONING_STATUS,
                                     const.ACTIVE,
-                                    self.build_interval,
-                                    self.timeout)
+                                    CONF.load_balancer.check_interval,
+                                    CONF.load_balancer.check_timeout)
         else:
             LOG.info("Waiting for %s %s to be DELETED...",
                      wait_client.root_tag, wait_id)
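
With cascade=True the cleanup issues a single DELETE on <uri>/<obj_id>?cascade=true, letting the Octavia API remove listeners, pools and members together with the load balancer; that is why the per-child addClassResourceCleanup calls are dropped throughout the test modules below. A sketch of how a concrete client would forward the flag, assuming it wraps _cleanup_obj the same way the plugin's other cleanup_* helpers do (only the base class change appears in this diff, so the wrapper below is an assumption):

    # Assumed wrapper on the loadbalancer client; names follow the plugin's
    # existing cleanup_* convention.
    class LoadbalancerClient(base_client.BaseLBaaSClient):

        def cleanup_loadbalancer(self, lb_id, cascade=False):
            # Forward cascade so one cleanup call tears down the whole tree.
            return self._cleanup_obj(lb_id, cascade=cascade)

Tests then register a single class-level cleanup, as the hunks below do:

    cls.addClassResourceCleanup(
        cls.mem_lb_client.cleanup_loadbalancer,
        cls.lb_id, cascade=True)
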
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
index b4df6c9..d33d5a5 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
@@ -31,6 +31,8 @@
 LOG = logging.getLogger(__name__)
 
 
+@testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+                  "Not supported by TungstenFabric")
 @testtools.skipUnless(
     CONF.validation.run_validation,
     'Active-Standby tests will not work without run_validation enabled.')
@@ -171,8 +173,6 @@
             CONF.load_balancer.check_interval,
             CONF.load_balancer.check_timeout)
 
-    @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
-                      "Not supported by TungstenFabric")
     @testtools.skipIf(CONF.load_balancer.test_with_noop,
                       'Active/Standby tests will not work in noop mode.')
     @decorators.idempotent_id('e591fa7a-0eee-485a-8ca0-5cf1a556bdf0')
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index e8abc10..5594c85 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -54,7 +54,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -243,6 +243,11 @@
 
         for new_amp in after_amphorae:
             self.assertNotEqual(amphora_1[const.ID], new_amp[const.ID])
+            waiters.wait_for_status(self.lb_admin_amphora_client.show_amphora,
+                                    new_amp[const.ID], const.STATUS,
+                                    const.STATUS_ALLOCATED,
+                                    CONF.load_balancer.build_interval,
+                                    CONF.load_balancer.build_timeout)
 
     @testtools.skipIf(CONF.load_balancer.test_with_noop,
                       'Log offload tests will not work in noop mode.')
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index 3baf36d..2472eb2 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -56,7 +56,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index 50cb2f0..571314f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -52,7 +52,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -69,10 +69,6 @@
         }
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -90,10 +86,6 @@
 
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -157,10 +149,6 @@
                 **l7policy_kwargs)
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -567,10 +555,6 @@
         }
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -691,10 +675,6 @@
             })
 
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -839,10 +819,6 @@
             const.ACTION: const.REJECT,
         }
         l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_l7policy_client.cleanup_l7policy,
-            l7policy[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index ce7d989..4ee15a8 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -47,7 +47,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -99,7 +99,7 @@
             const.PROTOCOL: protocol,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
             # Don't test with a default pool -- we'll do that in the scenario,
             # but this will allow us to test that the field isn't mandatory,
             # as well as not conflate pool failures with listener test failures
@@ -154,10 +154,6 @@
                 **listener_kwargs)
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -272,12 +268,11 @@
             const.PROTOCOL: protocol1,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
         }
 
         try:
-            listener = self.mem_listener_client.create_listener(
-                **listener_kwargs)
+            self.mem_listener_client.create_listener(**listener_kwargs)
         except exceptions.BadRequest as e:
             faultstring = e.resp_body.get('faultstring', '')
             if ("Invalid input for field/attribute protocol." in faultstring
@@ -285,11 +280,6 @@
                 raise self.skipException("Skipping unsupported protocol")
             raise e
 
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -305,12 +295,11 @@
             const.PROTOCOL: protocol2,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
         }
 
         try:
-            listener2 = self.mem_listener_client.create_listener(
-                **listener2_kwargs)
+            self.mem_listener_client.create_listener(**listener2_kwargs)
         except exceptions.BadRequest as e:
             faultstring = e.resp_body.get('faultstring', '')
             if ("Invalid input for field/attribute protocol." in faultstring
@@ -318,11 +307,6 @@
                 raise self.skipException("Skipping unsupported protocol")
             raise e
 
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener2[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -338,7 +322,7 @@
             const.PROTOCOL: protocol1,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
         }
 
         self.assertRaises(
@@ -355,7 +339,7 @@
             const.PROTOCOL: protocol3,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
         }
 
         self.assertRaises(
@@ -719,7 +703,7 @@
             const.PROTOCOL: protocol,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
             # const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -755,10 +739,6 @@
             listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -892,7 +872,7 @@
             const.PROTOCOL: protocol,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
             # const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -928,10 +908,6 @@
             listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -955,7 +931,8 @@
         self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
         self.assertEqual(protocol, listener[const.PROTOCOL])
         self.assertEqual(protocol_port, listener[const.PROTOCOL_PORT])
-        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+        self.assertEqual(CONF.load_balancer.listener_conn_limit,
+                         listener[const.CONNECTION_LIMIT])
         if (protocol == const.HTTP and
                 CONF.load_balancer.provider != 'tungstenfabric'):
             insert_headers = listener[const.INSERT_HEADERS]
@@ -1019,7 +996,7 @@
             const.NAME: new_name,
             const.DESCRIPTION: new_description,
             const.ADMIN_STATE_UP: True,
-            const.CONNECTION_LIMIT: 400,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_POOL_ID: '',
             # const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -1094,7 +1071,8 @@
             self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
         else:
             self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
-        self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+        self.assertEqual(CONF.load_balancer.listener_conn_limit+200,
+                         listener[const.CONNECTION_LIMIT])
         if (protocol == const.HTTP and
                 CONF.load_balancer.provider != 'tungstenfabric'):
             insert_headers = listener[const.INSERT_HEADERS]
@@ -1162,10 +1140,6 @@
             const.LOADBALANCER_ID: self.lb_id,
         }
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
@@ -1245,7 +1219,7 @@
             const.PROTOCOL: protocol,
             const.PROTOCOL_PORT: protocol_port,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
         }
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index a5c78b3..dd0081f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -61,7 +61,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -103,13 +103,10 @@
             const.PROTOCOL: listener_protocol,
             const.PROTOCOL_PORT: cls.current_listener_port,
             const.LOADBALANCER_ID: cls.lb_id,
-            const.CONNECTION_LIMIT: 200
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
         }
         cls.current_listener_port += 1
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener, listener[const.ID],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -140,10 +137,6 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool, pool[const.ID],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
@@ -910,11 +903,6 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1748,11 +1736,6 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -2210,11 +2193,6 @@
 
         member = self.mem_member_client.create_member(**member_kwargs)
 
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -2640,9 +2618,6 @@
             raise testtools.TestCase.skipException(message)
 
         pool_id = pool[const.ID]
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool, pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id,
@@ -2675,11 +2650,6 @@
                 const.ID]
         member1 = self.mem_member_client.create_member(**member1_kwargs)
 
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id,
                                 const.PROVISIONING_STATUS,
@@ -2711,10 +2681,6 @@
                 const.ID]
 
         member2 = self.mem_member_client.create_member(**member2_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id,
@@ -2778,11 +2744,6 @@
             pool_id,
             query_params='{sort}={port}:{asc}'.format(
                 sort=const.SORT, port=const.PROTOCOL_PORT, asc=const.ASC))
-        for m in members:
-            self.addClassResourceCleanup(
-                self.mem_member_client.cleanup_member,
-                m[const.ID], pool_id=pool_id,
-                lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         # We should have two members: member2 and member3, in that order
         self.assertEqual(2, len(members))
@@ -2983,10 +2944,6 @@
             const.PROTOCOL_PORT: self.member_port.increment(),
         }
         member = self.mem_member_client.create_member(**member_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member[const.ID], pool_id=pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index 2d92c18..a7bdea7 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -46,7 +46,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -367,10 +367,6 @@
             }
             listener = self.mem_listener_client.create_listener(
                 **listener_kwargs)
-            self.addClassResourceCleanup(
-                self.mem_listener_client.cleanup_listener,
-                listener[const.ID],
-                lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
             waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     self.lb_id, const.PROVISIONING_STATUS,
@@ -437,11 +433,6 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1096,11 +1087,6 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1367,11 +1353,6 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1733,11 +1714,6 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 10c264b..daad0cd 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -22,6 +22,7 @@
 import httpx
 from OpenSSL.crypto import X509
 from OpenSSL import SSL
+import tenacity
 
 from oslo_log import log as logging
 from oslo_utils import uuidutils
@@ -186,7 +187,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -219,10 +220,6 @@
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -242,12 +239,7 @@
         if cls.lb_member_1_subnet:
             member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]
 
-        member1 = cls.mem_member_client.create_member(
-            **member1_kwargs)
-        cls.addClassResourceCleanup(
-            cls.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+        cls.mem_member_client.create_member(**member1_kwargs)
         waiters.wait_for_status(
             cls.mem_lb_client.show_loadbalancer, cls.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -266,12 +258,7 @@
         if cls.lb_member_2_subnet:
             member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]
 
-        member2 = cls.mem_member_client.create_member(
-            **member2_kwargs)
-        cls.addClassResourceCleanup(
-            cls.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+        cls.mem_member_client.create_member(**member2_kwargs)
         waiters.wait_for_status(
             cls.mem_lb_client.show_loadbalancer, cls.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -818,6 +805,14 @@
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
 
+        if not CONF.load_balancer.test_with_noop:
+            waiters.wait_for_status(
+                self.mem_listener_client.show_listener,
+                self.listener_id, const.OPERATING_STATUS,
+                const.ONLINE,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout)
+
         # Test that no client certificate connects
         response = requests.get(
             'https://{0}:{1}'.format(self.lb_vip_address, LISTENER1_TCP_PORT),
@@ -923,6 +918,14 @@
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
 
+        if not CONF.load_balancer.test_with_noop:
+            waiters.wait_for_status(
+                self.mem_listener_client.show_listener,
+                self.listener_id, const.OPERATING_STATUS,
+                const.ONLINE,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout)
+
         LISTENER2_TCP_PORT = '8443'
         listener_name = data_utils.rand_name(
             "lb_member_listener2-multi-list-client-auth")
@@ -949,7 +952,13 @@
                                 const.ACTIVE,
                                 CONF.load_balancer.build_interval,
                                 CONF.load_balancer.build_timeout)
-
+        if not CONF.load_balancer.test_with_noop:
+            waiters.wait_for_status(
+                self.mem_listener_client.show_listener,
+                self.listener2_id, const.OPERATING_STATUS,
+                const.ONLINE,
+                CONF.load_balancer.build_interval,
+                CONF.load_balancer.build_timeout)
         # Test that no client certificate fails to connect to listener1
         self.assertRaises(
             requests.exceptions.SSLError,
@@ -1167,6 +1176,17 @@
         self.assertEqual(expected_proto, selected_proto)
 
     def _test_http_versions_tls_traffic(self, http_version, alpn_protos):
+        @tenacity.retry(
+            retry=tenacity.retry_if_exception_type(httpx.ConnectTimeout),
+            wait=tenacity.wait_incrementing(
+                const.RETRY_INITIAL_DELAY, const.RETRY_BACKOFF, const.RETRY_MAX
+            ),
+            stop=tenacity.stop_after_attempt(const.RETRY_ATTEMPTS),
+            reraise=True,
+        )
+        def _get(client, url):
+            return client.get(url)
+
         if not self.mem_listener_client.is_version_supported(
                 self.api_version, '2.20'):
             raise self.skipException('ALPN protocols are only available on '
@@ -1200,7 +1220,7 @@
 
         url = 'https://%s:%s' % (self.lb_vip_address, 443)
         client = httpx.Client(http2=(http_version == 'HTTP/2'), verify=context)
-        r = client.get(url)
+        r = _get(client, url)
         self.assertEqual(http_version, r.http_version)
 
     @decorators.idempotent_id('9965828d-24af-4fa0-91ae-21c6bc47ab4c')
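
The tenacity wrapper added above retries the HTTPS probe when httpx raises ConnectTimeout, typically a transient condition while the listener is still coming up. A standalone sketch of the same pattern with illustrative numbers, since the actual const.RETRY_INITIAL_DELAY, const.RETRY_BACKOFF and const.RETRY_MAX values are not part of this change:

    # Illustrative values only. wait_incrementing(start, increment, max) waits
    # start, start + increment, start + 2 * increment, ... seconds between
    # attempts, capped at max; stop_after_attempt(5) re-raises after 5 tries.
    import httpx
    import tenacity

    @tenacity.retry(
        retry=tenacity.retry_if_exception_type(httpx.ConnectTimeout),
        wait=tenacity.wait_incrementing(start=1, increment=2, max=10),
        stop=tenacity.stop_after_attempt(5),
        reraise=True,
    )
    def fetch(client, url):
        return client.get(url)
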
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
index eafcfc3..8d4ff06 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -52,7 +52,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -275,9 +275,6 @@
             const.LOADBALANCER_ID: self.lb_id,
         }
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool, pool[const.ID],
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
@@ -304,9 +301,6 @@
                               const.EXPECTED_CODES: '200'})
 
         hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
-        self.addCleanup(
-            self.mem_healthmonitor_client.cleanup_healthmonitor,
-            hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -389,3 +383,9 @@
             const.PROVISIONING_STATUS,
             CONF.load_balancer.check_interval,
             CONF.load_balancer.check_timeout)
+
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index 06d10dc..3fb6e1b 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -61,7 +61,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         cls.lb_vip_address = lb[const.VIP_ADDRESS]
 
@@ -99,14 +99,10 @@
             const.LOADBALANCER_ID: cls.lb_id,
             # For branches that don't support multiple listeners in single
             # haproxy process and use haproxy>=1.8:
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
         }
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_ids[protocol] = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_ids[protocol],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -123,10 +119,6 @@
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_ids[protocol] = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_ids[protocol],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
index 07508ac..7b50c24 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
@@ -49,7 +49,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -66,10 +66,6 @@
         }
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -86,10 +82,6 @@
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
         cls.pool_id = pool[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            cls.pool_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
index 7e4be04..82c0dca 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
@@ -49,7 +49,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -66,10 +66,6 @@
         }
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
         cls.listener_id = listener[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            cls.listener_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -85,10 +81,6 @@
         }
         l7policy = cls.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
         cls.l7policy_id = l7policy[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_l7policy_client.cleanup_l7policy,
-            cls.l7policy_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index 7e31537..79d43e7 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -46,7 +46,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -89,9 +89,6 @@
             pool1_kwargs.update({const.LISTENER_ID: listener1_id})
         pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
         pool1_id = pool1[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool, pool1_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -120,9 +117,6 @@
             pool2_kwargs.update({const.LISTENER_ID: listener2_id})
         pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
         pool2_id = pool2[const.ID]
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool, pool2_id,
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -269,7 +263,7 @@
             const.PROTOCOL: protocol,
             const.PROTOCOL_PORT: 80,
             const.LOADBALANCER_ID: self.lb_id,
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
             const.DEFAULT_POOL_ID: pool1_id,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -326,7 +320,8 @@
         self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
         self.assertEqual(protocol, listener[const.PROTOCOL])
         self.assertEqual(80, listener[const.PROTOCOL_PORT])
-        self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+        self.assertEqual(CONF.load_balancer.listener_conn_limit,
+                         listener[const.CONNECTION_LIMIT])
         if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
                 CONF.load_balancer.provider != 'tungstenfabric'):
             insert_headers = listener[const.INSERT_HEADERS]
@@ -355,7 +350,7 @@
             const.NAME: new_name,
             const.DESCRIPTION: new_description,
             const.ADMIN_STATE_UP: True,
-            const.CONNECTION_LIMIT: 400,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
             const.DEFAULT_POOL_ID: pool2_id,
             # TODO(rm_work): need to finish the rest of this stuff
             # const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -418,7 +413,8 @@
             self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
         self.assertEqual(protocol, listener[const.PROTOCOL])
         self.assertEqual(80, listener[const.PROTOCOL_PORT])
-        self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+        self.assertEqual(listener_update_kwargs[const.CONNECTION_LIMIT],
+                         listener[const.CONNECTION_LIMIT])
         if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
                 CONF.load_balancer.provider != 'tungstenfabric'):
             insert_headers = listener[const.INSERT_HEADERS]
@@ -443,16 +439,16 @@
             self.assertEqual(expected_cidrs, listener[const.ALLOWED_CIDRS])
 
         # Listener delete
+        self.mem_listener_client.delete_listener(listener[const.ID])
+        waiters.wait_for_deleted_status_or_not_found(
+            self.mem_listener_client.show_listener, listener[const.ID],
+            const.PROVISIONING_STATUS,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer,
             self.lb_id, const.PROVISIONING_STATUS,
             const.ACTIVE,
             CONF.load_balancer.check_interval,
             CONF.load_balancer.check_timeout)
-        self.mem_listener_client.delete_listener(listener[const.ID])
-
-        waiters.wait_for_deleted_status_or_not_found(
-            self.mem_listener_client.show_listener, listener[const.ID],
-            const.PROVISIONING_STATUS,
-            CONF.load_balancer.check_interval,
-            CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 622eda9..3ec1b99 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -64,7 +64,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -106,13 +106,10 @@
             const.LOADBALANCER_ID: cls.lb_id,
             # For branches that don't support multiple listeners in single
             # haproxy process and use haproxy>=1.8:
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
         }
         cls.current_listener_port += 1
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener, listener[const.ID],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -139,10 +136,6 @@
                 message = e.resp_body.get('faultstring', message)
             raise testtools.TestCase.skipException(message)
 
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool, pool[const.ID],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index cd9c39f..d05368c 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -45,7 +45,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -367,9 +367,6 @@
             listener = self.mem_listener_client.create_listener(
                 **listener_kwargs)
             listener_id = listener[const.ID]
-            self.addClassResourceCleanup(
-                self.mem_listener_client.cleanup_listener, listener_id,
-                lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
             waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                     self.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index 78c6114..72eac2d 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -64,7 +64,7 @@
         cls.lb_id = lb[const.ID]
         cls.addClassResourceCleanup(
             cls.mem_lb_client.cleanup_loadbalancer,
-            cls.lb_id)
+            cls.lb_id, cascade=True)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -113,13 +113,9 @@
             const.LOADBALANCER_ID: cls.lb_id,
             # For branches that don't support multiple listeners in single
             # haproxy process and use haproxy>=1.8:
-            const.CONNECTION_LIMIT: 200,
+            const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
         }
         listener = cls.mem_listener_client.create_listener(**listener_kwargs)
-        cls.addClassResourceCleanup(
-            cls.mem_listener_client.cleanup_listener,
-            listener[const.ID],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -135,10 +131,6 @@
             const.LISTENER_ID: listener[const.ID],
         }
         pool = cls.mem_pool_client.create_pool(**pool_kwargs)
-        cls.addClassResourceCleanup(
-            cls.mem_pool_client.cleanup_pool,
-            pool[const.ID],
-            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
 
         waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
                                 cls.lb_id, const.PROVISIONING_STATUS,
@@ -724,10 +716,11 @@
                                       url_for_member1)
 
         # Assert that slow traffic goes to pool2->member2
+        # Increase timeout to cope with slow test systems.
         url_for_member2 = 'http://{}:{}/slow?delay=1s'.format(
             self.lb_vip_address, LISTENER_PORT)
         self.assertConsistentResponse((200, self.webserver2_response),
-                                      url_for_member2)
+                                      url_for_member2, timeout=3)
 
         # Assert that /turtles is redirected to identity
         url_for_identity = 'http://{}:{}/turtles'.format(self.lb_vip_address,
@@ -1210,8 +1203,8 @@
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
 
         pool_name = data_utils.rand_name("lb_member_pool3_cidrs")
         pool_kwargs = {
@@ -1244,8 +1237,8 @@
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
 
         # Set up Member 1 for Webserver 1
         member1_name = data_utils.rand_name("lb_member_member1-cidrs-traffic")
@@ -1311,8 +1304,8 @@
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
                                 const.ACTIVE,
-                                CONF.load_balancer.build_interval,
-                                CONF.load_balancer.build_timeout)
+                                CONF.load_balancer.check_interval,
+                                CONF.load_balancer.check_timeout)
 
         # NOTE: Before we start with the consistent response check, we must
         # wait until Neutron completes the SG update.
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
index e7fbd34..e641bc4 100644
--- a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
@@ -74,7 +74,7 @@
         lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         self.lb_id = lb[const.ID]
         self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
-                                     self.lb_id)
+                                     self.lb_id, cascade=True)
 
         if CONF.validation.connect_method == 'floating':
             port_id = lb[const.VIP_PORT_ID]
@@ -114,10 +114,6 @@
         }
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.listener_id = listener[const.ID]
-        self.addClassResourceCleanup(
-            self.mem_listener_client.cleanup_listener,
-            self.listener_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
@@ -134,10 +130,6 @@
         }
         pool = self.mem_pool_client.create_pool(**pool_kwargs)
         self.pool_id = pool[const.ID]
-        self.addClassResourceCleanup(
-            self.mem_pool_client.cleanup_pool,
-            self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
 
         waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
                                 self.lb_id, const.PROVISIONING_STATUS,
@@ -157,12 +149,7 @@
         if self.lb_member_1_subnet:
             member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
 
-        member1 = self.mem_member_client.create_member(
-            **member1_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member1[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        self.mem_member_client.create_member(**member1_kwargs)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
@@ -181,12 +168,7 @@
         if self.lb_member_2_subnet:
             member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
 
-        member2 = self.mem_member_client.create_member(
-            **member2_kwargs)
-        self.addClassResourceCleanup(
-            self.mem_member_client.cleanup_member,
-            member2[const.ID], pool_id=self.pool_id,
-            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        self.mem_member_client.create_member(**member2_kwargs)
         waiters.wait_for_status(
             self.mem_lb_client.show_loadbalancer, self.lb_id,
             const.PROVISIONING_STATUS, const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index b491c5c..f8c9836 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -193,9 +193,9 @@
             cls.lb_member_2_subnet = override_subnet
 
             if (CONF.load_balancer.test_with_ipv6 and
-                    conf_lb.test_IPv6_subnet_override):
+                    conf_lb.test_ipv6_subnet_override):
                 override_ipv6_subnet = show_subnet(
-                    conf_lb.test_IPv6_subnet_override)
+                    conf_lb.test_ipv6_subnet_override)
                 cls.lb_member_vip_ipv6_subnet = override_ipv6_subnet
                 cls.lb_member_1_ipv6_subnet = override_ipv6_subnet
                 cls.lb_member_2_ipv6_subnet = override_ipv6_subnet
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index 523802e..97a3dfe 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -78,6 +78,7 @@
       - ^etc/.*$
       - ^releasenotes/.*$
     vars:
+      configure_swap_size: 8192
       devstack_localrc:
         TEMPEST_PLUGINS: /opt/stack/octavia-tempest-plugin
         USE_PYTHON3: true