Fix scenario test issues with immutable LBs
Wait for the load balancer to become ACTIVE after deleting a
healthmonitor or a listener, to ensure the next test uses an LB in a
non-transitional state.
This also fixes the CIDRs tests, which were using the wrong waiter
timeouts.
This patch also replaces the individual deletion of child resources in
class teardown with a more performant cascade deletion of the load
balancer.
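
In practice, each test class now registers a single cascade cleanup for
the load balancer instead of one cleanup per child resource, roughly:

    cls.addClassResourceCleanup(
        cls.mem_lb_client.cleanup_loadbalancer,
        cls.lb_id, cascade=True)
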
Story: 2008219
Task: 41008
Depends-On: https://review.opendev.org/#/c/757604/
Depends-On: https://review.opendev.org/#/c/757840/
Depends-On: https://review.opendev.org/#/c/757841/
Depends-On: https://review.opendev.org/#/c/757842/
Co-Authored-By: Carlos Goncalves <cgoncalves@redhat.com>
Change-Id: I6a4eed7269e4f502bd0fc8613cb4ec4da13890e7
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
index 424d911..831582d 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
@@ -339,7 +339,8 @@
self.expected_success(204, response.status)
return response.status
- def _cleanup_obj(self, obj_id, lb_client=None, lb_id=None, parent_id=None):
+ def _cleanup_obj(self, obj_id, lb_client=None, lb_id=None, parent_id=None,
+ cascade=False):
"""Clean up an object (for use in tempest addClassResourceCleanup).
We always need to wait for the parent LB to be in a mutable state
@@ -362,6 +363,8 @@
loadbalancer client already.
:param lb_id: (Optional) The ID of the parent loadbalancer, if the main
obj_id is for a sub-object and not a loadbalancer.
+ :param cascade: If true, will delete all child objects of an
+ object, if that object supports it.
:return:
"""
if parent_id:
@@ -399,8 +402,8 @@
waiters.wait_for_status(wait_func, wait_id,
const.PROVISIONING_STATUS,
const.ACTIVE,
- self.build_interval,
- self.timeout)
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
except exceptions.UnexpectedResponseCode:
# Status is ERROR, go ahead with deletion
LOG.debug("Found %s %s in ERROR status, proceeding with cleanup.",
@@ -419,7 +422,10 @@
LOG.error("Cleanup encountered an unknown exception while waiting "
"for %s %s: %s", wait_client.root_tag, wait_id, e)
- uri = '{0}/{1}'.format(uri, obj_id)
+ if cascade:
+ uri = '{0}/{1}?cascade=true'.format(uri, obj_id)
+ else:
+ uri = '{0}/{1}'.format(uri, obj_id)
LOG.info("Cleaning up %s %s...", self.root_tag, obj_id)
return_status = test_utils.call_and_ignore_notfound_exc(
self.delete, uri)
@@ -430,8 +436,8 @@
waiters.wait_for_status(wait_func, wait_id,
const.PROVISIONING_STATUS,
const.ACTIVE,
- self.build_interval,
- self.timeout)
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
else:
LOG.info("Waiting for %s %s to be DELETED...",
wait_client.root_tag, wait_id)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index e8abc10..a7c96f1 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -54,7 +54,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index 3baf36d..2472eb2 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -56,7 +56,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index 50cb2f0..571314f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -52,7 +52,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -69,10 +69,6 @@
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_id = listener[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_listener_client.cleanup_listener,
- cls.listener_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -90,10 +86,6 @@
pool = cls.mem_pool_client.create_pool(**pool_kwargs)
cls.pool_id = pool[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool,
- cls.pool_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -157,10 +149,6 @@
**l7policy_kwargs)
l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
- self.addClassResourceCleanup(
- self.mem_l7policy_client.cleanup_l7policy,
- l7policy[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -567,10 +555,6 @@
}
l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
- self.addClassResourceCleanup(
- self.mem_l7policy_client.cleanup_l7policy,
- l7policy[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -691,10 +675,6 @@
})
l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
- self.addClassResourceCleanup(
- self.mem_l7policy_client.cleanup_l7policy,
- l7policy[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -839,10 +819,6 @@
const.ACTION: const.REJECT,
}
l7policy = self.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
- self.addClassResourceCleanup(
- self.mem_l7policy_client.cleanup_l7policy,
- l7policy[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 350f739..4ee15a8 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -47,7 +47,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -154,10 +154,6 @@
**listener_kwargs)
listener = self.mem_listener_client.create_listener(**listener_kwargs)
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- listener[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -276,8 +272,7 @@
}
try:
- listener = self.mem_listener_client.create_listener(
- **listener_kwargs)
+ self.mem_listener_client.create_listener(**listener_kwargs)
except exceptions.BadRequest as e:
faultstring = e.resp_body.get('faultstring', '')
if ("Invalid input for field/attribute protocol." in faultstring
@@ -285,11 +280,6 @@
raise self.skipException("Skipping unsupported protocol")
raise e
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- listener[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -309,8 +299,7 @@
}
try:
- listener2 = self.mem_listener_client.create_listener(
- **listener2_kwargs)
+ self.mem_listener_client.create_listener(**listener2_kwargs)
except exceptions.BadRequest as e:
faultstring = e.resp_body.get('faultstring', '')
if ("Invalid input for field/attribute protocol." in faultstring
@@ -318,11 +307,6 @@
raise self.skipException("Skipping unsupported protocol")
raise e
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- listener2[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -755,10 +739,6 @@
listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
listener = self.mem_listener_client.create_listener(**listener_kwargs)
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- listener[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -928,10 +908,6 @@
listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
listener = self.mem_listener_client.create_listener(**listener_kwargs)
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- listener[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -1164,10 +1140,6 @@
const.LOADBALANCER_ID: self.lb_id,
}
listener = self.mem_listener_client.create_listener(**listener_kwargs)
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- listener[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index 694a253..dd0081f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -61,7 +61,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -107,9 +107,6 @@
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
- cls.addClassResourceCleanup(
- cls.mem_listener_client.cleanup_listener, listener[const.ID],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -140,10 +137,6 @@
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool, pool[const.ID],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -910,11 +903,6 @@
member = self.mem_member_client.create_member(**member_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member[const.ID], pool_id=pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1748,11 +1736,6 @@
member = self.mem_member_client.create_member(**member_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member[const.ID], pool_id=pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -2210,11 +2193,6 @@
member = self.mem_member_client.create_member(**member_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member[const.ID], pool_id=pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -2640,9 +2618,6 @@
raise testtools.TestCase.skipException(message)
pool_id = pool[const.ID]
- self.addClassResourceCleanup(
- self.mem_pool_client.cleanup_pool, pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id,
@@ -2675,11 +2650,6 @@
const.ID]
member1 = self.mem_member_client.create_member(**member1_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member1[const.ID], pool_id=pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id,
const.PROVISIONING_STATUS,
@@ -2711,10 +2681,6 @@
const.ID]
member2 = self.mem_member_client.create_member(**member2_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member2[const.ID], pool_id=pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id,
@@ -2778,11 +2744,6 @@
pool_id,
query_params='{sort}={port}:{asc}'.format(
sort=const.SORT, port=const.PROTOCOL_PORT, asc=const.ASC))
- for m in members:
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- m[const.ID], pool_id=pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
# We should have two members: member2 and member3, in that order
self.assertEqual(2, len(members))
@@ -2983,10 +2944,6 @@
const.PROTOCOL_PORT: self.member_port.increment(),
}
member = self.mem_member_client.create_member(**member_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member[const.ID], pool_id=pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index 2d92c18..a7bdea7 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -46,7 +46,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -367,10 +367,6 @@
}
listener = self.mem_listener_client.create_listener(
**listener_kwargs)
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- listener[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
@@ -437,11 +433,6 @@
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
- self.addClassResourceCleanup(
- self.mem_pool_client.cleanup_pool,
- pool[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1096,11 +1087,6 @@
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
- self.addClassResourceCleanup(
- self.mem_pool_client.cleanup_pool,
- pool[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1367,11 +1353,6 @@
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
- self.addClassResourceCleanup(
- self.mem_pool_client.cleanup_pool,
- pool[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -1733,11 +1714,6 @@
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
- self.addClassResourceCleanup(
- self.mem_pool_client.cleanup_pool,
- pool[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
-
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 855466d..10f949d 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -186,7 +186,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -219,10 +219,6 @@
}
pool = cls.mem_pool_client.create_pool(**pool_kwargs)
cls.pool_id = pool[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool,
- cls.pool_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -242,12 +238,7 @@
if cls.lb_member_1_subnet:
member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]
- member1 = cls.mem_member_client.create_member(
- **member1_kwargs)
- cls.addClassResourceCleanup(
- cls.mem_member_client.cleanup_member,
- member1[const.ID], pool_id=cls.pool_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+ cls.mem_member_client.create_member(**member1_kwargs)
waiters.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -266,12 +257,7 @@
if cls.lb_member_2_subnet:
member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]
- member2 = cls.mem_member_client.create_member(
- **member2_kwargs)
- cls.addClassResourceCleanup(
- cls.mem_member_client.cleanup_member,
- member2[const.ID], pool_id=cls.pool_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+ cls.mem_member_client.create_member(**member2_kwargs)
waiters.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
index eafcfc3..8d4ff06 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -52,7 +52,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -275,9 +275,6 @@
const.LOADBALANCER_ID: self.lb_id,
}
pool = self.mem_pool_client.create_pool(**pool_kwargs)
- self.addClassResourceCleanup(
- self.mem_pool_client.cleanup_pool, pool[const.ID],
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
@@ -304,9 +301,6 @@
const.EXPECTED_CODES: '200'})
hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
- self.addCleanup(
- self.mem_healthmonitor_client.cleanup_healthmonitor,
- hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
@@ -389,3 +383,9 @@
const.PROVISIONING_STATUS,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index daa061c..3fb6e1b 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -61,7 +61,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
cls.lb_vip_address = lb[const.VIP_ADDRESS]
@@ -103,10 +103,6 @@
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_ids[protocol] = listener[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_listener_client.cleanup_listener,
- cls.listener_ids[protocol],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -123,10 +119,6 @@
}
pool = cls.mem_pool_client.create_pool(**pool_kwargs)
cls.pool_ids[protocol] = pool[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool,
- cls.pool_ids[protocol],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
index 07508ac..7b50c24 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
@@ -49,7 +49,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -66,10 +66,6 @@
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_id = listener[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_listener_client.cleanup_listener,
- cls.listener_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -86,10 +82,6 @@
}
pool = cls.mem_pool_client.create_pool(**pool_kwargs)
cls.pool_id = pool[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool,
- cls.pool_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
index 7e4be04..82c0dca 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
@@ -49,7 +49,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -66,10 +66,6 @@
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_id = listener[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_listener_client.cleanup_listener,
- cls.listener_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -85,10 +81,6 @@
}
l7policy = cls.mem_l7policy_client.create_l7policy(**l7policy_kwargs)
cls.l7policy_id = l7policy[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_l7policy_client.cleanup_l7policy,
- cls.l7policy_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index 8c1b74d..79d43e7 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -46,7 +46,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -89,9 +89,6 @@
pool1_kwargs.update({const.LISTENER_ID: listener1_id})
pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
pool1_id = pool1[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool, pool1_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -120,9 +117,6 @@
pool2_kwargs.update({const.LISTENER_ID: listener2_id})
pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
pool2_id = pool2[const.ID]
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool, pool2_id,
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -445,16 +439,16 @@
self.assertEqual(expected_cidrs, listener[const.ALLOWED_CIDRS])
# Listener delete
+ self.mem_listener_client.delete_listener(listener[const.ID])
+ waiters.wait_for_deleted_status_or_not_found(
+ self.mem_listener_client.show_listener, listener[const.ID],
+ const.PROVISIONING_STATUS,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- self.mem_listener_client.delete_listener(listener[const.ID])
-
- waiters.wait_for_deleted_status_or_not_found(
- self.mem_listener_client.show_listener, listener[const.ID],
- const.PROVISIONING_STATUS,
- CONF.load_balancer.check_interval,
- CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index a3b683e..3ec1b99 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -64,7 +64,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -110,9 +110,6 @@
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
- cls.addClassResourceCleanup(
- cls.mem_listener_client.cleanup_listener, listener[const.ID],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -139,10 +136,6 @@
message = e.resp_body.get('faultstring', message)
raise testtools.TestCase.skipException(message)
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool, pool[const.ID],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
-
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index cd9c39f..d05368c 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -45,7 +45,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -367,9 +367,6 @@
listener = self.mem_listener_client.create_listener(
**listener_kwargs)
listener_id = listener[const.ID]
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener, listener_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index 947cf27..72eac2d 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -64,7 +64,7 @@
cls.lb_id = lb[const.ID]
cls.addClassResourceCleanup(
cls.mem_lb_client.cleanup_loadbalancer,
- cls.lb_id)
+ cls.lb_id, cascade=True)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -116,10 +116,6 @@
const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
- cls.addClassResourceCleanup(
- cls.mem_listener_client.cleanup_listener,
- listener[const.ID],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -135,10 +131,6 @@
const.LISTENER_ID: listener[const.ID],
}
pool = cls.mem_pool_client.create_pool(**pool_kwargs)
- cls.addClassResourceCleanup(
- cls.mem_pool_client.cleanup_pool,
- pool[const.ID],
- lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
cls.lb_id, const.PROVISIONING_STATUS,
@@ -724,10 +716,11 @@
url_for_member1)
# Assert that slow traffic goes to pool2->member2
+ # Increase timeout to cope with slow test systems.
url_for_member2 = 'http://{}:{}/slow?delay=1s'.format(
self.lb_vip_address, LISTENER_PORT)
self.assertConsistentResponse((200, self.webserver2_response),
- url_for_member2)
+ url_for_member2, timeout=3)
# Assert that /turtles is redirected to identity
url_for_identity = 'http://{}:{}/turtles'.format(self.lb_vip_address,
@@ -1210,8 +1203,8 @@
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
pool_name = data_utils.rand_name("lb_member_pool3_cidrs")
pool_kwargs = {
@@ -1244,8 +1237,8 @@
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
# Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1-cidrs-traffic")
@@ -1311,8 +1304,8 @@
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
# NOTE: Before we start with the consistent response check, we must
# wait until Neutron completes the SG update.
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
index e7fbd34..e641bc4 100644
--- a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
@@ -74,7 +74,7 @@
lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.lb_id = lb[const.ID]
self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
- self.lb_id)
+ self.lb_id, cascade=True)
if CONF.validation.connect_method == 'floating':
port_id = lb[const.VIP_PORT_ID]
@@ -114,10 +114,6 @@
}
listener = self.mem_listener_client.create_listener(**listener_kwargs)
self.listener_id = listener[const.ID]
- self.addClassResourceCleanup(
- self.mem_listener_client.cleanup_listener,
- self.listener_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
@@ -134,10 +130,6 @@
}
pool = self.mem_pool_client.create_pool(**pool_kwargs)
self.pool_id = pool[const.ID]
- self.addClassResourceCleanup(
- self.mem_pool_client.cleanup_pool,
- self.pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
@@ -157,12 +149,7 @@
if self.lb_member_1_subnet:
member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
- member1 = self.mem_member_client.create_member(
- **member1_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member1[const.ID], pool_id=self.pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ self.mem_member_client.create_member(**member1_kwargs)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
@@ -181,12 +168,7 @@
if self.lb_member_2_subnet:
member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
- member2 = self.mem_member_client.create_member(
- **member2_kwargs)
- self.addClassResourceCleanup(
- self.mem_member_client.cleanup_member,
- member2[const.ID], pool_id=self.pool_id,
- lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ self.mem_member_client.create_member(**member2_kwargs)
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index 523802e..97a3dfe 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -78,6 +78,7 @@
- ^etc/.*$
- ^releasenotes/.*$
vars:
+ configure_swap_size: 8192
devstack_localrc:
TEMPEST_PLUGINS: /opt/stack/octavia-tempest-plugin
USE_PYTHON3: true