Log amphora console if LB fails to reach ACTIVE
Try to fetch the amphora's console logs if the load balancer fails to reach ACTIVE.
Related-PRODX: PRODX-54342
Change-Id: Ida3bd8222cccf56900a86d6b3821fc42e50d57f7
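
For context: every call site below now delegates to a wait_for_status helper defined on the shared test base class, which is not part of these hunks. A minimal sketch of what such a wrapper might look like, assuming it catches the TimeoutException raised by the existing waiters module and uses the plugin's admin amphora and servers clients; the exact attribute and constant names in the console-dump step are assumptions, not the actual implementation:

    # Sketch only; the real helper lives in the shared test base class.
    from oslo_log import log as logging
    from tempest.lib import exceptions

    from octavia_tempest_plugin.common import constants as const
    from octavia_tempest_plugin.tests import waiters

    LOG = logging.getLogger(__name__)

    class WaiterConsoleLoggingMixin(object):

        @classmethod
        def wait_for_status(cls, show_client, obj_id, status_key, status,
                            check_interval, check_timeout, **kwargs):
            """Delegate to waiters.wait_for_status; log consoles on timeout."""
            try:
                return waiters.wait_for_status(
                    show_client, obj_id, status_key, status,
                    check_interval, check_timeout, **kwargs)
            except exceptions.TimeoutException:
                # Best effort: dump each amphora's console output so a
                # failed run captures boot/agent errors, then re-raise.
                lb_id = getattr(cls, 'lb_id', None)
                if lb_id:
                    try:
                        amps = cls.lb_admin_amphora_client.list_amphorae(
                            query_params='{}={}'.format(
                                const.LOADBALANCER_ID, lb_id))
                        for amp in amps:
                            out = cls.os_admin_servers_client \
                                .get_console_output(amp[const.COMPUTE_ID])
                            LOG.error('Amphora %s console: %s',
                                      amp[const.ID], out.get('output'))
                    except Exception:
                        LOG.exception('Failed to get amphora console logs.')
                raise

Because the wrapper is a classmethod, both the cls.wait_for_status calls in class-level resource setup and the self.wait_for_status calls in test bodies resolve to the same helper.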
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
index c81b982..360196c 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
@@ -67,11 +67,11 @@
# because for correct assignment of Floating IP for loadbalancer
# we need to wait for amphora port
if config.is_tungstenfabric_backend_enabled():
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
if CONF.validation.connect_method == 'floating':
port_id = lb[const.VIP_PORT_ID]
@@ -89,11 +89,11 @@
else:
cls.lb_vip_address = lb[const.VIP_ADDRESS]
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
listener_name = data_utils.rand_name("lb_member_listener1_actstdby")
listener_kwargs = {
@@ -109,11 +109,11 @@
cls.listener_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool_name = data_utils.rand_name("lb_member_pool1_actstdby")
pool_kwargs = {
@@ -129,11 +129,11 @@
cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1_actstdby")
@@ -153,7 +153,7 @@
cls.mem_member_client.cleanup_member,
member1[const.ID], pool_id=cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(
+ cls.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -177,7 +177,7 @@
cls.mem_member_client.cleanup_member,
member2[const.ID], pool_id=cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(
+ cls.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -288,14 +288,14 @@
self.assertGreater(connections, 0)
LOG.info('Backup amphora is now Master.')
# Wait for the amphora failover to start
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.PENDING_UPDATE, CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
# Wait for the load balancer to return to ACTIVE so the
# cleanup steps will pass
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE, CONF.load_balancer.lb_build_interval,
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
index e7dccb6..9058d29 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -86,11 +86,11 @@
else:
cls.lb_vip_address = lb[const.VIP_ADDRESS]
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
listener_name = data_utils.rand_name("lb_member_listener1_actstdby")
listener_kwargs = {
@@ -106,11 +106,11 @@
cls.listener_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool_name = data_utils.rand_name("lb_member_pool1_actstdby")
pool_kwargs = {
@@ -126,11 +126,11 @@
cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1_actstdby")
@@ -150,7 +150,7 @@
cls.mem_member_client.cleanup_member,
member1[const.ID], pool_id=cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(
+ cls.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -174,7 +174,7 @@
cls.mem_member_client.cleanup_member,
member2[const.ID], pool_id=cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(
+ cls.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -276,7 +276,7 @@
self.os_admin_servers_client.delete_server(active[const.COMPUTE_ID])
# Wait for the amphora failover to start
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.PENDING_UPDATE, CONF.load_balancer.check_interval,
@@ -286,7 +286,7 @@
self.check_members_balanced(self.lb_vip_address)
# Wait for the load balancer to return to ACTIVE
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE, CONF.load_balancer.lb_build_interval,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index ead8935..8cab041 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -58,11 +58,11 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id, cascade=True)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
def _test_healthmonitor_create(self, pool_protocol, pool_algorithm,
hm_type):
@@ -109,11 +109,11 @@
self.mem_pool_client.cleanup_pool, pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
hm_name = data_utils.rand_name("lb_member_hm1-create")
delay = 3 if hm_type == const.HEALTH_MONITOR_UDP_CONNECT else 2
@@ -164,12 +164,12 @@
hm[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- hm = waiters.wait_for_status(
+ hm = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor,
hm[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -262,11 +262,11 @@
self.mem_pool_client.cleanup_pool, pool1_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool2_name = data_utils.rand_name("lb_member_pool2_hm-list")
pool2_kwargs = {
@@ -282,11 +282,11 @@
self.mem_pool_client.cleanup_pool, pool2_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool3_name = data_utils.rand_name("lb_member_pool3_hm-list")
pool3_kwargs = {
@@ -302,11 +302,11 @@
self.mem_pool_client.cleanup_pool, pool3_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
hm1_name = data_utils.rand_name("lb_member_hm2-list")
delay = 3 if hm_type == const.HEALTH_MONITOR_UDP_CONNECT else 2
@@ -337,17 +337,17 @@
self.mem_healthmonitor_client.cleanup_healthmonitor,
hm1[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- hm1 = waiters.wait_for_status(
+ hm1 = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor, hm1[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(hm1[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -382,17 +382,17 @@
self.mem_healthmonitor_client.cleanup_healthmonitor,
hm2[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- hm2 = waiters.wait_for_status(
+ hm2 = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor, hm2[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(hm2[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -427,17 +427,17 @@
self.mem_healthmonitor_client.cleanup_healthmonitor,
hm3[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- hm3 = waiters.wait_for_status(
+ hm3 = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor, hm3[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(hm3[const.ID])
# Test that a different users cannot see the lb_member healthmonitors
@@ -705,11 +705,11 @@
self.mem_pool_client.cleanup_pool, pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
hm_name = data_utils.rand_name("lb_member_hm1-show")
delay = 3 if hm_type == const.HEALTH_MONITOR_UDP_CONNECT else 2
@@ -734,12 +734,12 @@
hm[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- hm = waiters.wait_for_status(
+ hm = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor,
hm[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -829,11 +829,11 @@
self.mem_pool_client.cleanup_pool, pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
hm_name = data_utils.rand_name("lb_member_hm1-update")
delay = 3 if hm_type == const.HEALTH_MONITOR_UDP_CONNECT else 2
@@ -865,12 +865,12 @@
hm[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- hm = waiters.wait_for_status(
+ hm = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor,
hm[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -950,12 +950,12 @@
hm = self.mem_healthmonitor_client.update_healthmonitor(
hm[const.ID], **hm_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- hm = waiters.wait_for_status(
+ hm = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor,
hm[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1032,11 +1032,11 @@
self.mem_pool_client.cleanup_pool, pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
hm_name = data_utils.rand_name("lb_member_hm1-delete")
delay = 3 if hm_type == const.HEALTH_MONITOR_UDP_CONNECT else 2
@@ -1054,7 +1054,7 @@
hm[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1086,7 +1086,7 @@
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index 2732ec8..8647d0a 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -53,11 +53,11 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id, cascade=True)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
listener_name = data_utils.rand_name("lb_member_listener1_l7policy")
listener_kwargs = {
@@ -74,11 +74,11 @@
cls.listener_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool_name = data_utils.rand_name("lb_member_pool1_l7policy")
pool_kwargs = {
@@ -96,11 +96,11 @@
cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
@decorators.idempotent_id('eba4ddc2-758b-4dd5-bd28-c1b41d6575ca')
def test_l7policy_create_redirect_pool(self):
@@ -175,19 +175,19 @@
l7policy[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- l7policy = waiters.wait_for_status(
+ l7policy = self.wait_for_status(
self.mem_l7policy_client.show_l7policy,
l7policy[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- l7policy = waiters.wait_for_status(
+ l7policy = self.wait_for_status(
self.mem_l7policy_client.show_l7policy,
l7policy[const.ID], const.OPERATING_STATUS,
const.ONLINE,
@@ -258,11 +258,11 @@
listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
l7policy1_name = data_utils.rand_name("lb_member_l7policy2-list")
l7policy1_desc = 'B'
@@ -287,17 +287,17 @@
self.mem_l7policy_client.cleanup_l7policy,
l7policy1[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- l7policy1 = waiters.wait_for_status(
+ l7policy1 = self.wait_for_status(
self.mem_l7policy_client.show_l7policy, l7policy1[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(l7policy1[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -328,17 +328,17 @@
self.mem_l7policy_client.cleanup_l7policy,
l7policy2[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- l7policy2 = waiters.wait_for_status(
+ l7policy2 = self.wait_for_status(
self.mem_l7policy_client.show_l7policy, l7policy2[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(l7policy2[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -370,17 +370,17 @@
self.mem_l7policy_client.cleanup_l7policy,
l7policy3[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- l7policy3 = waiters.wait_for_status(
+ l7policy3 = self.wait_for_status(
self.mem_l7policy_client.show_l7policy, l7policy3[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(l7policy3[const.ID])
# Test that a different users cannot see the lb_member l7policies
@@ -614,11 +614,11 @@
listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
l7policy_name = data_utils.rand_name("lb_member_l7policy1-show")
l7policy_description = data_utils.arbitrary_string(size=255)
l7policy_kwargs = {
@@ -637,19 +637,19 @@
l7policy[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- l7policy = waiters.wait_for_status(
+ l7policy = self.wait_for_status(
self.mem_l7policy_client.show_l7policy,
l7policy[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- l7policy = waiters.wait_for_status(
+ l7policy = self.wait_for_status(
self.mem_l7policy_client.show_l7policy,
l7policy[const.ID], const.OPERATING_STATUS,
const.ONLINE,
@@ -720,11 +720,11 @@
listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
l7policy_name = data_utils.rand_name("lb_member_l7policy1-update")
l7policy_description = data_utils.arbitrary_string(size=255)
@@ -753,12 +753,12 @@
l7policy[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- l7policy = waiters.wait_for_status(
+ l7policy = self.wait_for_status(
self.mem_l7policy_client.show_l7policy,
l7policy[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -834,19 +834,19 @@
l7policy = self.mem_l7policy_client.update_l7policy(
l7policy[const.ID], **l7policy_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- l7policy = waiters.wait_for_status(
+ l7policy = self.wait_for_status(
self.mem_l7policy_client.show_l7policy,
l7policy[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- l7policy = waiters.wait_for_status(
+ l7policy = self.wait_for_status(
self.mem_l7policy_client.show_l7policy,
l7policy[const.ID], const.OPERATING_STATUS,
const.ONLINE,
@@ -897,7 +897,7 @@
l7policy[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -929,7 +929,7 @@
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index e015c82..2d91706 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -51,11 +51,11 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id, cascade=True)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
cls.allowed_cidrs = ['192.0.1.0/24']
if CONF.load_balancer.test_with_ipv6:
@@ -162,19 +162,19 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.OPERATING_STATUS,
const.ONLINE,
@@ -286,7 +286,7 @@
else:
raise e
else:
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
@@ -316,7 +316,7 @@
else:
raise e
else:
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
@@ -364,7 +364,7 @@
else:
raise e
else:
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
@@ -424,12 +424,12 @@
self.mem_lb_client.cleanup_loadbalancer,
lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
listener1_name = data_utils.rand_name("lb_member_listener2-list")
listener1_desc = 'B'
@@ -453,17 +453,17 @@
self.mem_listener_client.cleanup_listener,
listener1[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
- listener1 = waiters.wait_for_status(
+ listener1 = self.wait_for_status(
self.mem_listener_client.show_listener, listener1[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(listener1[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -492,17 +492,17 @@
self.mem_listener_client.cleanup_listener,
listener2[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
- listener2 = waiters.wait_for_status(
+ listener2 = self.wait_for_status(
self.mem_listener_client.show_listener, listener2[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(listener2[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -531,32 +531,32 @@
self.mem_listener_client.cleanup_listener,
listener3[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
- listener3 = waiters.wait_for_status(
+ listener3 = self.wait_for_status(
self.mem_listener_client.show_listener, listener3[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(listener3[const.ID])
if not CONF.load_balancer.test_with_noop:
# Wait for the enabled listeners to come ONLINE
- listener1 = waiters.wait_for_status(
+ listener1 = self.wait_for_status(
self.mem_listener_client.show_listener, listener1[const.ID],
const.OPERATING_STATUS, const.ONLINE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- listener2 = waiters.wait_for_status(
+ listener2 = self.wait_for_status(
self.mem_listener_client.show_listener, listener2[const.ID],
const.OPERATING_STATUS, const.ONLINE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- listener3 = waiters.wait_for_status(
+ listener3 = self.wait_for_status(
self.mem_listener_client.show_listener, listener3[const.ID],
const.OPERATING_STATUS, const.OFFLINE,
CONF.load_balancer.build_interval,
@@ -806,19 +806,19 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.OPERATING_STATUS,
const.ONLINE,
@@ -952,12 +952,12 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1077,19 +1077,19 @@
listener = self.mem_listener_client.update_listener(
listener[const.ID], **listener_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.OPERATING_STATUS,
const.ONLINE,
@@ -1160,7 +1160,7 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1194,7 +1194,7 @@
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1231,19 +1231,19 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- listener = waiters.wait_for_status(
+ listener = self.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.OPERATING_STATUS,
const.ONLINE,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index 35e04ca..4a010c5 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -63,11 +63,11 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id, cascade=True)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
@classmethod
def _listener_pool_create(cls, listener_protocol, pool_protocol,
@@ -113,11 +113,11 @@
listener[const.ID],
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool_name = data_utils.rand_name("lb_member_pool1_member")
pool_kwargs = {
@@ -147,11 +147,11 @@
pool[const.ID],
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
cls.listener_pool_cache[listener_pool_key] = pool[const.ID]
return pool[const.ID]
@@ -1000,12 +1000,12 @@
member[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- member = waiters.wait_for_status(
+ member = self.wait_for_status(
self.mem_member_client.show_member,
member[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1226,12 +1226,12 @@
self.mem_pool_client.cleanup_pool, pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
member1_name = data_utils.rand_name("lb_member_member2-list")
member1_kwargs = {
@@ -1254,18 +1254,18 @@
self.mem_member_client.cleanup_member,
member1[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- member1 = waiters.wait_for_status(
+ member1 = self.wait_for_status(
self.mem_member_client.show_member, member1[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout,
pool_id=pool_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.check_interval,
- CONF.load_balancer.check_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
test_ids.append(member1[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -1293,18 +1293,18 @@
self.mem_member_client.cleanup_member,
member2[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- member2 = waiters.wait_for_status(
+ member2 = self.wait_for_status(
self.mem_member_client.show_member, member2[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout,
pool_id=pool_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.check_interval,
- CONF.load_balancer.check_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
test_ids.append(member2[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -1332,18 +1332,18 @@
self.mem_member_client.cleanup_member,
member3[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- member3 = waiters.wait_for_status(
+ member3 = self.wait_for_status(
self.mem_member_client.show_member, member3[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout,
pool_id=pool_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.check_interval,
- CONF.load_balancer.check_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
test_ids.append(member3[const.ID])
# Test credentials that should see these members can see them.
@@ -1912,12 +1912,12 @@
member[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- member = waiters.wait_for_status(
+ member = self.wait_for_status(
self.mem_member_client.show_member,
member[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -2395,12 +2395,12 @@
member[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- member = waiters.wait_for_status(
+ member = self.wait_for_status(
self.mem_member_client.show_member,
member[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -2410,7 +2410,7 @@
status = const.OFFLINE
if CONF.load_balancer.test_with_noop or provider == 'tungstenfabric':
status = const.NO_MONITOR
- member = waiters.wait_for_status(
+ member = self.wait_for_status(
self.mem_member_client.show_member,
member[const.ID], const.OPERATING_STATUS,
status,
@@ -2497,12 +2497,12 @@
const.MONITOR_PORT] + 1
member = self.mem_member_client.update_member(
member[const.ID], **member_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- member = waiters.wait_for_status(
+ member = self.wait_for_status(
self.mem_member_client.show_member,
member[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -2510,7 +2510,7 @@
CONF.load_balancer.build_timeout,
pool_id=pool_id)
# Operating status will be NO_MONITOR regardless of noop
- member = waiters.wait_for_status(
+ member = self.wait_for_status(
self.mem_member_client.show_member,
member[const.ID], const.OPERATING_STATUS,
const.NO_MONITOR,
@@ -2844,12 +2844,12 @@
pool_id = pool[const.ID]
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
member1_name = data_utils.rand_name("lb_member_member1-batch")
member1_kwargs = {
@@ -2880,12 +2880,12 @@
member1[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
member2_name = data_utils.rand_name("lb_member_member2-batch")
member2_kwargs = {
@@ -2917,12 +2917,12 @@
member2[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
member3_name = data_utils.rand_name("lb_member_member3-batch")
member3_kwargs = {
@@ -2978,7 +2978,7 @@
self.mem_member_client.update_members(
pool_id=pool_id, members_list=batch_update_list)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -3210,7 +3210,7 @@
member[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -3245,7 +3245,7 @@
CONF.load_balancer.check_timeout,
pool_id=pool_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index d85d593..b05d27b 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -48,11 +48,11 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id, cascade=True)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
def _test_pool_create(self, listener_protocol, pool_protocol,
protocol_port, algorithm, session_persistence=None):
@@ -96,11 +96,11 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool_name = data_utils.rand_name("lb_member_pool1-create")
pool_description = data_utils.arbitrary_string(size=255)
@@ -177,12 +177,12 @@
pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- pool = waiters.wait_for_status(
+ pool = self.wait_for_status(
self.mem_pool_client.show_pool,
pool[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -190,7 +190,7 @@
CONF.load_balancer.build_timeout)
if (listener_protocol is not None and
not CONF.load_balancer.test_with_noop):
- pool = waiters.wait_for_status(
+ pool = self.wait_for_status(
self.mem_pool_client.show_pool,
pool[const.ID], const.OPERATING_STATUS,
const.ONLINE,
@@ -272,12 +272,12 @@
lb_id = self.lb_id
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
pool1_name = data_utils.rand_name("lb_member_pool2-list")
pool1_desc = 'B'
@@ -326,17 +326,17 @@
self.mem_pool_client.cleanup_pool,
pool1[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
- pool1 = waiters.wait_for_status(
+ pool1 = self.wait_for_status(
self.mem_pool_client.show_pool, pool1[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(pool1[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -377,17 +377,17 @@
self.mem_pool_client.cleanup_pool,
pool2[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
- pool2 = waiters.wait_for_status(
+ pool2 = self.wait_for_status(
self.mem_pool_client.show_pool, pool2[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(pool2[const.ID])
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
@@ -428,17 +428,17 @@
self.mem_pool_client.cleanup_pool,
pool3[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
- pool3 = waiters.wait_for_status(
+ pool3 = self.wait_for_status(
self.mem_pool_client.show_pool, pool3[const.ID],
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb_id,
- const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
test_ids.append(pool3[const.ID])
# Test that a different users cannot see the lb_member pools.
@@ -687,12 +687,12 @@
pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- pool = waiters.wait_for_status(
+ pool = self.wait_for_status(
self.mem_pool_client.show_pool,
pool[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -841,12 +841,12 @@
pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- pool = waiters.wait_for_status(
+ pool = self.wait_for_status(
self.mem_pool_client.show_pool,
pool[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -956,12 +956,12 @@
pool = self.mem_pool_client.update_pool(
pool[const.ID], **pool_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- pool = waiters.wait_for_status(
+ pool = self.wait_for_status(
self.mem_pool_client.show_pool,
pool[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1005,12 +1005,12 @@
pool = self.mem_pool_client.update_pool(
pool[const.ID], **pool_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- pool = waiters.wait_for_status(
+ pool = self.wait_for_status(
self.mem_pool_client.show_pool,
pool[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1094,7 +1094,7 @@
pool[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1126,7 +1126,7 @@
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index af84445..8a95810 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -209,11 +209,11 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id, cascade=True)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
if CONF.validation.connect_method == 'floating':
port_id = lb[const.VIP_PORT_ID]
@@ -246,11 +246,11 @@
cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1-tls")
@@ -271,7 +271,7 @@
member1[const.ID], cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(
+ cls.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -296,7 +296,7 @@
member2[const.ID], cls.pool_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(
+ cls.wait_for_status(
cls.mem_lb_client.show_loadbalancer, cls.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -322,11 +322,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test
@@ -390,11 +390,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
listener_name = data_utils.rand_name("lb_member_listener2-http-tls")
listener_kwargs = {
@@ -411,11 +411,11 @@
self.listener2_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test
@@ -446,11 +446,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test
@@ -591,11 +591,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test
@@ -619,11 +619,11 @@
self.listener2_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test
@@ -797,11 +797,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test that no client certificate fails to connect
self.assertRaises(
@@ -870,14 +870,14 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_listener_client.show_listener,
self.listener_id, const.OPERATING_STATUS,
const.ONLINE,
@@ -983,14 +983,14 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_listener_client.show_listener,
self.listener_id, const.OPERATING_STATUS,
const.ONLINE,
@@ -1018,13 +1018,13 @@
self.listener2_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
if not CONF.load_balancer.test_with_noop:
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_listener_client.show_listener,
self.listener2_id, const.OPERATING_STATUS,
const.ONLINE,
@@ -1231,11 +1231,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.set_alpn_protocols(c_protos)
@@ -1279,11 +1279,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# NOTE(pas-ha): depending on what other tempest plugins are installed
# the eventlet might've been imported by that time, and, since
# dnspython 2.2.0, importing eventlet or any part of it effectively
@@ -1335,11 +1335,11 @@
pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1-tls-reencrypt")
@@ -1360,7 +1360,7 @@
member1[const.ID], pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1385,7 +1385,7 @@
member2[const.ID], pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1408,11 +1408,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test with no CA validation
self.check_members_balanced(self.lb_vip_address, protocol=const.HTTP,
@@ -1425,12 +1425,12 @@
self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_pool_client.show_pool, pool_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1446,12 +1446,12 @@
self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_pool_client.show_pool, pool_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1467,12 +1467,12 @@
self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_pool_client.show_pool, pool_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1509,11 +1509,11 @@
pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
hm_name = data_utils.rand_name("lb_member_hm1-tls-client-auth")
hm_kwargs = {
@@ -1535,12 +1535,12 @@
hm[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- hm = waiters.wait_for_status(
+ hm = self.wait_for_status(
self.mem_healthmonitor_client.show_healthmonitor,
hm[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -1567,7 +1567,7 @@
member1[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1593,7 +1593,7 @@
member2[const.ID], pool_id=pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1616,11 +1616,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test that there are no members without a client certificate
url = 'http://{0}:85'.format(self.lb_vip_address)
@@ -1633,12 +1633,12 @@
self.mem_pool_client.update_pool(pool_id, **pool_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_pool_client.show_pool, pool_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1646,12 +1646,12 @@
# Make sure the health monitor has brought the members up after
# the pool update.
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_member_client.show_member, member1[const.ID],
const.OPERATING_STATUS, const.ONLINE,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout, error_ok=True, pool_id=pool_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_member_client.show_member, member2[const.ID],
const.OPERATING_STATUS, const.ONLINE,
CONF.load_balancer.check_interval,
@@ -1697,11 +1697,11 @@
pool_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1-tls-reencrypt")
@@ -1723,7 +1723,7 @@
lb_client=self.mem_lb_client,
lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1749,7 +1749,7 @@
lb_client=self.mem_lb_client,
lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -1774,11 +1774,11 @@
lb_client=self.mem_lb_client,
lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
listener_name = data_utils.rand_name(
"lb_member_listener1-http")
@@ -1796,11 +1796,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
context = ssl.SSLContext(ssl.PROTOCOL_TLS)
context.set_alpn_protocols(['h2', 'http/1.1'])
@@ -1846,11 +1846,11 @@
self.listener_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Test that no client certificate fails to connect
self.assertRaises(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
index e3d9b91..922f388 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
@@ -51,11 +51,11 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id, cascade=True)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
listener_name = data_utils.rand_name("lb_member_listener1_l7rule")
listener_kwargs = {
@@ -72,11 +72,11 @@
cls.listener_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
l7policy_name = data_utils.rand_name("lb_member_l7policy1_l7rule")
l7policy_kwargs = {
@@ -92,11 +92,11 @@
cls.l7policy_id,
lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ cls.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
@decorators.idempotent_id('a1c268b9-5304-48c7-9a34-0ef0e8e9307e')
def test_l7rule_CRUD(self):
@@ -125,12 +125,12 @@
l7rule[const.ID], l7policy_id=self.l7policy_id,
lb_client=self.mem_lb_client, lb_id=self.lb_id)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- l7rule = waiters.wait_for_status(
+ l7rule = self.wait_for_status(
self.mem_l7rule_client.show_l7rule,
l7rule[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -163,12 +163,12 @@
l7rule = self.mem_l7rule_client.update_l7rule(
l7rule[const.ID], **l7rule_update_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
- l7rule = waiters.wait_for_status(
+ l7rule = self.wait_for_status(
self.mem_l7rule_client.show_l7rule,
l7rule[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
@@ -186,7 +186,7 @@
self.assertEqual(l7rule_update_kwargs[item], l7rule[item])
# L7Rule delete
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
index e5a678c..8fee023 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
@@ -120,11 +120,11 @@
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
- lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb[const.ID], const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ lb = self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
self.assertFalse(lb[const.ADMIN_STATE_UP])
parser.parse(lb[const.CREATED_AT])
@@ -157,11 +157,11 @@
description=new_description,
name=new_name)
- lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- lb[const.ID], const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ lb = self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
self.assertTrue(lb[const.ADMIN_STATE_UP])
self.assertEqual(new_description, lb[const.DESCRIPTION])
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
index 0ccfe55..06d1fca 100644
--- a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
@@ -92,11 +92,11 @@
else:
self.lb_vip_address = lb[const.VIP_ADDRESS]
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
# Confirm the spare pool has changed since last check
spare_amps_2 = waiters.wait_for_spare_amps(
@@ -115,11 +115,11 @@
listener = self.mem_listener_client.create_listener(**listener_kwargs)
self.listener_id = listener[const.ID]
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
pool_name = data_utils.rand_name("lb_member_pool1-spare")
pool_kwargs = {
@@ -131,11 +131,11 @@
pool = self.mem_pool_client.create_pool(**pool_kwargs)
self.pool_id = pool[const.ID]
- waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
- self.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.build_interval,
- CONF.load_balancer.build_timeout)
+ self.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
# Set up Member 1 for Webserver 1
member1_name = data_utils.rand_name("lb_member_member1-spare")
@@ -150,7 +150,7 @@
member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
self.mem_member_client.create_member(**member1_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -169,7 +169,7 @@
member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
self.mem_member_client.create_member(**member2_kwargs)
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer, self.lb_id,
const.PROVISIONING_STATUS, const.ACTIVE,
CONF.load_balancer.check_interval,
@@ -192,7 +192,7 @@
self.os_admin_servers_client.delete_server(amp[0][const.COMPUTE_ID])
# Wait for the amphora failover to start
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.PENDING_UPDATE, CONF.load_balancer.check_interval,
@@ -200,7 +200,7 @@
# Wait for the load balancer to return to ACTIVE so the
# cleanup steps will pass
- waiters.wait_for_status(
+ self.wait_for_status(
self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
const.ACTIVE, CONF.load_balancer.lb_build_interval,
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 3db8b8e..a50ad44 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -646,6 +646,48 @@
port = port + 1
return port
+ @classmethod
+ def wait_for_status(cls, show_client, id, status_key, status,
+ check_interval, check_timeout,
+ root_tag=None, error_ok=False,
+ **kwargs):
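+ """Wait for an object to reach a specific status.
+
+ Delegates to waiters.wait_for_status(); on timeout, makes a
+ best-effort dump of the amphora console logs before re-raising.
+ """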
+ try:
+ return waiters.wait_for_status(
+ show_client, id, status_key, status,
+ check_interval, check_timeout,
+ root_tag=root_tag,
+ error_ok=error_ok, **kwargs)
+ except exceptions.TimeoutException:
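+ # The amphora query filters on loadbalancer_id, so console logs
+ # are only collected when 'id' is a load balancer ID; for other
+ # resources the list is empty and the timeout is re-raised as-is.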
+ amphoras = cls.lb_admin_amphora_client.list_amphorae(
+ query_params='{loadbalancer_id}={lb_id}'.format(
+ loadbalancer_id=const.LOADBALANCER_ID, lb_id=id))
+ servers = [{'id': amp[const.COMPUTE_ID]} for amp in amphoras]
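+ # Nova may be deployed without console support; skip the dump
+ # rather than fail with an unrelated error.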
+ if servers and not CONF.compute_feature_enabled.console_output:
+ LOG.debug('Console output not supported, cannot log amphora consoles')
+ raise
+ for server in servers:
+ try:
+ # Waiter kwargs (e.g. pool_id) are not console output parameters
+ output = cls.os_admin_servers_client.get_console_output(
+ server['id'])['output']
+ LOG.debug('Console output for %s\nbody=\n%s',
+ server['id'], output)
+ except exceptions.NotFound:
+ LOG.debug("Server %s disappeared(deleted) while looking "
+ "for the console log", server['id'])
+ raise
+
class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
@classmethod