[TF] Fix issue with missing NetworkIpAvailability extension.
In environments with TungstenFabric and the amphora provider enabled.
Related-PROD: PRODX-30528
Change-Id: I63c36c3c23d920f12f4823062990227844f2bf49
(cherry picked from commit 2f7eee6f505c2ec5d067f2c5ae040e41033e6401)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index 3ecf664..9ce5fb1 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -76,9 +76,15 @@
* Validates that other accounts cannot see the amphora.
"""
lb_name = data_utils.rand_name("lb_member_lb2_amphora-list")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
lb_id = lb[const.ID]
self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)
@@ -173,9 +179,15 @@
'Octavia API version 2.7 or newer.')
lb_name = data_utils.rand_name("lb_member_lb2_amphora-update")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
lb_id = lb[const.ID]
self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index f9b32c6..1868147 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -502,7 +502,7 @@
lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
lb_kwargs = {const.NAME: lb_name,
const.PROVIDER: CONF.load_balancer.provider}
- if CONF.load_balancer.provider == 'tungstenfabric':
+ if config.is_tungstenfabric_backend_enabled():
self._setup_lb_network_kwargs(lb_kwargs, 4)
else:
lb_kwargs.update({
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 50ad0e1..4aa3c46 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -175,7 +175,7 @@
lb_name = data_utils.rand_name("lb_member_lb1-delete")
lb_kwargs = {const.NAME: lb_name,
const.PROVIDER: CONF.load_balancer.provider}
- if CONF.load_balancer.provider == 'tungstenfabric':
+ if config.is_tungstenfabric_backend_enabled():
self._setup_lb_network_kwargs(lb_kwargs, 4)
else:
lb_kwargs.update({
@@ -229,9 +229,15 @@
* Validates the load balancer is in the DELETED state.
"""
lb_name = data_utils.rand_name("lb_member_lb1-cascade_delete")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider}
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.addClassResourceCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -277,7 +283,7 @@
return [lb for i, lb in enumerate(lbs) if i not in indexes]
def _setup_lb_kwargs(self, lb_kwargs):
- if CONF.load_balancer.provider == 'tungstenfabric':
+ if config.is_tungstenfabric_backend_enabled():
del lb_kwargs[const.VIP_NETWORK_ID]
self._setup_lb_network_kwargs(lb_kwargs, 4)
@@ -985,9 +991,15 @@
* Wait for the load balancer to go ACTIVE.
"""
lb_name = data_utils.rand_name("lb_member_lb1-failover")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider}
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.addClassResourceCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index a4e28b0..b998b9b 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -34,6 +34,7 @@
from octavia_tempest_plugin.common import cert_utils
from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin import config as config_octavia
from octavia_tempest_plugin.tests import RBAC_tests
from octavia_tempest_plugin.tests import validators
from octavia_tempest_plugin.tests import waiters
@@ -873,7 +874,8 @@
LOG.debug('Octavia Setup: webserver2_public_ip = {}'.format(
cls.webserver2_public_ip))
- if CONF.load_balancer.test_with_ipv6:
+ if (CONF.load_balancer.test_with_ipv6 and not
+ config_octavia.is_tungstenfabric_backend_enabled()):
# Enable the IPv6 nic in webserver 1
cls._enable_ipv6_nic_webserver(
cls.webserver1_public_ip, cls.lb_member_keypair['private_key'],
@@ -1053,9 +1055,24 @@
network_id=network['id'],
mac_address=instance_network[0]['OS-EXT-IPS-MAC:mac_addr'])
port_id = result['ports'][0]['id']
- result = cls.lb_mem_float_ip_client.create_floatingip(
- floating_network_id=CONF.network.public_network_id,
- port_id=port_id)
+ if config_octavia.is_tungstenfabric_backend_enabled():
+ port = result['ports'][0]
+ fixed_ip = None
+ for ip in port["fixed_ips"]:
+ if (type(ipaddress.ip_address(ip["ip_address"])) is
+ ipaddress.IPv4Address):
+ fixed_ip = ip["ip_address"]
+ break
+ assert fixed_ip is not None, (f"Port doesn't have ipv4 "
+ f"address: {port['fixed_ips']}")
+ result = cls.lb_mem_float_ip_client.create_floatingip(
+ floating_network_id=CONF.network.public_network_id,
+ port_id=port_id,
+ fixed_ip_address=fixed_ip)
+ else:
+ result = cls.lb_mem_float_ip_client.create_floatingip(
+ floating_network_id=CONF.network.public_network_id,
+ port_id=port_id)
floating_ip = result['floatingip']
LOG.info('webserver1_floating_ip: {}'.format(floating_ip))
cls.addClassResourceCleanup(