[TF] Fix issue with missing NetworkIpAvailability extension

In environments that run TungstenFabric as the SDN backend with the
amphora provider enabled, Neutron does not expose the
NetworkIpAvailability extension, and checking the load balancer
provider name no longer identifies the backend. Detect TungstenFabric
through the tempest [sdn] option group instead, and build the VIP
kwargs from explicit subnets rather than a bare vip_network_id.
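
The detection reads the tempest [sdn] option group, which is assumed
to be registered by a separate SDN tempest plugin; a deployment
running these tests against TungstenFabric would carry roughly the
following in tempest.conf:

    [sdn]
    service_name = tungstenfabric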
Related-PROD: PRODX-30528
Change-Id: I63c36c3c23d920f12f4823062990227844f2bf49
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index c48e257..bbf1ba6 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -15,6 +15,7 @@
from oslo_config import cfg
from oslo_log import log as logging
+from tempest import config
from octavia_tempest_plugin.common import constants as const
@@ -304,3 +305,12 @@
'scope? This configuration value should be same as '
'octavia.conf: [oslo_policy].enforce_scope option.'),
]
+
+
+def is_tungstenfabric_backend_enabled():
+ """Return True if TungstenFabric is used as a backend."""
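+    # The [sdn] group may be absent (e.g. no SDN tempest plugin has
+    # registered it); treat that case as a non-TungstenFabric backend.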
+ try:
+        sdn = config.CONF.sdn
+        return getattr(sdn, 'service_name', None) == 'tungstenfabric'
+ except cfg.NoSuchOptError:
+ return False
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index 65d4fe5..ae2edc3 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -76,9 +76,15 @@
* Validates that other accounts cannot see the amphora.
"""
lb_name = data_utils.rand_name("lb_member_lb2_amphora-list")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
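+        # TungstenFabric does not provide the NetworkIpAvailability
+        # extension that creating a load balancer from a bare
+        # vip_network_id relies on, so pass explicit subnet kwargs instead.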
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
lb_id = lb[const.ID]
self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)
@@ -173,9 +179,15 @@
'Octavia API version 2.7 or newer.')
lb_name = data_utils.rand_name("lb_member_lb2_amphora-update")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
lb_id = lb[const.ID]
self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 3dc0d04..e429b67 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -396,7 +396,7 @@
lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
lb_kwargs = {const.NAME: lb_name,
const.PROVIDER: CONF.load_balancer.provider}
- if CONF.load_balancer.provider == 'tungstenfabric':
+ if config.is_tungstenfabric_backend_enabled():
self._setup_lb_network_kwargs(lb_kwargs, 4)
else:
lb_kwargs.update({
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 1d48e3c..754e439 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -175,7 +175,7 @@
lb_name = data_utils.rand_name("lb_member_lb1-delete")
lb_kwargs = {const.NAME: lb_name,
const.PROVIDER: CONF.load_balancer.provider}
- if CONF.load_balancer.provider == 'tungstenfabric':
+ if config.is_tungstenfabric_backend_enabled():
self._setup_lb_network_kwargs(lb_kwargs, 4)
else:
lb_kwargs.update({
@@ -228,9 +228,15 @@
* Validates the load balancer is in the DELETED state.
"""
lb_name = data_utils.rand_name("lb_member_lb1-cascade_delete")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider}
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.addClassResourceCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -275,7 +281,7 @@
return [lb for i, lb in enumerate(lbs) if i not in indexes]
def _setup_lb_kwargs(self, lb_kwargs):
- if CONF.load_balancer.provider == 'tungstenfabric':
+ if config.is_tungstenfabric_backend_enabled():
del lb_kwargs[const.VIP_NETWORK_ID]
self._setup_lb_network_kwargs(lb_kwargs, 4)
@@ -982,9 +988,15 @@
* Wait for the load balancer to go ACTIVE.
"""
lb_name = data_utils.rand_name("lb_member_lb1-failover")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider}
+ if config.is_tungstenfabric_backend_enabled():
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.addClassResourceCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index 6b56979..9df1ca5 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -263,7 +263,7 @@
lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
lb_kwargs = {const.NAME: lb_name,
const.PROVIDER: CONF.load_balancer.provider}
- if CONF.load_balancer.provider == 'tungstenfabric':
+ if config.is_tungstenfabric_backend_enabled():
self._setup_lb_network_kwargs(lb_kwargs, 4)
else:
lb_kwargs.update({
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 6d8523c..c93008e 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -34,6 +34,7 @@
from octavia_tempest_plugin.common import cert_utils
from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin import config as config_octavia
from octavia_tempest_plugin.tests import RBAC_tests
from octavia_tempest_plugin.tests import validators
from octavia_tempest_plugin.tests import waiters
@@ -828,7 +829,8 @@
LOG.debug('Octavia Setup: webserver2_public_ip = {}'.format(
cls.webserver2_public_ip))
- if CONF.load_balancer.test_with_ipv6:
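+        # The IPv6 NIC setup is skipped when TungstenFabric is the
+        # backend; the IPv6 member paths are not exercised there.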
+ if (CONF.load_balancer.test_with_ipv6 and not
+ config_octavia.is_tungstenfabric_backend_enabled()):
# Enable the IPv6 nic in webserver 1
cls._enable_ipv6_nic_webserver(
cls.webserver1_public_ip, cls.lb_member_keypair['private_key'],
@@ -1008,9 +1010,24 @@
network_id=network['id'],
mac_address=instance_network[0]['OS-EXT-IPS-MAC:mac_addr'])
port_id = result['ports'][0]['id']
- result = cls.lb_mem_float_ip_client.create_floatingip(
- floating_network_id=CONF.network.public_network_id,
- port_id=port_id)
+ if config_octavia.is_tungstenfabric_backend_enabled():
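+            # The port may carry both IPv4 and IPv6 fixed IPs; associate
+            # the floating IP with the IPv4 one explicitly.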
+ port = result['ports'][0]
+ fixed_ip = None
+            for ip in port["fixed_ips"]:
+                if isinstance(ipaddress.ip_address(ip["ip_address"]),
+                              ipaddress.IPv4Address):
+                    fixed_ip = ip["ip_address"]
+                    break
+            assert fixed_ip is not None, (
+                f"Port has no IPv4 address: {port['fixed_ips']}")
+ result = cls.lb_mem_float_ip_client.create_floatingip(
+ floating_network_id=CONF.network.public_network_id,
+ port_id=port_id,
+ fixed_ip_address=fixed_ip)
+ else:
+ result = cls.lb_mem_float_ip_client.create_floatingip(
+ floating_network_id=CONF.network.public_network_id,
+ port_id=port_id)
floating_ip = result['floatingip']
LOG.info('webserver1_floating_ip: {}'.format(floating_ip))
cls.addClassResourceCleanup(