Merge "[TF driver] Add check for listener protocols" into mcp/epoxy
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 0eb773a..44f864c 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -15,6 +15,7 @@
 
 from oslo_config import cfg
 from oslo_log import log as logging
+from tempest import config
 
 from octavia_tempest_plugin.common import constants as const
 
@@ -310,3 +311,12 @@
                      'scope? This configuration value should be same as '
                      'octavia.conf: [oslo_policy].enforce_scope option.'),
 ]
+
+
+def is_tungstenfabric_backend_enabled():
+    """Return True when tempest CONF.sdn.service_name is 'tungstenfabric'."""
+    try:
+        sdn = getattr(config.CONF, 'sdn')
+        return getattr(sdn, 'service_name', None) == 'tungstenfabric'
+    except cfg.NoSuchOptError:
+        return False
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index ac8c635..75c29c3 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -76,9 +76,15 @@
         * Validates that other accounts cannot see the amphora.
         """
         lb_name = data_utils.rand_name("lb_member_lb2_amphora-list")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+        if config.is_tungstenfabric_backend_enabled():
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+        else:
+            lb_kwargs.update({
+                const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+            })
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         lb_id = lb[const.ID]
         self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)
 
@@ -166,9 +172,15 @@
                                      'Octavia API version 2.7 or newer.')
 
         lb_name = data_utils.rand_name("lb_member_lb2_amphora-update")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+        if config.is_tungstenfabric_backend_enabled():
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+        else:
+            lb_kwargs.update({
+                const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+            })
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         lb_id = lb[const.ID]
         self.addCleanup(self.mem_lb_client.cleanup_loadbalancer, lb_id)
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index f1abffe..f810a2a 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -36,6 +36,7 @@
 
     @classmethod
     def skip_checks(cls):
+        super(HealthMonitorAPITest, cls).skip_checks()
         if CONF.load_balancer.provider == 'tungstenfabric':
             raise cls.skipException("Health monitor entity isn't applicable "
                                     "in case of TungstenFabric.")
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index af62c79..50aa35f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -32,6 +32,7 @@
 
     @classmethod
     def skip_checks(cls):
+        super(L7PolicyAPITest, cls).skip_checks()
         if CONF.load_balancer.provider == 'tungstenfabric':
             raise cls.skipException('Not supported by TungstenFabric.')
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
index 8607344..282a541 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
@@ -32,6 +32,7 @@
 
     @classmethod
     def skip_checks(cls):
+        super(L7RuleAPITest, cls).skip_checks()
         if CONF.load_balancer.provider == 'tungstenfabric':
             raise cls.skipException('Not supported by TungstenFabric.')
 
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 0bd4dd2..3a2747f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -748,7 +748,7 @@
         lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
         lb_kwargs = {const.NAME: lb_name,
                      const.PROVIDER: CONF.load_balancer.provider}
-        if CONF.load_balancer.provider == 'tungstenfabric':
+        if config.is_tungstenfabric_backend_enabled():
             self._setup_lb_network_kwargs(lb_kwargs, 4)
         else:
             lb_kwargs.update({
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index c9620d4..8ca9a49 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -166,7 +166,7 @@
         lb_name = data_utils.rand_name("lb_member_lb1-delete")
         lb_kwargs = {const.NAME: lb_name,
                      const.PROVIDER: CONF.load_balancer.provider}
-        if CONF.load_balancer.provider == 'tungstenfabric':
+        if config.is_tungstenfabric_backend_enabled():
             self._setup_lb_network_kwargs(lb_kwargs, 4)
         else:
             lb_kwargs.update({
@@ -211,9 +211,15 @@
         * Validates the load balancer is in the DELETED state.
         """
         lb_name = data_utils.rand_name("lb_member_lb1-cascade_delete")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {const.NAME: lb_name,
+                     const.PROVIDER: CONF.load_balancer.provider}
+        if config.is_tungstenfabric_backend_enabled():
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+        else:
+            lb_kwargs.update({
+                const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+            })
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -250,7 +256,7 @@
         return [lb for i, lb in enumerate(lbs) if i not in indexes]
 
     def _setup_lb_kwargs(self, lb_kwargs):
-        if CONF.load_balancer.provider == 'tungstenfabric':
+        if config.is_tungstenfabric_backend_enabled():
             del lb_kwargs[const.VIP_NETWORK_ID]
             self._setup_lb_network_kwargs(lb_kwargs, 4)
 
@@ -931,9 +937,15 @@
         * Wait for the load balancer to go ACTIVE.
         """
         lb_name = data_utils.rand_name("lb_member_lb1-failover")
-        lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, provider=CONF.load_balancer.provider,
-            vip_network_id=self.lb_member_vip_net[const.ID])
+        lb_kwargs = {const.NAME: lb_name,
+                     const.PROVIDER: CONF.load_balancer.provider}
+        if config.is_tungstenfabric_backend_enabled():
+            self._setup_lb_network_kwargs(lb_kwargs, 4)
+        else:
+            lb_kwargs.update({
+                const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+            })
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index a4c7630..c666085 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -2328,7 +2328,7 @@
             CONF.load_balancer.build_timeout,
             pool_id=pool_id)
         status = const.OFFLINE
-        if CONF.load_balancer.test_with_noop:
+        if CONF.load_balancer.test_with_noop or provider == 'tungstenfabric':
             status = const.NO_MONITOR
         member = waiters.wait_for_status(
             self.mem_member_client.show_member,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index 4c9508d..c1e2500 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -40,12 +40,7 @@
         lb_name = data_utils.rand_name("lb_member_lb1_pool")
         lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
                      const.NAME: lb_name}
-        if CONF.load_balancer.provider == 'tungstenfabric':
-            cls._setup_lb_network_kwargs(lb_kwargs, 4)
-        else:
-            lb_kwargs.update({
-                const.VIP_NETWORK_ID: cls.lb_member_vip_net[const.ID]
-            })
+        cls._setup_lb_network_kwargs(lb_kwargs)
 
         lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
         cls.lb_id = lb[const.ID]
@@ -351,7 +346,7 @@
                                         algorithm=algorithm)
             if not listener_protocol:
                 raise testtools.TestCase.skipException(
-                    "TungstenFabric can't create listener with loadbalancer "
+                    "TungstenFabric can't create pool with loadbalancer "
                     "argument."
                 )
 
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
index b9f0107..eff0e1f 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -32,6 +32,7 @@
 
     @classmethod
     def skip_checks(cls):
+        super(HealthMonitorScenarioTest, cls).skip_checks()
         if CONF.load_balancer.provider == 'tungstenfabric':
             raise cls.skipException("Health monitor entity isn't applicable "
                                     "in case of TungstenFabric.")
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
index aa9f6f4..ffa084b 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
@@ -30,6 +30,7 @@
 
     @classmethod
     def skip_checks(cls):
+        super(L7PolicyScenarioTest, cls).skip_checks()
         if CONF.load_balancer.provider == 'tungstenfabric':
             raise cls.skipException('Not supported by TungstenFabric.')
 
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
index ce5a5bf..e3d9b91 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
@@ -30,6 +30,7 @@
 
     @classmethod
     def skip_checks(cls):
+        super(L7RuleScenarioTest, cls).skip_checks()
         if CONF.load_balancer.provider == 'tungstenfabric':
             raise cls.skipException('Not supported by TungstenFabric.')
 
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index f63d96d..07394cf 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -344,7 +344,7 @@
                                         algorithm=algorithm)
             if not listener_protocol:
                 raise testtools.TestCase.skipException(
-                    "TungstenFabric can't create listener with loadbalancer "
+                    "TungstenFabric can't create pool with loadbalancer "
                     "argument."
                 )
 
@@ -448,6 +448,12 @@
         parser.parse(pool[const.CREATED_AT])
         parser.parse(pool[const.UPDATED_AT])
         UUID(pool[const.ID])
+        if CONF.load_balancer.provider == 'tungstenfabric':
+            # Operating status for a pool without members will be ONLINE if
+            # it is attached to a listener
+            self.assertEqual(const.ONLINE, pool[const.OPERATING_STATUS])
+        else:
+            self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
         self.assertEqual(pool_protocol, pool[const.PROTOCOL])
         self.assertEqual(1, len(pool[const.LOADBALANCERS]))
         self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 0ae145c..a63c5aa 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -13,6 +13,7 @@
 #    under the License.
 
 import ipaddress
+import netaddr
 import os
 import random
 import re
@@ -36,6 +37,7 @@
 
 from octavia_tempest_plugin.common import cert_utils
 from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin import config as config_octavia
 import octavia_tempest_plugin.services.load_balancer.v2 as lbv2
 from octavia_tempest_plugin.tests import RBAC_tests
 from octavia_tempest_plugin.tests import validators
@@ -450,12 +452,20 @@
             cls.lb_mem_net_client.show_network,
             cls.lb_member_vip_net['id'])
 
+        # Add allocation pool to prevent IP address conflicts with portprober
+        cidr = netaddr.IPNetwork(CONF.load_balancer.vip_subnet_cidr)
+        pool_start = ipaddress.ip_address(str(cidr[101]))
+        pool_end = ipaddress.ip_address(str(cidr[254]))
+        allocation_pools = [{'start': str(pool_start), 'end': str(pool_end)}]
+
         # Create tenant VIP subnet
         subnet_kwargs = {
             'name': data_utils.rand_name("lb_member_vip_subnet"),
             'network_id': cls.lb_member_vip_net['id'],
             'cidr': CONF.load_balancer.vip_subnet_cidr,
-            'ip_version': 4}
+            'ip_version': 4,
+            'allocation_pools': allocation_pools
+        }
         result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
         cls.lb_member_vip_subnet = result['subnet']
         LOG.info('lb_member_vip_subnet: %s', cls.lb_member_vip_subnet)
@@ -667,7 +677,7 @@
     @classmethod
     def check_tf_compatibility(cls, protocol=None, algorithm=None):
         # TungstenFabric supported protocols and algorithms
-        tf_protocols = [const.HTTP, const.HTTPS, const.TCP, const.UDP,
+        tf_protocols = [const.HTTP, const.HTTPS, const.TCP,
                         const.TERMINATED_HTTPS]
         tf_algorithms = [const.LB_ALGORITHM_ROUND_ROBIN,
                          const.LB_ALGORITHM_LEAST_CONNECTIONS,
@@ -940,7 +950,8 @@
         LOG.debug('Octavia Setup: webserver2_public_ip = %s',
                   cls.webserver2_public_ip)
 
-        if CONF.load_balancer.test_with_ipv6:
+        if (CONF.load_balancer.test_with_ipv6 and not
+                config_octavia.is_tungstenfabric_backend_enabled()):
             # Enable the IPv6 nic in webserver 1
             cls._enable_ipv6_nic_webserver(
                 cls.webserver1_public_ip, cls.lb_member_keypair['private_key'],
@@ -1120,9 +1131,24 @@
                 network_id=network['id'],
                 mac_address=instance_network[0]['OS-EXT-IPS-MAC:mac_addr'])
             port_id = result['ports'][0]['id']
-            result = cls.lb_mem_float_ip_client.create_floatingip(
-                floating_network_id=CONF.network.public_network_id,
-                port_id=port_id)
+            if config_octavia.is_tungstenfabric_backend_enabled():
+                port = result['ports'][0]
+                fixed_ip = None
+                for ip in port["fixed_ips"]:
+                    if (type(ipaddress.ip_address(ip["ip_address"])) is
+                            ipaddress.IPv4Address):
+                        fixed_ip = ip["ip_address"]
+                        break
+                assert fixed_ip is not None, (f"Port doesn't have ipv4 "
+                                              f"address: {port['fixed_ips']}")
+                result = cls.lb_mem_float_ip_client.create_floatingip(
+                    floating_network_id=CONF.network.public_network_id,
+                    port_id=port_id,
+                    fixed_ip_address=fixed_ip)
+            else:
+                result = cls.lb_mem_float_ip_client.create_floatingip(
+                    floating_network_id=CONF.network.public_network_id,
+                    port_id=port_id)
             floating_ip = result['floatingip']
             LOG.info('webserver1_floating_ip: %s', floating_ip)
             cls.addClassResourceCleanup(