[TF] Add support for the TungstenFabric driver for Octavia
Skip tests that the driver does not support and avoid unsupported
parameters for load balancers, listeners, pools and members, in line
with Contrail restrictions.
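Most of the gating follows a single pattern, a provider check in
skip_checks(); a representative sketch (the class name is illustrative):

    @classmethod
    def skip_checks(cls):
        super(ExampleAPITest, cls).skip_checks()
        if CONF.load_balancer.provider == 'tungstenfabric':
            raise cls.skipException('Not supported by TungstenFabric.')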
Related-PROD: PRODX-7072
Change-Id: I736f55fa4186464424e1ebc05c650f9a43375c62
(cherry picked from commit 0aece76a33ee755f6b56a70e8821b99c1f6b2600)
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
index 11b0cec..87a4c5f 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
@@ -171,6 +171,8 @@
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
+ @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+ "Not supported by TungstenFabric")
@testtools.skipIf(CONF.load_balancer.test_with_noop,
'Active/Standby tests will not work in noop mode.')
@decorators.idempotent_id('e591fa7a-0eee-485a-8ca0-5cf1a556bdf0')
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
index fa7b6a4..075e09f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone.py
@@ -39,6 +39,8 @@
raise cls.skipException(
'Availability Zone API tests require an availability zone '
'configured in the [load_balancer] availability_zone setting.')
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
@classmethod
def resource_setup(cls):
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
index d3833f6..1992e44 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_capabilities.py
@@ -25,6 +25,12 @@
class AvailabilityZoneCapabilitiesAPITest(test_base.LoadBalancerBaseTest):
"""Test the provider availability zone capabilities API."""
+ @classmethod
+ def skip_checks(cls):
+ super(AvailabilityZoneCapabilitiesAPITest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
@decorators.idempotent_id('cb3e4c59-4114-420b-9837-2666d4d5fef4')
def test_availability_zone_capabilities_list(self):
"""Tests provider availability zone capabilities list API/filtering.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
index 456a01e..14b2d92 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_availability_zone_profile.py
@@ -41,6 +41,8 @@
'Availability zone profile API tests require an availability '
'zone configured in the [load_balancer] availability_zone '
'setting in the tempest configuration file.')
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
@decorators.idempotent_id('e512b580-ef32-44c3-bbd2-efdc27ba2ea6')
def test_availability_zone_profile_create(self):
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor.py b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
index b5b4254..06bd557 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor.py
@@ -32,6 +32,12 @@
"""Test the flavor object API."""
@classmethod
+ def skip_checks(cls):
+ super(FlavorAPITest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
+ @classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
super(FlavorAPITest, cls).resource_setup()
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
index 884f656..1da6c91 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_capabilities.py
@@ -24,6 +24,12 @@
class FlavorCapabilitiesAPITest(test_base.LoadBalancerBaseTest):
"""Test the provider flavor capabilities API."""
+ @classmethod
+ def skip_checks(cls):
+ super(FlavorCapabilitiesAPITest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
@decorators.idempotent_id('df837ee3-ca4b-4a4d-a7a3-27fa57cf3a33')
def test_flavor_capabilities_list(self):
"""Tests provider flavor capabilities list API and field filtering.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
index 39f3338..dbd370b 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_flavor_profile.py
@@ -31,6 +31,12 @@
class FlavorProfileAPITest(test_base.LoadBalancerBaseTest):
"""Test the flavor profile object API."""
+ @classmethod
+ def skip_checks(cls):
+ super(FlavorProfileAPITest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
@decorators.idempotent_id('d0e3a08e-d58a-4460-83ed-34307ca04cde')
def test_flavor_profile_create(self):
"""Tests flavor profile create and basic show APIs.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index a305ead..9f50a7d 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -33,6 +33,14 @@
class HealthMonitorAPITest(test_base.LoadBalancerBaseTest):
"""Test the healthmonitor object API."""
+
+ @classmethod
+ def skip_checks(cls):
+ super(HealthMonitorAPITest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException("Health monitor entity isn't applicable "
+ "in case of TungstenFabric.")
+
@classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index e7ed5a6..2ac6288 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -29,6 +29,13 @@
class L7PolicyAPITest(test_base.LoadBalancerBaseTest):
"""Test the l7policy object API."""
+
+ @classmethod
+ def skip_checks(cls):
+ super(L7PolicyAPITest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
@classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
index 5cb85c4..ddd46d1 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
@@ -29,6 +29,13 @@
class L7RuleAPITest(test_base.LoadBalancerBaseTest):
"""Test the l7rule object API."""
+
+ @classmethod
+ def skip_checks(cls):
+ super(L7RuleAPITest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
@classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 152f6ff..8893295 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -117,7 +117,8 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_kwargs[const.INSERT_HEADERS] = {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true",
@@ -139,8 +140,9 @@
const.TAGS: listener_tags
})
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
# Test that CIDR IP version matches VIP IP version
bad_cidrs = ['192.0.1.0/24', '2001:db8:a0b:12f0::/64']
listener_kwargs.update({const.ALLOWED_CIDRS: bad_cidrs})
@@ -214,7 +216,8 @@
else:
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
self.assertTrue(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_FOR]))
@@ -228,8 +231,9 @@
self.assertCountEqual(listener_kwargs[const.TAGS],
listener[const.TAGS])
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
@decorators.idempotent_id('cceac303-4db5-4d5a-9f6e-ff33780a5f29')
@@ -400,9 +404,16 @@
test_ids = []
lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
lb_id = lb[const.ID]
self.addCleanup(
self.mem_lb_client.cleanup_loadbalancer,
@@ -642,8 +653,9 @@
show_listener_response_fields.append('timeout_member_connect')
show_listener_response_fields.append('timeout_member_data')
show_listener_response_fields.append('timeout_tcp_inspect')
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
show_listener_response_fields.append('allowed_cidrs')
for field in show_listener_response_fields:
if field in (const.DEFAULT_POOL_ID, const.L7_POLICIES):
@@ -767,7 +779,8 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_kwargs[const.INSERT_HEADERS] = {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true",
@@ -790,8 +803,9 @@
const.TAGS: listener_tags
})
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
listener = self.mem_listener_client.create_listener(**listener_kwargs)
@@ -834,7 +848,8 @@
for item in equal_items:
self.assertEqual(listener_kwargs[item], listener[item])
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
self.assertTrue(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_FOR]))
@@ -852,8 +867,9 @@
else:
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
# Test that the appropriate users can see or not see the listener
@@ -923,7 +939,8 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_kwargs[const.INSERT_HEADERS] = {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true",
@@ -946,8 +963,9 @@
const.TAGS: listener_tags
})
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
listener = self.mem_listener_client.create_listener(**listener_kwargs)
@@ -975,7 +993,8 @@
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(protocol_port, listener[const.PROTOCOL_PORT])
self.assertEqual(200, listener[const.CONNECTION_LIMIT])
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
self.assertTrue(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_FOR]))
@@ -995,8 +1014,9 @@
self.assertCountEqual(listener_kwargs[const.TAGS],
listener[const.TAGS])
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
# Test that a user, without the load balancer member role, cannot
@@ -1042,7 +1062,8 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_update_kwargs[const.INSERT_HEADERS] = {
const.X_FORWARDED_FOR: "false",
const.X_FORWARDED_PORT: "false",
@@ -1064,8 +1085,9 @@
const.TAGS: listener_updated_tags
})
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
# Test that CIDR IP version matches VIP IP version
bad_cidrs = ['192.0.2.0/24', '2001:db8::/6']
listener_update_kwargs.update({const.ALLOWED_CIDRS: bad_cidrs})
@@ -1110,7 +1132,8 @@
else:
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
self.assertEqual(400, listener[const.CONNECTION_LIMIT])
- if protocol == const.HTTP:
+ if (protocol == const.HTTP and
+ CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
self.assertFalse(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_FOR]))
@@ -1130,8 +1153,9 @@
self.assertCountEqual(listener_update_kwargs[const.TAGS],
listener[const.TAGS])
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
expected_cidrs = ['192.0.2.0/24']
if CONF.load_balancer.test_with_ipv6:
expected_cidrs = ['2001:db8::/64']
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 7ade642..1d48e3c 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -173,9 +173,15 @@
* Validates the load balancer is in the DELETED state.
"""
lb_name = data_utils.rand_name("lb_member_lb1-delete")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.addClassResourceCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -210,6 +216,8 @@
CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout)
+ @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+ "Tungstenfabric driver doesn't support cascade deletion")
@decorators.idempotent_id('abd784e3-485f-442a-85da-d91365c6b5dd')
def test_load_balancer_delete_cascade(self):
"""Tests load balancer create and cascade delete APIs.
@@ -266,6 +274,12 @@
def _filter_lbs_by_index(self, lbs, indexes):
return [lb for i, lb in enumerate(lbs) if i not in indexes]
+ def _setup_lb_kwargs(self, lb_kwargs):
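+ # TF builds the VIP from subnet kwargs instead of a bare vip_network_id.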
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del lb_kwargs[const.VIP_NETWORK_ID]
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+
@decorators.idempotent_id('6546ef3c-c0e2-46af-b892-f795f4d01119')
def test_load_balancer_list(self):
"""Tests load balancer list API and field filtering.
@@ -304,6 +317,7 @@
# vip_qos_policy_id=lb_qos_policy_id)
const.VIP_NETWORK_ID: lb_vip_network_id
}
+ self._setup_lb_kwargs(lb_kwargs)
if self.mem_lb_client.is_version_supported(self.api_version, '2.5'):
lb_tags = ["English", "Mathematics", "Marketing", "Creativity"]
@@ -348,6 +362,7 @@
const.NAME: lb_name,
const.VIP_NETWORK_ID: lb_vip_network_id,
}
+ self._setup_lb_kwargs(lb_kwargs)
if self.mem_lb_client.is_version_supported(self.api_version, '2.5'):
lb_tags = ["English", "Spanish", "Soft_skills", "Creativity"]
@@ -392,6 +407,7 @@
const.NAME: lb_name,
const.VIP_NETWORK_ID: lb_vip_network_id,
}
+ self._setup_lb_kwargs(lb_kwargs)
if self.mem_lb_client.is_version_supported(self.api_version, '2.5'):
lb_tags = ["English", "Project_management",
@@ -811,9 +827,14 @@
* Validate the show reflects the expected values.
"""
lb_name = data_utils.rand_name("lb_member_lb1-show_stats")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {
+ const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider,
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ }
+ self._setup_lb_kwargs(lb_kwargs)
+
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.addClassResourceCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -876,9 +897,14 @@
* Validate the show reflects the expected values.
"""
lb_name = data_utils.rand_name("lb_member_lb1-status")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {
+ const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider,
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ }
+ self._setup_lb_kwargs(lb_kwargs)
+
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
self.addClassResourceCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -942,6 +968,9 @@
except Exception:
pass
+ @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+ "Tungstenfabric provider does not support failing over "
+ "load balancers.")
@decorators.idempotent_id('fc2e07a6-9776-4559-90c9-141170d4c397')
def test_load_balancer_failover(self):
"""Tests load balancer failover API.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index aa7cf25..8e90e0c 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -73,6 +73,10 @@
def _listener_pool_create(cls, listener_protocol, pool_protocol,
algorithm):
"""Setup resources needed by the tests."""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ cls.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
+
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
cls.mem_listener_client.is_version_supported(
cls.api_version, '2.13')):
@@ -840,6 +844,10 @@
* Show member details.
* Validate the show reflects the requested values.
"""
+ if (alternate_monitor and
+ CONF.load_balancer.provider == 'tungstenfabric'):
+ raise self.skipException('Alternate Health Monitor not supported '
+ 'by TungstenFabric.')
if ip_version == 6 and not CONF.load_balancer.test_with_ipv6:
raise testtools.TestCase.skipException(
'Skipping this test as test_with_ipv6 is not "True" in '
@@ -867,8 +875,9 @@
member_kwargs[const.MONITOR_ADDRESS] = member_monitor_address
member_kwargs[const.MONITOR_PORT] = 8080
+ provider = CONF.load_balancer.provider
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
member_kwargs.update({
const.BACKUP: False,
})
@@ -934,7 +943,7 @@
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
const.PROTOCOL_PORT, const.WEIGHT]
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
equal_items.append(const.BACKUP)
if self.mem_member_client.is_version_supported(
@@ -1067,6 +1076,9 @@
* List the members filtering to one of the three.
* List the members filtered, one field, and sorted.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
# IDs of members created in the test
test_ids = []
@@ -1087,10 +1099,25 @@
# SOURCE-IP-PORT. Since it runs with not_implemented_is_error, we must
# handle this test case special.
try:
- pool = self.mem_pool_client.create_pool(
- name=pool_name, loadbalancer_id=self.lb_id,
- protocol=pool_protocol,
- lb_algorithm=algorithm)
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ listener_name = data_utils.rand_name("lb_member_listener-list")
+ listener = self._tf_create_listener(
+ listener_name, pool_protocol, 81, self.lb_id
+ )
+ listener_id = listener[const.ID]
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener, listener_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ pool = self.mem_pool_client.create_pool(
+ name=pool_name, listener_id=listener_id,
+ protocol=pool_protocol,
+ lb_algorithm=algorithm)
+ else:
+ pool = self.mem_pool_client.create_pool(
+ name=pool_name, loadbalancer_id=self.lb_id,
+ protocol=pool_protocol,
+ lb_algorithm=algorithm)
except exceptions.NotImplemented as e:
if algorithm != const.LB_ALGORITHM_SOURCE_IP_PORT:
raise
@@ -1727,6 +1754,10 @@
* Validate the show reflects the requested values.
* Validates that other accounts cannot see the member.
"""
+ if (CONF.load_balancer.provider == 'tungstenfabric'
+ and alternate_monitor):
+ raise self.skipException('Alternate Health Monitor is not '
+ 'supported by TungstenFabric.')
member_name = data_utils.rand_name("lb_member_member1-show")
member_kwargs = {
const.NAME: member_name,
@@ -1739,9 +1770,9 @@
if alternate_monitor:
member_kwargs[const.MONITOR_ADDRESS] = '192.0.2.2'
member_kwargs[const.MONITOR_PORT] = 8080
-
+ provider = CONF.load_balancer.provider
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
member_kwargs.update({
const.BACKUP: False,
})
@@ -1773,7 +1804,7 @@
const.PROTOCOL_PORT, const.WEIGHT]
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
equal_items.append(const.BACKUP)
if alternate_monitor:
@@ -2162,6 +2193,10 @@
* Show member details.
* Validate the show reflects the initial values.
"""
+ if (CONF.load_balancer.provider == 'tungstenfabric'
+ and alternate_monitor):
+ raise self.skipException('Alternate Health Monitor not supported '
+ 'by TungstenFabric.')
member_name = data_utils.rand_name("lb_member_member1-update")
member_kwargs = {
const.NAME: member_name,
@@ -2175,8 +2210,9 @@
member_kwargs[const.MONITOR_ADDRESS] = '192.0.2.2'
member_kwargs[const.MONITOR_PORT] = 8080
+ provider = CONF.load_balancer.provider
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
member_kwargs.update({
const.BACKUP: False,
})
@@ -2225,7 +2261,7 @@
const.PROTOCOL_PORT, const.WEIGHT]
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
equal_items.append(const.BACKUP)
if self.mem_member_client.is_version_supported(
@@ -2277,7 +2313,7 @@
const.WEIGHT: member[const.WEIGHT] + 1,
}
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
member_update_kwargs.update({
const.BACKUP: not member[const.BACKUP]
})
@@ -2320,7 +2356,7 @@
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT]
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
equal_items.append(const.BACKUP)
if self.mem_member_client.is_version_supported(
@@ -2564,6 +2600,9 @@
const.LB_ALGORITHM_SOURCE_IP_PORT,
alternate_monitor=True)
+ @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+ "Tungstenfabric provider does not support batch "
+ "updating members.")
def _test_member_batch_update(self, pool_protocol, algorithm,
alternate_monitor=False):
"""Tests member batch update.
@@ -2572,6 +2611,10 @@
* Batch update the members so one is deleted, created, and updated
* Validate the member list is correct.
"""
+ if (CONF.load_balancer.provider == 'tungstenfabric'
+ and alternate_monitor):
+ raise self.skipException('Alternate Health Monitor not supported '
+ 'by TungstenFabric.')
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index ba31a8e..0916f7e 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -341,6 +341,15 @@
* Show pool details.
* Validate the show reflects the requested values.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
+ if not listener_protocol:
+ raise testtools.TestCase.skipException(
+ "TungstenFabric can't create listener with loadbalancer "
+ "argument."
+ )
+
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
@@ -595,6 +604,9 @@
* List the pools filtering to one of the three.
* List the pools filtered, one field, and sorted.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
# IDs of pools created in the test
test_ids = []
@@ -606,9 +618,16 @@
'SOURCE_IP_PORT requires API version 2.13 or newer.')
lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
- lb = self.mem_lb_client.create_loadbalancer(
- name=lb_name, provider=CONF.load_balancer.provider,
- vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_kwargs = {const.NAME: lb_name,
+ const.PROVIDER: CONF.load_balancer.provider}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self._setup_lb_network_kwargs(lb_kwargs, 4)
+ else:
+ lb_kwargs.update({
+ const.VIP_NETWORK_ID: self.lb_member_vip_net[const.ID]
+ })
+
+ lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
lb_id = lb[const.ID]
self.addCleanup(
self.mem_lb_client.cleanup_loadbalancer,
@@ -631,6 +650,17 @@
const.LB_ALGORITHM: algorithm,
const.LOADBALANCER_ID: lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool1_kwargs[const.LOADBALANCER_ID]
+ listener1_name = data_utils.rand_name("lb_member_listener1-list")
+ listener_port = self._tf_get_free_port(lb_id)
+ listener1 = self._tf_create_listener(listener1_name, pool_protocol,
+ listener_port, lb_id)
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener, listener1[const.ID],
+ lb_client=self.mem_lb_client, lb_id=lb_id)
+ listener1_id = listener1[const.ID]
+ pool1_kwargs.update({const.LISTENER_ID: listener1_id})
if self.mem_pool_client.is_version_supported(
self.api_version, '2.5'):
@@ -684,6 +714,17 @@
const.LB_ALGORITHM: algorithm,
const.LOADBALANCER_ID: lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool2_kwargs[const.LOADBALANCER_ID]
+ listener2_name = data_utils.rand_name("lb_member_listener2-list")
+ listener_port = self._tf_get_free_port(lb_id)
+ listener2 = self._tf_create_listener(listener2_name, pool_protocol,
+ listener_port, lb_id)
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener, listener2[const.ID],
+ lb_client=self.mem_lb_client, lb_id=lb_id)
+ listener2_id = listener2[const.ID]
+ pool2_kwargs.update({const.LISTENER_ID: listener2_id})
if self.mem_pool_client.is_version_supported(
self.api_version, '2.5'):
@@ -724,6 +765,17 @@
const.LB_ALGORITHM: algorithm,
const.LOADBALANCER_ID: lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool3_kwargs[const.LOADBALANCER_ID]
+ listener3_name = data_utils.rand_name("lb_member_listener3-list")
+ listener_port = self._tf_get_free_port(lb_id)
+ listener3 = self._tf_create_listener(listener3_name, pool_protocol,
+ listener_port, lb_id)
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener, listener3[const.ID],
+ lb_client=self.mem_lb_client, lb_id=lb_id)
+ listener3_id = listener3[const.ID]
+ pool3_kwargs.update({const.LISTENER_ID: listener3_id})
if self.mem_pool_client.is_version_supported(
self.api_version, '2.5'):
@@ -1032,6 +1084,10 @@
* Validate the show reflects the requested values.
* Validates that other accounts cannot see the pool.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
+
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
@@ -1051,6 +1107,18 @@
const.LOADBALANCER_ID: self.lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool_kwargs[const.LOADBALANCER_ID]
+ listener_name = data_utils.rand_name("lb_member_listener-show")
+ listener_port = self._tf_get_free_port(self.lb_id)
+ listener = self._tf_create_listener(listener_name, pool_protocol,
+ listener_port, self.lb_id)
+ self.addClassResourceCleanup(
+ self.mem_listener_client.cleanup_listener, listener[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ listener_id = listener[const.ID]
+ pool_kwargs.update({const.LISTENER_ID: listener_id})
+
if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
pool_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
@@ -1098,12 +1166,18 @@
parser.parse(pool[const.CREATED_AT])
parser.parse(pool[const.UPDATED_AT])
UUID(pool[const.ID])
- # Operating status for pools will always be offline without members
- self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ # Operating status for a pool without members will be ONLINE if
+ # it is attached to a listener
+ self.assertEqual(const.ONLINE, pool[const.OPERATING_STATUS])
+ self.assertNotEmpty(pool[const.LISTENERS])
+ else:
+ # Operating status for pools will always be offline without members
+ self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ self.assertEmpty(pool[const.LISTENERS])
self.assertEqual(pool_protocol, pool[const.PROTOCOL])
self.assertEqual(1, len(pool[const.LOADBALANCERS]))
self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
- self.assertEmpty(pool[const.LISTENERS])
self.assertEqual(algorithm, pool[const.LB_ALGORITHM])
if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
@@ -1261,6 +1335,10 @@
* Show pool details.
* Validate the show reflects the updated values.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
+
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
@@ -1280,6 +1358,18 @@
const.LOADBALANCER_ID: self.lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool_kwargs[const.LOADBALANCER_ID]
+ listener_name = data_utils.rand_name("lb_member_listener-update")
+ listener_port = self._tf_get_free_port(self.lb_id)
+ listener = self._tf_create_listener(listener_name, pool_protocol,
+ listener_port, self.lb_id)
+ self.addClassResourceCleanup(
+ self.mem_listener_client.cleanup_listener, listener[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ listener_id = listener[const.ID]
+ pool_kwargs.update({const.LISTENER_ID: listener_id})
+
if self.mem_lb_client.is_version_supported(self.api_version, '2.5'):
pool_tags = ["Hello", "World"]
pool_kwargs.update({
@@ -1334,11 +1424,17 @@
parser.parse(pool[const.UPDATED_AT])
UUID(pool[const.ID])
- # Operating status for pools will always be offline without members
- self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ # Operating status for a pool without members will be ONLINE if
+ # it is attached to a listener
+ self.assertEqual(const.ONLINE, pool[const.OPERATING_STATUS])
+ self.assertNotEmpty(pool[const.LISTENERS])
+ else:
+ # Operating status for pools will always be offline without members
+ self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ self.assertEmpty(pool[const.LISTENERS])
self.assertEqual(pool_protocol, pool[const.PROTOCOL])
self.assertEqual(1, len(pool[const.LOADBALANCERS]))
self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
- self.assertEmpty(pool[const.LISTENERS])
self.assertEqual(algorithm, pool[const.LB_ALGORITHM])
if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
@@ -1602,6 +1699,10 @@
* Deletes the pool.
* Validates the pool is in the DELETED state.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
+
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
@@ -1618,6 +1719,18 @@
const.LOADBALANCER_ID: self.lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool_kwargs[const.LOADBALANCER_ID]
+ listener_name = data_utils.rand_name("lb_member_listener-delete")
+ listener_port = self._tf_get_free_port(self.lb_id)
+ listener = self._tf_create_listener(listener_name, pool_protocol,
+ listener_port, self.lb_id)
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener, listener[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ listener_id = listener[const.ID]
+ pool_kwargs.update({const.LISTENER_ID: listener_id})
+
if session_persistence == const.SESSION_PERSISTENCE_APP_COOKIE:
pool_kwargs[const.SESSION_PERSISTENCE] = {
const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
index 8075a35..bd18528 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_healthmonitor.py
@@ -31,6 +31,13 @@
class HealthMonitorScenarioTest(test_base.LoadBalancerBaseTest):
@classmethod
+ def skip_checks(cls):
+ super(HealthMonitorScenarioTest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException("Health monitor entity isn't applicable "
+ "in case of TungstenFabric.")
+
+ @classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
super(HealthMonitorScenarioTest, cls).resource_setup()
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
index acbd094..359c605 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7policy.py
@@ -29,6 +29,12 @@
class L7PolicyScenarioTest(test_base.LoadBalancerBaseTest):
@classmethod
+ def skip_checks(cls):
+ super(L7PolicyScenarioTest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
+ @classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
super(L7PolicyScenarioTest, cls).resource_setup()
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
index d5683ef..3b0b8f3 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_l7rule.py
@@ -29,6 +29,12 @@
class L7RuleScenarioTest(test_base.LoadBalancerBaseTest):
@classmethod
+ def skip_checks(cls):
+ super(L7RuleScenarioTest, cls).skip_checks()
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise cls.skipException('Not supported by TungstenFabric.')
+
+ @classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
super(L7RuleScenarioTest, cls).resource_setup()
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index a720d26..66b1ec0 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -59,6 +59,8 @@
cls.allowed_cidrs = ['2001:db8:a0b:12f0::/64']
def _create_pools(cls, protocol, algorithm):
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ cls.check_tf_compatibility(protocol=protocol, algorithm=algorithm)
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
cls.mem_listener_client.is_version_supported(
cls.api_version, '2.13')):
@@ -73,6 +75,18 @@
const.LB_ALGORITHM: algorithm,
const.LOADBALANCER_ID: cls.lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool1_kwargs[const.LOADBALANCER_ID]
+ listener1_name = data_utils.rand_name("lb_member_listener1")
+ listener_port = cls._tf_get_free_port(cls.lb_id)
+ listener1 = cls._tf_create_listener(listener1_name, protocol,
+ listener_port, cls.lb_id)
+ listener1_id = listener1[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_listener_client.cleanup_listener, listener1_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ pool1_kwargs.update({const.LISTENER_ID: listener1_id})
pool1 = cls.mem_pool_client.create_pool(**pool1_kwargs)
pool1_id = pool1[const.ID]
@@ -89,6 +103,18 @@
const.LB_ALGORITHM: algorithm,
const.LOADBALANCER_ID: cls.lb_id,
}
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del pool2_kwargs[const.LOADBALANCER_ID]
+ listener2_name = data_utils.rand_name("lb_member_listener2")
+ listener_port = cls._tf_get_free_port(cls.lb_id)
+ listener2 = cls._tf_create_listener(listener2_name, protocol,
+ listener_port, cls.lb_id)
+ listener2_id = listener2[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_listener_client.cleanup_listener, listener2_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ pool2_kwargs.update({const.LISTENER_ID: listener2_id})
pool2 = cls.mem_pool_client.create_pool(**pool2_kwargs)
pool2_id = pool2[const.ID]
@@ -243,8 +269,10 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
-
- if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ del listener_kwargs[const.DEFAULT_POOL_ID]
+ if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_kwargs.update({
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "true",
@@ -259,8 +287,9 @@
const.TIMEOUT_MEMBER_DATA: 1000,
const.TIMEOUT_TCP_INSPECT: 50,
})
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_kwargs.update({const.ALLOWED_CIDRS: self.allowed_cidrs})
listener = self.mem_listener_client.create_listener(**listener_kwargs)
@@ -292,21 +321,24 @@
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
self.assertEqual(200, listener[const.CONNECTION_LIMIT])
- if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
+ if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
+ CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
self.assertTrue(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_FOR]))
self.assertTrue(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(pool1_id, listener[const.DEFAULT_POOL_ID])
+ if CONF.load_balancer.provider != 'tungstenfabric':
+ self.assertEqual(pool1_id, listener[const.DEFAULT_POOL_ID])
if self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
self.assertEqual(self.allowed_cidrs, listener[const.ALLOWED_CIDRS])
# Listener update
@@ -323,7 +355,8 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
- if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
+ if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
+ CONF.load_balancer.provider != 'tungstenfabric'):
listener_update_kwargs.update({
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "false",
@@ -339,8 +372,9 @@
const.TIMEOUT_TCP_INSPECT: 100,
})
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
new_cidrs = ['192.0.2.0/24']
if CONF.load_balancer.test_with_ipv6:
new_cidrs = ['2001:db8::/64']
@@ -379,21 +413,24 @@
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
self.assertEqual(400, listener[const.CONNECTION_LIMIT])
- if protocol in [const.HTTP, const.TERMINATED_HTTPS]:
+ if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
+ CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
self.assertFalse(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_FOR]))
self.assertFalse(strutils.bool_from_string(
insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(pool2_id, listener[const.DEFAULT_POOL_ID])
+ if CONF.load_balancer.provider != 'tungstenfabric':
+ self.assertEqual(pool2_id, listener[const.DEFAULT_POOL_ID])
if self.mem_listener_client.is_version_supported(
self.api_version, '2.1'):
self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
- if self.mem_listener_client.is_version_supported(
- self.api_version, '2.12'):
+ if (self.mem_listener_client.is_version_supported(
+ self.api_version, '2.12') and
+ CONF.load_balancer.provider != 'tungstenfabric'):
expected_cidrs = ['192.0.2.0/24']
if CONF.load_balancer.test_with_ipv6:
expected_cidrs = ['2001:db8::/64']
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
index 6c52f84..e5a678c 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
@@ -168,7 +168,8 @@
self.assertEqual(new_name, lb[const.NAME])
# Load balancer delete
- self.mem_lb_client.delete_loadbalancer(lb[const.ID], cascade=True)
+ cascade = CONF.load_balancer.provider != 'tungstenfabric'
+ self.mem_lb_client.delete_loadbalancer(lb[const.ID], cascade=cascade)
waiters.wait_for_deleted_status_or_not_found(
self.mem_lb_client.show_loadbalancer, lb[const.ID],
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index a85405f..bdd47a9 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -76,6 +76,9 @@
def _listener_pool_create(cls, listener_protocol, pool_protocol,
algorithm):
"""Setup resources needed by the tests."""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ cls.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
cls.mem_listener_client.is_version_supported(
cls.api_version, '2.13')):
@@ -389,6 +392,10 @@
* Update the member.
* Delete the member.
"""
+ provider = CONF.load_balancer.provider
+ if alternate_monitoring and provider == 'tungstenfabric':
+ raise self.skipException('Alternate Health Monitor not supported '
+ 'by TungstenFabric.')
# Member create
member_name = data_utils.rand_name("lb_member_member1-CRUD")
@@ -405,7 +412,7 @@
member_kwargs[const.MONITOR_PORT] = 8080
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
member_kwargs.update({
const.BACKUP: False,
})
@@ -454,7 +461,7 @@
equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
equal_items.append(const.BACKUP)
if const.SUBNET_ID in member_kwargs:
@@ -474,7 +481,7 @@
const.WEIGHT: member[const.WEIGHT] + 1,
}
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
member_update_kwargs.update({
const.BACKUP: not member[const.BACKUP],
})
@@ -504,7 +511,7 @@
if alternate_monitoring:
equal_items += [const.MONITOR_ADDRESS, const.MONITOR_PORT]
if self.mem_member_client.is_version_supported(
- self.api_version, '2.1'):
+ self.api_version, '2.1') and provider != 'tungstenfabric':
equal_items.append(const.BACKUP)
for item in equal_items:
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index 6df4c9c..e376a7a 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -339,6 +339,15 @@
* Update the pool.
* Delete the pool.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ self.check_tf_compatibility(protocol=pool_protocol,
+ algorithm=algorithm)
+ if not listener_protocol:
+ raise testtools.TestCase.skipException(
+ "TungstenFabric can't create listener with loadbalancer "
+ "argument."
+ )
+
if (algorithm == const.LB_ALGORITHM_SOURCE_IP_PORT and not
self.mem_listener_client.is_version_supported(
self.api_version, '2.13')):
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index d4d43b5..96ac97d 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -93,6 +93,9 @@
def _listener_pool_create(cls, protocol, protocol_port,
pool_algorithm=const.LB_ALGORITHM_ROUND_ROBIN,
insert_headers_dic=None):
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ cls.check_tf_compatibility(protocol=protocol,
+ algorithm=pool_algorithm)
if (protocol == const.UDP and
not cls.mem_listener_client.is_version_supported(
cls.api_version, '2.1')):
@@ -248,6 +251,9 @@
* Verify members are in their correct respective operating statuses.
* Verify that traffic is balanced evenly between the working members.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise self.skipException("Health monitor entity isn't applicable "
+ "in case of TungstenFabric.")
member1_name = data_utils.rand_name("lb_member_member1-hm-traffic")
member1_kwargs = {
@@ -486,6 +492,8 @@
pool_id = self._listener_pool_create(const.UDP, 8081)[1]
self._test_healthmonitor_traffic(const.UDP, 8081, pool_id)
+ @testtools.skipIf(CONF.load_balancer.provider == 'tungstenfabric',
+ "Not supported by TungstenFabric")
@decorators.idempotent_id('3558186d-6dcd-4d9d-b7f7-adc190b66149')
def test_http_l7policies_and_l7rules(self):
"""Tests sending traffic through a loadbalancer with l7rules
@@ -1171,6 +1179,9 @@
* Update allowed CIDRs to restrict traffic to a small subnet.
* Assert loadbalancer does not respond to client requests.
"""
+ if CONF.load_balancer.provider == 'tungstenfabric':
+ raise self.skipException("Allowed CIDRS aren't supported by "
+ "TungstenFabric.")
if not self.mem_listener_client.is_version_supported(
self.api_version, '2.12'):
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index b0bd643..f485816 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -579,6 +579,50 @@
lb_kwargs[const.VIP_NETWORK_ID] = cls.lb_member_vip_net[const.ID]
lb_kwargs[const.VIP_SUBNET_ID] = None
+ @classmethod
+ def check_tf_compatibility(cls, protocol=None, algorithm=None):
+ # TungstenFabric supported protocols and algorithms
+ tf_protocols = [const.HTTP, const.HTTPS, const.TCP, const.UDP,
+ const.TERMINATED_HTTPS]
+ tf_algorithms = [const.LB_ALGORITHM_ROUND_ROBIN,
+ const.LB_ALGORITHM_LEAST_CONNECTIONS,
+ const.LB_ALGORITHM_SOURCE_IP]
+
+ if algorithm and algorithm not in tf_algorithms:
+ raise cls.skipException(
+ 'TungstenFabric does not support {} algorithm.'
+ ''.format(algorithm))
+ if protocol and protocol not in tf_protocols:
+ raise cls.skipException(
+ 'TungstenFabric does not support {} protocol.'
+ ''.format(protocol))
+
+ @classmethod
+ def _tf_create_listener(cls, name, proto, port, lb_id):
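+ # Create a minimal listener; TF pools attach to listeners, not LBs.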
+ listener_kwargs = {
+ const.NAME: name,
+ const.PROTOCOL: proto,
+ const.PROTOCOL_PORT: port,
+ const.LOADBALANCER_ID: lb_id,
+ }
+ listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+ return listener
+
+ @classmethod
+ def _tf_get_free_port(cls, lb_id):
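+ # Return the first port from 8081 upward not used by the LB's listeners.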
+ port = 8081
+ lb = cls.mem_lb_client.show_loadbalancer(lb_id)
+ listeners = lb[const.LISTENERS]
+ if not listeners:
+ return port
+ ports = [cls.mem_listener_client.show_listener(x[const.ID])[
+ const.PROTOCOL_PORT] for x in listeners]
+ while port in ports:
+ port = port + 1
+ return port
+
class LoadBalancerBaseTestWithCompute(LoadBalancerBaseTest):
@classmethod