Add conf option for listener connection limit
This patch adds a configuration option that defines the listener connection
limit, so the number of connections used in tests can be adjusted for
environments with different amounts of resources.
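A minimal tempest.conf sketch (the [load_balancer] section name follows the
existing CONF.load_balancer group used by this plugin; the value shown is
purely illustrative, the option defaults to 200):

    [load_balancer]
    listener_conn_limit = 500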
Related-PRODX: PRODX-19517
Change-Id: I435a0a38440860b3d8022aa164f6288aced1d70a
(cherry picked from commit 81344d251fd7ac1e34d6e8927a494a554ec81b24)
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 58ddf13..36131bd 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -151,6 +151,9 @@
'dict. Example: {"compute_zone": "The compute '
'availability zone."}'),
default={'compute_zone': 'The compute availability zone.'}),
+ cfg.IntOpt('listener_conn_limit',
+ default=200,
+ help='Defines the listener connection limit.'),
# Networking
cfg.BoolOpt('test_with_ipv6',
default=True,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 1868147..4805487 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -124,7 +124,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# Don't test with a default pool -- we'll do that in the scenario,
# but this will allow us to test that the field isn't mandatory,
# as well as not conflate pool failures with listener test failures
@@ -330,7 +330,7 @@
const.PROTOCOL: protocol1,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
}
try:
@@ -360,7 +360,7 @@
const.PROTOCOL: protocol2,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
try:
@@ -390,7 +390,7 @@
const.PROTOCOL: protocol1,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
self.assertRaises(
@@ -408,7 +408,7 @@
const.PROTOCOL: protocol3,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
try:
@@ -439,7 +439,7 @@
const.PROTOCOL: protocol4,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
self.assertRaises(
@@ -888,7 +888,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -1067,7 +1067,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -1131,7 +1131,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(protocol_port, listener[const.PROTOCOL_PORT])
- self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit,
+ listener[const.CONNECTION_LIMIT])
if (protocol == const.HTTP and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -1185,7 +1186,7 @@
const.NAME: new_name,
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
- const.CONNECTION_LIMIT: 400,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -1260,7 +1261,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
else:
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
- self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit+200,
+ listener[const.CONNECTION_LIMIT])
if (protocol == const.HTTP and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -1430,7 +1432,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = self.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index b1c0e21..35e04ca 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -103,7 +103,7 @@
const.PROTOCOL: listener_protocol,
const.PROTOCOL_PORT: cls.current_listener_port,
const.LOADBALANCER_ID: cls.lb_id,
- const.CONNECTION_LIMIT: 200
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index b27b8fc..abf501d 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -99,7 +99,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_ids[protocol] = listener[const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index 9683244..8828433 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -262,7 +262,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: 80,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
const.DEFAULT_POOL_ID: pool1_id,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -319,7 +319,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
- self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit,
+ listener[const.CONNECTION_LIMIT])
if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -348,7 +349,7 @@
const.NAME: new_name,
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
- const.CONNECTION_LIMIT: 400,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
const.DEFAULT_POOL_ID: pool2_id,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -411,7 +412,8 @@
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
- self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(listener_update_kwargs[const.CONNECTION_LIMIT],
+ listener[const.CONNECTION_LIMIT])
if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 5a01eab..d3cada1 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -106,7 +106,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index fb10313..6faeb57 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -115,7 +115,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
if insert_headers_dic:
@@ -1501,8 +1501,10 @@
'Failed - all UDP retries to LB VIP has failed')
# Update LB listener
- listener_kwargs = {const.LISTENER_ID: listener_id,
- const.CONNECTION_LIMIT: 300}
+ listener_kwargs = {
+ const.LISTENER_ID: listener_id,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+100,
+ }
self.mem_listener_client.update_listener(**listener_kwargs)
waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
self.lb_id, const.PROVISIONING_STATUS,
@@ -1629,7 +1631,7 @@
const.PROTOCOL: const.PROMETHEUS,
const.PROTOCOL_PORT: 8080,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
if self.mem_listener_client.is_version_supported(