Add conf option for listener connection limit
This patch provides a configuration option to define the listener
connection limit, making it possible to adjust connection limits for
environments with varying amounts of resources.
Related-PRODX: PRODX-19517
Change-Id: I435a0a38440860b3d8022aa164f6288aced1d70a
(cherry picked from commit 81344d251fd7ac1e34d6e8927a494a554ec81b24)
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index c40293c..b313471 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -151,6 +151,9 @@
'dict. Example: {"compute_zone": "The compute '
'availability zone."}'),
default={'compute_zone': 'The compute availability zone.'}),
+ cfg.IntOpt('listener_conn_limit',
+ default=200,
+ help='Defines listener connection limit.'),
# Networking
cfg.BoolOpt('test_with_ipv6',
default=True,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 0ee1f09..d9b80b6 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -114,7 +114,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# Don't test with a default pool -- we'll do that in the scenario,
# but this will allow us to test that the field isn't mandatory,
# as well as not conflate pool failures with listener test failures
@@ -293,7 +293,7 @@
const.PROTOCOL: protocol1,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
}
try:
@@ -320,7 +320,7 @@
const.PROTOCOL: protocol2,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
try:
@@ -347,7 +347,7 @@
const.PROTOCOL: protocol1,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
self.assertRaises(
@@ -364,7 +364,7 @@
const.PROTOCOL: protocol3,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
self.assertRaises(
@@ -797,7 +797,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -963,7 +963,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -1022,7 +1022,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(protocol_port, listener[const.PROTOCOL_PORT])
- self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit,
+ listener[const.CONNECTION_LIMIT])
if (protocol == const.HTTP and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -1075,7 +1076,7 @@
const.NAME: new_name,
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
- const.CONNECTION_LIMIT: 400,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -1150,7 +1151,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
else:
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
- self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit+200,
+ listener[const.CONNECTION_LIMIT])
if (protocol == const.HTTP and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -1300,7 +1302,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = self.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index 833363d..96f555b 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -103,7 +103,7 @@
const.PROTOCOL: listener_protocol,
const.PROTOCOL_PORT: cls.current_listener_port,
const.LOADBALANCER_ID: cls.lb_id,
- const.CONNECTION_LIMIT: 200
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index 69c1f2b..ad7ded3 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -99,7 +99,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_ids[protocol] = listener[const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index bce1378..8a6d5d5 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -243,7 +243,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: 80,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
const.DEFAULT_POOL_ID: pool1_id,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -300,7 +300,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
- self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit,
+ listener[const.CONNECTION_LIMIT])
if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -329,7 +330,7 @@
const.NAME: new_name,
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
- const.CONNECTION_LIMIT: 400,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
const.DEFAULT_POOL_ID: pool2_id,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -392,7 +393,8 @@
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
- self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(listener_update_kwargs[const.CONNECTION_LIMIT],
+ listener[const.CONNECTION_LIMIT])
if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index bdd47a9..3ec1b99 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -106,7 +106,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index bec1a75..097f1fa 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -115,7 +115,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
if insert_headers_dic: