Add conf option for listener connection limit
The patch provides a configuration option to define the listener
connection limit, making it possible to adjust the number of connections
for environments with varying amounts of resources.
Related-PRODX: PRODX-19517
Change-Id: I435a0a38440860b3d8022aa164f6288aced1d70a
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 77d2f6e..b196f2a 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -139,6 +139,9 @@
'dict. Example: {"compute_zone": "The compute '
'availability zone."}'),
default={'compute_zone': 'The compute availability zone.'}),
+ cfg.IntOpt('listener_conn_limit',
+ default=200,
+ help='Defines the listener connection limit.'),
# Networking
cfg.BoolOpt('test_with_ipv6',
default=True,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index ce7d989..350f739 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -99,7 +99,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# Don't test with a default pool -- we'll do that in the scenario,
# but this will allow us to test that the field isn't mandatory,
# as well as not conflate pool failures with listener test failures
@@ -272,7 +272,7 @@
const.PROTOCOL: protocol1,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
}
try:
@@ -305,7 +305,7 @@
const.PROTOCOL: protocol2,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
try:
@@ -338,7 +338,7 @@
const.PROTOCOL: protocol1,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
self.assertRaises(
@@ -355,7 +355,7 @@
const.PROTOCOL: protocol3,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
self.assertRaises(
@@ -719,7 +719,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -892,7 +892,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -955,7 +955,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(protocol_port, listener[const.PROTOCOL_PORT])
- self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit,
+ listener[const.CONNECTION_LIMIT])
if (protocol == const.HTTP and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -1019,7 +1020,7 @@
const.NAME: new_name,
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
- const.CONNECTION_LIMIT: 400,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_POOL_ID: '',
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -1094,7 +1095,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
else:
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
- self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit+200,
+ listener[const.CONNECTION_LIMIT])
if (protocol == const.HTTP and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -1245,7 +1247,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: protocol_port,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = self.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index a5c78b3..694a253 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -103,7 +103,7 @@
const.PROTOCOL: listener_protocol,
const.PROTOCOL_PORT: cls.current_listener_port,
const.LOADBALANCER_ID: cls.lb_id,
- const.CONNECTION_LIMIT: 200
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
index 06d10dc..daa061c 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -99,7 +99,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.listener_ids[protocol] = listener[const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index 7e31537..8c1b74d 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -269,7 +269,7 @@
const.PROTOCOL: protocol,
const.PROTOCOL_PORT: 80,
const.LOADBALANCER_ID: self.lb_id,
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
const.DEFAULT_POOL_ID: pool1_id,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -326,7 +326,8 @@
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
- self.assertEqual(200, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(CONF.load_balancer.listener_conn_limit,
+ listener[const.CONNECTION_LIMIT])
if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
@@ -355,7 +356,7 @@
const.NAME: new_name,
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
- const.CONNECTION_LIMIT: 400,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit+200,
const.DEFAULT_POOL_ID: pool2_id,
# TODO(rm_work): need to finish the rest of this stuff
# const.DEFAULT_TLS_CONTAINER_REF: '',
@@ -418,7 +419,8 @@
self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
self.assertEqual(protocol, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
- self.assertEqual(400, listener[const.CONNECTION_LIMIT])
+ self.assertEqual(listener_update_kwargs[const.CONNECTION_LIMIT],
+ listener[const.CONNECTION_LIMIT])
if (protocol in [const.HTTP, const.TERMINATED_HTTPS] and
CONF.load_balancer.provider != 'tungstenfabric'):
insert_headers = listener[const.INSERT_HEADERS]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 622eda9..a3b683e 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -106,7 +106,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
cls.current_listener_port += 1
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index 78c6114..947cf27 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -113,7 +113,7 @@
const.LOADBALANCER_ID: cls.lb_id,
# For branches that don't support multiple listeners in single
# haproxy process and use haproxy>=1.8:
- const.CONNECTION_LIMIT: 200,
+ const.CONNECTION_LIMIT: CONF.load_balancer.listener_conn_limit,
}
listener = cls.mem_listener_client.create_listener(**listener_kwargs)
cls.addClassResourceCleanup(