Merge "Add backup member tests"
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 733f0f4..48a83ac 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -115,6 +115,7 @@
# Other constants
ACTIVE = 'ACTIVE'
+PAUSED = 'PAUSED'
PENDING_UPDATE = 'PENDING_UPDATE'
ADMIN_STATE_UP_TRUE = 'true'
ASC = 'asc'
@@ -129,6 +130,7 @@
SINGLE = 'SINGLE'
ACTIVE_STANDBY = 'ACTIVE_STANDBY'
SUPPORTED_LB_TOPOLOGIES = (SINGLE, ACTIVE_STANDBY)
+BACKUP_TRUE = 'true'
# Protocols
HTTP = 'HTTP'
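
The new BACKUP_TRUE constant keeps the lowercase string form of the flag because
member list filters are passed as URL query parameters rather than JSON booleans.
A minimal sketch of how the list tests assemble that filter, with BACKUP and
BACKUP_TRUE copied locally as stand-ins for the values in
octavia_tempest_plugin.common.constants:

    # Stand-ins for constants.BACKUP / constants.BACKUP_TRUE.
    BACKUP = 'backup'
    BACKUP_TRUE = 'true'

    # Build the query string the same way _test_member_list does.
    query_params = '{backup}={backup_value}'.format(
        backup=BACKUP, backup_value=BACKUP_TRUE)

    assert query_params == 'backup=true'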
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index 8b25fc7..1e18af3 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -153,6 +153,17 @@
class MemberAPITest1(MemberAPITest):
+ @decorators.idempotent_id('c1e029b0-b6d6-4fa6-8ccb-5c3f3aa293b0')
+ def test_ipv4_HTTP_LC_backup_member_create(self):
+ if not self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ raise self.skipException('Backup member support is only available '
+ 'in Octavia API version 2.1 or newer')
+ pool_id = self._listener_pool_create(
+ listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+ algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+ self._test_member_create(4, pool_id, backup_member=True)
+
@decorators.idempotent_id('0684575a-0970-4fa8-8006-10c2b39c5f2b')
def test_ipv4_HTTP_LC_alt_monitor_member_create(self):
pool_id = self._listener_pool_create(
@@ -511,6 +522,17 @@
algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
self._test_member_create(6, pool_id)
+ @decorators.idempotent_id('b1994c5d-74b8-44be-b9e5-5e18e9219b61')
+ def test_ipv6_HTTP_LC_backup_member_create(self):
+ if not self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ raise self.skipException('Backup member support is only available '
+ 'in Octavia API version 2.1 or newer')
+ pool_id = self._listener_pool_create(
+ listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+ algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+ self._test_member_create(6, pool_id, backup_member=True)
+
@decorators.idempotent_id('6056724b-d046-497a-ae31-c02af67d4fbb')
def test_ipv6_HTTPS_LC_alt_monitor_member_create(self):
pool_id = self._listener_pool_create(
@@ -842,12 +864,12 @@
self._test_member_create(6, pool_id)
def _test_member_create(self, ip_version, pool_id,
- alternate_monitor=False):
+ alternate_monitor=False, backup_member=False):
"""Tests member create and basic show APIs.
* Tests that users without the loadbalancer member role cannot
create members.
- * Create a fully populated member.
+ * Create a fully populated member or backup member.
* If driver doesn't support Monitors, allow to create without monitor
* Show member details.
* Validate the show reflects the requested values.
@@ -882,7 +904,7 @@
if self.mem_member_client.is_version_supported(
self.api_version, '2.1'):
member_kwargs.update({
- const.BACKUP: False,
+ const.BACKUP: backup_member,
})
if self.mem_member_client.is_version_supported(
@@ -971,6 +993,17 @@
for item in equal_items:
self.assertEqual(member_kwargs[item], member[item])
+ @decorators.skip_because(bug='2045803')
+ @decorators.idempotent_id('b982188a-d55f-438a-a1b2-224f0ec8ff12')
+ def test_HTTP_LC_backup_member_list(self):
+ if not self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ raise self.skipException('Backup member support is only available '
+ 'in Octavia API version 2.1 or newer')
+ self._test_member_list(const.HTTP,
+ const.LB_ALGORITHM_LEAST_CONNECTIONS,
+ backup_member=True)
+
@decorators.idempotent_id('fcc5c6cd-d1c2-4a49-8d26-2268608e59a6')
def test_HTTP_LC_member_list(self):
self._test_member_list(const.HTTP,
@@ -1071,11 +1104,11 @@
self._test_member_list(const.UDP,
const.LB_ALGORITHM_SOURCE_IP_PORT)
- def _test_member_list(self, pool_protocol, algorithm):
+ def _test_member_list(self, pool_protocol, algorithm, backup_member=False):
"""Tests member list API and field filtering.
* Create a clean pool.
- * Create three members.
+ * Create three members (the first is a backup member if backup_member is True).
* Validates that other accounts cannot list the members.
* List the members using the default sort order.
* List the members using descending sort order.
@@ -1140,6 +1173,9 @@
const.PROTOCOL_PORT: 101,
}
+ if backup_member:
+ member1_kwargs[const.BACKUP] = True
+
if self.mem_member_client.is_version_supported(
self.api_version, '2.5'):
member1_tags = ["English", "Mathematics",
@@ -1351,6 +1387,17 @@
self.assertEqual(member2[const.PROTOCOL_PORT],
members[0][const.PROTOCOL_PORT])
+ # Test filtering using the backup flag
+ if backup_member:
+ members = self.mem_member_client.list_members(
+ pool_id,
+ query_params='{backup}={backup_value}'.format(
+ backup=const.BACKUP,
+ backup_value=const.BACKUP_TRUE))
+ self.assertEqual(1, len(members))
+ self.assertEqual(member1_name, members[0][const.NAME])
+ self.assertTrue(members[0][const.BACKUP])
+
# Test combined params
members = self.mem_member_client.list_members(
pool_id,
@@ -1395,6 +1442,17 @@
class MemberAPITest2(MemberAPITest):
+ @decorators.idempotent_id('048f4b15-1cb4-49ac-82d6-b2ac7fe9d03b')
+ def test_HTTP_LC_backup_member_show(self):
+ if not self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ raise self.skipException('Backup member support is only available '
+ 'in Octavia API version 2.1 or newer')
+ pool_id = self._listener_pool_create(
+ listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+ algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+ self._test_member_show(pool_id, backup_member=True)
+
@decorators.idempotent_id('2674b363-7922-494a-b121-cf415dbbb716')
def test_HTTP_LC_alt_monitor_member_show(self):
pool_id = self._listener_pool_create(
@@ -1739,7 +1797,8 @@
algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_member_show(pool_id)
- def _test_member_show(self, pool_id, alternate_monitor=False):
+ def _test_member_show(self, pool_id, alternate_monitor=False,
+ backup_member=False):
"""Tests member show API.
* Create a fully populated member.
@@ -1763,7 +1822,7 @@
if self.mem_member_client.is_version_supported(
self.api_version, '2.1'):
member_kwargs.update({
- const.BACKUP: False,
+ const.BACKUP: backup_member,
})
if self.lb_member_vip_subnet:
member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
@@ -1832,6 +1891,17 @@
expected_allowed, member[const.ID],
pool_id=pool_id)
+ @decorators.idempotent_id('592c19c3-1e0d-4d6d-b2ff-0d39d8654c99')
+ def test_HTTP_LC_backup_member_update(self):
+ if not self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ raise self.skipException('Backup member support is only available '
+ 'in Octavia API version 2.1 or newer')
+ pool_id = self._listener_pool_create(
+ listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+ algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+ self._test_member_update(pool_id, backup_member=True)
+
@decorators.idempotent_id('65680d48-1d49-4959-a7d1-677797e54f6b')
def test_HTTP_LC_alt_monitor_member_update(self):
pool_id = self._listener_pool_create(
@@ -2176,7 +2246,8 @@
algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_member_update(pool_id)
- def _test_member_update(self, pool_id, alternate_monitor=False):
+ def _test_member_update(self, pool_id, alternate_monitor=False,
+ backup_member=False):
"""Tests member show API and field filtering.
* Create a fully populated member.
@@ -2203,7 +2274,7 @@
if self.mem_member_client.is_version_supported(
self.api_version, '2.1'):
member_kwargs.update({
- const.BACKUP: False,
+ const.BACKUP: backup_member,
})
if self.mem_member_client.is_version_supported(
@@ -2793,6 +2864,17 @@
self.assertEqual(member2_name_update, members[0][const.NAME])
self.assertEqual(member3_name, members[1][const.NAME])
+ @decorators.idempotent_id('eab8f0dc-0959-4b50-aea2-2f2319305d15')
+ def test_HTTP_LC_backup_member_delete(self):
+ if not self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ raise self.skipException('Backup member support is only available '
+ 'in Octavia API version 2.1 or newer')
+ pool_id = self._listener_pool_create(
+ listener_protocol=const.HTTP, pool_protocol=const.HTTP,
+ algorithm=const.LB_ALGORITHM_LEAST_CONNECTIONS)
+ self._test_member_delete(pool_id, backup_member=True)
+
@decorators.idempotent_id('8b6574a3-17e8-4950-b24e-66d0c28960d3')
def test_HTTP_LC_member_delete(self):
pool_id = self._listener_pool_create(
@@ -2965,7 +3047,7 @@
algorithm=const.LB_ALGORITHM_SOURCE_IP_PORT)
self._test_member_delete(pool_id)
- def _test_member_delete(self, pool_id):
+ def _test_member_delete(self, pool_id, backup_member=False):
"""Tests member create and delete APIs.
* Creates a member.
@@ -2980,6 +3062,13 @@
const.ADDRESS: '192.0.2.1',
const.PROTOCOL_PORT: self.member_port.increment(),
}
+
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member_kwargs.update({
+ const.BACKUP: backup_member,
+ })
+
member = self.mem_member_client.create_member(**member_kwargs)
self.addCleanup(
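
All of the new API tests share one pattern: skip unless the Octavia API is at
least 2.1 (the microversion that introduced the backup field), then thread
backup_member through to the shared helper, which only adds const.BACKUP to the
create kwargs when the version check passes. A minimal sketch of that gating,
assuming a client object exposing is_version_supported the way the plugin's
member client does:

    # Sketch of the 2.1 version gate used by the new backup member tests.
    # `client` is assumed to expose is_version_supported(api_version, ver)
    # like the octavia_tempest_plugin clients.
    def build_member_kwargs(client, api_version, backup_member=False):
        member_kwargs = {
            'address': '192.0.2.1',
            'protocol_port': 80,
        }
        # The backup field only exists from API version 2.1 onward.
        if client.is_version_supported(api_version, '2.1'):
            member_kwargs['backup'] = backup_member
        return member_kwargs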
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index a704b88..0083887 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -1629,3 +1629,171 @@
# Make a request to the stats page
URL = 'http://{0}:{1}/metrics'.format(self.lb_vip_address, '8080')
self.validate_URL_response(URL, expected_status_code=200)
+
+ @decorators.idempotent_id('b2d5cefe-eac0-4eb3-b7c2-54f22578def9')
+ def test_backup_member(self):
+ if not self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ raise self.skipException('Backup member support is only available '
+ 'in Octavia API version 2.1 or newer')
+
+ _LISTENER_PORT = 106
+ # Create a unique listener and pool for this test
+ pool_id = self._listener_pool_create(const.HTTP, _LISTENER_PORT)[1]
+
+ # Create a health monitor on the pool
+ hm_name = data_utils.rand_name("lb_member_hm1-backup-not-active")
+ hm_kwargs = {
+ const.POOL_ID: pool_id,
+ const.NAME: hm_name,
+ const.TYPE: const.HEALTH_MONITOR_HTTP,
+ const.DELAY: 1,
+ const.TIMEOUT: 1,
+ const.MAX_RETRIES: 1,
+ const.MAX_RETRIES_DOWN: 1,
+ const.HTTP_METHOD: const.GET,
+ const.URL_PATH: '/',
+ const.EXPECTED_CODES: '200',
+ const.ADMIN_STATE_UP: True,
+ }
+ hm = self.mem_healthmonitor_client.create_healthmonitor(**hm_kwargs)
+ self.addCleanup(
+ self.mem_healthmonitor_client.cleanup_healthmonitor,
+ hm[const.ID], lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+ hm = waiters.wait_for_status(
+ self.mem_healthmonitor_client.show_healthmonitor,
+ hm[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ # Set up Member 1 for Webserver 1
+ member1_name = data_utils.rand_name("lb_member_member1-not-backup")
+ member1_kwargs = {
+ const.POOL_ID: pool_id,
+ const.NAME: member1_name,
+ const.ADMIN_STATE_UP: True,
+ const.ADDRESS: self.webserver1_ip,
+ const.PROTOCOL_PORT: 80,
+ }
+ if self.lb_member_1_subnet:
+ member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+ member1 = self.mem_member_client.create_member(**member1_kwargs)
+ self.addCleanup(
+ self.mem_member_client.cleanup_member,
+ member1[const.ID], pool_id=pool_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ # Set up Member 2 for Webserver 2 (Backup)
+ member2_name = data_utils.rand_name("lb_member_member2-backup")
+ member2_kwargs = {
+ const.POOL_ID: pool_id,
+ const.NAME: member2_name,
+ const.ADMIN_STATE_UP: True,
+ const.ADDRESS: self.webserver2_ip,
+ const.PROTOCOL_PORT: 80,
+ const.BACKUP: True,
+ }
+ if self.lb_member_2_subnet:
+ member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+ member2 = self.mem_member_client.create_member(**member2_kwargs)
+ self.addCleanup(
+ self.mem_member_client.cleanup_member,
+ member2[const.ID], pool_id=pool_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ url_for_tests = f'http://{self.lb_vip_address}:{_LISTENER_PORT}/'
+
+ # Send some requests and check that only member 1 is responding
+ self.assertConsistentResponse((200, self.webserver1_response),
+ url_for_tests)
+
+ # Disable member 1 and check that the backup member takes over
+ member_update_kwargs = {
+ const.POOL_ID: pool_id,
+ const.ADMIN_STATE_UP: False}
+
+ self.mem_member_client.update_member(
+ member1[const.ID], **member_update_kwargs)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+ waiters.wait_for_status(
+ self.mem_member_client.show_member,
+ member1[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout,
+ pool_id=pool_id)
+
+ # Send some requests and check that only backup member 2 is responding
+ self.assertConsistentResponse((200, self.webserver2_response),
+ url_for_tests)
+
+ # Enable member 1 and check that member 1 traffic resumes
+ member_update_kwargs = {
+ const.POOL_ID: pool_id,
+ const.ADMIN_STATE_UP: True}
+
+ self.mem_member_client.update_member(
+ member1[const.ID], **member_update_kwargs)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+ waiters.wait_for_status(
+ self.mem_member_client.show_member,
+ member1[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout,
+ pool_id=pool_id)
+
+ # Send some requests and check that only member 1 is responding
+ self.assertConsistentResponse((200, self.webserver1_response),
+ url_for_tests)
+
+ # Delete member 1 and check that backup member 2 is responding
+ self.mem_member_client.delete_member(
+ member1[const.ID],
+ pool_id=pool_id)
+
+ waiters.wait_for_deleted_status_or_not_found(
+ self.mem_member_client.show_member, member1[const.ID],
+ const.PROVISIONING_STATUS,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout,
+ pool_id=pool_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ # Send some requests and check that only backup member 2 is responding
+ self.assertConsistentResponse((200, self.webserver2_response),
+ url_for_tests)
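
The scenario test above exercises the behaviour that makes a backup member
useful: as long as a primary member is up, the backup receives no traffic, and
it only starts serving once every primary member is disabled or deleted. A
condensed sketch of that assertion flow, assuming a hypothetical
serving_member(vip_url) helper in place of assertConsistentResponse and
omitting the provisioning-status waits the real test performs between steps:

    # Condensed sketch of the failover checks; `serving_member` is a
    # hypothetical callable returning which backend answered at the VIP,
    # and `client` mirrors self.mem_member_client from the test.
    def check_backup_failover(client, pool_id, member1_id,
                              serving_member, vip_url):
        # While the primary member is enabled, the backup gets no traffic.
        assert serving_member(vip_url) == 'webserver1'

        # Administratively disable the primary; the backup takes over.
        client.update_member(member1_id, pool_id=pool_id,
                             admin_state_up=False)
        assert serving_member(vip_url) == 'webserver2'

        # Re-enable the primary; traffic moves back off the backup member.
        client.update_member(member1_id, pool_id=pool_id,
                             admin_state_up=True)
        assert serving_member(vip_url) == 'webserver1'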