Fix tests to honor Octavia API versioning

This patch adds support for Octavia API endpoint versioning.
Previously, Rocky API additions (the listener timeout_* fields, the
member "backup" flag and the new amphora fields) were tested
regardless of the API version the Octavia endpoint reports.
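
The tests record the endpoint's maximum version once during resource
setup (cls.api_version = cls.mem_lb_client.get_max_api_version()) and
gate every version-specific field and assertion on it. As a rough
sketch of the comparison such a check performs (the plugin's actual
is_version_supported helper may differ):

    # Sketch only; not the plugin's real implementation.
    def is_version_supported(api_version, version):
        """Return True if the requested 'version' (e.g. '2.1') is at
        most the endpoint's maximum 'api_version' (e.g. '2.5')."""
        current = [int(part) for part in api_version.split('.')]
        requested = [int(part) for part in version.split('.')]
        return current >= requested
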
It also fixes a potential race condition when testing the "ONLINE"
operating status: the status is now polled until it converges rather
than being asserted immediately after the load balancer goes ACTIVE.
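
The wait is skipped in noop mode, where the operating status never
transitions. The pattern, as added to test_load_balancer.py:

    if not CONF.load_balancer.test_with_noop:
        lb = waiters.wait_for_status(
            self.mem_lb_client.show_loadbalancer,
            lb[const.ID], const.OPERATING_STATUS, const.ONLINE,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
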
The patch additionally makes sure that duplicate fixed IP addresses
are not allocated on the test subnets.
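
Allocated host indexes are tracked in a class-scoped list in
test_base.py, and a new random index is drawn until an unused one is
found:

    ip_index = data_utils.rand_int_id(start=10, end=100)
    while ip_index in cls.used_ips:
        ip_index = data_utils.rand_int_id(start=10, end=100)
    cls.used_ips.append(ip_index)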

Story: 2003466
Task: 24726

Change-Id: If0ec777f34326c09c7215d9c7c2ed249571d779e
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 707b743..38da0ae 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -76,10 +76,6 @@
             const.PROTOCOL_PORT: 80,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.TIMEOUT_CLIENT_DATA: 1000,
-            const.TIMEOUT_MEMBER_CONNECT: 1000,
-            const.TIMEOUT_MEMBER_DATA: 1000,
-            const.TIMEOUT_TCP_INSPECT: 50,
             const.INSERT_HEADERS: {
                 const.X_FORWARDED_FOR: "true",
                 const.X_FORWARDED_PORT: "true"
@@ -93,6 +89,14 @@
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
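+        # The listener timeout_* fields were added in Octavia API 2.1
+        # (Rocky), so only send them when the endpoint supports 2.1.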
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 1000,
+                const.TIMEOUT_MEMBER_CONNECT: 1000,
+                const.TIMEOUT_MEMBER_DATA: 1000,
+                const.TIMEOUT_TCP_INSPECT: 50,
+            })
 
         # Test that a user without the load balancer role cannot
         # create a listener
@@ -146,10 +150,12 @@
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
         self.assertTrue(
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-        self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
 
     @decorators.idempotent_id('78ba6eb0-178c-477e-9156-b6775ca7b271')
     def test_listener_list(self):
@@ -336,7 +342,14 @@
                          listeners[2][const.DESCRIPTION])
 
         # Test fields
-        for field in const.SHOW_LISTENER_RESPONSE_FIELDS:
+        # Copy the constant so appending does not mutate the shared list.
+        show_listener_response_fields = list(
+            const.SHOW_LISTENER_RESPONSE_FIELDS)
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            show_listener_response_fields.append('timeout_client_data')
+            show_listener_response_fields.append('timeout_member_connect')
+            show_listener_response_fields.append('timeout_member_data')
+            show_listener_response_fields.append('timeout_tcp_inspect')
+        for field in show_listener_response_fields:
             if field in (const.DEFAULT_POOL_ID, const.L7_POLICIES):
                 continue
             listeners = self.mem_listener_client.list_listeners(
@@ -411,10 +424,6 @@
             const.PROTOCOL_PORT: 81,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.TIMEOUT_CLIENT_DATA: 1000,
-            const.TIMEOUT_MEMBER_CONNECT: 1000,
-            const.TIMEOUT_MEMBER_DATA: 1000,
-            const.TIMEOUT_TCP_INSPECT: 50,
             const.INSERT_HEADERS: {
                 const.X_FORWARDED_FOR: "true",
                 const.X_FORWARDED_PORT: "true"
@@ -425,6 +434,15 @@
             # const.SNI_CONTAINER_REFS: [],
         }
 
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 1000,
+                const.TIMEOUT_MEMBER_CONNECT: 1000,
+                const.TIMEOUT_MEMBER_DATA: 1000,
+                const.TIMEOUT_TCP_INSPECT: 50,
+            })
+
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.addClassResourceCleanup(
             self.mem_listener_client.cleanup_listener,
@@ -469,10 +487,13 @@
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
         self.assertTrue(
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-        self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
 
         # Test that a user with lb_admin role can see the listener
         if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
@@ -525,10 +546,6 @@
             const.PROTOCOL_PORT: 82,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.TIMEOUT_CLIENT_DATA: 1000,
-            const.TIMEOUT_MEMBER_CONNECT: 1000,
-            const.TIMEOUT_MEMBER_DATA: 1000,
-            const.TIMEOUT_TCP_INSPECT: 50,
             const.INSERT_HEADERS: {
                 const.X_FORWARDED_FOR: "true",
                 const.X_FORWARDED_PORT: "true"
@@ -538,6 +555,14 @@
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 1000,
+                const.TIMEOUT_MEMBER_CONNECT: 1000,
+                const.TIMEOUT_MEMBER_DATA: 1000,
+                const.TIMEOUT_TCP_INSPECT: 50,
+            })
 
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.addClassResourceCleanup(
@@ -573,10 +598,12 @@
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
         self.assertTrue(
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-        self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
 
         # Test that a user, without the load balancer member role, cannot
         # use this command
@@ -616,10 +643,6 @@
             const.DESCRIPTION: new_description,
             const.ADMIN_STATE_UP: True,
             const.CONNECTION_LIMIT: 400,
-            const.TIMEOUT_CLIENT_DATA: 2000,
-            const.TIMEOUT_MEMBER_CONNECT: 2000,
-            const.TIMEOUT_MEMBER_DATA: 2000,
-            const.TIMEOUT_TCP_INSPECT: 100,
             const.INSERT_HEADERS: {
                 const.X_FORWARDED_FOR: "false",
                 const.X_FORWARDED_PORT: "false"
@@ -629,6 +652,15 @@
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_update_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 2000,
+                const.TIMEOUT_MEMBER_CONNECT: 2000,
+                const.TIMEOUT_MEMBER_DATA: 2000,
+                const.TIMEOUT_TCP_INSPECT: 100,
+            })
+
         listener = self.mem_listener_client.update_listener(
             listener[const.ID], **listener_update_kwargs)
 
@@ -665,10 +697,12 @@
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
         self.assertFalse(
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
-        self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
-        self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
-        self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
+            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
+            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
+            self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
 
     @decorators.idempotent_id('16f11c82-f069-4592-8954-81b35a98e3b7')
     def test_listener_delete(self):
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 19f6044..35f2dde 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -90,6 +90,12 @@
                                      const.ACTIVE,
                                      CONF.load_balancer.lb_build_interval,
                                      CONF.load_balancer.lb_build_timeout)
+        if not CONF.load_balancer.test_with_noop:
+            lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                         lb[const.ID], const.OPERATING_STATUS,
+                                         const.ONLINE,
+                                         CONF.load_balancer.check_interval,
+                                         CONF.load_balancer.check_timeout)
 
         self.assertTrue(lb[const.ADMIN_STATE_UP])
         parser.parse(lb[const.CREATED_AT])
@@ -102,6 +108,7 @@
             self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
         else:
             self.assertEqual(const.ONLINE, lb[const.OPERATING_STATUS])
+
         self.assertEqual(self.os_roles_lb_member.credentials.project_id,
                          lb[const.PROJECT_ID])
         self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
@@ -270,6 +277,13 @@
                                       const.ACTIVE,
                                       CONF.load_balancer.lb_build_interval,
                                       CONF.load_balancer.lb_build_timeout)
+        if not CONF.load_balancer.test_with_noop:
+            lb1 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                          lb[const.ID], const.OPERATING_STATUS,
+                                          const.ONLINE,
+                                          CONF.load_balancer.check_interval,
+                                          CONF.load_balancer.check_timeout)
+
         # Time resolution for created_at is only to the second, and we need to
         # ensure that each object has a distinct creation time. Delaying one
         # second is both a simple and a reliable way to accomplish this.
@@ -293,6 +307,13 @@
                                       const.ACTIVE,
                                       CONF.load_balancer.lb_build_interval,
                                       CONF.load_balancer.lb_build_timeout)
+        if not CONF.load_balancer.test_with_noop:
+            lb2 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                          lb[const.ID], const.OPERATING_STATUS,
+                                          const.ONLINE,
+                                          CONF.load_balancer.check_interval,
+                                          CONF.load_balancer.check_timeout)
+
         # Time resolution for created_at is only to the second, and we need to
         # ensure that each object has a distinct creation time. Delaying one
         # second is both a simple and a reliable way to accomplish this.
@@ -709,6 +730,12 @@
                                      const.ACTIVE,
                                      CONF.load_balancer.lb_build_interval,
                                      CONF.load_balancer.lb_build_timeout)
+        if not CONF.load_balancer.test_with_noop:
+            lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                         lb[const.ID], const.OPERATING_STATUS,
+                                         const.ONLINE,
+                                         CONF.load_balancer.check_interval,
+                                         CONF.load_balancer.check_timeout)
 
         # Test that a user, without the load balancer member role, cannot
         # use this method
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index f5d34c8..18073cc 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -132,10 +132,15 @@
             const.ADDRESS: member_address,
             const.PROTOCOL_PORT: 80,
             const.WEIGHT: 50,
-            const.BACKUP: False,
             const.MONITOR_ADDRESS: member_monitor_address,
             const.MONITOR_PORT: 8080,
         }
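+        # The member 'backup' flag was added in Octavia API 2.1
+        # (Rocky), so only send it when the endpoint supports 2.1.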
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member_kwargs.update({
+                const.BACKUP: False,
+            })
+
         if self.lb_member_vip_subnet:
             member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
@@ -173,8 +178,13 @@
         self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
 
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.PROTOCOL_PORT, const.WEIGHT,
                        const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            equal_items.append(const.BACKUP)
+
         if const.SUBNET_ID in member_kwargs:
             equal_items.append(const.SUBNET_ID)
         else:
@@ -351,7 +361,11 @@
                          members[2][const.PROTOCOL_PORT])
 
         # Test fields
-        for field in const.SHOW_MEMBER_RESPONSE_FIELDS:
+        # Copy the constant so appending does not mutate the shared list.
+        show_member_response_fields = list(const.SHOW_MEMBER_RESPONSE_FIELDS)
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            show_member_response_fields.append('backup')
+        for field in show_member_response_fields:
             members = self.mem_member_client.list_members(
                 pool_id, query_params='{fields}={field}'.format(
                     fields=const.FIELDS, field=field))
@@ -421,10 +435,14 @@
             const.ADDRESS: '192.0.2.1',
             const.PROTOCOL_PORT: 81,
             const.WEIGHT: 50,
-            const.BACKUP: False,
             const.MONITOR_ADDRESS: '192.0.2.2',
             const.MONITOR_PORT: 8080,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member_kwargs.update({
+                const.BACKUP: False,
+            })
         if self.lb_member_vip_subnet:
             member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
@@ -454,8 +472,13 @@
         self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
 
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.PROTOCOL_PORT, const.WEIGHT,
                        const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            equal_items.append(const.BACKUP)
+
         if const.SUBNET_ID in member_kwargs:
             equal_items.append(const.SUBNET_ID)
         else:
@@ -513,10 +536,15 @@
             const.ADDRESS: '192.0.2.1',
             const.PROTOCOL_PORT: 82,
             const.WEIGHT: 50,
-            const.BACKUP: False,
             const.MONITOR_ADDRESS: '192.0.2.2',
             const.MONITOR_PORT: 8080,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member_kwargs.update({
+                const.BACKUP: False,
+            })
+
         if self.lb_member_vip_subnet:
             member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
@@ -553,8 +581,13 @@
         UUID(member[const.ID])
 
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.PROTOCOL_PORT, const.WEIGHT,
                        const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            equal_items.append(const.BACKUP)
+
         if const.SUBNET_ID in member_kwargs:
             equal_items.append(const.SUBNET_ID)
         else:
@@ -609,10 +642,15 @@
             const.NAME: new_name,
             const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
             const.WEIGHT: member[const.WEIGHT] + 1,
-            const.BACKUP: not member[const.BACKUP],
             const.MONITOR_ADDRESS: '192.0.2.3',
             const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member_update_kwargs.update({
+                const.BACKUP: not member[const.BACKUP]
+            })
+
         member = self.mem_member_client.update_member(
             member[const.ID], **member_update_kwargs)
 
@@ -642,7 +680,12 @@
 
         # Test changed items
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
-                       const.BACKUP, const.MONITOR_ADDRESS, const.MONITOR_PORT]
+                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            equal_items.append(const.BACKUP)
+
         for item in equal_items:
             self.assertEqual(member_update_kwargs[item], member[item])
 
@@ -688,10 +731,15 @@
             const.ADDRESS: '192.0.2.1',
             const.PROTOCOL_PORT: 80,
             const.WEIGHT: 50,
-            const.BACKUP: False,
             const.MONITOR_ADDRESS: '192.0.2.2',
             const.MONITOR_PORT: 8080,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member1_kwargs.update({
+                const.BACKUP: False,
+            })
+
         if self.lb_member_vip_subnet:
             member1_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
@@ -717,10 +765,15 @@
             const.ADDRESS: '192.0.2.3',
             const.PROTOCOL_PORT: 81,
             const.WEIGHT: 51,
-            const.BACKUP: True,
             const.MONITOR_ADDRESS: '192.0.2.4',
             const.MONITOR_PORT: 8081,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member2_kwargs.update({
+                const.BACKUP: True,
+            })
+
         if self.lb_member_vip_subnet:
             member2_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
@@ -745,10 +798,15 @@
             const.ADDRESS: '192.0.2.5',
             const.PROTOCOL_PORT: 82,
             const.WEIGHT: 52,
-            const.BACKUP: True,
             const.MONITOR_ADDRESS: '192.0.2.6',
             const.MONITOR_PORT: 8082,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member3_kwargs.update({
+                const.BACKUP: True,
+            })
+
         if self.lb_member_vip_subnet:
             member3_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
index 6435e7b..4f1480c 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
@@ -117,43 +117,53 @@
         self.assertTrue(
             len(amphorae) >= 2 * self._expected_amp_count(amphorae))
 
-        # Make sure all of the fields exist on the amp list records
-        for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
-            self.assertIn(field, amphorae[0])
+        # Copy the constant so appending does not mutate the shared list;
+        # created_at, updated_at and image_id were added in API 2.1.
+        show_amphora_response_fields = list(const.SHOW_AMPHORA_RESPONSE_FIELDS)
+        if self.mem_amphora_client.is_version_supported(
+                self.api_version, '2.1'):
+            show_amphora_response_fields.append('created_at')
+            show_amphora_response_fields.append('updated_at')
+            show_amphora_response_fields.append('image_id')
 
-        amp1_id = amphorae[0][const.ID]
-        amp1 = self.os_admin.amphora_client.show_amphora(amphora_id=amp1_id)
+        for amp in amphorae:
 
-        # Make sure all of the fields exist on the amp show record
-        for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
-            self.assertIn(field, amp1)
+            # Make sure all of the fields exist on the amp list records
+            for field in show_amphora_response_fields:
+                self.assertIn(field, amp)
 
-        # Verify a few of the fields are the right type
-        parser.parse(amp1[const.CREATED_AT])
-        parser.parse(amp1[const.UPDATED_AT])
-        UUID(amp1[const.ID])
-        UUID(amp1[const.COMPUTE_ID])
-        UUID(amp1[const.VRRP_PORT_ID])
-        self.assertIn(amp1[const.STATUS], const.AMPHORA_STATUSES)
-        # We might have gotten unassigned/spare amps?
-        if amp1[const.STATUS] == const.STATUS_ALLOCATED:
-            UUID(amp1[const.HA_PORT_ID])
-            UUID(amp1[const.LOADBALANCER_ID])
-            self.assertIn(amp1[const.ROLE], const.AMPHORA_ROLES)
-        else:
-            self.assertIsNone(amp1[const.ROLE])
+            amp_id = amp[const.ID]
+            amp_obj = self.os_admin.amphora_client.show_amphora(
+                amphora_id=amp_id)
 
-        # Test that all of the fields from the amp list match those from a show
-        for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
-            self.assertEqual(amphorae[0][field], amp1[field])
+            # Make sure all of the fields exist on the amp show record
+            for field in show_amphora_response_fields:
+                self.assertIn(field, amp_obj)
 
-        amp2_id = amphorae[1][const.ID]
-        amp2 = self.os_admin.amphora_client.show_amphora(amphora_id=amp2_id)
+            # Verify a few of the fields are the right type
+            if self.mem_amphora_client.is_version_supported(
+                    self.api_version, '2.1'):
+                parser.parse(amp_obj[const.CREATED_AT])
+                parser.parse(amp_obj[const.UPDATED_AT])
+            UUID(amp_obj[const.ID])
+            self.assertIn(amp_obj[const.STATUS], const.AMPHORA_STATUSES)
 
-        # Test that all of the fields from the amp list match those from a show
-        # (on another amphora)
-        for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
-            self.assertEqual(amphorae[1][field], amp2[field])
+            # We may have gotten unassigned/spare amps
+            if amp_obj[const.STATUS] == const.STATUS_ALLOCATED:
+                # Only check these fields on amphorae belonging to the
+                # LB we created; on amphorae owned by parallel tests,
+                # fields such as HA_PORT_ID may not be populated yet.
+                if lb_id == amp_obj[const.LOADBALANCER_ID]:
+                    UUID(amp_obj[const.HA_PORT_ID])
+                    UUID(amp_obj[const.LOADBALANCER_ID])
+                    UUID(amp_obj[const.COMPUTE_ID])
+                    UUID(amp_obj[const.VRRP_PORT_ID])
+                    self.assertIn(amp_obj[const.ROLE], const.AMPHORA_ROLES)
+            else:
+                self.assertIsNone(amp_obj[const.ROLE])
+
+            # Test that all of the fields from the amp list match those
+            # from a show
+            for field in show_amphora_response_fields:
+                self.assertEqual(amp[field], amp_obj[field])
 
         # Test filtering by loadbalancer_id
         amphorae = self.os_admin.amphora_client.list_amphorae(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index c07bb4a..685c200 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -113,10 +113,6 @@
             const.PROTOCOL_PORT: 80,
             const.LOADBALANCER_ID: self.lb_id,
             const.CONNECTION_LIMIT: 200,
-            const.TIMEOUT_CLIENT_DATA: 1000,
-            const.TIMEOUT_MEMBER_CONNECT: 1000,
-            const.TIMEOUT_MEMBER_DATA: 1000,
-            const.TIMEOUT_TCP_INSPECT: 50,
             const.INSERT_HEADERS: {
                 const.X_FORWARDED_FOR: "true",
                 const.X_FORWARDED_PORT: "true"
@@ -126,6 +122,15 @@
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 1000,
+                const.TIMEOUT_MEMBER_CONNECT: 1000,
+                const.TIMEOUT_MEMBER_DATA: 1000,
+                const.TIMEOUT_TCP_INSPECT: 50,
+            })
+
         listener = self.mem_listener_client.create_listener(**listener_kwargs)
         self.addClassResourceCleanup(
             self.mem_listener_client.cleanup_listener,
@@ -160,11 +165,13 @@
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
         self.assertTrue(
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
-        self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
-        self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
         self.assertEqual(self.pool1_id, listener[const.DEFAULT_POOL_ID])
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+            self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+            self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
 
         # Listener update
         new_name = data_utils.rand_name("lb_member_listener1-update")
@@ -175,10 +182,6 @@
             const.DESCRIPTION: new_description,
             const.ADMIN_STATE_UP: True,
             const.CONNECTION_LIMIT: 400,
-            const.TIMEOUT_CLIENT_DATA: 2000,
-            const.TIMEOUT_MEMBER_CONNECT: 2000,
-            const.TIMEOUT_MEMBER_DATA: 2000,
-            const.TIMEOUT_TCP_INSPECT: 100,
             const.INSERT_HEADERS: {
                 const.X_FORWARDED_FOR: "false",
                 const.X_FORWARDED_PORT: "false"
@@ -188,6 +191,15 @@
             # const.DEFAULT_TLS_CONTAINER_REF: '',
             # const.SNI_CONTAINER_REFS: [],
         }
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            listener_update_kwargs.update({
+                const.TIMEOUT_CLIENT_DATA: 2000,
+                const.TIMEOUT_MEMBER_CONNECT: 2000,
+                const.TIMEOUT_MEMBER_DATA: 2000,
+                const.TIMEOUT_TCP_INSPECT: 100,
+            })
+
         listener = self.mem_listener_client.update_listener(
             listener[const.ID], **listener_update_kwargs)
 
@@ -226,11 +238,13 @@
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
         self.assertFalse(
             strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
-        self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
-        self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
-        self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
-        self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
         self.assertEqual(self.pool2_id, listener[const.DEFAULT_POOL_ID])
+        if self.mem_listener_client.is_version_supported(
+                self.api_version, '2.1'):
+            self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
+            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
+            self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
+            self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
 
         # Listener delete
         waiters.wait_for_status(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 2a778e9..b30d651 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -109,10 +109,15 @@
             const.ADDRESS: '192.0.2.1',
             const.PROTOCOL_PORT: 80,
             const.WEIGHT: 50,
-            const.BACKUP: False,
             const.MONITOR_ADDRESS: '192.0.2.2',
             const.MONITOR_PORT: 8080,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member_kwargs.update({
+                const.BACKUP: False,
+            })
+
         if self.lb_member_vip_subnet:
             member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
                 const.ID]
@@ -139,11 +144,25 @@
         parser.parse(member[const.CREATED_AT])
         parser.parse(member[const.UPDATED_AT])
         UUID(member[const.ID])
-        self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
+
+        # Members may initially be in a transitional operating status
+        # such as DOWN or MAINT; give it some time to stabilize on
+        # NO_MONITOR, the expected live status.
+        member = waiters.wait_for_status(
+            self.mem_member_client.show_member,
+            member[const.ID], const.OPERATING_STATUS,
+            const.NO_MONITOR,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout,
+            pool_id=self.pool_id)
 
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
-                       const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+                       const.PROTOCOL_PORT, const.WEIGHT,
                        const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            equal_items.append(const.BACKUP)
+
         if const.SUBNET_ID in member_kwargs:
             equal_items.append(const.SUBNET_ID)
         else:
@@ -159,10 +178,15 @@
             const.NAME: new_name,
             const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
             const.WEIGHT: member[const.WEIGHT] + 1,
-            const.BACKUP: not member[const.BACKUP],
             const.MONITOR_ADDRESS: '192.0.2.3',
             const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
         }
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            member_update_kwargs.update({
+                const.BACKUP: not member[const.BACKUP],
+            })
+
         member = self.mem_member_client.update_member(
             member[const.ID], **member_update_kwargs)
 
@@ -181,7 +205,11 @@
 
         # Test changed items
         equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
-                       const.BACKUP, const.MONITOR_ADDRESS, const.MONITOR_PORT]
+                       const.MONITOR_ADDRESS, const.MONITOR_PORT]
+        if self.mem_member_client.is_version_supported(
+                self.api_version, '2.1'):
+            equal_items.append(const.BACKUP)
+
         for item in equal_items:
             self.assertEqual(member_update_kwargs[item], member[item])
 
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 2cc16e0..c30932c 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -53,6 +53,7 @@
     client_manager = clients.ManagerV2
     webserver1_response = 1
     webserver2_response = 5
+    # Fixed IP host indexes already handed out on the member subnets,
+    # tracked at class scope so tests do not allocate duplicates.
+    used_ips = []
 
     @classmethod
     def skip_checks(cls):
@@ -123,6 +124,8 @@
 
         conf_lb = CONF.load_balancer
 
+        # Record the maximum API version the endpoint reports so tests
+        # can gate version-specific fields and assertions on it.
+        cls.api_version = cls.mem_lb_client.get_max_api_version()
+
         if conf_lb.test_subnet_override and not conf_lb.test_network_override:
             raise exceptions.InvalidConfiguration(
                 "Configuration value test_network_override must be "
@@ -366,6 +369,9 @@
             ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
         if cls.lb_member_vip_subnet:
             ip_index = data_utils.rand_int_id(start=10, end=100)
+            # Re-roll until we get a host index that no other test has
+            # claimed, so duplicate fixed IPs are never requested.
+            while ip_index in cls.used_ips:
+                ip_index = data_utils.rand_int_id(start=10, end=100)
+            cls.used_ips.append(ip_index)
             if ip_version == 4:
                 network = ipaddress.IPv4Network(
                     six.u(CONF.load_balancer.vip_subnet_cidr))