Merge "Sync Sphinx requirement"
diff --git a/octavia_tempest_plugin/clients.py b/octavia_tempest_plugin/clients.py
index 8409093..f85404c 100644
--- a/octavia_tempest_plugin/clients.py
+++ b/octavia_tempest_plugin/clients.py
@@ -41,7 +41,6 @@
     provider_client)
 
 CONF = config.CONF
-SERVICE_TYPE = 'load-balancer'
 
 
 class ManagerV2(clients.Manager):
@@ -49,28 +48,29 @@
     def __init__(self, credentials):
         super(ManagerV2, self).__init__(credentials)
 
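+        # Build a single set of client kwargs from the tempest default
+        # parameters plus the [load_balancer] endpoint and build settings.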
+        params = dict(self.default_params)
+        params.update({
+            'auth_provider': self.auth_provider,
+            'service': CONF.load_balancer.catalog_type,
+            'region': CONF.load_balancer.region,
+            'endpoint_type': CONF.load_balancer.endpoint_type,
+            'build_interval': CONF.load_balancer.build_interval,
+            'build_timeout': CONF.load_balancer.build_timeout
+        })
+
         self.loadbalancer_client = loadbalancer_client.LoadbalancerClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.listener_client = listener_client.ListenerClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.pool_client = pool_client.PoolClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.member_client = member_client.MemberClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
+            **params)
+        self.listener_client = listener_client.ListenerClient(**params)
+        self.pool_client = pool_client.PoolClient(**params)
+        self.member_client = member_client.MemberClient(**params)
         self.healthmonitor_client = healthmonitor_client.HealthMonitorClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.l7policy_client = l7policy_client.L7PolicyClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.l7rule_client = l7rule_client.L7RuleClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.amphora_client = amphora_client.AmphoraClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
+            **params)
+        self.l7policy_client = l7policy_client.L7PolicyClient(**params)
+        self.l7rule_client = l7rule_client.L7RuleClient(**params)
+        self.amphora_client = amphora_client.AmphoraClient(**params)
         self.flavor_profile_client = flavor_profile_client.FlavorProfileClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.flavor_client = flavor_client.FlavorClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
-        self.provider_client = provider_client.ProviderClient(
-            self.auth_provider, SERVICE_TYPE, CONF.identity.region)
+            **params)
+        self.flavor_client = flavor_client.FlavorClient(**params)
+        self.provider_client = provider_client.ProviderClient(**params)
         self.flavor_capabilities_client = (
-            flavor_capabilities_client.FlavorCapabilitiesClient(
-                self.auth_provider, SERVICE_TYPE, CONF.identity.region))
+            flavor_capabilities_client.FlavorCapabilitiesClient(**params))
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 7ddcb7a..e767298 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -97,6 +97,7 @@
 SORT = 'sort'
 SINGLE = 'SINGLE'
 ACTIVE_STANDBY = 'ACTIVE_STANDBY'
+SUPPORTED_LB_TOPOLOGIES = (SINGLE, ACTIVE_STANDBY)
 
 # Protocols
 HTTP = 'HTTP'
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 026e941..dee2bd2 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -111,6 +111,10 @@
                 default={'amphora': 'The Octavia Amphora driver.',
                          'octavia': 'Deprecated alias of the Octavia Amphora '
                          'driver.'}),
+    cfg.StrOpt('loadbalancer_topology',
+               default=const.SINGLE,
+               choices=const.SUPPORTED_LB_TOPOLOGIES,
+               help='Load balancer topology configuration.'),
     # Networking
     cfg.BoolOpt('test_with_ipv6',
                 default=True,
@@ -192,4 +196,8 @@
                default="TCP",
                help="The type of L4 Protocol which is supported with the "
                     "provider driver."),
+    cfg.BoolOpt('spare_pool_enabled',
+                default=False,
+                help="Whether the spare pool is available with the amphora "
+                     "provider driver or not."),
 ]
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
index 40418a2..97886b5 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -46,6 +46,10 @@
                                     "or 'octavia' (alias to 'amphora', "
                                     "deprecated) set.")
 
+        if CONF.load_balancer.loadbalancer_topology != const.ACTIVE_STANDBY:
+            raise cls.skipException("Configured load balancer topology is not "
+                                    "%s." % const.ACTIVE_STANDBY)
+
     @classmethod
     def resource_setup(cls):
         """Setup resources needed by the tests."""
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 555d34e..66d26cf 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -120,8 +120,9 @@
         self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
         self.assertIsNotNone(lb[const.VIP_PORT_ID])
         if lb_kwargs[const.VIP_SUBNET_ID]:
-            self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
-                             lb[const.VIP_ADDRESS])
+            if ip_version == 4 or self.lb_member_vip_ipv6_subnet_stateful:
+                self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
+                                 lb[const.VIP_ADDRESS])
             self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                              lb[const.VIP_SUBNET_ID])
 
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/__init__.py b/octavia_tempest_plugin/tests/spare_pool_scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/__init__.py
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/__init__.py b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/__init__.py
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
new file mode 100644
index 0000000..072bd20
--- /dev/null
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
@@ -0,0 +1,240 @@
+# Copyright 2019 Red Hat Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class SparePoolTest(test_base.LoadBalancerBaseTestWithCompute):
+
+    @classmethod
+    def skip_checks(cls):
+        super(SparePoolTest, cls).skip_checks()
+
+        if CONF.load_balancer.provider not in ['amphora', 'octavia']:
+            raise cls.skipException("Amphora tests require provider 'amphora' "
+                                    "or 'octavia' (alias to 'amphora', "
+                                    "deprecated) set.")
+        if not CONF.loadbalancer_feature_enabled.spare_pool_enabled:
+            raise cls.skipException('[loadbalancer-feature-enabled] '
+                                    '"spare_pool_enabled" is set to False in '
+                                    'the Tempest configuration. Spare pool '
+                                    'tests will be skipped.')
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(SparePoolTest, cls).resource_setup()
+
+    @decorators.idempotent_id('2ba3a2c2-de9d-4556-9535-cbe9209b4eaa')
+    def test_health_manager_failover_to_spare_amp(self):
+        """Tests Health Manager failover to amphora in spare pool.
+
+        * Check amphora spare pool availability
+        * Test the load balancer to make sure it is functioning
+        * Delete amphora compute instance associated with load balancer
+        * Validate load balancer fails over to spare amphora
+        * Send traffic through load balancer
+        * Validate amphora spare pool size is restored
+        """
+
+        # Check there is at least one amphora in spare pool
+        spare_amps = waiters.wait_for_spare_amps(
+            self.os_admin.amphora_client.list_amphorae,
+            CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+
+        # Setup a load balancer for the tests to use
+        lb_name = data_utils.rand_name("lb_spare_pool")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+
+        self._setup_lb_network_kwargs(lb_kwargs, 4)
+
+        lb = self.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        self.lb_id = lb[const.ID]
+        self.addClassResourceCleanup(self.mem_lb_client.cleanup_loadbalancer,
+                                     self.lb_id)
+
+        if CONF.validation.connect_method == 'floating':
+            port_id = lb[const.VIP_PORT_ID]
+            result = self.lb_mem_float_ip_client.create_floatingip(
+                floating_network_id=CONF.network.public_network_id,
+                port_id=port_id)
+            floating_ip = result['floatingip']
+            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            self.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                self.lb_mem_float_ip_client.delete_floatingip,
+                self.lb_mem_float_ip_client.show_floatingip,
+                floatingip_id=floating_ip['id'])
+            self.lb_vip_address = floating_ip['floating_ip_address']
+        else:
+            self.lb_vip_address = lb[const.VIP_ADDRESS]
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
+        # Confirm the spare pool has changed since last check
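+        # Creating the load balancer consumes an amphora from the spare
+        # pool, so the amphorae listed now should differ from the first
+        # listing.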
+        spare_amps_2 = waiters.wait_for_spare_amps(
+            self.os_admin.amphora_client.list_amphorae,
+            CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+        self.assertNotEqual(spare_amps, spare_amps_2)
+
+        listener_name = data_utils.rand_name("lb_member_listener1_spare")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '80',
+            const.LOADBALANCER_ID: self.lb_id,
+        }
+        listener = self.mem_listener_client.create_listener(**listener_kwargs)
+        self.listener_id = listener[const.ID]
+        self.addClassResourceCleanup(
+            self.mem_listener_client.cleanup_listener,
+            self.listener_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool_name = data_utils.rand_name("lb_member_pool1-spare")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LISTENER_ID: self.listener_id,
+        }
+        pool = self.mem_pool_client.create_pool(**pool_kwargs)
+        self.pool_id = pool[const.ID]
+        self.addClassResourceCleanup(
+            self.mem_pool_client.cleanup_pool,
+            self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+        waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+                                self.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1-spare")
+        member1_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver1_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+        member1 = self.mem_member_client.create_member(
+            **member1_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2-spare")
+        member2_kwargs = {
+            const.POOL_ID: self.pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: self.webserver2_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if self.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = self.lb_member_2_subnet[const.ID]
+
+        member2 = self.mem_member_client.create_member(
+            **member2_kwargs)
+        self.addClassResourceCleanup(
+            self.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=self.pool_id,
+            lb_client=self.mem_lb_client, lb_id=self.lb_id)
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer, self.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Check there is at least one amphora in spare pool
+        spare_amps = waiters.wait_for_spare_amps(
+            self.os_admin.amphora_client.list_amphorae,
+            CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+
+        # Delete amphora compute instance
+        amp = self.os_admin.amphora_client.list_amphorae(
+            query_params='{loadbalancer_id}={lb_id}'.format(
+                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
+
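+        # Deleting the compute instance stops the amphora health heartbeats,
+        # so the health manager should fail the load balancer over onto a
+        # spare amphora.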
+        self.os_admin_servers_client.delete_server(amp[0][const.COMPUTE_ID])
+
+        # Wait for the amphora failover to start
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.PENDING_UPDATE, CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Wait for the load balancer to return to ACTIVE so the
+        # cleanup steps will pass
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE, CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Confirm the spare pool has changed since last check
+        spare_amps_2 = waiters.wait_for_spare_amps(
+            self.os_admin.amphora_client.list_amphorae,
+            CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+        self.assertNotEqual(spare_amps, spare_amps_2)
+
+        # Check there is at least one amphora in spare pool
+        waiters.wait_for_spare_amps(self.os_admin.amphora_client.list_amphorae,
+                                    CONF.load_balancer.lb_build_interval,
+                                    CONF.load_balancer.lb_build_timeout)
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index a4775da..e85fb0a 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -157,6 +157,7 @@
                                                  uuidutils.generate_uuid()}
                 cls.lb_member_1_ipv6_subnet = {'id': uuidutils.generate_uuid()}
                 cls.lb_member_2_ipv6_subnet = {'id': uuidutils.generate_uuid()}
+                cls.lb_member_vip_ipv6_subnet_stateful = True
             return
         elif CONF.load_balancer.test_network_override:
             if conf_lb.test_subnet_override:
@@ -182,6 +183,10 @@
                 cls.lb_member_vip_ipv6_subnet = override_ipv6_subnet
                 cls.lb_member_1_ipv6_subnet = override_ipv6_subnet
                 cls.lb_member_2_ipv6_subnet = override_ipv6_subnet
+                cls.lb_member_vip_ipv6_subnet_stateful = False
+                if (override_ipv6_subnet[0]['ipv6_address_mode'] ==
+                        'dhcpv6-stateful'):
+                    cls.lb_member_vip_ipv6_subnet_stateful = True
             else:
                 cls.lb_member_vip_ipv6_subnet = None
                 cls.lb_member_1_ipv6_subnet = None
@@ -303,6 +308,10 @@
             priv_ipv6_subnet = cls.os_admin.subnets_client.list_subnets(
                 name='ipv6-private-subnet')['subnets']
 
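+            # Record whether the IPv6 VIP subnet uses dhcpv6-stateful
+            # addressing; a fixed VIP address can only be requested on
+            # stateful subnets.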
+            cls.lb_member_vip_ipv6_subnet_stateful = False
+            if (priv_ipv6_subnet and
+                    priv_ipv6_subnet[0]['ipv6_address_mode'] ==
+                    'dhcpv6-stateful'):
+                cls.lb_member_vip_ipv6_subnet_stateful = True
             if len(priv_ipv6_subnet) == 1:
                 cls.lb_member_vip_ipv6_subnet = priv_ipv6_subnet[0]
                 cls.lb_member_vip_ipv6_net = {
@@ -457,6 +466,10 @@
                     subnet = cls.os_admin.subnets_client.show_subnet(subnet_id)
                     network = ipaddress.IPv6Network(subnet['subnet']['cidr'])
                     lb_vip_address = str(network[ip_index])
+                    # If the subnet is IPv6 slaac or dhcpv6-stateless,
+                    # neutron does not allow a fixed IP
+                    if not cls.lb_member_vip_ipv6_subnet_stateful:
+                        use_fixed_ip = False
             lb_kwargs[const.VIP_SUBNET_ID] = subnet_id
             if use_fixed_ip:
                 lb_kwargs[const.VIP_ADDRESS] = lb_vip_address
diff --git a/octavia_tempest_plugin/tests/validators.py b/octavia_tempest_plugin/tests/validators.py
index 2dc1d64..773fcc4 100644
--- a/octavia_tempest_plugin/tests/validators.py
+++ b/octavia_tempest_plugin/tests/validators.py
@@ -75,7 +75,7 @@
                 return
             except requests.exceptions.Timeout:
                 # Don't sleep as we have already waited the interval.
-                LOG.info('Request for () timed out. Retrying.'.format(URL))
+                LOG.info('Request for {} timed out. Retrying.'.format(URL))
             except (exceptions.InvalidHttpSuccessCode,
                     exceptions.InvalidHTTPResponseBody,
                     requests.exceptions.SSLError):
diff --git a/octavia_tempest_plugin/tests/waiters.py b/octavia_tempest_plugin/tests/waiters.py
index 5abb26e..89e8455 100644
--- a/octavia_tempest_plugin/tests/waiters.py
+++ b/octavia_tempest_plugin/tests/waiters.py
@@ -180,3 +180,31 @@
             raise exceptions.TimeoutException(message)
 
         time.sleep(check_interval)
+
+
+def wait_for_spare_amps(list_func, check_interval, check_timeout):
+    """Waits for amphorae in the spare pool.
+
+    :param list_func: The tempest service client amphora list method.
+                        Ex. cls.os_admin.amphora_client.list_amphorae
+    :check_interval: How often to check the status, in seconds.
+    :check_timeout: The maximum time, in seconds, to check the status.
+    :raises TimeoutException: No amphora available in the spare pool within
+                              the check_timeout period.
+    :returns: A list of amphorae in the spare pool.
+    """
+
+    LOG.info('Waiting for amphorae in spare pool')
+    start = int(time.time())
+    while True:
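+        # Amphorae in READY status are the ones sitting in the spare pool.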
+        spare_amps = list_func(
+            query_params='{status}={status_ready}'.format(
+                status=const.STATUS, status_ready=const.STATUS_READY))
+        if len(spare_amps) >= 1:
+            return spare_amps
+        if int(time.time()) - start >= check_timeout:
+            message = ("No available amphorae in spare pool within the "
+                       "required time {timeout}.".format(
+                           timeout=check_timeout))
+            raise exceptions.TimeoutException(message)
+        time.sleep(check_interval)
diff --git a/releasenotes/notes/Fix-service-client-params-41a0f7c9c6b53aac.yaml b/releasenotes/notes/Fix-service-client-params-41a0f7c9c6b53aac.yaml
new file mode 100644
index 0000000..16c04da
--- /dev/null
+++ b/releasenotes/notes/Fix-service-client-params-41a0f7c9c6b53aac.yaml
@@ -0,0 +1,5 @@
+---
+fixes:
+  - |
+    Fixed the service clients to use the tempest default service client
+    configuration settings.
diff --git a/tox.ini b/tox.ini
index f27ed30..1ca36b7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,11 +5,12 @@
 
 [testenv]
 usedevelop = True
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} {opts} {packages}
+install_command = pip install {opts} {packages}
 setenv =
    VIRTUAL_ENV={envdir}
    PYTHONWARNINGS=default::DeprecationWarning
-deps = -r{toxinidir}/test-requirements.txt
+deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+       -r{toxinidir}/test-requirements.txt
 commands =
   stestr run {posargs}
   stestr slowest
@@ -40,7 +41,7 @@
 [testenv:docs]
 basepython = python3
 deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
+    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/doc/requirements.txt
 whitelist_externals = rm
@@ -51,7 +52,7 @@
 [testenv:releasenotes]
 basepython = python3
 deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
+    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/doc/requirements.txt
 commands =
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index 4692d82..db5e924 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -49,7 +49,7 @@
       - ^releasenotes/.*$
     vars:
       devstack_localrc:
-        TEMPEST_PLUGINS: "'/opt/stack/octavia-tempest-plugin'"
+        TEMPEST_PLUGINS: /opt/stack/octavia-tempest-plugin
         USE_PYTHON3: true
       devstack_local_conf:
         post-config:
@@ -140,7 +140,7 @@
           MULTI_HOST: 1
           SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
           HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
-          TEMPEST_PLUGINS: "'/opt/stack/octavia-tempest-plugin'"
+          TEMPEST_PLUGINS: /opt/stack/octavia-tempest-plugin
           USE_PYTHON3: true
           # Octavia specific settings
           OCTAVIA_CONTROLLER_IP_PORT_LIST: 192.168.0.3:5555,192.168.0.4:5555
@@ -429,6 +429,43 @@
     parent: octavia-v2-dsvm-tls-barbican
     override-checkout: stable/queens
 
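+# Spare pool jobs: devstack keeps one spare amphora via the Octavia
+# house_keeping settings and tempest enables the spare pool scenario tests.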
+- job:
+    name: octavia-v2-dsvm-spare-pool
+    parent: octavia-v2-dsvm-scenario
+    vars:
+      tempest_test_regex: ^octavia_tempest_plugin.tests.spare_pool_scenario.v2
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            house_keeping:
+              spare_amphora_pool_size: 1
+        test-config:
+          "$TEMPEST_CONFIG":
+            loadbalancer-feature-enabled:
+              spare_pool_enabled: True
+
+- job:
+    name: octavia-v2-dsvm-py2-spare-pool
+    parent: octavia-v2-dsvm-spare-pool
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+
+- job:
+    name: octavia-v2-dsvm-spare-pool-stable-stein
+    parent: octavia-v2-dsvm-spare-pool
+    override-checkout: stable/stein
+
+- job:
+    name: octavia-v2-dsvm-py2-spare-pool-stable-rocky
+    parent: octavia-v2-dsvm-py2-spare-pool
+    override-checkout: stable/rocky
+
+- job:
+    name: octavia-v2-dsvm-py2-spare-pool-stable-queens
+    parent: octavia-v2-dsvm-py2-spare-pool
+    override-checkout: stable/queens
+
 # Temporary transitional aliases for gates used in other repos
 # Remove once octavia has transitioned job names
 - job:
@@ -444,7 +481,7 @@
     parent: octavia-v2-dsvm-py2-scenario-centos-7
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario
+    name: octavia-v2-act-stdby-iptables-dsvm-scenario
     parent: octavia-dsvm-live-base
     pre-run: playbooks/act_stby_iptables/pre.yaml
     vars:
@@ -465,19 +502,20 @@
           "$TEMPEST_CONFIG":
             load_balancer:
               check_timeout: 180
+              loadbalancer_topology: 'ACTIVE_STANDBY'
       tempest_test_regex: ^octavia_tempest_plugin.tests.act_stdby_scenario.v2.test_active_standby_iptables
       tox_envlist: all
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-py2-scenario
-    parent: octavia-v2-act-stdby-dsvm-scenario
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    parent: octavia-v2-act-stdby-iptables-dsvm-scenario
     vars:
       devstack_localrc:
         USE_PYTHON3: False
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-py2-scenario-centos-7
-    parent: octavia-v2-act-stdby-dsvm-py2-scenario
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
     nodeset: devstack-single-node-centos-7
     vars:
       devstack_localrc:
@@ -492,16 +530,16 @@
               amphora_ssh_user: centos
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-scenario-stable-stein
-    parent: octavia-v2-act-stdby-dsvm-scenario
+    name: octavia-v2-act-stdby-iptables-dsvm-scenario-stable-stein
+    parent: octavia-v2-act-stdby-iptables-dsvm-scenario
     override-checkout: stable/stein
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-py2-scenario-stable-rocky
-    parent: octavia-v2-act-stdby-dsvm-py2-scenario
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
     override-checkout: stable/rocky
 
 - job:
-    name: octavia-v2-act-stdby-dsvm-py2-scenario-stable-queens
-    parent: octavia-v2-act-stdby-dsvm-py2-scenario
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
     override-checkout: stable/queens
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index af516d7..2bb813e 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -26,17 +26,17 @@
             voting: false
         - octavia-v2-dsvm-py2-scenario-two-node:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario:
+        - octavia-v2-act-stdby-iptables-dsvm-scenario:
             voting: false
-        - octavia-v2-act-stdby-dsvm-py2-scenario:
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario:
             voting: false
-        - octavia-v2-act-stdby-dsvm-py2-scenario-centos-7:
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7:
             voting: false
-        - octavia-v2-act-stdby-dsvm-scenario-stable-stein:
+        - octavia-v2-act-stdby-iptables-dsvm-scenario-stable-stein:
             voting: false
-        - octavia-v2-act-stdby-dsvm-py2-scenario-stable-rocky:
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky:
             voting: false
-        - octavia-v2-act-stdby-dsvm-py2-scenario-stable-queens:
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens:
             voting: false
         - octavia-v2-dsvm-tls-barbican:
             voting: false
@@ -46,6 +46,16 @@
             voting: false
         - octavia-v2-dsvm-tls-barbican-stable-queens:
             voting: false
+        - octavia-v2-dsvm-spare-pool:
+            voting: false
+        - octavia-v2-dsvm-py2-spare-pool:
+            voting: false
+        - octavia-v2-dsvm-spare-pool-stable-stein:
+            voting: false
+        - octavia-v2-dsvm-py2-spare-pool-stable-rocky:
+            voting: false
+        - octavia-v2-dsvm-py2-spare-pool-stable-queens:
+            voting: false
     gate:
       queue: octavia
       jobs: