Merge "Fix octavia-tempest-plugin releasenotes theme"
diff --git a/.gitreview b/.gitreview
index a22fc23..9832cf7 100644
--- a/.gitreview
+++ b/.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
-host=review.openstack.org
+host=review.opendev.org
 port=29418
 project=openstack/octavia-tempest-plugin.git
diff --git a/README.rst b/README.rst
index f2b51a7..b2c74ee 100644
--- a/README.rst
+++ b/README.rst
@@ -22,7 +22,7 @@
 
 * Free software: Apache license
 * Documentation: https://docs.openstack.org/octavia-tempest-plugin/latest/
-* Source: https://git.openstack.org/cgit/openstack/octavia-tempest-plugin
+* Source: https://opendev.org/openstack/octavia-tempest-plugin
 * Bugs: https://storyboard.openstack.org/#!/project/openstack/octavia-tempest-plugin
 
 Installing
diff --git a/octavia_tempest_plugin/clients.py b/octavia_tempest_plugin/clients.py
index c1894e3..8409093 100644
--- a/octavia_tempest_plugin/clients.py
+++ b/octavia_tempest_plugin/clients.py
@@ -18,6 +18,8 @@
 from octavia_tempest_plugin.services.load_balancer.v2 import (
     amphora_client)
 from octavia_tempest_plugin.services.load_balancer.v2 import (
+    flavor_capabilities_client)
+from octavia_tempest_plugin.services.load_balancer.v2 import (
     flavor_client)
 from octavia_tempest_plugin.services.load_balancer.v2 import (
     flavor_profile_client)
@@ -69,3 +71,6 @@
             self.auth_provider, SERVICE_TYPE, CONF.identity.region)
         self.provider_client = provider_client.ProviderClient(
             self.auth_provider, SERVICE_TYPE, CONF.identity.region)
+        self.flavor_capabilities_client = (
+            flavor_capabilities_client.FlavorCapabilitiesClient(
+                self.auth_provider, SERVICE_TYPE, CONF.identity.region))
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 0122424..7ddcb7a 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -84,6 +84,7 @@
 
 # Other constants
 ACTIVE = 'ACTIVE'
+PENDING_UPDATE = 'PENDING_UPDATE'
 ADMIN_STATE_UP_TRUE = 'true'
 ASC = 'asc'
 DELETED = 'DELETED'
@@ -101,6 +102,7 @@
 HTTP = 'HTTP'
 HTTPS = 'HTTPS'
 TCP = 'TCP'
+TERMINATED_HTTPS = 'TERMINATED_HTTPS'
 
 # HTTP Methods
 GET = 'GET'
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 701eb8e..026e941 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -151,8 +151,15 @@
     cfg.StrOpt('member_2_ipv6_subnet_cidr',
                default='fd77:1457:4cf0:26a8::/64',
                help='CIDR format subnet to use for the member 2 ipv6 subnet.'),
+    # Amphora specific options
+    cfg.StrOpt('amphora_ssh_user',
+               default='ubuntu',
+               help='The amphora SSH user.'),
+    cfg.StrOpt('amphora_ssh_key',
+               default='/tmp/octavia_ssh_key',
+               help='The amphora SSH key file.'),
     # Environment specific options
-    # These are used to accomidate clouds with specific limitations
+    # These are used to accommodate clouds with specific limitations
     cfg.IntOpt('random_server_name_length',
                default=0,
                help='If non-zero, generate a random name of the length '
@@ -167,18 +174,29 @@
 LBFeatureEnabledGroup = [
     cfg.BoolOpt('health_monitor_enabled',
                 default=True,
-                help="Whether Health Monitor is available with provider"
-                     " driver or not."),
+                help="Whether Health Monitor is available with provider "
+                     "driver or not."),
     cfg.BoolOpt('terminated_tls_enabled',
                 default=True,
                 help="Whether TLS termination is available with provider "
                      "driver or not."),
     cfg.BoolOpt('l7_protocol_enabled',
                 default=True,
-                help="Whether L7 Protocols are available with the provider"
-                     " driver or not."),
+                help="Whether L7 Protocols are available with the provider "
+                     "driver or not."),
+    cfg.BoolOpt('pool_algorithms_enabled',
+                default=True,
+                help="Whether pool algorithms are available with provider"
+                     "driver or not."),
     cfg.StrOpt('l4_protocol',
                default="TCP",
-               help="The type of L4 Protocol which is supported with the"
-                    " provider driver."),
+               help="The type of L4 Protocol which is supported with the "
+                    "provider driver."),
 ]
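+
+# A minimal tempest.conf sketch for the new amphora options (illustrative
+# values only; compare the act-stdby job definitions in zuul.d/jobs.yaml):
+#
+#     [load_balancer]
+#     amphora_ssh_user = centos
+#     amphora_ssh_key = /tmp/octavia_ssh_key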
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/flavor_capabilities_client.py b/octavia_tempest_plugin/services/load_balancer/v2/flavor_capabilities_client.py
new file mode 100644
index 0000000..4c23042
--- /dev/null
+++ b/octavia_tempest_plugin/services/load_balancer/v2/flavor_capabilities_client.py
@@ -0,0 +1,85 @@
+#   Copyright 2019 Rackspace US Inc.  All rights reserved.
+#
+#   Licensed under the Apache License, Version 2.0 (the "License"); you may
+#   not use this file except in compliance with the License. You may obtain
+#   a copy of the License at
+#
+#        http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#   License for the specific language governing permissions and limitations
+#   under the License.
+#
+
+from octavia_tempest_plugin.services.load_balancer.v2 import base_client
+from octavia_tempest_plugin.services.load_balancer.v2 import provider_client
+
+Unset = base_client.Unset
+
+
+class FlavorCapabilitiesClient(base_client.BaseLBaaSClient):
+
+    list_root_tag = 'flavor_capabilities'
+
+    def __init__(self, *args, **kwargs):
+        super(FlavorCapabilitiesClient, self).__init__(*args, **kwargs)
+        providers_list_root_tag = provider_client.ProviderClient.list_root_tag
+        # /v2.0/lbaas/providers/<PROVIDER_UUID>/flavor_capabilities
+        self.uri = "{provider_base_uri}/{parent}/{object}".format(
+            provider_base_uri=self.base_uri.format(
+                object=providers_list_root_tag),
+            parent="{parent}",
+            object=self.list_root_tag
+        )
+
+    def list_flavor_capabilities(self, provider, query_params=None,
+                                 return_object_only=True):
+        """Get a list of provider flavor capability objects.
+
+        :param provider: The provider to query for flavor capabilities.
+        :param query_params: The optional query parameters to append to the
+                             request. Ex. fields=id&fields=name
+        :param return_object_only: If True, the response returns the object
+                                   inside the root tag. False returns the full
+                                   response from the API.
+        :raises AssertionError: if the expected_code isn't a valid http success
+                                response code
+        :raises BadRequest: If a 400 response code is received
+        :raises Conflict: If a 409 response code is received
+        :raises Forbidden: If a 403 response code is received
+        :raises Gone: If a 410 response code is received
+        :raises InvalidContentType: If a 415 response code is received
+        :raises InvalidHTTPResponseBody: The response body wasn't valid JSON
+        :raises InvalidHttpSuccessCode: if the read code isn't an expected
+                                        http success code
+        :raises NotFound: If a 404 response code is received
+        :raises NotImplemented: If a 501 response code is received
+        :raises OverLimit: If a 413 response code is received and over_limit is
+                           not in the response body
+        :raises RateLimitExceeded: If a 413 response code is received and
+                                   over_limit is in the response body
+        :raises ServerFault: If a 500 response code is received
+        :raises Unauthorized: If a 401 response code is received
+        :raises UnexpectedContentType: If the content-type of the response
+                                       isn't an expected type
+        :raises UnexpectedResponseCode: If a response code above 400 is
+                                        received and it doesn't fall into any
+                                        of the handled checks
+        :raises UnprocessableEntity: If a 422 response code is received and
+                                     couldn't be parsed
+        :returns: A list of flavor capability objects.
+        """
+        return self._list_objects(parent_id=provider,
+                                  query_params=query_params,
+                                  return_object_only=return_object_only)
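+
+# Example usage (an illustrative sketch, not part of this module): listing
+# the capabilities reported by the 'amphora' provider, assuming an
+# initialized clients.Manager instance named 'mgr':
+#
+#     capabilities = mgr.flavor_capabilities_client.list_flavor_capabilities(
+#         provider='amphora')
+#     for capability in capabilities:
+#         print(capability['name'], capability['description'])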
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py b/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
new file mode 100644
index 0000000..40418a2
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -0,0 +1,311 @@
+# Copyright 2019 Rackspace US Inc.  All rights reserved.
+# Copyright 2019 Red Hat Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import testtools
+
+from oslo_log import log as logging
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils.linux import remote_client
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+@testtools.skipUnless(
+    CONF.validation.run_validation,
+    'Active-Standby tests will not work without run_validation enabled.')
+class ActiveStandbyIptablesScenarioTest(
+        test_base.LoadBalancerBaseTestWithCompute):
+
+    @classmethod
+    def skip_checks(cls):
+        super(ActiveStandbyIptablesScenarioTest, cls).skip_checks()
+
+        if CONF.load_balancer.provider not in ['amphora', 'octavia']:
+            raise cls.skipException("Amphora tests require provider 'amphora' "
+                                    "or 'octavia' (alias to 'amphora', "
+                                    "deprecated) set.")
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(ActiveStandbyIptablesScenarioTest, cls).resource_setup()
+
+        lb_name = data_utils.rand_name("lb_member_lb1_actstdby")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+
+        # TODO(rm_work): Make this work with ipv6 and split this test for both
+        ip_version = 4
+        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        cls.lb_id = lb[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_lb_client.cleanup_loadbalancer,
+            cls.lb_id)
+
+        if CONF.validation.connect_method == 'floating':
+            port_id = lb[const.VIP_PORT_ID]
+            result = cls.lb_mem_float_ip_client.create_floatingip(
+                floating_network_id=CONF.network.public_network_id,
+                port_id=port_id)
+            floating_ip = result['floatingip']
+            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_float_ip_client.delete_floatingip,
+                cls.lb_mem_float_ip_client.show_floatingip,
+                floatingip_id=floating_ip['id'])
+            cls.lb_vip_address = floating_ip['floating_ip_address']
+        else:
+            cls.lb_vip_address = lb[const.VIP_ADDRESS]
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
+        listener_name = data_utils.rand_name("lb_member_listener1_actstdby")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '80',
+            const.LOADBALANCER_ID: cls.lb_id,
+        }
+        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+        cls.listener_id = listener[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool_name = data_utils.rand_name("lb_member_pool1_actstdby")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LISTENER_ID: cls.listener_id,
+        }
+        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
+        cls.pool_id = pool[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1_actstdby")
+        member1_kwargs = {
+            const.POOL_ID: cls.pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: cls.webserver1_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if cls.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]
+
+        member1 = cls.mem_member_client.create_member(
+            **member1_kwargs)
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+        waiters.wait_for_status(
+            cls.mem_lb_client.show_loadbalancer, cls.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2_actstdby")
+        member2_kwargs = {
+            const.POOL_ID: cls.pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: cls.webserver2_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if cls.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]
+
+        member2 = cls.mem_member_client.create_member(
+            **member2_kwargs)
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+        waiters.wait_for_status(
+            cls.mem_lb_client.show_loadbalancer, cls.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+    @classmethod
+    def _log_vip_traffic(cls, amp, log_prefix):
+        ssh_key = cls._get_amphora_ssh_key()
+        linux_client = remote_client.RemoteClient(
+            amp['lb_network_ip'], CONF.load_balancer.amphora_ssh_user,
+            pkey=ssh_key)
+        linux_client.validate_authentication()
+
+        # Allow logging from non-init namespaces
+        # https://lore.kernel.org/patchwork/patch/673714/
+        linux_client.exec_command('echo 1 | sudo tee '
+                                  '/proc/sys/net/netfilter/nf_log_all_netns')
+
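+        # Log every packet addressed to the VIP (ha_ip) by inserting an
+        # iptables LOG rule at the top of the INPUT chain inside the
+        # amphora-haproxy network namespace.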
+        linux_client.exec_command('sudo ip netns exec amphora-haproxy '
+                                  'iptables -I INPUT 1 -d {0} -j LOG '
+                                  '--log-prefix "{1}"'
+                                  .format(amp['ha_ip'], log_prefix))
+
+    @classmethod
+    def _has_vip_traffic(cls, ip_address, log_prefix):
+        ssh_key = cls._get_amphora_ssh_key()
+        linux_client = remote_client.RemoteClient(
+            ip_address, CONF.load_balancer.amphora_ssh_user, pkey=ssh_key)
+        linux_client.validate_authentication()
+
+        try:
+            linux_client.exec_command('sudo journalctl -t kernel | grep {0}'
+                                      .format(log_prefix))
+            return True
+        except exceptions.SSHExecCommandFailed:
+            return False
+
+    @classmethod
+    def _get_active_standby_amps(cls, amps, log_prefix):
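+        # Only the amphora currently holding the VIP (the VRRP MASTER)
+        # receives traffic addressed to it, so the iptables LOG prefix
+        # should appear in exactly one amphora's kernel log.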
+        active = None
+        stby = None
+        for amp in amps:
+            if cls._has_vip_traffic(amp['lb_network_ip'], log_prefix):
+                if active:
+                    LOG.exception('Failed to determine single active amphora.')
+                    raise Exception('More than one amphora is forwarding VIP '
+                                    'traffic.')
+                active = amp
+            else:
+                stby = amp
+
+        return active, stby
+
+    @classmethod
+    def _get_amphora_ssh_key(cls):
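+        # The default key path (/tmp/octavia_ssh_key) matches the key
+        # generated by the act_stby_iptables pre-run playbook.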
+        key_file = CONF.load_balancer.amphora_ssh_key
+        if not key_file:
+            raise Exception("SSH key file not provided.")
+        if not os.path.isfile(key_file):
+            raise Exception("Could not find amphora ssh key file {1}."
+                            .format(key_file))
+        with open(key_file, 'r') as f:
+            return f.read()
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Active/Standby tests will not work in noop mode.')
+    @decorators.idempotent_id('deab2b3f-62c7-4a05-9e92-aa45a04773fd')
+    def test_active_standby_vrrp_failover(self):
+        """Tests active/standby VRRP failover
+
+        * Tests the load balancer to make sure it is functioning
+        * Identifies the Master and Backup amphora
+        * Deletes the Master amphora
+        * Sends traffic through the load balancer
+        * Validates that the Backup has assumed the Master role
+        """
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Check there are two amphorae associated with the load balancer
+        amps = self.os_admin.amphora_client.list_amphorae(
+            query_params='{loadbalancer_id}={lb_id}'.format(
+                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
+        self.assertEqual(2, len(amps))
+
+        # Log VIP traffic
+        for amp in amps:
+            self._log_vip_traffic(amp, 'ACTSTBY-1')
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Which amphora is the active one?
+        active = self._get_active_standby_amps(amps, 'ACTSTBY-1')[0]
+
+        # Delete active amphora
+        self.os_admin_servers_client.delete_server(active[const.COMPUTE_ID])
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Wait for the amphora failover to start
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.PENDING_UPDATE, CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Wait for the load balancer to return to ACTIVE
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE, CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+
+        # Check again that there are two amphorae associated with the load
+        # balancer
+        amps = self.os_admin.amphora_client.list_amphorae(
+            query_params='{loadbalancer_id}={lb_id}'.format(
+                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
+        self.assertEqual(2, len(amps))
+
+        # Log VIP traffic
+        for amp in amps:
+            self._log_vip_traffic(amp, 'ACTSTBY-2')
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Ensure only one amphora is handling VIP traffic
+        self._get_active_standby_amps(amps, 'ACTSTBY-2')
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 599305a..691c61c 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -178,7 +178,7 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
+            name=lb_name, provider=CONF.load_balancer.provider,
             vip_network_id=self.lb_member_vip_net[const.ID])
         lb_id = lb[const.ID]
         self.addCleanup(
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 4990c79..555d34e 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -149,7 +149,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-delete")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -195,7 +196,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-cascade_delete")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -299,6 +301,7 @@
         lb = self.mem_lb_client.create_loadbalancer(
             admin_state_up=True,
             description=lb_description,
+            provider=CONF.load_balancer.provider,
             name=lb_name,
             vip_network_id=self.lb_member_vip_net[const.ID])
         self.addCleanup(
@@ -329,6 +332,7 @@
         lb = self.mem_lb_client.create_loadbalancer(
             admin_state_up=False,
             description=lb_description,
+            provider=CONF.load_balancer.provider,
             name=lb_name,
             vip_network_id=self.lb_member_vip_net[const.ID])
         self.addCleanup(
@@ -663,7 +667,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-show_stats")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -724,7 +729,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-status")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -797,7 +803,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-failover")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index 63d9e46..a63bddb 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -193,7 +193,7 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
+            name=lb_name, provider=CONF.load_balancer.provider,
             vip_network_id=self.lb_member_vip_net[const.ID])
         lb_id = lb[const.ID]
         self.addCleanup(
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 27663b2..25f741a 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -25,8 +25,6 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib import decorators
 
-from octavia_lib.common import constants as lib_consts
-
 from octavia_tempest_plugin.common import barbican_client_mgr
 from octavia_tempest_plugin.common import cert_utils
 from octavia_tempest_plugin.common import constants as const
@@ -222,7 +220,7 @@
         listener_name = data_utils.rand_name("lb_member_listener1-tls")
         listener_kwargs = {
             const.NAME: listener_name,
-            const.PROTOCOL: lib_consts.PROTOCOL_TERMINATED_HTTPS,
+            const.PROTOCOL: const.TERMINATED_HTTPS,
             const.PROTOCOL_PORT: '443',
             const.LOADBALANCER_ID: self.lb_id,
             const.DEFAULT_POOL_ID: self.pool_id,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
index 165424b..b91a368 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
@@ -78,7 +78,7 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb2_amphora-list")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
+            name=lb_name, provider=CONF.load_balancer.provider,
             vip_network_id=self.lb_member_vip_net[const.ID])
         lb_id = lb[const.ID]
         self.addCleanup(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
index 1cdd727..d1090c7 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -51,9 +51,9 @@
                                 CONF.load_balancer.lb_build_interval,
                                 CONF.load_balancer.lb_build_timeout)
         cls.protocol = const.HTTP
-        lb_feature_enabled = CONF.loadbalancer_feature_enabled
-        if not lb_feature_enabled.l7_protocol_enabled:
-            cls.protocol = lb_feature_enabled.l4_protocol
+        cls.lb_feature_enabled = CONF.loadbalancer_feature_enabled
+        if not cls.lb_feature_enabled.l7_protocol_enabled:
+            cls.protocol = cls.lb_feature_enabled.l4_protocol
 
         listener_name = data_utils.rand_name("lb_member_listener1_pool")
         listener_kwargs = {
@@ -161,8 +161,12 @@
             const.NAME: new_name,
             const.DESCRIPTION: new_description,
             const.ADMIN_STATE_UP: True,
-            const.LB_ALGORITHM: const.LB_ALGORITHM_LEAST_CONNECTIONS,
         }
+
+        if self.lb_feature_enabled.pool_algorithms_enabled:
+            pool_update_kwargs[const.LB_ALGORITHM] = \
+                const.LB_ALGORITHM_LEAST_CONNECTIONS
+
         if self.protocol == const.HTTP:
             pool_update_kwargs[const.SESSION_PERSISTENCE] = {
                 const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE}
@@ -184,8 +188,9 @@
         self.assertEqual(new_name, pool[const.NAME])
         self.assertEqual(new_description, pool[const.DESCRIPTION])
         self.assertTrue(pool[const.ADMIN_STATE_UP])
-        self.assertEqual(const.LB_ALGORITHM_LEAST_CONNECTIONS,
-                         pool[const.LB_ALGORITHM])
+        if self.lb_feature_enabled.pool_algorithms_enabled:
+            self.assertEqual(const.LB_ALGORITHM_LEAST_CONNECTIONS,
+                             pool[const.LB_ALGORITHM])
         self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
         if self.protocol == const.HTTP:
             self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index edc2cbc..eba7e38 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -73,10 +73,15 @@
                                 CONF.load_balancer.lb_build_interval,
                                 CONF.load_balancer.lb_build_timeout)
 
+        protocol = const.HTTP
+        lb_feature_enabled = CONF.loadbalancer_feature_enabled
+        if not lb_feature_enabled.l7_protocol_enabled:
+            protocol = lb_feature_enabled.l4_protocol
+
         listener_name = data_utils.rand_name("lb_member_listener1_operations")
         listener_kwargs = {
             const.NAME: listener_name,
-            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL: protocol,
             const.PROTOCOL_PORT: '80',
             const.LOADBALANCER_ID: cls.lb_id,
         }
@@ -96,7 +101,7 @@
         pool_name = data_utils.rand_name("lb_member_pool1_operations")
         pool_kwargs = {
             const.NAME: pool_name,
-            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL: protocol,
             const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
             const.LISTENER_ID: cls.listener_id,
         }
@@ -173,6 +178,9 @@
         # Send some traffic
         self.check_members_balanced(self.lb_vip_address)
 
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.health_monitor_enabled,
+        'Health monitor testing is disabled')
     @decorators.idempotent_id('a16f8eb4-a77c-4b0e-8b1b-91c237039713')
     def test_healthmonitor_traffic(self):
         """Tests traffic is correctly routed based on healthmonitor status
@@ -385,6 +393,9 @@
         # Send some traffic and verify it is balanced again
         self.check_members_balanced(self.lb_vip_address)
 
+    @testtools.skipUnless(
+        CONF.loadbalancer_feature_enabled.l7_protocol_enabled,
+        'L7 protocol testing is disabled')
     @decorators.idempotent_id('3558186d-6dcd-4d9d-b7f7-adc190b66149')
     def test_l7policies_and_l7rules(self):
         """Tests sending traffic through a loadbalancer with l7rules
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index f8acc2b..a4775da 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -127,6 +127,7 @@
         cls.lb_admin_flavor_client = cls.os_roles_lb_admin.flavor_client
         cls.mem_flavor_client = cls.os_roles_lb_member.flavor_client
         cls.mem_provider_client = cls.os_roles_lb_member.provider_client
+        cls.os_admin_servers_client = cls.os_admin.servers_client
 
     @classmethod
     def resource_setup(cls):
@@ -298,14 +299,14 @@
 
         # Create tenant VIP IPv6 subnet
         if CONF.load_balancer.test_with_ipv6:
-            # See if ipv6-public-subnet exists and use it if so.
-            pub_ipv6_subnet = cls.os_admin.subnets_client.list_subnets(
-                name='ipv6-public-subnet')['subnets']
+            # See if ipv6-private-subnet exists and use it if so.
+            priv_ipv6_subnet = cls.os_admin.subnets_client.list_subnets(
+                name='ipv6-private-subnet')['subnets']
 
-            if len(pub_ipv6_subnet) == 1:
-                cls.lb_member_vip_ipv6_subnet = pub_ipv6_subnet[0]
+            if len(priv_ipv6_subnet) == 1:
+                cls.lb_member_vip_ipv6_subnet = priv_ipv6_subnet[0]
                 cls.lb_member_vip_ipv6_net = {
-                    'id': pub_ipv6_subnet[0]['network_id']}
+                    'id': priv_ipv6_subnet[0]['network_id']}
             else:
                 subnet_kwargs = {
                     'name': data_utils.rand_name("lb_member_vip_ipv6_subnet"),
@@ -314,6 +315,7 @@
                     'ip_version': 6}
                 result = cls.lb_mem_subnet_client.create_subnet(
                     **subnet_kwargs)
+                cls.lb_member_vip_ipv6_net = cls.lb_member_vip_net
                 cls.lb_member_vip_ipv6_subnet = result['subnet']
                 cls.addClassResourceCleanup(
                     waiters.wait_for_not_found,
diff --git a/playbooks/act_stby_iptables/pre.yaml b/playbooks/act_stby_iptables/pre.yaml
new file mode 100644
index 0000000..179569a
--- /dev/null
+++ b/playbooks/act_stby_iptables/pre.yaml
@@ -0,0 +1,16 @@
+# Tempest runs under an unprivileged user, so that user cannot read
+# /etc/octavia/.ssh/ created by the Octavia devstack plugin. As a workaround,
+# a new SSH key is generated and given read access to all users.
+# See also OCTAVIA_USE_PREGENERATED_SSH_KEY and
+# OCTAVIA_PREGENERATED_SSH_KEY_PATH set in the
+# octavia-v2-act-stdby-dsvm-scenario job definition
+- hosts: all
+  name: Octavia DSVM jobs pre-run playbook
+  tasks:
+    - name: Generate Octavia RSA key
+      shell: ssh-keygen -t rsa -f /tmp/octavia_ssh_key -q -N ""
+    - name: Allow read permissions to other users
+      file:
+        path: /tmp/octavia_ssh_key
+        state: file
+        mode: 0644
diff --git a/requirements.txt b/requirements.txt
index c9f839c..3b78bc3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,7 +6,6 @@
 python-dateutil>=2.5.3 # BSD
 ipaddress>=1.0.17;python_version<'3.3' # PSF
 pbr!=2.1.0,>=2.0.0 # Apache-2.0
-octavia-lib>=1.0.0 # Apache-2.0
 oslo.config>=5.2.0 # Apache-2.0
 oslo.log>=3.36.0  # Apache-2.0
 oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
diff --git a/tox.ini b/tox.ini
index 64afde0..f27ed30 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,7 +5,7 @@
 
 [testenv]
 usedevelop = True
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt} {opts} {packages}
 setenv =
    VIRTUAL_ENV={envdir}
    PYTHONWARNINGS=default::DeprecationWarning
@@ -40,7 +40,7 @@
 [testenv:docs]
 basepython = python3
 deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/doc/requirements.txt
 whitelist_externals = rm
@@ -51,7 +51,7 @@
 [testenv:releasenotes]
 basepython = python3
 deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+    -c{env:UPPER_CONSTRAINTS_FILE:https://opendev.org/openstack/requirements/raw/branch/master/upper-constraints.txt}
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/doc/requirements.txt
 commands =
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index 8c8ac73..8ba6ac1 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -81,7 +81,7 @@
         s-proxy: false
         tempest: true
       devstack_plugins:
-        octavia: https://git.openstack.org/openstack/octavia.git
+        octavia: https://opendev.org/openstack/octavia.git
 
 - job:
     name: octavia-dsvm-live-base
@@ -102,7 +102,7 @@
       devstack_services:
         neutron-qos: true
       devstack_plugins:
-        neutron: https://git.openstack.org/openstack/neutron.git
+        neutron: https://opendev.org/openstack/neutron.git
       zuul_copy_output:
         '/var/log/dib-build' : logs
 
@@ -145,8 +145,8 @@
           OCTAVIA_NODES: "main:{{ hostvars['controller']['nodepool']['private_ipv4'] }},second:{{ hostvars['controller2']['nodepool']['private_ipv4'] }}"
           OCTAVIA_USE_PREGENERATED_CERTS: true
         devstack_plugins:
-          neutron: https://git.openstack.org/openstack/neutron.git
-          octavia: https://git.openstack.org/openstack/octavia.git
+          neutron: https://opendev.org/openstack/neutron.git
+          octavia: https://opendev.org/openstack/octavia.git
       controller2:
         devstack_localrc:
           # From devstack "vars:"
@@ -180,7 +180,7 @@
           OCTAVIA_USE_PREGENERATED_CERTS: true
           OCTAVIA_MGMT_PORT_IP: 192.168.0.4
         devstack_plugins:
-          octavia: https://git.openstack.org/openstack/octavia.git
+          octavia: https://opendev.org/openstack/octavia.git
     group-vars:
       controller:
         devstack_local_conf:
@@ -287,6 +287,11 @@
         USE_PYTHON3: False
 
 - job:
+    name: octavia-v2-dsvm-noop-api-stable-stein
+    parent: octavia-v2-dsvm-noop-api
+    override-checkout: stable/stein
+
+- job:
     name: octavia-v2-dsvm-noop-py2-api-stable-rocky
     parent: octavia-v2-dsvm-noop-py2-api
     override-checkout: stable/rocky
@@ -317,6 +322,11 @@
         USE_PYTHON3: False
 
 - job:
+    name: octavia-v2-dsvm-scenario-stable-stein
+    parent: octavia-v2-dsvm-scenario
+    override-checkout: stable/stein
+
+- job:
     name: octavia-v2-dsvm-py2-scenario-stable-rocky
     parent: octavia-v2-dsvm-py2-scenario
     override-checkout: stable/rocky
@@ -364,6 +374,15 @@
         OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: bionic
 
 - job:
+    name: octavia-v2-dsvm-scenario-ubuntu-xenial
+    parent: octavia-v2-dsvm-scenario
+    nodeset: openstack-single-node-xenial
+    vars:
+      devstack_localrc:
+        OCTAVIA_AMP_BASE_OS: ubuntu
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: xenial
+
+- job:
     name: octavia-v2-dsvm-tls-barbican
     parent: octavia-v2-dsvm-scenario
     required-projects:
@@ -376,10 +395,25 @@
       devstack_services:
         barbican: true
       devstack_plugins:
-        barbican: https://git.openstack.org/openstack/barbican.git
+        barbican: https://opendev.org/openstack/barbican.git
       devstack_localrc:
         TEMPEST_PLUGINS: '"/opt/stack/octavia-tempest-plugin /opt/stack/barbican-tempest-plugin"'
 
+- job:
+    name: octavia-v2-dsvm-tls-barbican-stable-stein
+    parent: octavia-v2-dsvm-tls-barbican
+    override-checkout: stable/stein
+
+- job:
+    name: octavia-v2-dsvm-tls-barbican-stable-rocky
+    parent: octavia-v2-dsvm-tls-barbican
+    override-checkout: stable/rocky
+
+- job:
+    name: octavia-v2-dsvm-tls-barbican-stable-queens
+    parent: octavia-v2-dsvm-tls-barbican
+    override-checkout: stable/queens
+
 # Temporary transitional aliases for gates used in other repos
 # Remove once octavia has transitioned job names
 - job:
@@ -393,3 +427,69 @@
 - job:
     name: octavia-v2-dsvm-scenario-centos-7
     parent: octavia-v2-dsvm-py2-scenario-centos-7
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario
+    parent: octavia-dsvm-live-base
+    pre-run: playbooks/act_stby_iptables/pre.yaml
+    vars:
+      devstack_localrc:
+        OCTAVIA_USE_PREGENERATED_SSH_KEY: True
+        OCTAVIA_PREGENERATED_SSH_KEY_PATH: /tmp/octavia_ssh_key
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            api_settings:
+              api_v1_enabled: False
+            controller_worker:
+              loadbalancer_topology: ACTIVE_STANDBY
+            task_flow:
+              engine: parallel
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              check_timeout: 180
+      tempest_test_regex: ^octavia_tempest_plugin.tests.act_stdby_scenario.v2.test_active_standby_iptables
+      tox_envlist: all
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-py2-scenario
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-py2-scenario-centos-7
+    parent: octavia-v2-act-stdby-dsvm-py2-scenario
+    nodeset: devstack-single-node-centos-7
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+        OCTAVIA_AMP_BASE_OS: centos
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 7
+        OCTAVIA_AMP_IMAGE_SIZE: 3
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              amphora_ssh_user: centos
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-scenario-stable-stein
+    parent: octavia-v2-act-stdby-dsvm-scenario
+    override-checkout: stable/stein
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-py2-scenario-stable-rocky
+    parent: octavia-v2-act-stdby-dsvm-py2-scenario
+    override-checkout: stable/rocky
+
+- job:
+    name: octavia-v2-act-stdby-dsvm-py2-scenario-stable-queens
+    parent: octavia-v2-act-stdby-dsvm-py2-scenario
+    override-checkout: stable/queens
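+
+# To run the active/standby scenario locally (illustrative; this mirrors the
+# tox_envlist and tempest_test_regex set on the jobs above):
+#   tox -e all -- octavia_tempest_plugin.tests.act_stdby_scenario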
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index efde1e2..af516d7 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -10,10 +10,12 @@
       jobs:
         - octavia-v2-dsvm-noop-api
         - octavia-v2-dsvm-noop-py2-api
+        - octavia-v2-dsvm-noop-api-stable-stein
         - octavia-v2-dsvm-noop-py2-api-stable-rocky
         - octavia-v2-dsvm-noop-py2-api-stable-queens
         - octavia-v2-dsvm-scenario
         - octavia-v2-dsvm-py2-scenario
+        - octavia-v2-dsvm-scenario-stable-stein
         - octavia-v2-dsvm-py2-scenario-stable-rocky
         - octavia-v2-dsvm-py2-scenario-stable-queens
         - octavia-v2-dsvm-scenario-centos-7:
@@ -24,16 +26,36 @@
             voting: false
         - octavia-v2-dsvm-py2-scenario-two-node:
             voting: false
+        - octavia-v2-act-stdby-dsvm-scenario:
+            voting: false
+        - octavia-v2-act-stdby-dsvm-py2-scenario:
+            voting: false
+        - octavia-v2-act-stdby-dsvm-py2-scenario-centos-7:
+            voting: false
+        - octavia-v2-act-stdby-dsvm-scenario-stable-stein:
+            voting: false
+        - octavia-v2-act-stdby-dsvm-py2-scenario-stable-rocky:
+            voting: false
+        - octavia-v2-act-stdby-dsvm-py2-scenario-stable-queens:
+            voting: false
         - octavia-v2-dsvm-tls-barbican:
             voting: false
+        - octavia-v2-dsvm-tls-barbican-stable-stein:
+            voting: false
+        - octavia-v2-dsvm-tls-barbican-stable-rocky:
+            voting: false
+        - octavia-v2-dsvm-tls-barbican-stable-queens:
+            voting: false
     gate:
       queue: octavia
       jobs:
         - octavia-v2-dsvm-noop-api
         - octavia-v2-dsvm-noop-py2-api
+        - octavia-v2-dsvm-noop-api-stable-stein
         - octavia-v2-dsvm-noop-py2-api-stable-rocky
         - octavia-v2-dsvm-noop-py2-api-stable-queens
         - octavia-v2-dsvm-scenario
         - octavia-v2-dsvm-py2-scenario
+        - octavia-v2-dsvm-scenario-stable-stein
         - octavia-v2-dsvm-py2-scenario-stable-rocky
         - octavia-v2-dsvm-py2-scenario-stable-queens