Merge "Add a scenario test for spare pool"
diff --git a/README.rst b/README.rst
index f2b51a7..b2c74ee 100644
--- a/README.rst
+++ b/README.rst
@@ -22,7 +22,7 @@
 
 * Free software: Apache license
 * Documentation: https://docs.openstack.org/octavia-tempest-plugin/latest/
-* Source: https://git.openstack.org/cgit/openstack/octavia-tempest-plugin
+* Source: https://opendev.org/openstack/octavia-tempest-plugin
 * Bugs: https://storyboard.openstack.org/#!/project/openstack/octavia-tempest-plugin
 
 Installing
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index 67898a0..11dc59f 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -151,8 +151,15 @@
     cfg.StrOpt('member_2_ipv6_subnet_cidr',
                default='fd77:1457:4cf0:26a8::/64',
                help='CIDR format subnet to use for the member 2 ipv6 subnet.'),
+    # Amphora specific options
+    cfg.StrOpt('amphora_ssh_user',
+               default='ubuntu',
+               help='The amphora SSH user.'),
+    cfg.StrOpt('amphora_ssh_key',
+               default='/tmp/octavia_ssh_key',
+               help='The amphora SSH key file.'),
     # Environment specific options
-    # These are used to accomidate clouds with specific limitations
+    # These are used to accommodate clouds with specific limitations
     cfg.IntOpt('random_server_name_length',
                default=0,
                help='If non-zero, generate a random name of the length '
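
For context, a minimal tempest.conf snippet consuming the new options might
look like this (a sketch only; the values mirror the defaults above and the
centos override added in zuul.d/jobs.yaml below):

```ini
[load_balancer]
# SSH login the active/standby test uses to reach each amphora
amphora_ssh_user = ubuntu
# Private key file; must be readable by the (unprivileged) tempest user
amphora_ssh_key = /tmp/octavia_ssh_key
```
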
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py b/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/__init__.py
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/__init__.py
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
new file mode 100644
index 0000000..40418a2
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -0,0 +1,303 @@
+# Copyright 2019 Rackspace US Inc.  All rights reserved.
+# Copyright 2019 Red Hat Inc.  All rights reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import testtools
+
+from oslo_log import log as logging
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils.linux import remote_client
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+@testtools.skipUnless(
+    CONF.validation.run_validation,
+    'Active-Standby tests will not work without run_validation enabled.')
+class ActiveStandbyIptablesScenarioTest(
+        test_base.LoadBalancerBaseTestWithCompute):
+
+    @classmethod
+    def skip_checks(cls):
+        super(ActiveStandbyIptablesScenarioTest, cls).skip_checks()
+
+        if CONF.load_balancer.provider not in ['amphora', 'octavia']:
+            raise cls.skipException("Amphora tests require provider 'amphora' "
+                                    "or 'octavia' (alias to 'amphora', "
+                                    "deprecated) set.")
+
+    @classmethod
+    def resource_setup(cls):
+        """Setup resources needed by the tests."""
+        super(ActiveStandbyIptablesScenarioTest, cls).resource_setup()
+
+        lb_name = data_utils.rand_name("lb_member_lb1_actstdby")
+        lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+                     const.NAME: lb_name}
+
+        # TODO(rm_work): Make this work with ipv6 and split this test for both
+        ip_version = 4
+        cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+        lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+        cls.lb_id = lb[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_lb_client.cleanup_loadbalancer,
+            cls.lb_id)
+
+        if CONF.validation.connect_method == 'floating':
+            port_id = lb[const.VIP_PORT_ID]
+            result = cls.lb_mem_float_ip_client.create_floatingip(
+                floating_network_id=CONF.network.public_network_id,
+                port_id=port_id)
+            floating_ip = result['floatingip']
+            LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+            cls.addClassResourceCleanup(
+                waiters.wait_for_not_found,
+                cls.lb_mem_float_ip_client.delete_floatingip,
+                cls.lb_mem_float_ip_client.show_floatingip,
+                floatingip_id=floating_ip['id'])
+            cls.lb_vip_address = floating_ip['floating_ip_address']
+        else:
+            cls.lb_vip_address = lb[const.VIP_ADDRESS]
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.lb_build_interval,
+                                CONF.load_balancer.lb_build_timeout)
+
+        listener_name = data_utils.rand_name("lb_member_listener1_actstdby")
+        listener_kwargs = {
+            const.NAME: listener_name,
+            const.PROTOCOL: const.HTTP,
+            const.PROTOCOL_PORT: '80',
+            const.LOADBALANCER_ID: cls.lb_id,
+        }
+        listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+        cls.listener_id = listener[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_listener_client.cleanup_listener,
+            cls.listener_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        pool_name = data_utils.rand_name("lb_member_pool1_actstdby")
+        pool_kwargs = {
+            const.NAME: pool_name,
+            const.PROTOCOL: const.HTTP,
+            const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+            const.LISTENER_ID: cls.listener_id,
+        }
+        pool = cls.mem_pool_client.create_pool(**pool_kwargs)
+        cls.pool_id = pool[const.ID]
+        cls.addClassResourceCleanup(
+            cls.mem_pool_client.cleanup_pool,
+            cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+        waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+                                cls.lb_id, const.PROVISIONING_STATUS,
+                                const.ACTIVE,
+                                CONF.load_balancer.build_interval,
+                                CONF.load_balancer.build_timeout)
+
+        # Set up Member 1 for Webserver 1
+        member1_name = data_utils.rand_name("lb_member_member1_actstdby")
+        member1_kwargs = {
+            const.POOL_ID: cls.pool_id,
+            const.NAME: member1_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: cls.webserver1_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if cls.lb_member_1_subnet:
+            member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]
+
+        member1 = cls.mem_member_client.create_member(
+            **member1_kwargs)
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member1[const.ID], pool_id=cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+        waiters.wait_for_status(
+            cls.mem_lb_client.show_loadbalancer, cls.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Set up Member 2 for Webserver 2
+        member2_name = data_utils.rand_name("lb_member_member2_actstdby")
+        member2_kwargs = {
+            const.POOL_ID: cls.pool_id,
+            const.NAME: member2_name,
+            const.ADMIN_STATE_UP: True,
+            const.ADDRESS: cls.webserver2_ip,
+            const.PROTOCOL_PORT: 80,
+        }
+        if cls.lb_member_2_subnet:
+            member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]
+
+        member2 = cls.mem_member_client.create_member(
+            **member2_kwargs)
+        cls.addClassResourceCleanup(
+            cls.mem_member_client.cleanup_member,
+            member2[const.ID], pool_id=cls.pool_id,
+            lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+        waiters.wait_for_status(
+            cls.mem_lb_client.show_loadbalancer, cls.lb_id,
+            const.PROVISIONING_STATUS, const.ACTIVE,
+            CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+    @classmethod
+    def _log_vip_traffic(cls, amp, log_prefix):
+        ssh_key = cls._get_amphora_ssh_key()
+        linux_client = remote_client.RemoteClient(
+            amp['lb_network_ip'], CONF.load_balancer.amphora_ssh_user,
+            pkey=ssh_key)
+        linux_client.validate_authentication()
+
+        # Allow logging from non-init namespaces
+        # https://lore.kernel.org/patchwork/patch/673714/
+        linux_client.exec_command('echo 1 | sudo tee '
+                                  '/proc/sys/net/netfilter/nf_log_all_netns')
+
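+        # Tag packets addressed to the VIP (ha_ip) inside the amphora-haproxy
+        # namespace. Only the amphora currently holding the VIP (the VRRP
+        # MASTER) will emit kernel log lines with this prefix.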
+        linux_client.exec_command('sudo ip netns exec amphora-haproxy '
+                                  'iptables -I INPUT 1 -d {0} -j LOG '
+                                  '--log-prefix "{1}"'
+                                  .format(amp['ha_ip'], log_prefix))
+
+    @classmethod
+    def _has_vip_traffic(cls, ip_address, log_prefix):
+        ssh_key = cls._get_amphora_ssh_key()
+        linux_client = remote_client.RemoteClient(
+            ip_address, CONF.load_balancer.amphora_ssh_user, pkey=ssh_key)
+        linux_client.validate_authentication()
+
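+        # grep exits non-zero when the prefix is absent, which exec_command
+        # surfaces as SSHExecCommandFailed, i.e. this amphora saw no VIP
+        # traffic.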
+        try:
+            linux_client.exec_command('sudo journalctl -t kernel | grep {0}'
+                                      .format(log_prefix))
+            return True
+        except exceptions.SSHExecCommandFailed:
+            return False
+
+    @classmethod
+    def _get_active_standby_amps(cls, amps, log_prefix):
+        active = None
+        stby = None
+        for amp in amps:
+            if cls._has_vip_traffic(amp['lb_network_ip'], log_prefix):
+                if active:
+                    LOG.exception('Failed to determine single active amphora.')
+                    raise Exception('More than one amphora is forwarding VIP '
+                                    'traffic.')
+                active = amp
+            else:
+                stby = amp
+
+        return active, stby
+
+    @classmethod
+    def _get_amphora_ssh_key(cls):
+        key_file = CONF.load_balancer.amphora_ssh_key
+        if not key_file:
+            raise Exception("SSH key file not provided.")
+        if not os.path.isfile(key_file):
+            raise Exception("Could not find amphora ssh key file {1}."
+                            .format(key_file))
+        with open(key_file, 'r') as f:
+            return f.read()
+
+    @testtools.skipIf(CONF.load_balancer.test_with_noop,
+                      'Active/Standby tests will not work in noop mode.')
+    @decorators.idempotent_id('deab2b3f-62c7-4a05-9e92-aa45a04773fd')
+    def test_active_standby_vrrp_failover(self):
+        """Tests active/standby VRRP failover
+
+        * Tests the load balancer to make sure it is functioning
+        * Identifies the Master and Backup amphora
+        * Deletes the Master amphora
+        * Sends traffic through the load balancer
+        * Validates that the Backup has assumed the Master role
+        """
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Check there are two amphorae associated to the load balancer
+        amps = self.os_admin.amphora_client.list_amphorae(
+            query_params='{loadbalancer_id}={lb_id}'.format(
+                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
+        self.assertEqual(2, len(amps))
+
+        # Log VIP traffic
+        for amp in amps:
+            self._log_vip_traffic(amp, 'ACTSTBY-1')
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Which amphora is the active?
+        active = self._get_active_standby_amps(amps, 'ACTSTBY-1')[0]
+
+        # Delete active amphora
+        self.os_admin_servers_client.delete_server(active[const.COMPUTE_ID])
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Wait for the amphora failover to start
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.PENDING_UPDATE, CONF.load_balancer.check_interval,
+            CONF.load_balancer.check_timeout)
+
+        # Wait for the load balancer to return to ACTIVE
+        waiters.wait_for_status(
+            self.mem_lb_client.show_loadbalancer,
+            self.lb_id, const.PROVISIONING_STATUS,
+            const.ACTIVE, CONF.load_balancer.lb_build_interval,
+            CONF.load_balancer.lb_build_timeout)
+
+        # Check again there are two amphorae associated to the load balancer
+        amps = self.os_admin.amphora_client.list_amphorae(
+            query_params='{loadbalancer_id}={lb_id}'.format(
+                loadbalancer_id=const.LOADBALANCER_ID, lb_id=self.lb_id))
+        self.assertEqual(2, len(amps))
+
+        # Log VIP traffic
+        for amp in amps:
+            self._log_vip_traffic(amp, 'ACTSTBY-2')
+
+        # Send some traffic
+        self.check_members_balanced(self.lb_vip_address)
+
+        # Ensure only one amphora is handling VIP traffic
+        self._get_active_standby_amps(amps, 'ACTSTBY-2')
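
Given the job definition below (tox_envlist: all, with tempest_test_regex
pointing at this module), the new test can be exercised locally with something
like the following, assuming a devstack with loadbalancer_topology set to
ACTIVE_STANDBY and the pregenerated SSH key in place:

```shell
tox -e all -- octavia_tempest_plugin.tests.act_stdby_scenario.v2.test_active_standby_iptables
```
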
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 599305a..691c61c 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -178,7 +178,7 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb2_listener-list")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
+            name=lb_name, provider=CONF.load_balancer.provider,
             vip_network_id=self.lb_member_vip_net[const.ID])
         lb_id = lb[const.ID]
         self.addCleanup(
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 4990c79..66d26cf 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -120,8 +120,9 @@
         self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
         self.assertIsNotNone(lb[const.VIP_PORT_ID])
         if lb_kwargs[const.VIP_SUBNET_ID]:
-            self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
-                             lb[const.VIP_ADDRESS])
+            if ip_version == 4 or self.lb_member_vip_ipv6_subnet_stateful:
+                self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
+                                 lb[const.VIP_ADDRESS])
             self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
                              lb[const.VIP_SUBNET_ID])
 
@@ -149,7 +150,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-delete")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -195,7 +197,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-cascade_delete")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -299,6 +302,7 @@
         lb = self.mem_lb_client.create_loadbalancer(
             admin_state_up=True,
             description=lb_description,
+            provider=CONF.load_balancer.provider,
             name=lb_name,
             vip_network_id=self.lb_member_vip_net[const.ID])
         self.addCleanup(
@@ -329,6 +333,7 @@
         lb = self.mem_lb_client.create_loadbalancer(
             admin_state_up=False,
             description=lb_description,
+            provider=CONF.load_balancer.provider,
             name=lb_name,
             vip_network_id=self.lb_member_vip_net[const.ID])
         self.addCleanup(
@@ -663,7 +668,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-show_stats")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -724,7 +730,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-status")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
@@ -797,7 +804,8 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb1-failover")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name, vip_network_id=self.lb_member_vip_net[const.ID])
+            name=lb_name, provider=CONF.load_balancer.provider,
+            vip_network_id=self.lb_member_vip_net[const.ID])
         self.addClassResourceCleanup(
             self.mem_lb_client.cleanup_loadbalancer,
             lb[const.ID])
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index 63d9e46..a63bddb 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -193,7 +193,7 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
+            name=lb_name, provider=CONF.load_balancer.provider,
             vip_network_id=self.lb_member_vip_net[const.ID])
         lb_id = lb[const.ID]
         self.addCleanup(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
index 165424b..b91a368 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
@@ -78,7 +78,7 @@
         """
         lb_name = data_utils.rand_name("lb_member_lb2_amphora-list")
         lb = self.mem_lb_client.create_loadbalancer(
-            name=lb_name,
+            name=lb_name, provider=CONF.load_balancer.provider,
             vip_network_id=self.lb_member_vip_net[const.ID])
         lb_id = lb[const.ID]
         self.addCleanup(
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 31b02c8..e85fb0a 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -157,6 +157,7 @@
                                                  uuidutils.generate_uuid()}
                 cls.lb_member_1_ipv6_subnet = {'id': uuidutils.generate_uuid()}
                 cls.lb_member_2_ipv6_subnet = {'id': uuidutils.generate_uuid()}
+                cls.lb_member_vip_ipv6_subnet_stateful = True
             return
         elif CONF.load_balancer.test_network_override:
             if conf_lb.test_subnet_override:
@@ -182,6 +183,10 @@
                 cls.lb_member_vip_ipv6_subnet = override_ipv6_subnet
                 cls.lb_member_1_ipv6_subnet = override_ipv6_subnet
                 cls.lb_member_2_ipv6_subnet = override_ipv6_subnet
+                cls.lb_member_vip_ipv6_subnet_stateful = False
+                if (override_ipv6_subnet[0]['ipv6_address_mode'] ==
+                        'dhcpv6-stateful'):
+                    cls.lb_member_vip_ipv6_subnet_stateful = True
             else:
                 cls.lb_member_vip_ipv6_subnet = None
                 cls.lb_member_1_ipv6_subnet = None
@@ -303,6 +308,10 @@
             priv_ipv6_subnet = cls.os_admin.subnets_client.list_subnets(
                 name='ipv6-private-subnet')['subnets']
 
+            cls.lb_member_vip_ipv6_subnet_stateful = False
+            if (priv_ipv6_subnet and
+                    priv_ipv6_subnet[0]['ipv6_address_mode'] ==
+                    'dhcpv6-stateful'):
+                cls.lb_member_vip_ipv6_subnet_stateful = True
             if len(priv_ipv6_subnet) == 1:
                 cls.lb_member_vip_ipv6_subnet = priv_ipv6_subnet[0]
                 cls.lb_member_vip_ipv6_net = {
@@ -315,6 +324,7 @@
                     'ip_version': 6}
                 result = cls.lb_mem_subnet_client.create_subnet(
                     **subnet_kwargs)
+                cls.lb_member_vip_ipv6_net = cls.lb_member_vip_net
                 cls.lb_member_vip_ipv6_subnet = result['subnet']
                 cls.addClassResourceCleanup(
                     waiters.wait_for_not_found,
@@ -456,6 +466,10 @@
                     subnet = cls.os_admin.subnets_client.show_subnet(subnet_id)
                     network = ipaddress.IPv6Network(subnet['subnet']['cidr'])
                     lb_vip_address = str(network[ip_index])
+                    # If the subnet is IPv6 slaac or dhcpv6-stateless
+                    # neutron does not allow a fixed IP
+                    if not cls.lb_member_vip_ipv6_subnet_stateful:
+                        use_fixed_ip = False
             lb_kwargs[const.VIP_SUBNET_ID] = subnet_id
             if use_fixed_ip:
                 lb_kwargs[const.VIP_ADDRESS] = lb_vip_address
diff --git a/octavia_tempest_plugin/tests/validators.py b/octavia_tempest_plugin/tests/validators.py
index 2dc1d64..773fcc4 100644
--- a/octavia_tempest_plugin/tests/validators.py
+++ b/octavia_tempest_plugin/tests/validators.py
@@ -75,7 +75,7 @@
                 return
             except requests.exceptions.Timeout:
                 # Don't sleep as we have already waited the interval.
-                LOG.info('Request for () timed out. Retrying.'.format(URL))
+                LOG.info('Request for {} timed out. Retrying.'.format(URL))
             except (exceptions.InvalidHttpSuccessCode,
                     exceptions.InvalidHTTPResponseBody,
                     requests.exceptions.SSLError):
diff --git a/playbooks/act_stby_iptables/pre.yaml b/playbooks/act_stby_iptables/pre.yaml
new file mode 100644
index 0000000..179569a
--- /dev/null
+++ b/playbooks/act_stby_iptables/pre.yaml
@@ -0,0 +1,16 @@
+# Tempest runs under an unprivileged user, thus the user cannot read
+# /etc/octavia/.ssh/ created by the Octavia devstack plugin. As work-around,
+# a new SSH key is generated and given read access to all users.
+# See also OCTAVIA_USE_PREGENERATED_SSH_KEY and
+# OCTAVIA_PREGENERATED_SSH_KEY_PATH set in the
+# octavia-v2-act-stdby-dsvm-scenario job definition
+- hosts: all
+  name: Octavia DSVM jobs pre-run playbook
+  tasks:
+    - name: Generate Octavia RSA key
+      shell: ssh-keygen -t rsa -f /tmp/octavia_ssh_key -q -N ""
+    - name: Allow read permissions to other users
+      file:
+        path: /tmp/octavia_ssh_key
+        state: file
+        mode: 0644
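+# After this play, /tmp/octavia_ssh_key is world-readable, so the
+# unprivileged tempest user can load it through the amphora_ssh_key option.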
diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py
index a435eb2..a346f01 100644
--- a/releasenotes/source/conf.py
+++ b/releasenotes/source/conf.py
@@ -38,10 +38,14 @@
 # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
 # ones.
 extensions = [
-    'oslosphinx',
+    'openstackdocstheme',
     'reno.sphinxext',
 ]
 
+# openstackdocstheme options
+repository_name = 'openstack/octavia-tempest-plugin'
+use_storyboard = True
+
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
 
@@ -111,7 +115,7 @@
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
-html_theme = 'default'
+html_theme = 'openstackdocs'
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
@@ -189,28 +193,29 @@
 # html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'GlanceReleaseNotesdoc'
+htmlhelp_basename = 'OctaviaTempestPluginReleaseNotesdoc'
 
 
 # -- Options for LaTeX output ---------------------------------------------
 
-latex_elements = {
-    # The paper size ('letterpaper' or 'a4paper').
-    # 'papersize': 'letterpaper',
-
-    # The font size ('10pt', '11pt' or '12pt').
-    # 'pointsize': '10pt',
-
-    # Additional stuff for the LaTeX preamble.
-    # 'preamble': '',
-}
+# latex_elements = {
+#    # The paper size ('letterpaper' or 'a4paper').
+#    # 'papersize': 'letterpaper',
+#
+#    # The font size ('10pt', '11pt' or '12pt').
+#    # 'pointsize': '10pt',
+#
+#    # Additional stuff for the LaTeX preamble.
+#    # 'preamble': '',
+# }
 
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title,
 #  author, documentclass [howto, manual, or own class]).
 latex_documents = [
-    ('index', 'GlanceReleaseNotes.tex', u'Glance Release Notes Documentation',
-     u'Glance Developers', 'manual'),
+    ('index', 'OctaviaTempestPluginReleaseNotes.tex',
+     u'Octavia Tempest Plugin Release Notes Documentation',
+     u'Octavia Developers', 'manual'),
 ]
 
 # The name of an image file (relative to this directory) to place at the top of
@@ -239,8 +244,9 @@
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'glancereleasenotes', u'Glance Release Notes Documentation',
-     [u'Glance Developers'], 1)
+    ('index', 'octaviatempestpluginreleasenotes',
+     u'Octavia Tempest Plugin Release Notes Documentation',
+     [u'Octavia Developers'], 1)
 ]
 
 # If true, show URL addresses after external links.
@@ -253,9 +259,10 @@
 # (source start file, target name, title, author,
 #  dir menu entry, description, category)
 texinfo_documents = [
-    ('index', 'GlanceReleaseNotes', u'Glance Release Notes Documentation',
-     u'Glance Developers', 'GlanceReleaseNotes',
-     'One line description of project.',
+    ('index', 'OctaviaTempestPluginReleaseNotes',
+     u'Octavia Tempest Plugin Release Notes Documentation',
+     u'Octavia Developers', 'OctaviaTempestPluginReleaseNotes',
+     'Octavia Load Balancing as a Service for OpenStack.',
      'Miscellaneous'),
 ]
 
diff --git a/tox.ini b/tox.ini
index 64afde0..1ca36b7 100644
--- a/tox.ini
+++ b/tox.ini
@@ -5,11 +5,12 @@
 
 [testenv]
 usedevelop = True
-install_command = pip install -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt} {opts} {packages}
+install_command = pip install {opts} {packages}
 setenv =
    VIRTUAL_ENV={envdir}
    PYTHONWARNINGS=default::DeprecationWarning
-deps = -r{toxinidir}/test-requirements.txt
+deps = -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
+       -r{toxinidir}/test-requirements.txt
 commands =
   stestr run {posargs}
   stestr slowest
@@ -40,7 +41,7 @@
 [testenv:docs]
 basepython = python3
 deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/doc/requirements.txt
 whitelist_externals = rm
@@ -51,7 +52,7 @@
 [testenv:releasenotes]
 basepython = python3
 deps =
-    -c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
+    -c{env:UPPER_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
     -r{toxinidir}/requirements.txt
     -r{toxinidir}/doc/requirements.txt
 commands =
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index d646ab9..5c89676 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -49,7 +49,7 @@
       - ^releasenotes/.*$
     vars:
       devstack_localrc:
-        TEMPEST_PLUGINS: "'/opt/stack/octavia-tempest-plugin'"
+        TEMPEST_PLUGINS: /opt/stack/octavia-tempest-plugin
         USE_PYTHON3: true
       devstack_local_conf:
         post-config:
@@ -99,12 +99,16 @@
               # Set these higher for non-nested virt nodepool instances
               connection_max_retries: 300
               build_active_retries: 300
+            amphora_agent:
+              forward_all_logs: True
       devstack_services:
         neutron-qos: true
       devstack_plugins:
         neutron: https://opendev.org/openstack/neutron.git
       zuul_copy_output:
         '/var/log/dib-build' : logs
+        '/var/log/octavia-amphora.log': logs
+        '/var/log/octavia-tenant-traffic.log': logs
 
 - job:
     name: octavia-dsvm-live-two-node-base
@@ -136,7 +140,7 @@
           MULTI_HOST: 1
           SERVICE_HOST: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
           HOST_IP: "{{ hostvars['controller']['nodepool']['private_ipv4'] }}"
-          TEMPEST_PLUGINS: "'/opt/stack/octavia-tempest-plugin'"
+          TEMPEST_PLUGINS: /opt/stack/octavia-tempest-plugin
           USE_PYTHON3: true
           # Octavia specific settings
           OCTAVIA_CONTROLLER_IP_PORT_LIST: 192.168.0.3:5555,192.168.0.4:5555
@@ -147,6 +151,10 @@
         devstack_plugins:
           neutron: https://opendev.org/openstack/neutron.git
           octavia: https://opendev.org/openstack/octavia.git
+        zuul_copy_output:
+          '/var/log/dib-build' : logs
+          '/var/log/octavia-amphora.log': logs
+          '/var/log/octavia-tenant-traffic.log': logs
       controller2:
         devstack_localrc:
           # From devstack "vars:"
@@ -181,6 +189,9 @@
           OCTAVIA_MGMT_PORT_IP: 192.168.0.4
         devstack_plugins:
           octavia: https://opendev.org/openstack/octavia.git
+        zuul_copy_output:
+          '/var/log/octavia-amphora.log': logs
+          '/var/log/octavia-tenant-traffic.log': logs
     group-vars:
       controller:
         devstack_local_conf:
@@ -190,6 +201,8 @@
                 debug: True
               api_settings:
                 api_v1_enabled: False
+              amphora_agent:
+                forward_all_logs: True
         devstack_services:
           base: false
           barbican: false
@@ -230,6 +243,8 @@
                 debug: True
               api_settings:
                 api_v1_enabled: False
+              amphora_agent:
+                forward_all_logs: True
         devstack_services:
           c-vol: false
           c-bak: false
@@ -374,6 +389,15 @@
         OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: bionic
 
 - job:
+    name: octavia-v2-dsvm-scenario-ubuntu-xenial
+    parent: octavia-v2-dsvm-scenario
+    nodeset: openstack-single-node-xenial
+    vars:
+      devstack_localrc:
+        OCTAVIA_AMP_BASE_OS: ubuntu
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: xenial
+
+- job:
     name: octavia-v2-dsvm-tls-barbican
     parent: octavia-v2-dsvm-scenario
     required-projects:
@@ -455,3 +479,66 @@
 - job:
     name: octavia-v2-dsvm-scenario-centos-7
     parent: octavia-v2-dsvm-py2-scenario-centos-7
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-scenario
+    parent: octavia-dsvm-live-base
+    pre-run: playbooks/act_stby_iptables/pre.yaml
+    vars:
+      devstack_localrc:
+        OCTAVIA_USE_PREGENERATED_SSH_KEY: True
+        OCTAVIA_PREGENERATED_SSH_KEY_PATH: /tmp/octavia_ssh_key
+      devstack_local_conf:
+        post-config:
+          $OCTAVIA_CONF:
+            api_settings:
+              api_v1_enabled: False
+            controller_worker:
+              loadbalancer_topology: ACTIVE_STANDBY
+            task_flow:
+              engine: parallel
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              check_timeout: 180
+      tempest_test_regex: ^octavia_tempest_plugin.tests.act_stdby_scenario.v2.test_active_standby_iptables
+      tox_envlist: all
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    parent: octavia-v2-act-stdby-iptables-dsvm-scenario
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    nodeset: devstack-single-node-centos-7
+    vars:
+      devstack_localrc:
+        USE_PYTHON3: False
+        OCTAVIA_AMP_BASE_OS: centos
+        OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 7
+        OCTAVIA_AMP_IMAGE_SIZE: 3
+      devstack_local_conf:
+        test-config:
+          "$TEMPEST_CONFIG":
+            load_balancer:
+              amphora_ssh_user: centos
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-scenario-stable-stein
+    parent: octavia-v2-act-stdby-iptables-dsvm-scenario
+    override-checkout: stable/stein
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    override-checkout: stable/rocky
+
+- job:
+    name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens
+    parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
+    override-checkout: stable/queens
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index 3050b26..2bb813e 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -5,6 +5,7 @@
       - check-requirements
       - publish-openstack-docs-pti
       - tempest-plugin-jobs
+      - release-notes-jobs-python3
     check:
       jobs:
         - octavia-v2-dsvm-noop-api
@@ -25,6 +26,18 @@
             voting: false
         - octavia-v2-dsvm-py2-scenario-two-node:
             voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-scenario:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-scenario-stable-stein:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky:
+            voting: false
+        - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens:
+            voting: false
         - octavia-v2-dsvm-tls-barbican:
             voting: false
         - octavia-v2-dsvm-tls-barbican-stable-stein: