Add traffic tests using an IPv6 VIP
Adds a traffic scenario test that uses an IPv6 VIP address with mixed
IPv4/IPv6 members. It verifies that connections to the IPv6 VIP are
evenly balanced across the mixed members.
Change-Id: I6bb7be14379174be9018a74b07356ecd85089f45
Story: 1627892
Task: 27532
Depends-On: https://review.openstack.org/#/c/611460/
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 35f2dde..4990c79 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -108,12 +108,16 @@
self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
else:
self.assertEqual(const.ONLINE, lb[const.OPERATING_STATUS])
+ if ip_version == 4:
+ self.assertEqual(self.lb_member_vip_net[const.ID],
+ lb[const.VIP_NETWORK_ID])
+ else:
+ self.assertEqual(self.lb_member_vip_ipv6_net[const.ID],
+ lb[const.VIP_NETWORK_ID])
self.assertEqual(self.os_roles_lb_member.credentials.project_id,
lb[const.PROJECT_ID])
self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
- self.assertEqual(self.lb_member_vip_net[const.ID],
- lb[const.VIP_NETWORK_ID])
self.assertIsNotNone(lb[const.VIP_PORT_ID])
if lb_kwargs[const.VIP_SUBNET_ID]:
self.assertEqual(lb_kwargs[const.VIP_ADDRESS],
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
new file mode 100644
index 0000000..690ea8f
--- /dev/null
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_ipv6_traffic_ops.py
@@ -0,0 +1,169 @@
+# Copyright 2018 Rackspace, US Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+
+
+class IPv6TrafficOperationsScenarioTest(
+ test_base.LoadBalancerBaseTestWithCompute):
+ """Test traffic operations with an IPv6 VIP."""
+
+ @classmethod
+ def skip_checks(cls):
+ super(IPv6TrafficOperationsScenarioTest, cls).skip_checks()
+
+ if not CONF.validation.run_validation:
+ raise cls.skipException('Traffic tests will not work without '
+ 'run_validation enabled.')
+
+ if CONF.load_balancer.test_with_noop:
+ raise cls.skipException('Traffic tests will not work in noop '
+ 'mode.')
+
+ if not CONF.load_balancer.test_with_ipv6:
+ raise cls.skipException('IPv6 traffic ops tests require Octavia '
+ 'IPv6 testing enabled')
+
+ @classmethod
+ def resource_setup(cls):
+ """Setup resources needed by the tests."""
+ super(IPv6TrafficOperationsScenarioTest, cls).resource_setup()
+
+ lb_name = data_utils.rand_name("lb_member_lb1_ipv6_ops")
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
+
+ ip_version = 6
+ cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+ lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+ cls.lb_id = lb[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_lb_client.cleanup_loadbalancer,
+ cls.lb_id)
+
+ cls.lb_vip_address = lb[const.VIP_ADDRESS]
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
+
+ listener_name = data_utils.rand_name("lb_member_listener1_ipv6_ops")
+ listener_kwargs = {
+ const.NAME: listener_name,
+ const.PROTOCOL: const.HTTP,
+ const.PROTOCOL_PORT: '80',
+ const.LOADBALANCER_ID: cls.lb_id,
+ }
+ listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+ cls.listener_id = listener[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_listener_client.cleanup_listener,
+ cls.listener_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ pool_name = data_utils.rand_name("lb_member_pool1_ipv6_ops")
+ pool_kwargs = {
+ const.NAME: pool_name,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.LISTENER_ID: cls.listener_id,
+ }
+ pool = cls.mem_pool_client.create_pool(**pool_kwargs)
+ cls.pool_id = pool[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_pool_client.cleanup_pool,
+ cls.pool_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ @decorators.idempotent_id('219ac17d-c5c1-4e7e-a9d5-0764d7ce7746')
+ def test_ipv6_vip_mixed_ipv4_ipv6_members_traffic(self):
+ """Tests traffic through a loadbalancer with IPv4 and IPv6 members.
+
+ * Set up members on a loadbalancer.
+ * Test traffic to ensure it is balanced properly.
+ """
+ # Set up Member 1 for Webserver 1
+ member1_name = data_utils.rand_name("lb_member_member1-traffic")
+ member1_kwargs = {
+ const.POOL_ID: self.pool_id,
+ const.NAME: member1_name,
+ const.ADMIN_STATE_UP: True,
+ const.ADDRESS: self.webserver1_ip,
+ const.PROTOCOL_PORT: 80,
+ }
+ if self.lb_member_1_subnet:
+ member1_kwargs[const.SUBNET_ID] = self.lb_member_1_subnet[const.ID]
+
+ member1 = self.mem_member_client.create_member(
+ **member1_kwargs)
+ self.addCleanup(
+ self.mem_member_client.cleanup_member,
+ member1[const.ID], pool_id=self.pool_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ # Set up Member 2 for Webserver 2
+ member2_name = data_utils.rand_name("lb_member_member2-traffic")
+ member2_kwargs = {
+ const.POOL_ID: self.pool_id,
+ const.NAME: member2_name,
+ const.ADMIN_STATE_UP: True,
+ const.ADDRESS: self.webserver2_ipv6,
+ const.PROTOCOL_PORT: 80,
+ }
+ if self.lb_member_2_ipv6_subnet:
+ member2_kwargs[const.SUBNET_ID] = (
+ self.lb_member_2_ipv6_subnet[const.ID])
+
+ member2 = self.mem_member_client.create_member(
+ **member2_kwargs)
+ self.addCleanup(
+ self.mem_member_client.cleanup_member,
+ member2[const.ID], pool_id=self.pool_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ # Send some traffic
+ self.check_members_balanced(self.lb_vip_address)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
index a10784b..cfc3ee5 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_load_balancer.py
@@ -79,8 +79,12 @@
self.assertEqual(self.os_roles_lb_member.credentials.project_id,
lb[const.PROJECT_ID])
self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
- self.assertEqual(self.lb_member_vip_net[const.ID],
- lb[const.VIP_NETWORK_ID])
+ if ip_version == 4:
+ self.assertEqual(self.lb_member_vip_net[const.ID],
+ lb[const.VIP_NETWORK_ID])
+ else:
+ self.assertEqual(self.lb_member_vip_ipv6_net[const.ID],
+ lb[const.VIP_NETWORK_ID])
self.assertIsNotNone(lb[const.VIP_PORT_ID])
if lb_kwargs[const.VIP_SUBNET_ID]:
self.assertEqual(lb_kwargs[const.VIP_SUBNET_ID],
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 085c814..c443347 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -140,6 +140,7 @@
cls.lb_member_2_net = {'id': uuidutils.generate_uuid()}
cls.lb_member_2_subnet = {'id': uuidutils.generate_uuid()}
if CONF.load_balancer.test_with_ipv6:
+ cls.lb_member_vip_ipv6_net = {'id': uuidutils.generate_uuid()}
cls.lb_member_vip_ipv6_subnet = {'id':
uuidutils.generate_uuid()}
cls.lb_member_1_ipv6_subnet = {'id': uuidutils.generate_uuid()}
@@ -250,20 +251,30 @@
# Create tenant VIP IPv6 subnet
if CONF.load_balancer.test_with_ipv6:
- subnet_kwargs = {
- 'name': data_utils.rand_name("lb_member_vip_ipv6_subnet"),
- 'network_id': cls.lb_member_vip_net['id'],
- 'cidr': CONF.load_balancer.vip_ipv6_subnet_cidr,
- 'ip_version': 6}
- result = cls.lb_mem_subnet_client.create_subnet(**subnet_kwargs)
- cls.lb_member_vip_ipv6_subnet = result['subnet']
+ # See if ipv6-public-subnet exists and use it if so.
+ pub_ipv6_subnet = cls.os_admin.subnets_client.list_subnets(
+ name='ipv6-public-subnet')['subnets']
+
+ if len(pub_ipv6_subnet) == 1:
+ cls.lb_member_vip_ipv6_subnet = pub_ipv6_subnet[0]
+ cls.lb_member_vip_ipv6_net = {
+ 'id': pub_ipv6_subnet[0]['network_id']}
+ else:
+ subnet_kwargs = {
+ 'name': data_utils.rand_name("lb_member_vip_ipv6_subnet"),
+ 'network_id': cls.lb_member_vip_net['id'],
+ 'cidr': CONF.load_balancer.vip_ipv6_subnet_cidr,
+ 'ip_version': 6}
+ result = cls.lb_mem_subnet_client.create_subnet(
+ **subnet_kwargs)
+ cls.lb_member_vip_ipv6_subnet = result['subnet']
+ cls.addClassResourceCleanup(
+ waiters.wait_for_not_found,
+ cls.lb_mem_subnet_client.delete_subnet,
+ cls.lb_mem_subnet_client.show_subnet,
+ cls.lb_member_vip_ipv6_subnet['id'])
LOG.info('lb_member_vip_ipv6_subnet: {}'.format(
cls.lb_member_vip_ipv6_subnet))
- cls.addClassResourceCleanup(
- waiters.wait_for_not_found,
- cls.lb_mem_subnet_client.delete_subnet,
- cls.lb_mem_subnet_client.show_subnet,
- cls.lb_member_vip_ipv6_subnet['id'])
# Create tenant member 1 network
network_kwargs = {
@@ -376,21 +387,27 @@
use_fixed_ip=False):
if not ip_version:
ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
- if cls.lb_member_vip_subnet:
+ if cls.lb_member_vip_subnet or cls.lb_member_vip_ipv6_subnet:
ip_index = data_utils.rand_int_id(start=10, end=100)
while ip_index in cls.used_ips:
ip_index = data_utils.rand_int_id(start=10, end=100)
cls.used_ips.append(ip_index)
if ip_version == 4:
- network = ipaddress.IPv4Network(
- six.u(CONF.load_balancer.vip_subnet_cidr))
- lb_vip_address = str(network[ip_index])
subnet_id = cls.lb_member_vip_subnet[const.ID]
+ if CONF.load_balancer.test_with_noop:
+ lb_vip_address = '198.18.33.33'
+ else:
+ subnet = cls.os_admin.subnets_client.show_subnet(subnet_id)
+ network = ipaddress.IPv4Network(subnet['subnet']['cidr'])
+ lb_vip_address = str(network[ip_index])
else:
- network = ipaddress.IPv6Network(
- six.u(CONF.load_balancer.vip_ipv6_subnet_cidr))
- lb_vip_address = str(network[ip_index])
subnet_id = cls.lb_member_vip_ipv6_subnet[const.ID]
+ if CONF.load_balancer.test_with_noop:
+ lb_vip_address = '2001:db8:33:33:33:33:33:33'
+ else:
+ subnet = cls.os_admin.subnets_client.show_subnet(subnet_id)
+ network = ipaddress.IPv6Network(subnet['subnet']['cidr'])
+ lb_vip_address = str(network[ip_index])
lb_kwargs[const.VIP_SUBNET_ID] = subnet_id
if use_fixed_ip:
lb_kwargs[const.VIP_ADDRESS] = lb_vip_address
@@ -769,6 +786,9 @@
session = requests.Session()
response_counts = {}
+ if ipaddress.ip_address(vip_address).version == 6:
+ vip_address = '[{}]'.format(vip_address)
+
self._wait_for_lb_functional(vip_address)
# Send a number requests to lb vip
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index 425adbd..e90c706 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -237,6 +237,7 @@
o-cw: true
o-hm: true
o-hk: true
+ q-agt: true
- job:
name: octavia-dsvm-noop-base