Merge "Handle NotImplemented flavor exception"
diff --git a/README.rst b/README.rst
index b2c74ee..8b5bd3c 100644
--- a/README.rst
+++ b/README.rst
@@ -1,4 +1,7 @@
-========================
+======================
+Octavia Tempest Plugin
+======================
+
Team and repository tags
========================
@@ -7,7 +10,6 @@
.. Change things from this point on
-==============================
Tempest integration of Octavia
==============================
diff --git a/doc/requirements.txt b/doc/requirements.txt
index ddf8411..90ae115 100644
--- a/doc/requirements.txt
+++ b/doc/requirements.txt
@@ -9,3 +9,6 @@
# releasenotes
reno>=2.5.0 # Apache-2.0
+
+# PDF Docs
+sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
diff --git a/doc/source/conf.py b/doc/source/conf.py
index 2d5c49f..5bbe295 100755
--- a/doc/source/conf.py
+++ b/doc/source/conf.py
@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
+import datetime
import os
import sys
@@ -29,7 +30,8 @@
'sphinx.ext.viewcode',
'openstackdocstheme',
'oslo_config.sphinxext',
- 'sphinxcontrib.apidoc'
+ 'sphinxcontrib.apidoc',
+ 'sphinxcontrib.rsvgconverter'
]
# autodoc generation is a bit aggressive and a nuisance when doing heavy
@@ -45,7 +47,17 @@
# General information about the project.
project = u'octavia-tempest-plugin'
-copyright = u'2017, OpenStack Foundation'
+copyright = u'2017-2019, OpenStack Foundation'
+
+# The version info for the project you're documenting, acts as replacement for
+# |version| and |release|, also used in various other places throughout the
+# built documents.
+#
+# Version info
+from octavia_tempest_plugin.version import version_info as octavia_tempest_ver
+release = octavia_tempest_ver.release_string()
+# The short X.Y version.
+version = octavia_tempest_ver.version_string()
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
@@ -60,6 +72,14 @@
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['octavia_tempest_plugin.']
+repository_name = 'openstack/octavia-tempest-plugin'
+bug_project = '910'
+bug_tag = 'docs'
+
+apidoc_output_dir = '_build/modules'
+apidoc_module_dir = '../../octavia_tempest_plugin'
+apidoc_excluded_paths = []
+
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
@@ -81,23 +101,66 @@
# If false, no index is generated.
html_use_index = True
+# -- Options for LaTeX output -------------------------------------------------
+
+# Fix Unicode character for sphinx_feature_classification
+# Sphinx default latex engine (pdflatex) doesn't know much unicode
+latex_preamble = r"""
+\usepackage{newunicodechar}
+\newunicodechar{✖}{\sffamily X}
+\setcounter{tocdepth}{2}
+\authoraddress{\textcopyright %s OpenStack Foundation}
+""" % datetime.datetime.now().year
+
+latex_elements = {
+ # The paper size ('letterpaper' or 'a4paper').
+ # 'papersize': 'letterpaper',
+
+ # The font size ('10pt', '11pt' or '12pt').
+ # 'pointsize': '10pt',
+
+ # Additional stuff for the LaTeX preamble.
+ # openany: Skip blank pages in generated PDFs
+ 'extraclassoptions': 'openany,oneside',
+ 'makeindex': '',
+ 'printindex': '',
+ 'preamble': latex_preamble
+}
+
+# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664
+# Some distros are missing xindy
+latex_use_xindy = False
+
+# Fix missing apostrophe
+smartquotes_excludes = {'builders': ['latex']}
+
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
-latex_documents = [
- ('index',
- '%s.tex' % project,
- u'%s Documentation' % project,
- u'OpenStack Foundation', 'manual'),
-]
+latex_documents = [(
+ 'index',
+ 'doc-octavia-tempest-plugin.tex',
+ u'Octavia Tempest Plugin Documentation',
+ u'OpenStack Octavia Team',
+ 'manual'
+)]
-# Example configuration for intersphinx: refer to the Python standard library.
-#intersphinx_mapping = {'http://docs.python.org/': None}
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+# latex_logo = None
-repository_name = 'openstack/octavia-tempest-plugin'
-bug_project = '910'
-bug_tag = 'docs'
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+# latex_use_parts = False
-apidoc_output_dir = '_build/modules'
-apidoc_module_dir = '../../octavia_tempest_plugin'
-apidoc_excluded_paths = []
+# If true, show page references after internal links.
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+# latex_appendices = []
+
+# If false, no module index is generated.
+latex_domain_indices = False
diff --git a/doc/source/index.rst b/doc/source/index.rst
index 494a0b6..b991832 100644
--- a/doc/source/index.rst
+++ b/doc/source/index.rst
@@ -6,8 +6,6 @@
Welcome to octavia-tempest-plugin's documentation!
==================================================
-Contents:
-
.. toctree::
:maxdepth: 2
@@ -16,14 +14,16 @@
contributing
configref
-Indices and tables
-==================
+.. only:: html
-.. toctree::
- :hidden:
+ Indices and tables
+ ------------------
- _build/modules/modules
+ .. toctree::
+ :hidden:
-* :ref:`genindex`
-* :ref:`modindex`
-* :ref:`search`
+ _build/modules/modules
+
+ * :ref:`genindex`
+ * :ref:`modindex`
+ * :ref:`search`
diff --git a/octavia_tempest_plugin/clients.py b/octavia_tempest_plugin/clients.py
index f85404c..f2a7767 100644
--- a/octavia_tempest_plugin/clients.py
+++ b/octavia_tempest_plugin/clients.py
@@ -52,7 +52,7 @@
params.update({
'auth_provider': self.auth_provider,
'service': CONF.load_balancer.catalog_type,
- 'region': CONF.load_balancer.region,
+ 'region': CONF.load_balancer.region or CONF.identity.region,
'endpoint_type': CONF.load_balancer.endpoint_type,
'build_interval': CONF.load_balancer.build_interval,
'build_timeout': CONF.load_balancer.build_timeout
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index e767298..44bf3e5 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -104,6 +104,7 @@
HTTPS = 'HTTPS'
TCP = 'TCP'
TERMINATED_HTTPS = 'TERMINATED_HTTPS'
+UDP = 'UDP'
# HTTP Methods
GET = 'GET'
@@ -195,6 +196,9 @@
STATUS_PENDING_DELETE, STATUS_DELETED, STATUS_ERROR
)
+# Amphora providers list
+AMPHORA_PROVIDERS = ['amphora', 'amphorav2', 'octavia']
+
# Flavor capabilities
LOADBALANCER_TOPOLOGY = 'loadbalancer_topology'
diff --git a/octavia_tempest_plugin/config.py b/octavia_tempest_plugin/config.py
index d951a44..29dc1da 100644
--- a/octavia_tempest_plugin/config.py
+++ b/octavia_tempest_plugin/config.py
@@ -66,7 +66,7 @@
default=5,
help='Interval to check for status changes.'),
cfg.IntOpt('check_timeout',
- default=60,
+ default=120,
help='Timeout, in seconds, to wait for a status change.'),
cfg.BoolOpt('test_with_noop',
default=False,
@@ -108,8 +108,12 @@
'octavia.api.drivers entrypoint. Example: '
'amphora:The Octavia Amphora driver.,'
'octavia:Deprecated alias of the Octavia '
- 'Amphora driver.'),
+ 'Amphora driver.,'
+ 'amphorav2:The Octavia Amphora driver that uses '
+ 'taskflow jobboard persistence.'),
default={'amphora': 'The Octavia Amphora driver.',
+ 'amphorav2': 'The Octavia Amphora driver that uses '
+ 'taskflow jobboard persistence.',
'octavia': 'Deprecated alias of the Octavia Amphora '
'driver.'}),
cfg.StrOpt('loadbalancer_topology',
@@ -184,6 +188,9 @@
cfg.StrOpt('availability_zone',
default=None,
help='Availability zone to use for creating servers.'),
+ cfg.BoolOpt('test_reuse_connection', default=True,
+ help='Reuse TCP connections while testing LB with '
+ 'HTTP members (keep-alive).'),
]
lb_feature_enabled_group = cfg.OptGroup(name='loadbalancer-feature-enabled',
@@ -209,10 +216,10 @@
default="TCP",
help="The type of L4 Protocol which is supported with the "
"provider driver."),
- cfg.StrOpt('spare_pool_enabled',
- default=False,
- help="Wether spare pool is available with amphora provider "
- "driver or not."),
+ cfg.BoolOpt('spare_pool_enabled',
+ default=False,
+                help="Whether spare pool is available with amphora provider "
+ "driver or not."),
cfg.BoolOpt('session_persistence_enabled',
default=True,
help="Whether session persistence is supported with the "
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
new file mode 100644
index 0000000..612bbe2
--- /dev/null
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby.py
@@ -0,0 +1,287 @@
+# Copyright 2018 GoDaddy
+# Copyright 2018 Rackspace US Inc. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import requests
+import testtools
+import time
+
+from oslo_log import log as logging
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+@testtools.skipUnless(
+ CONF.validation.run_validation,
+ 'Active-Standby tests will not work without run_validation enabled.')
+class ActiveStandbyScenarioTest(test_base.LoadBalancerBaseTestWithCompute):
+
+ @classmethod
+ def resource_setup(cls):
+ """Setup resources needed by the tests."""
+ super(ActiveStandbyScenarioTest, cls).resource_setup()
+
+ # We have to do this here as the api_version and clients are not
+ # setup in time to use a decorator or the skip_checks mixin
+ if not cls.lb_admin_flavor_profile_client.is_version_supported(
+ cls.api_version, '2.3'):
+ return
+
+ lb_name = data_utils.rand_name("lb_member_lb1_actstdby")
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
+
+ # TODO(rm_work): Make this work with ipv6 and split this test for both
+ ip_version = 4
+ cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+ lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+ cls.lb_id = lb[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_lb_client.cleanup_loadbalancer,
+ cls.lb_id)
+
+ if CONF.validation.connect_method == 'floating':
+ port_id = lb[const.VIP_PORT_ID]
+ result = cls.lb_mem_float_ip_client.create_floatingip(
+ floating_network_id=CONF.network.public_network_id,
+ port_id=port_id)
+ floating_ip = result['floatingip']
+ LOG.info('lb1_floating_ip: {}'.format(floating_ip))
+ cls.addClassResourceCleanup(
+ waiters.wait_for_not_found,
+ cls.lb_mem_float_ip_client.delete_floatingip,
+ cls.lb_mem_float_ip_client.show_floatingip,
+ floatingip_id=floating_ip['id'])
+ cls.lb_vip_address = floating_ip['floating_ip_address']
+ else:
+ cls.lb_vip_address = lb[const.VIP_ADDRESS]
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
+
+ listener_name = data_utils.rand_name("lb_member_listener1_actstdby")
+ listener_kwargs = {
+ const.NAME: listener_name,
+ const.PROTOCOL: const.HTTP,
+ const.PROTOCOL_PORT: '80',
+ const.LOADBALANCER_ID: cls.lb_id,
+ }
+ listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+ cls.listener_id = listener[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_listener_client.cleanup_listener,
+ cls.listener_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ pool_name = data_utils.rand_name("lb_member_pool1_actstdby")
+ pool_kwargs = {
+ const.NAME: pool_name,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.LISTENER_ID: cls.listener_id,
+ }
+ pool = cls.mem_pool_client.create_pool(**pool_kwargs)
+ cls.pool_id = pool[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_pool_client.cleanup_pool,
+ cls.pool_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ # Set up Member 1 for Webserver 1
+ member1_name = data_utils.rand_name("lb_member_member1_actstdby")
+ member1_kwargs = {
+ const.POOL_ID: cls.pool_id,
+ const.NAME: member1_name,
+ const.ADMIN_STATE_UP: True,
+ const.ADDRESS: cls.webserver1_ip,
+ const.PROTOCOL_PORT: 80,
+ }
+ if cls.lb_member_1_subnet:
+ member1_kwargs[const.SUBNET_ID] = cls.lb_member_1_subnet[const.ID]
+
+ member1 = cls.mem_member_client.create_member(
+ **member1_kwargs)
+ cls.addClassResourceCleanup(
+ cls.mem_member_client.cleanup_member,
+ member1[const.ID], pool_id=cls.pool_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+ waiters.wait_for_status(
+ cls.mem_lb_client.show_loadbalancer, cls.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ # Set up Member 2 for Webserver 2
+ member2_name = data_utils.rand_name("lb_member_member2_actstdby")
+ member2_kwargs = {
+ const.POOL_ID: cls.pool_id,
+ const.NAME: member2_name,
+ const.ADMIN_STATE_UP: True,
+ const.ADDRESS: cls.webserver2_ip,
+ const.PROTOCOL_PORT: 80,
+ }
+ if cls.lb_member_2_subnet:
+ member2_kwargs[const.SUBNET_ID] = cls.lb_member_2_subnet[const.ID]
+
+ member2 = cls.mem_member_client.create_member(
+ **member2_kwargs)
+ cls.addClassResourceCleanup(
+ cls.mem_member_client.cleanup_member,
+ member2[const.ID], pool_id=cls.pool_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+ waiters.wait_for_status(
+ cls.mem_lb_client.show_loadbalancer, cls.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
+ @testtools.skipIf(CONF.load_balancer.test_with_noop,
+ 'Active/Standby tests will not work in noop mode.')
+ @decorators.idempotent_id('e591fa7a-0eee-485a-8ca0-5cf1a556bdf0')
+ def test_active_standby_vrrp_failover(self):
+ """Tests active/standby VRRP failover
+
+ * Test the load balancer to make sure it is functioning
+ * Identifies the Master and Backup amphora
+ * Deletes the Master amphora
+ * Sends traffic through the load balancer
+ * Validates that the Backup has assumed the Master role
+ """
+ # We have to do this here as the api_version and clients are not
+ # setup in time to use a decorator or the skip_checks mixin
+ if not self.mem_listener_client.is_version_supported(
+ self.api_version, '2.4'):
+ raise self.skipException(
+ 'Active/Standby VRRP failover tests require '
+                'Octavia API version 2.4 or newer.')
+
+ session = requests.Session()
+
+ # Send some traffic
+ self.check_members_balanced(self.lb_vip_address)
+
+ # Get the amphorae associated with this load balancer
+ amphorae = self.os_admin.amphora_client.list_amphorae(
+ query_params='{loadbalancer_id}={lb_id}'.format(
+ loadbalancer_id=const.LOADBALANCER_ID,
+ lb_id=self.lb_id))
+
+ # TODO(johnsom): Fix when LB flavors support act/stdby
+ if len(amphorae) < 2:
+ self.skipTest('Load balancer must be using active/standby '
+ 'topology for the VRRP failover test.')
+
+ # Generate traffic on the LB so we can identify the current Master
+ r = session.get('http://{0}'.format(self.lb_vip_address), timeout=2)
+
+ # Cycle through the amps to find the master
+ master_amp = None
+ backup_amp = None
+ start = int(time.time())
+ while True:
+ for amp in amphorae:
+ amphora_stats = self.os_admin.amphora_client.get_amphora_stats(
+ amp[const.ID])
+ for listener in amphora_stats:
+ if listener[const.TOTAL_CONNECTIONS] > 0:
+ master_amp = amp
+ break
+ # check if we left the listener for loop by finding the master
+ if master_amp:
+ break
+ # If we found the master and broke out of the amp for loop, break
+ # out of the while loop too.
+ if master_amp:
+ break
+ if int(time.time()) - start >= CONF.load_balancer.check_timeout:
+ message = ('Unable to find Master amphora in {timeout} '
+ 'seconds.'.format(
+ timeout=CONF.load_balancer.check_timeout))
+ raise exceptions.TimeoutException(message)
+ time.sleep(CONF.load_balancer.check_interval)
+
+ # Find the backup amphora and check it is ready for the test
+ for amp in amphorae:
+ if amp[const.ID] == master_amp[const.ID]:
+ continue
+ else:
+ backup_amp = amp
+ self.assertIsNotNone(backup_amp)
+ amphora_stats = self.os_admin.amphora_client.get_amphora_stats(
+ backup_amp[const.ID])
+ for listener in amphora_stats:
+ self.assertEqual(0, listener[const.TOTAL_CONNECTIONS])
+
+ # Delete the master amphora compute instance
+ self.os_admin_servers_client.delete_server(
+ master_amp[const.COMPUTE_ID])
+
+ # Pass some traffic through the LB
+ # Note: We want this to loop for longer than the heartbeat interval
+ # to make sure a stats update has come in to the HM
+ for x in range(0, 20):
+ try:
+ r = session.get('http://{0}'.format(self.lb_vip_address),
+ timeout=1)
+ LOG.info('Got response: %s', r.text)
+ except Exception:
+ LOG.info('Load balancer request failed. Looping')
+ time.sleep(1)
+
+ # Check that the Backup amphora is now Master
+ amphora_stats = self.os_admin.amphora_client.get_amphora_stats(
+ backup_amp[const.ID])
+ connections = 0
+ for listener in amphora_stats:
+ connections += listener[const.TOTAL_CONNECTIONS]
+ self.assertTrue(connections > 0)
+ LOG.info('Backup amphora is now Master.')
+ # Wait for the amphora failover to start
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.PENDING_UPDATE, CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+ # Wait for the load balancer to return to ACTIVE so the
+ # cleanup steps will pass
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE, CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
diff --git a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
index 7e6f7e8..8399f67 100644
--- a/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
+++ b/octavia_tempest_plugin/tests/act_stdby_scenario/v2/test_active_standby_iptables.py
@@ -273,9 +273,6 @@
# Delete active amphora
self.os_admin_servers_client.delete_server(active[const.COMPUTE_ID])
- # Send some traffic
- self.check_members_balanced(self.lb_vip_address)
-
# Wait for the amphora failover to start
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer,
@@ -283,6 +280,9 @@
const.PENDING_UPDATE, CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
+ # Send some traffic (checks VRRP failover)
+ self.check_members_balanced(self.lb_vip_address)
+
# Wait for the load balancer to return to ACTIVE
waiters.wait_for_status(
self.mem_lb_client.show_loadbalancer,
diff --git a/octavia_tempest_plugin/tests/api/v2/test_amphora.py b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
index 7873679..7cf77dd 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_amphora.py
@@ -30,7 +30,7 @@
@classmethod
def skip_checks(cls):
super(AmphoraAPITest, cls).skip_checks()
- if CONF.load_balancer.provider not in ['amphora', 'octavia']:
+ if CONF.load_balancer.provider not in const.AMPHORA_PROVIDERS:
raise cls.skipException('Amphora tests only run with the amphora '
'provider enabled.')
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 691c61c..3a45656 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -161,6 +161,140 @@
self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+ @decorators.idempotent_id('cceac303-4db5-4d5a-9f6e-ff33780a5f29')
+ def test_listener_create_on_same_port(self):
+        """Tests listener creation on the same port number.
+
+        * Create a first listener.
+        * Create a second listener on the same port, but with a different
+          protocol, and ensure that it succeeds.
+        * Create a third listener with the same protocol and port as the
+          second one and ensure that a Conflict error is raised.
+        * Create a fourth listener on the same port with another protocol
+          over TCP and ensure that it also fails.
+        """
+
+ # Using listeners on the same port for TCP and UDP was not supported
+ # before Train. Use 2.11 API version as reference to detect previous
+ # releases and skip the test.
+ if not self.mem_listener_client.is_version_supported(
+ self.api_version, '2.11'):
+ raise self.skipException('TCP and UDP listeners on same port fix '
+ 'is only available on Octavia API '
+ 'version 2.11 or newer.')
+
+ listener_name = data_utils.rand_name("lb_member_listener1-create")
+
+ listener_kwargs = {
+ const.NAME: listener_name,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: self.protocol,
+ const.PROTOCOL_PORT: 8080,
+ const.LOADBALANCER_ID: self.lb_id,
+ const.CONNECTION_LIMIT: 200
+ }
+
+ try:
+ listener = self.mem_listener_client.create_listener(
+ **listener_kwargs)
+ except exceptions.BadRequest as e:
+ faultstring = e.resp_body.get('faultstring', '')
+ if ("Invalid input for field/attribute protocol." in faultstring
+ and "Value should be one of:" in faultstring):
+ raise self.skipException("Skipping unsupported protocol")
+ raise e
+
+ self.addClassResourceCleanup(
+ self.mem_listener_client.cleanup_listener,
+ listener[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ if self.protocol == const.UDP:
+ protocol = const.TCP
+ else:
+ protocol = const.UDP
+
+ # Create a listener on the same port, but with a different protocol
+ listener2_name = data_utils.rand_name("lb_member_listener2-create")
+
+ listener2_kwargs = {
+ const.NAME: listener2_name,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: protocol,
+ const.PROTOCOL_PORT: 8080,
+ const.LOADBALANCER_ID: self.lb_id,
+ const.CONNECTION_LIMIT: 200,
+ }
+
+ try:
+ listener2 = self.mem_listener_client.create_listener(
+ **listener2_kwargs)
+ except exceptions.BadRequest as e:
+ faultstring = e.resp_body.get('faultstring', '')
+ if ("Invalid input for field/attribute protocol." in faultstring
+ and "Value should be one of:" in faultstring):
+ raise self.skipException("Skipping unsupported protocol")
+ raise e
+
+ self.addClassResourceCleanup(
+ self.mem_listener_client.cleanup_listener,
+ listener2[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ # Create a listener on the same port, with an already used protocol
+ listener3_name = data_utils.rand_name("lb_member_listener3-create")
+
+ listener3_kwargs = {
+ const.NAME: listener3_name,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: protocol,
+ const.PROTOCOL_PORT: 8080,
+ const.LOADBALANCER_ID: self.lb_id,
+ const.CONNECTION_LIMIT: 200,
+ }
+
+ self.assertRaises(
+ exceptions.Conflict,
+ self.mem_listener_client.create_listener,
+ **listener3_kwargs)
+
+ # Create a listener on the same port, with another protocol over TCP,
+ # only if layer-7 protocols are enabled
+ lb_feature_enabled = CONF.loadbalancer_feature_enabled
+ if lb_feature_enabled.l7_protocol_enabled:
+ if self.protocol == const.HTTP:
+ protocol = const.HTTPS
+ else:
+ protocol = const.HTTP
+
+ listener4_name = data_utils.rand_name("lb_member_listener4-create")
+
+ listener4_kwargs = {
+ const.NAME: listener4_name,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: protocol,
+ const.PROTOCOL_PORT: 8080,
+ const.LOADBALANCER_ID: self.lb_id,
+ const.CONNECTION_LIMIT: 200,
+ }
+
+ self.assertRaises(
+ exceptions.Conflict,
+ self.mem_listener_client.create_listener,
+ **listener4_kwargs)
+
@decorators.idempotent_id('78ba6eb0-178c-477e-9156-b6775ca7b271')
def test_listener_list(self):
"""Tests listener list API and field filtering.
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 10ae85d..b4dde1f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -826,7 +826,7 @@
lb = self.mem_lb_client.show_loadbalancer(lb[const.ID])
self.assertEqual(const.ACTIVE, lb[const.PROVISIONING_STATUS])
- if CONF.load_balancer.provider in ['amphora', 'octavia']:
+ if CONF.load_balancer.provider in const.AMPHORA_PROVIDERS:
before_amphorae = self.lb_admin_amphora_client.list_amphorae(
query_params='{loadbalancer_id}={lb_id}'.format(
loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))
@@ -840,7 +840,7 @@
CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout)
- if CONF.load_balancer.provider in ['amphora', 'octavia']:
+ if CONF.load_balancer.provider in const.AMPHORA_PROVIDERS:
after_amphorae = self.lb_admin_amphora_client.list_amphorae(
query_params='{loadbalancer_id}={lb_id}'.format(
loadbalancer_id=const.LOADBALANCER_ID, lb_id=lb[const.ID]))
diff --git a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
index 25f741a..0fe1d81 100644
--- a/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
+++ b/octavia_tempest_plugin/tests/barbican_scenario/v2/test_tls_barbican.py
@@ -53,6 +53,42 @@
'barbican service.')
@classmethod
+ def _generate_load_certificate(cls, barbican_mgr, ca_cert, ca_key, name):
+ new_cert, new_key = cert_utils.generate_server_cert_and_key(
+ ca_cert, ca_key, name)
+
+ LOG.debug('%s Cert: %s', name, new_cert.public_bytes(
+ serialization.Encoding.PEM))
+ LOG.debug('%s private Key: %s', name, new_key.private_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PrivateFormat.TraditionalOpenSSL,
+ encryption_algorithm=serialization.NoEncryption()))
+ new_public_key = new_key.public_key()
+ LOG.debug('%s public Key: %s', name, new_public_key.public_bytes(
+ encoding=serialization.Encoding.PEM,
+ format=serialization.PublicFormat.SubjectPublicKeyInfo))
+
+ # Create the pkcs12 bundle
+ pkcs12 = cert_utils.generate_pkcs12_bundle(new_cert, new_key)
+ LOG.debug('%s PKCS12 bundle: %s', name, base64.b64encode(pkcs12))
+
+ new_secret_ref = barbican_mgr.store_secret(pkcs12)
+ cls.addClassResourceCleanup(barbican_mgr.delete_secret, new_secret_ref)
+
+ # Set the barbican ACL if the Octavia API version doesn't do it
+ # automatically.
+ if not cls.mem_lb_client.is_version_supported(
+ cls.api_version, '2.1'):
+ user_list = cls.os_admin.users_v3_client.list_users(
+ name=CONF.load_balancer.octavia_svc_username)
+ msg = 'Only one user named "{0}" should exist, {1} found.'.format(
+ CONF.load_balancer.octavia_svc_username,
+ len(user_list['users']))
+ assert 1 == len(user_list['users']), msg
+ barbican_mgr.add_acl(new_secret_ref, user_list['users'][0]['id'])
+ return new_cert, new_key, new_secret_ref
+
+ @classmethod
def resource_setup(cls):
"""Setup resources needed by the tests."""
super(TLSWithBarbicanTest, cls).resource_setup()
@@ -70,45 +106,35 @@
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo))
- # Create a server cert and key
- cls.server_uuid = uuidutils.generate_uuid()
- server_cert, server_key = cert_utils.generate_server_cert_and_key(
- cls.ca_cert, ca_key, cls.server_uuid)
-
- LOG.debug('Server Cert: %s' % server_cert.public_bytes(
- serialization.Encoding.PEM))
- LOG.debug('Server private Key: %s' % server_key.private_bytes(
- encoding=serialization.Encoding.PEM,
- format=serialization.PrivateFormat.TraditionalOpenSSL,
- encryption_algorithm=serialization.NoEncryption()))
- server_public_key = server_key.public_key()
- LOG.debug('Server public Key: %s' % server_public_key.public_bytes(
- encoding=serialization.Encoding.PEM,
- format=serialization.PublicFormat.SubjectPublicKeyInfo))
-
- # Create the pkcs12 bundle
- pkcs12 = cert_utils.generate_pkcs12_bundle(server_cert, server_key)
- LOG.debug('Server PKCS12 bundle: %s' % base64.b64encode(pkcs12))
-
# Load the secret into the barbican service under the
# os_roles_lb_member tenant
barbican_mgr = barbican_client_mgr.BarbicanClientManager(
cls.os_roles_lb_member)
- cls.secret_ref = barbican_mgr.store_secret(pkcs12)
- cls.addClassResourceCleanup(barbican_mgr.delete_secret, cls.secret_ref)
+ # Create a server cert and key
+ # This will be used as the "default certificate" in SNI tests.
+ cls.server_uuid = uuidutils.generate_uuid()
+ LOG.debug('Server (default) UUID: %s' % cls.server_uuid)
- # Set the barbican ACL if the Octavia API version doesn't do it
- # automatically.
- if not cls.mem_lb_client.is_version_supported(
- cls.api_version, '2.1'):
- user_list = cls.os_admin.users_v3_client.list_users(
- name=CONF.load_balancer.octavia_svc_username)
- msg = 'Only one user named "{0}" should exist, {1} found.'.format(
- CONF.load_balancer.octavia_svc_username,
- len(user_list['users']))
- assert 1 == len(user_list['users']), msg
- barbican_mgr.add_acl(cls.secret_ref, user_list['users'][0]['id'])
+ server_cert, server_key, cls.server_secret_ref = (
+ cls._generate_load_certificate(barbican_mgr, cls.ca_cert,
+ ca_key, cls.server_uuid))
+
+ # Create the SNI1 cert and key
+ cls.SNI1_uuid = uuidutils.generate_uuid()
+ LOG.debug('SNI1 UUID: %s' % cls.SNI1_uuid)
+
+ SNI1_cert, SNI1_key, cls.SNI1_secret_ref = (
+ cls._generate_load_certificate(barbican_mgr, cls.ca_cert,
+ ca_key, cls.SNI1_uuid))
+
+ # Create the SNI2 cert and key
+ cls.SNI2_uuid = uuidutils.generate_uuid()
+ LOG.debug('SNI2 UUID: %s' % cls.SNI2_uuid)
+
+ SNI2_cert, SNI2_key, cls.SNI2_secret_ref = (
+ cls._generate_load_certificate(barbican_mgr, cls.ca_cert,
+ ca_key, cls.SNI2_uuid))
# Setup a load balancer for the tests to use
lb_name = data_utils.rand_name("lb_member_lb1-tls")
@@ -224,7 +250,7 @@
const.PROTOCOL_PORT: '443',
const.LOADBALANCER_ID: self.lb_id,
const.DEFAULT_POOL_ID: self.pool_id,
- const.DEFAULT_TLS_CONTAINER_REF: self.secret_ref,
+ const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
}
listener = self.mem_listener_client.create_listener(**listener_kwargs)
self.listener_id = listener[const.ID]
@@ -242,7 +268,7 @@
# Test HTTPS listener load balancing.
# Note: certificate validation tests will follow this test
self.check_members_balanced(self.lb_vip_address, protocol='https',
- verify=False)
+ verify=False, protocol_port=443)
def _verify_cb(connection, x509, errno, errdepth, retcode):
"""Callback for certificate validation."""
@@ -250,9 +276,17 @@
if errdepth != 0:
return True
if errno == 0:
+ received_cn = x509.get_subject().commonName
+ received_name = self._get_cert_name(received_cn)
+ expected_cn = '{}.example.com'.format(self.server_uuid)
+ msg = ('ERROR: Received certificate "{received_name}" with CN '
+ '{received_cn} is not the expected certificate '
+ '"default" with CN {expected_cn}.'.format(
+ received_name=received_name,
+ received_cn=received_cn,
+ expected_cn=expected_cn))
# Make sure the certificate is the one we generated
- self.assertEqual('{}.example.com'.format(self.server_uuid),
- x509.get_subject().commonName)
+ self.assertEqual(expected_cn, received_cn, message=msg)
else:
LOG.error('Certificate with CN: {0} failed validation with '
'OpenSSL verify errno {1}'.format(
@@ -270,3 +304,317 @@
sock.connect((self.lb_vip_address, 443))
# Validate the certificate is signed by the ca_cert we created
sock.do_handshake()
+
+ @decorators.idempotent_id('08405802-4411-4454-b008-8607408f424a')
+ def test_basic_tls_SNI_traffic(self):
+
+ listener_name = data_utils.rand_name("lb_member_listener1-tls-sni")
+ listener_kwargs = {
+ const.NAME: listener_name,
+ const.PROTOCOL: const.TERMINATED_HTTPS,
+ const.PROTOCOL_PORT: '443',
+ const.LOADBALANCER_ID: self.lb_id,
+ const.DEFAULT_POOL_ID: self.pool_id,
+ const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+ const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref,
+ self.SNI2_secret_ref],
+ }
+ listener = self.mem_listener_client.create_listener(**listener_kwargs)
+ self.listener_id = listener[const.ID]
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener,
+ self.listener_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ # Test HTTPS listener load balancing.
+ # Note: certificate validation tests will follow this test
+ self.check_members_balanced(self.lb_vip_address, protocol='https',
+ verify=False, protocol_port=443)
+
+ def _verify_server_cb(connection, x509, errno, errdepth, retcode):
+ return _verify_cb(connection, x509, errno, errdepth, retcode,
+ name=self.server_uuid)
+
+ def _verify_SNI1_cb(connection, x509, errno, errdepth, retcode):
+ return _verify_cb(connection, x509, errno, errdepth, retcode,
+ name=self.SNI1_uuid)
+
+ def _verify_SNI2_cb(connection, x509, errno, errdepth, retcode):
+ return _verify_cb(connection, x509, errno, errdepth, retcode,
+ name=self.SNI2_uuid)
+
+ def _verify_cb(connection, x509, errno, errdepth, retcode, name):
+ """Callback for certificate validation."""
+ # don't validate names of root certificates
+ if errdepth != 0:
+ return True
+ if errno == 0:
+ received_cn = x509.get_subject().commonName
+ received_name = self._get_cert_name(received_cn)
+ expected_cn = '{}.example.com'.format(name)
+ expected_name = self._get_cert_name(name)
+ msg = ('ERROR: Received certificate "{received_name}" with CN '
+ '{received_cn} is not the expected certificate '
+ '"{expected_name}" with CN {expected_cn}.'.format(
+ received_name=received_name,
+ received_cn=received_cn,
+ expected_name=expected_name,
+ expected_cn=expected_cn))
+ # Make sure the certificate is the one we generated
+ self.assertEqual(expected_cn, received_cn, message=msg)
+ else:
+ LOG.error('Certificate with CN: {0} failed validation with '
+ 'OpenSSL verify errno {1}'.format(
+ x509.get_subject().commonName, errno))
+ return False
+ return True
+
+ # Test that the default certificate is used with no SNI host request
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_server_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.connect((self.lb_vip_address, 443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the default certificate is used with bogus SNI host request
+ context = SSL.Context(SSL.TLSv1_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_server_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.set_tlsext_host_name('bogus.example.com'.encode())
+ sock.connect((self.lb_vip_address, 443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the SNI1 certificate is used when SNI1 host is specified
+ context = SSL.Context(SSL.TLSv1_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_SNI1_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.set_tlsext_host_name(
+ '{}.example.com'.format(self.SNI1_uuid).encode())
+ sock.connect((self.lb_vip_address, 443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the SNI2 certificate is used when SNI2 host is specified
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_SNI2_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.set_tlsext_host_name(
+ '{}.example.com'.format(self.SNI2_uuid).encode())
+ sock.connect((self.lb_vip_address, 443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ def _get_cert_name(self, lookup_string):
+ if self.server_uuid in lookup_string:
+ return 'default'
+ elif self.SNI1_uuid in lookup_string:
+ return 'SNI1'
+ elif self.SNI2_uuid in lookup_string:
+ return 'SNI2'
+ else:
+ return 'Unknown'
+
+ @decorators.idempotent_id('bfac9bf4-8cd0-4519-8d99-5ad0c75abf5c')
+ def test_basic_tls_SNI_multi_listener_traffic(self):
+ """Make sure certificates are only used on the correct listeners."""
+
+ listener_name = data_utils.rand_name("lb_member_listener1-tls-sni")
+ listener_kwargs = {
+ const.NAME: listener_name,
+ const.PROTOCOL: const.TERMINATED_HTTPS,
+ const.PROTOCOL_PORT: '443',
+ const.LOADBALANCER_ID: self.lb_id,
+ const.DEFAULT_POOL_ID: self.pool_id,
+ const.DEFAULT_TLS_CONTAINER_REF: self.server_secret_ref,
+ const.SNI_CONTAINER_REFS: [self.SNI1_secret_ref],
+ }
+ listener = self.mem_listener_client.create_listener(**listener_kwargs)
+ self.listener_id = listener[const.ID]
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener,
+ self.listener_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ # Test HTTPS listener load balancing.
+ # Note: certificate validation tests will follow this test
+ self.check_members_balanced(self.lb_vip_address, protocol='https',
+ verify=False, protocol_port=443)
+
+ listener2_name = data_utils.rand_name("lb_member_listener2-tls-sni")
+ listener2_kwargs = {
+ const.NAME: listener2_name,
+ const.PROTOCOL: const.TERMINATED_HTTPS,
+ const.PROTOCOL_PORT: '8443',
+ const.LOADBALANCER_ID: self.lb_id,
+ const.DEFAULT_POOL_ID: self.pool_id,
+ const.DEFAULT_TLS_CONTAINER_REF: self.SNI2_secret_ref,
+ }
+ listener2 = self.mem_listener_client.create_listener(
+ **listener2_kwargs)
+ self.listener2_id = listener2[const.ID]
+ self.addCleanup(
+ self.mem_listener_client.cleanup_listener,
+ self.listener2_id,
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ # Test HTTPS listener load balancing.
+ # Note: certificate validation tests will follow this test
+ self.check_members_balanced(self.lb_vip_address, protocol='https',
+ verify=False, protocol_port=8443)
+
+ def _verify_server_cb(connection, x509, errno, errdepth, retcode):
+ return _verify_cb(connection, x509, errno, errdepth, retcode,
+ name=self.server_uuid)
+
+ def _verify_SNI1_cb(connection, x509, errno, errdepth, retcode):
+ return _verify_cb(connection, x509, errno, errdepth, retcode,
+ name=self.SNI1_uuid)
+
+ def _verify_SNI2_cb(connection, x509, errno, errdepth, retcode):
+ return _verify_cb(connection, x509, errno, errdepth, retcode,
+ name=self.SNI2_uuid)
+
+ def _verify_cb(connection, x509, errno, errdepth, retcode, name):
+ """Callback for certificate validation."""
+ # don't validate names of root certificates
+ if errdepth != 0:
+ return True
+ if errno == 0:
+ received_cn = x509.get_subject().commonName
+ received_name = self._get_cert_name(received_cn)
+ expected_cn = '{}.example.com'.format(name)
+ expected_name = self._get_cert_name(name)
+ msg = ('ERROR: Received certificate "{received_name}" with CN '
+ '{received_cn} is not the expected certificate '
+ '"{expected_name}" with CN {expected_cn}.'.format(
+ received_name=received_name,
+ received_cn=received_cn,
+ expected_name=expected_name,
+ expected_cn=expected_cn))
+ # Make sure the certificate is the one we generated
+ self.assertEqual(expected_cn, received_cn, message=msg)
+ else:
+ LOG.error('Certificate with CN: {0} failed validation with '
+ 'OpenSSL verify errno {1}'.format(
+ x509.get_subject().commonName, errno))
+ return False
+ return True
+
+ # Test that the default certificate is used with no SNI host request
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_server_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.connect((self.lb_vip_address, 443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the SNI1 certificate is used when SNI1 host is specified
+ context = SSL.Context(SSL.TLSv1_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_SNI1_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.set_tlsext_host_name(
+ '{}.example.com'.format(self.SNI1_uuid).encode())
+ sock.connect((self.lb_vip_address, 443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the default certificate is used when SNI2 host is specified
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_server_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.set_tlsext_host_name(
+ '{}.example.com'.format(self.SNI2_uuid).encode())
+ sock.connect((self.lb_vip_address, 443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the SNI2 certificate is used with no SNI host request
+ # on listener 2, SNI2 is the default cert for listener 2
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_SNI2_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.connect((self.lb_vip_address, 8443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the SNI2 certificate is used with listener 1 host request
+ # on listener 2, SNI2 is the default cert for listener 2
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_SNI2_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.set_tlsext_host_name(
+ '{}.example.com'.format(self.server_uuid).encode())
+ sock.connect((self.lb_vip_address, 8443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
+
+ # Test that the SNI2 certificate is used with SNI1 host request
+ # on listener 2, SNI2 is the default cert for listener 2
+ context = SSL.Context(SSL.SSLv23_METHOD)
+ context.set_verify(SSL.VERIFY_PEER | SSL.VERIFY_FAIL_IF_NO_PEER_CERT,
+ _verify_SNI2_cb)
+ ca_store = context.get_cert_store()
+ ca_store.add_cert(X509.from_cryptography(self.ca_cert))
+ sock = socket.socket()
+ sock = SSL.Connection(context, sock)
+ sock.set_tlsext_host_name(
+ '{}.example.com'.format(self.SNI1_uuid).encode())
+ sock.connect((self.lb_vip_address, 8443))
+ # Validate the certificate is signed by the ca_cert we created
+ sock.do_handshake()
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
index 30a116c..9101321 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
@@ -34,7 +34,7 @@
def skip_checks(cls):
super(AmphoraScenarioTest, cls).skip_checks()
- if CONF.load_balancer.provider not in ['amphora', 'octavia']:
+ if CONF.load_balancer.provider not in const.AMPHORA_PROVIDERS:
raise cls.skipException("Amphora tests require provider 'amphora' "
"or 'octavia' (alias to 'amphora', "
" deprecated) set")
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
index eba7e38..c3610d5 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_traffic_ops.py
@@ -51,6 +51,12 @@
cls.mem_lb_client.cleanup_loadbalancer,
cls.lb_id)
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
+
if CONF.validation.connect_method == 'floating':
port_id = lb[const.VIP_PORT_ID]
result = cls.lb_mem_float_ip_client.create_floatingip(
@@ -67,12 +73,6 @@
else:
cls.lb_vip_address = lb[const.VIP_ADDRESS]
- waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
- cls.lb_id, const.PROVISIONING_STATUS,
- const.ACTIVE,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
-
protocol = const.HTTP
lb_feature_enabled = CONF.loadbalancer_feature_enabled
if not lb_feature_enabled.l7_protocol_enabled:
@@ -338,6 +338,7 @@
const.ONLINE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout,
+ error_ok=True,
pool_id=self.pool_id)
waiters.wait_for_status(
self.mem_member_client.show_member,
diff --git a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
index 072bd20..80c886b 100644
--- a/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
+++ b/octavia_tempest_plugin/tests/spare_pool_scenario/v2/test_spare_pool.py
@@ -31,7 +31,7 @@
def skip_checks(cls):
super(SparePoolTest, cls).skip_checks()
- if CONF.load_balancer.provider not in ['amphora', 'octavia']:
+ if CONF.load_balancer.provider not in const.AMPHORA_PROVIDERS:
raise cls.skipException("Amphora tests require provider 'amphora' "
"or 'octavia' (alias to 'amphora', "
"deprecated) set")
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 0e8a909..5033ade 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -861,8 +861,10 @@
raise Exception()
def check_members_balanced(self, vip_address, traffic_member_count=2,
- protocol='http', verify=True):
- session = requests.Session()
+ protocol='http', verify=True, protocol_port=80):
+ handler = requests
+ if CONF.load_balancer.test_reuse_connection:
+ handler = requests.Session()
response_counts = {}
if ipaddress.ip_address(vip_address).version == 6:
@@ -873,7 +875,8 @@
# Send a number requests to lb vip
for i in range(20):
try:
- r = session.get('{0}://{1}'.format(protocol, vip_address),
+ r = handler.get('{0}://{1}:{2}'.format(protocol, vip_address,
+ protocol_port),
timeout=2, verify=verify)
if r.content in response_counts:
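
Note: with the new [load_balancer] test_reuse_connection option, check_members_balanced
reuses a single requests.Session (HTTP keep-alive through the load balancer) when the
option is True, and falls back to the module-level requests functions (a fresh TCP
connection per probe) when it is False. A minimal sketch of the pattern, assuming a
placeholder VIP address; it is not part of the change itself:

    import requests

    reuse_connection = True  # stands in for CONF.load_balancer.test_reuse_connection
    handler = requests.Session() if reuse_connection else requests
    # Session objects and the requests module expose the same get() signature,
    # so the probe loop does not need to know which one it received.
    response = handler.get('http://203.0.113.10:80', timeout=2)
    print(response.status_code)
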
diff --git a/octavia_tempest_plugin/tests/waiters.py b/octavia_tempest_plugin/tests/waiters.py
index 89e8455..eb7410a 100644
--- a/octavia_tempest_plugin/tests/waiters.py
+++ b/octavia_tempest_plugin/tests/waiters.py
@@ -28,7 +28,7 @@
def wait_for_status(show_client, id, status_key, status,
check_interval, check_timeout, root_tag=None,
- **kwargs):
+ error_ok=False, **kwargs):
"""Waits for an object to reach a specific status.
:param show_client: The tempest service client show method.
@@ -40,6 +40,7 @@
:check_interval: How often to check the status, in seconds.
:check_timeout: The maximum time, in seconds, to check the status.
:root_tag: The root tag on the response to remove, if any.
+ :error_ok: When true, ERROR status will not raise an exception.
:raises CommandFailed: Raised if the object goes into ERROR and ERROR was
not the desired status.
:raises TimeoutException: The object did not achieve the status or ERROR in
@@ -75,7 +76,8 @@
if caller:
message = '({caller}) {message}'.format(caller=caller,
message=message)
- raise exceptions.UnexpectedResponseCode(message)
+ if not error_ok:
+ raise exceptions.UnexpectedResponseCode(message)
elif int(time.time()) - start >= check_timeout:
message = (
'{name} {field} failed to update to {expected_status} within '
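
Note: the new error_ok flag only suppresses the exception normally raised when the
object reports ERROR; the waiter keeps polling until the expected status appears or
check_timeout expires. A minimal sketch of a caller that tolerates a transient ERROR
operating status, mirroring the test_traffic_ops change above (member_id and pool_id
are placeholders):

    waiters.wait_for_status(
        self.mem_member_client.show_member,
        member_id, const.OPERATING_STATUS, const.ONLINE,
        CONF.load_balancer.check_interval,
        CONF.load_balancer.check_timeout,
        error_ok=True,    # do not raise if the member temporarily reports ERROR
        pool_id=pool_id)  # extra kwargs are passed through to show_member
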
diff --git a/octavia_tempest_plugin/version.py b/octavia_tempest_plugin/version.py
new file mode 100644
index 0000000..0f2cf15
--- /dev/null
+++ b/octavia_tempest_plugin/version.py
@@ -0,0 +1,32 @@
+# Copyright 2011-2014 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import pbr.version
+
+OCTAVIA_TEMPEST_VENDOR = "OpenStack Foundation"
+OCTAVIA_TEMPEST_PRODUCT = "OpenStack Octavia tempest plugin"
+
+version_info = pbr.version.VersionInfo('octavia-tempest-plugin')
+
+
+def vendor_string():
+ return OCTAVIA_TEMPEST_VENDOR
+
+
+def product_string():
+ return OCTAVIA_TEMPEST_PRODUCT
+
+
+def version_string_with_package():
+ return version_info.version_string()
diff --git a/tox.ini b/tox.ini
index 1ca36b7..33b941c 100644
--- a/tox.ini
+++ b/tox.ini
@@ -49,6 +49,17 @@
rm -rf doc/build
sphinx-build -W -b html doc/source doc/build/html
+[testenv:pdf-docs]
+basepython = python3
+deps = {[testenv:docs]deps}
+whitelist_externals =
+ make
+ rm
+commands =
+ rm -rf doc/build/pdf
+ sphinx-build -W -b latex doc/source doc/build/pdf
+ make -C doc/build/pdf
+
[testenv:releasenotes]
basepython = python3
deps =
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index aa204fd..98b8a43 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -84,6 +84,58 @@
octavia: https://opendev.org/openstack/octavia.git
- job:
+ name: octavia-dsvm-base-ipv6-only
+ parent: devstack-tempest-ipv6
+ timeout: 7800
+ required-projects:
+ - openstack/octavia
+ - openstack/octavia-lib
+ - openstack/octavia-tempest-plugin
+ - openstack/python-octaviaclient
+ pre-run: playbooks/Octavia-DSVM/pre.yaml
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ vars:
+ devstack_localrc:
+ TEMPEST_PLUGINS: /opt/stack/octavia-tempest-plugin
+ USE_PYTHON3: true
+ devstack_local_conf:
+ post-config:
+ $OCTAVIA_CONF:
+ DEFAULT:
+ debug: True
+ devstack_services:
+ c-bak: false
+ ceilometer-acentral: false
+ ceilometer-acompute: false
+ ceilometer-alarm-evaluator: false
+ ceilometer-alarm-notifier: false
+ ceilometer-anotification: false
+ ceilometer-api: false
+ ceilometer-collector: false
+ c-sch: false
+ c-api: false
+ c-vol: false
+ cinder: false
+ octavia: true
+ o-api: true
+ o-cw: true
+ o-hm: true
+ o-hk: true
+ swift: false
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ tempest: true
+ devstack_plugins:
+ octavia: https://opendev.org/openstack/octavia.git
+
+- job:
name: octavia-dsvm-live-base
parent: octavia-dsvm-base
timeout: 9000
@@ -116,6 +168,38 @@
'/var/log/octavia-tenant-traffic.log': logs
- job:
+ name: octavia-dsvm-live-base-ipv6-only
+ parent: octavia-dsvm-base-ipv6-only
+ timeout: 9000
+ required-projects:
+ - openstack/diskimage-builder
+ vars:
+ devstack_localrc:
+ DIB_LOCAL_ELEMENTS: openstack-ci-mirrors
+ devstack_local_conf:
+ post-config:
+ $OCTAVIA_CONF:
+ haproxy_amphora:
+ # Set these higher for non-nested virt nodepool instances
+ connection_max_retries: 1200
+ build_active_retries: 300
+ amphora_agent:
+ forward_all_logs: True
+ test-config:
+ "$TEMPEST_CONFIG":
+ load_balancer:
+ check_interval: 1
+ check_timeout: 180
+ devstack_services:
+ neutron-qos: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron.git
+ zuul_copy_output:
+ '/var/log/dib-build' : logs
+ '/var/log/octavia-amphora.log': logs
+ '/var/log/octavia-tenant-traffic.log': logs
+
+- job:
name: octavia-dsvm-live-two-node-base
parent: octavia-dsvm-base
nodeset: octavia-two-node
@@ -314,6 +398,11 @@
USE_PYTHON3: False
- job:
+ name: octavia-v2-dsvm-noop-api-stable-train
+ parent: octavia-v2-dsvm-noop-api
+ override-checkout: stable/train
+
+- job:
name: octavia-v2-dsvm-noop-api-stable-stein
parent: octavia-v2-dsvm-noop-api
override-checkout: stable/stein
@@ -349,6 +438,26 @@
- ^octavia_tempest_plugin/tests/(?!scenario/|\w+\.py).*
- job:
+ name: octavia-v2-dsvm-scenario-ipv6-only
+ parent: octavia-dsvm-live-base-ipv6-only
+ vars:
+ devstack_local_conf:
+ post-config:
+ $OCTAVIA_CONF:
+ api_settings:
+ api_v1_enabled: False
+ tempest_concurrency: 2
+ tempest_test_regex: ^octavia_tempest_plugin.tests.scenario.v2
+ tox_envlist: all
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^api-ref/.*$
+ - ^doc/.*$
+ - ^etc/.*$
+ - ^releasenotes/.*$
+ - ^octavia_tempest_plugin/tests/(?!scenario/|\w+\.py).*
+
+- job:
name: octavia-v2-dsvm-py2-scenario
parent: octavia-v2-dsvm-scenario
vars:
@@ -356,6 +465,11 @@
USE_PYTHON3: False
- job:
+ name: octavia-v2-dsvm-scenario-stable-train
+ parent: octavia-v2-dsvm-scenario
+ override-checkout: stable/train
+
+- job:
name: octavia-v2-dsvm-scenario-stable-stein
parent: octavia-v2-dsvm-scenario
override-checkout: stable/stein
@@ -473,6 +587,11 @@
- ^octavia_tempest_plugin/tests/(?!barbican_scenario/|\w+\.py).*
- job:
+ name: octavia-v2-dsvm-tls-barbican-stable-train
+ parent: octavia-v2-dsvm-tls-barbican
+ override-checkout: stable/train
+
+- job:
name: octavia-v2-dsvm-tls-barbican-stable-stein
parent: octavia-v2-dsvm-tls-barbican
override-checkout: stable/stein
@@ -510,6 +629,11 @@
USE_PYTHON3: False
- job:
+ name: octavia-v2-dsvm-spare-pool-stable-train
+ parent: octavia-v2-dsvm-spare-pool
+ override-checkout: stable/train
+
+- job:
name: octavia-v2-dsvm-spare-pool-stable-stein
parent: octavia-v2-dsvm-spare-pool
override-checkout: stable/stein
@@ -524,6 +648,49 @@
parent: octavia-v2-dsvm-py2-spare-pool
override-checkout: stable/queens
+- job:
+ name: octavia-v2-dsvm-cinder-amphora
+ parent: octavia-v2-dsvm-scenario
+ required-projects:
+ - openstack/cinder
+ - openstack/diskimage-builder
+ - openstack/octavia
+ - openstack/octavia-lib
+ - openstack/python-cinderclient
+ - openstack/python-octaviaclient
+ vars:
+ devstack_local_conf:
+ post-config:
+ $OCTAVIA_CONF:
+ controller_worker:
+ volume_driver: volume_cinder_driver
+ cinder:
+ volume_size: 2
+ devstack_services:
+ c-bak: true
+ ceilometer-acentral: false
+ ceilometer-acompute: false
+ ceilometer-alarm-evaluator: false
+ ceilometer-alarm-notifier: false
+ ceilometer-anotification: false
+ ceilometer-api: false
+ ceilometer-collector: false
+ c-sch: true
+ c-api: true
+ c-vol: true
+ cinder: true
+ octavia: true
+ o-api: true
+ o-cw: true
+ o-hm: true
+ o-hk: true
+ swift: false
+ s-account: false
+ s-container: false
+ s-object: false
+ s-proxy: false
+ tempest: true
+
# Temporary transitional aliases for gates used in other repos
# Remove once octavia has transitioned job names
- job:
@@ -555,7 +722,6 @@
loadbalancer_topology: ACTIVE_STANDBY
task_flow:
engine: parallel
- devstack_local_conf:
test-config:
"$TEMPEST_CONFIG":
load_balancer:
@@ -590,11 +756,6 @@
amphora_ssh_user: centos
- job:
- name: octavia-v2-act-stdby-iptables-dsvm-scenario-stable-stein
- parent: octavia-v2-act-stdby-iptables-dsvm-scenario
- override-checkout: stable/stein
-
-- job:
name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky
parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
override-checkout: stable/rocky
@@ -603,3 +764,37 @@
name: octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens
parent: octavia-v2-act-stdby-iptables-dsvm-py2-scenario
override-checkout: stable/queens
+
+- job:
+ name: octavia-v2-act-stdby-dsvm-scenario
+ parent: octavia-dsvm-live-base
+ vars:
+ devstack_local_conf:
+ post-config:
+ $OCTAVIA_CONF:
+ api_settings:
+ api_v1_enabled: False
+ controller_worker:
+ loadbalancer_topology: ACTIVE_STANDBY
+ task_flow:
+ engine: parallel
+ tempest_concurrency: 2
+ tempest_test_regex: ^octavia_tempest_plugin.tests.act_stdby_scenario.v2.test_active_standby\.
+ tox_envlist: all
+
+- job:
+ name: octavia-v2-act-stdby-dsvm-py2-scenario
+ parent: octavia-v2-act-stdby-dsvm-scenario
+ vars:
+ devstack_localrc:
+ USE_PYTHON3: False
+
+- job:
+ name: octavia-v2-act-stdby-dsvm-scenario-stable-train
+ parent: octavia-v2-act-stdby-dsvm-scenario
+ override-checkout: stable/train
+
+- job:
+ name: octavia-v2-act-stdby-dsvm-scenario-stable-stein
+ parent: octavia-v2-act-stdby-dsvm-scenario
+ override-checkout: stable/stein
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index 490d68b..6b363fe 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -10,14 +10,18 @@
jobs:
- octavia-v2-dsvm-noop-api
- octavia-v2-dsvm-noop-py2-api
+ - octavia-v2-dsvm-noop-api-stable-train
- octavia-v2-dsvm-noop-api-stable-stein
- octavia-v2-dsvm-noop-py2-api-stable-rocky
- octavia-v2-dsvm-noop-py2-api-stable-queens
- octavia-v2-dsvm-scenario
- octavia-v2-dsvm-py2-scenario
+ - octavia-v2-dsvm-scenario-stable-train
- octavia-v2-dsvm-scenario-stable-stein
- octavia-v2-dsvm-py2-scenario-stable-rocky
- octavia-v2-dsvm-py2-scenario-stable-queens
+ - octavia-v2-dsvm-scenario-ipv6-only:
+ voting: false
- octavia-v2-dsvm-scenario-centos-7:
voting: false
- octavia-v2-dsvm-scenario-ubuntu-bionic:
@@ -26,20 +30,22 @@
voting: false
- octavia-v2-act-stdby-dsvm-py2-scenario-two-node:
voting: false
- - octavia-v2-act-stdby-iptables-dsvm-scenario:
- voting: false
- - octavia-v2-act-stdby-iptables-dsvm-py2-scenario:
- voting: false
- - octavia-v2-act-stdby-iptables-dsvm-py2-scenario-centos-7:
- voting: false
- - octavia-v2-act-stdby-iptables-dsvm-scenario-stable-stein:
- voting: false
- octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-rocky:
voting: false
- octavia-v2-act-stdby-iptables-dsvm-py2-scenario-stable-queens:
voting: false
+ - octavia-v2-act-stdby-dsvm-scenario:
+ voting: false
+ - octavia-v2-act-stdby-dsvm-py2-scenario:
+ voting: false
+ - octavia-v2-act-stdby-dsvm-scenario-stable-train:
+ voting: false
+ - octavia-v2-act-stdby-dsvm-scenario-stable-stein:
+ voting: false
- octavia-v2-dsvm-tls-barbican:
voting: false
+ - octavia-v2-dsvm-tls-barbican-stable-train:
+ voting: false
- octavia-v2-dsvm-tls-barbican-stable-stein:
voting: false
- octavia-v2-dsvm-tls-barbican-stable-rocky:
@@ -50,22 +56,29 @@
voting: false
- octavia-v2-dsvm-py2-spare-pool:
voting: false
+ - octavia-v2-dsvm-spare-pool-stable-train:
+ voting: false
- octavia-v2-dsvm-spare-pool-stable-stein:
voting: false
- octavia-v2-dsvm-py2-spare-pool-stable-rocky:
voting: false
- octavia-v2-dsvm-py2-spare-pool-stable-queens:
voting: false
+ - octavia-v2-dsvm-cinder-amphora:
+ voting: false
gate:
+ fail-fast: true
queue: octavia
jobs:
- octavia-v2-dsvm-noop-api
- octavia-v2-dsvm-noop-py2-api
+ - octavia-v2-dsvm-noop-api-stable-train
- octavia-v2-dsvm-noop-api-stable-stein
- octavia-v2-dsvm-noop-py2-api-stable-rocky
- octavia-v2-dsvm-noop-py2-api-stable-queens
- octavia-v2-dsvm-scenario
- octavia-v2-dsvm-py2-scenario
+ - octavia-v2-dsvm-scenario-stable-train
- octavia-v2-dsvm-scenario-stable-stein
- octavia-v2-dsvm-py2-scenario-stable-rocky
- octavia-v2-dsvm-py2-scenario-stable-queens