Merge "Update links in README.rst"
diff --git a/README.rst b/README.rst
index 9cbf464..f2b51a7 100644
--- a/README.rst
+++ b/README.rst
@@ -41,7 +41,7 @@
To run a single test case, call with full path, for example::
- $ tox -e all-plugin -- octavia_tempest_plugin.tests.v2.scenario.test_basic_ops.BasicOpsTest.test_basic_ops
+ $ tox -e all-plugin -- octavia_tempest_plugin.tests.scenario.v2.test_traffic_ops.TrafficOperationsScenarioTest.test_basic_traffic
To retrieve a list of all tempest tests, run::
diff --git a/octavia_tempest_plugin/common/constants.py b/octavia_tempest_plugin/common/constants.py
index 871f930..4154e7b 100644
--- a/octavia_tempest_plugin/common/constants.py
+++ b/octavia_tempest_plugin/common/constants.py
@@ -192,14 +192,12 @@
UPDATED_AT, VIP_ADDRESS, VIP_NETWORK_ID, VIP_PORT_ID, VIP_SUBNET_ID,
VIP_QOS_POLICY_ID)
-SHOW_LISTENER_RESPONSE_FIELDS = (
+SHOW_LISTENER_RESPONSE_FIELDS = [
ID, NAME, DESCRIPTION, PROVISIONING_STATUS, OPERATING_STATUS,
ADMIN_STATE_UP, PROTOCOL, PROTOCOL_PORT, CONNECTION_LIMIT,
DEFAULT_TLS_CONTAINER_REF, SNI_CONTAINER_REFS, PROJECT_ID,
- DEFAULT_POOL_ID, L7_POLICIES, INSERT_HEADERS, CREATED_AT, UPDATED_AT,
- TIMEOUT_CLIENT_DATA, TIMEOUT_MEMBER_CONNECT, TIMEOUT_MEMBER_DATA,
- TIMEOUT_TCP_INSPECT
-)
+ DEFAULT_POOL_ID, L7_POLICIES, INSERT_HEADERS, CREATED_AT, UPDATED_AT
+]
SHOW_POOL_RESPONSE_FIELDS = (
ID, NAME, DESCRIPTION, PROVISIONING_STATUS, OPERATING_STATUS,
@@ -207,10 +205,10 @@
CREATED_AT, UPDATED_AT
)
-SHOW_MEMBER_RESPONSE_FIELDS = (
+SHOW_MEMBER_RESPONSE_FIELDS = [
ID, NAME, PROVISIONING_STATUS, OPERATING_STATUS, ADMIN_STATE_UP,
- ADDRESS, PROTOCOL_PORT, WEIGHT, BACKUP, MONITOR_PORT, MONITOR_ADDRESS
-)
+ ADDRESS, PROTOCOL_PORT, WEIGHT, MONITOR_PORT, MONITOR_ADDRESS
+]
SHOW_HEALTHMONITOR_RESPONSE_FIELDS = (
ID, NAME, PROVISIONING_STATUS, OPERATING_STATUS, ADMIN_STATE_UP,
@@ -229,9 +227,8 @@
KEY, INVERT
)
-SHOW_AMPHORA_RESPONSE_FIELDS = (
+SHOW_AMPHORA_RESPONSE_FIELDS = [
ID, LOADBALANCER_ID, COMPUTE_ID, LB_NETWORK_IP, VRRP_IP, HA_IP,
VRRP_PORT_ID, HA_PORT_ID, CERT_EXPIRATION, CERT_BUSY, ROLE, STATUS,
- VRRP_INTERFACE, VRRP_ID, VRRP_PRIORITY, CACHED_ZONE, IMAGE_ID,
- CREATED_AT, UPDATED_AT
-)
+ VRRP_INTERFACE, VRRP_ID, VRRP_PRIORITY, CACHED_ZONE
+]
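
The tuples above become lists so that the version-gated fields (the v2.1 listener timeouts, the member ``backup`` flag, and the amphora timestamps and image id) can be re-appended at runtime once a test detects a 2.1 endpoint. Appending straight to a shared module-level constant would mutate it for every later test in the same process, so the safe pattern is copy-then-extend; a minimal sketch, assuming a plain ``(major, minor)`` tuple for the version::

    SHOW_MEMBER_RESPONSE_FIELDS = [
        'id', 'name', 'provisioning_status', 'operating_status',
        'admin_state_up', 'address', 'protocol_port', 'weight',
        'monitor_port', 'monitor_address',
    ]

    def member_fields_for_version(api_version):
        """Return the member fields to verify for a given API version."""
        # Copy first; appending to the shared constant would leak the
        # extra field into every later test in the same process.
        fields = list(SHOW_MEMBER_RESPONSE_FIELDS)
        if api_version >= (2, 1):
            fields.append('backup')  # 'backup' was added in API v2.1
        return fields

    assert 'backup' in member_fields_for_version((2, 1))
    assert 'backup' not in member_fields_for_version((2, 0))
    assert 'backup' not in SHOW_MEMBER_RESPONSE_FIELDS  # constant untouched
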
diff --git a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
index f2aaf84..97e91d9 100644
--- a/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
+++ b/octavia_tempest_plugin/services/load_balancer/v2/base_client.py
@@ -361,6 +361,20 @@
wait_func = self._show_object
LOG.info("Starting cleanup for %s %s...", self.root_tag, obj_id)
+
+ try:
+ request_uri = '{0}/{1}'.format(uri, obj_id)
+ response, body = self.get(request_uri)
+ resp_obj = json.loads(body.decode('utf-8'))[self.root_tag]
+ if (response.status == 404 or
+ resp_obj['provisioning_status'] == const.DELETED):
+ raise exceptions.NotFound()
+ except exceptions.NotFound:
+ # Already gone, cleanup complete
+ LOG.info("%s %s is already gone. Cleanup considered complete.",
+ self.root_tag, obj_id)
+ return
+
LOG.info("Waiting for %s %s to be ACTIVE...",
wait_client.root_tag, wait_id)
try:
@@ -417,3 +431,53 @@
except exceptions.NotFound:
return True
return False
+
+ def get_max_api_version(self):
+ """Get the maximum version available on the API endpoint.
+
+ :return: Maximum version string available on the endpoint.
+ """
+ response, body = self.get('/')
+ self.expected_success(200, response.status)
+
+ versions_list = json.loads(body.decode('utf-8'))['versions']
+ current_versions = (version for version in versions_list if
+ version['status'] == 'CURRENT')
+ max_version = '0.0'
+ for version in current_versions:
+
+ ver_string = version['id']
+ if ver_string.startswith("v"):
+ ver_string = ver_string[1:]
+
+ ver_split = list(map(int, ver_string.split('.')))
+ max_split = list(map(int, max_version.split('.')))
+
+ if len(ver_split) > 2:
+ raise exceptions.InvalidAPIVersionString(version=ver_string)
+
+ if ver_split[0] > max_split[0] or (
+ ver_split[0] == max_split[0] and
+ ver_split[1] >= max_split[1]):
+ max_version = ver_string
+
+ if max_version == '0.0':
+ raise exceptions.InvalidAPIVersionString(version=max_version)
+
+ return max_version
+
+ def is_version_supported(self, api_version, version):
+ """Check if a version is supported by the API.
+
+ :param api_version: Reference endpoint API version.
+ :param version: Version to check against API version.
+ :return: True if the version is supported, False otherwise.
+ """
+
+ api_split = list(map(int, api_version.split('.')))
+ ver_split = list(map(int, version.split('.')))
+
+ if api_split[0] > ver_split[0] or (
+ api_split[0] == ver_split[0] and api_split[1] >= ver_split[1]):
+ return True
+ return False
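
These two helpers back every ``is_version_supported(self.api_version, '2.1')`` guard added in the test modules below. A standalone sketch of the comparison semantics, simplified from the methods above (the real ones live on the REST client, read the version document from ``GET /``, and raise tempest's ``InvalidAPIVersionString`` on malformed input)::

    def max_api_version(versions):
        """Pick the highest CURRENT major.minor version from GET /."""
        max_ver = (0, 0)
        for version in versions:
            if version['status'] != 'CURRENT':
                continue
            ver_string = version['id']
            if ver_string.startswith('v'):
                ver_string = ver_string[1:]
            parts = tuple(int(p) for p in ver_string.split('.'))
            if len(parts) != 2:
                raise ValueError('bad version string: %s' % ver_string)
            # Two-element tuples compare exactly like the explicit
            # major/minor checks in the method above.
            max_ver = max(max_ver, parts)
        return '{0}.{1}'.format(*max_ver)

    def is_version_supported(api_version, version):
        """True when the endpoint version is at least the requested one."""
        api = tuple(int(p) for p in api_version.split('.'))
        ver = tuple(int(p) for p in version.split('.'))
        return api >= ver

    assert max_api_version([{'id': 'v2.0', 'status': 'SUPPORTED'},
                            {'id': 'v2.1', 'status': 'CURRENT'}]) == '2.1'
    assert is_version_supported('2.1', '2.0')
    assert not is_version_supported('2.0', '2.1')
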
diff --git a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
index a1bb301..4a43e7b 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_healthmonitor.py
@@ -788,3 +788,10 @@
const.PROVISIONING_STATUS,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
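
The waiter appended here, and mirrored at the end of ``resource_cleanup`` in the l7policy, l7rule, listener, member, and pool modules below, lets the parent load balancer finish the PENDING_UPDATE triggered by deleting its last child object; tearing the load balancer down while that update is still in flight can fail with an immutable-object conflict. As a reusable sketch built on this plugin's waiter (``settle_parent_lb`` is an illustrative name, not part of the plugin)::

    from tempest import config

    from octavia_tempest_plugin.common import constants as const
    from octavia_tempest_plugin.tests import waiters

    CONF = config.CONF

    def settle_parent_lb(lb_client, lb_id):
        """Block until the parent LB has absorbed a child deletion."""
        waiters.wait_for_status(
            lb_client.show_loadbalancer, lb_id,
            const.PROVISIONING_STATUS, const.ACTIVE,
            CONF.load_balancer.check_interval,
            CONF.load_balancer.check_timeout)
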
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
index 90c6db4..255bbde 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7policy.py
@@ -787,3 +787,10 @@
const.PROVISIONING_STATUS,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
index 3520b35..395a3ad 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_l7rule.py
@@ -673,3 +673,10 @@
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout,
l7policy_id=self.l7policy_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index de412c6..38da0ae 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -76,10 +76,6 @@
const.PROTOCOL_PORT: 80,
const.LOADBALANCER_ID: self.lb_id,
const.CONNECTION_LIMIT: 200,
- const.TIMEOUT_CLIENT_DATA: 1000,
- const.TIMEOUT_MEMBER_CONNECT: 1000,
- const.TIMEOUT_MEMBER_DATA: 1000,
- const.TIMEOUT_TCP_INSPECT: 50,
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true"
@@ -93,6 +89,14 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ listener_kwargs.update({
+ const.TIMEOUT_CLIENT_DATA: 1000,
+ const.TIMEOUT_MEMBER_CONNECT: 1000,
+ const.TIMEOUT_MEMBER_DATA: 1000,
+ const.TIMEOUT_TCP_INSPECT: 50,
+ })
# Test that a user without the load balancer role cannot
# create a listener
@@ -146,10 +150,12 @@
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
self.assertTrue(
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
- self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+ self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
@decorators.idempotent_id('78ba6eb0-178c-477e-9156-b6775ca7b271')
def test_listener_list(self):
@@ -336,7 +342,14 @@
listeners[2][const.DESCRIPTION])
# Test fields
- for field in const.SHOW_LISTENER_RESPONSE_FIELDS:
+ # Copy the list so the shared constant is not mutated between tests
+ show_listener_response_fields = list(const.SHOW_LISTENER_RESPONSE_FIELDS)
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ show_listener_response_fields.append('timeout_client_data')
+ show_listener_response_fields.append('timeout_member_connect')
+ show_listener_response_fields.append('timeout_member_data')
+ show_listener_response_fields.append('timeout_tcp_inspect')
+ for field in show_listener_response_fields:
if field in (const.DEFAULT_POOL_ID, const.L7_POLICIES):
continue
listeners = self.mem_listener_client.list_listeners(
@@ -411,10 +424,6 @@
const.PROTOCOL_PORT: 81,
const.LOADBALANCER_ID: self.lb_id,
const.CONNECTION_LIMIT: 200,
- const.TIMEOUT_CLIENT_DATA: 1000,
- const.TIMEOUT_MEMBER_CONNECT: 1000,
- const.TIMEOUT_MEMBER_DATA: 1000,
- const.TIMEOUT_TCP_INSPECT: 50,
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true"
@@ -425,6 +434,15 @@
# const.SNI_CONTAINER_REFS: [],
}
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ listener_kwargs.update({
+ const.TIMEOUT_CLIENT_DATA: 1000,
+ const.TIMEOUT_MEMBER_CONNECT: 1000,
+ const.TIMEOUT_MEMBER_DATA: 1000,
+ const.TIMEOUT_TCP_INSPECT: 50,
+ })
+
listener = self.mem_listener_client.create_listener(**listener_kwargs)
self.addClassResourceCleanup(
self.mem_listener_client.cleanup_listener,
@@ -469,10 +487,13 @@
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
self.assertTrue(
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
- self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+ self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
# Test that a user with lb_admin role can see the listener
if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
@@ -525,10 +546,6 @@
const.PROTOCOL_PORT: 82,
const.LOADBALANCER_ID: self.lb_id,
const.CONNECTION_LIMIT: 200,
- const.TIMEOUT_CLIENT_DATA: 1000,
- const.TIMEOUT_MEMBER_CONNECT: 1000,
- const.TIMEOUT_MEMBER_DATA: 1000,
- const.TIMEOUT_TCP_INSPECT: 50,
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true"
@@ -538,6 +555,14 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ listener_kwargs.update({
+ const.TIMEOUT_CLIENT_DATA: 1000,
+ const.TIMEOUT_MEMBER_CONNECT: 1000,
+ const.TIMEOUT_MEMBER_DATA: 1000,
+ const.TIMEOUT_TCP_INSPECT: 50,
+ })
listener = self.mem_listener_client.create_listener(**listener_kwargs)
self.addClassResourceCleanup(
@@ -573,10 +598,12 @@
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
self.assertTrue(
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
- self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+ self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
# Test that a user, without the load balancer member role, cannot
# use this command
@@ -616,10 +643,6 @@
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
const.CONNECTION_LIMIT: 400,
- const.TIMEOUT_CLIENT_DATA: 2000,
- const.TIMEOUT_MEMBER_CONNECT: 2000,
- const.TIMEOUT_MEMBER_DATA: 2000,
- const.TIMEOUT_TCP_INSPECT: 100,
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "false",
const.X_FORWARDED_PORT: "false"
@@ -629,6 +652,15 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ listener_update_kwargs.update({
+ const.TIMEOUT_CLIENT_DATA: 2000,
+ const.TIMEOUT_MEMBER_CONNECT: 2000,
+ const.TIMEOUT_MEMBER_DATA: 2000,
+ const.TIMEOUT_TCP_INSPECT: 100,
+ })
+
listener = self.mem_listener_client.update_listener(
listener[const.ID], **listener_update_kwargs)
@@ -665,10 +697,12 @@
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
self.assertFalse(
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
- self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
- self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
- self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
+ self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
+ self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
+ self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
@decorators.idempotent_id('16f11c82-f069-4592-8954-81b35a98e3b7')
def test_listener_delete(self):
@@ -723,3 +757,10 @@
const.PROVISIONING_STATUS,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 19f6044..35f2dde 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -90,6 +90,12 @@
const.ACTIVE,
CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
self.assertTrue(lb[const.ADMIN_STATE_UP])
parser.parse(lb[const.CREATED_AT])
@@ -102,6 +108,7 @@
self.assertEqual(const.OFFLINE, lb[const.OPERATING_STATUS])
else:
self.assertEqual(const.ONLINE, lb[const.OPERATING_STATUS])
+
self.assertEqual(self.os_roles_lb_member.credentials.project_id,
lb[const.PROJECT_ID])
self.assertEqual(CONF.load_balancer.provider, lb[const.PROVIDER])
@@ -270,6 +277,13 @@
const.ACTIVE,
CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ lb1 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
# second is both a simple and a reliable way to accomplish this.
@@ -293,6 +307,13 @@
const.ACTIVE,
CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ lb2 = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+
# Time resolution for created_at is only to the second, and we need to
# ensure that each object has a distinct creation time. Delaying one
# second is both a simple and a reliable way to accomplish this.
@@ -709,6 +730,12 @@
const.ACTIVE,
CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ lb = waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
# Test that a user, without the load balancer member role, cannot
# use this method
diff --git a/octavia_tempest_plugin/tests/api/v2/test_member.py b/octavia_tempest_plugin/tests/api/v2/test_member.py
index 7c7a5c4..18073cc 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_member.py
@@ -132,10 +132,15 @@
const.ADDRESS: member_address,
const.PROTOCOL_PORT: 80,
const.WEIGHT: 50,
- const.BACKUP: False,
const.MONITOR_ADDRESS: member_monitor_address,
const.MONITOR_PORT: 8080,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member_kwargs.update({
+ const.BACKUP: False,
+ })
+
if self.lb_member_vip_subnet:
member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID]
@@ -173,8 +178,13 @@
self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
- const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+ const.PROTOCOL_PORT, const.WEIGHT,
const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ equal_items.append(const.BACKUP)
+
if const.SUBNET_ID in member_kwargs:
equal_items.append(const.SUBNET_ID)
else:
@@ -351,7 +361,11 @@
members[2][const.PROTOCOL_PORT])
# Test fields
- for field in const.SHOW_MEMBER_RESPONSE_FIELDS:
+ # Copy the list so the shared constant is not mutated between tests
+ show_member_response_fields = list(const.SHOW_MEMBER_RESPONSE_FIELDS)
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ show_member_response_fields.append('backup')
+ for field in show_member_response_fields:
members = self.mem_member_client.list_members(
pool_id, query_params='{fields}={field}'.format(
fields=const.FIELDS, field=field))
@@ -421,10 +435,14 @@
const.ADDRESS: '192.0.2.1',
const.PROTOCOL_PORT: 81,
const.WEIGHT: 50,
- const.BACKUP: False,
const.MONITOR_ADDRESS: '192.0.2.2',
const.MONITOR_PORT: 8080,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member_kwargs.update({
+ const.BACKUP: False,
+ })
if self.lb_member_vip_subnet:
member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID]
@@ -454,8 +472,13 @@
self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
- const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+ const.PROTOCOL_PORT, const.WEIGHT,
const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ equal_items.append(const.BACKUP)
+
if const.SUBNET_ID in member_kwargs:
equal_items.append(const.SUBNET_ID)
else:
@@ -513,10 +536,15 @@
const.ADDRESS: '192.0.2.1',
const.PROTOCOL_PORT: 82,
const.WEIGHT: 50,
- const.BACKUP: False,
const.MONITOR_ADDRESS: '192.0.2.2',
const.MONITOR_PORT: 8080,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member_kwargs.update({
+ const.BACKUP: False,
+ })
+
if self.lb_member_vip_subnet:
member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID]
@@ -553,8 +581,13 @@
UUID(member[const.ID])
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
- const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+ const.PROTOCOL_PORT, const.WEIGHT,
const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ equal_items.append(const.BACKUP)
+
if const.SUBNET_ID in member_kwargs:
equal_items.append(const.SUBNET_ID)
else:
@@ -609,10 +642,15 @@
const.NAME: new_name,
const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
const.WEIGHT: member[const.WEIGHT] + 1,
- const.BACKUP: not member[const.BACKUP],
const.MONITOR_ADDRESS: '192.0.2.3',
const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member_update_kwargs.update({
+ const.BACKUP: not member[const.BACKUP]
+ })
+
member = self.mem_member_client.update_member(
member[const.ID], **member_update_kwargs)
@@ -642,7 +680,12 @@
# Test changed items
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
- const.BACKUP, const.MONITOR_ADDRESS, const.MONITOR_PORT]
+ const.MONITOR_ADDRESS, const.MONITOR_PORT]
+
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ equal_items.append(const.BACKUP)
+
for item in equal_items:
self.assertEqual(member_update_kwargs[item], member[item])
@@ -688,10 +731,15 @@
const.ADDRESS: '192.0.2.1',
const.PROTOCOL_PORT: 80,
const.WEIGHT: 50,
- const.BACKUP: False,
const.MONITOR_ADDRESS: '192.0.2.2',
const.MONITOR_PORT: 8080,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member1_kwargs.update({
+ const.BACKUP: False,
+ })
+
if self.lb_member_vip_subnet:
member1_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID]
@@ -717,10 +765,15 @@
const.ADDRESS: '192.0.2.3',
const.PROTOCOL_PORT: 81,
const.WEIGHT: 51,
- const.BACKUP: True,
const.MONITOR_ADDRESS: '192.0.2.4',
const.MONITOR_PORT: 8081,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member2_kwargs.update({
+ const.BACKUP: True,
+ })
+
if self.lb_member_vip_subnet:
member2_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID]
@@ -745,10 +798,15 @@
const.ADDRESS: '192.0.2.5',
const.PROTOCOL_PORT: 82,
const.WEIGHT: 52,
- const.BACKUP: True,
const.MONITOR_ADDRESS: '192.0.2.6',
const.MONITOR_PORT: 8082,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member3_kwargs.update({
+ const.BACKUP: True,
+ })
+
if self.lb_member_vip_subnet:
member3_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID]
@@ -857,3 +915,10 @@
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout,
pool_id=self.pool_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
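
Every create and update payload in this module now gets the same treatment: ``backup`` only exists from API v2.1 onward, and an older endpoint would reject the unknown attribute during request validation. Condensed into a generic helper (the helper name and tuple-based version are illustrative)::

    def build_request(base_fields, endpoint_version, v21_fields=None):
        """Assemble a request body, adding v2.1-only fields when safe."""
        request = dict(base_fields)
        # Pre-2.1 endpoints fail validation on unknown attributes, so
        # the newer fields are only sent when the API reports >= 2.1.
        if v21_fields and endpoint_version >= (2, 1):
            request.update(v21_fields)
        return request

    req = build_request({'address': '192.0.2.1', 'protocol_port': 80,
                         'weight': 50},
                        endpoint_version=(2, 0),
                        v21_fields={'backup': False})
    assert 'backup' not in req
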
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
index f73a67c..28b95b6 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_pool.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -711,3 +711,10 @@
const.PROVISIONING_STATUS,
CONF.load_balancer.check_interval,
CONF.load_balancer.check_timeout)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
index 0a1d25a..4f1480c 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_amphora.py
@@ -117,41 +117,53 @@
self.assertTrue(
len(amphorae) >= 2 * self._expected_amp_count(amphorae))
- # Make sure all of the fields exist on the amp list records
- for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
- self.assertIn(field, amphorae[0])
+ # Copy the list so the shared constant is not mutated between tests
+ show_amphora_response_fields = list(const.SHOW_AMPHORA_RESPONSE_FIELDS)
+ if self.mem_amphora_client.is_version_supported(
+ self.api_version, '2.1'):
+ show_amphora_response_fields.append('created_at')
+ show_amphora_response_fields.append('updated_at')
+ show_amphora_response_fields.append('image_id')
- amp1_id = amphorae[0][const.ID]
- amp1 = self.os_admin.amphora_client.show_amphora(amphora_id=amp1_id)
+ for amp in amphorae:
- # Make sure all of the fields exist on the amp show record
- for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
- self.assertIn(field, amp1)
+ # Make sure all of the fields exist on the amp list records
+ for field in show_amphora_response_fields:
+ self.assertIn(field, amp)
- # Verify a few of the fields are the right type
- parser.parse(amp1[const.CREATED_AT])
- parser.parse(amp1[const.UPDATED_AT])
- UUID(amp1[const.ID])
- UUID(amp1[const.COMPUTE_ID])
- UUID(amp1[const.VRRP_PORT_ID])
- self.assertIn(amp1[const.ROLE], const.AMPHORA_ROLES)
- self.assertIn(amp1[const.STATUS], const.AMPHORA_STATUSES)
- # We might have gotten unassigned/spare amps?
- if amp1[const.STATUS] == const.STATUS_ALLOCATED:
- UUID(amp1[const.HA_PORT_ID])
- UUID(amp1[const.LOADBALANCER_ID])
+ amp_id = amp[const.ID]
+ amp_obj = self.os_admin.amphora_client.show_amphora(
+ amphora_id=amp_id)
- # Test that all of the fields from the amp list match those from a show
- for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
- self.assertEqual(amphorae[0][field], amp1[field])
+ # Make sure all of the fields exist on the amp show record
+ for field in show_amphora_response_fields:
+ self.assertIn(field, amp_obj)
- amp2_id = amphorae[1][const.ID]
- amp2 = self.os_admin.amphora_client.show_amphora(amphora_id=amp2_id)
+ # Verify a few of the fields are the right type
+ if self.mem_amphora_client.is_version_supported(
+ self.api_version, '2.1'):
+ parser.parse(amp_obj[const.CREATED_AT])
+ parser.parse(amp_obj[const.UPDATED_AT])
+ UUID(amp_obj[const.ID])
+ self.assertIn(amp_obj[const.STATUS], const.AMPHORA_STATUSES)
- # Test that all of the fields from the amp list match those from a show
- # (on another amphora)
- for field in const.SHOW_AMPHORA_RESPONSE_FIELDS:
- self.assertEqual(amphorae[1][field], amp2[field])
+ # We may have gotten unassigned or spare amphorae.
+ if amp_obj[const.STATUS] == const.STATUS_ALLOCATED:
+ # Only check the state of these fields for the LB we created;
+ # some fields (e.g. HA_PORT_ID) may not yet be populated on
+ # amphorae belonging to tests running in parallel.
+ if lb_id == amp_obj[const.LOADBALANCER_ID]:
+ UUID(amp_obj[const.HA_PORT_ID])
+ UUID(amp_obj[const.LOADBALANCER_ID])
+ UUID(amp_obj[const.COMPUTE_ID])
+ UUID(amp_obj[const.VRRP_PORT_ID])
+ self.assertIn(amp_obj[const.ROLE], const.AMPHORA_ROLES)
+ else:
+ self.assertIsNone(amp_obj[const.ROLE])
+
+ # Test that all of the fields from the amp list match those
+ # from a show
+ for field in show_amphora_response_fields:
+ self.assertEqual(amp[field], amp_obj[field])
# Test filtering by loadbalancer_id
amphorae = self.os_admin.amphora_client.list_amphorae(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index c07bb4a..685c200 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -113,10 +113,6 @@
const.PROTOCOL_PORT: 80,
const.LOADBALANCER_ID: self.lb_id,
const.CONNECTION_LIMIT: 200,
- const.TIMEOUT_CLIENT_DATA: 1000,
- const.TIMEOUT_MEMBER_CONNECT: 1000,
- const.TIMEOUT_MEMBER_DATA: 1000,
- const.TIMEOUT_TCP_INSPECT: 50,
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "true",
const.X_FORWARDED_PORT: "true"
@@ -126,6 +122,15 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ listener_kwargs.update({
+ const.TIMEOUT_CLIENT_DATA: 1000,
+ const.TIMEOUT_MEMBER_CONNECT: 1000,
+ const.TIMEOUT_MEMBER_DATA: 1000,
+ const.TIMEOUT_TCP_INSPECT: 50,
+ })
+
listener = self.mem_listener_client.create_listener(**listener_kwargs)
self.addClassResourceCleanup(
self.mem_listener_client.cleanup_listener,
@@ -160,11 +165,13 @@
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
self.assertTrue(
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
- self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
- self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
self.assertEqual(self.pool1_id, listener[const.DEFAULT_POOL_ID])
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ self.assertEqual(1000, listener[const.TIMEOUT_CLIENT_DATA])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_CONNECT])
+ self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
+ self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
# Listener update
new_name = data_utils.rand_name("lb_member_listener1-update")
@@ -175,10 +182,6 @@
const.DESCRIPTION: new_description,
const.ADMIN_STATE_UP: True,
const.CONNECTION_LIMIT: 400,
- const.TIMEOUT_CLIENT_DATA: 2000,
- const.TIMEOUT_MEMBER_CONNECT: 2000,
- const.TIMEOUT_MEMBER_DATA: 2000,
- const.TIMEOUT_TCP_INSPECT: 100,
const.INSERT_HEADERS: {
const.X_FORWARDED_FOR: "false",
const.X_FORWARDED_PORT: "false"
@@ -188,6 +191,15 @@
# const.DEFAULT_TLS_CONTAINER_REF: '',
# const.SNI_CONTAINER_REFS: [],
}
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ listener_update_kwargs.update({
+ const.TIMEOUT_CLIENT_DATA: 2000,
+ const.TIMEOUT_MEMBER_CONNECT: 2000,
+ const.TIMEOUT_MEMBER_DATA: 2000,
+ const.TIMEOUT_TCP_INSPECT: 100,
+ })
+
listener = self.mem_listener_client.update_listener(
listener[const.ID], **listener_update_kwargs)
@@ -226,11 +238,13 @@
strutils.bool_from_string(insert_headers[const.X_FORWARDED_FOR]))
self.assertFalse(
strutils.bool_from_string(insert_headers[const.X_FORWARDED_PORT]))
- self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
- self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
- self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
- self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
self.assertEqual(self.pool2_id, listener[const.DEFAULT_POOL_ID])
+ if self.mem_listener_client.is_version_supported(
+ self.api_version, '2.1'):
+ self.assertEqual(2000, listener[const.TIMEOUT_CLIENT_DATA])
+ self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_CONNECT])
+ self.assertEqual(2000, listener[const.TIMEOUT_MEMBER_DATA])
+ self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
# Listener delete
waiters.wait_for_status(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_member.py b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
index 2a778e9..b30d651 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_member.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_member.py
@@ -109,10 +109,15 @@
const.ADDRESS: '192.0.2.1',
const.PROTOCOL_PORT: 80,
const.WEIGHT: 50,
- const.BACKUP: False,
const.MONITOR_ADDRESS: '192.0.2.2',
const.MONITOR_PORT: 8080,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member_kwargs.update({
+ const.BACKUP: False,
+ })
+
if self.lb_member_vip_subnet:
member_kwargs[const.SUBNET_ID] = self.lb_member_vip_subnet[
const.ID]
@@ -139,11 +144,25 @@
parser.parse(member[const.CREATED_AT])
parser.parse(member[const.UPDATED_AT])
UUID(member[const.ID])
- self.assertEqual(const.NO_MONITOR, member[const.OPERATING_STATUS])
+
+ # Members may initially be in a transitional operating status
+ # such as DOWN or MAINT; give them some time to stabilize on
+ # NO_MONITOR, the expected live status.
+ member = waiters.wait_for_status(
+ self.mem_member_client.show_member,
+ member[const.ID], const.OPERATING_STATUS,
+ const.NO_MONITOR,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout,
+ pool_id=self.pool_id)
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.ADDRESS,
- const.PROTOCOL_PORT, const.WEIGHT, const.BACKUP,
+ const.PROTOCOL_PORT, const.WEIGHT,
const.MONITOR_ADDRESS, const.MONITOR_PORT]
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ equal_items.append(const.BACKUP)
+
if const.SUBNET_ID in member_kwargs:
equal_items.append(const.SUBNET_ID)
else:
@@ -159,10 +178,15 @@
const.NAME: new_name,
const.ADMIN_STATE_UP: not member[const.ADMIN_STATE_UP],
const.WEIGHT: member[const.WEIGHT] + 1,
- const.BACKUP: not member[const.BACKUP],
const.MONITOR_ADDRESS: '192.0.2.3',
const.MONITOR_PORT: member[const.MONITOR_PORT] + 1,
}
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ member_update_kwargs.update({
+ const.BACKUP: not member[const.BACKUP],
+ })
+
member = self.mem_member_client.update_member(
member[const.ID], **member_update_kwargs)
@@ -181,7 +205,11 @@
# Test changed items
equal_items = [const.NAME, const.ADMIN_STATE_UP, const.WEIGHT,
- const.BACKUP, const.MONITOR_ADDRESS, const.MONITOR_PORT]
+ const.MONITOR_ADDRESS, const.MONITOR_PORT]
+ if self.mem_member_client.is_version_supported(
+ self.api_version, '2.1'):
+ equal_items.append(const.BACKUP)
+
for item in equal_items:
self.assertEqual(member_update_kwargs[item], member[item])
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 2cc16e0..c70c2f1 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -53,6 +53,7 @@
client_manager = clients.ManagerV2
webserver1_response = 1
webserver2_response = 5
+ used_ips = []
@classmethod
def skip_checks(cls):
@@ -123,6 +124,8 @@
conf_lb = CONF.load_balancer
+ cls.api_version = cls.mem_lb_client.get_max_api_version()
+
if conf_lb.test_subnet_override and not conf_lb.test_network_override:
raise exceptions.InvalidConfiguration(
"Configuration value test_network_override must be "
@@ -188,15 +191,16 @@
if cls.lb_member_2_subnet:
LOG.debug('Octavia Setup: lb_member_2_subnet = {}'.format(
cls.lb_member_2_subnet[const.ID]))
- if cls.lb_member_vip_ipv6_subnet:
- LOG.debug('Octavia Setup: lb_member_vip_ipv6_subnet = {}'.format(
- cls.lb_member_vip_ipv6_subnet[const.ID]))
- if cls.lb_member_1_ipv6_subnet:
- LOG.debug('Octavia Setup: lb_member_1_ipv6_subnet = {}'.format(
- cls.lb_member_1_ipv6_subnet[const.ID]))
- if cls.lb_member_2_ipv6_subnet:
- LOG.debug('Octavia Setup: lb_member_2_ipv6_subnet = {}'.format(
- cls.lb_member_2_ipv6_subnet[const.ID]))
+ if CONF.load_balancer.test_with_ipv6:
+ if cls.lb_member_vip_ipv6_subnet:
+ LOG.debug('Octavia Setup: lb_member_vip_ipv6_subnet = '
+ '{}'.format(cls.lb_member_vip_ipv6_subnet[const.ID]))
+ if cls.lb_member_1_ipv6_subnet:
+ LOG.debug('Octavia Setup: lb_member_1_ipv6_subnet = {}'.format(
+ cls.lb_member_1_ipv6_subnet[const.ID]))
+ if cls.lb_member_2_ipv6_subnet:
+ LOG.debug('Octavia Setup: lb_member_2_ipv6_subnet = {}'.format(
+ cls.lb_member_2_ipv6_subnet[const.ID]))
@classmethod
def _create_networks(cls):
@@ -366,6 +370,9 @@
ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
if cls.lb_member_vip_subnet:
ip_index = data_utils.rand_int_id(start=10, end=100)
+ while ip_index in cls.used_ips:
+ ip_index = data_utils.rand_int_id(start=10, end=100)
+ cls.used_ips.append(ip_index)
if ip_version == 4:
network = ipaddress.IPv4Network(
six.u(CONF.load_balancer.vip_subnet_cidr))
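
The new ``used_ips`` bookkeeping keeps concurrently created VIPs from landing on the same host index in the shared subnet. As a standalone sketch; unlike the loop above, this version also guards against pool exhaustion, since the 10-100 range only holds 91 indexes::

    import random

    used_ips = []

    def pick_unused_ip_index(start=10, end=100):
        """Draw a random host index that has not been handed out yet."""
        if len(used_ips) > (end - start):
            raise RuntimeError('host index pool exhausted')
        ip_index = random.randint(start, end)
        while ip_index in used_ips:  # rejection-sample until unused
            ip_index = random.randint(start, end)
        used_ips.append(ip_index)
        return ip_index
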
diff --git a/playbooks/Octavia-DSVM/pre.yaml b/playbooks/Octavia-DSVM/pre.yaml
index 1e7987c..9d6beb7 100644
--- a/playbooks/Octavia-DSVM/pre.yaml
+++ b/playbooks/Octavia-DSVM/pre.yaml
@@ -2,10 +2,11 @@
name: Octavia DSVM jobs pre-run playbook
tasks:
- shell:
+ executable: /bin/bash
cmd: |
set -e
set -x
- if $(egrep --quiet '(vmx|svm)' /proc/cpuinfo) && [[ ! $(hostname) =~ "ovh" ]]; then
+ if $(egrep --quiet '(vmx|svm)' /proc/cpuinfo) && [[ ( ! $(hostname) =~ "ovh" && ! $(hostname) =~ "limestone" ) ]]; then
export DEVSTACK_GATE_LIBVIRT_TYPE=kvm
fi
diff --git a/tox.ini b/tox.ini
index 70b834d..d7d9ddd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -15,12 +15,15 @@
stestr slowest
[testenv:pep8]
+basepython = python3
commands = flake8 {posargs}
[testenv:venv]
+basepython = python3
commands = {posargs}
[testenv:cover]
+basepython = python3
setenv =
{[testenv]setenv}
PYTHON=coverage run --source octavia_tempest_plugin --parallel-mode
@@ -35,6 +38,7 @@
coverage xml -o cover/coverage.xml
[testenv:docs]
+basepython = python3
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
@@ -45,6 +49,7 @@
sphinx-build -W -b html doc/source doc/build/html
[testenv:releasenotes]
+basepython = python3
deps =
-c{env:UPPER_CONSTRAINTS_FILE:https://git.openstack.org/cgit/openstack/requirements/plain/upper-constraints.txt}
-r{toxinidir}/requirements.txt
@@ -53,6 +58,7 @@
sphinx-build -a -E -W -d releasenotes/build/doctrees -b html releasenotes/source releasenotes/build/html
[testenv:debug]
+basepython = python3
commands = oslo_debug_helper {posargs}
[flake8]
@@ -64,6 +70,7 @@
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build
[testenv:genconfig]
+basepython = python3
whitelist_externals = mkdir
commands =
mkdir -p etc
diff --git a/zuul.d/jobs.yaml b/zuul.d/jobs.yaml
index c56bee8..d1414b1 100644
--- a/zuul.d/jobs.yaml
+++ b/zuul.d/jobs.yaml
@@ -15,7 +15,7 @@
- ^releasenotes/.*$
vars:
devstack_localrc:
- TEMPEST_PLUGINS: "'{{ ansible_user_dir }}/src/git.openstack.org/openstack/octavia-tempest-plugin'"
+ TEMPEST_PLUGINS: "'/opt/stack/octavia-tempest-plugin'"
devstack_local_conf:
post-config:
$OCTAVIA_CONF:
@@ -51,6 +51,7 @@
- job:
name: octavia-dsvm-live-base
parent: octavia-dsvm-base
+ timeout: 9000
required-projects:
- openstack/barbican
- openstack/diskimage-builder
@@ -107,7 +108,7 @@
- job:
name: octavia-v2-dsvm-scenario
- parent: octavia-dsvm-base
+ parent: octavia-dsvm-live-base
vars:
devstack_local_conf:
post-config:
@@ -124,3 +125,20 @@
vars:
devstack_localrc:
USE_PYTHON3: true
+- job:
+ name: octavia-v2-dsvm-scenario-centos-7
+ parent: octavia-v2-dsvm-scenario
+ nodeset: devstack-single-node-centos-7
+ vars:
+ devstack_localrc:
+ OCTAVIA_AMP_BASE_OS: centos
+ OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: 7
+ OCTAVIA_AMP_IMAGE_SIZE: 3
+
+- job:
+ name: octavia-v2-dsvm-scenario-ubuntu-bionic
+ parent: octavia-v2-dsvm-scenario
+ vars:
+ devstack_localrc:
+ OCTAVIA_AMP_BASE_OS: ubuntu
+ OCTAVIA_AMP_DISTRIBUTION_RELEASE_ID: bionic
diff --git a/zuul.d/projects.yaml b/zuul.d/projects.yaml
index f09a634..2064db5 100644
--- a/zuul.d/projects.yaml
+++ b/zuul.d/projects.yaml
@@ -1,12 +1,19 @@
# Note: Some official OpenStack wide jobs are still defined in the
# project-config repository
- project:
+ templates:
+ - check-requirements
+ - publish-openstack-docs-pti
+ - tempest-plugin-jobs
check:
jobs:
- octavia-v2-dsvm-noop-api
- octavia-v2-dsvm-noop-py35-api
- octavia-v2-dsvm-scenario
- octavia-v2-dsvm-py35-scenario
+ - octavia-v2-dsvm-scenario-centos-7
+ - octavia-v2-dsvm-scenario-ubuntu-bionic:
+ voting: false
gate:
queue: octavia
jobs:
@@ -14,3 +21,4 @@
- octavia-v2-dsvm-noop-py35-api
- octavia-v2-dsvm-scenario
- octavia-v2-dsvm-py35-scenario
+ - octavia-v2-dsvm-scenario-centos-7