Create API and scenario tests for pools

This patch implements pool API and scenario tests for the
Octavia Tempest Plugin.
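
For reference, the new test modules can be exercised on their own, for
example with the standard Tempest CLI (exact invocation may vary with
the local setup; the regex simply matches the modules added here):

    tempest run --regex 'octavia_tempest_plugin.tests.(api|scenario).v2.test_pool'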
Change-Id: Id8dadfa292a698273240a4d3513288053e7c62c5
Story: 2001387
Task: 5969
diff --git a/octavia_tempest_plugin/tests/api/v2/test_listener.py b/octavia_tempest_plugin/tests/api/v2/test_listener.py
index 204906f..12a045f 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_listener.py
@@ -38,8 +38,7 @@
super(ListenerAPITest, cls).resource_setup()
lb_name = data_utils.rand_name("lb_member_lb1_listener")
- lb_kwargs = {const.ADMIN_STATE_UP: False,
- const.PROVIDER: CONF.load_balancer.provider,
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
const.NAME: lb_name}
ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
@@ -61,8 +60,8 @@
def test_listener_create(self):
"""Tests listener create and basic show APIs.
- * Tests that users without the listener member role cannot
- * create listeners.
+ * Tests that users without the loadbalancer member role cannot
+ create listeners.
* Create a fully populated listener.
* Show listener details.
* Validate the show reflects the requested values.
@@ -106,12 +105,24 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
listener = waiters.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ listener = waiters.wait_for_status(
+ self.mem_listener_client.show_listener,
+ listener[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
self.assertEqual(listener_name, listener[const.NAME])
self.assertEqual(listener_description, listener[const.DESCRIPTION])
@@ -157,7 +168,7 @@
name=lb_name,
vip_network_id=self.lb_member_vip_net[const.ID])
lb_id = lb[const.ID]
- self.addClassResourceCleanup(
+ self.addCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb_id)
@@ -180,7 +191,7 @@
}
listener1 = self.mem_listener_client.create_listener(
**listener1_kwargs)
- self.addClassResourceCleanup(
+ self.addCleanup(
self.mem_listener_client.cleanup_listener,
listener1[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
@@ -212,7 +223,7 @@
}
listener2 = self.mem_listener_client.create_listener(
**listener2_kwargs)
- self.addClassResourceCleanup(
+ self.addCleanup(
self.mem_listener_client.cleanup_listener,
listener2[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
@@ -244,7 +255,7 @@
}
listener3 = self.mem_listener_client.create_listener(
**listener3_kwargs)
- self.addClassResourceCleanup(
+ self.addCleanup(
self.mem_listener_client.cleanup_listener,
listener3[const.ID],
lb_client=self.mem_lb_client, lb_id=lb_id)
@@ -260,6 +271,19 @@
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ # Wait for the enabled listeners to come ONLINE
+ listener1 = waiters.wait_for_status(
+ self.mem_listener_client.show_listener, listener1[const.ID],
+ const.OPERATING_STATUS, const.ONLINE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ listener2 = waiters.wait_for_status(
+ self.mem_listener_client.show_listener, listener2[const.ID],
+ const.OPERATING_STATUS, const.ONLINE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
# Test that a different user cannot list listeners
if not CONF.load_balancer.RBAC_test_type == const.NONE:
member2_client = self.os_roles_lb_member2.listener_client
@@ -322,12 +346,12 @@
self.assertEqual(listener3[field], listeners[2][field])
# Test multiple fields at the same time
- listeners = self.mem_listener_client.list_listeners(
- query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
- '{fields}={created}'.format(
- lb_id=lb_id, fields=const.FIELDS,
- admin=const.ADMIN_STATE_UP,
- created=const.CREATED_AT))
+ listeners = self.mem_listener_client.list_listeners(
+ query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
+ '{fields}={created}'.format(
+ lb_id=lb_id, fields=const.FIELDS,
+ admin=const.ADMIN_STATE_UP,
+ created=const.CREATED_AT))
self.assertEqual(2, len(listeners[0]))
self.assertTrue(listeners[0][const.ADMIN_STATE_UP])
parser.parse(listeners[0][const.CREATED_AT])
@@ -364,17 +388,6 @@
self.assertEqual(listener1[const.DESCRIPTION],
listeners[0][const.DESCRIPTION])
- # Attempt to clean up ahead of time
- try:
- self.mem_lb_client.delete_loadbalancer(lb_id, cascade=True)
- waiters.wait_for_deleted_status_or_not_found(
- self.mem_lb_client.show_loadbalancer, lb_id,
- const.PROVISIONING_STATUS,
- CONF.load_balancer.check_interval,
- CONF.load_balancer.check_timeout)
- except Exception:
- pass
-
@decorators.idempotent_id('6e299eae-6907-4dfc-89c2-e57709d25d3d')
def test_listener_show(self):
"""Tests listener show API.
@@ -415,12 +428,24 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
listener = waiters.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ listener = waiters.wait_for_status(
+ self.mem_listener_client.show_listener,
+ listener[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
self.assertEqual(listener_name, listener[const.NAME])
self.assertEqual(listener_description, listener[const.DESCRIPTION])
@@ -476,7 +501,7 @@
@decorators.idempotent_id('aaae0298-5778-4c7e-a27a-01549a71b319')
def test_listener_update(self):
- """Tests listener show API and field filtering.
+ """Tests listener update and show APIs.
* Create a fully populated listener.
* Show listener details.
@@ -484,7 +509,7 @@
* Validates that other accounts cannot update the listener.
* Update the listener details.
* Show listener details.
- * Validate the show reflects the initial values.
+ * Validate the show reflects the updated values.
"""
listener_name = data_utils.rand_name("lb_member_listener1-update")
listener_description = data_utils.arbitrary_string(size=255)
@@ -517,6 +542,11 @@
listener[const.ID],
lb_client=self.mem_lb_client, lb_id=self.lb_id)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
listener = waiters.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
@@ -530,11 +560,8 @@
parser.parse(listener[const.CREATED_AT])
parser.parse(listener[const.UPDATED_AT])
UUID(listener[const.ID])
- # Operating status is a measured status, so no-op will not go online
- if CONF.load_balancer.test_with_noop:
- self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
- else:
- self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
+ # Operating status will be OFFLINE while admin_state_up = False
+ self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(const.HTTP, listener[const.PROTOCOL])
self.assertEqual(82, listener[const.PROTOCOL_PORT])
self.assertEqual(200, listener[const.CONNECTION_LIMIT])
@@ -548,10 +575,6 @@
self.assertEqual(1000, listener[const.TIMEOUT_MEMBER_DATA])
self.assertEqual(50, listener[const.TIMEOUT_TCP_INSPECT])
- new_name = data_utils.rand_name("lb_member_listener1-update")
- new_description = data_utils.arbitrary_string(size=255,
- base_text='new')
-
# Test that a user, without the load balancer member role, cannot
# use this command
if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
@@ -582,6 +605,9 @@
listener_check[const.PROVISIONING_STATUS])
self.assertFalse(listener_check[const.ADMIN_STATE_UP])
+ new_name = data_utils.rand_name("lb_member_listener1-UPDATED")
+ new_description = data_utils.arbitrary_string(size=255,
+ base_text='new')
listener_update_kwargs = {
const.NAME: new_name,
const.DESCRIPTION: new_description,
@@ -603,16 +629,33 @@
listener = self.mem_listener_client.update_listener(
listener[const.ID], **listener_update_kwargs)
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
listener = waiters.wait_for_status(
self.mem_listener_client.show_listener,
listener[const.ID], const.PROVISIONING_STATUS,
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ listener = waiters.wait_for_status(
+ self.mem_listener_client.show_listener,
+ listener[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
self.assertEqual(new_name, listener[const.NAME])
self.assertEqual(new_description, listener[const.DESCRIPTION])
self.assertTrue(listener[const.ADMIN_STATE_UP])
+ # Operating status is a measured status, so no-op will not go online
+ if CONF.load_balancer.test_with_noop:
+ self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
+ else:
+ self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
self.assertEqual(400, listener[const.CONNECTION_LIMIT])
insert_headers = listener[const.INSERT_HEADERS]
self.assertFalse(
diff --git a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
index 7dadd49..26da704 100644
--- a/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
+++ b/octavia_tempest_plugin/tests/api/v2/test_load_balancer.py
@@ -221,7 +221,7 @@
CONF.load_balancer.lb_build_interval,
CONF.load_balancer.lb_build_timeout)
- # Helper functions for test load balancer list
+ # Helper functions for test loadbalancer list
def _filter_lbs_by_id(self, lbs, ids):
return [lb for lb in lbs if lb['id'] not in ids]
@@ -260,7 +260,7 @@
# TODO(johnsom) Add QoS
# vip_qos_policy_id=lb_qos_policy_id)
vip_network_id=self.lb_member_vip_net[const.ID])
- self.addClassResourceCleanup(
+ self.addCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -283,7 +283,7 @@
description=lb_description,
name=lb_name,
vip_network_id=self.lb_member_vip_net[const.ID])
- self.addClassResourceCleanup(
+ self.addCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -306,7 +306,7 @@
description=lb_description,
name=lb_name,
vip_network_id=self.lb_member_vip_net[const.ID])
- self.addClassResourceCleanup(
+ self.addCleanup(
self.mem_lb_client.cleanup_loadbalancer,
lb[const.ID])
@@ -412,25 +412,6 @@
self.assertEqual(lb2[const.DESCRIPTION], lbs[1][const.DESCRIPTION])
self.assertEqual(lb1[const.DESCRIPTION], lbs[0][const.DESCRIPTION])
- # Attempt to clean up so that one full test run doesn't start 10+
- # amps before the cleanup phase fires
- created_lb_ids = lb1[const.ID], lb2[const.ID], lb3[const.ID]
- for lb_id in created_lb_ids:
- try:
- self.mem_lb_client.delete_loadbalancer(lb_id)
- except Exception:
- pass
-
- for lb_id in created_lb_ids:
- try:
- waiters.wait_for_deleted_status_or_not_found(
- self.mem_lb_client.show_loadbalancer, lb_id,
- const.PROVISIONING_STATUS,
- CONF.load_balancer.lb_build_interval,
- CONF.load_balancer.lb_build_timeout)
- except Exception:
- pass
-
@decorators.idempotent_id('826ae612-8717-4c64-a8a7-cb9570a85870')
def test_load_balancer_show(self):
"""Tests load balancer show API.
@@ -528,7 +509,7 @@
@decorators.idempotent_id('b75a4d15-49d2-4149-a745-635eed1aacc3')
def test_load_balancer_update(self):
- """Tests load balancer show API and field filtering.
+ """Tests load balancer update and show APIs.
* Create a fully populated load balancer.
* Show load balancer details.
@@ -536,7 +517,7 @@
* Validates that other accounts cannot update the load balancer.
* Update the load balancer details.
* Show load balancer details.
- * Validate the show reflects the initial values.
+ * Validate the show reflects the updated values.
"""
lb_name = data_utils.rand_name("lb_member_lb1-update")
lb_description = data_utils.arbitrary_string(size=255)
diff --git a/octavia_tempest_plugin/tests/api/v2/test_pool.py b/octavia_tempest_plugin/tests/api/v2/test_pool.py
new file mode 100644
index 0000000..77c267b
--- /dev/null
+++ b/octavia_tempest_plugin/tests/api/v2/test_pool.py
@@ -0,0 +1,714 @@
+# Copyright 2018 GoDaddy
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+from uuid import UUID
+
+from dateutil import parser
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+
+
+class PoolAPITest(test_base.LoadBalancerBaseTest):
+ """Test the pool object API."""
+
+ @classmethod
+ def resource_setup(cls):
+ """Setup resources needed by the tests."""
+ super(PoolAPITest, cls).resource_setup()
+
+ lb_name = data_utils.rand_name("lb_member_lb1_pool")
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
+
+ ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
+ cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+ lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+ cls.lb_id = lb[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_lb_client.cleanup_loadbalancer,
+ cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
+
+ listener_name = data_utils.rand_name("lb_member_listener1_pool")
+ listener_kwargs = {
+ const.NAME: listener_name,
+ const.PROTOCOL: const.HTTP,
+ const.PROTOCOL_PORT: '80',
+ const.LOADBALANCER_ID: cls.lb_id,
+ }
+ listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+ cls.listener_id = listener[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_listener_client.cleanup_listener,
+ cls.listener_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ @decorators.idempotent_id('7587fe48-87ba-4538-9f03-190911f100ff')
+ def test_pool_create_standalone(self):
+ self._test_pool_create(has_listener=False)
+
+ @decorators.idempotent_id('c9c0df79-f07e-428c-ae57-b9d4078eec79')
+ def test_pool_create_with_listener(self):
+ self._test_pool_create(has_listener=True)
+
+ def _test_pool_create(self, has_listener):
+ """Tests pool create and basic show APIs.
+
+ * Tests that users without the loadbalancer member role cannot
+ create pools.
+ * Create a fully populated pool.
+ * Show pool details.
+ * Validate the show reflects the requested values.
+ """
+ pool_name = data_utils.rand_name("lb_member_pool1-create")
+ pool_description = data_utils.arbitrary_string(size=255)
+ pool_sp_cookie_name = 'my_cookie'
+ pool_kwargs = {
+ const.NAME: pool_name,
+ const.DESCRIPTION: pool_description,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
+ const.COOKIE_NAME: pool_sp_cookie_name,
+ },
+ }
+ if has_listener:
+ pool_kwargs[const.LISTENER_ID] = self.listener_id
+ else:
+ pool_kwargs[const.LOADBALANCER_ID] = self.lb_id
+
+ # Test that a user without the load balancer member role cannot
+ # create a pool
+ if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.os_primary.pool_client.create_pool,
+ **pool_kwargs)
+
+ pool = self.mem_pool_client.create_pool(**pool_kwargs)
+ self.addClassResourceCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ if has_listener and not CONF.load_balancer.test_with_noop:
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ self.assertEqual(pool_name, pool[const.NAME])
+ self.assertEqual(pool_description, pool[const.DESCRIPTION])
+ self.assertTrue(pool[const.ADMIN_STATE_UP])
+ parser.parse(pool[const.CREATED_AT])
+ parser.parse(pool[const.UPDATED_AT])
+ UUID(pool[const.ID])
+ # Operating status for a pool without members will be:
+ if has_listener and not CONF.load_balancer.test_with_noop:
+ # ONLINE if it is attached to a listener and is a live test
+ self.assertEqual(const.ONLINE, pool[const.OPERATING_STATUS])
+ else:
+ # OFFLINE if it is just on the LB directly or is in noop mode
+ self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ self.assertEqual(const.HTTP, pool[const.PROTOCOL])
+ self.assertEqual(1, len(pool[const.LOADBALANCERS]))
+ self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
+ if has_listener:
+ self.assertEqual(1, len(pool[const.LISTENERS]))
+ self.assertEqual(self.listener_id,
+ pool[const.LISTENERS][0][const.ID])
+ else:
+ self.assertEmpty(pool[const.LISTENERS])
+ self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
+ pool[const.LB_ALGORITHM])
+ self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
+ self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
+ pool[const.SESSION_PERSISTENCE][const.TYPE])
+ self.assertEqual(pool_sp_cookie_name,
+ pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
+
+ @decorators.idempotent_id('6959a32e-fb34-4f3e-be68-8880c6450016')
+ def test_pool_list(self):
+ """Tests pool list API and field filtering.
+
+ * Create a clean loadbalancer.
+ * Create three pools.
+ * Validates that other accounts cannot list the pools.
+ * List the pools using the default sort order.
+ * List the pools using descending sort order.
+ * List the pools using ascending sort order.
+ * List the pools returning one field at a time.
+ * List the pools returning two fields.
+ * List the pools filtering to one of the three.
+ * List the pools filtered, one field, and sorted.
+ """
+ lb_name = data_utils.rand_name("lb_member_lb2_pool-list")
+ lb = self.mem_lb_client.create_loadbalancer(
+ name=lb_name,
+ vip_network_id=self.lb_member_vip_net[const.ID])
+ lb_id = lb[const.ID]
+ self.addCleanup(
+ self.mem_lb_client.cleanup_loadbalancer,
+ lb_id)
+
+ waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
+
+ pool1_name = data_utils.rand_name("lb_member_pool2-list")
+ pool1_desc = 'B'
+ pool1_sp_cookie_name = 'my_cookie1'
+ pool1_kwargs = {
+ const.NAME: pool1_name,
+ const.DESCRIPTION: pool1_desc,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
+ const.COOKIE_NAME: pool1_sp_cookie_name,
+ },
+ const.LOADBALANCER_ID: lb_id,
+ }
+ pool1 = self.mem_pool_client.create_pool(
+ **pool1_kwargs)
+ self.addCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool1[const.ID],
+ lb_client=self.mem_lb_client, lb_id=lb_id)
+ pool1 = waiters.wait_for_status(
+ self.mem_pool_client.show_pool, pool1[const.ID],
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ # Time resolution for created_at is only to the second, and we need to
+ # ensure that each object has a distinct creation time. Delaying one
+ # second is both a simple and a reliable way to accomplish this.
+ time.sleep(1)
+
+ pool2_name = data_utils.rand_name("lb_member_pool1-list")
+ pool2_desc = 'A'
+ pool2_sp_cookie_name = 'my_cookie2'
+ pool2_kwargs = {
+ const.NAME: pool2_name,
+ const.DESCRIPTION: pool2_desc,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
+ const.COOKIE_NAME: pool2_sp_cookie_name,
+ },
+ const.LOADBALANCER_ID: lb_id,
+ }
+ pool2 = self.mem_pool_client.create_pool(
+ **pool2_kwargs)
+ self.addCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool2[const.ID],
+ lb_client=self.mem_lb_client, lb_id=lb_id)
+ pool2 = waiters.wait_for_status(
+ self.mem_pool_client.show_pool, pool2[const.ID],
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ # Time resolution for created_at is only to the second, and we need to
+ # ensure that each object has a distinct creation time. Delaying one
+ # second is both a simple and a reliable way to accomplish this.
+ time.sleep(1)
+
+ pool3_name = data_utils.rand_name("lb_member_pool3-list")
+ pool3_desc = 'C'
+ pool3_kwargs = {
+ const.NAME: pool3_name,
+ const.DESCRIPTION: pool3_desc,
+ const.ADMIN_STATE_UP: False,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ # No session persistence, just so there's one test for that
+ const.LOADBALANCER_ID: lb_id,
+ }
+ pool3 = self.mem_pool_client.create_pool(
+ **pool3_kwargs)
+ self.addCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool3[const.ID],
+ lb_client=self.mem_lb_client, lb_id=lb_id)
+ pool3 = waiters.wait_for_status(
+ self.mem_pool_client.show_pool, pool3[const.ID],
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ waiters.wait_for_status(self.mem_lb_client.show_loadbalancer,
+ lb_id,
+ const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ # Test that a different user cannot list pools
+ if not CONF.load_balancer.RBAC_test_type == const.NONE:
+ member2_client = self.os_roles_lb_member2.pool_client
+ primary = member2_client.list_pools(
+ query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
+ self.assertEqual(0, len(primary))
+
+ # Test that a user without the load balancer member role cannot
+ # list pools
+ if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.os_primary.pool_client.list_pools)
+
+ # Check the default sort order, created_at
+ pools = self.mem_pool_client.list_pools(
+ query_params='loadbalancer_id={lb_id}'.format(lb_id=lb_id))
+ self.assertEqual(pool1[const.DESCRIPTION],
+ pools[0][const.DESCRIPTION])
+ self.assertEqual(pool2[const.DESCRIPTION],
+ pools[1][const.DESCRIPTION])
+ self.assertEqual(pool3[const.DESCRIPTION],
+ pools[2][const.DESCRIPTION])
+
+ # Test sort descending by description
+ pools = self.mem_pool_client.list_pools(
+ query_params='loadbalancer_id={lb_id}&{sort}={descr}:{desc}'
+ .format(lb_id=lb_id, sort=const.SORT,
+ descr=const.DESCRIPTION, desc=const.DESC))
+ self.assertEqual(pool1[const.DESCRIPTION],
+ pools[1][const.DESCRIPTION])
+ self.assertEqual(pool2[const.DESCRIPTION],
+ pools[2][const.DESCRIPTION])
+ self.assertEqual(pool3[const.DESCRIPTION],
+ pools[0][const.DESCRIPTION])
+
+ # Test sort ascending by description
+ pools = self.mem_pool_client.list_pools(
+ query_params='loadbalancer_id={lb_id}&{sort}={descr}:{asc}'
+ .format(lb_id=lb_id, sort=const.SORT,
+ descr=const.DESCRIPTION, asc=const.ASC))
+ self.assertEqual(pool1[const.DESCRIPTION],
+ pools[1][const.DESCRIPTION])
+ self.assertEqual(pool2[const.DESCRIPTION],
+ pools[0][const.DESCRIPTION])
+ self.assertEqual(pool3[const.DESCRIPTION],
+ pools[2][const.DESCRIPTION])
+
+ # Test fields
+ for field in const.SHOW_POOL_RESPONSE_FIELDS:
+ pools = self.mem_pool_client.list_pools(
+ query_params='loadbalancer_id={lb_id}&{fields}={field}'
+ .format(lb_id=lb_id,
+ fields=const.FIELDS, field=field))
+ self.assertEqual(1, len(pools[0]))
+ self.assertEqual(pool1[field], pools[0][field])
+ self.assertEqual(pool2[field], pools[1][field])
+ self.assertEqual(pool3[field], pools[2][field])
+
+ # Test multiple fields at the same time
+ pools = self.mem_pool_client.list_pools(
+ query_params='loadbalancer_id={lb_id}&{fields}={admin}&'
+ '{fields}={created}'.format(
+ lb_id=lb_id, fields=const.FIELDS,
+ admin=const.ADMIN_STATE_UP,
+ created=const.CREATED_AT))
+ self.assertEqual(2, len(pools[0]))
+ self.assertTrue(pools[0][const.ADMIN_STATE_UP])
+ parser.parse(pools[0][const.CREATED_AT])
+ self.assertTrue(pools[1][const.ADMIN_STATE_UP])
+ parser.parse(pools[1][const.CREATED_AT])
+ self.assertFalse(pools[2][const.ADMIN_STATE_UP])
+ parser.parse(pools[2][const.CREATED_AT])
+
+ # Test filtering
+ pools = self.mem_pool_client.list_pools(
+ query_params='loadbalancer_id={lb_id}&{desc}={lb_desc}'.format(
+ lb_id=lb_id, desc=const.DESCRIPTION,
+ lb_desc=pool2[const.DESCRIPTION]))
+ self.assertEqual(1, len(pools))
+ self.assertEqual(pool2[const.DESCRIPTION],
+ pools[0][const.DESCRIPTION])
+
+ # Test combined params
+ pools = self.mem_pool_client.list_pools(
+ query_params='loadbalancer_id={lb_id}&{admin}={true}&'
+ '{fields}={descr}&{fields}={id}&'
+ '{sort}={descr}:{desc}'.format(
+ lb_id=lb_id, admin=const.ADMIN_STATE_UP,
+ true=const.ADMIN_STATE_UP_TRUE,
+ fields=const.FIELDS, descr=const.DESCRIPTION,
+ id=const.ID, sort=const.SORT, desc=const.DESC))
+ # Should get two pools
+ self.assertEqual(2, len(pools))
+ # Pools should have two fields
+ self.assertEqual(2, len(pools[0]))
+ # Should be in descending order
+ self.assertEqual(pool2[const.DESCRIPTION],
+ pools[1][const.DESCRIPTION])
+ self.assertEqual(pool1[const.DESCRIPTION],
+ pools[0][const.DESCRIPTION])
+
+ @decorators.idempotent_id('b7932438-1aea-4175-a50c-984fee1c0cad')
+ def test_pool_show(self):
+ """Tests pool show API.
+
+ * Create a fully populated pool.
+ * Show pool details.
+ * Validate the show reflects the requested values.
+ * Validates that other accounts cannot see the pool.
+ """
+ pool_name = data_utils.rand_name("lb_member_pool1-show")
+ pool_description = data_utils.arbitrary_string(size=255)
+ pool_sp_cookie_name = 'my_cookie'
+ pool_kwargs = {
+ const.NAME: pool_name,
+ const.DESCRIPTION: pool_description,
+ const.ADMIN_STATE_UP: True,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
+ const.COOKIE_NAME: pool_sp_cookie_name,
+ },
+ const.LOADBALANCER_ID: self.lb_id,
+ }
+
+ pool = self.mem_pool_client.create_pool(**pool_kwargs)
+ self.addClassResourceCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ self.assertEqual(pool_name, pool[const.NAME])
+ self.assertEqual(pool_description, pool[const.DESCRIPTION])
+ self.assertTrue(pool[const.ADMIN_STATE_UP])
+ parser.parse(pool[const.CREATED_AT])
+ parser.parse(pool[const.UPDATED_AT])
+ UUID(pool[const.ID])
+ # With no members and no listener, the operating status is OFFLINE
+ self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ self.assertEqual(const.HTTP, pool[const.PROTOCOL])
+ self.assertEqual(1, len(pool[const.LOADBALANCERS]))
+ self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
+ self.assertEmpty(pool[const.LISTENERS])
+ self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
+ pool[const.LB_ALGORITHM])
+ self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
+ self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
+ pool[const.SESSION_PERSISTENCE][const.TYPE])
+ self.assertEqual(pool_sp_cookie_name,
+ pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
+
+ # Test that a user with lb_admin role can see the pool
+ if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+ pool_client = self.os_roles_lb_admin.pool_client
+ pool_adm = pool_client.show_pool(pool[const.ID])
+ self.assertEqual(pool_name, pool_adm[const.NAME])
+
+ # Test that a user with cloud admin role can see the pool
+ if not CONF.load_balancer.RBAC_test_type == const.NONE:
+ adm = self.os_admin.pool_client.show_pool(
+ pool[const.ID])
+ self.assertEqual(pool_name, adm[const.NAME])
+
+ # Test that a different user, with load balancer member role, cannot
+ # see this pool
+ if not CONF.load_balancer.RBAC_test_type == const.NONE:
+ member2_client = self.os_roles_lb_member2.pool_client
+ self.assertRaises(exceptions.Forbidden,
+ member2_client.show_pool,
+ pool[const.ID])
+
+ # Test that a user, without the load balancer member role, cannot
+ # show pools
+ if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.os_primary.pool_client.show_pool,
+ pool[const.ID])
+
+ @decorators.idempotent_id('7bd0a6bf-57b4-46a6-83ef-f9991896658a')
+ def test_pool_update(self):
+ """Tests pool update and show APIs.
+
+ * Create a fully populated pool.
+ * Show pool details.
+ * Validate the show reflects the initial values.
+ * Validates that other accounts cannot update the pool.
+ * Update the pool details.
+ * Show pool details.
+ * Validate the show reflects the updated values.
+ """
+ pool_name = data_utils.rand_name("lb_member_pool1-update")
+ pool_description = data_utils.arbitrary_string(size=255)
+ pool_sp_cookie_name = 'my_cookie'
+ pool_kwargs = {
+ const.NAME: pool_name,
+ const.DESCRIPTION: pool_description,
+ const.ADMIN_STATE_UP: False,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
+ const.COOKIE_NAME: pool_sp_cookie_name,
+ },
+ const.LOADBALANCER_ID: self.lb_id,
+ }
+
+ pool = self.mem_pool_client.create_pool(**pool_kwargs)
+ self.addClassResourceCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ self.assertEqual(pool_name, pool[const.NAME])
+ self.assertEqual(pool_description, pool[const.DESCRIPTION])
+ self.assertFalse(pool[const.ADMIN_STATE_UP])
+ parser.parse(pool[const.CREATED_AT])
+ parser.parse(pool[const.UPDATED_AT])
+ UUID(pool[const.ID])
+ # With no members and no listener, the operating status is OFFLINE
+ self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ self.assertEqual(const.HTTP, pool[const.PROTOCOL])
+ self.assertEqual(1, len(pool[const.LOADBALANCERS]))
+ self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
+ self.assertEmpty(pool[const.LISTENERS])
+ self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
+ pool[const.LB_ALGORITHM])
+ self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
+ self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
+ pool[const.SESSION_PERSISTENCE][const.TYPE])
+ self.assertEqual(pool_sp_cookie_name,
+ pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
+
+ # Test that a user, without the load balancer member role, cannot
+ # use this command
+ if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.os_primary.pool_client.update_pool,
+ pool[const.ID], admin_state_up=True)
+
+ # Assert we didn't go into PENDING_*
+ pool_check = self.mem_pool_client.show_pool(
+ pool[const.ID])
+ self.assertEqual(const.ACTIVE,
+ pool_check[const.PROVISIONING_STATUS])
+ self.assertFalse(pool_check[const.ADMIN_STATE_UP])
+
+ # Test that a different user, with the load balancer member role,
+ # cannot update this pool
+ if not CONF.load_balancer.RBAC_test_type == const.NONE:
+ member2_client = self.os_roles_lb_member2.pool_client
+ self.assertRaises(exceptions.Forbidden,
+ member2_client.update_pool,
+ pool[const.ID], admin_state_up=True)
+
+ # Assert we didn't go into PENDING_*
+ pool_check = self.mem_pool_client.show_pool(
+ pool[const.ID])
+ self.assertEqual(const.ACTIVE,
+ pool_check[const.PROVISIONING_STATUS])
+ self.assertFalse(pool_check[const.ADMIN_STATE_UP])
+
+ new_name = data_utils.rand_name("lb_member_pool1-UPDATED")
+ new_description = data_utils.arbitrary_string(size=255,
+ base_text='new')
+ pool_update_kwargs = {
+ const.NAME: new_name,
+ const.DESCRIPTION: new_description,
+ const.ADMIN_STATE_UP: True,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE,
+ },
+ }
+ pool = self.mem_pool_client.update_pool(
+ pool[const.ID], **pool_update_kwargs)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ self.assertEqual(new_name, pool[const.NAME])
+ self.assertEqual(new_description, pool[const.DESCRIPTION])
+ self.assertTrue(pool[const.ADMIN_STATE_UP])
+ self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
+ pool[const.LB_ALGORITHM])
+ self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
+ self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
+ pool[const.SESSION_PERSISTENCE][const.TYPE])
+ self.assertIsNone(
+ pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME))
+
+ # Also test removing a Session Persistence
+ pool_update_kwargs = {
+ const.SESSION_PERSISTENCE: None,
+ }
+ pool = self.mem_pool_client.update_pool(
+ pool[const.ID], **pool_update_kwargs)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer, self.lb_id,
+ const.PROVISIONING_STATUS, const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+ self.assertIsNone(pool.get(const.SESSION_PERSISTENCE))
+
+ @decorators.idempotent_id('35ed3800-7a4a-47a6-9b94-c1033fff1112')
+ def test_pool_delete(self):
+ """Tests pool create and delete APIs.
+
+ * Creates a pool.
+ * Validates that other accounts cannot delete the pool
+ * Deletes the pool.
+ * Validates the pool is in the DELETED state.
+ """
+ pool_name = data_utils.rand_name("lb_member_pool1-delete")
+ pool_sp_cookie_name = 'my_cookie'
+ pool_kwargs = {
+ const.NAME: pool_name,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
+ const.COOKIE_NAME: pool_sp_cookie_name,
+ },
+ const.LOADBALANCER_ID: self.lb_id,
+ }
+ pool = self.mem_pool_client.create_pool(**pool_kwargs)
+ self.addClassResourceCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ # Test that a user without the load balancer member role cannot
+ # delete this pool
+ if CONF.load_balancer.RBAC_test_type == const.ADVANCED:
+ self.assertRaises(
+ exceptions.Forbidden,
+ self.os_primary.pool_client.delete_pool,
+ pool[const.ID])
+
+ # Test that a different user, with the load balancer member role,
+ # cannot delete this pool
+ if not CONF.load_balancer.RBAC_test_type == const.NONE:
+ member2_client = self.os_roles_lb_member2.pool_client
+ self.assertRaises(exceptions.Forbidden,
+ member2_client.delete_pool,
+ pool[const.ID])
+
+ self.mem_pool_client.delete_pool(pool[const.ID])
+
+ waiters.wait_for_deleted_status_or_not_found(
+ self.mem_pool_client.show_pool, pool[const.ID],
+ const.PROVISIONING_STATUS,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
index 7252dae..9560937 100644
--- a/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_listener.py
@@ -35,8 +35,7 @@
super(ListenerScenarioTest, cls).resource_setup()
lb_name = data_utils.rand_name("lb_member_lb1_listener")
- lb_kwargs = {const.ADMIN_STATE_UP: False,
- const.PROVIDER: CONF.load_balancer.provider,
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
const.NAME: lb_name}
ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
@@ -107,6 +106,7 @@
parser.parse(listener[const.CREATED_AT])
parser.parse(listener[const.UPDATED_AT])
UUID(listener[const.ID])
+ # Operating status will be OFFLINE while admin_state_up = False
self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
self.assertEqual(const.HTTP, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
@@ -152,10 +152,22 @@
const.ACTIVE,
CONF.load_balancer.build_interval,
CONF.load_balancer.build_timeout)
+ if not CONF.load_balancer.test_with_noop:
+ listener = waiters.wait_for_status(
+ self.mem_listener_client.show_listener,
+ listener[const.ID], const.OPERATING_STATUS,
+ const.ONLINE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
self.assertEqual(new_name, listener[const.NAME])
self.assertEqual(new_description, listener[const.DESCRIPTION])
self.assertTrue(listener[const.ADMIN_STATE_UP])
+ # Operating status is a measured status, so no-op will not go online
+ if CONF.load_balancer.test_with_noop:
+ self.assertEqual(const.OFFLINE, listener[const.OPERATING_STATUS])
+ else:
+ self.assertEqual(const.ONLINE, listener[const.OPERATING_STATUS])
self.assertEqual(const.HTTP, listener[const.PROTOCOL])
self.assertEqual(80, listener[const.PROTOCOL_PORT])
self.assertEqual(400, listener[const.CONNECTION_LIMIT])
@@ -170,6 +182,12 @@
self.assertEqual(100, listener[const.TIMEOUT_TCP_INSPECT])
# Listener delete
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
self.mem_listener_client.delete_listener(listener[const.ID])
waiters.wait_for_deleted_status_or_not_found(
diff --git a/octavia_tempest_plugin/tests/scenario/v2/test_pool.py b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
new file mode 100644
index 0000000..6015223
--- /dev/null
+++ b/octavia_tempest_plugin/tests/scenario/v2/test_pool.py
@@ -0,0 +1,195 @@
+# Copyright 2018 GoDaddy
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from uuid import UUID
+
+from dateutil import parser
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.common import constants as const
+from octavia_tempest_plugin.tests import test_base
+from octavia_tempest_plugin.tests import waiters
+
+CONF = config.CONF
+
+
+class PoolScenarioTest(test_base.LoadBalancerBaseTest):
+
+ @classmethod
+ def resource_setup(cls):
+ """Setup resources needed by the tests."""
+ super(PoolScenarioTest, cls).resource_setup()
+
+ lb_name = data_utils.rand_name("lb_member_lb1_pool")
+ lb_kwargs = {const.PROVIDER: CONF.load_balancer.provider,
+ const.NAME: lb_name}
+
+ ip_version = 6 if CONF.load_balancer.test_with_ipv6 else 4
+ cls._setup_lb_network_kwargs(lb_kwargs, ip_version)
+
+ lb = cls.mem_lb_client.create_loadbalancer(**lb_kwargs)
+ cls.lb_id = lb[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_lb_client.cleanup_loadbalancer,
+ cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.lb_build_interval,
+ CONF.load_balancer.lb_build_timeout)
+
+ listener_name = data_utils.rand_name("lb_member_listener1_pool")
+ listener_kwargs = {
+ const.NAME: listener_name,
+ const.PROTOCOL: const.HTTP,
+ const.PROTOCOL_PORT: '80',
+ const.LOADBALANCER_ID: cls.lb_id,
+ }
+ listener = cls.mem_listener_client.create_listener(**listener_kwargs)
+ cls.listener_id = listener[const.ID]
+ cls.addClassResourceCleanup(
+ cls.mem_listener_client.cleanup_listener,
+ cls.listener_id,
+ lb_client=cls.mem_lb_client, lb_id=cls.lb_id)
+
+ waiters.wait_for_status(cls.mem_lb_client.show_loadbalancer,
+ cls.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ @decorators.idempotent_id('dfa120bf-81b9-4f22-bb5e-7df660c18173')
+ def test_pool_standalone_CRUD(self):
+ self._test_pool_CRUD(has_listener=False)
+
+ @decorators.idempotent_id('087da8ab-79c7-48ba-871c-5769185cea3e')
+ def test_pool_with_listener_CRUD(self):
+ self._test_pool_CRUD(has_listener=True)
+
+ def _test_pool_CRUD(self, has_listener):
+ """Tests pool create, read, update, delete
+
+ * Create a fully populated pool.
+ * Show pool details.
+ * Update the pool.
+ * Delete the pool.
+ """
+ # Pool create
+ pool_name = data_utils.rand_name("lb_member_pool1-CRUD")
+ pool_description = data_utils.arbitrary_string(size=255)
+ pool_sp_cookie_name = 'my_cookie'
+ pool_kwargs = {
+ const.NAME: pool_name,
+ const.DESCRIPTION: pool_description,
+ const.ADMIN_STATE_UP: False,
+ const.PROTOCOL: const.HTTP,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_ROUND_ROBIN,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_APP_COOKIE,
+ const.COOKIE_NAME: pool_sp_cookie_name,
+ },
+ }
+ if has_listener:
+ pool_kwargs[const.LISTENER_ID] = self.listener_id
+ else:
+ pool_kwargs[const.LOADBALANCER_ID] = self.lb_id
+
+ pool = self.mem_pool_client.create_pool(**pool_kwargs)
+ self.addClassResourceCleanup(
+ self.mem_pool_client.cleanup_pool,
+ pool[const.ID],
+ lb_client=self.mem_lb_client, lb_id=self.lb_id)
+
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ self.assertEqual(pool_name, pool[const.NAME])
+ self.assertEqual(pool_description, pool[const.DESCRIPTION])
+ self.assertFalse(pool[const.ADMIN_STATE_UP])
+ parser.parse(pool[const.CREATED_AT])
+ parser.parse(pool[const.UPDATED_AT])
+ UUID(pool[const.ID])
+ self.assertEqual(const.OFFLINE, pool[const.OPERATING_STATUS])
+ self.assertEqual(const.HTTP, pool[const.PROTOCOL])
+ self.assertEqual(1, len(pool[const.LOADBALANCERS]))
+ self.assertEqual(self.lb_id, pool[const.LOADBALANCERS][0][const.ID])
+ if has_listener:
+ self.assertEqual(1, len(pool[const.LISTENERS]))
+ self.assertEqual(self.listener_id,
+ pool[const.LISTENERS][0][const.ID])
+ else:
+ self.assertEmpty(pool[const.LISTENERS])
+ self.assertEqual(const.LB_ALGORITHM_ROUND_ROBIN,
+ pool[const.LB_ALGORITHM])
+ self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
+ self.assertEqual(const.SESSION_PERSISTENCE_APP_COOKIE,
+ pool[const.SESSION_PERSISTENCE][const.TYPE])
+ self.assertEqual(pool_sp_cookie_name,
+ pool[const.SESSION_PERSISTENCE][const.COOKIE_NAME])
+
+ # Pool update
+ new_name = data_utils.rand_name("lb_member_pool1-update")
+ new_description = data_utils.arbitrary_string(size=255,
+ base_text='new')
+ pool_update_kwargs = {
+ const.NAME: new_name,
+ const.DESCRIPTION: new_description,
+ const.ADMIN_STATE_UP: True,
+ const.LB_ALGORITHM: const.LB_ALGORITHM_LEAST_CONNECTIONS,
+ const.SESSION_PERSISTENCE: {
+ const.TYPE: const.SESSION_PERSISTENCE_HTTP_COOKIE,
+ },
+ }
+ pool = self.mem_pool_client.update_pool(
+ pool[const.ID], **pool_update_kwargs)
+
+ pool = waiters.wait_for_status(
+ self.mem_pool_client.show_pool,
+ pool[const.ID], const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.build_interval,
+ CONF.load_balancer.build_timeout)
+
+ self.assertEqual(new_name, pool[const.NAME])
+ self.assertEqual(new_description, pool[const.DESCRIPTION])
+ self.assertTrue(pool[const.ADMIN_STATE_UP])
+ self.assertEqual(const.LB_ALGORITHM_LEAST_CONNECTIONS,
+ pool[const.LB_ALGORITHM])
+ self.assertIsNotNone(pool.get(const.SESSION_PERSISTENCE))
+ self.assertEqual(const.SESSION_PERSISTENCE_HTTP_COOKIE,
+ pool[const.SESSION_PERSISTENCE][const.TYPE])
+ self.assertIsNone(
+ pool[const.SESSION_PERSISTENCE].get(const.COOKIE_NAME))
+
+ # Pool delete
+ waiters.wait_for_status(
+ self.mem_lb_client.show_loadbalancer,
+ self.lb_id, const.PROVISIONING_STATUS,
+ const.ACTIVE,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
+ self.mem_pool_client.delete_pool(pool[const.ID])
+
+ waiters.wait_for_deleted_status_or_not_found(
+ self.mem_pool_client.show_pool, pool[const.ID],
+ const.PROVISIONING_STATUS,
+ CONF.load_balancer.check_interval,
+ CONF.load_balancer.check_timeout)
diff --git a/octavia_tempest_plugin/tests/test_base.py b/octavia_tempest_plugin/tests/test_base.py
index 73843b7..9862e12 100644
--- a/octavia_tempest_plugin/tests/test_base.py
+++ b/octavia_tempest_plugin/tests/test_base.py
@@ -104,6 +104,7 @@
cls.lb_mem_subnet_client = cls.os_roles_lb_member.subnets_client
cls.mem_lb_client = cls.os_roles_lb_member.loadbalancer_client
cls.mem_listener_client = cls.os_roles_lb_member.listener_client
+ cls.mem_pool_client = cls.os_roles_lb_member.pool_client
@classmethod
def resource_setup(cls):