Create scenario tests for loadbalancers
This patch implements the tempest plugin for basic load balancer
operations in Octavia. It contains tests for loadbalancer API and
a minimal operation test for loadbalancing functionality.
Steps for testing in devstack environment:
- Clone octavia-tempest-plugin repo, check out this patch, install
octavia-tempest-plugin project.
- Create a tempest work directory by running 'tempest init <workdir>'.
In the etc/tempest.conf, add 'loadbalancer = true' in
'service_available' section.
- Set a sufficiently large value for 'OS_TEST_TIMEOUT' in .testr.conf
- Add or modify other related config options (image, network, flavor,
validation, etc).
- Run 'tempest run --regex ^octavia_tempest_plugin'
Co-Authored-By: Lingxian Kong <anlin.kong@gmail.com>
Co-Authored-By: Adam Harwell <flux.adam@gmail.com>
Change-Id: Ibc2904f431b15dfca2ff8e38e0d4d06c1430abea
diff --git a/octavia_tempest_plugin/tests/server_util.py b/octavia_tempest_plugin/tests/server_util.py
new file mode 100644
index 0000000..2500195
--- /dev/null
+++ b/octavia_tempest_plugin/tests/server_util.py
@@ -0,0 +1,322 @@
+# Copyright 2017 Catalyst IT Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import pkg_resources
+import random
+import shlex
+import string
+import subprocess
+import tempfile
+import time
+
+from oslo_log import log as logging
+from oslo_utils import excutils
+from tempest import config
+from tempest.lib.common import fixed_network
+from tempest.lib.common import rest_client
+from tempest.lib.common.utils.linux import remote_client
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+SERVER_BINARY = pkg_resources.resource_filename(
+ 'octavia_tempest_plugin.contrib.httpd', 'httpd.bin')
+
+
+# Raised when a server enters ERROR state while we are waiting for it to build.
+class BuildErrorException(exceptions.TempestException):
+    message = "Server %(server_id)s failed to build and is in ERROR status"
+
+
+def _get_task_state(body):
+    # 'OS-EXT-STS:task_state' is only present when the extended-status API
+    # extension is enabled; None means "no task in progress" (or no data).
+    return body.get('OS-EXT-STS:task_state', None)
+
+
+def wait_for_server_status(client, server_id, status, ready_wait=True,
+                           extra_timeout=0, raise_on_error=True):
+    """Waits for a server to reach a given status.
+
+    :param client: servers client exposing show_server(), build_timeout
+        and build_interval.
+    :param server_id: ID of the server to poll.
+    :param status: target status, e.g. 'ACTIVE' or 'BUILD'.
+    :param ready_wait: also wait until no task is in progress before
+        returning (the server is then "ready for action").
+    :param extra_timeout: seconds added to the client's build_timeout.
+    :param raise_on_error: raise BuildErrorException if the server goes
+        into ERROR while waiting.
+    :raises TimeoutException: if the target status is not reached in time.
+    """
+
+    # NOTE(afazekas): UNKNOWN status possible on ERROR
+    # or in a very early stage.
+    body = client.show_server(server_id)['server']
+    old_status = server_status = body['status']
+    old_task_state = task_state = _get_task_state(body)
+    start_time = int(time.time())
+    timeout = client.build_timeout + extra_timeout
+    while True:
+        # NOTE(afazekas): Now the BUILD status only reached
+        # between the UNKNOWN->ACTIVE transition.
+        # TODO(afazekas): enumerate and validate the stable status set
+        if status == 'BUILD' and server_status != 'UNKNOWN':
+            return
+        if server_status == status:
+            if ready_wait:
+                if status == 'BUILD':
+                    return
+                # NOTE(afazekas): The instance is in "ready for action state"
+                # when no task in progress
+                if task_state is None:
+                    # without state api extension 3 sec usually enough
+                    time.sleep(CONF.compute.ready_wait)
+                    return
+            else:
+                return
+
+        time.sleep(client.build_interval)
+        body = client.show_server(server_id)['server']
+        server_status = body['status']
+        task_state = _get_task_state(body)
+        if (server_status != old_status) or (task_state != old_task_state):
+            LOG.info('State transition "%s" ==> "%s" after %d second wait',
+                     '/'.join((old_status, str(old_task_state))),
+                     '/'.join((server_status, str(task_state))),
+                     time.time() - start_time)
+        if (server_status == 'ERROR') and raise_on_error:
+            if 'fault' in body:
+                raise BuildErrorException(body['fault'],
+                                          server_id=server_id)
+            else:
+                raise BuildErrorException(server_id=server_id)
+
+        timed_out = int(time.time()) - start_time >= timeout
+
+        if timed_out:
+            expected_task_state = 'None' if ready_wait else 'n/a'
+            message = ('Server %(server_id)s failed to reach %(status)s '
+                       'status and task state "%(expected_task_state)s" '
+                       'within the required time (%(timeout)s s).' %
+                       {'server_id': server_id,
+                        'status': status,
+                        'expected_task_state': expected_task_state,
+                        'timeout': timeout})
+            message += ' Current status: %s.' % server_status
+            message += ' Current task state: %s.' % task_state
+            caller = test_utils.find_test_caller()
+            if caller:
+                message = '(%s) %s' % (caller, message)
+            raise exceptions.TimeoutException(message)
+        old_status = server_status
+        old_task_state = task_state
+
+
+def wait_for_server_termination(client, server_id, ignore_error=False):
+    """Waits for server to reach termination (i.e. NotFound on show).
+
+    :param client: servers client exposing show_server(), build_timeout
+        and build_interval.
+    :param server_id: ID of the server being deleted.
+    :param ignore_error: if False, raise DeleteErrorException when the
+        server goes into ERROR instead of disappearing.
+    :raises TimeoutException: if the server is still visible after
+        client.build_timeout seconds.
+    """
+    try:
+        body = client.show_server(server_id)['server']
+    except exceptions.NotFound:
+        return
+    old_status = body['status']
+    old_task_state = _get_task_state(body)
+    start_time = int(time.time())
+    while True:
+        time.sleep(client.build_interval)
+        try:
+            body = client.show_server(server_id)['server']
+        except exceptions.NotFound:
+            return
+        server_status = body['status']
+        task_state = _get_task_state(body)
+        if (server_status != old_status) or (task_state != old_task_state):
+            LOG.info('State transition "%s" ==> "%s" after %d second wait',
+                     '/'.join((old_status, str(old_task_state))),
+                     '/'.join((server_status, str(task_state))),
+                     time.time() - start_time)
+        if server_status == 'ERROR' and not ignore_error:
+            raise exceptions.DeleteErrorException(resource_id=server_id)
+
+        if int(time.time()) - start_time >= client.build_timeout:
+            raise exceptions.TimeoutException
+        old_status = server_status
+        old_task_state = task_state
+
+
+def create_server(clients, name=None, flavor=None, image_id=None,
+                  validatable=False, validation_resources=None,
+                  tenant_network=None, wait_until=None, availability_zone=None,
+                  **kwargs):
+    """Common wrapper utility returning a test server.
+
+    This method is a common wrapper returning a test server that can be
+    pingable or sshable.
+
+    :param clients: Client manager which provides OpenStack Tempest clients.
+    :param name: Name of the server to be provisioned. If not defined a random
+        name is generated from the configured length.
+    :param flavor: Flavor of the server to be provisioned. If not defined,
+        CONF.compute.flavor_ref will be used instead.
+    :param image_id: ID of the image to be used to provision the server. If
+        not defined, CONF.compute.image_ref will be used instead.
+    :param validatable: Whether the server will be pingable or sshable.
+    :param validation_resources: Resources created for the connection to the
+        server. Include a keypair, a security group and an IP.
+    :param tenant_network: Tenant network to be used for creating a server.
+    :param wait_until: Server status to wait for the server to reach after
+        its creation.
+    :param availability_zone: Availability zone to boot the server in. If not
+        defined, CONF.loadbalancer.availability_zone is used.
+    :returns: the created server body (a rest_client.ResponseBody dict).
+    """
+    if name is None:
+        r = random.SystemRandom()
+        name = "m{}".format("".join(
+            [r.choice(string.ascii_uppercase + string.digits)
+             for i in range(
+                 CONF.loadbalancer.random_server_name_length - 1)]
+        ))
+    if flavor is None:
+        flavor = CONF.compute.flavor_ref
+    if image_id is None:
+        image_id = CONF.compute.image_ref
+    if availability_zone is None:
+        availability_zone = CONF.loadbalancer.availability_zone
+
+    kwargs = fixed_network.set_networks_kwarg(
+        tenant_network, kwargs) or {}
+
+    if availability_zone:
+        kwargs.update({'availability_zone': availability_zone})
+
+    if CONF.validation.run_validation and validatable:
+        LOG.debug("Provisioning test server with validation resources %s",
+                  validation_resources)
+        if 'security_groups' in kwargs:
+            kwargs['security_groups'].append(
+                {'name': validation_resources['security_group']['name']})
+        else:
+            try:
+                kwargs['security_groups'] = [
+                    {'name': validation_resources['security_group']['name']}]
+            except KeyError:
+                LOG.debug("No security group provided.")
+
+        if 'key_name' not in kwargs:
+            try:
+                kwargs['key_name'] = validation_resources['keypair']['name']
+            except KeyError:
+                LOG.debug("No key provided.")
+
+        if CONF.validation.connect_method == 'floating':
+            if wait_until is None:
+                wait_until = 'ACTIVE'
+
+    body = clients.servers_client.create_server(name=name, imageRef=image_id,
+                                                flavorRef=flavor,
+                                                **kwargs)
+    server = rest_client.ResponseBody(body.response, body['server'])
+
+    def _setup_validation_fip():
+        # Associate the pre-created floating IP with the server's port on
+        # the tenant network (neutron) or with the server itself (nova-net).
+        if CONF.service_available.neutron:
+            ifaces = clients.interfaces_client.list_interfaces(server['id'])
+            validation_port = None
+            for iface in ifaces['interfaceAttachments']:
+                if not tenant_network or (iface['net_id'] ==
+                                          tenant_network['id']):
+                    validation_port = iface['port_id']
+                    break
+            if not validation_port:
+                # NOTE(artom) This will get caught by the catch-all clause in
+                # the wait_until loop below
+                raise ValueError('Unable to setup floating IP for validation: '
+                                 'port not found on tenant network')
+            clients.floating_ips_client.update_floatingip(
+                validation_resources['floating_ip']['id'],
+                port_id=validation_port)
+        else:
+            fip_client = clients.compute_floating_ips_client
+            fip_client.associate_floating_ip_to_server(
+                floating_ip=validation_resources['floating_ip']['ip'],
+                server_id=server['id'])
+
+    if wait_until:
+        try:
+            wait_for_server_status(
+                clients.servers_client, server['id'], wait_until)
+
+            # Multiple validatable servers are not supported for now.
+            if CONF.validation.run_validation and validatable:
+                if CONF.validation.connect_method == 'floating':
+                    _setup_validation_fip()
+
+        except Exception:
+            # Best effort: clean up the half-built server before re-raising.
+            with excutils.save_and_reraise_exception():
+                try:
+                    clients.servers_client.delete_server(server['id'])
+                except Exception:
+                    LOG.exception('Deleting server %s failed', server['id'])
+                try:
+                    wait_for_server_termination(clients.servers_client,
+                                                server['id'])
+                except Exception:
+                    LOG.exception('Server %s failed to delete in time',
+                                  server['id'])
+
+    return server
+
+
+def clear_server(servers_client, id):
+    """Delete a server (ignoring NotFound) and wait until it is gone."""
+    try:
+        servers_client.delete_server(id)
+    except exceptions.NotFound:
+        pass
+    wait_for_server_termination(servers_client, id)
+
+
+def _execute(cmd, cwd=None):
+ args = shlex.split(cmd)
+ subprocess_args = {'stdout': subprocess.PIPE,
+ 'stderr': subprocess.STDOUT,
+ 'cwd': cwd}
+ proc = subprocess.Popen(args, **subprocess_args)
+ stdout, stderr = proc.communicate()
+ if proc.returncode != 0:
+ LOG.error('Command %s returned with exit status %s, output %s, '
+ 'error %s', cmd, proc.returncode, stdout, stderr)
+ raise exceptions.CommandFailed(proc.returncode, cmd, stdout, stderr)
+ return stdout
+
+
+def copy_file(floating_ip, private_key, local_file, remote_file):
+    """Copy web server script to instance via scp.
+
+    :param floating_ip: address to scp to.
+    :param private_key: SSH private key contents, written to a temporary
+        file that only lives for the duration of the copy.
+    :param local_file: path of the local file to copy.
+    :param remote_file: destination path on the instance.
+    :returns: scp's output.
+    """
+    with tempfile.NamedTemporaryFile() as key:
+        key.write(private_key.encode('utf-8'))
+        key.flush()
+        dest = (
+            "%s@%s:%s" %
+            (CONF.validation.image_ssh_user, floating_ip, remote_file)
+        )
+        # Host key checking is disabled: test instances are ephemeral.
+        cmd = ("scp -v -o UserKnownHostsFile=/dev/null "
+               "-o StrictHostKeyChecking=no "
+               "-i %(key_file)s %(file)s %(dest)s" % {'key_file': key.name,
+                                                      'file': local_file,
+                                                      'dest': dest})
+        return _execute(cmd)
+
+
+def run_webserver(connect_ip, private_key):
+    """Copy the httpd test binary to an instance and start two servers.
+
+    The web server is started on ports 80 and 81 (ids 1 and 2) inside
+    detached screen sessions so they survive the SSH session ending.
+    """
+    httpd = "/dev/shm/httpd.bin"
+
+    linux_client = remote_client.RemoteClient(
+        connect_ip,
+        CONF.validation.image_ssh_user,
+        pkey=private_key,
+    )
+    linux_client.validate_authentication()
+
+    # TODO(kong): We may figure out an elegant way to copy file to instance
+    # in future.
+    LOG.debug("Copying the webserver binary to the server.")
+    copy_file(connect_ip, private_key, SERVER_BINARY, httpd)
+
+    LOG.debug("Starting services on the server.")
+    linux_client.exec_command('sudo screen -d -m %s -port 80 -id 1' % httpd)
+    linux_client.exec_command('sudo screen -d -m %s -port 81 -id 2' % httpd)
diff --git a/octavia_tempest_plugin/tests/test_octavia_tempest_plugin.py b/octavia_tempest_plugin/tests/test_octavia_tempest_plugin.py
deleted file mode 100644
index 7805347..0000000
--- a/octavia_tempest_plugin/tests/test_octavia_tempest_plugin.py
+++ /dev/null
@@ -1,28 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""
-test_octavia_tempest_plugin
-----------------------------------
-
-Tests for `octavia_tempest_plugin` module.
-"""
-
-from octavia_tempest_plugin.tests import base
-
-
-class TestOctavia_tempest_plugin(base.TestCase):
-
- def test_something(self):
- pass
diff --git a/octavia_tempest_plugin/tests/v2/__init__.py b/octavia_tempest_plugin/tests/v2/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/v2/__init__.py
diff --git a/octavia_tempest_plugin/tests/v2/api/test_loadbalancer.py b/octavia_tempest_plugin/tests/v2/api/test_loadbalancer.py
new file mode 100644
index 0000000..4325321
--- /dev/null
+++ b/octavia_tempest_plugin/tests/v2/api/test_loadbalancer.py
@@ -0,0 +1,61 @@
+# Copyright 2017 GoDaddy
+# Copyright 2017 Catalyst IT Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.tests.v2 import base
+
+
+class LoadbalancerTest(base.BaseLoadbalancerTest):
+    """API tests covering basic loadbalancer CRUD operations."""
+
+    name_prefix = 'Tempest-LoadbalancerTest'
+
+    @decorators.idempotent_id('94c66b04-1ab3-4375-a921-89e48d833c1d')
+    @decorators.attr(type='slow')
+    def test_crud_loadbalancer(self):
+        """Exercise create/list/update/show/delete of a loadbalancer."""
+        # Create loadbalancer
+        params = {}
+        if self.vip_network_id:
+            params['vip_network_id'] = self.vip_network_id
+        if self.vip_subnet_id:
+            params['vip_subnet_id'] = self.vip_subnet_id
+        lb_id = self.create_loadbalancer(**params)['id']
+
+        # List loadbalancers and verify the new one is present
+        resp, body = self.lb_client.get_list_objs('loadbalancers')
+        self.assertEqual(200, resp.status)
+        self.assertIn(
+            lb_id,
+            [item['id'] for item in body['loadbalancers']]
+        )
+
+        # Update loadbalancer
+        new_name = data_utils.rand_name('lb', prefix=self.name_prefix)
+        self.update_loadbalancer(lb_id, name=new_name)
+
+        # Show loadbalancer and verify the name change was applied
+        resp, body = self.lb_client.get_obj('loadbalancers', lb_id)
+        self.assertEqual(200, resp.status)
+        self.assertEqual(new_name, body['loadbalancer']['name'])
+
+        # Delete loadbalancer
+        self.delete_loadbalancer(lb_id)
+
+        # List loadbalancers and verify it is gone
+        resp, body = self.lb_client.get_list_objs('loadbalancers')
+        self.assertEqual(200, resp.status)
+        self.assertNotIn(
+            lb_id,
+            [item['id'] for item in body['loadbalancers']]
+        )
diff --git a/octavia_tempest_plugin/tests/v2/base.py b/octavia_tempest_plugin/tests/v2/base.py
new file mode 100644
index 0000000..7fec324
--- /dev/null
+++ b/octavia_tempest_plugin/tests/v2/base.py
@@ -0,0 +1,524 @@
+# Copyright 2017 GoDaddy
+# Copyright 2017 Catalyst IT Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+import time
+
+from oslo_log import log as logging
+import requests
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
+from tempest.lib import exceptions as lib_exc
+from tempest import test
+import tenacity
+
+from octavia_tempest_plugin.tests import server_util
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class BaseLoadbalancerTest(test.BaseTestCase):
+    """Base class providing clients, network setup and LB CRUD helpers."""
+
+    credentials = (['lbmember', CONF.loadbalancer.member_role], 'admin')
+    name_prefix = 'Tempest-BaseLoadbalancerTest'
+    # Network resources shared by the tests; populated either from config
+    # or by resource_setup().
+    vip_network_id = None
+    vip_subnet_id = None
+    vip_address = None
+    member_subnet_id = None
+    member_network_id = None
+    # Fixed IP of the backend VM, set by create_backend().
+    vm_ip = None
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaseLoadbalancerTest, cls).skip_checks()
+
+ if not CONF.service_available.loadbalancer:
+ raise cls.skipException("Loadbalancing service is not available.")
+
+ service_list = {
+ 'loadbalancing': CONF.service_available.loadbalancer,
+ 'compute': CONF.service_available.nova,
+ 'image': CONF.service_available.glance,
+ 'neutron': CONF.service_available.neutron
+ }
+ for srv, available in service_list.items():
+ if not available:
+ raise cls.skipException("Service %s is not available." % srv)
+
+    @classmethod
+    def setup_clients(cls):
+        """Set up Octavia, compute and network clients plus identity IDs."""
+        super(BaseLoadbalancerTest, cls).setup_clients()
+
+        cls.lb_client = cls.os_roles_lbmember.octavia_v2.LoadbalancerClient()
+        cls.servers_client = cls.os_roles_lbmember.servers_client
+        cls.networks_client = cls.os_roles_lbmember.networks_client
+        cls.subnets_client = cls.os_roles_lbmember.subnets_client
+        cls.interfaces_client = cls.os_roles_lbmember.interfaces_client
+        cls.sg_rule_client = cls.os_roles_lbmember.security_group_rules_client
+        cls.floatingip_client = cls.os_roles_lbmember.floating_ips_client
+        cls.floatingip_adm_client = cls.os_admin.floating_ips_client
+        cls.routers_adm_client = cls.os_admin.routers_client
+
+        # The auth data layout differs between keystone v2 and v3 tokens.
+        if CONF.identity.auth_version == 'v3':
+            project_id = cls.os_roles_lbmember.auth_provider.auth_data[1][
+                'project']['id']
+        else:
+            project_id = cls.os_roles_lbmember.auth_provider.auth_data[
+                1]['token']['tenant']['id']
+
+        cls.tenant_id = project_id
+        cls.user_id = cls.os_roles_lbmember.auth_provider.auth_data[1][
+            'user']['id']
+
+    @classmethod
+    def resource_setup(cls):
+        """Creates network resources.
+
+        When CONF.loadbalancer.vip_network_id is unset, a dedicated network
+        and subnet are created (and a router when floating-IP validation is
+        used); otherwise the pre-configured IDs are used as-is.
+        """
+        super(BaseLoadbalancerTest, cls).resource_setup()
+        if not CONF.loadbalancer.vip_network_id:
+            network_name = data_utils.rand_name(
+                'network',
+                prefix=cls.name_prefix
+            )
+            body = cls.networks_client.create_network(name=network_name)
+            cls.vip_network_id = body['network']['id']
+            cls.addClassResourceCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                cls.networks_client.delete_network,
+                cls.vip_network_id
+            )
+
+            subnet_name = data_utils.rand_name(
+                'subnet',
+                prefix=cls.name_prefix
+            )
+            body = cls.subnets_client.create_subnet(
+                name=subnet_name,
+                network_id=cls.vip_network_id,
+                cidr='10.100.1.0/24',
+                ip_version=4,
+                gateway_ip='10.100.1.1',
+            )
+            cls.vip_subnet_id = body['subnet']['id']
+            cls.addClassResourceCleanup(
+                test_utils.call_and_ignore_notfound_exc,
+                cls.subnets_client.delete_subnet,
+                cls.vip_subnet_id
+            )
+            # Backend members live on the same network/subnet as the VIP.
+            cls.member_network_id = cls.vip_network_id
+            cls.member_subnet_id = cls.vip_subnet_id
+
+            if CONF.validation.connect_method == 'floating':
+                # A router with an external gateway is needed so floating
+                # IPs on the member subnet are reachable.
+                router_name = data_utils.rand_name(
+                    'router',
+                    prefix=cls.name_prefix
+                )
+                kwargs = {
+                    'name': router_name,
+                    'tenant_id': cls.tenant_id
+                }
+                if CONF.network.public_network_id:
+                    kwargs['external_gateway_info'] = dict(
+                        network_id=CONF.network.public_network_id
+                    )
+                body = cls.routers_adm_client.create_router(**kwargs)
+                cls.router_id = body['router']['id']
+                cls.addClassResourceCleanup(
+                    test_utils.call_and_ignore_notfound_exc,
+                    cls.routers_adm_client.delete_router,
+                    cls.router_id,
+                )
+
+                cls.routers_adm_client.add_router_interface(
+                    cls.router_id, subnet_id=cls.member_subnet_id
+                )
+                cls.addClassResourceCleanup(
+                    test_utils.call_and_ignore_notfound_exc,
+                    cls.routers_adm_client.remove_router_interface,
+                    cls.router_id,
+                    subnet_id=cls.member_subnet_id
+                )
+        else:
+            cls.vip_network_id = CONF.loadbalancer.vip_network_id
+            cls.vip_subnet_id = CONF.loadbalancer.vip_subnet_id
+            cls.member_subnet_id = CONF.loadbalancer.premade_server_subnet_id
+
+    @tenacity.retry(
+        wait=tenacity.wait_fixed(CONF.loadbalancer.lb_build_interval),
+        stop=tenacity.stop_after_delay(CONF.loadbalancer.lb_build_timeout),
+        retry=tenacity.retry_if_exception_type(AssertionError)
+    )
+    def await_loadbalancer_active(self, id, name=None):
+        """Poll until the loadbalancer's provisioning_status is ACTIVE.
+
+        Retried on AssertionError by tenacity up to lb_build_timeout.
+        """
+        resp, body = self.lb_client.get_obj('loadbalancers', id)
+        self.assertEqual(200, resp.status)
+
+        lb = body['loadbalancer']
+
+        # ERROR is terminal; raise a non-AssertionError so tenacity stops
+        # retrying immediately.
+        if lb['provisioning_status'] == 'ERROR':
+            raise Exception('Failed to wait for loadbalancer to be active, '
+                            'actual provisioning_status: ERROR')
+
+        self.assertEqual('ACTIVE', lb['provisioning_status'])
+
+        if name:
+            self.assertEqual(name, lb['name'])
+
+    @tenacity.retry(
+        wait=tenacity.wait_fixed(CONF.loadbalancer.lb_build_interval),
+        stop=tenacity.stop_after_delay(CONF.loadbalancer.lb_build_timeout),
+        retry=tenacity.retry_if_exception_type(AssertionError)
+    )
+    def await_loadbalancer_deleted(self, id):
+        """Poll until the loadbalancer's provisioning_status is DELETED."""
+        resp, body = self.lb_client.get_obj('loadbalancers', id)
+        self.assertEqual(200, resp.status)
+
+        lb = body['loadbalancer']
+        self.assertEqual('DELETED', lb['provisioning_status'])
+
+    @tenacity.retry(
+        wait=tenacity.wait_fixed(CONF.loadbalancer.lb_build_interval),
+        stop=tenacity.stop_after_delay(CONF.loadbalancer.lb_build_timeout),
+        retry=tenacity.retry_if_exception_type(AssertionError)
+    )
+    def await_listener_active(self, id, name=None):
+        """Poll until the listener is ACTIVE (and operationally ONLINE)."""
+        resp, body = self.lb_client.get_obj('listeners', id)
+        self.assertEqual(200, resp.status)
+
+        listener = body['listener']
+
+        # ERROR is terminal; stop retrying immediately.
+        if listener['provisioning_status'] == 'ERROR':
+            raise Exception('Failed to wait for listener to be active, actual '
+                            'provisioning_status: ERROR')
+
+        self.assertEqual('ACTIVE', listener['provisioning_status'])
+        self.assertEqual('ONLINE', listener['operating_status'])
+
+        if name:
+            self.assertEqual(name, listener['name'])
+
+    def create_loadbalancer(self, **kwargs):
+        """Create a loadbalancer, wait for ACTIVE and schedule cleanup.
+
+        Also resolves self.vip_address (via a floating IP when the connect
+        method is 'floating') and records self.lb_id / self.vip_port.
+
+        :returns: the loadbalancer body from the API.
+        """
+        name = data_utils.rand_name('lb', prefix=self.name_prefix)
+        payload = {'loadbalancer': {'name': name}}
+        payload['loadbalancer'].update(kwargs)
+
+        resp, body = self.lb_client.post_json('loadbalancers', payload)
+        self.assertEqual(201, resp.status)
+
+        lb = body['loadbalancer']
+        lb_id = lb['id']
+
+        self.addCleanup(self.delete_loadbalancer, lb_id, ignore_error=True)
+        LOG.info('Waiting for loadbalancer %s to be active', lb_id)
+        self.await_loadbalancer_active(
+            lb_id,
+            name=payload['loadbalancer']['name']
+        )
+
+        self.lb_id = lb['id']
+        self.vip_port = lb['vip_port_id']
+        if CONF.validation.connect_method == 'floating':
+            self.vip_address = self._associate_floatingip()
+        else:
+            self.vip_address = lb['vip_address']
+
+        return lb
+
+    def update_loadbalancer(self, lb_id, **kwargs):
+        """Update a loadbalancer and wait for it to become ACTIVE again."""
+        new_name = data_utils.rand_name('lb', prefix=self.name_prefix)
+        payload = {'loadbalancer': {'name': new_name}}
+        payload['loadbalancer'].update(kwargs)
+
+        resp, _ = self.lb_client.put_json('loadbalancers', lb_id, payload)
+        self.assertEqual(200, resp.status)
+
+        # Wait for loadbalancer to be active
+        LOG.info(
+            'Waiting for loadbalancer %s to be active after update', lb_id
+        )
+        self.await_loadbalancer_active(lb_id)
+
+    def delete_loadbalancer(self, id, ignore_error=False):
+        """Delete loadbalancer (cascade) and wait for it to be deleted.
+
+        Only if loadbalancer is deleted completely can other network
+        resources be deleted.
+        """
+        resp = self.lb_client.delete_resource('loadbalancers', id,
+                                              ignore_error=ignore_error,
+                                              cascade=True)
+        if resp:
+            self.assertEqual(204, resp.status)
+
+        LOG.info('Waiting for loadbalancer %s to be deleted', id)
+        self.await_loadbalancer_deleted(id)
+
+ def create_listener(self, lb_id, **kwargs):
+ name = data_utils.rand_name('listener', prefix=self.name_prefix)
+ payload = {
+ 'listener': {
+ 'protocol': 'HTTP',
+ 'protocol_port': '80',
+ 'loadbalancer_id': lb_id,
+ 'name': name
+ }
+ }
+ payload['listener'].update(kwargs)
+
+ resp, body = self.lb_client.post_json('listeners', payload)
+ self.assertEqual(201, resp.status)
+
+ listener_id = body['listener']['id']
+
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after listener %s '
+ 'creation', lb_id, listener_id
+ )
+ self.addCleanup(self.delete_listener, listener_id, lb_id,
+ ignore_error=True)
+ self.await_loadbalancer_active(lb_id)
+
+ return body['listener']
+
+ def update_listener(self, listener_id, lb_id, **kwargs):
+ new_name = data_utils.rand_name('listener', prefix=self.name_prefix)
+ payload = {'listener': {'name': new_name}}
+ payload['listener'].update(kwargs)
+
+ resp, _ = self.lb_client.put_json('listeners', listener_id, payload)
+ self.assertEqual(200, resp.status)
+
+ # Wait for loadbalancer to be active
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after listener %s '
+ 'update', lb_id, listener_id
+ )
+ self.await_loadbalancer_active(lb_id)
+
+ def delete_listener(self, id, lb_id, ignore_error=False):
+ resp = self.lb_client.delete_resource('listeners', id,
+ ignore_error=ignore_error)
+ if resp:
+ self.assertEqual(204, resp.status)
+
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after deleting '
+ 'listener %s', lb_id, id
+ )
+ self.await_loadbalancer_active(lb_id)
+
+ def create_pool(self, lb_id, **kwargs):
+ name = data_utils.rand_name('pool', prefix=self.name_prefix)
+ payload = {
+ 'pool': {
+ 'name': name,
+ 'loadbalancer_id': lb_id,
+ 'lb_algorithm': 'ROUND_ROBIN',
+ 'protocol': 'HTTP'
+ }
+ }
+ payload['pool'].update(kwargs)
+
+ resp, body = self.lb_client.post_json('pools', payload)
+ self.assertEqual(201, resp.status)
+
+ pool_id = body['pool']['id']
+
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after pool %s creation',
+ lb_id, pool_id
+ )
+ self.addCleanup(self.delete_pool, pool_id, lb_id, ignore_error=True)
+ self.await_loadbalancer_active(lb_id)
+
+ return body['pool']
+
+ def update_pool(self, pool_id, lb_id, **kwargs):
+ new_name = data_utils.rand_name('pool', prefix=self.name_prefix)
+ payload = {'pool': {'name': new_name}}
+ payload['pool'].update(kwargs)
+
+ resp, _ = self.lb_client.put_json('pools', pool_id, payload)
+ self.assertEqual(200, resp.status)
+
+ # Wait for loadbalancer to be active
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after pool %s update',
+ lb_id, pool_id
+ )
+ self.await_loadbalancer_active(lb_id)
+
+ def delete_pool(self, id, lb_id, ignore_error=False):
+ resp = self.lb_client.delete_resource('pools', id,
+ ignore_error=ignore_error)
+ if resp:
+ self.assertEqual(204, resp.status)
+
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after deleting '
+ 'pool %s', lb_id, id
+ )
+ self.await_loadbalancer_active(lb_id)
+
+ def create_member(self, pool_id, lb_id, **kwargs):
+ name = data_utils.rand_name('member', prefix=self.name_prefix)
+ payload = {'member': {'name': name}}
+ payload['member'].update(kwargs)
+
+ resp, body = self.lb_client.post_json(
+ 'pools/%s/members' % pool_id, payload
+ )
+ self.assertEqual(201, resp.status)
+
+ member_id = body['member']['id']
+
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after adding '
+ 'member %s', lb_id, member_id
+ )
+ self.addCleanup(self.delete_member, member_id, pool_id,
+ lb_id, ignore_error=True)
+ self.await_loadbalancer_active(lb_id)
+
+ return body['member']
+
+ def delete_member(self, id, pool_id, lb_id, ignore_error=False):
+ resp = self.lb_client.delete_resource(
+ 'pools/%s/members' % pool_id,
+ id,
+ ignore_error=ignore_error
+ )
+ if resp:
+ self.assertEqual(204, resp.status)
+
+ LOG.info(
+ 'Waiting for loadbalancer %s to be active after deleting '
+ 'member %s', lb_id, id
+ )
+ self.await_loadbalancer_active(lb_id)
+
+    def _wait_for_lb_functional(self, vip_address):
+        """Block until the VIP starts answering HTTP requests.
+
+        :raises lib_exc.ServerFault: if no response arrives within
+            CONF.loadbalancer.lb_build_timeout seconds.
+        """
+        session = requests.Session()
+        start = time.time()
+
+        while time.time() - start < CONF.loadbalancer.lb_build_timeout:
+            try:
+                session.get("http://{0}".format(vip_address), timeout=2)
+                # NOTE(review): sleeps briefly after the first successful
+                # response, presumably to let the LB settle before callers
+                # assert on traffic — confirm this is intended.
+                time.sleep(1)
+                return
+            except Exception:
+                LOG.warning('Server is not passing initial traffic. Waiting.')
+                time.sleep(1)
+        LOG.error('Server did not begin passing traffic within the timeout '
+                  'period. Failing test.')
+        raise lib_exc.ServerFault()
+
+    def check_members_balanced(self):
+        """Verify round-robin balancing across the two backend members.
+
+        Sends 20 requests to the VIP and asserts exactly two distinct
+        response bodies, each seen the same number of times.
+        """
+        session = requests.Session()
+        response_counts = {}
+
+        self._wait_for_lb_functional(self.vip_address)
+
+        # Send a number requests to lb vip
+        for i in range(20):
+            try:
+                r = session.get('http://{0}'.format(self.vip_address),
+                                timeout=2)
+                LOG.debug('Loadbalancer response: %s', r.content)
+
+                # Count responses per distinct body; each backend is assumed
+                # to reply with a distinct body, so this counts per-member
+                # hits.
+                if r.content in response_counts:
+                    response_counts[r.content] += 1
+                else:
+                    response_counts[r.content] = 1
+
+            except Exception:
+                LOG.exception('Failed to send request to loadbalancer vip')
+                raise lib_exc.BadRequest(message='Failed to connect to lb')
+
+        # Ensure the correct number of members
+        self.assertEqual(2, len(response_counts))
+
+        # Ensure both members got the same number of responses
+        self.assertEqual(1, len(set(response_counts.values())))
+
+    def _delete_floatingip(self, floating_ip):
+        """Disassociate and delete a floating IP, ignoring NotFound."""
+        self.floatingip_adm_client.update_floatingip(
+            floating_ip,
+            port_id=None
+        )
+        test_utils.call_and_ignore_notfound_exc(
+            self.floatingip_adm_client.delete_floatingip, floating_ip
+        )
+
+    def _associate_floatingip(self):
+        """Create a floating IP and associate it with the LB VIP port.
+
+        :returns: the floating IP address now routed to the VIP.
+        """
+        floatingip = self.floatingip_adm_client.create_floatingip(
+            floating_network_id=CONF.network.public_network_id
+        )['floatingip']
+        floatip_vip = floatingip['floating_ip_address']
+        self.addCleanup(self._delete_floatingip, floatingip['id'])
+
+        LOG.debug('Floating ip %s created.', floatip_vip)
+
+        self.floatingip_adm_client.update_floatingip(
+            floatingip['id'],
+            port_id=self.vip_port
+        )
+
+        LOG.debug('Floating ip %s associated with vip.', floatip_vip)
+        return floatip_vip
+
+    def create_backend(self):
+        """Boot a backend VM and start the web servers on it.
+
+        Uses CONF.loadbalancer.premade_server_ip when configured; otherwise
+        boots a validatable server, discovers its fixed IP (stored in
+        self.vm_ip) and starts the httpd binary on ports 80/81.
+        """
+        if CONF.loadbalancer.premade_server_ip:
+            self.vm_ip = CONF.loadbalancer.premade_server_ip
+            return
+
+        vr_resources = self.vr.resources
+        vm = server_util.create_server(
+            self.os_roles_lbmember,
+            validatable=True,
+            validation_resources=vr_resources,
+            wait_until='ACTIVE',
+            tenant_network=({'id': self.member_network_id}
+                            if self.member_network_id else None),
+        )
+        self.addCleanup(
+            server_util.clear_server,
+            self.os_roles_lbmember.servers_client,
+            vm['id']
+        )
+
+        # Get vm private ip address.
+        # NOTE(review): the filters below compare against vip_network_id /
+        # vip_subnet_id even though the guards read member_network_id /
+        # member_subnet_id; the IDs are the same when resource_setup created
+        # the network — confirm this is intended for the pre-configured case.
+        ifaces = self.interfaces_client.list_interfaces(vm['id'])
+        for iface in ifaces['interfaceAttachments']:
+            if not self.member_network_id or (iface['net_id'] ==
+                                              self.vip_network_id):
+                for ip_info in iface['fixed_ips']:
+                    if not self.vip_subnet_id or (ip_info['subnet_id'] ==
+                                                  self.vip_subnet_id):
+                        self.vm_ip = ip_info['ip_address']
+                        break
+                if self.vm_ip:
+                    break
+
+        self.assertIsNotNone(self.vm_ip)
+
+        if CONF.validation.connect_method == 'floating':
+            connect_ip = vr_resources['floating_ip']['floating_ip_address']
+        else:
+            connect_ip = self.vm_ip
+
+        server_util.run_webserver(
+            connect_ip,
+            vr_resources['keypair']['private_key']
+        )
+        LOG.debug('Web servers are running inside %s', vm['id'])
diff --git a/octavia_tempest_plugin/tests/v2/scenario/__init__.py b/octavia_tempest_plugin/tests/v2/scenario/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/octavia_tempest_plugin/tests/v2/scenario/__init__.py
diff --git a/octavia_tempest_plugin/tests/v2/scenario/test_basic_ops.py b/octavia_tempest_plugin/tests/v2/scenario/test_basic_ops.py
new file mode 100644
index 0000000..25d16ff
--- /dev/null
+++ b/octavia_tempest_plugin/tests/v2/scenario/test_basic_ops.py
@@ -0,0 +1,92 @@
+# Copyright 2017 Catalyst IT Ltd
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslo_log import log as logging
+
+from tempest import config
+from tempest.lib.common import validation_resources as vr
+from tempest.lib import decorators
+
+from octavia_tempest_plugin.tests.v2 import base
+
+LOG = logging.getLogger(__name__)
+CONF = config.CONF
+
+
+class BasicOpsTest(base.BaseLoadbalancerTest):
+    """Minimal end-to-end scenario: LB + listener + pool + two members."""
+
+    name_prefix = 'Tempest-BasicOpsTest'
+
+    def setUp(self):
+        super(BasicOpsTest, self).setUp()
+
+        # Set up validation resources (keypair / security group / optional
+        # floating IP) needed to reach the backend instance.
+        resources = dict(
+            keypair=True,
+            security_group=True,
+            security_group_rules=True,
+            floating_ip=CONF.validation.connect_method == 'floating'
+        )
+        self.vr = self.useFixture(
+            vr.ValidationResourcesFixture(
+                self.os_roles_lbmember,
+                use_neutron=True,
+                floating_network_id=CONF.network.public_network_id,
+                **resources
+            )
+        )
+
+        # Add security group rule to allow http requests to the two web
+        # server ports on the backend.
+        self.sg_rule_client.create_security_group_rule(
+            security_group_id=self.vr.resources['security_group']['id'],
+            protocol='tcp',
+            ethertype='IPv4',
+            port_range_min=80,
+            port_range_max=81,
+            direction='ingress'
+        )
+
+        self.create_backend()
+
+    @decorators.idempotent_id('250ebc41-645e-43fb-a79a-e3035f338e2a')
+    @decorators.attr(type='slow')
+    def test_basic_ops(self):
+        """Create LB/pool/listener/members and verify traffic balances."""
+        # Create loadbalancer
+        params = {}
+        if self.vip_network_id:
+            params['vip_network_id'] = self.vip_network_id
+        if self.vip_subnet_id:
+            params['vip_subnet_id'] = self.vip_subnet_id
+
+        self.create_loadbalancer(**params)
+
+        # Create pool
+        pool = self.create_pool(self.lb_id)
+        self.pool_id = pool['id']
+
+        # Create listener
+        params = {'default_pool_id': self.pool_id}
+        listener = self.create_listener(self.lb_id, **params)
+        self.listener_id = listener['id']
+
+        # Add members to the pool, one per web server port
+        for port in [80, 81]:
+            params = {
+                'address': self.vm_ip,
+                'protocol_port': port,
+            }
+            if self.member_subnet_id:
+                params['subnet_id'] = self.member_subnet_id
+
+            self.create_member(self.pool_id, self.lb_id, **params)
+
+        self.check_members_balanced()