Migrate test_load_balancer_basic to tempest client
This patch migrates the load balancer basic ops scenario test from the
official clients to the Tempest clients.
It also removes the NeutronRetriever class from the test script: status
polling now goes through the network client's wait_for_resource_status()
and the DeletableVip.wait_for_status() wrapper.
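For example, the old status_timeout()/NeutronRetriever combination in the
test is replaced by a one-line wait on the deletable wrapper, roughly:

    # before (removed by this patch):
    self.status_timeout(NeutronRetriever(self.network_client,
                                         self.network_client.vip_path,
                                         net_common.DeletableVip),
                        self.vip.id,
                        expected_status='ACTIVE')

    # after:
    self.vip.wait_for_status('ACTIVE')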
Partially-implements: bp:tempest-client-scenarios
Change-Id: I4f3da202646821671d0e98707031a49973c256b2
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index aa2d686..ca0f8a5 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -878,6 +878,41 @@
return rules
+ def _create_pool(self, lb_method, protocol, subnet_id):
+ """Wrapper utility that returns a test pool."""
+ client = self.network_client
+ name = data_utils.rand_name('pool')
+ _, resp_pool = client.create_pool(protocol=protocol, name=name,
+ subnet_id=subnet_id,
+ lb_method=lb_method)
+ pool = net_resources.DeletablePool(client=client, **resp_pool['pool'])
+ self.assertEqual(pool['name'], name)
+ self.addCleanup(self.delete_wrapper, pool.delete)
+ return pool
+
+ def _create_member(self, address, protocol_port, pool_id):
+ """Wrapper utility that returns a test member."""
+ client = self.network_client
+ _, resp_member = client.create_member(protocol_port=protocol_port,
+ pool_id=pool_id,
+ address=address)
+ member = net_resources.DeletableMember(client=client,
+ **resp_member['member'])
+ self.addCleanup(self.delete_wrapper, member.delete)
+ return member
+
+ def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
+ """Wrapper utility that returns a test vip."""
+ client = self.network_client
+ name = data_utils.rand_name('vip')
+ _, resp_vip = client.create_vip(protocol=protocol, name=name,
+ subnet_id=subnet_id, pool_id=pool_id,
+ protocol_port=protocol_port)
+ vip = net_resources.DeletableVip(client=client, **resp_vip['vip'])
+ self.assertEqual(vip['name'], name)
+ self.addCleanup(self.delete_wrapper, vip.delete)
+ return vip
+
def _ssh_to_server(self, server, private_key):
ssh_login = CONF.compute.image_ssh_user
return self.get_remote_client(server,
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 35e50e8..5e83ff9 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -28,7 +28,7 @@
config = config.CONF
-class TestLoadBalancerBasic(manager.NetworkScenarioTest):
+class TestLoadBalancerBasic(manager.NeutronScenarioTest):
"""
This test checks basic load balancing.
@@ -72,7 +72,7 @@
super(TestLoadBalancerBasic, self).setUp()
self.server_ips = {}
self.server_fixed_ips = {}
- self._create_security_group()
+ self._create_security_group_for_test()
self._set_net_and_subnet()
def _set_net_and_subnet(self):
@@ -103,8 +103,8 @@
subnet = self._list_subnets(network_id=self.network['id'])[0]
self.subnet = net_common.AttributeDict(subnet)
- def _create_security_group(self):
- self.security_group = self._create_security_group_neutron(
+ def _create_security_group_for_test(self):
+ self.security_group = self._create_security_group(
tenant_id=self.tenant_id)
self._create_security_group_rules_for_port(self.port1)
self._create_security_group_rules_for_port(self.port2)
@@ -117,35 +117,35 @@
'port_range_max': port,
}
self._create_security_group_rule(
- client=self.network_client,
secgroup=self.security_group,
tenant_id=self.tenant_id,
**rule)
def _create_server(self, name):
- keypair = self.create_keypair(name='keypair-%s' % name)
- security_groups = [self.security_group.name]
+ keypair = self.create_keypair()
+ security_groups = [self.security_group]
create_kwargs = {
'nics': [
{'net-id': self.network['id']},
],
- 'key_name': keypair.name,
+ 'key_name': keypair['name'],
'security_groups': security_groups,
}
- server = self.create_server(name=name,
- create_kwargs=create_kwargs)
- self.servers_keypairs[server.id] = keypair
net_name = self.network['name']
+ server = self.create_server(name=name, create_kwargs=create_kwargs)
+ self.servers_keypairs[server['id']] = keypair
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
floating_ip = self._create_floating_ip(
server, public_network_id)
self.floating_ips[floating_ip] = server
- self.server_ips[server.id] = floating_ip.floating_ip_address
+ self.server_ips[server['id']] = floating_ip.floating_ip_address
else:
- self.server_ips[server.id] = server.networks[net_name][0]
- self.server_fixed_ips[server.id] = server.networks[net_name][0]
+ self.server_ips[server['id']] =\
+ server['addresses'][net_name][0]['addr']
+ self.server_fixed_ips[server['id']] =\
+ server['addresses'][net_name][0]['addr']
self.assertTrue(self.servers_keypairs)
return server
@@ -162,8 +162,8 @@
2. Start two http backends listening on ports 80 and 88 respectively
"""
for server_id, ip in self.server_ips.iteritems():
- private_key = self.servers_keypairs[server_id].private_key
- server_name = self.compute_client.servers.get(server_id).name
+ private_key = self.servers_keypairs[server_id]['private_key']
+ server_name = self.servers_client.get_server(server_id)[1]['name']
username = config.scenario.ssh_user
ssh_client = self.get_remote_client(
server_or_ip=ip,
@@ -269,11 +269,7 @@
protocol_port=80,
subnet_id=self.subnet.id,
pool_id=self.pool.id)
- self.status_timeout(NeutronRetriever(self.network_client,
- self.network_client.vip_path,
- net_common.DeletableVip),
- self.vip.id,
- expected_status='ACTIVE')
+ self.vip.wait_for_status('ACTIVE')
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
self._assign_floating_ip_to_vip(self.vip)
@@ -286,8 +282,8 @@
# vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
# However the linuxbridge-agent does, and it is necessary to add a
# security group with a rule that allows tcp port 80 to the vip port.
- body = {'port': {'security_groups': [self.security_group.id]}}
- self.network_client.update_port(self.vip.port_id, body)
+ self.network_client.update_port(
+ self.vip.port_id, security_groups=[self.security_group.id])
def _check_load_balancing(self):
"""
@@ -318,27 +314,3 @@
self._start_servers()
self._create_load_balancer()
self._check_load_balancing()
-
-
-class NeutronRetriever(object):
- """
- Helper class to make possible handling neutron objects returned by GET
- requests as attribute dicts.
-
- Whet get() method is called, the returned dictionary is wrapped into
- a corresponding DeletableResource class which provides attribute access
- to dictionary values.
-
- Usage:
- This retriever is used to allow using status_timeout from
- tempest.manager with Neutron objects.
- """
-
- def __init__(self, network_client, path, resource):
- self.network_client = network_client
- self.path = path
- self.resource = resource
-
- def get(self, thing_id):
- obj = self.network_client.get(self.path % thing_id)
- return self.resource(client=self.network_client, **obj.values()[0])
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 94ba5aa..5ad5f37 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -13,6 +13,7 @@
import time
import urllib
+from tempest.common.utils import misc
from tempest import config
from tempest import exceptions
@@ -227,3 +228,39 @@
except exceptions.NotFound:
return True
return False
+
+ def wait_for_resource_status(self, fetch, status, interval=None,
+ timeout=None):
+ """
+ @summary: Waits for a network resource to reach a status
+ @param fetch: the callable to be used to query the resource status
+ @type fetch: callable that takes no parameters and returns the resource
+ @param status: the status that the resource has to reach
+ @type status: String
+ @param interval: the number of seconds to wait between each status
+ query
+ @type interval: Integer
+ @param timeout: the maximum number of seconds to wait for the resource
+ to reach the desired status
+ @type timeout: Integer
+ """
+ if not interval:
+ interval = self.build_interval
+ if not timeout:
+ timeout = self.build_timeout
+ start_time = time.time()
+
+ while time.time() - start_time <= timeout:
+ resource = fetch()
+ if resource['status'] == status:
+ return
+ time.sleep(interval)
+
+ # At this point, the wait has timed out
+ message = 'Resource %s' % (str(resource))
+ message += ' failed to reach status %s' % status
+ message += ' within the required time %s' % timeout
+ caller = misc.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise exceptions.TimeoutException(message)
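As a self-contained illustration of the contract this helper expects from
its fetch callable (the stub below is made up for the example; in the patch
the callable is a bound DeletableResource.show method), roughly:

    import time

    def wait_for_resource_status(fetch, status, interval=1, timeout=10):
        """Poll fetch() until it reports the desired status or time out."""
        start_time = time.time()
        while time.time() - start_time <= timeout:
            resource = fetch()
            if resource['status'] == status:
                return
            time.sleep(interval)
        raise RuntimeError('%s failed to reach status %s within %s seconds'
                           % (resource, status, timeout))

    # Stub fetch: the resource becomes ACTIVE on the second poll.
    states = iter([{'status': 'PENDING_CREATE'}, {'status': 'ACTIVE'}])
    wait_for_resource_status(lambda: next(states), 'ACTIVE', interval=0)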
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index b2feb87..2b182d0 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -51,9 +51,19 @@
def delete(self):
return
+ @abc.abstractmethod
+ def show(self):
+ return
+
def __hash__(self):
return hash(self.id)
+ def wait_for_status(self, status):
+ if not hasattr(self, 'status'):
+ return
+
+ return self.client.wait_for_resource_status(self.show, status)
+
class DeletableNetwork(DeletableResource):
@@ -161,3 +171,8 @@
def delete(self):
self.client.delete_vip(self.id)
+
+ def show(self):
+ _, result = self.client.show_vip(self.id)
+ super(DeletableVip, self).update(**result['vip'])
+ return self
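Taken together with the new manager helpers, a scenario test can now build
and wait on a load balancer along these lines (a rough sketch; the argument
values and the loop over server_fixed_ips are illustrative, only the helper
signatures come from this patch):

    # Inside a NeutronScenarioTest subclass:
    pool = self._create_pool(lb_method='ROUND_ROBIN', protocol='HTTP',
                             subnet_id=self.subnet.id)
    for fixed_ip in self.server_fixed_ips.values():
        self._create_member(address=fixed_ip, protocol_port=80,
                            pool_id=pool.id)
    vip = self._create_vip(protocol='HTTP', protocol_port=80,
                           subnet_id=self.subnet.id, pool_id=pool.id)
    # wait_for_status() hands the bound show() method to
    # wait_for_resource_status(), so each poll refreshes the wrapped
    # dict via show_vip().
    vip.wait_for_status('ACTIVE')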