Merge "Migrate object_storage API tests to resource_* fixtures"
diff --git a/tempest/api/baremetal/admin/base.py b/tempest/api/baremetal/admin/base.py
index 4ccb7e1..3b12b8e 100644
--- a/tempest/api/baremetal/admin/base.py
+++ b/tempest/api/baremetal/admin/base.py
@@ -28,6 +28,10 @@
# which has no external dependencies.
SUPPORTED_DRIVERS = ['fake']
+# NOTE(jroll): resources must be deleted in a specific order; this list
+# defines the resource types to clean up, in the correct order.
+RESOURCE_TYPES = ['port', 'node', 'chassis']
+
def creates(resource):
"""Decorator that adds resources to the appropriate cleanup list."""
@@ -66,16 +70,17 @@
mgr = clients.AdminManager()
cls.client = mgr.baremetal_client
cls.power_timeout = CONF.baremetal.power_timeout
- cls.created_objects = {'chassis': set(),
- 'port': set(),
- 'node': set()}
+ cls.created_objects = {}
+ for resource in RESOURCE_TYPES:
+ cls.created_objects[resource] = set()
@classmethod
def resource_cleanup(cls):
"""Ensure that all created objects get destroyed."""
try:
- for resource, uuids in cls.created_objects.iteritems():
+ for resource in RESOURCE_TYPES:
+ uuids = cls.created_objects[resource]
delete_method = getattr(cls.client, 'delete_%s' % resource)
for u in uuids:
delete_method(u, ignore_errors=exc.NotFound)
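For context, only the docstring of the creates() decorator is visible in the hunk above. A minimal sketch of how such a decorator could feed cls.created_objects, so that resource_cleanup() deletes ports, then nodes, then chassis in the RESOURCE_TYPES order (hypothetical body, assuming each wrapped classmethod returns a (resp, body) tuple whose body carries a 'uuid' key):

```python
import functools

# Hypothetical sketch -- the real decorator body is outside this hunk.
# It assumes the wrapped classmethod returns (resp, body) and that body
# contains the new resource's 'uuid'.
def creates(resource):
    """Decorator that adds resources to the appropriate cleanup list."""
    def decorator(f):
        @functools.wraps(f)
        def wrapper(cls, *args, **kwargs):
            resp, body = f(cls, *args, **kwargs)
            if 'uuid' in body:
                cls.created_objects[resource].add(body['uuid'])
            return resp, body
        return wrapper
    return decorator
```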
diff --git a/tempest/api/baremetal/admin/test_chassis.py b/tempest/api/baremetal/admin/test_chassis.py
index 4dde2ce..6f83412 100644
--- a/tempest/api/baremetal/admin/test_chassis.py
+++ b/tempest/api/baremetal/admin/test_chassis.py
@@ -75,3 +75,9 @@
description=new_description))
_, chassis = self.client.show_chassis(uuid)
self.assertEqual(chassis['description'], new_description)
+
+ @test.attr(type='smoke')
+ def test_chassis_node_list(self):
+ _, node = self.create_node(self.chassis['uuid'])
+ _, body = self.client.list_chassis_nodes(self.chassis['uuid'])
+ self.assertIn(node['uuid'], [n['uuid'] for n in body['nodes']])
diff --git a/tempest/api/baremetal/admin/test_nodes.py b/tempest/api/baremetal/admin/test_nodes.py
index b9b9b55..8ccd36b 100644
--- a/tempest/api/baremetal/admin/test_nodes.py
+++ b/tempest/api/baremetal/admin/test_nodes.py
@@ -13,6 +13,8 @@
import six
from tempest.api.baremetal.admin import base
+from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import exceptions as exc
from tempest import test
@@ -33,6 +35,17 @@
self.assertIn(key, actual)
self.assertEqual(value, actual[key])
+ def _associate_node_with_instance(self):
+ self.client.set_node_power_state(self.node['uuid'], 'power off')
+ waiters.wait_for_bm_node_status(self.client, self.node['uuid'],
+ 'power_state', 'power off')
+ instance_uuid = data_utils.rand_uuid()
+ self.client.update_node(self.node['uuid'],
+ instance_uuid=instance_uuid)
+ self.addCleanup(self.client.update_node,
+ uuid=self.node['uuid'], instance_uuid=None)
+ return instance_uuid
+
@test.attr(type='smoke')
def test_create_node(self):
params = {'cpu_arch': 'x86_64',
@@ -63,6 +76,34 @@
[i['uuid'] for i in body['nodes']])
@test.attr(type='smoke')
+ def test_list_nodes_association(self):
+ _, body = self.client.list_nodes(associated=True)
+ self.assertNotIn(self.node['uuid'],
+ [n['uuid'] for n in body['nodes']])
+
+ self._associate_node_with_instance()
+
+ _, body = self.client.list_nodes(associated=True)
+ self.assertIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])
+
+ _, body = self.client.list_nodes(associated=False)
+ self.assertNotIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])
+
+ @test.attr(type='smoke')
+ def test_node_port_list(self):
+ _, port = self.create_port(self.node['uuid'],
+ data_utils.rand_mac_address())
+ _, body = self.client.list_node_ports(self.node['uuid'])
+ self.assertIn(port['uuid'],
+ [p['uuid'] for p in body['ports']])
+
+ @test.attr(type='smoke')
+ def test_node_port_list_no_ports(self):
+ _, node = self.create_node(self.chassis['uuid'])
+ _, body = self.client.list_node_ports(node['uuid'])
+ self.assertEmpty(body['ports'])
+
+ @test.attr(type='smoke')
def test_update_node(self):
props = {'cpu_arch': 'x86_64',
'cpu_num': '12',
@@ -120,3 +161,10 @@
_, body = self.client.get_console(self.node['uuid'])
self.assertEqual(True, body['console_enabled'])
+
+ @test.attr(type='smoke')
+ def test_get_node_by_instance_uuid(self):
+ instance_uuid = self._associate_node_with_instance()
+ _, body = self.client.show_node_by_instance_uuid(instance_uuid)
+ self.assertEqual(len(body['nodes']), 1)
+ self.assertIn(self.node['uuid'], [n['uuid'] for n in body['nodes']])
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index a15cf41..dc3d6bc 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -72,19 +72,6 @@
cls.image_ids = []
- @test.skip_because(bug="1006725")
- @test.attr(type=['negative', 'gate'])
- def test_create_image_specify_multibyte_character_image_name(self):
- if self.__class__._interface == "xml":
- raise self.skipException("Not testable in XML")
- # invalid multibyte sequence from:
- # http://stackoverflow.com/questions/1301402/
- # example-invalid-utf8-string
- invalid_name = data_utils.rand_name(u'\xc3\x28')
- self.assertRaises(exceptions.BadRequest,
- self.client.create_image, self.server_id,
- invalid_name)
-
@test.attr(type=['negative', 'gate'])
def test_create_image_specify_invalid_metadata(self):
# Return an error when creating image with invalid metadata
diff --git a/tempest/api/identity/admin/v3/test_tokens.py b/tempest/api/identity/admin/v3/test_tokens.py
index 68d61f6..230e09f 100644
--- a/tempest/api/identity/admin/v3/test_tokens.py
+++ b/tempest/api/identity/admin/v3/test_tokens.py
@@ -47,7 +47,6 @@
self.assertRaises(exceptions.NotFound, self.client.get_token,
subject_token)
- @test.skip_because(bug="1351026")
@test.attr(type='gate')
def test_rescope_token(self):
"""Rescope a token.
diff --git a/tempest/cli/simple_read_only/compute/__init__.py b/tempest/cli/simple_read_only/compute/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/compute/__init__.py
diff --git a/tempest/cli/simple_read_only/data_processing/__init__.py b/tempest/cli/simple_read_only/data_processing/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/data_processing/__init__.py
diff --git a/tempest/cli/simple_read_only/identity/__init__.py b/tempest/cli/simple_read_only/identity/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/identity/__init__.py
diff --git a/tempest/cli/simple_read_only/image/__init__.py b/tempest/cli/simple_read_only/image/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/image/__init__.py
diff --git a/tempest/cli/simple_read_only/network/__init__.py b/tempest/cli/simple_read_only/network/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/network/__init__.py
diff --git a/tempest/cli/simple_read_only/object_storage/__init__.py b/tempest/cli/simple_read_only/object_storage/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/object_storage/__init__.py
diff --git a/tempest/cli/simple_read_only/orchestration/__init__.py b/tempest/cli/simple_read_only/orchestration/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/orchestration/__init__.py
diff --git a/tempest/cli/simple_read_only/orchestration/test_heat.py b/tempest/cli/simple_read_only/orchestration/test_heat.py
index 019818b..430cdf1 100644
--- a/tempest/cli/simple_read_only/orchestration/test_heat.py
+++ b/tempest/cli/simple_read_only/orchestration/test_heat.py
@@ -38,6 +38,9 @@
"not available")
raise cls.skipException(msg)
super(SimpleReadOnlyHeatClientTest, cls).setUpClass()
+ cls.heat_template_path = os.path.join(os.path.dirname(
+ os.path.dirname(os.path.realpath(__file__))),
+ 'heat_templates/heat_minimal.yaml')
def test_heat_stack_list(self):
self.heat('stack-list')
@@ -70,17 +73,13 @@
self.assertIsInstance(json.loads(rsrc_schema), dict)
def test_heat_template_validate_yaml(self):
- filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
- 'heat_templates/heat_minimal.yaml')
- ret = self.heat('template-validate -f %s' % filepath)
+ ret = self.heat('template-validate -f %s' % self.heat_template_path)
# On success template-validate returns a json representation
# of the template parameters
self.assertIsInstance(json.loads(ret), dict)
def test_heat_template_validate_hot(self):
- filepath = os.path.join(os.path.dirname(os.path.realpath(__file__)),
- 'heat_templates/heat_minimal_hot.yaml')
- ret = self.heat('template-validate -f %s' % filepath)
+ ret = self.heat('template-validate -f %s' % self.heat_template_path)
self.assertIsInstance(json.loads(ret), dict)
def test_heat_help(self):
diff --git a/tempest/cli/simple_read_only/telemetry/__init__.py b/tempest/cli/simple_read_only/telemetry/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/telemetry/__init__.py
diff --git a/tempest/cli/simple_read_only/volume/__init__.py b/tempest/cli/simple_read_only/volume/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/cli/simple_read_only/volume/__init__.py
diff --git a/tempest/cli/simple_read_only/volume/test_cinder.py b/tempest/cli/simple_read_only/volume/test_cinder.py
index 3a9a7a6..e44a577 100644
--- a/tempest/cli/simple_read_only/volume/test_cinder.py
+++ b/tempest/cli/simple_read_only/volume/test_cinder.py
@@ -121,8 +121,12 @@
self.assertTableStruct(zone_list, ['Name', 'Status'])
def test_cinder_endpoints(self):
- endpoints = self.parser.listing(self.cinder('endpoints'))
- self.assertTableStruct(endpoints, ['nova', 'Value'])
+ out = self.cinder('endpoints')
+ tables = self.parser.tables(out)
+ for table in tables:
+ headers = table['headers']
+ self.assertTrue(2 <= len(headers))
+ self.assertEqual('Value', headers[1])
def test_cinder_service_list(self):
service_list = self.parser.listing(self.cinder('service-list'))
diff --git a/tempest/common/accounts.py b/tempest/common/accounts.py
index c491169..7423c17 100644
--- a/tempest/common/accounts.py
+++ b/tempest/common/accounts.py
@@ -38,7 +38,12 @@
def __init__(self, name):
super(Accounts, self).__init__(name)
- accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
+ if os.path.isfile(CONF.auth.test_accounts_file):
+ accounts = read_accounts_yaml(CONF.auth.test_accounts_file)
+ self.use_default_creds = False
+ else:
+ accounts = {}
+ self.use_default_creds = True
self.hash_dict = self.get_hash_dict(accounts)
self.accounts_dir = os.path.join(CONF.lock_path, 'test_accounts')
self.isolated_creds = {}
@@ -77,6 +82,9 @@
raise exceptions.InvalidConfiguration(msg)
def _get_creds(self):
+ if self.use_default_creds:
+ raise exceptions.InvalidConfiguration(
+ "Account file %s doesn't exist" % CONF.auth.test_accounts_file)
free_hash = self._get_free_hash(self.hash_dict.keys())
return self.hash_dict[free_hash]
@@ -150,16 +158,22 @@
def get_primary_creds(self):
if self.isolated_creds.get('primary'):
return self.isolated_creds.get('primary')
- creds = self.get_creds(0)
- primary_credential = auth.get_credentials(**creds)
+ if not self.use_default_creds:
+ creds = self.get_creds(0)
+ primary_credential = auth.get_credentials(**creds)
+ else:
+ primary_credential = auth.get_default_credentials('user')
self.isolated_creds['primary'] = primary_credential
return primary_credential
def get_alt_creds(self):
if self.isolated_creds.get('alt'):
return self.isolated_creds.get('alt')
- creds = self.get_creds(1)
- alt_credential = auth.get_credentials(**creds)
+ if not self.use_default_creds:
+ creds = self.get_creds(1)
+ alt_credential = auth.get_credentials(**creds)
+ else:
+ alt_credential = auth.get_default_credentials('alt_user')
self.isolated_creds['alt'] = alt_credential
return alt_credential
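For reference, read_accounts_yaml() is expected to yield a list of credential dicts that are passed straight to auth.get_credentials(**creds); the fallback added here only decides whether that lookup happens at all. A sketch of the parsed structure (field names are an assumption based on the get_credentials() call above):

```python
# Hypothetical parsed content of CONF.auth.test_accounts_file; the exact
# keys are whatever auth.get_credentials() accepts as keyword arguments.
accounts = [
    {'username': 'user_1', 'tenant_name': 'tenant_1', 'password': 'secret'},
    {'username': 'user_2', 'tenant_name': 'tenant_2', 'password': 'secret'},
]
```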
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index c4f1214..52568cb 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -131,3 +131,31 @@
if caller:
message = '(%s) %s' % (caller, message)
raise exceptions.TimeoutException(message)
+
+
+def wait_for_bm_node_status(client, node_id, attr, status):
+ """Waits for a baremetal node attribute to reach given status.
+
+ The client should have a show_node(node_uuid) method to get the node.
+ """
+ _, node = client.show_node(node_id)
+ start = int(time.time())
+
+ while node[attr] != status:
+ time.sleep(client.build_interval)
+ _, node = client.show_node(node_id)
+ if node[attr] == status:
+ return
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Node %(node_id)s failed to reach %(attr)s=%(status)s '
+ 'within the required time (%(timeout)s s).' %
+ {'node_id': node_id,
+ 'attr': attr,
+ 'status': status,
+ 'timeout': client.build_timeout})
+ message += ' Current state of %s: %s.' % (attr, node[attr])
+ caller = misc_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise exceptions.TimeoutException(message)
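As a usage note, the baremetal node tests drive this waiter roughly as follows (a sketch; client and node are assumed to come from the baremetal test base class, and client must expose show_node(), build_interval and build_timeout as the docstring requires):

```python
from tempest.common import waiters

# Power off the node, then block until the API reports the new power state.
client.set_node_power_state(node['uuid'], 'power off')
waiters.wait_for_bm_node_status(client, node['uuid'],
                                'power_state', 'power off')
```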
diff --git a/tempest/manager.py b/tempest/manager.py
index fb2842f..75aee96 100644
--- a/tempest/manager.py
+++ b/tempest/manager.py
@@ -51,17 +51,17 @@
self.client_attr_names = []
@classmethod
- def get_auth_provider_class(cls, auth_version):
- if auth_version == 'v2':
- return auth.KeystoneV2AuthProvider
- else:
+ def get_auth_provider_class(cls, credentials):
+ if isinstance(credentials, auth.KeystoneV3Credentials):
return auth.KeystoneV3AuthProvider
+ else:
+ return auth.KeystoneV2AuthProvider
def get_auth_provider(self, credentials):
if credentials is None:
raise exceptions.InvalidCredentials(
'Credentials must be specified')
- auth_provider_class = self.get_auth_provider_class(self.auth_version)
+ auth_provider_class = self.get_auth_provider_class(credentials)
return auth_provider_class(
client_type=getattr(self, 'client_type', None),
interface=getattr(self, 'interface', None),
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 6e35a31..2d7bc24 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -16,13 +16,10 @@
import logging
import os
-import re
import subprocess
-import time
from cinderclient import exceptions as cinder_exceptions
import glanceclient
-from heatclient import exc as heat_exceptions
import netaddr
from neutronclient.common import exceptions as exc
from novaclient import exceptions as nova_exceptions
@@ -38,7 +35,6 @@
from tempest import config
from tempest import exceptions
from tempest.openstack.common import log
-from tempest.openstack.common import timeutils
from tempest.services.network import resources as net_resources
import tempest.test
@@ -77,6 +73,7 @@
)
cls.admin_manager = clients.Manager(cls.admin_credentials())
# Clients (in alphabetical order)
+ cls.flavors_client = cls.manager.flavors_client
cls.floating_ips_client = cls.manager.floating_ips_client
# Glance image client v1
cls.image_client = cls.manager.image_client
@@ -92,6 +89,8 @@
cls.interface_client = cls.manager.interfaces_client
# Neutron network client
cls.network_client = cls.manager.network_client
+ # Heat client
+ cls.orchestration_client = cls.manager.orchestration_client
@classmethod
def _get_credentials(cls, get_creds, ctype):
@@ -148,7 +147,7 @@
def addCleanup_with_wait(self, waiter_callable, thing_id, thing_id_param,
cleanup_callable, cleanup_args=None,
cleanup_kwargs=None, ignore_error=True):
- """Adds wait for ansyc resource deletion at the end of cleanups
+ """Adds wait for async resource deletion at the end of cleanups
@param waiter_callable: callable to wait for the resource to delete
@param thing_id: the id of the resource to be cleaned-up
@@ -215,26 +214,6 @@
if create_kwargs is None:
create_kwargs = {}
- fixed_network_name = CONF.compute.fixed_network_name
- if 'nics' not in create_kwargs and fixed_network_name:
- _, networks = self.networks_client.list_networks()
- # If several networks found, set the NetID on which to connect the
- # server to avoid the following error "Multiple possible networks
- # found, use a Network ID to be more specific."
- # See Tempest #1250866
- if len(networks) > 1:
- for network in networks:
- if network['label'] == fixed_network_name:
- create_kwargs['nics'] = [{'net-id': network['id']}]
- break
- # If we didn't find the network we were looking for :
- else:
- msg = ("The network on which the NIC of the server must "
- "be connected can not be found : "
- "fixed_network_name=%s. Starting instance without "
- "specifying a network.") % fixed_network_name
- LOG.info(msg)
-
LOG.debug("Creating a server (name: %s, image: %s, flavor: %s)",
name, image, flavor)
_, server = self.servers_client.create_server(name, image, flavor,
@@ -343,8 +322,9 @@
if isinstance(server_or_ip, six.string_types):
ip = server_or_ip
else:
- network_name_for_ssh = CONF.compute.network_for_ssh
- ip = server_or_ip.networks[network_name_for_ssh][0]
+ addr = server_or_ip['addresses'][CONF.compute.network_for_ssh][0]
+ ip = addr['addr']
+
if username is None:
username = CONF.scenario.ssh_user
if private_key is None:
@@ -461,6 +441,35 @@
_, volume = self.volumes_client.get_volume(self.volume['id'])
self.assertEqual('available', volume['status'])
+ def rebuild_server(self, server_id, image=None,
+ preserve_ephemeral=False, wait=True,
+ rebuild_kwargs=None):
+ if image is None:
+ image = CONF.compute.image_ref
+
+ rebuild_kwargs = rebuild_kwargs or {}
+
+ LOG.debug("Rebuilding server (id: %s, image: %s, preserve eph: %s)",
+ server_id, image, preserve_ephemeral)
+ self.servers_client.rebuild(server_id=server_id, image_ref=image,
+ preserve_ephemeral=preserve_ephemeral,
+ **rebuild_kwargs)
+ if wait:
+ self.servers_client.wait_for_server_status(server_id, 'ACTIVE')
+
+ def ping_ip_address(self, ip_address, should_succeed=True):
+ cmd = ['ping', '-c1', '-w1', ip_address]
+
+ def ping():
+ proc = subprocess.Popen(cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
+ proc.communicate()
+ return (proc.returncode == 0) == should_succeed
+
+ return tempest.test.call_until_true(
+ ping, CONF.compute.ping_timeout, 1)
+
# TODO(yfried): change this class name to NetworkScenarioTest once client
# migration is complete
@@ -639,19 +648,6 @@
self.assertIsNone(floating_ip.port_id)
return floating_ip
- def _ping_ip_address(self, ip_address, should_succeed=True):
- cmd = ['ping', '-c1', '-w1', ip_address]
-
- def ping():
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc.wait()
- return (proc.returncode == 0) == should_succeed
-
- return tempest.test.call_until_true(
- ping, CONF.compute.ping_timeout, 1)
-
def _check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
@@ -671,8 +667,8 @@
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
- self.assertTrue(self._ping_ip_address(ip_address,
- should_succeed=should_connect),
+ self.assertTrue(self.ping_ip_address(ip_address,
+ should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
@@ -900,6 +896,41 @@
return rules
+ def _create_pool(self, lb_method, protocol, subnet_id):
+ """Wrapper utility that returns a test pool."""
+ client = self.network_client
+ name = data_utils.rand_name('pool')
+ _, resp_pool = client.create_pool(protocol=protocol, name=name,
+ subnet_id=subnet_id,
+ lb_method=lb_method)
+ pool = net_resources.DeletablePool(client=client, **resp_pool['pool'])
+ self.assertEqual(pool['name'], name)
+ self.addCleanup(self.delete_wrapper, pool.delete)
+ return pool
+
+ def _create_member(self, address, protocol_port, pool_id):
+ """Wrapper utility that returns a test member."""
+ client = self.network_client
+ _, resp_member = client.create_member(protocol_port=protocol_port,
+ pool_id=pool_id,
+ address=address)
+ member = net_resources.DeletableMember(client=client,
+ **resp_member['member'])
+ self.addCleanup(self.delete_wrapper, member.delete)
+ return member
+
+ def _create_vip(self, protocol, protocol_port, subnet_id, pool_id):
+ """Wrapper utility that returns a test vip."""
+ client = self.network_client
+ name = data_utils.rand_name('vip')
+ _, resp_vip = client.create_vip(protocol=protocol, name=name,
+ subnet_id=subnet_id, pool_id=pool_id,
+ protocol_port=protocol_port)
+ vip = net_resources.DeletableVip(client=client, **resp_vip['vip'])
+ self.assertEqual(vip['name'], name)
+ self.addCleanup(self.delete_wrapper, vip.delete)
+ return vip
+
def _ssh_to_server(self, server, private_key):
ssh_login = CONF.compute.image_ssh_user
return self.get_remote_client(server,
@@ -1503,7 +1534,7 @@
ERROR = 'error'
-class BaremetalScenarioTest(OfficialClientTest):
+class BaremetalScenarioTest(ScenarioTest):
@classmethod
def setUpClass(cls):
super(BaremetalScenarioTest, cls).setUpClass()
@@ -1514,12 +1545,13 @@
raise cls.skipException(msg)
# use an admin client manager for baremetal client
- admin_creds = cls.admin_credentials()
- manager = clients.OfficialClientManager(credentials=admin_creds)
+ manager = clients.Manager(
+ credentials=cls.admin_credentials()
+ )
cls.baremetal_client = manager.baremetal_client
# allow any issues obtaining the node list to raise early
- cls.baremetal_client.node.list()
+ cls.baremetal_client.list_nodes()
def _node_state_timeout(self, node_id, state_attr,
target_states, timeout=10, interval=1):
@@ -1528,7 +1560,7 @@
def check_state():
node = self.get_node(node_id=node_id)
- if getattr(node, state_attr) in target_states:
+ if node.get(state_attr) in target_states:
return True
return False
@@ -1568,14 +1600,20 @@
def get_node(self, node_id=None, instance_id=None):
if node_id:
- return self.baremetal_client.node.get(node_id)
+ _, body = self.baremetal_client.show_node(node_id)
+ return body
elif instance_id:
- return self.baremetal_client.node.get_by_instance_uuid(instance_id)
+ _, body = self.baremetal_client.show_node_by_instance_uuid(
+ instance_id)
+ if body['nodes']:
+ return body['nodes'][0]
- def get_ports(self, node_id):
+ def get_ports(self, node_uuid):
ports = []
- for port in self.baremetal_client.node.list_ports(node_id):
- ports.append(self.baremetal_client.port.get(port.uuid))
+ _, body = self.baremetal_client.list_node_ports(node_uuid)
+ for port in body['ports']:
+ _, p = self.baremetal_client.show_port(port['uuid'])
+ ports.append(p)
return ports
def add_keypair(self):
@@ -1590,42 +1628,37 @@
def boot_instance(self):
create_kwargs = {
- 'key_name': self.keypair.id
+ 'key_name': self.keypair['name']
}
self.instance = self.create_server(
wait_on_boot=False, create_kwargs=create_kwargs)
- self.addCleanup_with_wait(self.compute_client.servers,
- self.instance.id,
- cleanup_callable=self.delete_wrapper,
- cleanup_args=[self.instance])
+ self.wait_node(self.instance['id'])
+ self.node = self.get_node(instance_id=self.instance['id'])
- self.wait_node(self.instance.id)
- self.node = self.get_node(instance_id=self.instance.id)
-
- self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_ON)
+ self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
self.wait_provisioning_state(
- self.node.uuid,
+ self.node['uuid'],
[BaremetalProvisionStates.DEPLOYWAIT,
BaremetalProvisionStates.ACTIVE],
timeout=15)
- self.wait_provisioning_state(self.node.uuid,
+ self.wait_provisioning_state(self.node['uuid'],
BaremetalProvisionStates.ACTIVE,
timeout=CONF.baremetal.active_timeout)
- self.status_timeout(
- self.compute_client.servers, self.instance.id, 'ACTIVE')
-
- self.node = self.get_node(instance_id=self.instance.id)
- self.instance = self.compute_client.servers.get(self.instance.id)
+ self.servers_client.wait_for_server_status(self.instance['id'],
+ 'ACTIVE')
+ self.node = self.get_node(instance_id=self.instance['id'])
+ _, self.instance = self.servers_client.get_server(self.instance['id'])
def terminate_instance(self):
- self.instance.delete()
- self.wait_power_state(self.node.uuid, BaremetalPowerStates.POWER_OFF)
+ self.servers_client.delete_server(self.instance['id'])
+ self.wait_power_state(self.node['uuid'],
+ BaremetalPowerStates.POWER_OFF)
self.wait_provisioning_state(
- self.node.uuid,
+ self.node['uuid'],
BaremetalProvisionStates.NOSTATE,
timeout=CONF.baremetal.unprovision_timeout)
@@ -1847,19 +1880,6 @@
self.assertIsNone(floating_ip.port_id)
return floating_ip
- def _ping_ip_address(self, ip_address, should_succeed=True):
- cmd = ['ping', '-c1', '-w1', ip_address]
-
- def ping():
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc.wait()
- return (proc.returncode == 0) == should_succeed
-
- return tempest.test.call_until_true(
- ping, CONF.compute.ping_timeout, 1)
-
def _create_pool(self, lb_method, protocol, subnet_id):
"""Wrapper utility that returns a test pool."""
name = data_utils.rand_name('pool-')
@@ -1931,8 +1951,8 @@
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
- self.assertTrue(self._ping_ip_address(ip_address,
- should_succeed=should_connect),
+ self.assertTrue(self.ping_ip_address(ip_address,
+ should_succeed=should_connect),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
@@ -2241,7 +2261,7 @@
return network, subnet, router
-class OrchestrationScenarioTest(OfficialClientTest):
+class OrchestrationScenarioTest(ScenarioTest):
"""
Base class for orchestration scenario tests
"""
@@ -2271,106 +2291,17 @@
@classmethod
def _get_default_network(cls):
- networks = cls.network_client.list_networks()
- for net in networks['networks']:
- if net['name'] == CONF.compute.fixed_network_name:
+ _, networks = cls.networks_client.list_networks()
+ for net in networks:
+ if net['label'] == CONF.compute.fixed_network_name:
return net
@staticmethod
def _stack_output(stack, output_key):
"""Return a stack output value for a given key."""
- return next((o['output_value'] for o in stack.outputs
+ return next((o['output_value'] for o in stack['outputs']
if o['output_key'] == output_key), None)
- def _ping_ip_address(self, ip_address, should_succeed=True):
- cmd = ['ping', '-c1', '-w1', ip_address]
-
- def ping():
- proc = subprocess.Popen(cmd,
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- proc.wait()
- return (proc.returncode == 0) == should_succeed
-
- return tempest.test.call_until_true(
- ping, CONF.orchestration.build_timeout, 1)
-
- def _wait_for_resource_status(self, stack_identifier, resource_name,
- status, failure_pattern='^.*_FAILED$'):
- """Waits for a Resource to reach a given status."""
- fail_regexp = re.compile(failure_pattern)
- build_timeout = CONF.orchestration.build_timeout
- build_interval = CONF.orchestration.build_interval
-
- start = timeutils.utcnow()
- while timeutils.delta_seconds(start,
- timeutils.utcnow()) < build_timeout:
- try:
- res = self.client.resources.get(
- stack_identifier, resource_name)
- except heat_exceptions.HTTPNotFound:
- # ignore this, as the resource may not have
- # been created yet
- pass
- else:
- if res.resource_status == status:
- return
- if fail_regexp.search(res.resource_status):
- raise exceptions.StackResourceBuildErrorException(
- resource_name=res.resource_name,
- stack_identifier=stack_identifier,
- resource_status=res.resource_status,
- resource_status_reason=res.resource_status_reason)
- time.sleep(build_interval)
-
- message = ('Resource %s failed to reach %s status within '
- 'the required time (%s s).' %
- (res.resource_name, status, build_timeout))
- raise exceptions.TimeoutException(message)
-
- def _wait_for_stack_status(self, stack_identifier, status,
- failure_pattern='^.*_FAILED$'):
- """
- Waits for a Stack to reach a given status.
-
- Note this compares the full $action_$status, e.g
- CREATE_COMPLETE, not just COMPLETE which is exposed
- via the status property of Stack in heatclient
- """
- fail_regexp = re.compile(failure_pattern)
- build_timeout = CONF.orchestration.build_timeout
- build_interval = CONF.orchestration.build_interval
-
- start = timeutils.utcnow()
- while timeutils.delta_seconds(start,
- timeutils.utcnow()) < build_timeout:
- try:
- stack = self.client.stacks.get(stack_identifier)
- except heat_exceptions.HTTPNotFound:
- # ignore this, as the stackource may not have
- # been created yet
- pass
- else:
- if stack.stack_status == status:
- return
- if fail_regexp.search(stack.stack_status):
- raise exceptions.StackBuildErrorException(
- stack_identifier=stack_identifier,
- stack_status=stack.stack_status,
- stack_status_reason=stack.stack_status_reason)
- time.sleep(build_interval)
-
- message = ('Stack %s failed to reach %s status within '
- 'the required time (%s s).' %
- (stack.stack_name, status, build_timeout))
- raise exceptions.TimeoutException(message)
-
- def _stack_delete(self, stack_identifier):
- try:
- self.client.stacks.delete(stack_identifier)
- except heat_exceptions.HTTPNotFound:
- pass
-
class SwiftScenarioTest(ScenarioTest):
"""
diff --git a/tempest/scenario/orchestration/test_autoscaling.py b/tempest/scenario/orchestration/test_autoscaling.py
deleted file mode 100644
index 8894106..0000000
--- a/tempest/scenario/orchestration/test_autoscaling.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import time
-
-import heatclient.exc as heat_exceptions
-
-from tempest import config
-from tempest.scenario import manager
-from tempest import test
-
-CONF = config.CONF
-
-
-class AutoScalingTest(manager.OrchestrationScenarioTest):
-
- def setUp(self):
- super(AutoScalingTest, self).setUp()
- if not CONF.orchestration.image_ref:
- raise self.skipException("No image available to test")
- self.client = self.orchestration_client
-
- def assign_keypair(self):
- self.stack_name = self._stack_rand_name()
- if CONF.orchestration.keypair_name:
- self.keypair_name = CONF.orchestration.keypair_name
- else:
- self.keypair = self.create_keypair()
- self.keypair_name = self.keypair.id
-
- def launch_stack(self):
- net = self._get_default_network()
- self.parameters = {
- 'KeyName': self.keypair_name,
- 'InstanceType': CONF.orchestration.instance_type,
- 'ImageId': CONF.orchestration.image_ref,
- 'StackStart': str(time.time()),
- 'Subnet': net['subnets'][0]
- }
-
- # create the stack
- self.template = self._load_template(__file__, 'test_autoscaling.yaml')
- self.client.stacks.create(
- stack_name=self.stack_name,
- template=self.template,
- parameters=self.parameters)
-
- self.stack = self.client.stacks.get(self.stack_name)
- self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
-
- # if a keypair was set, do not delete the stack on exit to allow
- # for manual post-mortums
- if not CONF.orchestration.keypair_name:
- self.addCleanup(self.client.stacks.delete, self.stack)
-
- @test.skip_because(bug="1257575")
- @test.attr(type='slow')
- @test.services('orchestration', 'compute')
- def test_scale_up_then_down(self):
-
- self.assign_keypair()
- self.launch_stack()
-
- sid = self.stack_identifier
- timeout = CONF.orchestration.build_timeout
- interval = 10
-
- self.assertEqual('CREATE', self.stack.action)
- # wait for create to complete.
- self.status_timeout(self.client.stacks, sid, 'COMPLETE',
- error_status='FAILED')
-
- self.stack.get()
- self.assertEqual('CREATE_COMPLETE', self.stack.stack_status)
-
- # the resource SmokeServerGroup is implemented as a nested
- # stack, so servers can be counted by counting the resources
- # inside that nested stack
- resource = self.client.resources.get(sid, 'SmokeServerGroup')
- nested_stack_id = resource.physical_resource_id
-
- def server_count():
- # the number of servers is the number of resources
- # in the nested stack
- self.server_count = len(
- self.client.resources.list(nested_stack_id))
- return self.server_count
-
- def assertScale(from_servers, to_servers):
- test.call_until_true(lambda: server_count() == to_servers,
- timeout, interval)
- self.assertEqual(to_servers, self.server_count,
- 'Failed scaling from %d to %d servers. '
- 'Current server count: %s' % (
- from_servers, to_servers,
- self.server_count))
-
- # he marched them up to the top of the hill
- assertScale(1, 2)
- assertScale(2, 3)
-
- # and he marched them down again
- assertScale(3, 2)
- assertScale(2, 1)
-
- # delete stack on completion
- self.stack.delete()
- self.status_timeout(self.client.stacks, sid, 'COMPLETE',
- error_status='FAILED',
- not_found_exception=heat_exceptions.NotFound)
-
- try:
- self.stack.get()
- self.assertEqual('DELETE_COMPLETE', self.stack.stack_status)
- except heat_exceptions.NotFound:
- pass
diff --git a/tempest/scenario/orchestration/test_autoscaling.yaml b/tempest/scenario/orchestration/test_autoscaling.yaml
deleted file mode 100644
index 4651284..0000000
--- a/tempest/scenario/orchestration/test_autoscaling.yaml
+++ /dev/null
@@ -1,185 +0,0 @@
-HeatTemplateFormatVersion: '2012-12-12'
-Description: |
- Template which tests autoscaling and load balancing
-Parameters:
- KeyName:
- Type: String
- InstanceType:
- Type: String
- ImageId:
- Type: String
- Subnet:
- Type: String
- StackStart:
- Description: Epoch seconds when the stack was launched
- Type: Number
- ConsumeStartSeconds:
- Description: Seconds after invocation when memory should be consumed
- Type: Number
- Default: '60'
- ConsumeStopSeconds:
- Description: Seconds after StackStart when memory should be released
- Type: Number
- Default: '420'
- ScaleUpThreshold:
- Description: Memory percentage threshold to scale up on
- Type: String
- Default: '70'
- ScaleDownThreshold:
- Description: Memory percentage threshold to scale down on
- Type: String
- Default: '60'
- ConsumeMemoryLimit:
- Description: Memory percentage threshold to consume
- Type: Number
- Default: '71'
-Resources:
- SmokeServerGroup:
- Type: AWS::AutoScaling::AutoScalingGroup
- Properties:
- AvailabilityZones: {'Fn::GetAZs': ''}
- LaunchConfigurationName: {Ref: LaunchConfig}
- MinSize: '1'
- MaxSize: '3'
- VPCZoneIdentifier: [{Ref: Subnet}]
- SmokeServerScaleUpPolicy:
- Type: AWS::AutoScaling::ScalingPolicy
- Properties:
- AdjustmentType: ChangeInCapacity
- AutoScalingGroupName: {Ref: SmokeServerGroup}
- Cooldown: '60'
- ScalingAdjustment: '1'
- SmokeServerScaleDownPolicy:
- Type: AWS::AutoScaling::ScalingPolicy
- Properties:
- AdjustmentType: ChangeInCapacity
- AutoScalingGroupName: {Ref: SmokeServerGroup}
- Cooldown: '60'
- ScalingAdjustment: '-1'
- MEMAlarmHigh:
- Type: AWS::CloudWatch::Alarm
- Properties:
- AlarmDescription: Scale-up if MEM > ScaleUpThreshold% for 10 seconds
- MetricName: MemoryUtilization
- Namespace: system/linux
- Statistic: Average
- Period: '10'
- EvaluationPeriods: '1'
- Threshold: {Ref: ScaleUpThreshold}
- AlarmActions: [{Ref: SmokeServerScaleUpPolicy}]
- Dimensions:
- - Name: AutoScalingGroupName
- Value: {Ref: SmokeServerGroup}
- ComparisonOperator: GreaterThanThreshold
- MEMAlarmLow:
- Type: AWS::CloudWatch::Alarm
- Properties:
- AlarmDescription: Scale-down if MEM < ScaleDownThreshold% for 10 seconds
- MetricName: MemoryUtilization
- Namespace: system/linux
- Statistic: Average
- Period: '10'
- EvaluationPeriods: '1'
- Threshold: {Ref: ScaleDownThreshold}
- AlarmActions: [{Ref: SmokeServerScaleDownPolicy}]
- Dimensions:
- - Name: AutoScalingGroupName
- Value: {Ref: SmokeServerGroup}
- ComparisonOperator: LessThanThreshold
- CfnUser:
- Type: AWS::IAM::User
- SmokeKeys:
- Type: AWS::IAM::AccessKey
- Properties:
- UserName: {Ref: CfnUser}
- SmokeSecurityGroup:
- Type: AWS::EC2::SecurityGroup
- Properties:
- GroupDescription: Standard firewall rules
- SecurityGroupIngress:
- - {IpProtocol: tcp, FromPort: '22', ToPort: '22', CidrIp: 0.0.0.0/0}
- - {IpProtocol: tcp, FromPort: '80', ToPort: '80', CidrIp: 0.0.0.0/0}
- LaunchConfig:
- Type: AWS::AutoScaling::LaunchConfiguration
- Metadata:
- AWS::CloudFormation::Init:
- config:
- files:
- /etc/cfn/cfn-credentials:
- content:
- Fn::Replace:
- - $AWSAccessKeyId: {Ref: SmokeKeys}
- $AWSSecretKey: {'Fn::GetAtt': [SmokeKeys, SecretAccessKey]}
- - |
- AWSAccessKeyId=$AWSAccessKeyId
- AWSSecretKey=$AWSSecretKey
- mode: '000400'
- owner: root
- group: root
- /root/watch_loop:
- content:
- Fn::Replace:
- - _hi_: {Ref: MEMAlarmHigh}
- _lo_: {Ref: MEMAlarmLow}
- - |
- #!/bin/bash
- while :
- do
- /opt/aws/bin/cfn-push-stats --watch _hi_ --mem-util
- /opt/aws/bin/cfn-push-stats --watch _lo_ --mem-util
- sleep 4
- done
- mode: '000700'
- owner: root
- group: root
- /root/consume_memory:
- content:
- Fn::Replace:
- - StackStart: {Ref: StackStart}
- ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
- ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
- ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
- - |
- #!/usr/bin/env python
- import psutil
- import time
- import datetime
- import sys
- a = []
- sleep_until_consume = ConsumeStartSeconds
- stack_start = StackStart
- consume_stop_time = stack_start + ConsumeStopSeconds
- memory_limit = ConsumeMemoryLimit
- if sleep_until_consume > 0:
- sys.stdout.flush()
- time.sleep(sleep_until_consume)
- while psutil.virtual_memory().percent < memory_limit:
- sys.stdout.flush()
- a.append(' ' * 10**5)
- time.sleep(0.1)
- sleep_until_exit = consume_stop_time - time.time()
- if sleep_until_exit > 0:
- time.sleep(sleep_until_exit)
- mode: '000700'
- owner: root
- group: root
- Properties:
- ImageId: {Ref: ImageId}
- InstanceType: {Ref: InstanceType}
- KeyName: {Ref: KeyName}
- SecurityGroups: [{Ref: SmokeSecurityGroup}]
- UserData:
- Fn::Base64:
- Fn::Replace:
- - ConsumeStopSeconds: {Ref: ConsumeStopSeconds}
- ConsumeStartSeconds: {Ref: ConsumeStartSeconds}
- ConsumeMemoryLimit: {Ref: ConsumeMemoryLimit}
- - |
- #!/bin/bash -v
- /opt/aws/bin/cfn-init
- # report on memory consumption every 4 seconds
- /root/watch_loop &
- # wait ConsumeStartSeconds then ramp up memory consumption
- # until it is over ConsumeMemoryLimit%
- # then exits ConsumeStopSeconds seconds after stack launch
- /root/consume_memory > /root/consume_memory.log &
diff --git a/tempest/scenario/orchestration/test_server_cfn_init.py b/tempest/scenario/orchestration/test_server_cfn_init.py
index 36e6126..dd7e7d4 100644
--- a/tempest/scenario/orchestration/test_server_cfn_init.py
+++ b/tempest/scenario/orchestration/test_server_cfn_init.py
@@ -38,7 +38,7 @@
self.keypair_name = CONF.orchestration.keypair_name
else:
self.keypair = self.create_keypair()
- self.keypair_name = self.keypair.id
+ self.keypair_name = self.keypair['name']
def launch_stack(self):
net = self._get_default_network()
@@ -52,40 +52,44 @@
# create the stack
self.template = self._load_template(__file__, self.template_name)
- self.client.stacks.create(
- stack_name=self.stack_name,
+ _, stack = self.client.create_stack(
+ name=self.stack_name,
template=self.template,
parameters=self.parameters)
+ stack = stack['stack']
- self.stack = self.client.stacks.get(self.stack_name)
- self.stack_identifier = '%s/%s' % (self.stack_name, self.stack.id)
- self.addCleanup(self._stack_delete, self.stack_identifier)
+ _, self.stack = self.client.get_stack(stack['id'])
+ self.stack_identifier = '%s/%s' % (self.stack_name, self.stack['id'])
+ self.addCleanup(self.delete_wrapper,
+ self.orchestration_client.delete_stack,
+ self.stack_identifier)
def check_stack(self):
sid = self.stack_identifier
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'WaitHandle', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'SmokeSecurityGroup', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'SmokeKeys', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'CfnUser', 'CREATE_COMPLETE')
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'SmokeServer', 'CREATE_COMPLETE')
- server_resource = self.client.resources.get(sid, 'SmokeServer')
- server_id = server_resource.physical_resource_id
- server = self.compute_client.servers.get(server_id)
- server_ip = server.networks[CONF.compute.network_for_ssh][0]
+ _, server_resource = self.client.get_resource(sid, 'SmokeServer')
+ server_id = server_resource['physical_resource_id']
+ _, server = self.servers_client.get_server(server_id)
+ server_ip =\
+ server['addresses'][CONF.compute.network_for_ssh][0]['addr']
- if not self._ping_ip_address(server_ip):
+ if not self.ping_ip_address(server_ip):
self._log_console_output(servers=[server])
self.fail(
"Timed out waiting for %s to become reachable" % server_ip)
try:
- self._wait_for_resource_status(
+ self.client.wait_for_resource_status(
sid, 'WaitCondition', 'CREATE_COMPLETE')
except (exceptions.StackResourceBuildErrorException,
exceptions.TimeoutException) as e:
@@ -96,9 +100,9 @@
# logs to be compared
self._log_console_output(servers=[server])
- self._wait_for_stack_status(sid, 'CREATE_COMPLETE')
+ self.client.wait_for_stack_status(sid, 'CREATE_COMPLETE')
- stack = self.client.stacks.get(sid)
+ _, stack = self.client.get_stack(sid)
# This is an assert of great significance, as it means the following
# has happened:
diff --git a/tempest/scenario/test_baremetal_basic_ops.py b/tempest/scenario/test_baremetal_basic_ops.py
index 9ad6bc4..35571c6 100644
--- a/tempest/scenario/test_baremetal_basic_ops.py
+++ b/tempest/scenario/test_baremetal_basic_ops.py
@@ -41,26 +41,23 @@
expected state transitions
"""
def rebuild_instance(self, preserve_ephemeral=False):
- self.rebuild_server(self.instance,
+ self.rebuild_server(server_id=self.instance['id'],
preserve_ephemeral=preserve_ephemeral,
wait=False)
- node = self.get_node(instance_id=self.instance.id)
- self.instance = self.compute_client.servers.get(self.instance.id)
-
- self.addCleanup_with_wait(self.compute_client.servers,
- self.instance.id,
- cleanup_callable=self.delete_wrapper,
- cleanup_args=[self.instance])
+ node = self.get_node(instance_id=self.instance['id'])
# We should remain on the same node
- self.assertEqual(self.node.uuid, node.uuid)
+ self.assertEqual(self.node['uuid'], node['uuid'])
self.node = node
- self.status_timeout(self.compute_client.servers, self.instance.id,
- 'REBUILD')
- self.status_timeout(self.compute_client.servers, self.instance.id,
- 'ACTIVE')
+ self.servers_client.wait_for_server_status(
+ server_id=self.instance['id'],
+ status='REBUILD',
+ ready_wait=False)
+ self.servers_client.wait_for_server_status(
+ server_id=self.instance['id'],
+ status='ACTIVE')
def create_remote_file(self, client, filename):
"""Create a file on the remote client connection.
@@ -99,23 +96,26 @@
def get_flavor_ephemeral_size(self):
"""Returns size of the ephemeral partition in GiB."""
- f_id = self.instance.flavor['id']
- ephemeral = self.compute_client.flavors.get(f_id).ephemeral
- if ephemeral != 'N/A':
- return int(ephemeral)
- return None
+ f_id = self.instance['flavor']['id']
+ _, flavor = self.flavors_client.get_flavor_details(f_id)
+ ephemeral = flavor.get('OS-FLV-EXT-DATA:ephemeral')
+ if not ephemeral or ephemeral == 'N/A':
+ return None
+ return int(ephemeral)
def add_floating_ip(self):
- floating_ip = self.compute_client.floating_ips.create()
- self.instance.add_floating_ip(floating_ip)
- return floating_ip.ip
+ _, floating_ip = self.floating_ips_client.create_floating_ip()
+ self.floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], self.instance['id'])
+ return floating_ip['ip']
def validate_ports(self):
- for port in self.get_ports(self.node.uuid):
- n_port_id = port.extra['vif_port_id']
- n_port = self.network_client.show_port(n_port_id)['port']
- self.assertEqual(n_port['device_id'], self.instance.id)
- self.assertEqual(n_port['mac_address'], port.address)
+ for port in self.get_ports(self.node['uuid']):
+ n_port_id = port['extra']['vif_port_id']
+ _, body = self.network_client.show_port(n_port_id)
+ n_port = body['port']
+ self.assertEqual(n_port['device_id'], self.instance['id'])
+ self.assertEqual(n_port['mac_address'], port['address'])
@test.services('baremetal', 'compute', 'image', 'network')
def test_baremetal_server_ops(self):
@@ -133,18 +133,23 @@
# the same size as our flavor definition.
eph_size = self.get_flavor_ephemeral_size()
self.assertIsNotNone(eph_size)
- self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
+ if eph_size > 0:
+ preserve_ephemeral = True
- # Create the test file
- self.create_remote_file(vm_client, test_filename)
+ self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
+ # Create the test file
+ self.create_remote_file(vm_client, test_filename)
+ else:
+ preserve_ephemeral = False
- # Rebuild and preserve the ephemeral partition
- self.rebuild_instance(True)
+ # Rebuild and preserve the ephemeral partition if it exists
+ self.rebuild_instance(preserve_ephemeral)
self.verify_connectivity()
# Check that we maintained our data
- vm_client = self.get_remote_client(self.instance)
- self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
- vm_client.exec_command('ls ' + test_filename)
+ if eph_size > 0:
+ vm_client = self.get_remote_client(self.instance)
+ self.verify_partition(vm_client, 'ephemeral0', '/mnt', eph_size)
+ vm_client.exec_command('ls ' + test_filename)
self.terminate_instance()
diff --git a/tempest/scenario/test_load_balancer_basic.py b/tempest/scenario/test_load_balancer_basic.py
index 35e50e8..5e83ff9 100644
--- a/tempest/scenario/test_load_balancer_basic.py
+++ b/tempest/scenario/test_load_balancer_basic.py
@@ -28,7 +28,7 @@
config = config.CONF
-class TestLoadBalancerBasic(manager.NetworkScenarioTest):
+class TestLoadBalancerBasic(manager.NeutronScenarioTest):
"""
This test checks basic load balancing.
@@ -72,7 +72,7 @@
super(TestLoadBalancerBasic, self).setUp()
self.server_ips = {}
self.server_fixed_ips = {}
- self._create_security_group()
+ self._create_security_group_for_test()
self._set_net_and_subnet()
def _set_net_and_subnet(self):
@@ -103,8 +103,8 @@
subnet = self._list_subnets(network_id=self.network['id'])[0]
self.subnet = net_common.AttributeDict(subnet)
- def _create_security_group(self):
- self.security_group = self._create_security_group_neutron(
+ def _create_security_group_for_test(self):
+ self.security_group = self._create_security_group(
tenant_id=self.tenant_id)
self._create_security_group_rules_for_port(self.port1)
self._create_security_group_rules_for_port(self.port2)
@@ -117,35 +117,35 @@
'port_range_max': port,
}
self._create_security_group_rule(
- client=self.network_client,
secgroup=self.security_group,
tenant_id=self.tenant_id,
**rule)
def _create_server(self, name):
- keypair = self.create_keypair(name='keypair-%s' % name)
- security_groups = [self.security_group.name]
+ keypair = self.create_keypair()
+ security_groups = [self.security_group]
create_kwargs = {
'nics': [
{'net-id': self.network['id']},
],
- 'key_name': keypair.name,
+ 'key_name': keypair['name'],
'security_groups': security_groups,
}
- server = self.create_server(name=name,
- create_kwargs=create_kwargs)
- self.servers_keypairs[server.id] = keypair
net_name = self.network['name']
+ server = self.create_server(name=name, create_kwargs=create_kwargs)
+ self.servers_keypairs[server['id']] = keypair
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
public_network_id = config.network.public_network_id
floating_ip = self._create_floating_ip(
server, public_network_id)
self.floating_ips[floating_ip] = server
- self.server_ips[server.id] = floating_ip.floating_ip_address
+ self.server_ips[server['id']] = floating_ip.floating_ip_address
else:
- self.server_ips[server.id] = server.networks[net_name][0]
- self.server_fixed_ips[server.id] = server.networks[net_name][0]
+ self.server_ips[server['id']] =\
+ server['addresses'][net_name][0]['addr']
+ self.server_fixed_ips[server['id']] =\
+ server['addresses'][net_name][0]['addr']
self.assertTrue(self.servers_keypairs)
return server
@@ -162,8 +162,8 @@
2. Start two http backends listening on ports 80 and 88 respectively
"""
for server_id, ip in self.server_ips.iteritems():
- private_key = self.servers_keypairs[server_id].private_key
- server_name = self.compute_client.servers.get(server_id).name
+ private_key = self.servers_keypairs[server_id]['private_key']
+ server_name = self.servers_client.get_server(server_id)[1]['name']
username = config.scenario.ssh_user
ssh_client = self.get_remote_client(
server_or_ip=ip,
@@ -269,11 +269,7 @@
protocol_port=80,
subnet_id=self.subnet.id,
pool_id=self.pool.id)
- self.status_timeout(NeutronRetriever(self.network_client,
- self.network_client.vip_path,
- net_common.DeletableVip),
- self.vip.id,
- expected_status='ACTIVE')
+ self.vip.wait_for_status('ACTIVE')
if (config.network.public_network_id and not
config.network.tenant_networks_reachable):
self._assign_floating_ip_to_vip(self.vip)
@@ -286,8 +282,8 @@
# vip port - see https://bugs.launchpad.net/neutron/+bug/1163569
# However the linuxbridge-agent does, and it is necessary to add a
# security group with a rule that allows tcp port 80 to the vip port.
- body = {'port': {'security_groups': [self.security_group.id]}}
- self.network_client.update_port(self.vip.port_id, body)
+ self.network_client.update_port(
+ self.vip.port_id, security_groups=[self.security_group.id])
def _check_load_balancing(self):
"""
@@ -318,27 +314,3 @@
self._start_servers()
self._create_load_balancer()
self._check_load_balancing()
-
-
-class NeutronRetriever(object):
- """
- Helper class to make possible handling neutron objects returned by GET
- requests as attribute dicts.
-
- Whet get() method is called, the returned dictionary is wrapped into
- a corresponding DeletableResource class which provides attribute access
- to dictionary values.
-
- Usage:
- This retriever is used to allow using status_timeout from
- tempest.manager with Neutron objects.
- """
-
- def __init__(self, network_client, path, resource):
- self.network_client = network_client
- self.path = path
- self.resource = resource
-
- def get(self, thing_id):
- obj = self.network_client.get(self.path % thing_id)
- return self.resource(client=self.network_client, **obj.values()[0])
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index c145551..84e1048 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -61,8 +61,8 @@
network, subnet, router = self.create_networks()
public_network_id = CONF.network.public_network_id
create_kwargs = {
- 'nics': [
- {'net-id': network.id},
+ 'networks': [
+ {'uuid': network.id},
],
'key_name': self.keypair['name'],
'security_groups': [security_group],
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index e8dba6a..10dfb66 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -147,8 +147,8 @@
self.keypairs[keypair['name']] = keypair
security_groups = [self.security_group]
create_kwargs = {
- 'nics': [
- {'net-id': network.id},
+ 'networks': [
+ {'uuid': network.id},
],
'key_name': keypair['name'],
'security_groups': security_groups,
diff --git a/tempest/scenario/test_security_groups_basic_ops.py b/tempest/scenario/test_security_groups_basic_ops.py
index 520c232..20505eb 100644
--- a/tempest/scenario/test_security_groups_basic_ops.py
+++ b/tempest/scenario/test_security_groups_basic_ops.py
@@ -234,8 +234,8 @@
if security_groups is None:
security_groups = [tenant.security_groups['default']]
create_kwargs = {
- 'nics': [
- {'net-id': tenant.network.id},
+ 'networks': [
+ {'uuid': tenant.network.id},
],
'key_name': tenant.keypair['name'],
'security_groups': security_groups,
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 4783273..f2c3dcd 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -15,7 +15,6 @@
import time
-from cinderclient import exceptions as cinder_exceptions
import testtools
from tempest.common.utils import data_utils
@@ -30,7 +29,7 @@
LOG = logging.getLogger(__name__)
-class TestStampPattern(manager.OfficialClientTest):
+class TestStampPattern(manager.ScenarioTest):
"""
This test is for snapshotting an instance/volume and attaching the volume
created from snapshot to the instance booted from snapshot.
@@ -59,13 +58,13 @@
raise cls.skipException("Cinder volume snapshots are disabled")
def _wait_for_volume_snapshot_status(self, volume_snapshot, status):
- self.status_timeout(self.volume_client.volume_snapshots,
- volume_snapshot.id, status)
+ self.snapshots_client.wait_for_snapshot_status(volume_snapshot['id'],
+ status)
def _boot_image(self, image_id):
- security_groups = [self.security_group.name]
+ security_groups = [self.security_group]
create_kwargs = {
- 'key_name': self.keypair.name,
+ 'key_name': self.keypair['name'],
'security_groups': security_groups
}
return self.create_server(image=image_id, create_kwargs=create_kwargs)
@@ -74,53 +73,54 @@
self.keypair = self.create_keypair()
def _create_floating_ip(self):
- floating_ip = self.compute_client.floating_ips.create()
- self.addCleanup(self.delete_wrapper, floating_ip)
+ _, floating_ip = self.floating_ips_client.create_floating_ip()
+ self.addCleanup(self.delete_wrapper,
+ self.floating_ips_client.delete_floating_ip,
+ floating_ip['id'])
return floating_ip
def _add_floating_ip(self, server, floating_ip):
- server.add_floating_ip(floating_ip)
+ self.floating_ips_client.associate_floating_ip_to_server(
+ floating_ip['ip'], server['id'])
def _ssh_to_server(self, server_or_ip):
return self.get_remote_client(server_or_ip)
def _create_volume_snapshot(self, volume):
snapshot_name = data_utils.rand_name('scenario-snapshot-')
- volume_snapshots = self.volume_client.volume_snapshots
- snapshot = volume_snapshots.create(
- volume.id, display_name=snapshot_name)
+ _, snapshot = self.snapshots_client.create_snapshot(
+ volume['id'], display_name=snapshot_name)
def cleaner():
- volume_snapshots.delete(snapshot)
+ self.snapshots_client.delete_snapshot(snapshot['id'])
try:
- while volume_snapshots.get(snapshot.id):
+ while self.snapshots_client.get_snapshot(snapshot['id']):
time.sleep(1)
- except cinder_exceptions.NotFound:
+ except exceptions.NotFound:
pass
self.addCleanup(cleaner)
self._wait_for_volume_status(volume, 'available')
- self._wait_for_volume_snapshot_status(snapshot, 'available')
- self.assertEqual(snapshot_name, snapshot.display_name)
+ self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
+ 'available')
+ self.assertEqual(snapshot_name, snapshot['display_name'])
return snapshot
def _wait_for_volume_status(self, volume, status):
- self.status_timeout(
- self.volume_client.volumes, volume.id, status)
+ self.volumes_client.wait_for_volume_status(volume['id'], status)
def _create_volume(self, snapshot_id=None):
return self.create_volume(snapshot_id=snapshot_id)
def _attach_volume(self, server, volume):
- attach_volume_client = self.compute_client.volumes.create_server_volume
- attached_volume = attach_volume_client(server.id,
- volume.id,
- '/dev/vdb')
- self.assertEqual(volume.id, attached_volume.id)
+ # TODO(andreaf) we should use device from config instead of vdb
+ _, attached_volume = self.servers_client.attach_volume(
+ server['id'], volume['id'], device='/dev/vdb')
+ attached_volume = attached_volume['volumeAttachment']
+ self.assertEqual(volume['id'], attached_volume['id'])
self._wait_for_volume_status(attached_volume, 'in-use')
def _detach_volume(self, server, volume):
- detach_volume_client = self.compute_client.volumes.delete_server_volume
- detach_volume_client(server.id, volume.id)
+ self.servers_client.detach_volume(server['id'], volume['id'])
self._wait_for_volume_status(volume, 'available')
def _wait_for_volume_available_on_the_system(self, server_or_ip):
@@ -157,7 +157,7 @@
def test_stamp_pattern(self):
# prepare for booting a instance
self._add_keypair()
- self.security_group = self._create_security_group_nova()
+ self.security_group = self._create_security_group()
# boot an instance and create a timestamp file in it
volume = self._create_volume()
@@ -167,7 +167,7 @@
if CONF.compute.use_floatingip_for_ssh:
floating_ip_for_server = self._create_floating_ip()
self._add_floating_ip(server, floating_ip_for_server)
- ip_for_server = floating_ip_for_server.ip
+ ip_for_server = floating_ip_for_server['ip']
else:
ip_for_server = server
@@ -184,17 +184,17 @@
# create second volume from the snapshot(volume2)
volume_from_snapshot = self._create_volume(
- snapshot_id=volume_snapshot.id)
+ snapshot_id=volume_snapshot['id'])
# boot second instance from the snapshot(instance2)
- server_from_snapshot = self._boot_image(snapshot_image.id)
+ server_from_snapshot = self._boot_image(snapshot_image['id'])
# create and add floating IP to server_from_snapshot
if CONF.compute.use_floatingip_for_ssh:
floating_ip_for_snapshot = self._create_floating_ip()
self._add_floating_ip(server_from_snapshot,
floating_ip_for_snapshot)
- ip_for_snapshot = floating_ip_for_snapshot.ip
+ ip_for_snapshot = floating_ip_for_snapshot['ip']
else:
ip_for_snapshot = server_from_snapshot
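For reference, the attach step above unwraps the Nova response body before asserting on it; the JSON client hands back a body wrapped in a 'volumeAttachment' key, roughly shaped like the sketch below (field values are illustrative, not taken from this patch).

# Illustrative shape of the attach_volume response body handled above;
# only the 'volumeAttachment' wrapping and the 'id' field matter here.
resp_body = {
    'volumeAttachment': {
        'id': '<volume uuid>',
        'serverId': '<server uuid>',
        'volumeId': '<volume uuid>',
        'device': '/dev/vdb',
    }
}
attached_volume = resp_body['volumeAttachment']
assert attached_volume['id'] == attached_volume['volumeId']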
diff --git a/tempest/services/baremetal/base.py b/tempest/services/baremetal/base.py
index 0b97f74..4933300 100644
--- a/tempest/services/baremetal/base.py
+++ b/tempest/services/baremetal/base.py
@@ -95,9 +95,13 @@
for ch in get_change(value, path + '%s/' % name):
yield ch
else:
- yield {'path': path + name,
- 'value': value,
- 'op': 'replace'}
+ if value is None:
+ yield {'path': path + name,
+ 'op': 'remove'}
+ else:
+ yield {'path': path + name,
+ 'value': value,
+ 'op': 'replace'}
patch = [ch for ch in get_change(kw)
if ch['path'].lstrip('/') in allowed_attributes]
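To make the effect of the base.py change concrete: passing None for an allowed attribute now produces a JSON-patch 'remove' operation instead of a 'replace' with a null value. A minimal sketch, mirroring the instance_uuid cleanup added in test_nodes.py above:

# client.update_node(node_uuid, instance_uuid=None) now builds:
patch = [{'path': '/instance_uuid', 'op': 'remove'}]
# while a non-None value still builds a replace operation:
patch = [{'path': '/instance_uuid', 'value': instance_uuid, 'op': 'replace'}]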
diff --git a/tempest/services/baremetal/v1/base_v1.py b/tempest/services/baremetal/v1/base_v1.py
index 032e1da..9359808 100644
--- a/tempest/services/baremetal/v1/base_v1.py
+++ b/tempest/services/baremetal/v1/base_v1.py
@@ -27,9 +27,9 @@
self.uri_prefix = 'v%s' % self.version
@base.handle_errors
- def list_nodes(self):
+ def list_nodes(self, **kwargs):
"""List all existing nodes."""
- return self._list_request('nodes')
+ return self._list_request('nodes', **kwargs)
@base.handle_errors
def list_chassis(self):
@@ -37,11 +37,21 @@
return self._list_request('chassis')
@base.handle_errors
+ def list_chassis_nodes(self, chassis_uuid):
+ """List all nodes associated with a chassis."""
+ return self._list_request('/chassis/%s/nodes' % chassis_uuid)
+
+ @base.handle_errors
def list_ports(self, **kwargs):
"""List all existing ports."""
return self._list_request('ports', **kwargs)
@base.handle_errors
+ def list_node_ports(self, uuid):
+ """List all ports associated with the node."""
+ return self._list_request('/nodes/%s/ports' % uuid)
+
+ @base.handle_errors
def list_nodestates(self, uuid):
"""List all existing states."""
return self._list_request('/nodes/%s/states' % uuid)
@@ -68,6 +78,21 @@
return self._show_request('nodes', uuid)
@base.handle_errors
+ def show_node_by_instance_uuid(self, instance_uuid):
+ """
+ Gets the node associated with the given instance UUID.
+
+ :param instance_uuid: UUID of the instance associated with the node.
+ :return: Serialized node as a dictionary.
+
+ """
+ uri = '/nodes/detail?instance_uuid=%s' % instance_uuid
+
+ return self._show_request('nodes',
+ uuid=None,
+ uri=uri)
+
+ @base.handle_errors
def show_chassis(self, uuid):
"""
Gets a specific chassis.
@@ -203,7 +228,8 @@
'properties/cpu_num',
'properties/storage',
'properties/memory',
- 'driver')
+ 'driver',
+ 'instance_uuid')
patch = self._make_patch(node_attributes, **kwargs)
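Taken together, the new v1 client calls back the association and listing tests added earlier in this change; a rough usage sketch (variable names here are placeholders, not from the patch):

# Filter nodes by instance association (query parameters are passed
# straight through list_nodes(**kwargs)).
_, body = client.list_nodes(associated=True)
associated = [n['uuid'] for n in body['nodes']]

# New sub-resource listings.
_, body = client.list_chassis_nodes(chassis_uuid)
_, body = client.list_node_ports(node_uuid)

# Reverse lookup from an instance UUID to its node.
_, body = client.show_node_by_instance_uuid(instance_uuid)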
diff --git a/tempest/services/compute/json/servers_client.py b/tempest/services/compute/json/servers_client.py
index 88b68d3..947ba7a 100644
--- a/tempest/services/compute/json/servers_client.py
+++ b/tempest/services/compute/json/servers_client.py
@@ -175,11 +175,12 @@
return resp, body
def wait_for_server_status(self, server_id, status, extra_timeout=0,
- raise_on_error=True):
+ raise_on_error=True, ready_wait=True):
"""Waits for a server to reach a given status."""
return waiters.wait_for_server_status(self, server_id, status,
extra_timeout=extra_timeout,
- raise_on_error=raise_on_error)
+ raise_on_error=raise_on_error,
+ ready_wait=ready_wait)
def wait_for_server_termination(self, server_id, ignore_error=False):
"""Waits for server to reach termination."""
diff --git a/tempest/services/network/network_client_base.py b/tempest/services/network/network_client_base.py
index 94ba5aa..5ad5f37 100644
--- a/tempest/services/network/network_client_base.py
+++ b/tempest/services/network/network_client_base.py
@@ -13,6 +13,7 @@
import time
import urllib
+from tempest.common.utils import misc
from tempest import config
from tempest import exceptions
@@ -227,3 +228,39 @@
except exceptions.NotFound:
return True
return False
+
+ def wait_for_resource_status(self, fetch, status, interval=None,
+ timeout=None):
+ """
+ @summary: Waits for a network resource to reach a status
+ @param fetch: the callable to be used to query the resource status
+ @type fetch: callable that takes no parameters and returns the resource
+ @param status: the status that the resource has to reach
+ @type status: String
+ @param interval: the number of seconds to wait between each status
+ query
+ @type interval: Integer
+ @param timeout: the maximum number of seconds to wait for the resource
+ to reach the desired status
+ @type timeout: Integer
+ """
+ if not interval:
+ interval = self.build_interval
+ if not timeout:
+ timeout = self.build_timeout
+ start_time = time.time()
+
+ while time.time() - start_time <= timeout:
+ resource = fetch()
+ if resource['status'] == status:
+ return
+ time.sleep(interval)
+
+ # At this point, the wait has timed out
+ message = 'Resource %s' % (str(resource))
+ message += ' failed to reach status %s' % status
+ message += ' within the required time %s' % timeout
+ caller = misc.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ raise exceptions.TimeoutException(message)
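A minimal sketch of driving the new waiter, assuming a fetch callable built around the show_vip call used by DeletableVip below (the 'ACTIVE' status is an assumption):

def fetch_vip():
    _, result = client.show_vip(vip_id)
    return result['vip']

client.wait_for_resource_status(fetch_vip, 'ACTIVE')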
diff --git a/tempest/services/network/resources.py b/tempest/services/network/resources.py
index b2feb87..2b182d0 100644
--- a/tempest/services/network/resources.py
+++ b/tempest/services/network/resources.py
@@ -51,9 +51,19 @@
def delete(self):
return
+ @abc.abstractmethod
+ def show(self):
+ return
+
def __hash__(self):
return hash(self.id)
+ def wait_for_status(self, status):
+ if not hasattr(self, 'status'):
+ return
+
+ return self.client.wait_for_resource_status(self.show, status)
+
class DeletableNetwork(DeletableResource):
@@ -161,3 +171,8 @@
def delete(self):
self.client.delete_vip(self.id)
+
+ def show(self):
+ _, result = self.client.show_vip(self.id)
+ super(DeletableVip, self).update(**result['vip'])
+ return self
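With show() in place, the deletable wrapper can now block on a status transition; roughly (again, the status string is an assumption):

# vip is a DeletableVip created elsewhere in a scenario test.
vip.wait_for_status('ACTIVE')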
diff --git a/tempest/stress/actions/ssh_floating.py b/tempest/stress/actions/ssh_floating.py
index 478cd07..d78112c 100644
--- a/tempest/stress/actions/ssh_floating.py
+++ b/tempest/stress/actions/ssh_floating.py
@@ -30,7 +30,7 @@
proc = subprocess.Popen(cmd,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
- proc.wait()
+ proc.communicate()
success = proc.returncode == 0
return success
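The wait()-to-communicate() switch matters because both stdout and stderr are PIPEs here: wait() can deadlock once the child fills a pipe buffer, whereas communicate() drains both streams before the return code is checked. The safe pattern, in general form:

proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
out, err = proc.communicate()   # drains the pipes, then waits
success = proc.returncode == 0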
diff --git a/tempest/tests/common/test_accounts.py b/tempest/tests/common/test_accounts.py
index a0b3496..cf7ce65 100644
--- a/tempest/tests/common/test_accounts.py
+++ b/tempest/tests/common/test_accounts.py
@@ -57,6 +57,7 @@
'tempest.common.accounts.read_accounts_yaml',
return_value=self.test_accounts))
cfg.CONF.set_default('test_accounts_file', '', group='auth')
+ self.useFixture(mockpatch.Patch('os.path.isfile', return_value=True))
def _get_hash_list(self, accounts_list):
hash_list = []
@@ -220,6 +221,7 @@
'tempest.common.accounts.read_accounts_yaml',
return_value=self.test_accounts))
cfg.CONF.set_default('test_accounts_file', '', group='auth')
+ self.useFixture(mockpatch.Patch('os.path.isfile', return_value=True))
def test_get_creds(self):
test_accounts_class = accounts.NotLockingAccounts('test_name')
@@ -229,4 +231,4 @@
self.assertIsNotNone(creds, msg)
self.assertRaises(exceptions.InvalidConfiguration,
test_accounts_class.get_creds,
- id=len(self.test_accounts))
\ No newline at end of file
+ id=len(self.test_accounts))
diff --git a/tempest/tests/test_wrappers.py b/tempest/tests/test_wrappers.py
index 3f4ac7d..0fd41f9 100644
--- a/tempest/tests/test_wrappers.py
+++ b/tempest/tests/test_wrappers.py
@@ -62,14 +62,11 @@
p = subprocess.Popen(
"bash %s" % cmd, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
- # wait in the general case is dangerous, however the amount of
- # data coming back on those pipes is small enough it shouldn't be
- # a problem.
- p.wait()
+ out, err = p.communicate()
self.assertEqual(
p.returncode, expected,
- "Stdout: %s; Stderr: %s" % (p.stdout.read(), p.stderr.read()))
+ "Stdout: %s; Stderr: %s" % (out, err))
def test_pretty_tox(self):
# Git init is required for the pbr testr command. pbr requires a git
diff --git a/tempest/thirdparty/boto/test.py b/tempest/thirdparty/boto/test.py
index 4bf71f3..f94d880 100644
--- a/tempest/thirdparty/boto/test.py
+++ b/tempest/thirdparty/boto/test.py
@@ -108,8 +108,8 @@
CODE_RE = '.*' # regexp makes sense in group match
def match(self, exc):
- """:returns: Retruns with an error string if not matches,
- returns with None when matches.
+ """:returns: Returns with an error string if it does not match,
+ returns with None when it matches.
"""
if not isinstance(exc, exception.BotoServerError):
return "%r not an BotoServerError instance" % exc
@@ -485,7 +485,7 @@
@classmethod
def destroy_volume_wait(cls, volume):
- """Delete volume, tryies to detach first.
+ """Delete volume, tries to detach first.
Use just for teardown!
"""
exc_num = 0
@@ -518,7 +518,7 @@
@classmethod
def destroy_snapshot_wait(cls, snapshot):
- """delete snaphot, wait until not exists."""
+ """delete snapshot, wait until it ceases to exist."""
snapshot.delete()
def _update():
diff --git a/tools/check_logs.py b/tools/check_logs.py
index 917aaaf..7cf9d85 100755
--- a/tools/check_logs.py
+++ b/tools/check_logs.py
@@ -80,7 +80,6 @@
def scan_content(name, content, regexp, whitelist):
had_errors = False
- print_log_name = True
for line in content:
if not line.startswith("Stderr:") and regexp.match(line):
whitelisted = False
@@ -91,13 +90,8 @@
whitelisted = True
break
if not whitelisted or dump_all_errors:
- if print_log_name:
- print("\nLog File Has Errors: %s" % name)
- print_log_name = False
if not whitelisted:
had_errors = True
- print("*** Not Whitelisted ***"),
- print(line.rstrip())
return had_errors
@@ -151,17 +145,21 @@
whitelists = loaded
logs_with_errors = process_files(files_to_process, urls_to_process,
whitelists)
- if logs_with_errors:
- print("Logs have errors")
- if is_grenade:
- print("Currently not failing grenade runs with errors")
- return 0
+
failed = False
- for log in logs_with_errors:
- if log not in allowed_dirty:
- print("Log: %s not allowed to have ERRORS or TRACES" % log)
- failed = True
+ if logs_with_errors:
+ log_files = set(logs_with_errors)
+ for log in log_files:
+ msg = '%s log file has errors' % log
+ if log not in allowed_dirty:
+ msg += ' and is not allowed to have them'
+ failed = True
+ print(msg)
+ print("\nPlease check the respective log files to see the errors")
if failed:
+ if is_grenade:
+ print("Currently not failing grenade runs with errors")
+ return 0
return 1
print("ok")
return 0
diff --git a/tox.ini b/tox.ini
index 6ec0b2c..492c4f6 100644
--- a/tox.ini
+++ b/tox.ini
@@ -6,28 +6,23 @@
[testenv]
setenv = VIRTUAL_ENV={envdir}
OS_TEST_PATH=./tempest/test_discover
- PYTHONHASHSEED=0
usedevelop = True
install_command = pip install -U {opts} {packages}
[testenv:py26]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
[testenv:py33]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
[testenv:py27]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py test --slowest --testr-arg='tempest\.tests {posargs}'
[testenv:cover]
setenv = OS_TEST_PATH=./tempest/tests
- PYTHONHASHSEED=0
commands = python setup.py testr --coverage --testr-arg='tempest\.tests {posargs}'
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
@@ -96,6 +91,7 @@
-r{toxinidir}/test-requirements.txt
[testenv:pep8]
+setenv = PYTHONHASHSEED=0
commands =
flake8 {posargs}
{toxinidir}/tools/config/check_uptodate.sh