Merge "Add T111 history to HACKING.rst"
diff --git a/README.rst b/README.rst
index c859ddd..7da83cd 100644
--- a/README.rst
+++ b/README.rst
@@ -1,6 +1,14 @@
Tempest - The OpenStack Integration Test Suite
==============================================
+.. image:: https://img.shields.io/pypi/v/tempest.svg
+ :target: https://pypi.python.org/pypi/tempest/
+ :alt: Latest Version
+
+.. image:: https://img.shields.io/pypi/dm/tempest.svg
+ :target: https://pypi.python.org/pypi/tempest/
+ :alt: Downloads
+
This is a set of integration tests to be run against a live OpenStack
cluster. Tempest has batteries of tests for OpenStack API validation,
Scenarios, and other specific tests useful in validating an OpenStack
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index ead6db3..94635ff 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -81,7 +81,8 @@
body = self.volumes_client.show_volume(volume_id)['volume']
if body['status'] == 'in-use':
self.servers_client.detach_volume(server_id, volume_id)
- self.volumes_client.wait_for_volume_status(volume_id, 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume_id, 'available')
self.volumes_client.delete_volume(volume_id)
def _test_live_migration(self, state='ACTIVE', volume_backed=False):
@@ -152,14 +153,15 @@
volume = self.volumes_client.create_volume(
display_name='test')['volume']
- self.volumes_client.wait_for_volume_status(volume['id'],
- 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'available')
self.addCleanup(self._volume_clean_up, server_id, volume['id'])
# Attach the volume to the server
self.servers_client.attach_volume(server_id, volumeId=volume['id'],
device='/dev/xvdb')
- self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'in-use')
self._migrate_server_to(server_id, target_host)
waiters.wait_for_server_status(self.servers_client,
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 01a8e58..e9c8e30 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -52,7 +52,8 @@
def _detach(self, server_id, volume_id):
if self.attachment:
self.servers_client.detach_volume(server_id, volume_id)
- self.volumes_client.wait_for_volume_status(volume_id, 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume_id, 'available')
def _delete_volume(self):
# Delete the created Volumes
@@ -77,15 +78,16 @@
self.volume = self.volumes_client.create_volume(
size=CONF.volume.volume_size, display_name='test')['volume']
self.addCleanup(self._delete_volume)
- self.volumes_client.wait_for_volume_status(self.volume['id'],
- 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ self.volume['id'], 'available')
# Attach the volume to the server
self.attachment = self.servers_client.attach_volume(
self.server['id'],
volumeId=self.volume['id'],
device='/dev/%s' % self.device)['volumeAttachment']
- self.volumes_client.wait_for_volume_status(self.volume['id'], 'in-use')
+ waiters.wait_for_volume_status(self.volumes_client,
+ self.volume['id'], 'in-use')
self.addCleanup(self._detach, self.server['id'], self.volume['id'])
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 60e6e6c..f19717e 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -13,6 +13,7 @@
import six
from tempest.api.volume import base
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest import test
@@ -80,8 +81,8 @@
else:
self.volume_id_list_without_prefix.append(
self.volume['id'])
- self.admin_volume_client.wait_for_volume_status(
- self.volume['id'], 'available')
+ waiters.wait_for_volume_status(self.admin_volume_client,
+ self.volume['id'], 'available')
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_snapshots_actions.py b/tempest/api/volume/admin/test_snapshots_actions.py
index f2bf613..26a5a45 100644
--- a/tempest/api/volume/admin/test_snapshots_actions.py
+++ b/tempest/api/volume/admin/test_snapshots_actions.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest import test
@@ -41,18 +42,17 @@
vol_name = data_utils.rand_name(cls.__name__ + '-Volume')
cls.name_field = cls.special_fields['name_field']
params = {cls.name_field: vol_name}
- cls.volume = \
- cls.volumes_client.create_volume(**params)['volume']
- cls.volumes_client.wait_for_volume_status(cls.volume['id'],
- 'available')
+ cls.volume = cls.volumes_client.create_volume(**params)['volume']
+ waiters.wait_for_volume_status(cls.volumes_client,
+ cls.volume['id'], 'available')
# Create a test shared snapshot for tests
snap_name = data_utils.rand_name(cls.__name__ + '-Snapshot')
params = {cls.name_field: snap_name}
cls.snapshot = cls.client.create_snapshot(
volume_id=cls.volume['id'], **params)['snapshot']
- cls.client.wait_for_snapshot_status(cls.snapshot['id'],
- 'available')
+ waiters.wait_for_snapshot_status(cls.client,
+ cls.snapshot['id'], 'available')
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/volume/admin/test_volume_types.py b/tempest/api/volume/admin/test_volume_types.py
index c032d9c..7202881 100644
--- a/tempest/api/volume/admin/test_volume_types.py
+++ b/tempest/api/volume/admin/test_volume_types.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest import test
@@ -66,12 +67,14 @@
"to the requested name")
self.assertIsNotNone(volume['id'],
"Field volume id is empty or not found.")
- self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'available')
# Update volume with new volume_type
self.volumes_client.retype_volume(volume['id'],
new_type=volume_types[1]['id'])
- self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'available')
# Get volume details and Verify
fetched_volume = self.volumes_client.show_volume(
diff --git a/tempest/api/volume/admin/test_volumes_actions.py b/tempest/api/volume/admin/test_volumes_actions.py
index 253a3e1..bdb313f 100644
--- a/tempest/api/volume/admin/test_volumes_actions.py
+++ b/tempest/api/volume/admin/test_volumes_actions.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common.utils import data_utils as utils
+from tempest.common import waiters
from tempest import test
@@ -35,7 +36,8 @@
params = {cls.name_field: vol_name}
cls.volume = cls.client.create_volume(**params)['volume']
- cls.client.wait_for_volume_status(cls.volume['id'], 'available')
+ waiters.wait_for_volume_status(cls.client,
+ cls.volume['id'], 'available')
@classmethod
def resource_cleanup(cls):
@@ -61,7 +63,8 @@
vol_name = utils.rand_name('Volume')
params = {self.name_field: vol_name}
temp_volume = self.client.create_volume(**params)['volume']
- self.client.wait_for_volume_status(temp_volume['id'], 'available')
+ waiters.wait_for_volume_status(self.client,
+ temp_volume['id'], 'available')
return temp_volume
diff --git a/tempest/api/volume/admin/test_volumes_backup.py b/tempest/api/volume/admin/test_volumes_backup.py
index 30c6a15..b09cd2c 100644
--- a/tempest/api/volume/admin/test_volumes_backup.py
+++ b/tempest/api/volume/admin/test_volumes_backup.py
@@ -15,6 +15,7 @@
from tempest.api.volume import base
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest import test
@@ -50,8 +51,8 @@
self.addCleanup(self.backups_adm_client.delete_backup,
backup['id'])
self.assertEqual(backup_name, backup['name'])
- self.admin_volume_client.wait_for_volume_status(
- self.volume['id'], 'available')
+ waiters.wait_for_volume_status(self.admin_volume_client,
+ self.volume['id'], 'available')
self.backups_adm_client.wait_for_backup_status(backup['id'],
'available')
@@ -74,8 +75,8 @@
self.assertEqual(backup['id'], restore['backup_id'])
self.backups_adm_client.wait_for_backup_status(backup['id'],
'available')
- self.admin_volume_client.wait_for_volume_status(
- restore['volume_id'], 'available')
+ waiters.wait_for_volume_status(self.admin_volume_client,
+ restore['volume_id'], 'available')
@decorators.skip_because(bug='1455043')
@test.idempotent_id('a99c54a1-dd80-4724-8a13-13bf58d4068d')
@@ -117,8 +118,8 @@
self.addCleanup(self.admin_volume_client.delete_volume,
restore['volume_id'])
self.assertEqual(import_backup['id'], restore['backup_id'])
- self.admin_volume_client.wait_for_volume_status(restore['volume_id'],
- 'available')
+ waiters.wait_for_volume_status(self.admin_volume_client,
+ restore['volume_id'], 'available')
# Verify if restored volume is there in volume list
volumes = self.admin_volume_client.list_volumes()['volumes']
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 82dc2c9..14819e3 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -15,6 +15,7 @@
from tempest.common import compute
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib import exceptions as lib_exc
@@ -112,7 +113,8 @@
volume = cls.volumes_client.create_volume(**kwargs)['volume']
cls.volumes.append(volume)
- cls.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(cls.volumes_client,
+ volume['id'], 'available')
return volume
@classmethod
@@ -121,8 +123,8 @@
snapshot = cls.snapshots_client.create_snapshot(
volume_id=volume_id, **kwargs)['snapshot']
cls.snapshots.append(snapshot)
- cls.snapshots_client.wait_for_snapshot_status(snapshot['id'],
- 'available')
+ waiters.wait_for_snapshot_status(cls.snapshots_client,
+ snapshot['id'], 'available')
return snapshot
# NOTE(afazekas): these create_* and clean_* could be defined
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 7046dcf..866db3d 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -16,6 +16,7 @@
from testtools import matchers
from tempest.api.volume import base
+from tempest.common import waiters
from tempest import config
from tempest import test
@@ -51,8 +52,8 @@
volume_id=volume['id'])['transfer']
transfer_id = transfer['id']
auth_key = transfer['auth_key']
- self.client.wait_for_volume_status(volume['id'],
- 'awaiting-transfer')
+ waiters.wait_for_volume_status(self.client,
+ volume['id'], 'awaiting-transfer')
# Get a volume transfer
body = self.client.show_volume_transfer(transfer_id)['transfer']
@@ -66,7 +67,8 @@
# Accept a volume transfer by alt_tenant
body = self.alt_client.accept_volume_transfer(
transfer_id, auth_key=auth_key)['transfer']
- self.alt_client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.alt_client,
+ volume['id'], 'available')
@test.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
def test_create_list_delete_volume_transfer(self):
@@ -78,8 +80,8 @@
body = self.client.create_volume_transfer(
volume_id=volume['id'])['transfer']
transfer_id = body['id']
- self.client.wait_for_volume_status(volume['id'],
- 'awaiting-transfer')
+ waiters.wait_for_volume_status(self.client,
+ volume['id'], 'awaiting-transfer')
# List all volume transfers (looking for the one we created)
body = self.client.list_volume_transfers()['transfers']
@@ -91,7 +93,7 @@
# Delete a volume transfer
self.client.delete_volume_transfer(transfer_id)
- self.client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.client, volume['id'], 'available')
class VolumesV1TransfersTest(VolumesV2TransfersTest):
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 5f9ea7f..9c67579 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -43,7 +43,8 @@
# Create a test shared volume for attach/detach tests
cls.volume = cls.create_volume()
- cls.client.wait_for_volume_status(cls.volume['id'], 'available')
+ waiters.wait_for_volume_status(cls.client,
+ cls.volume['id'], 'available')
@classmethod
def resource_cleanup(cls):
@@ -64,9 +65,11 @@
self.client.attach_volume(self.volume['id'],
instance_uuid=self.server['id'],
mountpoint=mountpoint)
- self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+ waiters.wait_for_volume_status(self.client,
+ self.volume['id'], 'in-use')
self.client.detach_volume(self.volume['id'])
- self.client.wait_for_volume_status(self.volume['id'], 'available')
+ waiters.wait_for_volume_status(self.client,
+ self.volume['id'], 'available')
@test.idempotent_id('63e21b4c-0a0c-41f6-bfc3-7c2816815599')
@testtools.skipUnless(CONF.volume_feature_enabled.bootable,
@@ -91,10 +94,11 @@
self.client.attach_volume(self.volume['id'],
instance_uuid=self.server['id'],
mountpoint=mountpoint)
- self.client.wait_for_volume_status(self.volume['id'], 'in-use')
+ waiters.wait_for_volume_status(self.client,
+ self.volume['id'], 'in-use')
# NOTE(gfidente): added in reverse order because functions will be
# called in reverse order to the order they are added (LIFO)
- self.addCleanup(self.client.wait_for_volume_status,
+ self.addCleanup(waiters.wait_for_volume_status, self.client,
self.volume['id'],
'available')
self.addCleanup(self.client.detach_volume, self.volume['id'])
@@ -120,7 +124,8 @@
image_id = body["image_id"]
self.addCleanup(self.image_client.delete_image, image_id)
self.image_client.wait_for_image_status(image_id, 'active')
- self.client.wait_for_volume_status(self.volume['id'], 'available')
+ waiters.wait_for_volume_status(self.client,
+ self.volume['id'], 'available')
@test.idempotent_id('92c4ef64-51b2-40c0-9f7e-4749fbaaba33')
def test_reserve_unreserve_volume(self):
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index ed1e5c5..1947779 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -14,6 +14,7 @@
# under the License.
from tempest.api.volume import base
+from tempest.common import waiters
from tempest import config
from tempest import test
@@ -33,7 +34,8 @@
self.volume = self.create_volume()
extend_size = int(self.volume['size']) + 1
self.client.extend_volume(self.volume['id'], new_size=extend_size)
- self.client.wait_for_volume_status(self.volume['id'], 'available')
+ waiters.wait_for_volume_status(self.client,
+ self.volume['id'], 'available')
volume = self.client.show_volume(self.volume['id'])['volume']
self.assertEqual(int(volume['size']), extend_size)
diff --git a/tempest/api/volume/test_volumes_get.py b/tempest/api/volume/test_volumes_get.py
index aa3ef2f..5d83bb0 100644
--- a/tempest/api/volume/test_volumes_get.py
+++ b/tempest/api/volume/test_volumes_get.py
@@ -18,6 +18,7 @@
from tempest.api.volume import base
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest import test
@@ -53,7 +54,7 @@
volume = self.client.create_volume(**kwargs)['volume']
self.assertIn('id', volume)
self.addCleanup(self._delete_volume, volume['id'])
- self.client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.client, volume['id'], 'available')
self.assertIn(self.name_field, volume)
self.assertEqual(volume[self.name_field], v_name,
"The created volume name is not equal "
@@ -113,7 +114,8 @@
new_volume = self.client.create_volume(**params)['volume']
self.assertIn('id', new_volume)
self.addCleanup(self._delete_volume, new_volume['id'])
- self.client.wait_for_volume_status(new_volume['id'], 'available')
+ waiters.wait_for_volume_status(self.client,
+ new_volume['id'], 'available')
params = {self.name_field: volume[self.name_field],
self.descrip_field: volume[self.descrip_field]}
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index c79235a..347877c 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -12,6 +12,7 @@
from tempest.api.volume import base
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest import test
@@ -37,7 +38,8 @@
def _detach(self, volume_id):
"""Detach volume."""
self.volumes_client.detach_volume(volume_id)
- self.volumes_client.wait_for_volume_status(volume_id, 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume_id, 'available')
def _list_by_param_values_and_assert(self, with_detail=False, **params):
"""list or list_details with given params and validates result."""
@@ -70,9 +72,9 @@
self.servers_client.attach_volume(
server['id'], volumeId=self.volume_origin['id'],
device=mountpoint)
- self.volumes_client.wait_for_volume_status(self.volume_origin['id'],
- 'in-use')
- self.addCleanup(self.volumes_client.wait_for_volume_status,
+ waiters.wait_for_volume_status(self.volumes_client,
+ self.volume_origin['id'], 'in-use')
+ self.addCleanup(waiters.wait_for_volume_status, self.volumes_client,
self.volume_origin['id'], 'available')
self.addCleanup(self.servers_client.detach_volume, server['id'],
self.volume_origin['id'])
@@ -171,7 +173,8 @@
# NOTE(gfidente): size is required also when passing snapshot_id
volume = self.volumes_client.create_volume(
snapshot_id=snapshot['id'])['volume']
- self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'available')
self.volumes_client.delete_volume(volume['id'])
self.volumes_client.wait_for_resource_deletion(volume['id'])
self.cleanup_snapshot(snapshot)
diff --git a/tempest/cmd/javelin.py b/tempest/cmd/javelin.py
index 82a121c..e3c1c64 100755
--- a/tempest/cmd/javelin.py
+++ b/tempest/cmd/javelin.py
@@ -1031,7 +1031,7 @@
v_name = volume['name']
body = client.volumes.create_volume(size=size,
display_name=v_name)['volume']
- client.volumes.wait_for_volume_status(body['id'], 'available')
+ waiters.wait_for_volume_status(client.volumes, body['id'], 'available')
def destroy_volumes(volumes):
diff --git a/tempest/cmd/verify_tempest_config.py b/tempest/cmd/verify_tempest_config.py
index 9049886..0ba322d 100644
--- a/tempest/cmd/verify_tempest_config.py
+++ b/tempest/cmd/verify_tempest_config.py
@@ -97,7 +97,14 @@
ca_certs=ca_certs)
__, body = raw_http.request(endpoint, 'GET')
client_dict[service].reset_path()
- body = json.loads(body)
+ try:
+ body = json.loads(body)
+ except ValueError:
+ LOG.error(
+ 'Failed to get a JSON response from unversioned endpoint %s '
+ '(versioned endpoint was %s). Response is:\n%s',
+ endpoint, client_dict[service].base_url, body[:100])
+ raise
if service == 'keystone':
versions = map(lambda x: x['id'], body['versions']['values'])
else:
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 2d2909a..b5c4547 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -98,8 +98,8 @@
volume = volumes_client.create_volume(
display_name=volume_name,
imageRef=image_id)
- volumes_client.wait_for_volume_status(volume['volume']['id'],
- 'available')
+ waiters.wait_for_volume_status(volumes_client,
+ volume['volume']['id'], 'available')
bd_map_v2 = [{
'uuid': volume['volume']['id'],
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index c9ac0a4..1217dc9 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -283,7 +283,8 @@
self.assertEqual(name, volume['display_name'])
else:
self.assertEqual(name, volume['name'])
- self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'available')
# The volume retrieved on creation has a non-up-to-date status.
# Retrieval after it becomes active ensures correct details.
volume = self.volumes_client.show_volume(volume['id'])['volume']
@@ -478,8 +479,8 @@
self.addCleanup(
self.delete_wrapper, self.snapshots_client.delete_snapshot,
snapshot_id)
- self.snapshots_client.wait_for_snapshot_status(snapshot_id,
- 'available')
+ waiters.wait_for_snapshot_status(self.snapshots_client,
+ snapshot_id, 'available')
image_name = snapshot_image['name']
self.assertEqual(name, image_name)
@@ -492,14 +493,16 @@
server['id'], volumeId=volume_to_attach['id'], device='/dev/%s'
% CONF.compute.volume_device_name)['volumeAttachment']
self.assertEqual(volume_to_attach['id'], volume['id'])
- self.volumes_client.wait_for_volume_status(volume['id'], 'in-use')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'in-use')
# Return the updated volume after the attachment
return self.volumes_client.show_volume(volume['id'])['volume']
def nova_volume_detach(self, server, volume):
self.servers_client.detach_volume(server['id'], volume['id'])
- self.volumes_client.wait_for_volume_status(volume['id'], 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'available')
volume = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual('available', volume['status'])
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index dcb095b..a9f2dff 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -119,7 +119,7 @@
@test.idempotent_id('7fff3fb3-91d8-4fd0-bd7d-0204f1f180ba')
@test.attr(type='smoke')
@test.services('compute', 'network')
- def test_server_basicops(self):
+ def test_server_basic_ops(self):
keypair = self.create_keypair()
self.security_group = self._create_security_group()
security_groups = [{'name': self.security_group['name']}]
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 6121a90..e7223c7 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -19,6 +19,7 @@
import testtools
from tempest.common.utils import data_utils
+from tempest.common import waiters
from tempest import config
from tempest import exceptions
from tempest.lib import decorators
@@ -67,14 +68,15 @@
self.snapshots_client.delete_snapshot(snapshot['id'])
try:
while self.snapshots_client.show_snapshot(
- snapshot['id'])['snapshot']:
+ snapshot['id'])['snapshot']:
time.sleep(1)
except lib_exc.NotFound:
pass
self.addCleanup(cleaner)
- self.volumes_client.wait_for_volume_status(volume['id'], 'available')
- self.snapshots_client.wait_for_snapshot_status(snapshot['id'],
- 'available')
+ waiters.wait_for_volume_status(self.volumes_client,
+ volume['id'], 'available')
+ waiters.wait_for_snapshot_status(self.snapshots_client,
+ snapshot['id'], 'available')
self.assertEqual(snapshot_name, snapshot['display_name'])
return snapshot
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 71bb50e..25d825a 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -87,7 +87,8 @@
self.addCleanup(
self.snapshots_client.wait_for_resource_deletion, snap['id'])
self.addCleanup(self.snapshots_client.delete_snapshot, snap['id'])
- self.snapshots_client.wait_for_snapshot_status(snap['id'], 'available')
+ waiters.wait_for_snapshot_status(self.snapshots_client,
+ snap['id'], 'available')
# NOTE(e0ne): Cinder API v2 uses name instead of display_name
if 'display_name' in snap:
diff --git a/tempest/services/volume/base/base_snapshots_client.py b/tempest/services/volume/base/base_snapshots_client.py
index 5e5637a..68503dd 100644
--- a/tempest/services/volume/base/base_snapshots_client.py
+++ b/tempest/services/volume/base/base_snapshots_client.py
@@ -10,13 +10,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-import time
-
from oslo_log import log as logging
from oslo_serialization import jsonutils as json
from six.moves.urllib import parse as urllib
-from tempest import exceptions
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -70,43 +67,6 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
- # NOTE(afazekas): just for the wait function
- def _get_snapshot_status(self, snapshot_id):
- body = self.show_snapshot(snapshot_id)['snapshot']
- status = body['status']
- # NOTE(afazekas): snapshot can reach an "error"
- # state in a "normal" lifecycle
- if (status == 'error'):
- raise exceptions.SnapshotBuildErrorException(
- snapshot_id=snapshot_id)
-
- return status
-
- # NOTE(afazkas): Wait reinvented again. It is not in the correct layer
- def wait_for_snapshot_status(self, snapshot_id, status):
- """Waits for a Snapshot to reach a given status."""
- start_time = time.time()
- old_value = value = self._get_snapshot_status(snapshot_id)
- while True:
- dtime = time.time() - start_time
- time.sleep(self.build_interval)
- if value != old_value:
- LOG.info('Value transition from "%s" to "%s"'
- 'in %d second(s).', old_value,
- value, dtime)
- if (value == status):
- return value
-
- if dtime > self.build_timeout:
- message = ('Time Limit Exceeded! (%ds)'
- 'while waiting for %s, '
- 'but we got %s.' %
- (self.build_timeout, status, value))
- raise exceptions.TimeoutException(message)
- time.sleep(self.build_interval)
- old_value = value
- value = self._get_snapshot_status(snapshot_id)
-
def delete_snapshot(self, snapshot_id):
"""Delete Snapshot."""
resp, body = self.delete("snapshots/%s" % str(snapshot_id))
diff --git a/tempest/services/volume/base/base_volumes_client.py b/tempest/services/volume/base/base_volumes_client.py
index f638bb6..4344802 100644
--- a/tempest/services/volume/base/base_volumes_client.py
+++ b/tempest/services/volume/base/base_volumes_client.py
@@ -17,7 +17,6 @@
import six
from six.moves.urllib import parse as urllib
-from tempest.common import waiters
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
@@ -148,10 +147,6 @@
self.expected_success(202, resp.status)
return rest_client.ResponseBody(resp, body)
- def wait_for_volume_status(self, volume_id, status):
- """Waits for a Volume to reach a given status."""
- waiters.wait_for_volume_status(self, volume_id, status)
-
def is_resource_deleted(self, id):
try:
self.show_volume(id)
diff --git a/tempest/stress/cleanup.py b/tempest/stress/cleanup.py
index 1c1fb46..3b0a937 100644
--- a/tempest/stress/cleanup.py
+++ b/tempest/stress/cleanup.py
@@ -88,8 +88,8 @@
LOG.info("Cleanup::remove %s snapshots" % len(snaps))
for v in snaps:
try:
- admin_manager.snapshots_client.\
- wait_for_snapshot_status(v['id'], 'available')
+ waiters.wait_for_snapshot_status(
+ admin_manager.snapshots_client, v['id'], 'available')
admin_manager.snapshots_client.delete_snapshot(v['id'])
except Exception:
pass
@@ -105,8 +105,8 @@
LOG.info("Cleanup::remove %s volumes" % len(vols))
for v in vols:
try:
- admin_manager.volumes_client.\
- wait_for_volume_status(v['id'], 'available')
+ waiters.wait_for_volume_status(
+ admin_manager.volumes_client, v['id'], 'available')
admin_manager.volumes_client.delete_volume(v['id'])
except Exception:
pass
diff --git a/tempest/tests/cmd/test_javelin.py b/tempest/tests/cmd/test_javelin.py
index 57cfe97..2d0256a 100644
--- a/tempest/tests/cmd/test_javelin.py
+++ b/tempest/tests/cmd/test_javelin.py
@@ -213,8 +213,8 @@
name=self.fake_object['name'],
ip_version=fake_version)
- def test_create_volumes(self):
-
+ @mock.patch("tempest.common.waiters.wait_for_volume_status")
+ def test_create_volumes(self, mock_wait_for_volume_status):
self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
return_value=self.fake_client))
self.useFixture(mockpatch.PatchObject(javelin, "_get_volume_by_name",
@@ -228,12 +228,12 @@
mocked_function.assert_called_once_with(
size=self.fake_object['gb'],
display_name=self.fake_object['name'])
- mocked_function = self.fake_client.volumes.wait_for_volume_status
- mocked_function.assert_called_once_with(
- self.fake_object.body['volume']['id'],
+ mock_wait_for_volume_status.assert_called_once_with(
+ self.fake_client.volumes, self.fake_object.body['volume']['id'],
'available')
- def test_create_volume_existing(self):
+ @mock.patch("tempest.common.waiters.wait_for_volume_status")
+ def test_create_volume_existing(self, mock_wait_for_volume_status):
self.useFixture(mockpatch.PatchObject(javelin, "client_for_user",
return_value=self.fake_client))
self.useFixture(mockpatch.PatchObject(javelin, "_get_volume_by_name",
@@ -245,8 +245,7 @@
mocked_function = self.fake_client.volumes.create_volume
self.assertFalse(mocked_function.called)
- mocked_function = self.fake_client.volumes.wait_for_volume_status
- self.assertFalse(mocked_function.called)
+ self.assertFalse(mock_wait_for_volume_status.called)
def test_create_router(self):
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index 193abc7..dc0ba6f 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -82,6 +82,28 @@
self.assertIn('v2.0', versions)
self.assertIn('v3.0', versions)
+ def test_get_versions_invalid_response(self):
+ # When the response body is not valid JSON, an error is
+ # logged.
+ mock_log_error = self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config.LOG, 'error')).mock
+
+ self.useFixture(mockpatch.PatchObject(
+ verify_tempest_config, '_get_unversioned_endpoint'))
+
+ # Simulated response is not JSON.
+ sample_body = (
+ '<html><head>Sample Response</head><body>This is the sample page '
+ 'for the web server. Why are you requesting it?</body></html>')
+ self.useFixture(mockpatch.Patch('httplib2.Http.request',
+ return_value=(None, sample_body)))
+
+ # service value doesn't matter, just needs to match what
+ # _get_api_versions puts in its client_dict.
+ self.assertRaises(ValueError, verify_tempest_config._get_api_versions,
+ os=mock.MagicMock(), service='keystone')
+ self.assertTrue(mock_log_error.called)
+
def test_verify_api_versions(self):
api_services = ['cinder', 'glance', 'keystone']
fake_os = mock.MagicMock()