Merge "Add Jammy integration jobs to gate pipeline also"
diff --git a/releasenotes/notes/image-wait-multiple-79c55305b584b1ba.yaml b/releasenotes/notes/image-wait-multiple-79c55305b584b1ba.yaml
new file mode 100644
index 0000000..6f63ebd
--- /dev/null
+++ b/releasenotes/notes/image-wait-multiple-79c55305b584b1ba.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+ The wait_for_image_status() waiter now accepts a list of status values
+ in addition to a single string, and returns the status the image was in
+ when the wait ended.
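
With this change a caller can accept several terminal states and branch on
the one actually reached. A minimal call-site sketch, assuming an images
client and an image fixture already exist in the test (the names below are
illustrative, not part of this change):

    final = waiters.wait_for_image_status(
        images_client, image['id'], ['active', 'deactivated'])
    if final == 'deactivated':
        # Assumed helper on the Glance v2 client; reactivate before reuse.
        images_client.reactivate_image(image['id'])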
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index b5ee9b1..c5d5b19 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -150,6 +150,15 @@
compute.shelve_server(self.servers_client, server['id'],
force_shelve_offload=True)
+ # Work around https://bugs.launchpad.net/nova/+bug/2045785
+ # This can be removed when ^ is fixed.
+ def _check_server_host_is_none():
+ server_details = self.os_admin.servers_client.show_server(
+ server['id'])
+ self.assertIsNone(server_details['server']['OS-EXT-SRV-ATTR:host'])
+
+ self.wait_for(_check_server_host_is_none)
+
self.os_admin.servers_client.unshelve_server(
server['id'],
body={'unshelve': {'host': host}}
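
For context, the wait_for() helper used above comes from the base compute
test class and simply retries a callable until it stops raising or a timeout
expires. A standalone sketch of that polling pattern, under the assumption of
fixed interval/timeout values (an illustration, not the exact Tempest
implementation):

    import time

    def wait_for(condition, interval=1, timeout=60):
        """Call condition() repeatedly until it passes or we time out."""
        start = time.time()
        while True:
            try:
                condition()
                return
            except AssertionError:
                if time.time() - start > timeout:
                    raise
            time.sleep(interval)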
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 2c42bfd..b4312b7 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -154,13 +154,21 @@
def wait_for_image_status(client, image_id, status):
- """Waits for an image to reach a given status.
+ """Waits for an image to reach a given status (or list of them).
The client should have a show_image(image_id) method to get the image.
The client should also have build_interval and build_timeout attributes.
+
+ status may be either a single string or a list of strings, each of
+ which is treated as a terminal state; the status actually reached is
+ returned.
"""
show_image = client.show_image
+ if isinstance(status, str):
+ terminal_status = [status]
+ else:
+ terminal_status = status
+
current_status = 'An unknown status'
start = int(time.time())
while int(time.time()) - start < client.build_timeout:
@@ -171,8 +179,8 @@
image = image['image']
current_status = image['status']
- if current_status == status:
- return
+ if current_status in terminal_status:
+ return current_status
if current_status.lower() == 'killed':
raise exceptions.ImageKilledException(image_id=image_id,
status=status)
@@ -184,7 +192,7 @@
message = ('Image %(image_id)s failed to reach %(status)s state '
'(current state %(current_status)s) within the required '
'time (%(timeout)s s).' % {'image_id': image_id,
- 'status': status,
+ 'status': ','.join(terminal_status),
'current_status': current_status,
'timeout': client.build_timeout})
caller = test_utils.find_test_caller()
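
The string-or-list normalization above can be exercised without a real
Glance endpoint by stubbing the interface the waiter relies on: a
show_image() method plus build_interval and build_timeout attributes. A
minimal sketch under those assumptions:

    from tempest.common import waiters

    class FakeImagesClient:
        build_interval = 0
        build_timeout = 10

        def __init__(self, statuses):
            self._statuses = iter(statuses)

        def show_image(self, image_id):
            return {'status': next(self._statuses)}

    client = FakeImagesClient(['queued', 'saving', 'deactivated'])
    # Either terminal state ends the wait; the one reached is returned.
    final = waiters.wait_for_image_status(
        client, 'fake-id', ['active', 'deactivated'])
    assert final == 'deactivated'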
diff --git a/tempest/lib/common/http.py b/tempest/lib/common/http.py
index d163968..5bdcecd 100644
--- a/tempest/lib/common/http.py
+++ b/tempest/lib/common/http.py
@@ -60,6 +60,14 @@
retry = urllib3.util.Retry(redirect=False)
r = super(ClosingProxyHttp, self).request(method, url, retries=retry,
*args, **new_kwargs)
+
+ # Clearing the pool is necessary to free memory that holds certificates
+ # loaded by the HTTPConnection class in urllib3. This line can be
+ # removed once we require a newer version of urllib3 (e.g., 2.2.3) that
+ # does not retain certificates in memory for each HTTPConnection
+ # managed by the PoolManager.
+ self.clear()
+
if not kwargs.get('preload_content', True):
# This means we asked urllib3 for streaming content, so we
# need to return the raw response and not read any data yet
@@ -114,6 +122,14 @@
retry = urllib3.util.Retry(redirect=False)
r = super(ClosingHttp, self).request(method, url, retries=retry,
*args, **new_kwargs)
+
+ # Clearing the pool is necessary to free memory that holds certificates
+ # loaded by the HTTPConnection class in urllib3. This line can be
+ # removed once we require a newer version of urllib3 (e.g., 2.2.3) that
+ # does not retain certificates in memory for each HTTPConnection
+ # managed by the PoolManager.
+ self.clear()
+
if not kwargs.get('preload_content', True):
# This means we asked urllib3 for streaming content, so we
# need to return the raw response and not read any data yet
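
Both classes here subclass urllib3 pool managers, so self.clear() closes
and discards every pooled HTTPConnection, along with any per-connection
state (such as loaded certificates) it holds. The same call on a plain
PoolManager, as a minimal standalone sketch:

    import urllib3

    pm = urllib3.PoolManager()
    resp = pm.request('GET', 'http://example.com/')
    # Empty the pool; the next request will open a fresh connection.
    pm.clear()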
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 369efcc..be2b2d6 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -923,6 +923,19 @@
if not isinstance(exc, lib_exc.SSHTimeout):
LOG.debug('Network information on a devstack host')
+ def get_snapshot_id(self, bdms):
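+ """Return the snapshot_id found in a block_device_mapping list.
+
+ bdms may be a JSON string or an already-decoded list; None is
+ returned when no mapping carries a snapshot.
+ """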
+ if isinstance(bdms, str):
+ bdms = json.loads(bdms)
+ snapshot_id = None
+ for bdm in bdms:
+ # Look for the block device mapping that actually has a
+ # snapshot. If the server has ephemeral or swap disks, their
+ # block device mappings will be present with snapshot_id = None.
+ if 'snapshot_id' in bdm and bdm['snapshot_id'] is not None:
+ snapshot_id = bdm['snapshot_id']
+ break
+ return snapshot_id
+
def create_server_snapshot(self, server, name=None, **kwargs):
"""Creates server snapshot"""
# Glance client
@@ -949,20 +962,19 @@
snapshot_image = _image_client.show_image(image_id)
image_props = snapshot_image
- bdm = image_props.get('block_device_mapping')
- if bdm:
- bdm = json.loads(bdm)
- if bdm and 'snapshot_id' in bdm[0]:
- snapshot_id = bdm[0]['snapshot_id']
- self.addCleanup(
- self.snapshots_client.wait_for_resource_deletion,
- snapshot_id)
- self.addCleanup(test_utils.call_and_ignore_notfound_exc,
- self.snapshots_client.delete_snapshot,
- snapshot_id)
- waiters.wait_for_volume_resource_status(self.snapshots_client,
- snapshot_id,
- 'available')
+ bdms = image_props.get('block_device_mapping')
+ if bdms:
+ snapshot_id = self.get_snapshot_id(bdms)
+ self.assertIsNotNone(snapshot_id)
+ self.addCleanup(
+ self.snapshots_client.wait_for_resource_deletion,
+ snapshot_id)
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.snapshots_client.delete_snapshot,
+ snapshot_id)
+ waiters.wait_for_volume_resource_status(
+ self.snapshots_client, snapshot_id, 'available')
+
image_name = snapshot_image['name']
self.assertEqual(name, image_name)
LOG.debug("Created snapshot image %s for server %s",
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 5e28ecd..febc2f6 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -11,7 +11,6 @@
# under the License.
from oslo_log import log as logging
-from oslo_serialization import jsonutils as json
import testtools
from tempest.common import utils
@@ -245,8 +244,7 @@
bdms = image.get('block_device_mapping')
if not bdms:
bdms = image['properties']['block_device_mapping']
- bdms = json.loads(bdms)
- snapshot_id = bdms[0]['snapshot_id']
+ snapshot_id = self.get_snapshot_id(bdms)
self._delete_snapshot(snapshot_id)
# Now, delete the first server which will also delete the first