Merge "Add tempest-multinode-full-py3 job in gate pipeline"
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index f630578..3d221c9 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,10 +9,10 @@
Tempest master supports the below OpenStack Releases:
+* Zed
* Yoga
* Xena
* Wallaby
-* Victoria
For older OpenStack Release:
diff --git a/releasenotes/notes/tempest-zed-release-335293c4a7f5a4b1.yaml b/releasenotes/notes/tempest-zed-release-335293c4a7f5a4b1.yaml
new file mode 100644
index 0000000..841aa5d
--- /dev/null
+++ b/releasenotes/notes/tempest-zed-release-335293c4a7f5a4b1.yaml
@@ -0,0 +1,17 @@
+---
+prelude: |
+ This release is to tag Tempest for the OpenStack Zed release.
+ This release marks the start of Zed release support in Tempest.
+ After this release, Tempest will support the below OpenStack Releases:
+
+ * Zed
+ * Yoga
+ * Xena
+ * Wallaby
+
+ Current development of Tempest is for the OpenStack 2023.1 development
+ cycle. Every Tempest commit is also tested against master during the
+ 2023.1 cycle. However, this does not necessarily mean that using
+ Tempest as of this tag will work against a 2023.1 (or future release)
+ cloud.
+ To be on the safe side, use this tag to test the OpenStack Zed release.
diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst
index 9b5aad3..b36be01 100644
--- a/releasenotes/source/index.rst
+++ b/releasenotes/source/index.rst
@@ -6,6 +6,7 @@
:maxdepth: 1
unreleased
+ v32.0.0
v31.1.0
v31.0.0
v30.0.0
diff --git a/releasenotes/source/v32.0.0.rst b/releasenotes/source/v32.0.0.rst
new file mode 100644
index 0000000..e4c2cea
--- /dev/null
+++ b/releasenotes/source/v32.0.0.rst
@@ -0,0 +1,5 @@
+=====================
+v32.0.0 Release Notes
+=====================
+.. release-notes:: 32.0.0 Release Notes
+ :version: 32.0.0
diff --git a/requirements.txt b/requirements.txt
index c4c7fcc..a118856 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,3 +21,4 @@
PrettyTable>=0.7.1 # BSD
urllib3>=1.21.1 # MIT
debtcollector>=1.2.0 # Apache-2.0
+defusedxml>=0.7.1 # PSFL
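
The new defusedxml requirement backs the parser swap in the object-storage clients further down. A minimal sketch of what it buys, assuming only the defusedxml package: parsing untrusted XML that declares entities raises instead of expanding them.

    from defusedxml import ElementTree as etree
    from defusedxml.common import EntitiesForbidden

    # A "billion laughs" style payload: stdlib xml.etree would expand the
    # entities in memory; defusedxml refuses to parse them at all.
    MALICIOUS = (
        '<?xml version="1.0"?>'
        '<!DOCTYPE bomb [<!ENTITY a "aaaa"><!ENTITY b "&a;&a;&a;&a;">]>'
        '<bomb>&b;</bomb>'
    )

    try:
        etree.fromstring(MALICIOUS)
    except EntitiesForbidden:
        print('entity declarations rejected')
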
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 96031ac..7e647dd 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -49,12 +49,12 @@
raise cls.skipException('Server does not support '
'any import method')
- def _create_image(self):
+ def _create_image(self, disk_format=None, container_format=None):
# Create image
uuid = '00000000-1111-2222-3333-444455556666'
image_name = data_utils.rand_name('image')
- container_format = CONF.image.container_formats[0]
- disk_format = CONF.image.disk_formats[0]
+ container_format = container_format or CONF.image.container_formats[0]
+ disk_format = disk_format or CONF.image.disk_formats[0]
image = self.create_image(name=image_name,
container_format=container_format,
disk_format=disk_format,
@@ -134,6 +134,141 @@
import_params={'uri': image_uri})
waiters.wait_for_image_imported_to_stores(self.client, image['id'])
+ @decorators.idempotent_id('8876c818-c40e-4b90-9742-31d231616305')
+ def test_image_glance_download_import_success(self):
+ # We use glance-direct initially, then glance-download for the test
+ self._require_import_method('glance-direct')
+ self._require_import_method('glance-download')
+
+ # Create an image via the normal import process to be our source
+ src = self._stage_and_check()
+ self.client.image_import(src, method='glance-direct')
+ waiters.wait_for_image_imported_to_stores(self.client, src)
+
+ # Add some properties to it that will be copied by the default
+ # config (and one that won't)
+ self.client.update_image(src, [
+ {'add': '/hw_cpu_cores', 'value': '5'},
+ {'add': '/trait:STORAGE_DISK_SSD', 'value': 'required'},
+ {'add': '/os_distro', 'value': 'rhel'},
+ {'add': '/speed', 'value': '88mph'},
+ ])
+
+ # Make sure our properties stuck on the source image
+ src_image = self.client.show_image(src)
+ self.assertEqual('5', src_image['hw_cpu_cores'])
+ self.assertEqual('required', src_image['trait:STORAGE_DISK_SSD'])
+ self.assertEqual('rhel', src_image['os_distro'])
+ self.assertEqual('88mph', src_image['speed'])
+
+ # Create a new image which we will fill from another glance image
+ dst = self._create_image(container_format='ovf',
+ disk_format='iso')['id']
+
+ # Set some values that will conflict to make sure we get the
+ # new ones and confirm they stuck before the import.
+ self.client.update_image(dst, [
+ {'add': '/hw_cpu_cores', 'value': '1'},
+ {'add': '/os_distro', 'value': 'windows'},
+ ])
+ dst_image = self.client.show_image(dst)
+ self.assertEqual('1', dst_image['hw_cpu_cores'])
+ self.assertEqual('windows', dst_image['os_distro'])
+
+ params = {
+ 'glance_image_id': src,
+ 'glance_region': self.client.region,
+ 'glance_service_interface': 'public',
+ }
+ self.client.image_import(dst, method='glance-download',
+ import_params=params)
+ waiters.wait_for_image_tasks_status(self.client, dst, 'success')
+
+ # Make sure the new image has all the keys imported from the
+ # original image that we expect
+ dst_image = self.client.show_image(dst)
+ self.assertEqual(src_image['disk_format'], dst_image['disk_format'])
+ self.assertEqual(src_image['container_format'],
+ dst_image['container_format'])
+ self.assertEqual('5', dst_image['hw_cpu_cores'])
+ self.assertEqual('required', dst_image['trait:STORAGE_DISK_SSD'])
+ self.assertEqual('rhel', dst_image['os_distro'])
+ self.assertNotIn('speed', dst_image)
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('36d4b546-64a2-4bb9-bdd0-ba676aa48f2c')
+ def test_image_glance_download_import_bad_uuid(self):
+ self._require_import_method('glance-download')
+ image_id = self._create_image()['id']
+ params = {
+ 'glance_image_id': 'foo',
+ 'glance_region': self.client.region,
+ 'glance_service_interface': 'public',
+ }
+
+ # A non-UUID-like image id should make us fail immediately
+ e = self.assertRaises(lib_exc.BadRequest,
+ self.client.image_import,
+ image_id, method='glance-download',
+ import_params=params)
+ self.assertIn('image id does not look like a UUID', str(e))
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('77644240-dbbe-4744-ae28-09b2ac12e218')
+ def test_image_glance_download_import_bad_endpoint(self):
+ self._require_import_method('glance-download')
+ image_id = self._create_image()['id']
+
+ # Set some properties before the import to make sure they are
+ # undisturbed
+ self.client.update_image(image_id, [
+ {'add': '/hw_cpu_cores', 'value': '1'},
+ {'add': '/os_distro', 'value': 'windows'},
+ ])
+ image = self.client.show_image(image_id)
+ self.assertEqual('1', image['hw_cpu_cores'])
+ self.assertEqual('windows', image['os_distro'])
+
+ params = {
+ 'glance_image_id': '36d4b546-64a2-4bb9-bdd0-ba676aa48f2c',
+ 'glance_region': 'not a region',
+ 'glance_service_interface': 'not an interface',
+ }
+
+ # A bad region or interface will cause us to fail when we
+ # contact the remote glance.
+ self.client.image_import(image_id, method='glance-download',
+ import_params=params)
+ waiters.wait_for_image_tasks_status(self.client, image_id, 'failure')
+
+ # Make sure we reverted the image status to queued on failure, and that
+ # our extra properties are still in place.
+ image = self.client.show_image(image_id)
+ self.assertEqual('queued', image['status'])
+ self.assertEqual('1', image['hw_cpu_cores'])
+ self.assertEqual('windows', image['os_distro'])
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('c7edec8e-24b5-416a-9d42-b3e773bab62c')
+ def test_image_glance_download_import_bad_missing_image(self):
+ self._require_import_method('glance-download')
+ image_id = self._create_image()['id']
+ params = {
+ 'glance_image_id': '36d4b546-64a2-4bb9-bdd0-ba676aa48f2c',
+ 'glance_region': self.client.region,
+ 'glance_service_interface': 'public',
+ }
+
+ # A non-existent image will cause us to fail when we
+ # contact the remote glance.
+ self.client.image_import(image_id, method='glance-download',
+ import_params=params)
+ waiters.wait_for_image_tasks_status(self.client, image_id, 'failure')
+
+ # Make sure we reverted the image status to queued on failure
+ image = self.client.show_image(image_id)
+ self.assertEqual('queued', image['status'])
+
@decorators.idempotent_id('e04761a1-22af-42c2-b8bc-a34a3f12b585')
def test_remote_import(self):
"""Test image import against a different worker than stage.
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index a11bed8..7d5bd26 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -186,12 +186,10 @@
"""Test creating object with transfer_encoding"""
object_name = data_utils.rand_name(name='TestObject')
data = data_utils.random_bytes(1024)
- headers = {'Transfer-Encoding': 'chunked'}
resp, _ = self.object_client.create_object(
self.container_name,
object_name,
data=data_utils.chunkify(data, 512),
- headers=headers,
chunked=True)
self.assertHeaders(resp, 'Object', 'PUT')
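
The dropped header is redundant: with chunked=True the client already sends the body with Transfer-Encoding: chunked. The same convention holds outside tempest; a sketch with requests (URL and token are placeholders), where a generator body alone triggers chunked transfer encoding.

    import requests

    def chunks(data, size=512):
        for i in range(0, len(data), size):
            yield data[i:i + size]

    data = b'x' * 1024
    # requests sees a generator body and switches to chunked transfer
    # encoding on its own; setting the header by hand is unnecessary.
    requests.put(
        'https://swift.example.com/v1/AUTH_demo/c/obj',  # placeholder URL
        data=chunks(data),
        headers={'X-Auth-Token': 'token'},               # placeholder token
    )
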
diff --git a/tempest/clients.py b/tempest/clients.py
index b7fa54a..a65c43b 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -118,7 +118,6 @@
enable_instance_password=eip)
self.server_groups_client = self.compute.ServerGroupsClient()
self.limits_client = self.compute.LimitsClient()
- self.compute_images_client = self.compute.ImagesClient()
self.keypairs_client = self.compute.KeyPairsClient(
ssh_key_type=CONF.validation.ssh_key_type)
self.quotas_client = self.compute.QuotasClient()
@@ -158,6 +157,8 @@
**params_volume)
self.snapshots_extensions_client = self.compute.SnapshotsClient(
**params_volume)
+ self.compute_images_client = self.compute.ImagesClient(
+ build_timeout=CONF.image.build_timeout)
def _set_placement_clients(self):
self.placement_client = self.placement.PlacementClient()
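
Context for the move: tempest waiters poll using the timeout carried by the client they are handed, so constructing the images client with CONF.image.build_timeout keeps image waits from inheriting a possibly shorter compute timeout. A simplified sketch of that pattern; names are illustrative, not tempest's.

    import time

    def wait_for(client, is_done):
        # Generic poll loop: the deadline comes from the client object,
        # which is why the timeout passed at construction time matters.
        start = time.time()
        while not is_done():
            if time.time() - start >= client.build_timeout:
                raise TimeoutError(
                    'resource not ready within %ss' % client.build_timeout)
            time.sleep(client.build_interval)
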
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index eb7e366..00f133e 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -209,13 +209,6 @@
kwargs.get('max_count', 0)) > 1)
if CONF.validation.run_validation and validatable:
- # As a first implementation, multiple pingable or sshable servers will
- # not be supported
- if multiple_create_request:
- msg = ("Multiple pingable or sshable servers not supported at "
- "this stage.")
- raise ValueError(msg)
-
LOG.debug("Provisioning test server with validation resources %s",
validation_resources)
if 'security_groups' in kwargs:
@@ -298,11 +291,11 @@
if multiple_create_request:
# Get servers created which name match with name param.
body_servers = clients.servers_client.list_servers()
- servers = \
+ created_servers = \
[s for s in body_servers['servers'] if s['name'].startswith(name)]
else:
body = rest_client.ResponseBody(body.response, body['server'])
- servers = [body]
+ created_servers = [body]
if wait_until:
@@ -314,11 +307,19 @@
wait_until_extra = wait_until
wait_until = 'ACTIVE'
- for server in servers:
- try:
- waiters.wait_for_server_status(
+ servers = []
+ try:
+ # Wait for each server to reach the requested state and populate the
+ # servers list with the full server responses, so that the 'addresses'
+ # field needed by the ssh validation below is present in each server.
+ for server in created_servers:
+ server = waiters.wait_for_server_status(
clients.servers_client, server['id'], wait_until,
request_id=request_id)
+ servers.append(server)
+
+ for server in servers:
if CONF.validation.run_validation and validatable:
if CONF.validation.connect_method == 'floating':
_setup_validation_fip(
@@ -329,31 +330,31 @@
server, clients, tenant_network,
validatable, validation_resources,
wait_until_extra, False)
+ except Exception:
+ with excutils.save_and_reraise_exception():
+ for server in created_servers:
+ try:
+ clients.servers_client.delete_server(
+ server['id'])
+ except Exception:
+ LOG.exception('Deleting server %s failed',
+ server['id'])
+ for server in created_servers:
+ # NOTE(artom) If the servers were booted with volumes
+ # and with delete_on_termination=False we need to wait
+ # for the servers to go away before proceeding with
+ # cleanup, otherwise we'll attempt to delete the
+ # volumes while they're still attached to servers that
+ # are in the process of being deleted.
+ try:
+ waiters.wait_for_server_termination(
+ clients.servers_client, server['id'])
+ except Exception:
+ LOG.exception('Server %s failed to delete in time',
+ server['id'])
+ return body, servers
- except Exception:
- with excutils.save_and_reraise_exception():
- for server in servers:
- try:
- clients.servers_client.delete_server(
- server['id'])
- except Exception:
- LOG.exception('Deleting server %s failed',
- server['id'])
- for server in servers:
- # NOTE(artom) If the servers were booted with volumes
- # and with delete_on_termination=False we need to wait
- # for the servers to go away before proceeding with
- # cleanup, otherwise we'll attempt to delete the
- # volumes while they're still attached to servers that
- # are in the process of being deleted.
- try:
- waiters.wait_for_server_termination(
- clients.servers_client, server['id'])
- except Exception:
- LOG.exception('Server %s failed to delete in time',
- server['id'])
-
- return body, servers
+ return body, created_servers
def shelve_server(servers_client, server_id, force_shelve_offload=False):
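
The restructured flow above, condensed; a sketch assuming a tempest-style clients object and a validate callable. Every created server is waited on first, so each entry in servers carries the full response body (including 'addresses'); validation runs afterwards, and every created server is cleaned up if anything fails.

    from tempest.common import waiters

    def wait_then_validate(clients, created_servers, validate):
        servers = []
        try:
            for server in created_servers:
                # wait_for_server_status now returns the final server body
                servers.append(waiters.wait_for_server_status(
                    clients.servers_client, server['id'], 'ACTIVE'))
            for server in servers:
                validate(server)  # e.g. ssh via server['addresses']
        except Exception:
            for server in created_servers:
                clients.servers_client.delete_server(server['id'])
            raise
        return servers
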
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index f207066..53582a6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -49,19 +49,19 @@
# between the UNKNOWN->ACTIVE transition.
# TODO(afazekas): enumerate and validate the stable status set
if status == 'BUILD' and server_status != 'UNKNOWN':
- return
+ return body
if server_status == status:
if ready_wait:
if status == 'BUILD':
- return
+ return body
# NOTE(afazekas): The instance is in "ready for action state"
# when no task in progress
if task_state is None:
# without state api extension 3 sec usually enough
time.sleep(CONF.compute.ready_wait)
- return
+ return body
else:
- return
+ return body
time.sleep(client.build_interval)
body = client.show_server(server_id)['server']
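
With the waiter now returning the final body, callers no longer need a follow-up show_server call to read fields populated during the wait. A usage sketch, assuming a servers client and a server id are in scope:

    from tempest.common import waiters

    server = waiters.wait_for_server_status(
        servers_client, server_id, 'ACTIVE')
    addresses = server['addresses']  # present in the returned body
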
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 52b2534..d7ce526 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -14,8 +14,8 @@
# under the License.
from urllib import parse as urllib
-from xml.etree import ElementTree as etree
+from defusedxml import ElementTree as etree
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 6d07ec1..ee87726 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -14,9 +14,9 @@
# under the License.
from urllib import parse as urllib
-from xml.etree import ElementTree as etree
import debtcollector.moves
+from defusedxml import ElementTree as etree
from oslo_serialization import jsonutils as json
from tempest.lib.common import rest_client
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 445b45f..2824677 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -34,6 +34,8 @@
- glance-multistore-cinder-import:
voting: false
irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-zed:
+ irrelevant-files: *tempest-irrelevant-files
- tempest-full-yoga:
irrelevant-files: *tempest-irrelevant-files
- tempest-full-xena:
@@ -171,9 +173,11 @@
irrelevant-files: *tempest-irrelevant-files
periodic-stable:
jobs:
+ - tempest-full-zed
- tempest-full-yoga
- tempest-full-xena
- tempest-full-wallaby-py3
+ - tempest-slow-zed
- tempest-slow-yoga
- tempest-slow-xena
- tempest-slow-wallaby
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index d1445c0..6d97fad 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,10 @@
# NOTE(gmann): This file includes all stable release jobs definition.
- job:
+ name: tempest-full-zed
+ parent: tempest-full-py3
+ override-checkout: stable/zed
+
+- job:
name: tempest-full-yoga
parent: tempest-full-py3
override-checkout: stable/yoga
@@ -15,6 +20,11 @@
override-checkout: stable/wallaby
- job:
+ name: tempest-slow-zed
+ parent: tempest-slow-py3
+ override-checkout: stable/zed
+
+- job:
name: tempest-slow-yoga
parent: tempest-slow-py3
override-checkout: stable/yoga