Merge "Add a boot-snapshot-boot test"
diff --git a/doc/source/supported_version.rst b/doc/source/supported_version.rst
index f630578..3d221c9 100644
--- a/doc/source/supported_version.rst
+++ b/doc/source/supported_version.rst
@@ -9,10 +9,10 @@
 
 Tempest master supports the below OpenStack Releases:
 
+* Zed
 * Yoga
 * Xena
 * Wallaby
-* Victoria
 
 For older OpenStack Release:
 
diff --git a/requirements.txt b/requirements.txt
index c4c7fcc..a118856 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,3 +21,4 @@
 PrettyTable>=0.7.1 # BSD
 urllib3>=1.21.1 # MIT
 debtcollector>=1.2.0 # Apache-2.0
+defusedxml>=0.7.1 # PSFL
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index a11bed8..7d5bd26 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -186,12 +186,10 @@
         """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(1024)
-        headers = {'Transfer-Encoding': 'chunked'}
         resp, _ = self.object_client.create_object(
             self.container_name,
             object_name,
             data=data_utils.chunkify(data, 512),
-            headers=headers,
             chunked=True)
 
         self.assertHeaders(resp, 'Object', 'PUT')
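
data_utils.chunkify is doing the streaming work in this test: it yields fixed-size slices of the payload so the body can be sent with chunked=True, letting the HTTP layer handle the transfer encoding instead of the test setting the header by hand. A minimal sketch of such a generator, assuming the usual slice-and-yield behaviour (chunkify_sketch is a hypothetical name):

    def chunkify_sketch(data, chunk_size):
        """Yield successive chunk_size slices of data (illustrative only)."""
        for i in range(0, len(data), chunk_size):
            yield data[i:i + chunk_size]
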
diff --git a/tempest/clients.py b/tempest/clients.py
index b7fa54a..a65c43b 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -118,7 +118,6 @@
             enable_instance_password=eip)
         self.server_groups_client = self.compute.ServerGroupsClient()
         self.limits_client = self.compute.LimitsClient()
-        self.compute_images_client = self.compute.ImagesClient()
         self.keypairs_client = self.compute.KeyPairsClient(
             ssh_key_type=CONF.validation.ssh_key_type)
         self.quotas_client = self.compute.QuotasClient()
@@ -158,6 +157,8 @@
             **params_volume)
         self.snapshots_extensions_client = self.compute.SnapshotsClient(
             **params_volume)
+        self.compute_images_client = self.compute.ImagesClient(
+            build_timeout=CONF.image.build_timeout)
 
     def _set_placement_clients(self):
         self.placement_client = self.placement.PlacementClient()
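
The compute images client is now built with the image service timeout: server-snapshot waits poll through this client, so CONF.image.build_timeout is the limit that matters rather than the compute default. A small sketch of how that timeout feeds the wait loop (clients stands in for a Manager instance, image_id for a server snapshot):

    # wait_for_image_status polls client.show_image(image_id) and gives up
    # only after client.build_timeout seconds, i.e. the image timeout above.
    waiters.wait_for_image_status(
        clients.compute_images_client, image_id, 'ACTIVE')
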
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 2c256a9..00f133e 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -291,11 +291,11 @@
     if multiple_create_request:
         # Get servers created which name match with name param.
         body_servers = clients.servers_client.list_servers()
-        servers = \
+        created_servers = \
             [s for s in body_servers['servers'] if s['name'].startswith(name)]
     else:
         body = rest_client.ResponseBody(body.response, body['server'])
-        servers = [body]
+        created_servers = [body]
 
     if wait_until:
 
@@ -307,11 +307,19 @@
             wait_until_extra = wait_until
             wait_until = 'ACTIVE'
 
-        for server in servers:
-            try:
-                waiters.wait_for_server_status(
+        servers = []
+        try:
+            # Wait for each server to reach the requested state and
+            # repopulate the servers list with the full waiter responses
+            # so that the 'addresses' field, needed later for the SSH
+            # validation, is present in every server.
+            for server in created_servers:
+                server = waiters.wait_for_server_status(
                     clients.servers_client, server['id'], wait_until,
                     request_id=request_id)
+                servers.append(server)
+
+            for server in servers:
                 if CONF.validation.run_validation and validatable:
                     if CONF.validation.connect_method == 'floating':
                         _setup_validation_fip(
@@ -322,31 +330,31 @@
                             server, clients, tenant_network,
                             validatable, validation_resources,
                             wait_until_extra, False)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                for server in created_servers:
+                    try:
+                        clients.servers_client.delete_server(
+                            server['id'])
+                    except Exception:
+                        LOG.exception('Deleting server %s failed',
+                                      server['id'])
+                for server in created_servers:
+                    # NOTE(artom) If the servers were booted with volumes
+                    # and with delete_on_termination=False we need to wait
+                    # for the servers to go away before proceeding with
+                    # cleanup, otherwise we'll attempt to delete the
+                    # volumes while they're still attached to servers that
+                    # are in the process of being deleted.
+                    try:
+                        waiters.wait_for_server_termination(
+                            clients.servers_client, server['id'])
+                    except Exception:
+                        LOG.exception('Server %s failed to delete in time',
+                                      server['id'])
+        return body, servers
 
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    for server in servers:
-                        try:
-                            clients.servers_client.delete_server(
-                                server['id'])
-                        except Exception:
-                            LOG.exception('Deleting server %s failed',
-                                          server['id'])
-                    for server in servers:
-                        # NOTE(artom) If the servers were booted with volumes
-                        # and with delete_on_termination=False we need to wait
-                        # for the servers to go away before proceeding with
-                        # cleanup, otherwise we'll attempt to delete the
-                        # volumes while they're still attached to servers that
-                        # are in the process of being deleted.
-                        try:
-                            waiters.wait_for_server_termination(
-                                clients.servers_client, server['id'])
-                        except Exception:
-                            LOG.exception('Server %s failed to delete in time',
-                                          server['id'])
-
-    return body, servers
+    return body, created_servers
 
 
 def shelve_server(servers_client, server_id, force_shelve_offload=False):
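
The new error path wraps cleanup in oslo's save_and_reraise_exception so a failed delete can never mask the original wait or validation failure. A stripped-down sketch of that pattern under hypothetical names (do_post_boot_work stands in for the wait and SSH-validation steps; created_servers, clients and LOG are the objects the function already has):

    from oslo_utils import excutils

    try:
        do_post_boot_work()
    except Exception:
        with excutils.save_and_reraise_exception():
            # Best-effort cleanup: each failure is logged, but the exception
            # the caller finally sees is the one that triggered this block.
            for server in created_servers:
                try:
                    clients.servers_client.delete_server(server['id'])
                except Exception:
                    LOG.exception('Deleting server %s failed', server['id'])
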
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index f207066..53582a6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -49,19 +49,19 @@
         # between the UNKNOWN->ACTIVE transition.
         # TODO(afazekas): enumerate and validate the stable status set
         if status == 'BUILD' and server_status != 'UNKNOWN':
-            return
+            return body
         if server_status == status:
             if ready_wait:
                 if status == 'BUILD':
-                    return
+                    return body
                 # NOTE(afazekas): The instance is in "ready for action state"
                 # when no task in progress
                 if task_state is None:
                     # without state api extension 3 sec usually enough
                     time.sleep(CONF.compute.ready_wait)
-                    return
+                    return body
             else:
-                return
+                return body
 
         time.sleep(client.build_interval)
         body = client.show_server(server_id)['server']
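
With these returns in place, wait_for_server_status hands back the last show_server body instead of None, so call sites can reuse the response the wait loop already fetched. A short usage sketch (servers_client and server_id are placeholders):

    server = waiters.wait_for_server_status(
        servers_client, server_id, 'ACTIVE')
    # 'addresses' is populated by the time the server goes ACTIVE, so no
    # extra show_server() round trip is needed before SSH validation.
    networks = list(server['addresses'])
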
diff --git a/tempest/lib/services/object_storage/account_client.py b/tempest/lib/services/object_storage/account_client.py
index 52b2534..d7ce526 100644
--- a/tempest/lib/services/object_storage/account_client.py
+++ b/tempest/lib/services/object_storage/account_client.py
@@ -14,8 +14,8 @@
 #    under the License.
 
 from urllib import parse as urllib
-from xml.etree import ElementTree as etree
 
+from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index 6d07ec1..ee87726 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -14,9 +14,9 @@
 #    under the License.
 
 from urllib import parse as urllib
-from xml.etree import ElementTree as etree
 
 import debtcollector.moves
+from defusedxml import ElementTree as etree
 from oslo_serialization import jsonutils as json
 
 from tempest.lib.common import rest_client
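
These clients use etree to parse the listings Swift returns, and defusedxml.ElementTree exposes the same fromstring() entry point while refusing entity-expansion and external-entity payloads, which is why the import swap (together with the new requirements.txt line) is a drop-in. A hedged sketch with a made-up listing:

    from defusedxml import ElementTree as etree

    listing = etree.fromstring(
        b'<container name="demo">'
        b'<object><name>obj1</name></object></container>')
    names = [obj.find('name').text for obj in listing.findall('object')]
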
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 1432180..2824677 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -144,6 +144,8 @@
             irrelevant-files: *tempest-irrelevant-files
         - tempest-ipv6-only:
             irrelevant-files: *tempest-irrelevant-files-3
+        - tempest-multinode-full-py3:
+            irrelevant-files: *tempest-irrelevant-files
         #- devstack-plugin-ceph-tempest-py3:
         #    irrelevant-files: *tempest-irrelevant-files
         #- tempest-full-centos-9-stream:
@@ -152,6 +154,7 @@
             irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
+        - nova-multi-cell
         - tempest-with-latest-microversion
         - tempest-stestr-master
         - tempest-cinder-v2-api: