Merge "Fix security vulnerabilities using Bandit"
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index a11bed8..7d5bd26 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -186,12 +186,10 @@
         """Test creating object with transfer_encoding"""
         object_name = data_utils.rand_name(name='TestObject')
         data = data_utils.random_bytes(1024)
-        headers = {'Transfer-Encoding': 'chunked'}
         resp, _ = self.object_client.create_object(
             self.container_name,
             object_name,
             data=data_utils.chunkify(data, 512),
-            headers=headers,
             chunked=True)
 
         self.assertHeaders(resp, 'Object', 'PUT')
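
data_utils.chunkify is what feeds this chunked PUT. A minimal sketch of
such a helper, assuming the usual generator approach (the real
tempest.lib.common.utils.data_utils implementation may differ in detail):

    def chunkify(data, chunksize):
        """Yield successive chunksize-sized slices of data."""
        for i in range(0, len(data), chunksize):
            yield data[i:i + chunksize]

With chunked=True the object client is expected to emit the
Transfer-Encoding: chunked header itself, which is why the hand-built
headers dict removed above had become redundant.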
diff --git a/tempest/clients.py b/tempest/clients.py
index b7fa54a..a65c43b 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -118,7 +118,6 @@
             enable_instance_password=eip)
         self.server_groups_client = self.compute.ServerGroupsClient()
         self.limits_client = self.compute.LimitsClient()
-        self.compute_images_client = self.compute.ImagesClient()
         self.keypairs_client = self.compute.KeyPairsClient(
             ssh_key_type=CONF.validation.ssh_key_type)
         self.quotas_client = self.compute.QuotasClient()
@@ -158,6 +157,8 @@
             **params_volume)
         self.snapshots_extensions_client = self.compute.SnapshotsClient(
             **params_volume)
+        self.compute_images_client = self.compute.ImagesClient(
+            build_timeout=CONF.image.build_timeout)
 
     def _set_placement_clients(self):
         self.placement_client = self.placement.PlacementClient()
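
Passing build_timeout=CONF.image.build_timeout makes image-related waits
(for example, on server snapshots) honor the image service's timeout rather
than the compute default. A hedged sketch of how a client-side waiter
typically consumes that value, assuming the usual build_timeout and
build_interval client attributes (the real tempest waiters raise tempest's
own timeout exception rather than TimeoutError):

    import time

    def wait_for_image_status(client, image_id, status):
        # Poll until the image reaches the desired status or the
        # client's build_timeout elapses.
        start = time.time()
        while time.time() - start < client.build_timeout:
            body = client.show_image(image_id)['image']
            if body['status'] == status:
                return body
            time.sleep(client.build_interval)
        raise TimeoutError('Image %s did not reach %s within %ss'
                           % (image_id, status, client.build_timeout))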
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 2c256a9..00f133e 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -291,11 +291,11 @@
     if multiple_create_request:
         # Get the created servers whose names match the name param.
         body_servers = clients.servers_client.list_servers()
-        servers = \
+        created_servers = \
             [s for s in body_servers['servers'] if s['name'].startswith(name)]
     else:
         body = rest_client.ResponseBody(body.response, body['server'])
-        servers = [body]
+        created_servers = [body]
 
     if wait_until:
 
@@ -307,11 +307,19 @@
             wait_until_extra = wait_until
             wait_until = 'ACTIVE'
 
-        for server in servers:
-            try:
-                waiters.wait_for_server_status(
+        servers = []
+        try:
+            # Wait for each server to reach the requested state, and
+            # rebuild the servers list from the full server responses so
+            # that every entry carries the 'addresses' field needed later
+            # when waiting for SSH to become reachable.
+            for server in created_servers:
+                server = waiters.wait_for_server_status(
                     clients.servers_client, server['id'], wait_until,
                     request_id=request_id)
+                servers.append(server)
+
+            for server in servers:
                 if CONF.validation.run_validation and validatable:
                     if CONF.validation.connect_method == 'floating':
                         _setup_validation_fip(
@@ -322,31 +330,31 @@
                             server, clients, tenant_network,
                             validatable, validation_resources,
                             wait_until_extra, False)
+        except Exception:
+            with excutils.save_and_reraise_exception():
+                for server in created_servers:
+                    try:
+                        clients.servers_client.delete_server(
+                            server['id'])
+                    except Exception:
+                        LOG.exception('Deleting server %s failed',
+                                      server['id'])
+                for server in created_servers:
+                    # NOTE(artom) If the servers were booted with volumes
+                    # and with delete_on_termination=False we need to wait
+                    # for the servers to go away before proceeding with
+                    # cleanup, otherwise we'll attempt to delete the
+                    # volumes while they're still attached to servers that
+                    # are in the process of being deleted.
+                    try:
+                        waiters.wait_for_server_termination(
+                            clients.servers_client, server['id'])
+                    except Exception:
+                        LOG.exception('Server %s failed to delete in time',
+                                      server['id'])
+        return body, servers
 
-            except Exception:
-                with excutils.save_and_reraise_exception():
-                    for server in servers:
-                        try:
-                            clients.servers_client.delete_server(
-                                server['id'])
-                        except Exception:
-                            LOG.exception('Deleting server %s failed',
-                                          server['id'])
-                    for server in servers:
-                        # NOTE(artom) If the servers were booted with volumes
-                        # and with delete_on_termination=False we need to wait
-                        # for the servers to go away before proceeding with
-                        # cleanup, otherwise we'll attempt to delete the
-                        # volumes while they're still attached to servers that
-                        # are in the process of being deleted.
-                        try:
-                            waiters.wait_for_server_termination(
-                                clients.servers_client, server['id'])
-                        except Exception:
-                            LOG.exception('Server %s failed to delete in time',
-                                          server['id'])
-
-    return body, servers
+    return body, created_servers
 
 
 def shelve_server(servers_client, server_id, force_shelve_offload=False):
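
The restructured error path above wraps all of the per-server waiting in a
single try block, so a failure on any server triggers best-effort cleanup of
every created server. A minimal sketch of the save_and_reraise_exception
pattern it relies on (do_risky_setup and cleanup_best_effort are
hypothetical stand-ins):

    from oslo_utils import excutils

    try:
        do_risky_setup()  # hypothetical: the status and SSH waits above
    except Exception:
        # The original exception and traceback are re-raised once the
        # with-block's cleanup attempts have run.
        with excutils.save_and_reraise_exception():
            cleanup_best_effort()  # hypothetical: the delete/termination loops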
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index f207066..53582a6 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -49,19 +49,19 @@
         # between the UNKNOWN->ACTIVE transition.
         # TODO(afazekas): enumerate and validate the stable status set
         if status == 'BUILD' and server_status != 'UNKNOWN':
-            return
+            return body
         if server_status == status:
             if ready_wait:
                 if status == 'BUILD':
-                    return
+                    return body
                 # NOTE(afazekas): The instance is in the "ready for action"
                 # state when no task is in progress
                 if task_state is None:
                     # without the state API extension, 3 sec is usually enough
                     time.sleep(CONF.compute.ready_wait)
-                    return
+                    return body
             else:
-                return
+                return body
 
         time.sleep(client.build_interval)
         body = client.show_server(server_id)['server']
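
Returning body from every exit point turns wait_for_server_status into a
source of the refreshed server document, which is what the compute changes
above build on. A short usage sketch:

    # The waiter now hands back the final server body, so callers can read
    # fields such as 'addresses' without a second show_server call.
    server = waiters.wait_for_server_status(
        servers_client, server_id, 'ACTIVE')
    addresses = server['addresses']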
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 3cc3fda..d3b8e42 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -34,6 +34,8 @@
         - glance-multistore-cinder-import:
             voting: false
             irrelevant-files: *tempest-irrelevant-files
+        - tempest-full-zed:
+            irrelevant-files: *tempest-irrelevant-files
         - tempest-full-yoga:
             irrelevant-files: *tempest-irrelevant-files
         - tempest-full-xena:
@@ -150,6 +152,7 @@
             irrelevant-files: *tempest-irrelevant-files
     experimental:
       jobs:
+        - nova-multi-cell
         - tempest-with-latest-microversion
         - tempest-stestr-master
         - tempest-cinder-v2-api:
@@ -168,9 +171,11 @@
             irrelevant-files: *tempest-irrelevant-files
     periodic-stable:
       jobs:
+        - tempest-full-zed
         - tempest-full-yoga
         - tempest-full-xena
         - tempest-full-wallaby-py3
+        - tempest-slow-zed
         - tempest-slow-yoga
         - tempest-slow-xena
         - tempest-slow-wallaby
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index d1445c0..6d97fad 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -1,5 +1,10 @@
 # NOTE(gmann): This file includes all stable release jobs definition.
 - job:
+    name: tempest-full-zed
+    parent: tempest-full-py3
+    override-checkout: stable/zed
+
+- job:
     name: tempest-full-yoga
     parent: tempest-full-py3
     override-checkout: stable/yoga
@@ -15,6 +20,11 @@
     override-checkout: stable/wallaby
 
 - job:
+    name: tempest-slow-zed
+    parent: tempest-slow-py3
+    override-checkout: stable/zed
+
+- job:
     name: tempest-slow-yoga
     parent: tempest-slow-py3
     override-checkout: stable/yoga