Merge "Add periodic job to experimental queue"
diff --git a/HACKING.rst b/HACKING.rst
index 17e2a49..caf954b 100644
--- a/HACKING.rst
+++ b/HACKING.rst
@@ -194,6 +194,13 @@
 attribute should be sparingly applied to only the tests that sanity-check the
 most essential functionality of an OpenStack cloud.
 
+Multinode Attribute
+^^^^^^^^^^^^^^^^^^^
+The ``type='multinode'`` attribute signifies that a test is intended to be
+run in a multinode environment. By marking tests with this attribute we can
+avoid running tests which provide little benefit in a multinode setup, and
+thus reduce resource consumption.
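+
+For example, the attribute can be applied with the standard ``tempest.lib``
+decorator (a minimal sketch; the test name is illustrative)::
+
+    from tempest.lib import decorators
+
+    @decorators.attr(type='multinode')
+    def test_cold_migration(self):
+        ...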
+
 Test fixtures and resources
 ---------------------------
 Test level resources should be cleaned-up after the test execution. Clean-up
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 1d05f13..b723977 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -819,21 +819,8 @@
         # Add a new location
         new_loc = {'metadata': {'foo': 'bar'},
                    'url': CONF.image.http_image}
-
-        # NOTE(danms): If glance was unable to fetch the remote image via
-        # HTTP, it will return BadRequest. Because this can be transient in
-        # CI, we try this a few times before we agree that it has failed
-        # for a reason worthy of failing the test.
-        for i in range(BAD_REQUEST_RETRIES):
-            try:
-                self.client.update_image(image['id'], [
-                    dict(add='/locations/-', value=new_loc)])
-                break
-            except lib_exc.BadRequest:
-                if i + 1 == BAD_REQUEST_RETRIES:
-                    raise
-                else:
-                    time.sleep(1)
+        self._update_image_with_retries(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
 
         # The image should now be active, with one location that looks
         # like we expect
@@ -858,20 +845,14 @@
     def test_set_location(self):
         self._check_set_location()
 
-    def _check_set_multiple_locations(self):
-        image = self._check_set_location()
-
-        new_loc = {'metadata': {'speed': '88mph'},
-                   'url': '%s#new' % CONF.image.http_image}
-
+    def _update_image_with_retries(self, image, patch):
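+        """Update an image, retrying on transient BadRequest failures.
+
+        :param image: the id of the image to update
+        :param patch: the JSON-patch list of operations to apply via
+                      update_image()
+        """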
         # NOTE(danms): If glance was unable to fetch the remote image via
         # HTTP, it will return BadRequest. Because this can be transient in
         # CI, we try this a few times before we agree that it has failed
         # for a reason worthy of failing the test.
         for i in range(BAD_REQUEST_RETRIES):
             try:
-                self.client.update_image(image['id'], [
-                    dict(add='/locations/-', value=new_loc)])
+                self.client.update_image(image, patch)
                 break
             except lib_exc.BadRequest:
                 if i + 1 == BAD_REQUEST_RETRIES:
@@ -879,6 +860,15 @@
                 else:
                     time.sleep(1)
 
+    def _check_set_multiple_locations(self):
+        image = self._check_set_location()
+
+        new_loc = {'metadata': {'speed': '88mph'},
+                   'url': '%s#new' % CONF.image.http_image}
+        self._update_image_with_retries(image['id'],
+                                        [dict(add='/locations/-',
+                                              value=new_loc)])
+
         # The image should now have two locations and the last one
         # (locations are ordered) should have the new URL.
         image = self.client.show_image(image['id'])
@@ -989,8 +979,9 @@
                                        'os_hash_algo': 'sha512'},
                    'metadata': {},
                    'url': CONF.image.http_image}
-        self.client.update_image(image['id'], [
-            dict(add='/locations/-', value=new_loc)])
+        self._update_image_with_retries(image['id'],
+                                        [dict(add='/locations/-',
+                                              value=new_loc)])
 
         # Expect that all of our values ended up on the image
         image = self.client.show_image(image['id'])
@@ -1017,8 +1008,9 @@
                                 'os_hash_algo': orig_image['os_hash_algo']},
             'metadata': {},
             'url': '%s#new' % CONF.image.http_image}
-        self.client.update_image(orig_image['id'], [
-            dict(add='/locations/-', value=new_loc)])
+        self._update_image_with_retries(orig_image['id'],
+                                        [dict(add='/locations/-',
+                                              value=new_loc)])
 
         # Setting the same exact values on a new location should work
         image = self.client.show_image(orig_image['id'])
@@ -1052,17 +1044,17 @@
 
             # This should always fail due to the mismatch
             self.assertRaises(lib_exc.Conflict,
-                              self.client.update_image,
-                              orig_image['id'], [
-                                  dict(add='/locations/-', value=new_loc)])
+                              self._update_image_with_retries,
+                              orig_image['id'],
+                              [dict(add='/locations/-', value=new_loc)])
 
         # Now try to add a new location with all of the substitutions,
         # which should also fail
         new_loc['validation_data'] = values
         self.assertRaises(lib_exc.Conflict,
-                          self.client.update_image,
-                          orig_image['id'], [
-                              dict(add='/locations/-', value=new_loc)])
+                          self._update_image_with_retries,
+                          orig_image['id'],
+                          [dict(add='/locations/-', value=new_loc)])
 
         # Make sure nothing has changed on our image after all the
         # above failures
diff --git a/tempest/api/volume/base.py b/tempest/api/volume/base.py
index 49f9e22..9ba9949 100644
--- a/tempest/api/volume/base.py
+++ b/tempest/api/volume/base.py
@@ -20,6 +20,7 @@
 from tempest.lib.common.utils import data_utils
 from tempest.lib.common.utils import test_utils
 from tempest.lib.decorators import cleanup_order
+from tempest.lib import exceptions as lib_exc
 import tempest.test
 
 CONF = config.CONF
@@ -126,12 +127,32 @@
 
         volume = self.volumes_client.create_volume(**kwargs)['volume']
         self.cleanup(test_utils.call_and_ignore_notfound_exc,
-                     self.delete_volume, self.volumes_client, volume['id'])
+                     self._delete_volume_for_cleanup,
+                     self.volumes_client, volume['id'])
         if wait_until:
             waiters.wait_for_volume_resource_status(self.volumes_client,
                                                     volume['id'], wait_until)
         return volume
 
+    @staticmethod
+    def _delete_volume_for_cleanup(volumes_client, volume_id):
+        """Delete a volume (only) for cleanup.
+
+        If it is attached to a server, wait for it to become available,
+        assuming we have already deleted the server and just need nova to
+        complete the delete operation before it is available to be deleted.
+        Otherwise proceed to the regular delete_volume().
+        """
+        try:
+            vol = volumes_client.show_volume(volume_id)['volume']
+            if vol['status'] == 'in-use':
+                waiters.wait_for_volume_resource_status(volumes_client,
+                                                        volume_id,
+                                                        'available')
+        except lib_exc.NotFound:
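+            # The volume is already gone; fall through to delete_volume(),
+            # whose NotFound will be swallowed by the
+            # call_and_ignore_notfound_exc wrapper registered in
+            # create_volume() above.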
+            pass
+        BaseVolumeTest.delete_volume(volumes_client, volume_id)
+
     @cleanup_order
     def create_snapshot(self, volume_id=1, **kwargs):
         """Wrapper utility that returns a test snapshot."""
@@ -183,15 +204,17 @@
         snapshots_client.delete_snapshot(snapshot_id)
         snapshots_client.wait_for_resource_deletion(snapshot_id)
 
-    def attach_volume(self, server_id, volume_id):
+    def attach_volume(self, server_id, volume_id, wait_for_detach=True):
         """Attach a volume to a server"""
         self.servers_client.attach_volume(
             server_id, volumeId=volume_id,
             device='/dev/%s' % CONF.compute.volume_device_name)
         waiters.wait_for_volume_resource_status(self.volumes_client,
                                                 volume_id, 'in-use')
-        self.addCleanup(waiters.wait_for_volume_resource_status,
-                        self.volumes_client, volume_id, 'available')
+        if wait_for_detach:
+            self.addCleanup(waiters.wait_for_volume_resource_status,
+                            self.volumes_client, volume_id, 'available',
+                            server_id, self.servers_client)
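+        # addCleanup callbacks run in LIFO order: the detach below is
+        # issued first, then the waiter registered above confirms the
+        # volume has returned to 'available'.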
         self.addCleanup(self.servers_client.detach_volume, server_id,
                         volume_id)
 
diff --git a/tempest/api/volume/test_volumes_snapshots.py b/tempest/api/volume/test_volumes_snapshots.py
index b3a04f8..95521e7 100644
--- a/tempest/api/volume/test_volumes_snapshots.py
+++ b/tempest/api/volume/test_volumes_snapshots.py
@@ -44,12 +44,17 @@
     @utils.services('compute')
     def test_snapshot_create_delete_with_volume_in_use(self):
         """Test create/delete snapshot from volume attached to server"""
-        # Create a test instance
-        server = self.create_server(wait_until='SSHABLE')
         # NOTE(zhufl) Here we create volume from self.image_ref for adding
         # coverage for "creating snapshot from non-blank volume".
         volume = self.create_volume(imageRef=self.image_ref)
-        self.attach_volume(server['id'], volume['id'])
+
+        # Create a test instance
+        server = self.create_server(wait_until='SSHABLE')
+
+        # NOTE(danms): We are attaching this volume to a server, but we do
+        # not need to block on detach during cleanup because we will be
+        # deleting the server anyway.
+        self.attach_volume(server['id'], volume['id'], wait_for_detach=False)
 
         # Snapshot a volume which attached to an instance with force=False
         self.assertRaises(lib_exc.BadRequest, self.create_snapshot,
@@ -81,7 +86,11 @@
 
         # Create a server and attach it
         server = self.create_server(wait_until='SSHABLE')
-        self.attach_volume(server['id'], self.volume_origin['id'])
+        # NOTE(danms): We are attaching this volume to a server, but we do
+        # not need to block on detach during cleanup because we will be
+        # deleting the server anyway.
+        self.attach_volume(server['id'], self.volume_origin['id'],
+                           wait_for_detach=False)
 
         # Now that the volume is attached, create other snapshots
         snapshot2 = self.create_snapshot(self.volume_origin['id'], force=True)
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 45a7b8a..c5da412 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -303,12 +303,16 @@
     raise lib_exc.TimeoutException(message)
 
 
-def wait_for_volume_resource_status(client, resource_id, status):
+def wait_for_volume_resource_status(client, resource_id, status,
+                                    server_id=None, servers_client=None):
     """Waits for a volume resource to reach a given status.
 
     This function is a common function for volume, snapshot and backup
     resources. The function extracts the name of the desired resource from
     the client class name of the resource.
+
+    If server_id and servers_client are provided, dump the console for that
+    server on failure.
     """
     resource_name = re.findall(
         r'(volume|group-snapshot|snapshot|backup|group)',
@@ -330,6 +334,11 @@
             raise exceptions.VolumeExtendErrorException(volume_id=resource_id)
 
         if int(time.time()) - start >= client.build_timeout:
+            if server_id and servers_client:
+                console_output = servers_client.get_console_output(
+                    server_id)['output']
+                LOG.debug('Console output for %s\nbody=\n%s',
+                          server_id, console_output)
             message = ('%s %s failed to reach %s status (current %s) '
                        'within the required time (%s s).' %
                        (resource_name, resource_id, status, resource_status,
diff --git a/tempest/config.py b/tempest/config.py
index 00b394e..dfc0a8e 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -975,12 +975,12 @@
                default='ecdsa',
                help='Type of key to use for ssh connections. '
                     'Valid types are rsa, ecdsa'),
-    cfg.IntOpt('allowed_network_downtime',
-               default=5.0,
-               help="Allowed VM network connection downtime during live "
-                    "migration, in seconds. "
-                    "When the measured downtime exceeds this value, an "
-                    "exception is raised."),
+    cfg.FloatOpt('allowed_network_downtime',
+                 default=5.0,
+                 help="Allowed VM network connection downtime during live "
+                      "migration, in seconds. "
+                      "When the measured downtime exceeds this value, an "
+                      "exception is raised."),
 ]
 
 volume_group = cfg.OptGroup(name='volume',
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index f4f37b0..e6c6eb6 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -275,7 +275,7 @@
         LOG.debug("Downtime seconds measured with downtime_meter = %r",
                   downtime)
         allowed_downtime = CONF.validation.allowed_network_downtime
-        self.assertLess(
+        self.assertLessEqual(
             downtime, allowed_downtime,
             "Downtime of {} seconds is higher than expected '{}'".format(
                 downtime, allowed_downtime))
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 2695048..93c949e 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -386,6 +386,29 @@
         mock_sleep.assert_called_once_with(1)
 
     @mock.patch.object(time, 'sleep')
+    def test_wait_for_volume_status_timeout_console(self, mock_sleep):
+        # Tests that the wait method gets the server console log if the
+        # timeout is hit.
+        client = mock.Mock(spec=volumes_client.VolumesClient,
+                           resource_type="volume",
+                           build_interval=1,
+                           build_timeout=1)
+        servers_client = mock.Mock()
+        servers_client.get_console_output.return_value = {
+            'output': 'console log'}
+        volume = {'volume': {'status': 'detaching'}}
+        mock_show = mock.Mock(return_value=volume)
+        client.show_volume = mock_show
+        volume_id = '7532b91e-aa0a-4e06-b3e5-20c0c5ee1caa'
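+        # With build_timeout=1 and show_volume pinned to 'detaching', the
+        # waiter must time out; on timeout it should fetch the console
+        # log from the mocked servers_client.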
+        self.assertRaises(lib_exc.TimeoutException,
+                          waiters.wait_for_volume_resource_status,
+                          client, volume_id, 'available',
+                          server_id='someserver',
+                          servers_client=servers_client)
+        servers_client.get_console_output.assert_called_once_with(
+            'someserver')
+
+    @mock.patch.object(time, 'sleep')
     def test_wait_for_volume_status_error_extending(self, mock_sleep):
         # Tests that the wait method raises VolumeExtendErrorException if
         # the volume status is 'error_extending'.
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 32c017a..f1e6c01 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -73,7 +73,11 @@
     parent: devstack-tempest
     # This job version is with swift enabled on py3
     # as swift is ready on py3 from stable/ussuri onwards.
-    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+    # As this job uses the 'integrated-full' tox env, which is not
+    # available in the old tempest used up to stable/wallaby, this
+    # job definition applies only from stable/xena onwards; a
+    # separate job definition covers branches up to stable/wallaby.
+    branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria|wallaby)).*$
     description: |
       Base integration test with Neutron networking, horizon, swift enable,
       and py3.
@@ -406,20 +410,20 @@
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-networking
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
 
 - project-template:
     name: integrated-gate-compute
@@ -443,15 +447,15 @@
             branches: ^stable/(wallaby|xena|yoga).*$
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - tempest-integrated-compute
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     periodic-weekly:
       jobs:
         # centos-9-stream is tested from zed release onwards
@@ -473,20 +477,20 @@
         - tempest-integrated-placement
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/ussuri).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-placement
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
 
 - project-template:
     name: integrated-gate-storage
@@ -503,20 +507,20 @@
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
 
 - project-template:
     name: integrated-gate-object-storage
@@ -531,17 +535,17 @@
         - tempest-integrated-object-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
     gate:
       jobs:
         - grenade
         - tempest-integrated-object-storage
         # Do not run it on ussuri until below issue is fixed
         # https://storyboard.openstack.org/#!/story/2010057
-        # and job is broken on wallaby branch due to the issue
+        # and the job is broken on branches up to wallaby due to the issue
         # described in https://review.opendev.org/872341
         - openstacksdk-functional-devstack:
-            branches: ^(?!stable/(ussuri|wallaby)).*$
+            branches: ^(?!stable/(ussuri|victoria|wallaby)).*$
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index f70e79c..8aeb748 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -56,6 +56,36 @@
 - job:
     name: tempest-full-py3
     parent: devstack-tempest
+    # This job version uses the 'full' tox env, which is also
+    # available on stable/ussuri through stable/wallaby.
+    branches:
+      - stable/ussuri
+      - stable/victoria
+      - stable/wallaby
+    description: |
+      Base integration test with Neutron networking, horizon, swift enabled,
+      and py3.
+      Former names for this job were:
+        * legacy-tempest-dsvm-py35
+        * gate-tempest-dsvm-py35
+    required-projects:
+      - openstack/horizon
+    vars:
+      tox_envlist: full
+      devstack_localrc:
+        USE_PYTHON3: true
+        FORCE_CONFIG_DRIVE: true
+        ENABLE_VOLUME_MULTIATTACH: true
+        GLANCE_USE_IMPORT_WORKFLOW: True
+      devstack_plugins:
+        neutron: https://opendev.org/openstack/neutron
+      devstack_services:
+        # Enable horizon so that we can run horizon tests.
+        horizon: true
+
+- job:
+    name: tempest-full-py3
+    parent: devstack-tempest
     # This job version is with swift disabled on py3
     # as swift was not ready on py3 until stable/train.
     branches: