Merge "Migrate tempest jobs to Ubuntu Jammy (22.04)"
diff --git a/doc/source/overview.rst b/doc/source/overview.rst
index 2eaf72f..315255d 100644
--- a/doc/source/overview.rst
+++ b/doc/source/overview.rst
@@ -207,21 +207,21 @@
 is ``test_path=./tempest/test_discover`` which will only run test discover on the
 Tempest suite.
 
-Alternatively, there are the py27 and py36 tox jobs which will run the unit
-tests with the corresponding version of python.
+Alternatively, there is the py39 tox job, which will run the unit tests
+with Python 3.9.
 
 One common activity is to just run a single test, you can do this with tox
-simply by specifying to just run py27 or py36 tests against a single test::
+simply by running the py39 environment against a single test::
 
-    $ tox -e py36 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
+    $ tox -e py39 -- -n tempest.tests.test_microversions.TestMicroversionsTestsClass.test_config_version_none_23
 
 Or all tests in the test_microversions.py file::
 
-    $ tox -e py36 -- -n tempest.tests.test_microversions
+    $ tox -e py39 -- -n tempest.tests.test_microversions
 
 You may also use regular expressions to run any matching tests::
 
-    $ tox -e py36 -- test_microversions
+    $ tox -e py39 -- test_microversions
 
 Additionally, when running a single test, or test-file, the ``-n/--no-discover``
 argument is no longer required, however it may perform faster if included.
diff --git a/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml b/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml
new file mode 100644
index 0000000..c8a026e
--- /dev/null
+++ b/releasenotes/notes/image_multiple_locations-cda4453567953c1d.yaml
@@ -0,0 +1,8 @@
+---
+features:
+  - |
+    Add a new config option
+    ``[image_feature_enabled]/manage_locations``, which enables
+    tests for the ``show_multiple_locations=True`` functionality in
+    glance. In order for this to work, you must also have a store
+    capable of hosting images with an HTTP URI.
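
A rough sketch of the matching tempest.conf settings (the option names come
from this change; the image URL is only an illustrative placeholder)::

    [image_feature_enabled]
    manage_locations = True

    [image]
    # Any image reachable over plain HTTP works; this URL is a placeholder.
    http_image = http://example.com/cirros-0.6.2-x86_64-disk.img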
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 91ce1f9..d47ffce 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -128,3 +128,27 @@
                                               wait_for_server=False)
         self.addCleanup(self.client.delete_image, image['id'])
         self.assertEqual(snapshot_name, image['name'])
+
+    @decorators.idempotent_id('f3cac456-e3fe-4183-a7a7-a59f7f017088')
+    def test_create_server_from_snapshot(self):
+        # Create one server normally
+        server = self.create_test_server(wait_until='ACTIVE')
+        self.addCleanup(self.servers_client.delete_server, server['id'])
+
+        # Snapshot it
+        snapshot_name = data_utils.rand_name('test-snap')
+        image = self.create_image_from_server(server['id'],
+                                              name=snapshot_name,
+                                              wait_until='ACTIVE',
+                                              wait_for_server=False)
+        self.addCleanup(self.client.delete_image, image['id'])
+
+        # Try to create another server from that snapshot
+        server2 = self.create_test_server(wait_until='ACTIVE',
+                                          image_id=image['id'])
+
+        # Delete server 2 before we finish, otherwise we'll race with the
+        # cleanup, which tries to delete the image before the server is
+        # gone.
+        self.servers_client.delete_server(server2['id'])
+        waiters.wait_for_server_termination(self.servers_client, server2['id'])
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index a69dbb3..e1e7fda 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -190,7 +190,7 @@
                 server, self.os_primary, tenant_network,
                 True, self.validation_resources, "SSHABLE", True)
         else:
-            waiters.wait_for_server_status(self.client, self.server['id'],
+            waiters.wait_for_server_status(self.client, server['id'],
                                            'ACTIVE')
 
         msg = ('Server was not rebuilt to the original image. '
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index 7e647dd..d590668 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -363,10 +363,12 @@
 
         if all_stores:
             stores_list = ','.join([store['id']
-                                    for store in self.available_stores])
+                                    for store in self.available_stores
+                                    if store.get('read-only') != 'true'])
         else:
-            stores = [store['id'] for store in self.available_stores]
-            stores_list = stores[::len(stores) - 1]
+            stores = [store['id'] for store in self.available_stores
+                      if store.get('read-only') != 'true']
+            stores_list = stores[::max(1, len(stores) - 1)]
 
         return body, stores_list
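
For clarity, the extended slice above picks the first and last writable store
ids, degenerating to a single store when only one is available; a quick
sketch with hypothetical store ids::

    >>> stores = ['fast', 'cheap', 'slow']     # hypothetical store ids
    >>> stores[::max(1, len(stores) - 1)]      # first and last entries
    ['fast', 'slow']
    >>> ['only'][::max(1, len(['only']) - 1)]  # single-store edge case
    ['only']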
 
@@ -765,3 +767,280 @@
         fetched_images = self.alt_img_client.list_images(params)['images']
         self.assertEqual(1, len(fetched_images))
         self.assertEqual(image['id'], fetched_images[0]['id'])
+
+
+class ImageLocationsTest(base.BaseV2ImageTest):
+    @classmethod
+    def skip_checks(cls):
+        super(ImageLocationsTest, cls).skip_checks()
+        if not CONF.image_feature_enabled.manage_locations:
+            skip_msg = (
+                "%s skipped as show_multiple_locations is not available" % (
+                    cls.__name__))
+            raise cls.skipException(skip_msg)
+
+    @decorators.idempotent_id('58b0fadc-219d-40e1-b159-1c902cec323a')
+    def test_location_after_upload(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Now try uploading an image file
+        file_content = data_utils.random_bytes()
+        image_file = io.BytesIO(file_content)
+        self.client.store_image_file(image['id'], image_file)
+        waiters.wait_for_image_status(self.client, image['id'], 'active')
+
+        # Locations should now have one item
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Expected one location in %r' % image['locations'])
+
+        # NOTE(danms): If show_image_direct_url is enabled, then this
+        # will be present. If so, it should match the one location we set
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        return image
+
+    def _check_set_location(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Add a new location
+        new_loc = {'metadata': {'foo': 'bar'},
+                   'url': CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # The image should now be active, with one location that looks
+        # like we expect
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Image should have one location but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(new_loc['url'], image['locations'][0]['url'])
+        self.assertEqual('bar', image['locations'][0]['metadata'].get('foo'))
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        # If we added the location directly, the image goes straight
+        # to active and no hashing is done
+        self.assertEqual('active', image['status'])
+        self.assertIsNone(image['os_hash_algo'])
+        self.assertIsNone(image['os_hash_value'])
+
+        return image
+
+    @decorators.idempotent_id('37599b8a-d5c0-4590-aee5-73878502be15')
+    def test_set_location(self):
+        self._check_set_location()
+
+    def _check_set_multiple_locations(self):
+        image = self._check_set_location()
+
+        new_loc = {'metadata': {'speed': '88mph'},
+                   'url': '%s#new' % CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # The image should now have two locations and the last one
+        # (locations are ordered) should have the new URL.
+        image = self.client.show_image(image['id'])
+        self.assertEqual(2, len(image['locations']),
+                         'Image should have two locations but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(new_loc['url'], image['locations'][1]['url'])
+
+        # The image should still be active and still have no hashes
+        self.assertEqual('active', image['status'])
+        self.assertIsNone(image['os_hash_algo'])
+        self.assertIsNone(image['os_hash_value'])
+
+        # The direct_url should still match the first location
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        return image
+
+    @decorators.idempotent_id('bf6e0009-c039-4884-b498-db074caadb10')
+    def test_replace_location(self):
+        image = self._check_set_multiple_locations()
+        original_locs = image['locations']
+
+        # Replacing with the exact thing should work
+        self.client.update_image(image['id'], [
+            dict(replace='/locations', value=image['locations'])])
+
+        # Changing metadata on a location should work
+        original_locs[0]['metadata']['date'] = '2015-10-15'
+        self.client.update_image(image['id'], [
+            dict(replace='/locations', value=original_locs)])
+
+        # Deleting a location via replace should not work
+        self.assertRaises(
+            lib_exc.BadRequest,
+            self.client.update_image,
+            image['id'], [
+                dict(replace='/locations', value=[original_locs[0]])])
+
+        # Replacing a location (with a different URL) should not work
+        new_loc = {'metadata': original_locs[1]['metadata'],
+                   'url': '%s#new3' % CONF.image.http_image}
+        self.assertRaises(
+            lib_exc.BadRequest,
+            self.client.update_image,
+            image['id'], [
+                dict(replace='/locations', value=[original_locs[0],
+                                                  new_loc])])
+
+        # Make sure the locations haven't changed with the above failures,
+        # but the metadata we updated should be changed.
+        image = self.client.show_image(image['id'])
+        self.assertEqual(2, len(image['locations']),
+                         'Image should have two locations but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(original_locs, image['locations'])
+
+    @decorators.idempotent_id('8a648de4-b745-4c28-a7b5-20de1c3da4d2')
+    def test_delete_locations(self):
+        image = self._check_set_multiple_locations()
+        expected_remaining_loc = image['locations'][1]
+
+        self.client.update_image(image['id'], [
+            dict(remove='/locations/0')])
+
+        # The image should now have only the one location we did not delete
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']),
+                         'Image should have one location but has %i' % (
+                         len(image['locations'])))
+        self.assertEqual(expected_remaining_loc['url'],
+                         image['locations'][0]['url'])
+
+        # The direct_url should now be the last remaining location
+        if 'direct_url' in image:
+            self.assertEqual(image['direct_url'], image['locations'][0]['url'])
+
+        # Removing the last location should be disallowed
+        self.assertRaises(lib_exc.Forbidden,
+                          self.client.update_image, image['id'], [
+                              dict(remove='/locations/0')])
+
+    @decorators.idempotent_id('a9a20396-8399-4b36-909d-564949be098f')
+    def test_set_location_bad_scheme(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Locations should be empty when there is no data
+        self.assertEqual('queued', image['status'])
+        self.assertEqual([], image['locations'])
+
+        # Adding a new location using a scheme that is not allowed
+        # should result in an error
+        new_loc = {'metadata': {'foo': 'bar'},
+                   'url': 'gopher://info.cern.ch'}
+        self.assertRaises(lib_exc.BadRequest,
+                          self.client.update_image, image['id'], [
+                              dict(add='/locations/-', value=new_loc)])
+
+    def _check_set_location_with_hash(self):
+        image = self.client.create_image(container_format='bare',
+                                         disk_format='raw')
+
+        # Create a new location with validation data
+        new_loc = {'validation_data': {'checksum': '1' * 32,
+                                       'os_hash_value': 'deadbeef' * 16,
+                                       'os_hash_algo': 'sha512'},
+                   'metadata': {},
+                   'url': CONF.image.http_image}
+        self.client.update_image(image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # Expect that all of our values ended up on the image
+        image = self.client.show_image(image['id'])
+        self.assertEqual(1, len(image['locations']))
+        self.assertEqual('1' * 32, image['checksum'])
+        self.assertEqual('deadbeef' * 16, image['os_hash_value'])
+        self.assertEqual('sha512', image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
+        self.assertEqual('active', image['status'])
+
+        return image
+
+    @decorators.idempotent_id('42d6f7db-c9f5-4bae-9e15-a90262fe445a')
+    def test_set_location_with_hash(self):
+        self._check_set_location_with_hash()
+
+    @decorators.idempotent_id('304c8a19-aa86-47dd-a022-ec4c7f433f1b')
+    def test_set_location_with_hash_second_matching(self):
+        orig_image = self._check_set_location_with_hash()
+
+        new_loc = {
+            'validation_data': {'checksum': orig_image['checksum'],
+                                'os_hash_value': orig_image['os_hash_value'],
+                                'os_hash_algo': orig_image['os_hash_algo']},
+            'metadata': {},
+            'url': '%s#new' % CONF.image.http_image}
+        self.client.update_image(orig_image['id'], [
+            dict(add='/locations/-', value=new_loc)])
+
+        # Setting the same exact values on a new location should work
+        image = self.client.show_image(orig_image['id'])
+        self.assertEqual(2, len(image['locations']))
+        self.assertEqual(orig_image['checksum'], image['checksum'])
+        self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
+        self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
+        self.assertNotIn('validation_data', image['locations'][1])
+
+    @decorators.idempotent_id('f3ce99c2-9ffb-4b9f-b2cb-876929382553')
+    def test_set_location_with_hash_not_matching(self):
+        orig_image = self._check_set_location_with_hash()
+        values = {
+            'checksum': '2' * 32,
+            'os_hash_value': 'beefdead' * 16,
+            'os_hash_algo': 'sha256',
+        }
+
+        # Try to set a new location with one each of the above
+        # substitutions
+        for k, v in values.items():
+            new_loc = {
+                'validation_data': {
+                    'checksum': orig_image['checksum'],
+                    'os_hash_value': orig_image['os_hash_value'],
+                    'os_hash_algo': orig_image['os_hash_algo']},
+                'metadata': {},
+                'url': '%s#new' % CONF.image.http_image}
+            new_loc['validation_data'][k] = v
+
+            # This should always fail due to the mismatch
+            self.assertRaises(lib_exc.Conflict,
+                              self.client.update_image,
+                              orig_image['id'], [
+                                  dict(add='/locations/-', value=new_loc)])
+
+        # Now try to add a new location with all of the substitutions,
+        # which should also fail
+        new_loc['validation_data'] = values
+        self.assertRaises(lib_exc.Conflict,
+                          self.client.update_image,
+                          orig_image['id'], [
+                              dict(add='/locations/-', value=new_loc)])
+
+        # Make sure nothing has changed on our image after all the
+        # above failures
+        image = self.client.show_image(orig_image['id'])
+        self.assertEqual(1, len(image['locations']))
+        self.assertEqual(orig_image['checksum'], image['checksum'])
+        self.assertEqual(orig_image['os_hash_value'], image['os_hash_value'])
+        self.assertEqual(orig_image['os_hash_algo'], image['os_hash_algo'])
+        self.assertNotIn('validation_data', image['locations'][0])
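
For reference, each ``dict(add='/locations/-', value=new_loc)`` entry passed
to ``update_image`` in the class above expresses an "append to the image's
locations list" patch operation; a minimal sketch of one such entry (the URL
is a placeholder standing in for CONF.image.http_image, not the exact wire
format)::

    import json

    new_loc = {'metadata': {'foo': 'bar'},
               'url': 'http://example.com/cirros.img'}  # placeholder URL
    patch = [{'add': '/locations/-', 'value': new_loc}]
    print(json.dumps(patch, indent=2))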
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 00f133e..be8766d 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -352,6 +352,8 @@
                     except Exception:
                         LOG.exception('Server %s failed to delete in time',
                                       server['id'])
+        if servers and not multiple_create_request:
+            body = rest_client.ResponseBody(body.response, servers[0])
         return body, servers
 
     return body, created_servers
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 53582a6..71599bd 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -233,6 +233,26 @@
 
     exc_cls = lib_exc.TimeoutException
     start = int(time.time())
+
+    # NOTE(danms): Don't wait for stores that are read-only as those
+    # will never complete
+    try:
+        store_info = client.info_stores()['stores']
+        stores = ','.join(sorted([
+            store['id'] for store in store_info
+            if store.get('read-only') != 'true' and
+            (not stores or store['id'] in stores.split(','))]))
+    except lib_exc.NotFound:
+        # If multi-store is not enabled, then we can not resolve which
+        # ones are read-only, and stores must have been passed as None
+        # anyway for us to succeed. If not, then we should raise right
+        # now and avoid waiting since we will never see the stores
+        # appear.
+        if stores is not None:
+            raise lib_exc.TimeoutException(
+                'Image service has no store support; '
+                'cowardly refusing to wait for them.')
+
     while int(time.time()) - start < client.build_timeout:
         image = client.show_image(image_id)
         if image['status'] == 'active' and (stores is None or
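
A minimal sketch of the read-only filter above, using a hypothetical
``info_stores()`` payload and store ids::

    store_info = [
        {'id': 'ceph'},                       # writable
        {'id': 'http', 'read-only': 'true'},  # read-only, never waited on
    ]
    stores = 'ceph,http'                      # what the caller asked for
    waitable = ','.join(sorted(
        store['id'] for store in store_info
        if store.get('read-only') != 'true' and
        (not stores or store['id'] in stores.split(','))))
    print(waitable)  # -> 'ceph'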
diff --git a/tempest/config.py b/tempest/config.py
index 39e7fb3..a60e5a8 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -735,6 +735,11 @@
     cfg.BoolOpt('os_glance_reserved',
                 default=False,
                 help="Should we check that os_glance namespace is reserved"),
+    cfg.BoolOpt('manage_locations',
+                default=False,
+                help=('Whether show_multiple_locations is enabled in glance. '
+                      'Note that at least one http store must be enabled as '
+                      'well, because we use that location scheme to test.')),
 ]
 
 network_group = cfg.OptGroup(name='network',
@@ -1242,29 +1247,39 @@
 EnforceScopeGroup = [
     cfg.BoolOpt('nova',
                 default=False,
-                help='Does the compute service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'nova.conf: [oslo_policy].enforce_scope option.'),
+                help='Do the compute service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when the nova.conf [oslo_policy] options '
+                     'enforce_new_defaults and enforce_scope are both '
+                     'enabled.'),
     cfg.BoolOpt('neutron',
                 default=False,
-                help='Does the network service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'neutron.conf: [oslo_policy].enforce_scope option.'),
+                help='Do the network service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when the neutron.conf [oslo_policy] options '
+                     'enforce_new_defaults and enforce_scope are both '
+                     'enabled.'),
     cfg.BoolOpt('glance',
                 default=False,
-                help='Does the Image service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'glance.conf: [oslo_policy].enforce_scope option.'),
+                help='Do the Image service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when the glance.conf [oslo_policy] options '
+                     'enforce_new_defaults and enforce_scope are both '
+                     'enabled.'),
     cfg.BoolOpt('cinder',
                 default=False,
-                help='Does the Volume service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'cinder.conf: [oslo_policy].enforce_scope option.'),
+                help='Do the Volume service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when the cinder.conf [oslo_policy] options '
+                     'enforce_new_defaults and enforce_scope are both '
+                     'enabled.'),
     cfg.BoolOpt('keystone',
                 default=False,
-                help='Does the Identity service API policies enforce scope? '
-                     'This configuration value should be same as '
-                     'keystone.conf: [oslo_policy].enforce_scope option.'),
+                help='Do the Identity service API policies enforce scope and '
+                     'new defaults? This configuration value should be '
+                     'enabled when the keystone.conf [oslo_policy] options '
+                     'enforce_new_defaults and enforce_scope are both '
+                     'enabled.'),
 ]
 
 debug_group = cfg.OptGroup(name="debug",
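
As a hedged illustration of the pairing these help strings describe (the file
names, the ``[enforce_scope]`` group name in tempest.conf, and the values are
assumptions for this sketch)::

    # nova.conf (and similarly for the other services)
    [oslo_policy]
    enforce_new_defaults = True
    enforce_scope = True

    # tempest.conf
    [enforce_scope]
    nova = True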
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 1d0ee77..71088a4 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -59,11 +59,19 @@
     def test_wait_for_image_imported_to_stores(self):
         self.client.show_image.return_value = ({'status': 'active',
                                                 'stores': 'fake_store'})
+        self.client.info_stores.return_value = {
+            'stores': [{'id': 'fake_store',
+                        'description': 'A writable store'},
+                       {'id': 'another_fake_store',
+                        'description': 'A read-only store',
+                        'read-only': 'true'}]
+        }
         start_time = int(time.time())
         waiters.wait_for_image_imported_to_stores(
-            self.client, 'fake_image_id', 'fake_store')
+            self.client, 'fake_image_id', 'fake_store,another_fake_store')
         end_time = int(time.time())
-        # Ensure waiter returns before build_timeout
+        # Ensure the waiter returns before build_timeout and does not wait
+        # for the read-only store
         self.assertLess((end_time - start_time), 10)
 
     def test_wait_for_image_imported_to_stores_failure(self):
@@ -95,6 +103,22 @@
                           waiters.wait_for_image_imported_to_stores,
                           client, 'fake_image_id', 'fake_store')
 
+    def test_wait_for_image_imported_to_stores_no_stores(self):
+        client = mock.MagicMock()
+        client.show_image.return_value = ({'status': 'active'})
+        client.info_stores.side_effect = lib_exc.NotFound
+        client.build_timeout = 2
+        start_time = time.time()
+        waiters.wait_for_image_imported_to_stores(
+            client, 'fake_image_id', None)
+        end_time = time.time()
+        self.assertLess(end_time - start_time, 10)
+
+        exc = self.assertRaises(lib_exc.TimeoutException,
+                                waiters.wait_for_image_imported_to_stores,
+                                client, 'fake_image_id', 'foo,bar')
+        self.assertIn('cowardly', str(exc))
+
     def test_wait_for_image_copied_to_stores(self):
         self.client.show_image.return_value = ({
             'status': 'active',