Merge "Add credential generation for manager personas"
diff --git a/releasenotes/notes/2024.2-intermediate-release-2a9f305375fcb462.yaml b/releasenotes/notes/2024.2-intermediate-release-2a9f305375fcb462.yaml
new file mode 100644
index 0000000..11d3a4f
--- /dev/null
+++ b/releasenotes/notes/2024.2-intermediate-release-2a9f305375fcb462.yaml
@@ -0,0 +1,5 @@
+---
+prelude: >
+ This is an intermediate release during the 2024.2 Dalmatian development
+ cycle to make new functionality available to plugins and other consumers.
+
diff --git a/releasenotes/notes/add-target-host-filter-94803e93b701d052.yaml b/releasenotes/notes/add-target-host-filter-94803e93b701d052.yaml
new file mode 100644
index 0000000..83a3728
--- /dev/null
+++ b/releasenotes/notes/add-target-host-filter-94803e93b701d052.yaml
@@ -0,0 +1,6 @@
+---
+features:
+ - |
+    Add a new config option `[compute]/target_hosts_to_avoid` which filters
+    out any hypervisor candidate whose hostname contains the provided
+    substring when determining target hosts for migration.
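
The new option is a plain substring check rather than a glob or regex: any host whose name contains the configured value is skipped. A minimal, standalone sketch of that filtering logic (the hostnames below are illustrative, not taken from a real deployment):

```python
# Hypothetical host list; the option's default value is '-ironic'.
target_hosts_to_avoid = '-ironic'
candidate_hosts = ['compute-0', 'compute-1', 'compute-2-ironic']

# Same substring test the patched code applies to hypervisor candidates.
usable_hosts = [host for host in candidate_hosts
                if target_hosts_to_avoid not in host]

print(usable_hosts)  # ['compute-0', 'compute-1']
```
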
diff --git a/requirements.txt b/requirements.txt
index 6e66046..b0df18b 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -23,3 +23,4 @@
debtcollector>=1.2.0 # Apache-2.0
defusedxml>=0.7.1 # PSFL
fasteners>=0.16.0 # Apache-2.0
+testscenarios>=0.5.0 # Apache-2.0/BSD
diff --git a/tempest/api/compute/admin/test_hosts.py b/tempest/api/compute/admin/test_hosts.py
index 0d79570..849b535 100644
--- a/tempest/api/compute/admin/test_hosts.py
+++ b/tempest/api/compute/admin/test_hosts.py
@@ -14,8 +14,11 @@
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
+from tempest import config
from tempest.lib import decorators
+CONF = config.CONF
+
class HostsAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests nova hosts API using admin privileges."""
@@ -70,7 +73,7 @@
hosts = [host for host in hosts if (
host['service'] == 'compute' and
- not host['host_name'].endswith('-ironic'))]
+ CONF.compute.target_hosts_to_avoid not in host['host_name'])]
self.assertNotEmpty(hosts)
for host in hosts:
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index ed94af0..1069e0f 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -538,6 +538,37 @@
volume['id'], 'available')
return volume
+ @classmethod
+ def verify_metadata_from_api(self, server, ssh_client, verify_method):
+ md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
+ LOG.info('Attempting to verify tagged devices in server %s via '
+ 'the metadata service: %s', server['id'], md_url)
+
+ def get_and_verify_metadata():
+ try:
+ ssh_client.exec_command('curl -V')
+ except exceptions.SSHExecCommandFailed:
+ if not CONF.compute_feature_enabled.config_drive:
+ raise self.skipException('curl not found in guest '
+ 'and config drive is '
+ 'disabled')
+ LOG.warning('curl was not found in the guest, device '
+ 'tagging metadata was not checked in the '
+ 'metadata API')
+ return True
+ cmd = 'curl %s' % md_url
+ md_json = ssh_client.exec_command(cmd)
+ return verify_method(md_json)
+ # NOTE(gmann) Keep refreshing the metadata info until the metadata
+ # cache is refreshed. For safer side, we will go with wait loop of
+ # build_interval till build_timeout. verify_method() above will return
+ # True if all metadata verification is done as expected.
+ if not test_utils.call_until_true(get_and_verify_metadata,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ raise lib_exc.TimeoutException('Timeout while verifying '
+ 'metadata on server.')
+
def _detach_volume(self, server, volume):
"""Helper method to detach a volume.
@@ -689,7 +720,7 @@
binary='nova-compute')['services']
hosts = []
for svc in svcs:
- if svc['host'].endswith('-ironic'):
+ if CONF.compute.target_hosts_to_avoid in svc['host']:
continue
if svc['state'] == 'up' and svc['status'] == 'enabled':
if CONF.compute.compute_volume_common_az:
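
The verify_metadata_from_api() helper moved into this base class keeps re-reading the metadata URL with test_utils.call_until_true until verify_method() returns True or build_timeout expires, sleeping build_interval between attempts. A rough standalone sketch of that polling pattern (the loop below only approximates the real tempest.lib helper, and the fake verifier is illustrative):

```python
import time


def call_until_true(func, duration, sleep_for):
    # Approximation of tempest.lib.common.utils.test_utils.call_until_true:
    # call func() every sleep_for seconds until it returns True or
    # duration seconds have elapsed; report whether it ever succeeded.
    deadline = time.time() + duration
    while time.time() < deadline:
        if func():
            return True
        time.sleep(sleep_for)
    return False


attempts = []


def get_and_verify_metadata():
    # Stand-in for fetching meta_data.json over SSH and verifying it;
    # pretend the metadata cache refreshes on the third attempt.
    attempts.append(1)
    return len(attempts) >= 3


assert call_until_true(get_and_verify_metadata, duration=10, sleep_for=0.1)
```
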
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index b7db200..0b39b8a 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -16,6 +16,8 @@
import netaddr
import testtools
+from oslo_serialization import jsonutils as json
+
from tempest.api.compute import base
from tempest.common import utils
from tempest.common.utils.linux import remote_client
@@ -235,3 +237,76 @@
servers_client=self.client)
hostname = linux_client.exec_command("hostname").rstrip()
self.assertEqual('guest-instance-1-domain-com', hostname)
+
+
+class ServersV294TestFqdnHostnames(base.BaseV2ComputeTest):
+    """Test creating a server with an FQDN hostname and verifying attributes
+
+    Starting with the Antelope release, Nova allows the hostname to be an
+    FQDN and allows free-form characters in the hostname via the --hostname
+    parameter with API microversion 2.94 and above.
+
+    These tests create a server with an FQDN-style --hostname value that is
+    more than 64 characters long.
+    """
+
+ min_microversion = '2.94'
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(ServersV294TestFqdnHostnames, cls).setup_credentials()
+
+ @classmethod
+ def setup_clients(cls):
+ super(ServersV294TestFqdnHostnames, cls).setup_clients()
+ cls.client = cls.servers_client
+
+ @classmethod
+ def resource_setup(cls):
+ super(ServersV294TestFqdnHostnames, cls).resource_setup()
+ cls.validation_resources = cls.get_class_validation_resources(
+ cls.os_primary)
+ cls.accessIPv4 = '1.1.1.1'
+ cls.name = 'guest-instance-1'
+ cls.password = data_utils.rand_password()
+ cls.hostname = 'x' * 52 + '-guest-test.domaintest.com'
+ cls.test_server = cls.create_test_server(
+ validatable=True,
+ validation_resources=cls.validation_resources,
+ wait_until='ACTIVE',
+ name=cls.name,
+ accessIPv4=cls.accessIPv4,
+ adminPass=cls.password,
+ hostname=cls.hostname)
+ cls.server = cls.client.show_server(cls.test_server['id'])['server']
+
+ def verify_metadata_hostname(self, md_json):
+ md_dict = json.loads(md_json)
+ dhcp_domain = CONF.compute_feature_enabled.dhcp_domain
+        return md_dict['hostname'] == f"{self.hostname}{dhcp_domain}"
+
+ @decorators.idempotent_id('e7b05488-f9d5-4fce-91b3-e82216c52017')
+ @testtools.skipUnless(CONF.validation.run_validation,
+ 'Instance validation tests are disabled.')
+ def test_verify_hostname_allows_fqdn(self):
+ """Test to verify --hostname allows FQDN type name scheme
+
+ Verify the hostname has FQDN value and Freeform characters
+ in the hostname are allowed
+ """
+ self.assertEqual(
+ self.hostname, self.server['OS-EXT-SRV-ATTR:hostname'])
+ # Verify that metadata API has correct hostname inside guest
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(self.test_server, self.validation_resources),
+ self.ssh_user,
+ self.password,
+ self.validation_resources['keypair']['private_key'],
+ server=self.test_server,
+ servers_client=self.client)
+ self.verify_metadata_from_api(
+ self.test_server, linux_client, self.verify_metadata_hostname)
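
verify_metadata_hostname() simply expects the hostname reported by the metadata service to be the requested FQDN hostname with the configured dhcp_domain suffix appended. A hedged sketch of that comparison with made-up values (the real test reads the domain from CONF and fetches the JSON from the guest via curl):

```python
import json

# Illustrative values: the test builds a >64 character FQDN hostname and
# reads dhcp_domain from tempest configuration ('.novalocal' by default,
# '' when the cloud serves unmodified 2.94-style FQDNs).
requested_hostname = 'x' * 52 + '-guest-test.domaintest.com'
dhcp_domain = ''

md_json = json.dumps({'hostname': requested_hostname + dhcp_domain})

md_dict = json.loads(md_json)
assert md_dict['hostname'] == f"{requested_hostname}{dhcp_domain}"
```
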
diff --git a/tempest/api/compute/servers/test_device_tagging.py b/tempest/api/compute/servers/test_device_tagging.py
index 2640311..d2fdd52 100644
--- a/tempest/api/compute/servers/test_device_tagging.py
+++ b/tempest/api/compute/servers/test_device_tagging.py
@@ -23,9 +23,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest.lib import exceptions
CONF = config.CONF
@@ -64,36 +62,6 @@
dhcp=True)
super(DeviceTaggingBase, cls).setup_credentials()
- def verify_metadata_from_api(self, server, ssh_client, verify_method):
- md_url = 'http://169.254.169.254/openstack/latest/meta_data.json'
- LOG.info('Attempting to verify tagged devices in server %s via '
- 'the metadata service: %s', server['id'], md_url)
-
- def get_and_verify_metadata():
- try:
- ssh_client.exec_command('curl -V')
- except exceptions.SSHExecCommandFailed:
- if not CONF.compute_feature_enabled.config_drive:
- raise self.skipException('curl not found in guest '
- 'and config drive is '
- 'disabled')
- LOG.warning('curl was not found in the guest, device '
- 'tagging metadata was not checked in the '
- 'metadata API')
- return True
- cmd = 'curl %s' % md_url
- md_json = ssh_client.exec_command(cmd)
- return verify_method(md_json)
- # NOTE(gmann) Keep refreshing the metadata info until the metadata
- # cache is refreshed. For safer side, we will go with wait loop of
- # build_interval till build_timeout. verify_method() above will return
- # True if all metadata verification is done as expected.
- if not test_utils.call_until_true(get_and_verify_metadata,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- raise exceptions.TimeoutException('Timeout while verifying '
- 'metadata on server.')
-
def verify_metadata_on_config_drive(self, server, ssh_client,
verify_method):
LOG.info('Attempting to verify tagged devices in server %s via '
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index 21ed0cd..c911039 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -605,6 +605,14 @@
self.addCleanup(_clean_oldest_backup, image1_id)
waiters.wait_for_image_status(glance_client,
image1_id, 'active')
+        # This is required due to a Ceph issue:
+        # https://bugs.launchpad.net/glance/+bug/2045769.
+        # New location APIs are async, so we need to wait for the location
+        # import task to complete.
+        # This also works with the old location API since we don't fail if
+        # there are no tasks for the image.
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image1_id, 'success')
backup2 = data_utils.rand_name(
prefix=CONF.resource_name_prefix, name='backup-2')
@@ -621,6 +629,8 @@
self.addCleanup(glance_client.delete_image, image2_id)
waiters.wait_for_image_status(glance_client,
image2_id, 'active')
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image2_id, 'success')
# verify they have been created
properties = {
@@ -655,6 +665,8 @@
image3_id = resp['image_id']
else:
image3_id = data_utils.parse_image_id(resp.response['location'])
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image3_id, 'success')
self.addCleanup(glance_client.delete_image, image3_id)
# the first back up should be deleted
waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
diff --git a/tempest/api/image/base.py b/tempest/api/image/base.py
index 0cc088a..c2f067c 100644
--- a/tempest/api/image/base.py
+++ b/tempest/api/image/base.py
@@ -177,8 +177,8 @@
# If we added the location directly, the image goes straight
# to active and no hashing is done
self.assertEqual('active', image['status'])
- self.assertIsNone(None, image['os_hash_algo'])
- self.assertIsNone(None, image['os_hash_value'])
+ self.assertIsNone(image['os_hash_algo'])
+ self.assertIsNone(image['os_hash_value'])
return image
@@ -201,8 +201,8 @@
# The image should still be active and still have no hashes
self.assertEqual('active', image['status'])
- self.assertIsNone(None, image['os_hash_algo'])
- self.assertIsNone(None, image['os_hash_value'])
+ self.assertIsNone(image['os_hash_algo'])
+ self.assertIsNone(image['os_hash_value'])
# The direct_url should still match the first location
if 'direct_url' in image:
diff --git a/tempest/api/image/v2/test_images_formats.py b/tempest/api/image/v2/test_images_formats.py
new file mode 100644
index 0000000..48f1325
--- /dev/null
+++ b/tempest/api/image/v2/test_images_formats.py
@@ -0,0 +1,200 @@
+# Copyright 2024 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import os
+
+import testscenarios
+import yaml
+
+from tempest.api.compute import base as compute_base
+from tempest.api.image import base
+from tempest.common import waiters
+from tempest import config
+from tempest import exceptions
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+CONF = config.CONF
+
+
+def load_tests(loader, suite, pattern):
+ """Generate scenarios from the image manifest."""
+ if CONF.image.images_manifest_file is None:
+ return suite
+ ImagesFormatTest.scenarios = []
+ with open(CONF.image.images_manifest_file) as f:
+ ImagesFormatTest._manifest = yaml.load(f, Loader=yaml.SafeLoader)
+ for imgdef in ImagesFormatTest._manifest['images']:
+ ImagesFormatTest.scenarios.append((imgdef['name'],
+ {'imgdef': imgdef}))
+ result = loader.suiteClass()
+ result.addTests(testscenarios.generate_scenarios(suite))
+ return result
+
+
+class ImagesFormatTest(base.BaseV2ImageTest,
+ compute_base.BaseV2ComputeTest):
+ def setUp(self):
+ super().setUp()
+ if CONF.image.images_manifest_file is None:
+ self.skipTest('Image format testing is not configured')
+ self._image_base = os.path.dirname(os.path.abspath(
+ CONF.image.images_manifest_file))
+
+ self.images = []
+
+ def tearDown(self):
+ for img in self.images:
+ try:
+ self.client.delete_image(img['id'])
+ except lib_exc.NotFound:
+ pass
+ return super().tearDown()
+
+ @classmethod
+ def resource_setup(cls):
+ super().resource_setup()
+ cls.available_import_methods = cls.client.info_import()[
+ 'import-methods']['value']
+
+ def _test_image(self, image_def, override_format=None, asimport=False):
+ image_name = data_utils.rand_name(
+ prefix=CONF.resource_name_prefix,
+ name=image_def['name'])
+ image = self.client.create_image(
+ name=image_name,
+ container_format='bare',
+ disk_format=override_format or image_def['format'])
+ self.images.append(image)
+ image_fn = os.path.join(self._image_base, image_def['filename'])
+ with open(image_fn, 'rb') as f:
+ if asimport:
+ self.client.stage_image_file(image['id'], f)
+ self.client.image_import(image['id'], method='glance-direct')
+ else:
+ self.client.store_image_file(image['id'], f)
+ return image
+
+ @decorators.idempotent_id('a245fcbe-63ce-4dc1-a1d0-c16d76d9e6df')
+ def test_accept_usable_formats(self):
+ if self.imgdef['usable']:
+ if self.imgdef['format'] in CONF.image.disk_formats:
+ # These are expected to work
+ self._test_image(self.imgdef)
+ else:
+ # If this is not configured to be supported, we should get
+ # a BadRequest from glance
+ self.assertRaises(lib_exc.BadRequest,
+ self._test_image, self.imgdef)
+ else:
+ self.skipTest(
+ 'Glance does not currently reject unusable images on upload')
+
+ @decorators.idempotent_id('7c7c2f16-2e97-4dce-8cb4-bc10be031c85')
+ def test_accept_reject_formats_import(self):
+ """Make sure glance rejects invalid images during conversion."""
+ if 'glance-direct' not in self.available_import_methods:
+ self.skipTest('Import via glance-direct is not available')
+ if not CONF.image_feature_enabled.image_conversion:
+ self.skipTest('Import image_conversion not enabled')
+
+        # VMDK with footer was not supported by earlier service versions,
+        # so we need to tolerate both passing and failing (and skip in the
+        # failing case). See this for more info:
+        # https://bugs.launchpad.net/glance/+bug/2073262
+ is_broken = 'footer' in self.imgdef['name']
+
+ if (self.imgdef['format'] in CONF.image.disk_formats and
+ self.imgdef['usable']):
+ # Usable images should end up in active state
+ image = self._test_image(self.imgdef, asimport=True)
+ try:
+ waiters.wait_for_image_status(self.client, image['id'],
+ 'active')
+ except lib_exc.TimeoutException:
+ if is_broken:
+ self.skipTest(
+ 'Older glance did not support vmdk-with-footer')
+ else:
+ raise
+ else:
+ # FIXME(danms): Make this better, but gpt will fail before
+ # the import even starts until glance has it in its API
+ # schema as a valid value. Other formats expected to fail
+ # do so during import and return to queued state.
+ if self.imgdef['format'] not in CONF.image.disk_formats:
+ self.assertRaises(lib_exc.BadRequest,
+ self._test_image,
+ self.imgdef, asimport=True)
+ else:
+ image = self._test_image(self.imgdef, asimport=True)
+ waiters.wait_for_image_status(self.client, image['id'],
+ 'queued')
+ self.client.delete_image(image['id'])
+
+ if self.imgdef['format'] == 'iso':
+ # NOTE(danms): Glance has a special case to not convert ISO images
+ # because they are special and must remain as ISOs in order to be
+ # properly used for CD-based rescue and boot.
+ self.assertEqual('iso', image['disk_format'])
+
+ def _create_server_with_image_def(self, image_def, **overrides):
+ image_def = dict(image_def, **overrides)
+ image = self._test_image(image_def)
+ server = self.create_test_server(name='server-%s' % image['name'],
+ image_id=image['id'],
+ wait_until='ACTIVE')
+ return server
+
+ @decorators.idempotent_id('f77394bc-81f4-4d54-9f5b-e48f3d6b5376')
+ def test_compute_rejects_invalid(self):
+ """Make sure compute rejects invalid/insecure images."""
+ if self.imgdef['format'] not in CONF.image.disk_formats:
+ # if this format is not allowed by glance, we can not create
+ # a properly-formatted image for it, so skip it.
+ self.skipTest(
+ 'Format %s not allowed by config' % self.imgdef['format'])
+
+        # VMDK with footer was not supported by earlier service versions,
+        # so we need to tolerate both passing and failing (and skip in the
+        # failing case). See this for more info:
+        # https://bugs.launchpad.net/glance/+bug/2073262
+ is_broken = 'footer' in self.imgdef['name']
+
+ if self.imgdef['usable']:
+ try:
+ server = self._create_server_with_image_def(self.imgdef)
+ except exceptions.BuildErrorException:
+ if is_broken:
+                    self.skipTest('Tolerating failed build with known-broken '
+                                  'image format')
+ else:
+ raise
+ self.delete_server(server['id'])
+ else:
+ self.assertRaises(exceptions.BuildErrorException,
+ self._create_server_with_image_def,
+ self.imgdef)
+
+ @decorators.idempotent_id('ffe21610-e801-4992-9b81-e2d646e2e2e9')
+ def test_compute_rejects_format_mismatch(self):
+ """Make sure compute rejects any image with a format mismatch."""
+ # Lying about the disk_format should always fail
+        override_fmt = (
+            'qcow2' if self.imgdef['format'] in ('raw', 'gpt') else 'raw')
+ self.assertRaises(exceptions.BuildErrorException,
+ self._create_server_with_image_def,
+ self.imgdef,
+ format=override_fmt)
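
The new module relies on the standard unittest load_tests hook plus testscenarios to run every test once per image described in the manifest. A condensed, self-contained sketch of that pattern (the manifest keys name/format/filename/usable mirror what the test reads; everything else, including the inline YAML, is illustrative):

```python
import unittest

import testscenarios
import yaml

MANIFEST_YAML = """
images:
  - {name: qcow2-ok, format: qcow2, filename: ok.qcow2, usable: true}
  - {name: vmdk-bad, format: vmdk, filename: bad.vmdk, usable: false}
"""


class ImagesFormatSketch(unittest.TestCase):
    scenarios = []  # populated by load_tests() below

    def test_has_usable_flag(self):
        # testscenarios injects self.imgdef from each scenario dict.
        self.assertIn('usable', self.imgdef)


def load_tests(loader, suite, pattern):
    manifest = yaml.safe_load(MANIFEST_YAML)
    ImagesFormatSketch.scenarios = [
        (imgdef['name'], {'imgdef': imgdef}) for imgdef in manifest['images']]
    result = loader.suiteClass()
    result.addTests(testscenarios.generate_scenarios(suite))
    return result


if __name__ == '__main__':
    unittest.main()
```
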
diff --git a/tempest/cmd/cleanup_service.py b/tempest/cmd/cleanup_service.py
index b202940..db4407d 100644
--- a/tempest/cmd/cleanup_service.py
+++ b/tempest/cmd/cleanup_service.py
@@ -115,21 +115,32 @@
return [item for item in item_list
if item['tenant_id'] == self.tenant_id]
- def _filter_by_prefix(self, item_list):
- items = [item for item in item_list
- if item['name'].startswith(self.prefix)]
+ def _filter_by_prefix(self, item_list, top_key=None):
+ items = []
+ for item in item_list:
+ name = item[top_key]['name'] if top_key else item['name']
+ if name.startswith(self.prefix):
+ items.append(item)
return items
def _filter_by_resource_list(self, item_list, attr):
if attr not in self.resource_list_json:
return []
- items = [item for item in item_list if item['id']
- in self.resource_list_json[attr].keys()]
+ items = []
+ for item in item_list:
+ item_id = (item['keypair']['name'] if attr == 'keypairs'
+ else item['id'])
+ if item_id in self.resource_list_json[attr].keys():
+ items.append(item)
return items
def _filter_out_ids_from_saved(self, item_list, attr):
- items = [item for item in item_list if item['id']
- not in self.saved_state_json[attr].keys()]
+ items = []
+ for item in item_list:
+ item_id = (item['keypair']['name'] if attr == 'keypairs'
+ else item['id'])
+ if item_id not in self.saved_state_json[attr].keys():
+ items.append(item)
return items
def list(self):
@@ -294,16 +305,11 @@
keypairs = client.list_keypairs()['keypairs']
if self.prefix:
- keypairs = self._filter_by_prefix(keypairs)
+ keypairs = self._filter_by_prefix(keypairs, 'keypair')
elif self.is_resource_list:
- keypairs = [keypair for keypair in keypairs
- if keypair['keypair']['name']
- in self.resource_list_json['keypairs'].keys()]
+ keypairs = self._filter_by_resource_list(keypairs, 'keypairs')
elif not self.is_save_state:
- # recreate list removing saved keypairs
- keypairs = [keypair for keypair in keypairs
- if keypair['keypair']['name']
- not in self.saved_state_json['keypairs'].keys()]
+ keypairs = self._filter_out_ids_from_saved(keypairs, 'keypairs')
LOG.debug("List count, %s Keypairs", len(keypairs))
return keypairs
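
The keypair special-casing in the shared filters exists because nova's list_keypairs response nests each entry under a 'keypair' key, unlike most other listings that expose 'name'/'id' at the top level. A small hedged sketch of both shapes and of the prefix filter with the new top_key argument (the sample data is made up):

```python
# Typical listing: 'name' and 'id' live at the top level of each item.
servers = [{'id': 'a1', 'name': 'tempest-server-1'},
           {'id': 'b2', 'name': 'unrelated-server'}]

# Keypair listing: each item is wrapped in a 'keypair' mapping.
keypairs = [{'keypair': {'name': 'tempest-keypair-1'}},
            {'keypair': {'name': 'unrelated-keypair'}}]


def filter_by_prefix(item_list, prefix, top_key=None):
    # Mirrors the reworked _filter_by_prefix(): look one level deeper
    # when a top_key such as 'keypair' is supplied.
    items = []
    for item in item_list:
        name = item[top_key]['name'] if top_key else item['name']
        if name.startswith(prefix):
            items.append(item)
    return items


print(filter_by_prefix(servers, 'tempest-'))
print(filter_by_prefix(keypairs, 'tempest-', top_key='keypair'))
```
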
diff --git a/tempest/config.py b/tempest/config.py
index bd4a7a1..b1f736c 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -418,7 +418,11 @@
help="Specify destination host for live-migration and cold"
" migration. If option is not set tests will use host"
" automatically."),
-
+ cfg.StrOpt('target_hosts_to_avoid',
+ default='-ironic',
+               help="When aggregating available hypervisors for testing,"
+                    " avoid migrating to or booting any test VM on hosts"
+                    " whose name contains this substring."),
]
placement_group = cfg.OptGroup(name='placement',
@@ -469,6 +473,15 @@
"the '.' with '-' to comply with fqdn hostname. Nova "
"changed that in Wallaby cycle, if your cloud is older "
"than wallaby then you can keep/make it False."),
+ cfg.StrOpt('dhcp_domain',
+ default='.novalocal',
+               help="The domain suffix (including any leading dot) that the "
+                    "cloud under test appends to instance hostnames, and "
+                    "which therefore appears in hostnames returned by the "
+                    "metadata API. If the cloud does not append a domain "
+                    "(for example, when it is configured to correctly "
+                    "support microversion 2.94 FQDN hostnames), set this to "
+                    "the empty string."),
cfg.BoolOpt('change_password',
default=False,
help="Does the test environment support changing the admin "
@@ -668,14 +681,18 @@
help="Time in seconds between image operation status "
"checks."),
cfg.ListOpt('container_formats',
- default=['ami', 'ari', 'aki', 'bare', 'ovf', 'ova'],
+ default=['bare', 'ami', 'ari', 'aki', 'ovf', 'ova'],
help="A list of image's container formats "
"users can specify."),
cfg.ListOpt('disk_formats',
- default=['ami', 'ari', 'aki', 'vhd', 'vmdk', 'raw', 'qcow2',
+ default=['qcow2', 'raw', 'ami', 'ari', 'aki', 'vhd', 'vmdk',
'vdi', 'iso', 'vhdx'],
help="A list of image's disk formats "
- "users can specify.")
+ "users can specify."),
+ cfg.StrOpt('images_manifest_file',
+ default=None,
+ help="A path to a manifest.yml generated using the "
+ "os-test-images project"),
]
image_feature_group = cfg.OptGroup(name='image-feature-enabled',
@@ -708,6 +725,9 @@
help=('Is show_multiple_locations enabled in glance. '
'Note that at least one http store must be enabled as '
'well, because we use that location scheme to test.')),
+ cfg.BoolOpt('image_conversion',
+ default=False,
+ help=('Is image_conversion enabled in glance.')),
]
network_group = cfg.OptGroup(name='network',
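
The new options follow the usual oslo.config pattern used throughout tempest/config.py: declare a StrOpt/BoolOpt, register it on a group, and read it back through CONF. A throwaway sketch of that registration flow (it uses a local ConfigOpts instance, not Tempest's CONF, and only borrows a couple of the option names from this patch):

```python
from oslo_config import cfg

conf = cfg.ConfigOpts()

compute_group = cfg.OptGroup(name='compute')
conf.register_group(compute_group)
conf.register_opts([
    cfg.StrOpt('target_hosts_to_avoid', default='-ironic',
               help='Skip hosts whose name contains this substring.'),
], group=compute_group)

image_group = cfg.OptGroup(name='image')
conf.register_group(image_group)
conf.register_opts([
    cfg.StrOpt('images_manifest_file', default=None,
               help='Path to a manifest.yml from os-test-images.'),
], group=image_group)

conf([], project='sketch')  # parse an empty command line / config set
print(conf.compute.target_hosts_to_avoid)  # '-ironic'
print(conf.image.images_manifest_file)     # None unless set in a config file
```
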
diff --git a/tempest/scenario/test_instances_with_cinder_volumes.py b/tempest/scenario/test_instances_with_cinder_volumes.py
index b9ac2c8..0ddbec1 100644
--- a/tempest/scenario/test_instances_with_cinder_volumes.py
+++ b/tempest/scenario/test_instances_with_cinder_volumes.py
@@ -80,7 +80,7 @@
for host in zone['hosts']:
if 'nova-compute' in zone['hosts'][host] and \
zone['hosts'][host]['nova-compute']['available'] and \
- not host.endswith('-ironic'):
+ CONF.compute.target_hosts_to_avoid not in host:
hosts.append({'zone': zone['zoneName'],
'host_name': host})
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
index fe85234..556b925 100644
--- a/tempest/scenario/test_server_multinode.py
+++ b/tempest/scenario/test_server_multinode.py
@@ -48,7 +48,7 @@
for host in zone['hosts']:
if 'nova-compute' in zone['hosts'][host] and \
zone['hosts'][host]['nova-compute']['available'] and \
- not host.endswith('-ironic'):
+ CONF.compute.target_hosts_to_avoid not in host:
hosts.append({'zone': zone['zoneName'],
'host_name': host})
diff --git a/tempest/tests/cmd/test_cleanup_services.py b/tempest/tests/cmd/test_cleanup_services.py
index 2557145..7f8db9f 100644
--- a/tempest/tests/cmd/test_cleanup_services.py
+++ b/tempest/tests/cmd/test_cleanup_services.py
@@ -610,21 +610,14 @@
self._test_prefix_opt_precedence(delete_mock)
def test_resource_list_opt_precedence(self):
- delete_mock = [(self.filter_prefix, [], None),
+ delete_mock = [(self.filter_saved_state, [], None),
+ (self.filter_resource_list, [], None),
+ (self.filter_prefix, [], None),
(self.get_method, self.response, 200),
(self.validate_response, 'validate', None),
(self.delete_method, 'error', None),
(self.log_method, 'exception', None)]
- serv = self._create_cmd_service(
- self.service_class, is_resource_list=True)
-
- _, fixtures = self.run_function_with_mocks(
- serv.delete,
- delete_mock
- )
-
- # Check that prefix was not used for filtering
- fixtures[0].mock.assert_not_called()
+ self._test_resource_list_opt_precedence(delete_mock)
class TestVolumeService(BaseCmdServiceTests):
diff --git a/zuul.d/base.yaml b/zuul.d/base.yaml
index 633f501..4de4111 100644
--- a/zuul.d/base.yaml
+++ b/zuul.d/base.yaml
@@ -12,6 +12,10 @@
timeout: 7200
roles: &base_roles
- zuul: opendev.org/openstack/devstack
+ failure-output:
+ # This matches stestr/tempest output when a test fails
+ # {1} tempest.api.test_blah [5.743446s] ... FAILED
+ - '\{\d+\} (.*?) \[[\d\.]+s\] \.\.\. FAILED'
vars: &base_vars
devstack_localrc:
IMAGE_URLS: http://download.cirros-cloud.net/0.6.2/cirros-0.6.2-x86_64-disk.img, http://download.cirros-cloud.net/0.6.1/cirros-0.6.1-x86_64-disk.img
@@ -60,6 +64,10 @@
required-projects: *base_required-projects
timeout: 7200
roles: *base_roles
+ failure-output:
+ # This matches stestr/tempest output when a test fails
+ # {1} tempest.api.test_blah [5.743446s] ... FAILED
+ - '\{\d+\} (.*?) \[[\d\.]+s\] \.\.\. FAILED'
vars: *base_vars
run: playbooks/devstack-tempest-ipv6.yaml
post-run: playbooks/post-tempest.yaml
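
The failure-output entries added to both jobs are plain regular expressions that Zuul matches against the job's console output to flag a failing run early; the sample line from the comment can be checked against the pattern with Python's re module:

```python
import re

pattern = r'\{\d+\} (.*?) \[[\d\.]+s\] \.\.\. FAILED'
sample = '{1} tempest.api.test_blah [5.743446s] ... FAILED'

match = re.search(pattern, sample)
assert match is not None
print(match.group(1))  # tempest.api.test_blah
```
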