Merge "Enable tempest cleanup with record_resources in jobs"
diff --git a/doc/source/stable_branch_support_policy.rst b/doc/source/stable_branch_support_policy.rst
index 9c2d1ed..cea632b 100644
--- a/doc/source/stable_branch_support_policy.rst
+++ b/doc/source/stable_branch_support_policy.rst
@@ -23,7 +23,7 @@
switch to running Tempest from a tag with support for the branch, or exclude
a newly introduced test (if that is the cause of the issue). Tempest will not
be creating stable branches to support *Extended Maintenance* phase branches, as
-the burden is on the *Extended Maintenance* phase branche maintainers, not the Tempest
+the burden is on the *Extended Maintenance* phase branch maintainers, not the Tempest
project, to support that branch.
.. _Extended Maintenance policy: https://governance.openstack.org/tc/resolutions/20180301-stable-branch-eol.html
diff --git a/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
new file mode 100644
index 0000000..30a2278
--- /dev/null
+++ b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A new config option in the ``volume_feature_enabled`` section,
+ ``volume_types_for_data_volume``, is added to allow the user to specify
+ which volume types can be used for data volumes in a new test
+ ``test_instances_with_cinder_volumes_on_all_compute_nodes``. By default,
+ this option is set to None.
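A minimal sketch of how a test might read the new comma-separated option, mirroring the scenario test added later in this change (the option name comes from this patch; the surrounding variables are illustrative):

    from tempest import config

    CONF = config.CONF

    # The option holds a comma-separated list of volume type names, or None.
    types_opt = CONF.volume_feature_enabled.volume_types_for_data_volume
    volume_types = types_opt.split(',') if types_opt else []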
diff --git a/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml b/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml
new file mode 100644
index 0000000..48c1717
--- /dev/null
+++ b/releasenotes/notes/cleanup-container-client-interface-6a9fe49072cfdb17.yaml
@@ -0,0 +1,8 @@
+---
+upgrade:
+ - |
+ The following deprecated alias methods of the ``ContainerClient`` class
+ have been removed.
+
+ - ``update_container_metadata``, replaced by ``create_update_or_delete_container_metadata``
+ - ``list_container_contents``, replaced by ``list_container_objects``
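For callers migrating off the removed aliases, a hedged sketch of the replacement calls; it assumes an already-authenticated ``ContainerClient`` instance named ``client`` and an existing container:

    # update_container_metadata -> create_update_or_delete_container_metadata
    client.create_update_or_delete_container_metadata(
        'my-container',
        create_update_metadata={'purpose': 'demo'})

    # list_container_contents -> list_container_objects
    resp, objects = client.list_container_objects('my-container')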
diff --git a/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml b/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml
new file mode 100644
index 0000000..d408538
--- /dev/null
+++ b/releasenotes/notes/deprecate-import_image-e8c627aab833b64d.yaml
@@ -0,0 +1,12 @@
+---
+upgrade:
+ - |
+ Default value of the ``[image-feature-enabled] image_import`` has been
+ changed from ``False`` to ``True``, and now the image import feature is
+ tested by default.
+
+deprecations:
+ - |
+ The ``[image-feature-enabled] image_import`` option has been deprecated.
+ The image import feature works in both standalone mode and WSGI mode since
+ Victoria and the image import feature can always be tested.
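While the deprecated option remains, tests can still read it as before; a minimal sketch assuming a Tempest test method context with the usual ``CONF = config.CONF`` module global:

    if not CONF.image_feature_enabled.image_import:
        raise self.skipException('Image import is not available.')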
diff --git a/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml b/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml
new file mode 100644
index 0000000..2834876
--- /dev/null
+++ b/releasenotes/notes/deprecate-os_glance_reserved-bace16f21facca3b.yaml
@@ -0,0 +1,11 @@
+---
+upgrade:
+ - |
+ Default value of the ``[image-feature-enabled] os_glance_reserved`` has
+ been changed from ``False`` to ``True`` and now the reservation of
+ os_glance namespace is tested by default.
+
+deprecations:
+ - |
+ The ``[image-feature-enabled] os_glance_reserved`` option has been
+ deprecated because glance has reserved the os_glance namespace since Wallaby.
diff --git a/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml b/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml
new file mode 100644
index 0000000..26da18c
--- /dev/null
+++ b/releasenotes/notes/remove-xenapi_apis-86720c0c399460ab.yaml
@@ -0,0 +1,5 @@
+---
+upgrade:
+ - |
+ The deprecated ``[compute-feature-enabled] xenapi_apis`` option has been
+ removed.
diff --git a/roles/run-tempest/README.rst b/roles/run-tempest/README.rst
index 04db849..c682641 100644
--- a/roles/run-tempest/README.rst
+++ b/roles/run-tempest/README.rst
@@ -81,7 +81,7 @@
.. zuul:rolevar:: stable_constraints_file
:default: ''
- Upper constraints file to be used for stable branch till stable/victoria.
+ Upper constraints file to be used for stable branches up to Wallaby.
.. zuul:rolevar:: tempest_tox_environment
:default: ''
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 3d78557..29409c0 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -25,11 +25,11 @@
target_branch: "{{ zuul.override_checkout }}"
when: zuul.override_checkout is defined
-- name: Use stable branch upper-constraints till stable/wallaby
+- name: Use stable branch upper-constraints till Wallaby
set_fact:
# TOX_CONSTRAINTS_FILE is new name, UPPER_CONSTRAINTS_FILE is old one, best to set both
tempest_tox_environment: "{{ tempest_tox_environment | combine({'UPPER_CONSTRAINTS_FILE': stable_constraints_file}) | combine({'TOX_CONSTRAINTS_FILE': stable_constraints_file}) }}"
- when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "stable/victoria", "stable/wallaby"]
+ when: target_branch in ["stable/ocata", "stable/pike", "stable/queens", "stable/rocky", "stable/stein", "stable/train", "stable/ussuri", "unmaintained/victoria", "unmaintained/wallaby"]
- name: Use Configured upper-constraints for non-master Tempest
set_fact:
@@ -80,14 +80,14 @@
- name: Tempest 26.1.0 workaround to fallback exclude-list to blacklist
# NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with
- # stestr 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1
+ # stestr 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1
# in stable/train|ussuri|victoria) which does not have new args exclude-list
# so let's fallback to old arg if new arg is passed.
set_fact:
exclude_list_option: "--blacklist-file={{ tempest_test_exclude_list|quote }}"
when:
- tempest_test_exclude_list is defined
- - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
+ - target_branch in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
# TODO(kopecmartin) remove this after all consumers of the role have switched
# to tempest_exclude_regex option, until then it's kept here for the backward
@@ -105,11 +105,11 @@
when:
- tempest_black_regex is not defined
- tempest_exclude_regex is defined
- - target_branch not in ["stable/train", "stable/ussuri", "stable/victoria"]
+ - target_branch not in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
- name: Tempest 26.1.0 workaround to fallback exclude-regex to black-regex
# NOTE(gmann): stable/train|ussuri|victoria use Tempest 26.1.0 and with stestr
- # 2.5.1/3.0.1 (beacause of upper constraints of stestr 2.5.1/3.0.1 in
+ # 2.5.1/3.0.1 (because of upper constraints of stestr 2.5.1/3.0.1 in
# stable/train|ussuri|victoria) which does not have new args exclude-list so
# let's fallback to old arg if new arg is passed.
set_fact:
@@ -117,7 +117,7 @@
when:
- tempest_black_regex is not defined
- tempest_exclude_regex is defined
- - target_branch in ["stable/train", "stable/ussuri", "stable/victoria"]
+ - target_branch in ["stable/train", "stable/ussuri", "unmaintained/victoria"]
- name: Run Tempest
command: tox -e {{tox_envlist}} {{tox_extra_args}} -- \
diff --git a/tempest/api/compute/admin/test_agents.py b/tempest/api/compute/admin/test_agents.py
deleted file mode 100644
index 8fc155b..0000000
--- a/tempest/api/compute/admin/test_agents.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Copyright 2014 NEC Corporation. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-from tempest.api.compute import base
-from tempest import config
-from tempest.lib.common.utils import data_utils
-from tempest.lib import decorators
-
-CONF = config.CONF
-
-
-# TODO(stephenfin): Remove these tests once the nova Ussuri branch goes EOL
-class AgentsAdminTestJSON(base.BaseV2ComputeAdminTest):
- """Tests Compute Agents API"""
-
- @classmethod
- def skip_checks(cls):
- super(AgentsAdminTestJSON, cls).skip_checks()
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise cls.skipException('The os-agents API is not supported.')
-
- @classmethod
- def setup_clients(cls):
- super(AgentsAdminTestJSON, cls).setup_clients()
- cls.client = cls.os_admin.agents_client
-
- @classmethod
- def resource_setup(cls):
- super(AgentsAdminTestJSON, cls).resource_setup()
- cls.params_agent = cls._param_helper(
- hypervisor='common', os='linux', architecture='x86_64',
- version='7.0', url='xxx://xxxx/xxx/xxx',
- md5hash='add6bb58e139be103324d04d82d8f545')
-
- @staticmethod
- def _param_helper(**kwargs):
- rand_key = 'architecture'
- if rand_key in kwargs:
- # NOTE: The rand_name is for avoiding agent conflicts.
- # If you try to create an agent with the same hypervisor,
- # os and architecture as an existing agent, Nova will return
- # an HTTPConflict or HTTPServerError.
- kwargs[rand_key] = data_utils.rand_name(
- prefix=CONF.resource_name_prefix,
- name=kwargs[rand_key])
- return kwargs
-
- @decorators.idempotent_id('1fc6bdc8-0b6d-4cc7-9f30-9b04fabe5b90')
- def test_create_agent(self):
- """Test creating a compute agent"""
- params = self._param_helper(
- hypervisor='kvm', os='win', architecture='x86',
- version='7.0', url='xxx://xxxx/xxx/xxx',
- md5hash='add6bb58e139be103324d04d82d8f545')
- body = self.client.create_agent(**params)['agent']
- self.addCleanup(self.client.delete_agent, body['agent_id'])
- for expected_item, value in params.items():
- self.assertEqual(value, body[expected_item])
-
- @decorators.idempotent_id('dc9ffd51-1c50-4f0e-a820-ae6d2a568a9e')
- def test_update_agent(self):
- """Test updating a compute agent"""
- # Create and update an agent.
- body = self.client.create_agent(**self.params_agent)['agent']
- self.addCleanup(self.client.delete_agent, body['agent_id'])
- agent_id = body['agent_id']
- params = self._param_helper(
- version='8.0', url='xxx://xxxx/xxx/xxx2',
- md5hash='add6bb58e139be103324d04d82d8f547')
- body = self.client.update_agent(agent_id, **params)['agent']
- for expected_item, value in params.items():
- self.assertEqual(value, body[expected_item])
-
- @decorators.idempotent_id('470e0b89-386f-407b-91fd-819737d0b335')
- def test_delete_agent(self):
- """Test deleting a compute agent"""
- body = self.client.create_agent(**self.params_agent)['agent']
- self.client.delete_agent(body['agent_id'])
-
- # Verify the list doesn't contain the deleted agent.
- agents = self.client.list_agents()['agents']
- self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
- agents))
-
- @decorators.idempotent_id('6a326c69-654b-438a-80a3-34bcc454e138')
- def test_list_agents(self):
- """Test listing compute agents"""
- body = self.client.create_agent(**self.params_agent)['agent']
- self.addCleanup(self.client.delete_agent, body['agent_id'])
- agents = self.client.list_agents()['agents']
- self.assertNotEmpty(agents, 'Cannot get any agents.')
- self.assertIn(body['agent_id'], map(lambda x: x['agent_id'], agents))
-
- @decorators.idempotent_id('eabadde4-3cd7-4ec4-a4b5-5a936d2d4408')
- def test_list_agents_with_filter(self):
- """Test listing compute agents by the filter"""
- body = self.client.create_agent(**self.params_agent)['agent']
- self.addCleanup(self.client.delete_agent, body['agent_id'])
- params = self._param_helper(
- hypervisor='xen', os='linux', architecture='x86',
- version='7.0', url='xxx://xxxx/xxx/xxx1',
- md5hash='add6bb58e139be103324d04d82d8f546')
- agent_xen = self.client.create_agent(**params)['agent']
- self.addCleanup(self.client.delete_agent, agent_xen['agent_id'])
-
- agent_id_xen = agent_xen['agent_id']
- agents = (self.client.list_agents(hypervisor=agent_xen['hypervisor'])
- ['agents'])
- self.assertNotEmpty(agents, 'Cannot get any agents.')
- self.assertIn(agent_id_xen, map(lambda x: x['agent_id'], agents))
- self.assertNotIn(body['agent_id'], map(lambda x: x['agent_id'],
- agents))
- for agent in agents:
- self.assertEqual(agent_xen['hypervisor'], agent['hypervisor'])
diff --git a/tempest/api/compute/admin/test_servers.py b/tempest/api/compute/admin/test_servers.py
index be838fc..6c9aafb 100644
--- a/tempest/api/compute/admin/test_servers.py
+++ b/tempest/api/compute/admin/test_servers.py
@@ -207,15 +207,10 @@
self.assertEqual(self.image_ref_alt, rebuilt_image_id)
@decorators.idempotent_id('7a1323b4-a6a2-497a-96cb-76c07b945c71')
- def test_reset_network_inject_network_info(self):
- """Test resetting and injecting network info of a server"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'The resetNetwork server action is not supported.')
-
- # Reset Network of a Server
+ def test_inject_network_info(self):
+ """Test injecting network info of a server"""
+ # Create a server
server = self.create_test_server(wait_until='ACTIVE')
- self.client.reset_network(server['id'])
# Inject the Network Info into Server
self.client.inject_network_info(server['id'])
diff --git a/tempest/api/compute/admin/test_servers_on_multinodes.py b/tempest/api/compute/admin/test_servers_on_multinodes.py
index 013e7d8..b5ee9b1 100644
--- a/tempest/api/compute/admin/test_servers_on_multinodes.py
+++ b/tempest/api/compute/admin/test_servers_on_multinodes.py
@@ -24,7 +24,7 @@
class ServersOnMultiNodesTest(base.BaseV2ComputeAdminTest):
- """Test creating servers on mutiple nodes with scheduler_hints."""
+ """Test creating servers on multiple nodes with scheduler_hints."""
@classmethod
def resource_setup(cls):
super(ServersOnMultiNodesTest, cls).resource_setup()
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 2557e47..ed94af0 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -410,7 +410,7 @@
:param validatable: whether to the server needs to be
validatable. When True, validation resources are acquired via
the `get_class_validation_resources` helper.
- :param kwargs: extra paramaters are passed through to the
+ :param kwargs: extra parameters are passed through to the
`create_test_server` call.
:return: the UUID of the created server.
"""
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 87cedae..d728853 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -71,7 +71,7 @@
self.assertEqual(snapshot_name, image['name'])
except lib_exceptions.TimeoutException as ex:
# If timeout is reached, we don't need to check state,
- # since, it wouldn't be a 'SAVING' state atleast and apart from
+ # since, it wouldn't be a 'SAVING' state at least and apart from
# it, this testcase doesn't have scope for other state transition
# Hence, skip the test.
raise self.skipException("This test is skipped because " + str(ex))
diff --git a/tempest/api/compute/images/test_images_oneserver_negative.py b/tempest/api/compute/images/test_images_oneserver_negative.py
index 275a26f..a245a8a 100644
--- a/tempest/api/compute/images/test_images_oneserver_negative.py
+++ b/tempest/api/compute/images/test_images_oneserver_negative.py
@@ -130,7 +130,7 @@
except lib_exc.TimeoutException as ex:
# Test cannot capture the image saving state.
# If timeout is reached, we don't need to check state,
- # since, it wouldn't be a 'SAVING' state atleast and apart from
+ # since, it wouldn't be a 'SAVING' state at least and apart from
# it, this testcase doesn't have scope for other state transition
# Hence, skip the test.
raise self.skipException("This test is skipped because " + str(ex))
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 6664e15..b7db200 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -185,7 +185,7 @@
class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
- """Test creating server with FQDN hostname and verifying atrributes
+ """Test creating server with FQDN hostname and verifying attributes
Starting Wallaby release, Nova sanitizes freeform characters in
server hostname with dashes. This test verifies the same.
diff --git a/tempest/api/compute/servers/test_multiple_create_negative.py b/tempest/api/compute/servers/test_multiple_create_negative.py
index 3a970dd..d2e2935 100644
--- a/tempest/api/compute/servers/test_multiple_create_negative.py
+++ b/tempest/api/compute/servers/test_multiple_create_negative.py
@@ -40,7 +40,7 @@
@decorators.attr(type=['negative'])
@decorators.idempotent_id('a6f9c2ab-e060-4b82-b23c-4532cb9390ff')
def test_max_count_less_than_one(self):
- """Test creating server with max_count < 1 shoudld fail"""
+ """Test creating server with max_count < 1 should fail"""
invalid_max_count = 0
self.assertRaises(lib_exc.BadRequest, self.create_test_server,
max_count=invalid_max_count)
diff --git a/tempest/api/compute/servers/test_server_metadata.py b/tempest/api/compute/servers/test_server_metadata.py
index 9f93e76..5f35b15 100644
--- a/tempest/api/compute/servers/test_server_metadata.py
+++ b/tempest/api/compute/servers/test_server_metadata.py
@@ -27,13 +27,6 @@
create_default_network = True
@classmethod
- def skip_checks(cls):
- super(ServerMetadataTestJSON, cls).skip_checks()
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise cls.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
- @classmethod
def setup_clients(cls):
super(ServerMetadataTestJSON, cls).setup_clients()
cls.client = cls.servers_client
diff --git a/tempest/api/compute/servers/test_server_metadata_negative.py b/tempest/api/compute/servers/test_server_metadata_negative.py
index 655909c..2059dfa 100644
--- a/tempest/api/compute/servers/test_server_metadata_negative.py
+++ b/tempest/api/compute/servers/test_server_metadata_negative.py
@@ -14,13 +14,10 @@
# under the License.
from tempest.api.compute import base
-from tempest import config
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
-CONF = config.CONF
-
class ServerMetadataNegativeTestJSON(base.BaseV2ComputeTest):
"""Negative tests of server metadata"""
@@ -91,10 +88,6 @@
Raise BadRequest if key in uri does not match the key passed in body.
"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
meta = {'testkey': 'testvalue'}
self.assertRaises(lib_exc.BadRequest,
self.client.set_server_metadata_item,
@@ -104,10 +97,6 @@
@decorators.idempotent_id('0df38c2a-3d4e-4db5-98d8-d4d9fa843a12')
def test_set_metadata_non_existent_server(self):
"""Test setting metadata for a non existent server should fail"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
non_existent_server_id = data_utils.rand_uuid()
meta = {'meta1': 'data1'}
self.assertRaises(lib_exc.NotFound,
@@ -119,10 +108,6 @@
@decorators.idempotent_id('904b13dc-0ef2-4e4c-91cd-3b4a0f2f49d8')
def test_update_metadata_non_existent_server(self):
"""Test updating metadata for a non existent server should fail"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
non_existent_server_id = data_utils.rand_uuid()
meta = {'key1': 'value1', 'key2': 'value2'}
self.assertRaises(lib_exc.NotFound,
@@ -134,10 +119,6 @@
@decorators.idempotent_id('a452f38c-05c2-4b47-bd44-a4f0bf5a5e48')
def test_update_metadata_with_blank_key(self):
"""Test updating server metadata to blank key should fail"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
meta = {'': 'data1'}
self.assertRaises(lib_exc.BadRequest,
self.client.update_server_metadata,
@@ -150,10 +131,6 @@
Should not be able to delete metadata item from a non-existent server.
"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
non_existent_server_id = data_utils.rand_uuid()
self.assertRaises(lib_exc.NotFound,
self.client.delete_server_metadata_item,
@@ -168,10 +145,6 @@
A 403 Forbidden or 413 Overlimit (old behaviour) exception
will be raised while exceeding metadata items limit for project.
"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
quota_set = self.quotas_client.show_quota_set(
self.tenant_id)['quota_set']
quota_metadata = quota_set['metadata_items']
@@ -196,10 +169,6 @@
@decorators.idempotent_id('96100343-7fa9-40d8-80fa-d29ef588ce1c')
def test_set_server_metadata_blank_key(self):
"""Test setting server metadata with blank key should fail"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
meta = {'': 'data1'}
self.assertRaises(lib_exc.BadRequest,
self.client.set_server_metadata,
@@ -209,10 +178,6 @@
@decorators.idempotent_id('64a91aee-9723-4863-be44-4c9d9f1e7d0e')
def test_set_server_metadata_missing_metadata(self):
"""Test setting server metadata without metadata field should fail"""
- if not CONF.compute_feature_enabled.xenapi_apis:
- raise self.skipException(
- 'Metadata is read-only on non-Xen-based deployments.')
-
meta = {'meta1': 'data1'}
self.assertRaises(lib_exc.BadRequest,
self.client.set_server_metadata,
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 97c2774..d6c0324 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -234,7 +234,7 @@
and virtio as the rescue disk.
"""
# This test just check detach fail and does not
- # perfom the detach operation but in cleanup from
+ # perform the detach operation but in cleanup from
# self.attach_volume() it will try to detach the server
# after unrescue the server. Due to that we need to make
# server SSHable before it try to detach, more details are
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 955ba1c..fd05ec6 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -139,7 +139,7 @@
"""Test detaching volume from a rescued server should fail"""
volume = self.create_volume()
# This test just check detach fail and does not
- # perfom the detach operation but in cleanup from
+ # perform the detach operation but in cleanup from
# self.attach_volume() it will try to detach the server
# after unrescue the server. Due to that we need to make
# server SSHable before it try to detach, more details are
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 7ea8f09..e267b0f 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -465,6 +465,73 @@
self._boot_from_multiattach_volume()
@utils.services('image')
+ @decorators.idempotent_id('07eb6686-571c-45f0-9d96-446b120f1121')
+ def test_boot_with_multiattach_volume_direct_lun(self, boot=False):
+ image = self.images_client.show_image(CONF.compute.image_ref)
+ if image.get('hw_scsi_model') != 'virtio-scsi':
+ # NOTE(danms): Technically we don't need this to be virtio-scsi,
+ # but cirros (and other) test images won't see the device unless
+ # they have lsilogic drivers (which is the default). So use this
+ # as sort of the indication that the test should be enabled.
+ self.skip('hw_scsi_model=virtio-scsi not set on image')
+ if not CONF.validation.run_validation:
+ self.skip('validation is required for this test')
+
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+
+ volume = self._create_multiattach_volume(bootable=boot)
+ # Create an image-backed instance with the multi-attach volume as a
+ # block device with device_type=lun
+ bdm = [{'source_type': 'image',
+ 'destination_type': 'local',
+ 'uuid': CONF.compute.image_ref,
+ 'boot_index': 0},
+ {'uuid': volume['id'],
+ 'source_type': 'volume',
+ 'destination_type': 'volume',
+ 'device_type': 'lun',
+ 'disk_bus': 'scsi'}]
+
+ if boot:
+ # If we're booting from it, we don't need the local-from-image
+ # disk, but we need the volume to have a boot_index
+ bdm.pop(0)
+ bdm[0]['boot_index'] = 0
+
+ server = self.create_test_server(
+ validatable=True,
+ validation_resources=validation_resources,
+ block_device_mapping_v2=bdm, wait_until='SSHABLE')
+
+ # Assert the volume is attached to the server.
+ attachments = self.servers_client.list_volume_attachments(
+ server['id'])['volumeAttachments']
+ self.assertEqual(1, len(attachments))
+ self.assertEqual(volume['id'], attachments[0]['volumeId'])
+
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ self.image_ssh_user,
+ self.image_ssh_password,
+ validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.servers_client)
+
+ # Assert the volume appears as a SCSI device
+ command = 'lsblk -S'
+ blks = linux_client.exec_command(command).strip()
+ self.assertIn('\nsda ', blks)
+
+ self.servers_client.delete_server(server['id'])
+ waiters.wait_for_server_termination(self.servers_client, server['id'])
+
+ @utils.services('image')
+ @decorators.idempotent_id('bfe61d6e-767a-4f93-9de8-054355536475')
+ def test_boot_from_multiattach_volume_direct_lun(self, boot=False):
+ self.test_boot_with_multiattach_volume_direct_lun(boot=True)
+
+ @utils.services('image')
@decorators.idempotent_id('885ac48a-2d7a-40c5-ae8b-1993882d724c')
@testtools.skipUnless(CONF.compute_feature_enabled.snapshot,
'Snapshotting is not available.')
diff --git a/tempest/api/network/test_allowed_address_pair.py b/tempest/api/network/test_allowed_address_pair.py
index 5c28e96..01dda06 100644
--- a/tempest/api/network/test_allowed_address_pair.py
+++ b/tempest/api/network/test_allowed_address_pair.py
@@ -108,7 +108,7 @@
# both cases, with and without that "active" attribute, we need to
# removes that field from the allowed_address_pairs which are returned
# by the Neutron server.
- # We could make expected results of those tests to be dependend on the
+ # We could make expected results of those tests to be dependent on the
# available Neutron's API extensions but in that case existing tests
# may fail randomly as all tests are always using same IP addresses
# thus allowed_address_pair may be active=True or active=False.
diff --git a/tempest/api/network/test_floating_ips.py b/tempest/api/network/test_floating_ips.py
index e39ad08..07f0903 100644
--- a/tempest/api/network/test_floating_ips.py
+++ b/tempest/api/network/test_floating_ips.py
@@ -129,7 +129,7 @@
self.assertIsNone(updated_floating_ip['fixed_ip_address'])
self.assertIsNone(updated_floating_ip['router_id'])
- # Explicity test deletion of floating IP
+ # Explicitly test deletion of floating IP
self.floating_ips_client.delete_floatingip(created_floating_ip['id'])
@decorators.idempotent_id('e1f6bffd-442f-4668-b30e-df13f2705e77')
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index bd3e360..a0c6342 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -118,7 +118,7 @@
@classmethod
def skip_checks(cls):
super(TagsExtTest, cls).skip_checks()
- # Added condition to support backward compatiblity since
+ # Added condition to support backward compatibility since
# tag-ext has been renamed to standard-attr-tag
if not (utils.is_extension_enabled('tag-ext', 'network') or
utils.is_extension_enabled('standard-attr-tag', 'network')):
diff --git a/tempest/api/object_storage/test_container_sync.py b/tempest/api/object_storage/test_container_sync.py
index e2c9d54..2524def 100644
--- a/tempest/api/object_storage/test_container_sync.py
+++ b/tempest/api/object_storage/test_container_sync.py
@@ -142,7 +142,7 @@
"""Test container synchronization"""
def make_headers(cont, cont_client):
# tell first container to synchronize to a second
- # use rsplit with a maxsplit of 1 to ensure ipv6 adresses are
+ # use rsplit with a maxsplit of 1 to ensure ipv6 addresses are
# handled properly as well
client_proxy_ip = urlparse.urlparse(
cont_client.base_url).netloc.rsplit(':', 1)[0]
diff --git a/tempest/api/volume/test_volumes_actions.py b/tempest/api/volume/test_volumes_actions.py
index 150677d..8cf44be 100644
--- a/tempest/api/volume/test_volumes_actions.py
+++ b/tempest/api/volume/test_volumes_actions.py
@@ -119,6 +119,13 @@
self.images_client.delete_image,
image_id)
waiters.wait_for_image_status(self.images_client, image_id, 'active')
+ # This is required for the optimized upload volume path.
+ # New location APIs are async so we need to wait for the location
+ # import task to complete.
+ # This should work with old location API since we don't fail if there
+ # are no tasks for the image
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image_id, 'success')
waiters.wait_for_volume_resource_status(self.volumes_client,
self.volume['id'], 'available')
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index a8aafe9..49fcaf2 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -424,7 +424,7 @@
class _WebSocket(object):
def __init__(self, client_socket, url):
- """Contructor for the WebSocket wrapper to the socket."""
+ """Constructor for the WebSocket wrapper to the socket."""
self._socket = client_socket
# cached stream for early frames.
self.cached_stream = b''
diff --git a/tempest/common/custom_matchers.py b/tempest/common/custom_matchers.py
index b0bf5b2..8d257b0 100644
--- a/tempest/common/custom_matchers.py
+++ b/tempest/common/custom_matchers.py
@@ -53,7 +53,7 @@
# Check common headers for all HTTP methods.
#
# Please note that for 1xx and 204 responses Content-Length presence
- # is not checked intensionally. According to RFC 7230 a server MUST
+ # is not checked intentionally. According to RFC 7230 a server MUST
# NOT send the header in such responses. Thus, clients should not
# depend on this header. However, the standard does not require them
# to validate the server's behavior. We leverage that to not refuse
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index ddc6047..d65b491 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -606,7 +606,7 @@
floating IPs.
:param server: The server JSON dict on which to wait.
:param floating_ip: The floating IP JSON dict on which to wait.
- :param wait_for_disassociate: Boolean indiating whether to wait for
+ :param wait_for_disassociate: Boolean indicating whether to wait for
disassociation instead of association.
"""
diff --git a/tempest/config.py b/tempest/config.py
index 507d116..6eee88a 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -595,18 +595,6 @@
help='Does the test environment support attaching a volume to '
'more than one instance? This depends on hypervisor and '
'volume backend/type and compute API version 2.60.'),
- cfg.BoolOpt('xenapi_apis',
- default=False,
- help='Does the test environment support the XenAPI-specific '
- 'APIs: os-agents, writeable server metadata and the '
- 'resetNetwork server action? '
- 'These were removed in Victoria alongside the XenAPI '
- 'virt driver.',
- deprecated_for_removal=True,
- deprecated_reason="On Nova side, XenAPI virt driver and the "
- "APIs that only worked with that driver "
- "have been removed and there's nothing to "
- "test after Ussuri."),
cfg.BoolOpt('ide_bus',
default=True,
help='Does the test environment support attaching devices '
@@ -684,19 +672,19 @@
'are current one. In future, Tempest will '
'test v2 APIs only so this config option '
'will be removed.'),
- # Image import feature is setup in devstack victoria onwards.
- # Once all stable branches setup the same via glance standalone
- # mode or with uwsgi, we can remove this config option.
cfg.BoolOpt('import_image',
- default=False,
- help="Is image import feature enabled"),
- # NOTE(danms): Starting mid-Wallaby glance began enforcing the
- # previously-informal requirement that os_glance_* properties are
- # reserved for internal use. Thus, we can only run these checks
- # if we know we are on a new enough glance.
+ default=True,
+ help="Is image import feature enabled",
+ deprecated_for_removal=True,
+ deprecated_reason='Issue with image import in WSGI mode was '
+ 'fixed in Victoria, and this feature works '
+ 'in any deployment architecture now.'),
cfg.BoolOpt('os_glance_reserved',
- default=False,
- help="Should we check that os_glance namespace is reserved"),
+ default=True,
+ help="Should we check that os_glance namespace is reserved",
+ deprecated_for_removal=True,
+ deprecated_reason='os_glance namespace is always reserved '
+ 'since Wallaby'),
cfg.BoolOpt('manage_locations',
default=False,
help=('Is show_multiple_locations enabled in glance. '
@@ -1078,7 +1066,11 @@
default=True,
help='Does the cloud support extending the size of a volume '
'which has snapshot? Some drivers do not support this '
- 'operation.')
+ 'operation.'),
+ cfg.StrOpt('volume_types_for_data_volume',
+ default=None,
+ help='Volume types used for data volumes. Multiple volume '
+ 'types can be assigned.'),
]
@@ -1177,7 +1169,7 @@
cfg.StrOpt('dhcp_client',
default='udhcpc',
choices=["udhcpc", "dhclient", "dhcpcd", ""],
- help='DHCP client used by images to renew DCHP lease. '
+ help='DHCP client used by images to renew DHCP lease. '
'If left empty, update operation will be skipped. '
'Supported clients: "udhcpc", "dhclient", "dhcpcd"'),
cfg.StrOpt('protocol',
diff --git a/tempest/lib/api_schema/response/compute/v2_80/__init__.py b/tempest/lib/api_schema/response/compute/v2_80/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_80/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_80/migrations.py b/tempest/lib/api_schema/response/compute/v2_80/migrations.py
new file mode 100644
index 0000000..f2fa008
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_80/migrations.py
@@ -0,0 +1,40 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_59 import migrations
+
+###########################################################################
+#
+# 2.80:
+#
+# The user_id and project_id values are now returned in the response body in
+# addition to the migration id for the following API responses:
+#
+# - GET /os-migrations
+#
+###########################################################################
+
+user_id = {'type': 'string'}
+project_id = {'type': 'string'}
+
+list_migrations = copy.deepcopy(migrations.list_migrations)
+
+list_migrations['response_body']['properties']['migrations']['items'][
+ 'properties'].update({
+ 'user_id': user_id,
+ 'project_id': project_id
+ })
+
+list_migrations['response_body']['properties']['migrations']['items'][
+ 'required'].extend(['user_id', 'project_id'])
diff --git a/tempest/lib/api_schema/response/compute/v2_89/__init__.py b/tempest/lib/api_schema/response/compute/v2_89/__init__.py
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_89/__init__.py
diff --git a/tempest/lib/api_schema/response/compute/v2_89/servers.py b/tempest/lib/api_schema/response/compute/v2_89/servers.py
new file mode 100644
index 0000000..debf0dc
--- /dev/null
+++ b/tempest/lib/api_schema/response/compute/v2_89/servers.py
@@ -0,0 +1,84 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import copy
+
+from tempest.lib.api_schema.response.compute.v2_79 import servers as servers279
+
+
+###########################################################################
+#
+# 2.89:
+#
+# The attachment_id and bdm_uuid parameters are now returned in the response body
+# of the following calls:
+#
+# - GET /servers/{server_id}/os-volume_attachments
+# - GET /servers/{server_id}/os-volume_attachments/{volume_id}
+# - POST /servers/{server_id}/os-volume_attachments
+###########################################################################
+
+attach_volume = copy.deepcopy(servers279.attach_volume)
+
+show_volume_attachment = copy.deepcopy(servers279.show_volume_attachment)
+
+list_volume_attachments = copy.deepcopy(servers279.list_volume_attachments)
+
+# Remove properties
+# 'id' is available until v2.88
+show_volume_attachment['response_body']['properties'][
+ 'volumeAttachment']['properties'].pop('id')
+show_volume_attachment['response_body']['properties'][
+ 'volumeAttachment']['required'].remove('id')
+list_volume_attachments['response_body']['properties'][
+ 'volumeAttachments']['items']['properties'].pop('id')
+list_volume_attachments['response_body']['properties'][
+ 'volumeAttachments']['items']['required'].remove('id')
+
+
+# Add new properties
+new_properties = {
+ 'attachment_id': {'type': 'string', 'format': 'uuid'},
+ 'bdm_uuid': {'type': 'string', 'format': 'uuid'}
+}
+
+show_volume_attachment['response_body']['properties'][
+ 'volumeAttachment']['properties'].update(new_properties)
+show_volume_attachment['response_body']['properties'][
+ 'volumeAttachment']['required'].extend(new_properties.keys())
+list_volume_attachments['response_body']['properties'][
+ 'volumeAttachments']['items']['properties'].update(new_properties)
+list_volume_attachments['response_body']['properties'][
+ 'volumeAttachments']['items']['required'].extend(new_properties.keys())
+
+
+# NOTE(zhufl): Below are the unchanged schema in this microversion. We
+# need to keep this schema in this file to have the generic way to select the
+# right schema based on self.schema_versions_info mapping in service client.
+# ****** Schemas unchanged since microversion 2.75 ***
+rebuild_server = copy.deepcopy(servers279.rebuild_server)
+rebuild_server_with_admin_pass = copy.deepcopy(
+ servers279.rebuild_server_with_admin_pass)
+update_server = copy.deepcopy(servers279.update_server)
+get_server = copy.deepcopy(servers279.get_server)
+list_servers_detail = copy.deepcopy(servers279.list_servers_detail)
+list_servers = copy.deepcopy(servers279.list_servers)
+show_server_diagnostics = copy.deepcopy(servers279.show_server_diagnostics)
+get_remote_consoles = copy.deepcopy(servers279.get_remote_consoles)
+list_tags = copy.deepcopy(servers279.list_tags)
+update_all_tags = copy.deepcopy(servers279.update_all_tags)
+delete_all_tags = copy.deepcopy(servers279.delete_all_tags)
+check_tag_existence = copy.deepcopy(servers279.check_tag_existence)
+update_tag = copy.deepcopy(servers279.update_tag)
+delete_tag = copy.deepcopy(servers279.delete_tag)
+show_instance_action = copy.deepcopy(servers279.show_instance_action)
+create_backup = copy.deepcopy(servers279.create_backup)
diff --git a/tempest/lib/api_schema/response/volume/volumes.py b/tempest/lib/api_schema/response/volume/volumes.py
index 900e5ef..9b5dfda 100644
--- a/tempest/lib/api_schema/response/volume/volumes.py
+++ b/tempest/lib/api_schema/response/volume/volumes.py
@@ -236,7 +236,7 @@
}
}
-# TODO(zhufl): This is under discussion, so will be merged in a seperate patch.
+# TODO(zhufl): This is under discussion, so will be merged in a separate patch.
# https://bugs.launchpad.net/cinder/+bug/1880566
# upload_volume = {
# 'status_code': [202],
diff --git a/tempest/lib/cmd/check_uuid.py b/tempest/lib/cmd/check_uuid.py
index 466222d..af1112d 100755
--- a/tempest/lib/cmd/check_uuid.py
+++ b/tempest/lib/cmd/check_uuid.py
@@ -266,7 +266,7 @@
"groups! This is not valid according to the PEP8 "
"style guide. " % source_path)
- # Divide grouped_imports into groupes based on PEP8 style guide
+ # Divide grouped_imports into groups based on PEP8 style guide
pep8_groups = {}
package_name = self.package.__name__.split(".")[0]
for key in grouped_imports:
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index 99647d4..6814373 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -51,7 +51,7 @@
:param str identity_admin_role: The role name to use for admin
:param list extra_roles: A list of strings for extra roles that should
be assigned to all created users
- :param bool neutron_available: Whether we are running in an environemnt
+ :param bool neutron_available: Whether we are running in an environment
with neutron
:param bool create_networks: Whether dynamic project networks should be
created or not
@@ -453,7 +453,7 @@
# NOTE(gmann): For 'domain' and 'system' scoped token, there is no
# project_id so we are skipping the network creation for both
# scope.
- # We need to create nework resource once per project.
+ # We need to create network resource once per project.
if (not project_id and (not scope or scope == 'project')):
if (self.neutron_available and self.create_networks):
network, subnet, router = self._create_network_resources(
diff --git a/tempest/lib/decorators.py b/tempest/lib/decorators.py
index 7d54c1a..144450b 100644
--- a/tempest/lib/decorators.py
+++ b/tempest/lib/decorators.py
@@ -198,7 +198,7 @@
There are functions created as classmethod and the cleanup
was managed by the class with addClassResourceCleanup,
In case the function called from a class level (resource_setup) its ok
- But when it is called from testcase level there is no reson to delete the
+ But when it is called from testcase level there is no reason to delete the
resource when class tears down.
The testcase results will not reflect the resources cleanup because test
diff --git a/tempest/lib/services/compute/migrations_client.py b/tempest/lib/services/compute/migrations_client.py
index 8a6e62a..d43fe83 100644
--- a/tempest/lib/services/compute/migrations_client.py
+++ b/tempest/lib/services/compute/migrations_client.py
@@ -21,6 +21,8 @@
as schemav223
from tempest.lib.api_schema.response.compute.v2_59 import migrations \
as schemav259
+from tempest.lib.api_schema.response.compute.v2_80 import migrations \
+ as schemav280
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -29,7 +31,8 @@
schema_versions_info = [
{'min': None, 'max': '2.22', 'schema': schema},
{'min': '2.23', 'max': '2.58', 'schema': schemav223},
- {'min': '2.59', 'max': None, 'schema': schemav259}]
+ {'min': '2.59', 'max': '2.79', 'schema': schemav259},
+ {'min': '2.80', 'max': None, 'schema': schemav280}]
def list_migrations(self, **params):
"""List all migrations.
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 7e3b99f..1b93f91 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -43,6 +43,7 @@
from tempest.lib.api_schema.response.compute.v2_75 import servers as schemav275
from tempest.lib.api_schema.response.compute.v2_79 import servers as schemav279
from tempest.lib.api_schema.response.compute.v2_8 import servers as schemav28
+from tempest.lib.api_schema.response.compute.v2_89 import servers as schemav289
from tempest.lib.api_schema.response.compute.v2_9 import servers as schemav29
from tempest.lib.common import rest_client
from tempest.lib.services.compute import base_compute_client
@@ -73,7 +74,8 @@
{'min': '2.71', 'max': '2.72', 'schema': schemav271},
{'min': '2.73', 'max': '2.74', 'schema': schemav273},
{'min': '2.75', 'max': '2.78', 'schema': schemav275},
- {'min': '2.79', 'max': None, 'schema': schemav279}]
+ {'min': '2.79', 'max': '2.88', 'schema': schemav279},
+ {'min': '2.89', 'max': None, 'schema': schemav289}]
def __init__(self, auth_provider, service, region,
enable_instance_password=True, **kwargs):
@@ -896,7 +898,11 @@
API reference:
https://docs.openstack.org/api-ref/compute/#evacuate-server-evacuate-action
"""
- if self.enable_instance_password:
+ api_version = self.get_headers().get(self.api_microversion_header_name)
+
+ if not api_version and self.enable_instance_password:
+ evacuate_schema = schema.evacuate_server_with_admin_pass
+ elif api_version < '2.14':
evacuate_schema = schema.evacuate_server_with_admin_pass
else:
evacuate_schema = schema.evacuate_server
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index 0608d47..a6a1623 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -159,7 +159,7 @@
"""
url = 'images/%s/file' % image_id
- # We are going to do chunked transfert, so split the input data
+ # We are going to do chunked transfer, so split the input data
# info fixed-sized chunks.
headers = {'Content-Type': 'application/octet-stream'}
data = iter(functools.partial(data.read, CHUNKSIZE), b'')
diff --git a/tempest/lib/services/object_storage/container_client.py b/tempest/lib/services/object_storage/container_client.py
index bdca0d0..47edf70 100644
--- a/tempest/lib/services/object_storage/container_client.py
+++ b/tempest/lib/services/object_storage/container_client.py
@@ -15,7 +15,6 @@
from urllib import parse as urllib
-import debtcollector.moves
from defusedxml import ElementTree as etree
from oslo_serialization import jsonutils as json
@@ -64,7 +63,7 @@
delete_metadata=None,
create_update_metadata_prefix='X-Container-Meta-',
delete_metadata_prefix='X-Remove-Container-Meta-'):
- """Creates, Updates or deletes an containter metadata entry.
+ """Creates, Updates or deletes an container metadata entry.
Container Metadata can be created, updated or deleted based on
metadata header or value. For detailed info, please refer to the
@@ -85,11 +84,6 @@
self.expected_success(204, resp.status)
return resp, body
- update_container_metadata = debtcollector.moves.moved_function(
- create_update_or_delete_container_metadata,
- 'update_container_metadata', __name__,
- version='Queens', removal_version='Rocky')
-
def list_container_metadata(self, container_name):
"""List all container metadata."""
url = str(container_name)
@@ -126,7 +120,3 @@
self.expected_success([200, 204], resp.status)
return resp, body
-
- list_container_contents = debtcollector.moves.moved_function(
- list_container_objects, 'list_container_contents', __name__,
- version='Queens', removal_version='Rocky')
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index c6f8973..95f3ffc 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -86,7 +86,7 @@
def migrate_volume(self, volume_id, **kwargs):
"""Migrate a volume to a new backend
- For a full list of available parameters please refer to the offical
+ For a full list of available parameters please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/index.html#migrate-a-volume
@@ -173,7 +173,7 @@
resp, body = self.post(url, post_body)
body = json.loads(body)
# TODO(zhufl): This is under discussion, so will be merged
- # in a seperate patch.
+ # in a separate patch.
# https://bugs.launchpad.net/cinder/+bug/1880566
# self.validate_response(schema.upload_volume, resp, body)
self.expected_success(202, resp.status)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 5f30909..d51e7e5 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -751,6 +751,31 @@
return rules
+ def create_and_add_security_group_to_server(self, server):
+ """Create a security group and add it to the server.
+
+ :param server: The server to add the security group to.
+ :return: None; the created security group is added to the server in place.
+ """
+
+ secgroup = self.create_security_group()
+ self.servers_client.add_security_group(server['id'],
+ name=secgroup['name'])
+ self.addCleanup(self.servers_client.remove_security_group,
+ server['id'], name=secgroup['name'])
+
+ def wait_for_secgroup_add():
+ body = (self.servers_client.show_server(server['id'])
+ ['server'])
+ return {'name': secgroup['name']} in body['security_groups']
+
+ if not test_utils.call_until_true(wait_for_secgroup_add,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ msg = ('Timed out waiting for adding security group %s to server '
+ '%s' % (secgroup['id'], server['id']))
+ raise lib_exc.TimeoutException(msg)
+
def get_remote_client(self, ip_address, username=None, private_key=None,
server=None):
"""Get a SSH client to a remote server
@@ -1177,6 +1202,15 @@
self.assertIsNone(floating_ip['port_id'])
return floating_ip
+ def create_file(self, ip_address, path, private_key=None, server=None,
+ username=None):
+ """Create a file on a remote server"""
+ ssh_client = self.get_remote_client(ip_address,
+ private_key=private_key,
+ server=server,
+ username=username)
+ ssh_client.exec_command('sudo mkdir -p %s' % path)
+
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None, server=None, username=None,
fs='vfat'):
diff --git a/tempest/scenario/test_instances_with_cinder_volumes.py b/tempest/scenario/test_instances_with_cinder_volumes.py
new file mode 100644
index 0000000..5f33b49
--- /dev/null
+++ b/tempest/scenario/test_instances_with_cinder_volumes.py
@@ -0,0 +1,225 @@
+# Copyright 2024 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslo_log import log as logging
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+from tempest.scenario import manager
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TestInstancesWithCinderVolumes(manager.ScenarioTest):
+ """This is cinder volumes test.
+
+ Tests are below:
+ * test_instances_with_cinder_volumes_on_all_compute_nodes
+ """
+
+ compute_min_microversion = '2.60'
+
+ @decorators.idempotent_id('d0e3c1a3-4b0a-4b0e-8b0a-4b0e8b0a4b0e')
+ @decorators.attr(type=['slow', 'multinode'])
+ @utils.services('compute', 'volume', 'image', 'network')
+ def test_instances_with_cinder_volumes_on_all_compute_nodes(self):
+ """Test instances with cinder volumes launches on all compute nodes
+
+ Steps:
+ 1. Create an image
+ 2. Create a keypair
+ 3. Create a bootable volume from the image, using the given volume
+ type
+ 4. Boot an instance from the bootable volume on each available
+ compute node, up to CONF.compute.min_compute_nodes
+ 5. Create a volume using each volume_types_for_data_volume on all
+ available compute nodes, up to CONF.compute.min_compute_nodes.
+ Total number of volumes is equal to
+ compute nodes * len(volume_types_for_data_volume)
+ 6. Attach volumes to the instances
+ 7. Assign floating IP to all instances
+ 8. Configure security group for ssh access to all instances
+ 9. Confirm ssh access to all instances
+ 10. Run write test to all volumes through ssh connection per
+ instance
+ 11. Clean up the resources: instances, volumes, keypair and image
+ """
+ boot_volume_type = (CONF.volume.volume_type or
+ self.create_volume_type()['name'])
+
+ # create an image
+ image = self.image_create()
+
+ # create keypair
+ keypair = self.create_keypair()
+
+ # check all available zones for booting instances
+ available_zone = \
+ self.os_admin.availability_zone_client.list_availability_zones(
+ detail=True)['availabilityZoneInfo']
+
+ hosts = []
+ for zone in available_zone:
+ if zone['zoneState']['available']:
+ for host in zone['hosts']:
+ if 'nova-compute' in zone['hosts'][host] and \
+ zone['hosts'][host]['nova-compute']['available'] and \
+ not host.endswith('-ironic'):
+ hosts.append({'zone': zone['zoneName'],
+ 'host_name': host})
+
+ # fail if there are fewer hosts than the minimal number of instances
+ if len(hosts) < CONF.compute.min_compute_nodes:
+ raise exceptions.InvalidConfiguration(
+ "Host list %s is shorter than min_compute_nodes. " % hosts)
+
+ # get volume types
+ volume_types = []
+ if CONF.volume_feature_enabled.volume_types_for_data_volume:
+ types = CONF.volume_feature_enabled.volume_types_for_data_volume
+ volume_types = types.split(',')
+ else:
+ # no user-specified volume types; create 2 default ones
+ volume_types.append(self.create_volume_type()['name'])
+ volume_types.append(self.create_volume_type()['name'])
+
+ hosts_to_boot_servers = hosts[:CONF.compute.min_compute_nodes]
+ LOG.debug("List of hosts selected to boot servers %s: ",
+ hosts_to_boot_servers)
+
+ # create the data volumes up front, without waiting for them to become
+ # available, and save them in a list
+ created_volumes = []
+ for host in hosts_to_boot_servers:
+ for volume_type in volume_types:
+ created_volumes.append(
+ self.create_volume(volume_type=volume_type,
+ wait_until=None)
+ )
+
+ bootable_volumes = []
+ for host in hosts_to_boot_servers:
+ # create boot volume from image and of the given volume type
+ bootable_volumes.append(
+ self.create_volume(
+ imageRef=image, volume_type=boot_volume_type,
+ wait_until=None)
+ )
+
+ # boot server
+ servers = []
+
+ for bootable_volume in bootable_volumes:
+
+ # wait for bootable volumes to become available
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, bootable_volume['id'], 'available')
+
+ # create an instance from bootable volume
+ server = self.boot_instance_from_resource(
+ source_id=bootable_volume['id'],
+ source_type='volume',
+ keypair=keypair,
+ wait_until=None
+ )
+ servers.append(server)
+
+ start = 0
+ end = len(volume_types)
+ for server in servers:
+ attached_volumes = []
+
+ # wait for server to become active
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ # attach volumes to the instances
+ for volume in created_volumes[start:end]:
+
+ # wait for volume to become available
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, volume['id'], 'available')
+
+ attached_volume = self.nova_volume_attach(server, volume)
+ attached_volumes.append(attached_volume)
+ LOG.debug("Attached volume %s to server %s",
+ attached_volume['id'], server['id'])
+
+ # assign floating ip
+ floating_ip = None
+ if (CONF.network_feature_enabled.floating_ips and
+ CONF.network.floating_network_name):
+ fip = self.create_floating_ip(server)
+ floating_ip = self.associate_floating_ip(
+ fip, server)
+ ssh_ip = floating_ip['floating_ip_address']
+ else:
+ ssh_ip = self.get_server_ip(server)
+
+ # create security group
+ self.create_and_add_security_group_to_server(server)
+
+ # confirm ssh access
+ self.linux_client = self.get_remote_client(
+ ssh_ip, private_key=keypair['private_key'],
+ server=server
+ )
+
+ # run write test on all volumes
+ for volume in attached_volumes:
+
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, volume['id'], 'in-use')
+
+ # get the mount path
+ mount_path = f"/mnt/{volume['attachments'][0]['device'][5:]}"
+
+ # create file for mounting on server
+ self.create_file(ssh_ip, mount_path,
+ private_key=keypair['private_key'],
+ server=server)
+
+ # volume['attachments'][0]['device'] is a path like /dev/vdb;
+ # strip the leading '/dev/' (first 5 chars) to get the device name
+ timestamp_before = self.create_timestamp(
+ ssh_ip, private_key=keypair['private_key'], server=server,
+ dev_name=volume['attachments'][0]['device'][5:],
+ mount_path=mount_path
+ )
+ timestamp_after = self.get_timestamp(
+ ssh_ip, private_key=keypair['private_key'], server=server,
+ dev_name=volume['attachments'][0]['device'][5:],
+ mount_path=mount_path
+ )
+ self.assertEqual(timestamp_before, timestamp_after)
+
+ # delete volume
+ self.nova_volume_detach(server, volume)
+ self.volumes_client.delete_volume(volume['id'])
+
+ if floating_ip:
+ # delete the floating IP; this should refresh the server
+ # addresses
+ self.disassociate_floating_ip(floating_ip)
+ waiters.wait_for_server_floating_ip(
+ self.servers_client, server, floating_ip,
+ wait_for_disassociate=True)
+
+ start += len(volume_types)
+ end += len(volume_types)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 6372c6b..543be31 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -19,9 +19,7 @@
from tempest.common import utils
from tempest.common import waiters
from tempest import config
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest.lib import exceptions
from tempest.scenario import manager
CONF = config.CONF
@@ -73,25 +71,6 @@
disks = self.linux_client.get_disks()
self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
- def create_and_add_security_group_to_server(self, server):
- secgroup = self.create_security_group()
- self.servers_client.add_security_group(server['id'],
- name=secgroup['name'])
- self.addCleanup(self.servers_client.remove_security_group,
- server['id'], name=secgroup['name'])
-
- def wait_for_secgroup_add():
- body = (self.servers_client.show_server(server['id'])
- ['server'])
- return {'name': secgroup['name']} in body['security_groups']
-
- if not test_utils.call_until_true(wait_for_secgroup_add,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- msg = ('Timed out waiting for adding security group %s to server '
- '%s' % (secgroup['id'], server['id']))
- raise exceptions.TimeoutException(msg)
-
@decorators.attr(type='slow')
@decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
@utils.services('compute', 'volume', 'image', 'network')
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 3a93f74..911ff42 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -20,6 +20,7 @@
from tempest.common.utils import net_downtime
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import api_version_request
from tempest.lib import decorators
from tempest.scenario import manager
@@ -193,8 +194,11 @@
# check if microversion is less than 2.25 because of
# disk_over_commit is depracted since compute api version 2.25
# if min_microversion is None, it runs on version < 2.25
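+ # compare as APIVersionRequest objects rather than strings so that,
+ # for example, '2.100' is not treated as lower than '2.25'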
+ min_v = api_version_request.APIVersionRequest(
+ CONF.compute.min_microversion)
+ api_v = api_version_request.APIVersionRequest('2.25')
if not migration and (CONF.compute.min_microversion is None or
- CONF.compute.min_microversion < '2.25'):
+ min_v < api_v):
migration_kwargs['disk_over_commit'] = False
if dest_host:
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 7b819e0..fb68e46 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -179,8 +179,7 @@
def _check_public_network_connectivity(
self, should_connect=True, msg=None,
should_check_floating_ip_status=True, mtu=None):
- """Verifies connectivty to a VM via public network and floating IP
-
+ """Verifies connectivity to a VM via public network and floating IP
and verifies floating IP has resource status is correct.
:param should_connect: bool. determines if connectivity check is
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 92dbffb..e060b0f 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -137,7 +137,7 @@
# Make sure the machine ssh-able before attaching the volume
# Just a live machine is responding
- # for device attache/detach as expected
+ # for device attach/detach as expected
linux_client = self.get_remote_client(
ip_for_snapshot, private_key=keypair['private_key'],
server=server_from_snapshot)
diff --git a/tempest/test.py b/tempest/test.py
index a766367..173bfab 100644
--- a/tempest/test.py
+++ b/tempest/test.py
@@ -646,7 +646,7 @@
then be run.
Cleanup functions are always called during the test class tearDown
- fixture, even if an exception occured during setUp or tearDown.
+ fixture, even if an exception occurred during setUp or tearDown.
"""
cls._class_cleanups.append((fn, arguments, keywordArguments))
diff --git a/tempest/test_discover/plugins.py b/tempest/test_discover/plugins.py
index 1d69d9d..f2e809b 100644
--- a/tempest/test_discover/plugins.py
+++ b/tempest/test_discover/plugins.py
@@ -58,7 +58,7 @@
help="Whether or not my service is available")
# Note: as long as the group is listed in get_opt_lists,
- # it will be possible to access its optins in the plugin code
+ # it will be possible to access its options in the plugin code
# via ("-" in the group name are replaces with "_"):
# CONF.my_service.<option_name>
my_service_group = cfg.OptGroup(name="my-service",
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 8a1158d..154d8d1 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -252,7 +252,7 @@
@mock.patch('tempest.lib.auth.get_credentials')
def test_get_credentials_v3_no_domain(self, mock_auth_get_credentials):
- expected_uri = 'https://v3.identity.exmaple.com'
+ expected_uri = 'https://v3.identity.example.com'
expected_result = 'my_creds'
expected_domain = 'my_domain'
mock_auth_get_credentials.return_value = expected_result
@@ -272,7 +272,7 @@
@mock.patch('tempest.lib.auth.get_credentials')
def test_get_credentials_v3_domain(self, mock_auth_get_credentials):
- expected_uri = 'https://v3.identity.exmaple.com'
+ expected_uri = 'https://v3.identity.example.com'
expected_result = 'my_creds'
expected_domain = 'my_domain'
mock_auth_get_credentials.return_value = expected_result
@@ -291,7 +291,7 @@
@mock.patch('tempest.lib.auth.get_credentials')
def test_get_credentials_v3_system(self, mock_auth_get_credentials):
- expected_uri = 'https://v3.identity.exmaple.com'
+ expected_uri = 'https://v3.identity.example.com'
expected_result = 'my_creds'
mock_auth_get_credentials.return_value = expected_result
cfg.CONF.set_default('uri_v3', expected_uri, 'identity')
diff --git a/tempest/tests/lib/common/utils/test_data_utils.py b/tempest/tests/lib/common/utils/test_data_utils.py
index a0267d0..06a7805 100644
--- a/tempest/tests/lib/common/utils/test_data_utils.py
+++ b/tempest/tests/lib/common/utils/test_data_utils.py
@@ -79,7 +79,7 @@
self.assertEqual(len(actual), 3)
self.assertRegex(actual, "[A-Za-z0-9~!@#%^&*_=+]{3}")
actual2 = data_utils.rand_password(2)
- # NOTE(masayukig): Originally, we checked that the acutal and actual2
+ # NOTE(masayukig): Originally, we checked that the actual and actual2
# are different each other. But only 3 letters can be the same value
# in a very rare case. So, we just check the length here, too,
# just in case.
diff --git a/tempest/tests/lib/services/base.py b/tempest/tests/lib/services/base.py
index 924f9f2..fd4bc17 100644
--- a/tempest/tests/lib/services/base.py
+++ b/tempest/tests/lib/services/base.py
@@ -54,7 +54,7 @@
``assert_called_once_with(foo='bar')`` is called.
* If mock_args='foo' then ``assert_called_once_with('foo')``
is called.
- :param resp_as_string: Whether response body is retruned as string.
+ :param resp_as_string: Whether response body is returned as string.
This is for service client methods which return ResponseBodyData
object.
:param kwargs: kwargs that are passed to function.
diff --git a/tempest/tests/lib/test_ssh.py b/tempest/tests/lib/test_ssh.py
index 13870ba..0ba6ed3 100644
--- a/tempest/tests/lib/test_ssh.py
+++ b/tempest/tests/lib/test_ssh.py
@@ -162,7 +162,7 @@
client = ssh.Client('localhost', 'root', timeout=timeout)
# We need to mock LOG here because LOG.info() calls time.time()
- # in order to preprend a timestamp.
+ # in order to prepend a timestamp.
with mock.patch.object(ssh, 'LOG'):
self.assertRaises(exceptions.SSHTimeout,
client._get_ssh_connection)
diff --git a/tox.ini b/tox.ini
index 51c38f2..fcdf6ff 100644
--- a/tox.ini
+++ b/tox.ini
@@ -154,7 +154,7 @@
sitepackages = {[tempestenv]sitepackages}
setenv = {[tempestenv]setenv}
deps = {[tempestenv]deps}
-# But exlcude the extra tests mentioned in tools/tempest-extra-tests-list.txt
+# But exclude the extra tests mentioned in tools/tempest-extra-tests-list.txt
regex = '(^tempest\.scenario.*)|(^tempest\.serial_tests)|(?!.*\[.*\bslow\b.*\])(^tempest\.api)'
commands =
find . -type f -name "*.pyc" -delete
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index acd65d8..2fd6e36 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -31,7 +31,7 @@
description: |
Integration test of IPv6-only deployments. This job runs
smoke and IPv6 relates tests only. Basic idea is to test
- whether OpenStack Services listen on IPv6 addrress or not.
+ whether OpenStack Services listen on IPv6 address or not.
timeout: 10800
vars:
tox_envlist: ipv6-only
@@ -88,7 +88,7 @@
devstack_plugins:
neutron: https://opendev.org/openstack/neutron
devstack_services:
- # Enbale horizon so that we can run horizon test.
+ # Enable horizon so that we can run horizon test.
horizon: true
- job:
@@ -221,7 +221,7 @@
tox_envlist: integrated-object-storage
devstack_localrc:
# NOTE(gmann): swift is not ready on python3 yet and devstack
- # install it on python2.7 only. But settting the USE_PYTHON3
+ # install it on python2.7 only. But setting the USE_PYTHON3
# for future once swift is ready on py3.
USE_PYTHON3: true
@@ -243,7 +243,7 @@
name: tempest-multinode-full-py3
parent: tempest-multinode-full-base
nodeset: openstack-two-node-jammy
- # This job runs on ubuntu Jammy and after stable/zed.
+ # This job runs on ubuntu Jammy and after unmaintained/zed.
branches:
regex: ^.*/(victoria|wallaby|xena|yoga|zed)$
negate: true
@@ -389,15 +389,7 @@
This job runs the Tempest tests with scope and new defaults enabled.
vars:
devstack_localrc:
- # Enabeling the scope and new defaults for services.
- # NOTE: (gmann) We need to keep keystone scope check disable as
- # services (except ironic) does not support the system scope and
- # they need keystone to continue working with project scope. Until
- # Keystone policies are changed to work for both system as well as
- # for project scoped, we need to keep scope check disable for
- # keystone.
- # Nova, Glance, and Neutron have enabled the new defaults and scope
- # by default in devstack.
+ KEYSTONE_ENFORCE_SCOPE: true
CINDER_ENFORCE_SCOPE: true
PLACEMENT_ENFORCE_SCOPE: true
@@ -490,7 +482,7 @@
# (on SLURP as well as non SLURP release) so we are adding grenade-skip-level-always
# job in integrated gate and we do not need to update skip level job
# here until Nova change the decision.
- # This is added from 2023.2 relese cycle onwards so we need to use branch variant
+ # This is added from 2023.2 release cycle onwards so we need to use branch variant
# to make sure we do not run this job on older than 2023.2 gate.
- grenade-skip-level-always:
branches:
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index e2505cb..d58862f 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -39,7 +39,7 @@
# those in respective stable branch gate.
- tempest-full-2024-1:
irrelevant-files: *tempest-irrelevant-files
- - tempest-full-zed:
+ - tempest-full-2023-1:
irrelevant-files: *tempest-irrelevant-files
- tempest-multinode-full-py3:
irrelevant-files: *tempest-irrelevant-files
@@ -169,8 +169,6 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-all-rbac-old-defaults
- tempest-full-parallel
- - tempest-full-zed-extra-tests
- - tempest-full-enforce-scope-new-defaults-zed
- neutron-ovs-tempest-dvr-ha-multinode-full:
irrelevant-files: *tempest-irrelevant-files
- nova-tempest-v2-api:
@@ -192,15 +190,12 @@
- tempest-full-2024-1
- tempest-full-2023-2
- tempest-full-2023-1
- - tempest-full-zed
- tempest-slow-2024-1
- tempest-slow-2023-2
- tempest-slow-2023-1
- - tempest-slow-zed
- tempest-full-2024-1-extra-tests
- tempest-full-2023-2-extra-tests
- tempest-full-2023-1-extra-tests
- - tempest-full-zed-extra-tests
periodic:
jobs:
- tempest-all
@@ -212,4 +207,3 @@
- tempest-centos9-stream-fips
- tempest-full-centos-9-stream
- tempest-full-test-account-no-admin-py3
- - tempest-full-enforce-scope-new-defaults-zed
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index a662685..9d69715 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -18,12 +18,6 @@
override-checkout: stable/2023.1
- job:
- name: tempest-full-zed
- parent: tempest-full-py3
- nodeset: openstack-single-node-focal
- override-checkout: stable/zed
-
-- job:
name: tempest-full-2024-1-extra-tests
parent: tempest-extra-tests
nodeset: openstack-single-node-jammy
@@ -42,12 +36,6 @@
override-checkout: stable/2023.1
- job:
- name: tempest-full-zed-extra-tests
- parent: tempest-extra-tests
- nodeset: openstack-single-node-focal
- override-checkout: stable/zed
-
-- job:
name: tempest-slow-2024-1
parent: tempest-slow-py3
nodeset: openstack-two-node-jammy
@@ -66,18 +54,6 @@
override-checkout: stable/2023.1
- job:
- name: tempest-full-enforce-scope-new-defaults-zed
- parent: tempest-full-enforce-scope-new-defaults
- nodeset: openstack-single-node-focal
- override-checkout: stable/zed
-
-- job:
- name: tempest-slow-zed
- parent: tempest-slow-py3
- nodeset: openstack-two-node-focal
- override-checkout: stable/zed
-
-- job:
name: tempest-full-py3
parent: devstack-tempest
# This job version is to use the 'full' tox env which
@@ -103,14 +79,14 @@
devstack_plugins:
neutron: https://opendev.org/openstack/neutron
devstack_services:
- # Enbale horizon so that we can run horizon test.
+ # Enable horizon so that we can run horizon test.
horizon: true
- job:
name: tempest-multinode-full-py3
parent: tempest-multinode-full
nodeset: openstack-two-node-focal
- # This job runs on Focal and supposed to run until stable/zed.
+ # This job runs on Focal and supposed to run until unmaintained/zed.
branches:
- ^.*/victoria
- ^.*/wallaby
@@ -133,7 +109,7 @@
name: tempest-multinode-full
parent: tempest-multinode-full-base
nodeset: openstack-two-node-focal
- # This job runs on Focal and on python2. This is for stable/victoria to stable/zed.
+ # This job runs on Focal and on python2. This is for unmaintained/victoria to unmaintained/zed.
branches:
- ^.*/victoria
- ^.*/wallaby