Merge "Manage a snapshot with an attached volume"
diff --git a/roles/run-tempest-26/tasks/main.yaml b/roles/run-tempest-26/tasks/main.yaml
index 7423bfb..7ad5c99 100644
--- a/roles/run-tempest-26/tasks/main.yaml
+++ b/roles/run-tempest-26/tasks/main.yaml
@@ -17,7 +17,7 @@
- name: Limit max concurrency when more than 3 vcpus are available
set_fact:
- default_concurrency: "{{ num_cores|int // 2 }}"
+ default_concurrency: "{{ num_cores|int - 2 }}"
when: num_cores|int > 3
- name: Override target branch
diff --git a/roles/run-tempest/tasks/main.yaml b/roles/run-tempest/tasks/main.yaml
index 3fb494f..3d78557 100644
--- a/roles/run-tempest/tasks/main.yaml
+++ b/roles/run-tempest/tasks/main.yaml
@@ -17,7 +17,7 @@
- name: Limit max concurrency when more than 3 vcpus are available
set_fact:
- default_concurrency: "{{ num_cores|int // 2 }}"
+ default_concurrency: "{{ num_cores|int - 2 }}"
when: num_cores|int > 3
- name: Override target branch
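Both run-tempest roles raise the concurrency cap from half the available vCPUs to all but two of them. For reference, a quick worked comparison of the two expressions, written as plain Python that mirrors the Jinja filters (core counts chosen for illustration):

# Mirrors "num_cores|int // 2" (old) vs "num_cores|int - 2" (new); the cap
# only applies when more than 3 vcpus are available.
for num_cores in (4, 8, 16):
    old = num_cores // 2   # previous cap: half the vcpus
    new = num_cores - 2    # new cap: leave two vcpus free
    print(f"{num_cores} vcpus -> old cap {old}, new cap {new}")
# 4 vcpus -> old cap 2, new cap 2
# 8 vcpus -> old cap 4, new cap 6
# 16 vcpus -> old cap 8, new cap 14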
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index 5f86a1f..19026d3 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -258,7 +258,7 @@
port = self.ports_client.show_port(port_id)['port']
return port['status'] == 'ACTIVE'
- @decorators.unstable_test(bug='2024160')
+ @decorators.unstable_test(bug='2027605')
@decorators.attr(type='multinode')
@decorators.idempotent_id('0022c12e-a482-42b0-be2d-396b5f0cffe3')
@utils.requires_ext(service='network', extension='trunk')
diff --git a/tempest/api/compute/admin/test_networks.py b/tempest/api/compute/admin/test_networks.py
index fb6376e..d7fb62d 100644
--- a/tempest/api/compute/admin/test_networks.py
+++ b/tempest/api/compute/admin/test_networks.py
@@ -64,5 +64,5 @@
configured_network = CONF.compute.fixed_network_name
self.assertIn(configured_network, [x['label'] for x in networks])
else:
- network_labels = [x['label'] for x in networks]
- self.assertNotEmpty(network_labels)
+ raise self.skipException(
+ "Environment has no network that is known to exist.")
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index b1bfac7..2700cd9 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -326,18 +326,18 @@
body['id'])
return body
- def wait_for(self, condition):
+ def wait_for(self, condition, *args):
"""Repeatedly calls condition() until a timeout."""
start_time = int(time.time())
while True:
try:
- condition()
+ condition(*args)
except Exception:
pass
else:
return
if int(time.time()) - start_time >= self.build_timeout:
- condition()
+ condition(*args)
return
time.sleep(self.build_interval)
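The wait_for change above lets callers pass the condition's arguments through instead of binding them beforehand. A standalone sketch of the same loop plus the new calling style (simplified; timeouts are plain arguments here rather than the test class attributes used in base.py):

import time

def wait_for(condition, *args, build_timeout=60, build_interval=1):
    # Repeatedly call condition(*args) until it stops raising or we time out.
    start_time = int(time.time())
    while True:
        try:
            condition(*args)
        except Exception:
            pass
        else:
            return
        if int(time.time()) - start_time >= build_timeout:
            # Final attempt, not wrapped in try/except, so a failure propagates.
            condition(*args)
            return
        time.sleep(build_interval)

# Callers can now pass the target directly, e.g. in the server-actions tests:
#     self.wait_for(self._get_output, self.server_id)
# instead of relying on a zero-argument condition bound to self.server_id.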
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index efecd6c..9b6bf84 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -295,8 +295,8 @@
def test_reassign_port_between_servers(self):
"""Tests reassigning port between servers
- 1. Create a port in Neutron.
- 2. Create two servers in Nova.
+ 1. Create two servers in Nova.
+ 2. Create a port in Neutron.
3. Attach the port to the first server.
4. Detach the port from the first server.
5. Attach the port to the second server.
@@ -304,11 +304,6 @@
"""
network = self.get_tenant_network()
network_id = network['id']
- port = self.ports_client.create_port(
- network_id=network_id,
- name=data_utils.rand_name(self.__class__.__name__))
- port_id = port['port']['id']
- self.addCleanup(self.ports_client.delete_port, port_id)
# NOTE(artom) We create two servers one at a time because
# create_test_server doesn't support multiple validatable servers.
@@ -318,12 +313,21 @@
def _create_validatable_server():
_, servers = compute.create_test_server(
self.os_primary, tenant_network=network,
- wait_until='ACTIVE', validatable=True,
+ validatable=True,
validation_resources=validation_resources)
return servers[0]
+ # NOTE(danms): We create these with no waiters because we will wait
+ # for them to be validatable (i.e. SSHABLE) below. That way the server
+ # creations overlap each other and the create_port call.
servers = [_create_validatable_server(), _create_validatable_server()]
+ port = self.ports_client.create_port(
+ network_id=network_id,
+ name=data_utils.rand_name(self.__class__.__name__))
+ port_id = port['port']['id']
+ self.addCleanup(self.ports_client.delete_port, port_id)
+
# add our cleanups for the servers since we bypassed the base class
for server in servers:
self.addCleanup(self.delete_server, server['id'])
@@ -332,7 +336,9 @@
# NOTE(mgoddard): Get detailed server to ensure addresses are
# present in fixed IP case.
server = self.servers_client.show_server(server['id'])['server']
- self._wait_for_validation(server, validation_resources)
+ compute.wait_for_ssh_or_ping(server, self.os_primary, network,
+ True, validation_resources,
+ 'SSHABLE', True)
# attach the port to the server
iface = self.interfaces_client.create_interface(
server['id'], port_id=port_id)['interfaceAttachment']
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index af58433..7afd9c2 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -34,13 +34,13 @@
LOG = logging.getLogger(__name__)
-class ServerActionsTestJSON(base.BaseV2ComputeTest):
+class ServerActionsBase(base.BaseV2ComputeTest):
"""Test server actions"""
def setUp(self):
# NOTE(afazekas): Normally we use the same server with all test cases,
# but if it has an issue, we build a new one
- super(ServerActionsTestJSON, self).setUp()
+ super().setUp()
# Check if the server is in a clean state after test
try:
self.validation_resources = self.get_class_validation_resources(
@@ -73,7 +73,7 @@
self.server_id, validatable=True, wait_until='SSHABLE')
def tearDown(self):
- super(ServerActionsTestJSON, self).tearDown()
+ super(ServerActionsBase, self).tearDown()
# NOTE(zhufl): Because server_check_teardown will raise Exception
# which will prevent other cleanup steps from being executed, so
# server_check_teardown should be called after super's tearDown.
@@ -82,51 +82,19 @@
@classmethod
def setup_credentials(cls):
cls.prepare_instance_network()
- super(ServerActionsTestJSON, cls).setup_credentials()
+ super(ServerActionsBase, cls).setup_credentials()
@classmethod
def setup_clients(cls):
- super(ServerActionsTestJSON, cls).setup_clients()
+ super(ServerActionsBase, cls).setup_clients()
cls.client = cls.servers_client
@classmethod
def resource_setup(cls):
- super(ServerActionsTestJSON, cls).resource_setup()
+ super(ServerActionsBase, cls).resource_setup()
cls.server_id = cls.recreate_server(None, validatable=True,
wait_until='SSHABLE')
- @decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
- @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
- 'Change password not available.')
- def test_change_server_password(self):
- """Test changing server's password
-
- The server's password should be set to the provided password and
- the user can authenticate with the new password.
- """
- # Since this test messes with the password and makes the
- # server unreachable, it should create its own server
- newserver = self.create_test_server(
- validatable=True,
- validation_resources=self.validation_resources,
- wait_until='ACTIVE')
- self.addCleanup(self.delete_server, newserver['id'])
- # The server's password should be set to the provided password
- new_password = 'Newpass1234'
- self.client.change_password(newserver['id'], adminPass=new_password)
- waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
-
- if CONF.validation.run_validation:
- # Verify that the user can authenticate with the new password
- server = self.client.show_server(newserver['id'])['server']
- linux_client = remote_client.RemoteClient(
- self.get_server_ip(server, self.validation_resources),
- self.ssh_user,
- new_password,
- server=server,
- servers_client=self.client)
- linux_client.validate_authentication()
-
def _test_reboot_server(self, reboot_type):
if CONF.validation.run_validation:
# Get the time the server was last rebooted,
@@ -159,45 +127,6 @@
self.assertGreater(new_boot_time, boot_time,
'%s > %s' % (new_boot_time, boot_time))
- @decorators.attr(type='smoke')
- @decorators.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
- def test_reboot_server_hard(self):
- """Test hard rebooting server
-
- The server should be power cycled.
- """
- self._test_reboot_server('HARD')
-
- @decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
- def test_remove_server_all_security_groups(self):
- """Test removing all security groups from server"""
- server = self.create_test_server(wait_until='ACTIVE')
-
- # Remove all Security group
- self.client.remove_security_group(
- server['id'], name=server['security_groups'][0]['name'])
-
- # Verify all Security group
- server = self.client.show_server(server['id'])['server']
- self.assertNotIn('security_groups', server)
-
- def _rebuild_server_and_check(self, image_ref, server):
- rebuilt_server = (self.client.rebuild_server(server['id'], image_ref)
- ['server'])
- if CONF.validation.run_validation:
- tenant_network = self.get_tenant_network()
- compute.wait_for_ssh_or_ping(
- server, self.os_primary, tenant_network,
- True, self.validation_resources, "SSHABLE", True)
- else:
- waiters.wait_for_server_status(self.client, server['id'],
- 'ACTIVE')
-
- msg = ('Server was not rebuilt to the original image. '
- 'The original image: {0}. The current image: {1}'
- .format(image_ref, rebuilt_server['image']['id']))
- self.assertEqual(image_ref, rebuilt_server['image']['id'], msg)
-
def _test_rebuild_server(self):
# Get the IPs the server has before rebuilding it
original_addresses = (self.client.show_server(self.server_id)['server']
@@ -250,6 +179,108 @@
servers_client=self.client)
linux_client.validate_authentication()
+ def _test_resize_server_confirm(self, server_id, stop=False):
+ # The server's RAM and disk space should be modified to that of
+ # the provided flavor
+
+ if stop:
+ self.client.stop_server(server_id)
+ waiters.wait_for_server_status(self.client, server_id,
+ 'SHUTOFF')
+
+ self.client.resize_server(server_id, self.flavor_ref_alt)
+ # NOTE(jlk): Explicitly delete the server to get a new one for later
+ # tests. Avoids resize down race issues.
+ self.addCleanup(self.delete_server, server_id)
+ waiters.wait_for_server_status(self.client, server_id,
+ 'VERIFY_RESIZE')
+
+ self.client.confirm_resize_server(server_id)
+ expected_status = 'SHUTOFF' if stop else 'ACTIVE'
+ waiters.wait_for_server_status(self.client, server_id,
+ expected_status)
+
+ server = self.client.show_server(server_id)['server']
+ self.assert_flavor_equal(self.flavor_ref_alt, server['flavor'])
+
+ if stop:
+ # NOTE(mriedem): tearDown requires the server to be started.
+ self.client.start_server(server_id)
+
+ def _get_output(self, server_id):
+ output = self.client.get_console_output(
+ server_id, length=3)['output']
+ self.assertTrue(output, "Console output was empty.")
+ lines = len(output.split('\n'))
+ self.assertEqual(lines, 3)
+
+ def _validate_url(self, url):
+ valid_scheme = ['http', 'https']
+ parsed_url = urlparse.urlparse(url)
+ self.assertIsNotNone(parsed_url.port)
+ self.assertIsNotNone(parsed_url.hostname)
+ self.assertIn(parsed_url.scheme, valid_scheme)
+
+ def _rebuild_server_and_check(self, image_ref, server):
+ rebuilt_server = (self.client.rebuild_server(server['id'], image_ref)
+ ['server'])
+ if CONF.validation.run_validation:
+ tenant_network = self.get_tenant_network()
+ compute.wait_for_ssh_or_ping(
+ server, self.os_primary, tenant_network,
+ True, self.validation_resources, "SSHABLE", True)
+ else:
+ waiters.wait_for_server_status(self.client, server['id'],
+ 'ACTIVE')
+
+ msg = ('Server was not rebuilt to the original image. '
+ 'The original image: {0}. The current image: {1}'
+ .format(image_ref, rebuilt_server['image']['id']))
+ self.assertEqual(image_ref, rebuilt_server['image']['id'], msg)
+
+
+class ServerActionsTestJSON(ServerActionsBase):
+ @decorators.idempotent_id('6158df09-4b82-4ab3-af6d-29cf36af858d')
+ @testtools.skipUnless(CONF.compute_feature_enabled.change_password,
+ 'Change password not available.')
+ def test_change_server_password(self):
+ """Test changing server's password
+
+ The server's password should be set to the provided password and
+ the user can authenticate with the new password.
+ """
+ # Since this test messes with the password and makes the
+ # server unreachable, it should create its own server
+ newserver = self.create_test_server(
+ validatable=True,
+ validation_resources=self.validation_resources,
+ wait_until='ACTIVE')
+ self.addCleanup(self.delete_server, newserver['id'])
+ # The server's password should be set to the provided password
+ new_password = 'Newpass1234'
+ self.client.change_password(newserver['id'], adminPass=new_password)
+ waiters.wait_for_server_status(self.client, newserver['id'], 'ACTIVE')
+
+ if CONF.validation.run_validation:
+ # Verify that the user can authenticate with the new password
+ server = self.client.show_server(newserver['id'])['server']
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, self.validation_resources),
+ self.ssh_user,
+ new_password,
+ server=server,
+ servers_client=self.client)
+ linux_client.validate_authentication()
+
+ @decorators.attr(type='smoke')
+ @decorators.idempotent_id('2cb1baf6-ac8d-4429-bf0d-ba8a0ba53e32')
+ def test_reboot_server_hard(self):
+ """Test hard rebooting server
+
+ The server should be power cycled.
+ """
+ self._test_reboot_server('HARD')
+
@decorators.idempotent_id('aaa6cdf3-55a7-461a-add9-1c8596b9a07c')
def test_rebuild_server(self):
"""Test rebuilding server
@@ -258,6 +289,120 @@
"""
self._test_rebuild_server()
+ @decorators.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ def test_resize_server_confirm(self):
+ """Test resizing server and then confirming"""
+ self._test_resize_server_confirm(self.server_id, stop=False)
+
+ @decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize not available.')
+ def test_resize_server_revert(self):
+ """Test resizing server and then reverting
+
+ The server's RAM and disk space should return to its original
+ values after a resize is reverted.
+ """
+
+ self.client.resize_server(self.server_id, self.flavor_ref_alt)
+ # NOTE(zhufl): Explicitly delete the server to get a new one for later
+ # tests. Avoids resize down race issues.
+ self.addCleanup(self.delete_server, self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id,
+ 'VERIFY_RESIZE')
+
+ self.client.revert_resize_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+
+ server = self.client.show_server(self.server_id)['server']
+ self.assert_flavor_equal(self.flavor_ref, server['flavor'])
+
+ @decorators.idempotent_id('4b8867e6-fffa-4d54-b1d1-6fdda57be2f3')
+ @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
+ 'Console output not supported.')
+ def test_get_console_output(self):
+ """Test getting console output for a server
+
+ Should be able to GET the console output for a given server_id and
+ number of lines.
+ """
+
+ # This reboot is necessary to produce some console log output after
+ # creating an instance backup. When an instance backup is taken, the
+ # console log file is truncated and we cannot get any console log
+ # through the "console-log" API.
+ # The detail is https://bugs.launchpad.net/nova/+bug/1251920
+ self.reboot_server(self.server_id, type='HARD')
+ self.wait_for(self._get_output, self.server_id)
+
+ @decorators.idempotent_id('bd61a9fd-062f-4670-972b-2d6c3e3b9e73')
+ @testtools.skipUnless(CONF.compute_feature_enabled.pause,
+ 'Pause is not available.')
+ def test_pause_unpause_server(self):
+ """Test pausing and unpausing server"""
+ self.client.pause_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
+ self.client.unpause_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+
+ @decorators.idempotent_id('0d8ee21e-b749-462d-83da-b85b41c86c7f')
+ @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
+ 'Suspend is not available.')
+ def test_suspend_resume_server(self):
+ """Test suspending and resuming server"""
+ self.client.suspend_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id,
+ 'SUSPENDED')
+ self.client.resume_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+
+ @decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
+ def test_stop_start_server(self):
+ """Test stopping and starting server"""
+ self.client.stop_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
+ self.client.start_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+
+ @decorators.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
+ def test_lock_unlock_server(self):
+ """Test locking and unlocking server
+
+ Lock the server; trying to stop it should then fail because a locked
+ server cannot be stopped by a non-admin user.
+ Then unlock the server and verify it can be stopped and started again.
+ """
+ # Lock the server, try to stop it (expect a Conflict), then unlock and retry
+ self.client.lock_server(self.server_id)
+ self.addCleanup(self.client.unlock_server, self.server_id)
+ server = self.client.show_server(self.server_id)['server']
+ self.assertEqual(server['status'], 'ACTIVE')
+ # Locked server is not allowed to be stopped by non-admin user
+ self.assertRaises(lib_exc.Conflict,
+ self.client.stop_server, self.server_id)
+ self.client.unlock_server(self.server_id)
+ self.client.stop_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
+ self.client.start_server(self.server_id)
+ waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+
+
+class ServerActionsTestOtherA(ServerActionsBase):
+ @decorators.idempotent_id('1d1c9104-1b0a-11e7-a3d4-fa163e65f5ce')
+ def test_remove_server_all_security_groups(self):
+ """Test removing all security groups from server"""
+ server = self.create_test_server(wait_until='ACTIVE')
+
+ # Remove all Security group
+ self.client.remove_security_group(
+ server['id'], name=server['security_groups'][0]['name'])
+
+ # Verify all Security group
+ server = self.client.show_server(server['id'])['server']
+ self.assertNotIn('security_groups', server)
+
@decorators.idempotent_id('30449a88-5aff-4f9b-9866-6ee9b17f906d')
def test_rebuild_server_in_stop_state(self):
"""Test rebuilding server in stop state
@@ -330,41 +475,6 @@
servers_client=self.client)
linux_client.validate_authentication()
- def _test_resize_server_confirm(self, server_id, stop=False):
- # The server's RAM and disk space should be modified to that of
- # the provided flavor
-
- if stop:
- self.client.stop_server(server_id)
- waiters.wait_for_server_status(self.client, server_id,
- 'SHUTOFF')
-
- self.client.resize_server(server_id, self.flavor_ref_alt)
- # NOTE(jlk): Explicitly delete the server to get a new one for later
- # tests. Avoids resize down race issues.
- self.addCleanup(self.delete_server, server_id)
- waiters.wait_for_server_status(self.client, server_id,
- 'VERIFY_RESIZE')
-
- self.client.confirm_resize_server(server_id)
- expected_status = 'SHUTOFF' if stop else 'ACTIVE'
- waiters.wait_for_server_status(self.client, server_id,
- expected_status)
-
- server = self.client.show_server(server_id)['server']
- self.assert_flavor_equal(self.flavor_ref_alt, server['flavor'])
-
- if stop:
- # NOTE(mriedem): tearDown requires the server to be started.
- self.client.start_server(server_id)
-
- @decorators.idempotent_id('1499262a-9328-4eda-9068-db1ac57498d2')
- @testtools.skipUnless(CONF.compute_feature_enabled.resize,
- 'Resize not available.')
- def test_resize_server_confirm(self):
- """Test resizing server and then confirming"""
- self._test_resize_server_confirm(self.server_id, stop=False)
-
@decorators.idempotent_id('e6c28180-7454-4b59-b188-0257af08a63b')
@decorators.related_bug('1728603')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
@@ -402,6 +512,8 @@
servers_client=self.client)
linux_client.validate_authentication()
+
+class ServerActionsTestOtherB(ServerActionsBase):
@decorators.idempotent_id('138b131d-66df-48c9-a171-64f45eb92962')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
'Resize not available.')
@@ -409,29 +521,6 @@
"""Test resizing a stopped server and then confirming"""
self._test_resize_server_confirm(self.server_id, stop=True)
- @decorators.idempotent_id('c03aab19-adb1-44f5-917d-c419577e9e68')
- @testtools.skipUnless(CONF.compute_feature_enabled.resize,
- 'Resize not available.')
- def test_resize_server_revert(self):
- """Test resizing server and then reverting
-
- The server's RAM and disk space should return to its original
- values after a resize is reverted.
- """
-
- self.client.resize_server(self.server_id, self.flavor_ref_alt)
- # NOTE(zhufl): Explicitly delete the server to get a new one for later
- # tests. Avoids resize down race issues.
- self.addCleanup(self.delete_server, self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id,
- 'VERIFY_RESIZE')
-
- self.client.revert_resize_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
-
- server = self.client.show_server(self.server_id)['server']
- self.assert_flavor_equal(self.flavor_ref, server['flavor'])
-
@decorators.idempotent_id('fbbf075f-a812-4022-bc5c-ccb8047eef12')
@decorators.related_bug('1737599')
@testtools.skipUnless(CONF.compute_feature_enabled.resize,
@@ -595,31 +684,6 @@
self.assertEqual((backup2, backup3),
(image_list[0]['name'], image_list[1]['name']))
- def _get_output(self):
- output = self.client.get_console_output(
- self.server_id, length=3)['output']
- self.assertTrue(output, "Console output was empty.")
- lines = len(output.split('\n'))
- self.assertEqual(lines, 3)
-
- @decorators.idempotent_id('4b8867e6-fffa-4d54-b1d1-6fdda57be2f3')
- @testtools.skipUnless(CONF.compute_feature_enabled.console_output,
- 'Console output not supported.')
- def test_get_console_output(self):
- """Test getting console output for a server
-
- Should be able to GET the console output for a given server_id and
- number of lines.
- """
-
- # This reboot is necessary for outputting some console log after
- # creating an instance backup. If an instance backup, the console
- # log file is truncated and we cannot get any console log through
- # "console-log" API.
- # The detail is https://bugs.launchpad.net/nova/+bug/1251920
- self.reboot_server(self.server_id, type='HARD')
- self.wait_for(self._get_output)
-
@decorators.idempotent_id('89104062-69d8-4b19-a71b-f47b7af093d7')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
@@ -643,6 +707,7 @@
self.wait_for(_check_full_length_console_log)
+ @decorators.skip_because(bug='2028851')
@decorators.idempotent_id('5b65d4e7-4ecd-437c-83c0-d6b79d927568')
@testtools.skipUnless(CONF.compute_feature_enabled.console_output,
'Console output not supported.')
@@ -661,28 +726,7 @@
self.client.stop_server(temp_server_id)
waiters.wait_for_server_status(self.client, temp_server_id, 'SHUTOFF')
- self.wait_for(self._get_output)
-
- @decorators.idempotent_id('bd61a9fd-062f-4670-972b-2d6c3e3b9e73')
- @testtools.skipUnless(CONF.compute_feature_enabled.pause,
- 'Pause is not available.')
- def test_pause_unpause_server(self):
- """Test pausing and unpausing server"""
- self.client.pause_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'PAUSED')
- self.client.unpause_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
-
- @decorators.idempotent_id('0d8ee21e-b749-462d-83da-b85b41c86c7f')
- @testtools.skipUnless(CONF.compute_feature_enabled.suspend,
- 'Suspend is not available.')
- def test_suspend_resume_server(self):
- """Test suspending and resuming server"""
- self.client.suspend_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id,
- 'SUSPENDED')
- self.client.resume_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
+ self.wait_for(self._get_output, temp_server_id)
@decorators.idempotent_id('77eba8e0-036e-4635-944b-f7a8f3b78dc9')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
@@ -736,43 +780,6 @@
compute.shelve_server(self.client, server['id'],
force_shelve_offload=True)
- @decorators.idempotent_id('af8eafd4-38a7-4a4b-bdbc-75145a580560')
- def test_stop_start_server(self):
- """Test stopping and starting server"""
- self.client.stop_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
- self.client.start_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
-
- @decorators.idempotent_id('80a8094c-211e-440a-ab88-9e59d556c7ee')
- def test_lock_unlock_server(self):
- """Test locking and unlocking server
-
- Lock the server, and trying to stop it will fail because locked
- server is not allowed to be stopped by non-admin user.
- Then unlock the server, now the server can be stopped and started.
- """
- # Lock the server,try server stop(exceptions throw),unlock it and retry
- self.client.lock_server(self.server_id)
- self.addCleanup(self.client.unlock_server, self.server_id)
- server = self.client.show_server(self.server_id)['server']
- self.assertEqual(server['status'], 'ACTIVE')
- # Locked server is not allowed to be stopped by non-admin user
- self.assertRaises(lib_exc.Conflict,
- self.client.stop_server, self.server_id)
- self.client.unlock_server(self.server_id)
- self.client.stop_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'SHUTOFF')
- self.client.start_server(self.server_id)
- waiters.wait_for_server_status(self.client, self.server_id, 'ACTIVE')
-
- def _validate_url(self, url):
- valid_scheme = ['http', 'https']
- parsed_url = urlparse.urlparse(url)
- self.assertNotEqual('None', parsed_url.port)
- self.assertNotEqual('None', parsed_url.hostname)
- self.assertIn(parsed_url.scheme, valid_scheme)
-
@decorators.idempotent_id('c6bc11bf-592e-4015-9319-1c98dc64daf5')
@testtools.skipUnless(CONF.compute_feature_enabled.vnc_console,
'VNC Console feature is disabled.')
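Taken together, the hunks above split ServerActionsTestJSON into a shared ServerActionsBase plus three concrete test classes, so the actions can be scheduled as smaller class-level units. A stripped-down structural sketch of the resulting layout (docstrings summarise the grouping; see the hunks for the exact test assignment):

class BaseV2ComputeTest:
    """Stand-in for tempest.api.compute.base.BaseV2ComputeTest."""

class ServerActionsBase(BaseV2ComputeTest):
    """Shared setUp/tearDown, credentials, clients and helpers such as
    _test_reboot_server, _test_rebuild_server, _test_resize_server_confirm,
    _get_output, _validate_url and _rebuild_server_and_check."""

class ServerActionsTestJSON(ServerActionsBase):
    """Change password, hard reboot, rebuild, resize confirm/revert, console
    output, pause/unpause, suspend/resume, stop/start, lock/unlock."""

class ServerActionsTestOtherA(ServerActionsBase):
    """Security-group removal, rebuild in stop state and related resize
    checks."""

class ServerActionsTestOtherB(ServerActionsBase):
    """Resize from stopped, backups, console-log variants, shelve and console
    URL tests."""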
diff --git a/tempest/api/image/v2/test_images.py b/tempest/api/image/v2/test_images.py
index fecd5a7..977ad82 100644
--- a/tempest/api/image/v2/test_images.py
+++ b/tempest/api/image/v2/test_images.py
@@ -22,6 +22,7 @@
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from tempest.lib import exceptions as lib_exc
@@ -733,6 +734,30 @@
body = self.schemas_client.show_schema(schema)
self.assertEqual("images", body['name'])
+ @decorators.idempotent_id('d43f3efc-da4c-4af9-b636-868f0c6acedb')
+ def test_list_hidden_image(self):
+ image = self.client.create_image(os_hidden=True)
+ image = image['image'] if 'image' in image else image
+ self.addCleanup(self.client.wait_for_resource_deletion, image['id'])
+ self.addCleanup(test_utils.call_and_ignore_notfound_exc,
+ self.client.delete_image, image['id'])
+ images_list = self.client.list_images()['images']
+ fetched_images_id = [img['id'] for img in images_list]
+ self.assertNotIn(image['id'], fetched_images_id)
+
+ @decorators.idempotent_id('fdb96b81-257b-42ac-978b-ddeefa3760e4')
+ def test_list_update_hidden_image(self):
+ image = self.create_image()
+ images_list = self.client.list_images()['images']
+ fetched_images_id = [img['id'] for img in images_list]
+ self.assertIn(image['id'], fetched_images_id)
+
+ self.client.update_image(image['id'],
+ [dict(replace='/os_hidden', value=True)])
+ images_list = self.client.list_images()['images']
+ fetched_images_id = [img['id'] for img in images_list]
+ self.assertNotIn(image['id'], fetched_images_id)
+
class ListSharedImagesTest(base.BaseV2ImageTest):
"""Here we test the listing of a shared image information"""
diff --git a/tempest/api/object_storage/base.py b/tempest/api/object_storage/base.py
index 7107dc4..58ad9d4 100644
--- a/tempest/api/object_storage/base.py
+++ b/tempest/api/object_storage/base.py
@@ -15,6 +15,8 @@
import time
+from oslo_log import log
+
from tempest.common import custom_matchers
from tempest.common import waiters
from tempest import config
@@ -23,6 +25,7 @@
import tempest.test
CONF = config.CONF
+LOG = log.getLogger(__name__)
def delete_containers(containers, container_client, object_client):
@@ -41,17 +44,33 @@
for cont in containers:
try:
- params = {'limit': 9999, 'format': 'json'}
- _, objlist = container_client.list_container_objects(cont, params)
- # delete every object in the container
- for obj in objlist:
- object_client.delete_object(cont, obj['name'])
- object_client.wait_for_resource_deletion(obj['name'], cont)
- # Verify resource deletion
+ delete_objects(cont, container_client, object_client)
container_client.delete_container(cont)
container_client.wait_for_resource_deletion(cont)
except lib_exc.NotFound:
- pass
+ LOG.warning(f"Container {cont} wasn't deleted as it wasn't found.")
+
+
+def delete_objects(container, container_client, object_client):
+ """Remove all objects from container.
+
+ Will not throw any error if the objects do not exist
+
+ :param container: Name of the container that contains the objects to be
+ deleted
+ :param container_client: Client to be used to list objects in
+ the container
+ :param object_client: Client to be used to delete objects
+ """
+ params = {'limit': 9999, 'format': 'json'}
+ _, objlist = container_client.list_container_objects(container, params)
+
+ for obj in objlist:
+ try:
+ object_client.delete_object(container, obj['name'])
+ object_client.wait_for_resource_deletion(obj['name'], container)
+ except lib_exc.NotFound:
+ LOG.warning(f"Object {obj} wasn't deleted as it wasn't found.")
class BaseObjectTest(tempest.test.BaseTestCase):
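delete_containers now delegates per-object cleanup to the new delete_objects helper, and both log a warning instead of failing when something is already gone. A minimal usage sketch (the container and object clients are assumed to be the tempest object-storage clients this module already uses):

# Empty a single container without failing if its objects are already gone.
delete_objects('test-container', container_client, object_client)

# Remove a list of containers; each is emptied first via delete_objects.
delete_containers(['cont-1', 'cont-2'], container_client, object_client)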
diff --git a/tempest/api/object_storage/test_container_acl_negative.py b/tempest/api/object_storage/test_container_acl_negative.py
index 85e6ddb..347c79e 100644
--- a/tempest/api/object_storage/test_container_acl_negative.py
+++ b/tempest/api/object_storage/test_container_acl_negative.py
@@ -41,6 +41,7 @@
super(ObjectACLsNegativeTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.update_container(self.container_name)
+ self.containers.append(self.container_name)
@classmethod
def resource_cleanup(cls):
diff --git a/tempest/api/object_storage/test_object_services.py b/tempest/api/object_storage/test_object_services.py
index 7d5bd26..61b9136 100644
--- a/tempest/api/object_storage/test_object_services.py
+++ b/tempest/api/object_storage/test_object_services.py
@@ -1016,9 +1016,10 @@
super(PublicObjectTest, self).setUp()
self.container_name = data_utils.rand_name(name='TestContainer')
self.container_client.update_container(self.container_name)
+ self.containers.append(self.container_name)
def tearDown(self):
- self.delete_containers([self.container_name])
+ self.delete_containers()
super(PublicObjectTest, self).tearDown()
@decorators.idempotent_id('07c9cf95-c0d4-4b49-b9c8-0ef2c9b27193')
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 20495ee..4d35bbb 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -1140,14 +1140,19 @@
server=server,
username=username)
+ # Default the directory in which to write the timestamp file to /tmp
+ # and only use the mount_path as the target directory if we mounted
+ # dev_name to mount_path.
+ target_dir = '/tmp'
if dev_name is not None:
ssh_client.make_fs(dev_name, fs=fs)
ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
mount_path))
- cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % mount_path
+ target_dir = mount_path
+ cmd_timestamp = 'sudo sh -c "date > %s/timestamp; sync"' % target_dir
ssh_client.exec_command(cmd_timestamp)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
- % mount_path)
+ % target_dir)
if dev_name is not None:
ssh_client.exec_command('sudo umount %s' % mount_path)
return timestamp
@@ -1172,10 +1177,15 @@
server=server,
username=username)
+ # Default the directory from which to read the timestamp file to /tmp
+ # and only use the mount_path as the target directory if we mounted
+ # dev_name to mount_path.
+ target_dir = '/tmp'
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
+ target_dir = mount_path
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
- % mount_path)
+ % target_dir)
if dev_name is not None:
ssh_client.exec_command('sudo umount %s' % mount_path)
return timestamp
@@ -1659,7 +1669,8 @@
def create_encrypted_volume(self, encryption_provider, volume_type,
key_size=256, cipher='aes-xts-plain64',
- control_location='front-end'):
+ control_location='front-end',
+ wait_until='available'):
"""Creates an encrypted volume"""
volume_type = self.create_volume_type(name=volume_type)
self.create_encryption_type(type_id=volume_type['id'],
@@ -1667,7 +1678,8 @@
key_size=key_size,
cipher=cipher,
control_location=control_location)
- return self.create_volume(volume_type=volume_type['name'])
+ return self.create_volume(volume_type=volume_type['name'],
+ wait_until=wait_until)
class ObjectStorageScenarioTest(ScenarioTest):
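The timestamp helpers above now write to and read from /tmp unless a device was actually mounted at mount_path. A tiny illustrative helper (hypothetical, not part of manager.py) showing the directory selection both hunks implement:

def _timestamp_dir(dev_name, mount_path):
    # Only trust mount_path when dev_name was mounted there by the caller.
    return mount_path if dev_name is not None else '/tmp'

assert _timestamp_dir(None, '/mnt') == '/tmp'
assert _timestamp_dir('vdb', '/mnt') == '/mnt'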
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 60abc02..753e64f 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -16,6 +16,7 @@
import testtools
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.scenario import manager
@@ -56,9 +57,16 @@
@utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_luks(self):
"""LUKs v1 decrypts volume through libvirt."""
- server = self.launch_instance()
volume = self.create_encrypted_volume('luks',
- volume_type='luks')
+ volume_type='luks',
+ wait_until=None)
+ server = self.launch_instance()
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ # The volume returned by the create call may still carry a stale status.
+ # Re-fetch it once it is available to get up-to-date details.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+
self.attach_detach_volume(server, volume)
@decorators.idempotent_id('7abec0a3-61a0-42a5-9e36-ad3138fb38b4')
@@ -68,16 +76,30 @@
@utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_luksv2(self):
"""LUKs v2 decrypts volume through os-brick."""
- server = self.launch_instance()
volume = self.create_encrypted_volume('luks2',
- volume_type='luksv2')
+ volume_type='luksv2',
+ wait_until=None)
+ server = self.launch_instance()
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ # The volume returned by the create call may still carry a stale status.
+ # Re-fetch it once it is available to get up-to-date details.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+
self.attach_detach_volume(server, volume)
@decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
@decorators.attr(type='slow')
@utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_cryptsetup(self):
- server = self.launch_instance()
volume = self.create_encrypted_volume('plain',
- volume_type='cryptsetup')
+ volume_type='cryptsetup',
+ wait_until=None)
+ server = self.launch_instance()
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ # The volume returned by the create call may still carry a stale status.
+ # Re-fetch it once it is available to get up-to-date details.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+
self.attach_detach_volume(server, volume)
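All three encrypted-volume tests now follow the same ordering: start the volume create without waiting, boot the server while Cinder does its work, then wait for the volume and re-fetch it before attaching. Condensed from the hunks above (an excerpt-style sketch, not a standalone script):

volume = self.create_encrypted_volume('luks', volume_type='luks',
                                      wait_until=None)   # do not block here
server = self.launch_instance()              # overlaps the volume create
waiters.wait_for_volume_resource_status(self.volumes_client,
                                        volume['id'], 'available')
# Re-fetch so the volume body reflects the 'available' state before attaching.
volume = self.volumes_client.show_volume(volume['id'])['volume']
self.attach_detach_volume(server, volume)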
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index cbe4122..7b819e0 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -898,10 +898,13 @@
nic=spoof_nic, should_succeed=True)
# Set a mac address by making nic down temporary
spoof_ip_addresses = ssh_client.get_nic_ip_addresses(spoof_nic)
- cmd = ("sudo ip link set {nic} down;"
+ dhcp_cmd = ("sudo start-stop-daemon -K -x /sbin/dhcpcd -p "
+ "/var/run/dhcpcd/pid -o || true")
+ cmd = ("{dhcp_cmd}; sudo ip link set {nic} down;"
"sudo ip link set dev {nic} address {mac};"
"sudo ip link set {nic} up;"
"sudo ip address flush dev {nic};").format(nic=spoof_nic,
+ dhcp_cmd=dhcp_cmd,
mac=spoof_mac)
for ip_address in spoof_ip_addresses:
cmd += (
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
index 9285da2..fe85234 100644
--- a/tempest/scenario/test_server_multinode.py
+++ b/tempest/scenario/test_server_multinode.py
@@ -14,6 +14,7 @@
# under the License.
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -46,7 +47,8 @@
if zone['zoneState']['available']:
for host in zone['hosts']:
if 'nova-compute' in zone['hosts'][host] and \
- zone['hosts'][host]['nova-compute']['available']:
+ zone['hosts'][host]['nova-compute']['available'] and \
+ not host.endswith('-ironic'):
hosts.append({'zone': zone['zoneName'],
'host_name': host})
@@ -60,6 +62,7 @@
# threshold (so that things don't get crazy if you have 1000
# compute nodes but set min to 3).
servers = []
+ host_server_ids = {}
for host in hosts[:CONF.compute.min_compute_nodes]:
# by getting to active state here, this means this has
@@ -67,12 +70,18 @@
# in order to use the availability_zone:host scheduler hint,
# admin client is need here.
inst = self.create_server(
+ wait_until=None,
clients=self.os_admin,
availability_zone='%(zone)s:%(host_name)s' % host)
+ host_server_ids[host['host_name']] = inst['id']
+
+ for host_name, server_id in host_server_ids.items():
+ waiters.wait_for_server_status(self.os_admin.servers_client,
+ server_id, 'ACTIVE')
server = self.os_admin.servers_client.show_server(
- inst['id'])['server']
+ server_id)['server']
# ensure server is located on the requested host
- self.assertEqual(host['host_name'], server['OS-EXT-SRV-ATTR:host'])
+ self.assertEqual(host_name, server['OS-EXT-SRV-ATTR:host'])
servers.append(server)
# make sure we really have the number of servers we think we should
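test_schedule_to_all_nodes applies the same overlap idea: boot one server per candidate host with wait_until=None, remember the ids, then wait for each server and check its placement. The resulting loop, condensed from the hunks above (excerpt-style sketch):

host_server_ids = {}
for host in hosts[:CONF.compute.min_compute_nodes]:
    inst = self.create_server(
        wait_until=None, clients=self.os_admin,
        availability_zone='%(zone)s:%(host_name)s' % host)
    host_server_ids[host['host_name']] = inst['id']

for host_name, server_id in host_server_ids.items():
    waiters.wait_for_server_status(self.os_admin.servers_client,
                                   server_id, 'ACTIVE')
    server = self.os_admin.servers_client.show_server(server_id)['server']
    # Each server must land on the host it was pinned to.
    self.assertEqual(host_name, server['OS-EXT-SRV-ATTR:host'])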
diff --git a/tempest/scenario/test_server_volume_attachment.py b/tempest/scenario/test_server_volume_attachment.py
index cc8cf00..1d0d0d0 100644
--- a/tempest/scenario/test_server_volume_attachment.py
+++ b/tempest/scenario/test_server_volume_attachment.py
@@ -69,11 +69,18 @@
@utils.services('compute', 'volume', 'image', 'network')
def test_server_detach_rules(self):
"""Test that various methods of detaching a volume honors the rules"""
+ volume = self.create_volume(wait_until=None)
+ volume2 = self.create_volume(wait_until=None)
+
server = self.create_server(wait_until='SSHABLE')
servers = self.servers_client.list_servers()['servers']
self.assertIn(server['id'], [x['id'] for x in servers])
- volume = self.create_volume()
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ # The volume returned by the create call may still carry a stale status.
+ # Re-fetch it once it is available to get up-to-date details.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
volume = self.nova_volume_attach(server, volume)
self.addCleanup(self.nova_volume_detach, server, volume)
@@ -143,7 +150,12 @@
volume['id'], connector=None, attachment_id=att_id)
# Test user call to detach with mismatch is rejected
- volume2 = self.create_volume()
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume2['id'], 'available')
+ # The volume returned by the create call may still carry a stale status.
+ # Re-fetch it once it is available to get up-to-date details.
+ volume2 = self.volumes_client.show_volume(volume2['id'])['volume']
+
volume2 = self.nova_volume_attach(server, volume2)
att_id2 = volume2['attachments'][0]['attachment_id']
self.assertRaises(
diff --git a/tempest/scenario/test_stamp_pattern.py b/tempest/scenario/test_stamp_pattern.py
index 4b81b9e..82f0341 100644
--- a/tempest/scenario/test_stamp_pattern.py
+++ b/tempest/scenario/test_stamp_pattern.py
@@ -16,6 +16,7 @@
import testtools
from tempest.common import utils
+from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
@@ -84,7 +85,7 @@
security_group = self.create_security_group()
# boot an instance and create a timestamp file in it
- volume = self.create_volume()
+ volume = self.create_volume(wait_until=None)
server = self.create_server(
key_name=keypair['name'],
security_groups=[{'name': security_group['name']}])
@@ -97,6 +98,12 @@
ip_for_server, private_key=keypair['private_key'],
server=server)
disks_list_before_attach = linux_client.list_disks()
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume['id'], 'available')
+ # The volume returned by the create call may still carry a stale status.
+ # Re-fetch it once it is available to get up-to-date details.
+ volume = self.volumes_client.show_volume(volume['id'])['volume']
+
self.nova_volume_attach(server, volume)
volume_device_name = self._attached_volume_name(
disks_list_before_attach, ip_for_server, keypair['private_key'])
@@ -115,7 +122,7 @@
# create second volume from the snapshot(volume2)
volume_from_snapshot = self.create_volume(
- snapshot_id=volume_snapshot['id'])
+ snapshot_id=volume_snapshot['id'], wait_until=None)
# boot second instance from the snapshot(instance2)
server_from_snapshot = self.create_server(
@@ -135,6 +142,14 @@
disks_list_before_attach = linux_client.list_disks()
# attach volume2 to instance2
+ waiters.wait_for_volume_resource_status(self.volumes_client,
+ volume_from_snapshot['id'],
+ 'available')
+ # The volume returned by the create call may still carry a stale status.
+ # Re-fetch it once it is available to get up-to-date details.
+ volume_from_snapshot = self.volumes_client.show_volume(
+ volume_from_snapshot['id'])['volume']
+
self.nova_volume_attach(server_from_snapshot, volume_from_snapshot)
volume_device_name = self._attached_volume_name(
disks_list_before_attach, ip_for_snapshot, keypair['private_key'])
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 70fdc79..8ac0b42 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -276,7 +276,7 @@
timeout: 10800
# This job runs on stable/stein onwards.
branches: ^(?!stable/(ocata|pike|queens|rocky)).*$
- vars: &tempest_slow_vars
+ vars:
tox_envlist: slow-serial
devstack_localrc:
CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
@@ -300,17 +300,27 @@
# This job version is with swift enabled on py3
# as swift is ready on py3 from stable/ussuri onwards.
timeout: 10800
- branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
- vars: *tempest_slow_vars
-
-- job:
- name: tempest-slow-parallel
- parent: tempest-slow-py3
- # This job run slow tests in parallel.
+ # The 'slow' tox env is not available in the old tempest used up to
+ # stable/wallaby, so this job definition only applies from stable/xena
+ # onwards; a separate job definition covers branches up to stable/wallaby.
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria|wallaby)).*$
vars:
tox_envlist: slow
devstack_localrc:
- MYSQL_REDUCE_MEMORY: true
+ CINDER_ENABLED_BACKENDS: lvm:lvmdriver-1,lvm:lvmdriver-2
+ ENABLE_VOLUME_MULTIATTACH: true
+ devstack_plugins:
+ neutron: https://opendev.org/openstack/neutron
+ devstack_services:
+ neutron-placement: true
+ neutron-qos: true
+ group-vars:
+ # NOTE(mriedem): The ENABLE_VOLUME_MULTIATTACH variable is used on both
+ # the controller and subnode prior to Rocky so we have to make sure the
+ # variable is set in both locations.
+ subnode:
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
- job:
name: tempest-cinder-v2-api
@@ -383,12 +393,24 @@
# Keystone policies are changed to work for both system as well as
# for project scoped, we need to keep scope check disable for
# keystone.
- NOVA_ENFORCE_SCOPE: true
+ # Nova and Glance have enabled the new defaults and scope by default
+ # in devstack.
CINDER_ENFORCE_SCOPE: true
- GLANCE_ENFORCE_SCOPE: true
NEUTRON_ENFORCE_SCOPE: true
PLACEMENT_ENFORCE_SCOPE: true
+- job:
+ name: tempest-all-rbac-old-defaults
+ parent: tempest-all
+ description: |
+ Integration test that runs all tests on RBAC old defaults.
+ devstack_localrc:
+ # NOTE(gmann): Nova and Glance have enabled the new defaults and scope
+ # by default in devstack so we need some jobs keep testing the old
+ # defaults until they are removed from service side.
+ NOVA_ENFORCE_SCOPE: false
+ GLANCE_ENFORCE_SCOPE: false
+
- project-template:
name: integrated-gate-networking
description: |
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 6e1ba5e..9787526 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -118,6 +118,8 @@
- tempest-full-test-account-py3:
voting: false
irrelevant-files: *tempest-irrelevant-files
+ - ironic-tempest-bios-ipmi-direct-tinyipa:
+ irrelevant-files: *tempest-irrelevant-files
- openstack-tox-bashate:
irrelevant-files: *tempest-irrelevant-files-2
gate:
@@ -146,6 +148,8 @@
# irrelevant-files: *tempest-irrelevant-files
- nova-live-migration:
irrelevant-files: *tempest-irrelevant-files
+ - ironic-tempest-bios-ipmi-direct-tinyipa:
+ irrelevant-files: *tempest-irrelevant-files
experimental:
jobs:
- nova-multi-cell
@@ -157,7 +161,7 @@
irrelevant-files: *tempest-irrelevant-files
- tempest-all:
irrelevant-files: *tempest-irrelevant-files
- - tempest-slow-parallel
+ - tempest-all-rbac-old-defaults
- tempest-full-parallel
- tempest-full-zed-extra-tests
- tempest-full-yoga-extra-tests
@@ -192,7 +196,7 @@
periodic:
jobs:
- tempest-all
- - tempest-slow-parallel
+ - tempest-all-rbac-old-defaults
- tempest-full-parallel
- tempest-full-oslo-master
- tempest-stestr-master
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 89435ce..d399556 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -311,6 +311,18 @@
USE_PYTHON3: true
- job:
+ name: tempest-slow-py3
+ parent: tempest-slow
+ # This job version uses the 'slow-serial' tox env for testing
+ # on stable/ussuri through stable/wallaby.
+ branches:
+ - stable/ussuri
+ - stable/victoria
+ - stable/wallaby
+ vars:
+ tox_envlist: slow-serial
+
+- job:
name: tempest-full-py3-opensuse15
parent: tempest-full-py3
nodeset: devstack-single-node-opensuse-15