Merge "Enable snapshot deletion with deps on ceph"
diff --git a/releasenotes/notes/Add-scenario-config-opt-target-dir-5a969b64be1dc718.yaml b/releasenotes/notes/Add-scenario-config-opt-target-dir-5a969b64be1dc718.yaml
new file mode 100644
index 0000000..3adacfc
--- /dev/null
+++ b/releasenotes/notes/Add-scenario-config-opt-target-dir-5a969b64be1dc718.yaml
@@ -0,0 +1,7 @@
+---
+features:
+  - |
+    Add a new config option ``[scenario]/target_dir`` which allows
+    users to specify the directory where timestamp files will be
+    written. The default value is /tmp which, however, cannot be
+    expected to persist across reboots of an instance.
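
For operators, a minimal tempest.conf example of the new option follows; the path shown here is only an illustration of a guest directory that persists across reboots::

    [scenario]
    # Any writable directory on the guest that survives a reboot will do.
    target_dir = /var/tmp
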
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index d728853..a90d500 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -90,6 +90,14 @@
name=snapshot_name,
wait_until='ACTIVE',
wait_for_server=False)
+ # This is required due to the ceph issue:
+ # https://bugs.launchpad.net/glance/+bug/2045769.
+ # New location APIs are async, so we need to wait for the location
+ # import task to complete.
+ # This should work with the old location API as well, since we don't
+ # fail if there are no tasks for the image.
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image['id'], 'success')
self.addCleanup(self.client.delete_image, image['id'])
self.assertEqual(snapshot_name, image['name'])
@@ -110,6 +118,14 @@
name=snapshot_name,
wait_until='ACTIVE',
wait_for_server=False)
+ # This is required due to the ceph issue:
+ # https://bugs.launchpad.net/glance/+bug/2045769.
+ # New location APIs are async, so we need to wait for the location
+ # import task to complete.
+ # This should work with the old location API as well, since we don't
+ # fail if there are no tasks for the image.
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image['id'], 'success')
self.addCleanup(self.client.delete_image, image['id'])
self.assertEqual(snapshot_name, image['name'])
@@ -130,6 +146,14 @@
name=snapshot_name,
wait_until='ACTIVE',
wait_for_server=False)
+ # This is required due to the ceph issue:
+ # https://bugs.launchpad.net/glance/+bug/2045769.
+ # New location APIs are async, so we need to wait for the location
+ # import task to complete.
+ # This should work with the old location API as well, since we don't
+ # fail if there are no tasks for the image.
+ waiters.wait_for_image_tasks_status(self.images_client,
+ image['id'], 'success')
self.addCleanup(self.client.delete_image, image['id'])
self.assertEqual(snapshot_name, image['name'])
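
For context, wait_for_image_tasks_status already exists in tempest/common/waiters.py; the sketch below only illustrates the polling pattern the calls above rely on. The show_image_tasks() name is an assumption and may not match the real images client method::

    # Simplified sketch of an image-task waiter (assumption: the images
    # client exposes show_image_tasks(image_id); the real tempest
    # implementation may differ in details).
    import time

    from tempest.lib import exceptions as lib_exc


    def wait_for_image_tasks_status(client, image_id, status):
        """Poll the image tasks until they all reach the given status."""
        start = int(time.time())
        while int(time.time()) - start < client.build_timeout:
            tasks = client.show_image_tasks(image_id)['tasks']
            # No tasks at all is fine: the old location API does not
            # create any, so the waiter returns right away.
            if all(task['status'] == status for task in tasks):
                return tasks
            time.sleep(client.build_interval)
        raise lib_exc.TimeoutException(
            'Image %s tasks did not reach status %s.' % (image_id, status))
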
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index e249f35..9e97f47 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -672,6 +672,28 @@
raise lib_exc.TimeoutException
+def wait_for_server_ports_active(client, server_id, is_active, **kwargs):
+ """Wait for all server ports to reach active status
+ :param client: The network client to use when querying the port's status
+ :param server_id: The uuid of the server's ports we need to verify.
+ :param is_active: A function to call to the check port active status.
+ :param kwargs: Additional arguments, if any, to pass to list_ports()
+ """
+ start_time = time.time()
+ while (time.time() - start_time <= client.build_timeout):
+ ports = client.list_ports(device_id=server_id, **kwargs)['ports']
+ if all(is_active(port) for port in ports):
+ LOG.debug("Server ID %s ports are all ACTIVE %s: ",
+ server_id, ports)
+ return ports
+ LOG.warning("Server ID %s has ports that are not ACTIVE, waiting "
+ "for state to change on all: %s", server_id, ports)
+ time.sleep(client.build_interval)
+ LOG.error("Server ID %s ports have failed to transition to ACTIVE, "
+ "timing out: %s", server_id, ports)
+ raise lib_exc.TimeoutException
+
+
def wait_for_ssh(ssh_client, timeout=30):
"""Waits for SSH connection to become usable"""
start_time = int(time.time())
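
To show how the new waiter is meant to be consumed, here is a minimal, hypothetical caller; the predicate mirrors the one used in the scenario manager and the function and variable names are placeholders::

    # Minimal usage sketch for wait_for_server_ports_active
    # (function and variable names here are placeholders).
    from tempest.common import waiters


    def wait_for_ports(ports_client, server_id):
        def _is_active(port):
            # Baremetal (Ironic) ports never report ACTIVE, so treat
            # them as ready based on their vnic type instead.
            return (port['status'] == 'ACTIVE' or
                    port.get('binding:vnic_type') == 'baremetal')

        # Raises lib_exc.TimeoutException if any port is still inactive
        # after ports_client.build_timeout seconds.
        return waiters.wait_for_server_ports_active(
            client=ports_client, server_id=server_id, is_active=_is_active)
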
diff --git a/tempest/config.py b/tempest/config.py
index b1f736c..471782b 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1217,7 +1217,10 @@
default='icmp',
choices=('icmp', 'tcp', 'udp'),
help='The protocol used in security groups tests to check '
- 'connectivity.')
+ 'connectivity.'),
+ cfg.StrOpt('target_dir',
+ default='/tmp',
+ help='Directory in which to write the timestamp file.'),
]
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 714a7c7..01c42c8 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -1098,8 +1098,6 @@
if ip_addr and not kwargs.get('fixed_ips'):
kwargs['fixed_ips'] = 'ip_address=%s' % ip_addr
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'], **kwargs)['ports']
# A port can have more than one IP address in some cases.
# If the network is dual-stack (IPv4 + IPv6), this port is associated
@@ -1114,6 +1112,18 @@
return (port['status'] == 'ACTIVE' or
port.get('binding:vnic_type') == 'baremetal')
+ # Wait for all compute ports to be ACTIVE.
+ # This will raise a TimeoutException if that does not happen.
+ client = self.os_admin.ports_client
+ try:
+ ports = waiters.wait_for_server_ports_active(
+ client=client, server_id=server['id'],
+ is_active=_is_active, **kwargs)
+ except lib_exc.TimeoutException:
+ LOG.error("Server ports failed transitioning to ACTIVE for "
+ "server: %s", server)
+ raise
+
port_map = [(p["id"], fxip["ip_address"])
for p in ports
for fxip in p["fixed_ips"]
@@ -1121,7 +1131,8 @@
_is_active(p))]
inactive = [p for p in ports if p['status'] != 'ACTIVE']
if inactive:
- LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
+ # These should only be Ironic (baremetal) ports, see _is_active() above.
+ LOG.debug("Instance has ports that are not ACTIVE: %s", inactive)
self.assertNotEmpty(port_map,
"No IPv4 addresses found in: %s" % ports)
@@ -1227,7 +1238,7 @@
# Default the directory in which to write the timestamp file to /tmp
# and only use the mount_path as the target directory if we mounted
# dev_name to mount_path.
- target_dir = '/tmp'
+ target_dir = CONF.scenario.target_dir
if dev_name is not None:
mount_path = os.path.join(mount_path, dev_name)
ssh_client.make_fs(dev_name, fs=fs)
@@ -1266,7 +1277,7 @@
# Default the directory from which to read the timestamp file to /tmp
# and only use the mount_path as the target directory if we mounted
# dev_name to mount_path.
- target_dir = '/tmp'
+ target_dir = CONF.scenario.target_dir
if dev_name is not None:
mount_path = os.path.join(mount_path, dev_name)
ssh_client.mkdir(mount_path)
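
The timestamp helpers above only change where the file lands; the sketch below shows the write/read round-trip with CONF.scenario.target_dir. The shell commands are an approximation, not necessarily the exact ones the scenario manager runs::

    # Rough sketch of the timestamp round-trip (the shell commands are
    # an approximation of what the scenario manager does).
    from tempest import config

    CONF = config.CONF


    def write_timestamp(ssh_client):
        target_dir = CONF.scenario.target_dir  # defaults to /tmp
        ssh_client.exec_command(
            'sudo sh -c "date > %s/timestamp; sync"' % target_dir)


    def read_timestamp(ssh_client):
        target_dir = CONF.scenario.target_dir
        return ssh_client.exec_command('sudo cat %s/timestamp' % target_dir)
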
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index f194173..f7f2dc7 100755
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -884,6 +884,58 @@
waiters.wait_for_port_status, mock_client,
fake_port_id, fake_status)
+ def test_wait_for_server_ports_active(self):
+ """Test that the waiter replies with the ports before the timeout"""
+
+ def is_active(port):
+ return port['status'] == 'ACTIVE'
+
+ def client_response(device_id):
+ """Mock client response, replies with partial status after one
+ call and final status after 2 calls
+ """
+ if mock_client.call_count >= 2:
+ return mock_ports_active
+ else:
+ mock_client.call_count += 1
+ if mock_client.call_count > 1:
+ return mock_ports_half_active
+ return mock_ports
+
+ mock_ports = {'ports': [{'id': '1234', 'status': 'DOWN'},
+ {'id': '5678', 'status': 'DOWN'}]}
+ mock_ports_half_active = {'ports': [{'id': '1234', 'status': 'ACTIVE'},
+ {'id': '5678', 'status': 'DOWN'}]}
+ mock_ports_active = {'ports': [{'id': '1234', 'status': 'ACTIVE'},
+ {'id': '5678', 'status': 'ACTIVE'}]}
+ mock_client = mock.Mock(
+ spec=ports_client.PortsClient,
+ build_timeout=30, build_interval=1,
+ list_ports=client_response)
+ fake_server_id = "9876"
+ self.assertEqual(mock_ports_active['ports'],
+ waiters.wait_for_server_ports_active(
+ mock_client, fake_server_id, is_active))
+
+ def test_wait_for_server_ports_active_timeout(self):
+ """Negative test - checking that a timeout
+ presented by a small 'fake_timeout' and a static status of
+ 'DOWN' in the mock will raise a timeout exception
+ """
+
+ def is_active(port):
+ return port['status'] == 'ACTIVE'
+
+ mock_ports = {'ports': [{'id': '1234', 'status': "DOWN"}]}
+ mock_client = mock.Mock(
+ spec=ports_client.PortsClient,
+ build_timeout=2, build_interval=1,
+ list_ports=lambda device_id: mock_ports)
+ fake_server_id = "9876"
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_server_ports_active,
+ mock_client, fake_server_id, is_active)
+
class TestServerFloatingIPWaiters(base.TestCase):
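
As a possible simplification of the hand-rolled call_count bookkeeping in the first test above, mock's side_effect can sequence the list_ports() replies directly; this is only an alternative sketch, not what the patch does::

    # Alternative sketch: sequence the replies with side_effect instead
    # of tracking call_count by hand.
    from unittest import mock

    mock_ports_down = {'ports': [{'id': '1234', 'status': 'DOWN'}]}
    mock_ports_active = {'ports': [{'id': '1234', 'status': 'ACTIVE'}]}

    mock_client = mock.Mock(build_timeout=30, build_interval=1)
    # Each poll by the waiter consumes the next reply in order.
    mock_client.list_ports.side_effect = [mock_ports_down, mock_ports_active]
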
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index 1343a7c..8077308 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -358,18 +358,6 @@
TEMPEST_VOLUME_TYPE: volumev2
- job:
- name: tempest-centos8-stream-fips
- parent: devstack-tempest
- description: |
- Integration testing for a FIPS enabled Centos 8 system
- nodeset: devstack-single-node-centos-8-stream
- vars:
- tox_envlist: full
- configure_swap_size: 4096
- nslookup_target: 'opendev.org'
- enable_fips: True
-
-- job:
name: tempest-centos9-stream-fips
parent: devstack-tempest
description: |
@@ -503,9 +491,6 @@
- ^.*/2024.1
- master
- tempest-integrated-compute
- # centos-8-stream is tested from wallaby -> yoga branches
- - tempest-integrated-compute-centos-8-stream:
- branches: ^.*/(wallaby|xena|yoga)$
# Do not run it on ussuri until below issue is fixed
# https://storyboard.openstack.org/#!/story/2010057
# and job is broken up to wallaby branch due to the issue
diff --git a/zuul.d/stable-jobs.yaml b/zuul.d/stable-jobs.yaml
index 9d69715..cb1330f 100644
--- a/zuul.d/stable-jobs.yaml
+++ b/zuul.d/stable-jobs.yaml
@@ -149,41 +149,3 @@
- ^.*/victoria
- ^.*/wallaby
- ^.*/xena
-
-- job:
- name: tempest-integrated-compute-centos-8-stream
- parent: tempest-integrated-compute
- # TODO(gmann): Make this job non voting until bug#1957941 if fixed.
- voting: false
- nodeset: devstack-single-node-centos-8-stream
- branches:
- - ^.*/wallaby
- - ^.*/xena
- - ^.*/yoga
- description: |
- This job runs integration tests for compute. This is
- subset of 'tempest-full-py3' job and run Nova, Neutron, Cinder (except backup tests)
- and Glance related tests. This is meant to be run on Nova gate only.
- This version of the job also uses CentOS 8 stream.
- vars:
- # Required until bug/1949606 is resolved when using libvirt and QEMU
- # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
- configure_swap_size: 4096
-
-- job:
- name: tempest-full-py3-centos-8-stream
- parent: tempest-full-py3
- # TODO(gmann): Make this job non voting until bug#1957941 if fixed.
- voting: false
- branches:
- - ^.*/wallaby
- - ^.*/xena
- - ^.*/yoga
- nodeset: devstack-single-node-centos-8-stream
- description: |
- Base integration test with Neutron networking and py36 running
- on CentOS 8 stream
- vars:
- # Required until bug/1949606 is resolved when using libvirt and QEMU
- # >=5.0.0 with a [libvirt]virt_type of qemu (TCG).
- configure_swap_size: 4096