Merge "Fix compute_unified decoration"
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 549d4fb..99d8e2a 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -85,10 +85,14 @@
hw_scsi_model='virtio-scsi',
hw_disk_bus='scsi',
hw_cdrom_bus='scsi')
- server = self.create_test_server(image_id=custom_img,
- config_drive=True,
- wait_until='ACTIVE')
-
+ validation_resources = self.get_test_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ image_id=custom_img,
+ config_drive=True,
+ validatable=True,
+ validation_resources=validation_resources,
+ wait_until="SSHABLE")
# NOTE(lyarwood): self.create_test_server deletes the server
# at class level cleanup so add server cleanup to ensure that
# the instance is deleted before the created image. This
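The pattern added above recurs throughout this change: request validation resources, create the server with validatable=True, and wait for SSHABLE rather than ACTIVE so the guest is actually reachable before any follow-up operation such as snapshotting. Below is a minimal sketch of that pattern, assuming a class derived from base.BaseV2ComputeTest and a deployment with CONF.validation.run_validation enabled; the class and method names are hypothetical:

    from tempest.api.compute import base


    class SshableServerExample(base.BaseV2ComputeTest):
        """Hypothetical sketch of the validatable/SSHABLE creation pattern."""

        def test_create_reachable_server(self):
            # Keypair, security group and floating IP scoped to this test.
            validation_resources = self.get_test_validation_resources(
                self.os_primary)
            # SSHABLE implies ACTIVE plus a successful SSH login, so the
            # guest OS has finished booting before the test continues.
            server = self.create_test_server(
                validatable=True,
                validation_resources=validation_resources,
                wait_until='SSHABLE')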
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 354e3b9..716ecda 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -16,6 +16,7 @@
import testtools
from tempest.api.compute import base
+from tempest.common import compute
from tempest.common import utils
from tempest.common import waiters
from tempest import config
@@ -112,7 +113,6 @@
class BaseServerStableDeviceRescueTest(base.BaseV2ComputeTest):
- create_default_network = True
@classmethod
def skip_checks(cls):
@@ -124,19 +124,31 @@
msg = "Stable rescue not available."
raise cls.skipException(msg)
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(network=True, subnet=True, router=True,
+ dhcp=True)
+ super(BaseServerStableDeviceRescueTest, cls).setup_credentials()
+
def _create_server_and_rescue_image(self, hw_rescue_device=None,
hw_rescue_bus=None,
- block_device_mapping_v2=None):
-
- server_id = self.create_test_server(
- wait_until='ACTIVE')['id']
+ block_device_mapping_v2=None,
+ validatable=False,
+ validation_resources=None,
+ wait_until='ACTIVE'):
+ server = self.create_test_server(
+ wait_until=wait_until,
+ validatable=validatable,
+ validation_resources=validation_resources)
image_id = self.create_image_from_server(
- server_id, wait_until='ACTIVE')['id']
+ server['id'], wait_until='ACTIVE')['id']
if block_device_mapping_v2:
- server_id = self.create_test_server(
- wait_until='ACTIVE',
- block_device_mapping_v2=block_device_mapping_v2)['id']
+ server = self.create_test_server(
+ wait_until=wait_until,
+ validatable=validatable,
+ validation_resources=validation_resources,
+ block_device_mapping_v2=block_device_mapping_v2)
if hw_rescue_bus:
self.images_client.update_image(
@@ -146,16 +158,28 @@
self.images_client.update_image(
image_id, [dict(add='/hw_rescue_device',
value=hw_rescue_device)])
- return server_id, image_id
+ return server, image_id
- def _test_stable_device_rescue(self, server_id, rescue_image_id):
+ def _test_stable_device_rescue(
+ self, server, rescue_image_id,
+ validation_resources=None):
self.servers_client.rescue_server(
- server_id, rescue_image_ref=rescue_image_id)
+ server['id'], rescue_image_ref=rescue_image_id)
waiters.wait_for_server_status(
- self.servers_client, server_id, 'RESCUE')
- self.servers_client.unrescue_server(server_id)
- waiters.wait_for_server_status(
- self.servers_client, server_id, 'ACTIVE')
+ self.servers_client, server['id'], 'RESCUE')
+ self.servers_client.unrescue_server(server['id'])
+ # NOTE(gmann): The volume detach (added as a cleanup by the
+ # self.attach_volume() method) runs after this unrescue, so to make
+ # sure the server is ready before the detach operation we need to
+ # SSH into it; more details are in bug#1960346.
+ if validation_resources and CONF.validation.run_validation:
+ tenant_network = self.get_tenant_network()
+ compute.wait_for_ssh_or_ping(
+ server, self.os_primary, tenant_network,
+ True, validation_resources, "SSHABLE", True)
+ else:
+ waiters.wait_for_server_status(
+ self.servers_client, server['id'], 'ACTIVE')
class ServerStableDeviceRescueTestIDE(BaseServerStableDeviceRescueTest):
@@ -172,9 +196,9 @@
"Aarch64 does not support ide bus for cdrom")
def test_stable_device_rescue_cdrom_ide(self):
"""Test rescuing server with cdrom and ide as the rescue disk"""
- server_id, rescue_image_id = self._create_server_and_rescue_image(
+ server, rescue_image_id = self._create_server_and_rescue_image(
hw_rescue_device='cdrom', hw_rescue_bus='ide')
- self._test_stable_device_rescue(server_id, rescue_image_id)
+ self._test_stable_device_rescue(server, rescue_image_id)
class ServerStableDeviceRescueTest(BaseServerStableDeviceRescueTest):
@@ -183,23 +207,23 @@
@decorators.idempotent_id('16865750-1417-4854-bcf7-496e6753c01e')
def test_stable_device_rescue_disk_virtio(self):
"""Test rescuing server with disk and virtio as the rescue disk"""
- server_id, rescue_image_id = self._create_server_and_rescue_image(
+ server, rescue_image_id = self._create_server_and_rescue_image(
hw_rescue_device='disk', hw_rescue_bus='virtio')
- self._test_stable_device_rescue(server_id, rescue_image_id)
+ self._test_stable_device_rescue(server, rescue_image_id)
@decorators.idempotent_id('12340157-6306-4745-bdda-cfa019908b48')
def test_stable_device_rescue_disk_scsi(self):
"""Test rescuing server with disk and scsi as the rescue disk"""
- server_id, rescue_image_id = self._create_server_and_rescue_image(
+ server, rescue_image_id = self._create_server_and_rescue_image(
hw_rescue_device='disk', hw_rescue_bus='scsi')
- self._test_stable_device_rescue(server_id, rescue_image_id)
+ self._test_stable_device_rescue(server, rescue_image_id)
@decorators.idempotent_id('647d04cf-ad35-4956-89ab-b05c5c16f30c')
def test_stable_device_rescue_disk_usb(self):
"""Test rescuing server with disk and usb as the rescue disk"""
- server_id, rescue_image_id = self._create_server_and_rescue_image(
+ server, rescue_image_id = self._create_server_and_rescue_image(
hw_rescue_device='disk', hw_rescue_bus='usb')
- self._test_stable_device_rescue(server_id, rescue_image_id)
+ self._test_stable_device_rescue(server, rescue_image_id)
@decorators.idempotent_id('a3772b42-00bf-4310-a90b-1cc6fd3e7eab')
@utils.services('volume')
@@ -209,14 +233,25 @@
Attach a volume to the server and then rescue the server with disk
and virtio as the rescue disk.
"""
- server_id, rescue_image_id = self._create_server_and_rescue_image(
- hw_rescue_device='disk', hw_rescue_bus='virtio')
- server = self.servers_client.show_server(server_id)['server']
+ # This test does not detach the volume itself, but the cleanup added
+ # by the self.attach_volume() method will try to detach it from the
+ # server after the server has been unrescued. Due to that we need to
+ # make the server SSHable before the detach runs, more details are
+ # in bug#1960346
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ server, rescue_image_id = self._create_server_and_rescue_image(
+ hw_rescue_device='disk', hw_rescue_bus='virtio', validatable=True,
+ validation_resources=validation_resources, wait_until="SSHABLE")
+ server = self.servers_client.show_server(server['id'])['server']
volume = self.create_volume()
self.attach_volume(server, volume)
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'in-use')
- self._test_stable_device_rescue(server_id, rescue_image_id)
+ self._test_stable_device_rescue(
+ server, rescue_image_id,
+ validation_resources=validation_resources)
class ServerBootFromVolumeStableRescueTest(BaseServerStableDeviceRescueTest):
@@ -248,10 +283,10 @@
"source_type": "blank",
"volume_size": CONF.volume.volume_size,
"destination_type": "volume"}]
- server_id, rescue_image_id = self._create_server_and_rescue_image(
+ server, rescue_image_id = self._create_server_and_rescue_image(
hw_rescue_device='disk', hw_rescue_bus='virtio',
block_device_mapping_v2=block_device_mapping_v2)
- self._test_stable_device_rescue(server_id, rescue_image_id)
+ self._test_stable_device_rescue(server, rescue_image_id)
@decorators.attr(type='slow')
@decorators.idempotent_id('e4636333-c928-40fc-98b7-70a23eef4224')
@@ -267,7 +302,7 @@
"volume_size": CONF.volume.volume_size,
"uuid": CONF.compute.image_ref,
"destination_type": "volume"}]
- server_id, rescue_image_id = self._create_server_and_rescue_image(
+ server, rescue_image_id = self._create_server_and_rescue_image(
hw_rescue_device='disk', hw_rescue_bus='virtio',
block_device_mapping_v2=block_device_mapping_v2)
- self._test_stable_device_rescue(server_id, rescue_image_id)
+ self._test_stable_device_rescue(server, rescue_image_id)
diff --git a/tempest/api/compute/servers/test_server_rescue_negative.py b/tempest/api/compute/servers/test_server_rescue_negative.py
index 9bcf062..955ba1c 100644
--- a/tempest/api/compute/servers/test_server_rescue_negative.py
+++ b/tempest/api/compute/servers/test_server_rescue_negative.py
@@ -16,6 +16,7 @@
import testtools
from tempest.api.compute import base
+from tempest.common import compute
from tempest.common import utils
from tempest.common import waiters
from tempest import config
@@ -38,7 +39,8 @@
@classmethod
def setup_credentials(cls):
- cls.set_network_resources(network=True, subnet=True, router=True)
+ cls.set_network_resources(network=True, subnet=True, router=True,
+ dhcp=True)
super(ServerRescueNegativeTestJSON, cls).setup_credentials()
@classmethod
@@ -136,21 +138,41 @@
def test_rescued_vm_detach_volume(self):
"""Test detaching volume from a rescued server should fail"""
volume = self.create_volume()
-
+ # This test only checks that the detach fails and does not perform
+ # the actual detach operation, but the cleanup added by the
+ # self.attach_volume() method will try to detach the volume from the
+ # server after the server has been unrescued. Due to that we need to
+ # make the server SSHable before the detach runs, more details are
+ # in bug#1960346
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ adminPass=self.password,
+ wait_until="SSHABLE",
+ validatable=True,
+ validation_resources=validation_resources)
# Attach the volume to the server
- server = self.servers_client.show_server(self.server_id)['server']
self.attach_volume(server, volume)
# Rescue the server
- self.servers_client.rescue_server(self.server_id,
+ self.servers_client.rescue_server(server['id'],
adminPass=self.password)
waiters.wait_for_server_status(self.servers_client,
- self.server_id, 'RESCUE')
+ server['id'], 'RESCUE')
+ # NOTE(gmann): Cleanups run in LIFO order, so the unrescue added below
+ # runs before the volume detach added by the self.attach_volume()
+ # method; to make sure the server is ready before that detach, we add
+ # a cleanup that waits for SSH; more details are in bug#1960346.
+ if CONF.validation.run_validation:
+ tenant_network = self.get_tenant_network()
+ self.addCleanup(compute.wait_for_ssh_or_ping,
+ server, self.os_primary, tenant_network,
+ True, validation_resources, "SSHABLE", True)
# addCleanup is a LIFO queue
- self.addCleanup(self._unrescue, self.server_id)
+ self.addCleanup(self._unrescue, server['id'])
# Detach the volume from the server expecting failure
self.assertRaises(lib_exc.Conflict,
self.servers_client.detach_volume,
- self.server_id,
+ server['id'],
volume['id'])
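Because addCleanup callbacks run in LIFO order, the effective teardown sequence in the test above is unrescue first, then the SSH wait, then the volume detach that attach_volume() registered earlier, which is exactly the ordering bug#1960346 requires. A minimal, self-contained sketch of that ordering, using hypothetical names to stand in for the real cleanups:

    import unittest


    class CleanupOrderExample(unittest.TestCase):
        """Illustrates the LIFO cleanup ordering relied on above."""

        def test_cleanup_order(self):
            calls = []
            # Registered first (by attach_volume in the real test), runs last.
            self.addCleanup(calls.append, 'detach_volume')
            # Registered second, runs in the middle: the guest must be
            # SSHable again before the detach above is attempted.
            self.addCleanup(calls.append, 'wait_for_ssh')
            # Registered last (unrescue), runs first.
            self.addCleanup(calls.append, 'unrescue')
            # Cleanups have not run yet inside the test body.
            self.assertEqual(calls, [])

    # After the test body finishes, unittest invokes the cleanups in
    # reverse registration order: unrescue, wait_for_ssh, detach_volume.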
diff --git a/tempest/common/compute.py b/tempest/common/compute.py
index 43e30ad..03da49a 100644
--- a/tempest/common/compute.py
+++ b/tempest/common/compute.py
@@ -84,6 +84,73 @@
raise lib_exc.InvalidConfiguration()
+def _setup_validation_fip(
+ server, clients, tenant_network, validation_resources):
+ if CONF.service_available.neutron:
+ ifaces = clients.interfaces_client.list_interfaces(server['id'])
+ validation_port = None
+ for iface in ifaces['interfaceAttachments']:
+ if iface['net_id'] == tenant_network['id']:
+ validation_port = iface['port_id']
+ break
+ if not validation_port:
+ # NOTE(artom) This will get caught by the catch-all clause in
+ # the wait_until loop in create_test_server
+ raise ValueError('Unable to setup floating IP for validation: '
+ 'port not found on tenant network')
+ clients.floating_ips_client.update_floatingip(
+ validation_resources['floating_ip']['id'],
+ port_id=validation_port)
+ else:
+ fip_client = clients.compute_floating_ips_client
+ fip_client.associate_floating_ip_to_server(
+ floating_ip=validation_resources['floating_ip']['ip'],
+ server_id=server['id'])
+
+
+def wait_for_ssh_or_ping(server, clients, tenant_network,
+ validatable, validation_resources, wait_until,
+ set_floatingip):
+ """Wait for the server for SSH or Ping as requested.
+
+ :param server: The server dict as returned by the API
+ :param clients: Client manager which provides OpenStack Tempest clients.
+ :param tenant_network: Tenant network to be used for creating a server.
+ :param validatable: Whether the server will be pingable or sshable.
+ :param validation_resources: Resources created for the connection to the
+ server. Include a keypair, a security group and an IP.
+ :param wait_until: Server status to wait for the server to reach.
+ It can be PINGABLE and SSHABLE states when the server is both
+ validatable and has the required validation_resources provided.
+ :param set_floatingip: If FIP needs to be associated to server
+ """
+ if set_floatingip and CONF.validation.connect_method == 'floating':
+ _setup_validation_fip(
+ server, clients, tenant_network, validation_resources)
+
+ server_ip = get_server_ip(
+ server, validation_resources=validation_resources)
+ if wait_until == 'PINGABLE':
+ waiters.wait_for_ping(
+ server_ip,
+ clients.servers_client.build_timeout,
+ clients.servers_client.build_interval
+ )
+ if wait_until == 'SSHABLE':
+ pkey = validation_resources['keypair']['private_key']
+ ssh_client = remote_client.RemoteClient(
+ server_ip,
+ CONF.validation.image_ssh_user,
+ pkey=pkey,
+ server=server,
+ servers_client=clients.servers_client
+ )
+ waiters.wait_for_ssh(
+ ssh_client,
+ clients.servers_client.build_timeout
+ )
+
+
def create_test_server(clients, validatable=False, validation_resources=None,
tenant_network=None, wait_until=None,
volume_backed=False, name=None, flavor=None,
@@ -237,28 +304,6 @@
body = rest_client.ResponseBody(body.response, body['server'])
servers = [body]
- def _setup_validation_fip():
- if CONF.service_available.neutron:
- ifaces = clients.interfaces_client.list_interfaces(server['id'])
- validation_port = None
- for iface in ifaces['interfaceAttachments']:
- if iface['net_id'] == tenant_network['id']:
- validation_port = iface['port_id']
- break
- if not validation_port:
- # NOTE(artom) This will get caught by the catch-all clause in
- # the wait_until loop below
- raise ValueError('Unable to setup floating IP for validation: '
- 'port not found on tenant network')
- clients.floating_ips_client.update_floatingip(
- validation_resources['floating_ip']['id'],
- port_id=validation_port)
- else:
- fip_client = clients.compute_floating_ips_client
- fip_client.associate_floating_ip_to_server(
- floating_ip=validation_resources['floating_ip']['ip'],
- server_id=servers[0]['id'])
-
if wait_until:
# NOTE(lyarwood): PINGABLE and SSHABLE both require the instance to
@@ -274,35 +319,16 @@
waiters.wait_for_server_status(
clients.servers_client, server['id'], wait_until,
request_id=request_id)
-
if CONF.validation.run_validation and validatable:
-
if CONF.validation.connect_method == 'floating':
- _setup_validation_fip()
-
- server_ip = get_server_ip(
- server, validation_resources=validation_resources)
-
- if wait_until_extra == 'PINGABLE':
- waiters.wait_for_ping(
- server_ip,
- clients.servers_client.build_timeout,
- clients.servers_client.build_interval
- )
-
- if wait_until_extra == 'SSHABLE':
- pkey = validation_resources['keypair']['private_key']
- ssh_client = remote_client.RemoteClient(
- server_ip,
- CONF.validation.image_ssh_user,
- pkey=pkey,
- server=server,
- servers_client=clients.servers_client
- )
- waiters.wait_for_ssh(
- ssh_client,
- clients.servers_client.build_timeout
- )
+ _setup_validation_fip(
+ server, clients, tenant_network,
+ validation_resources)
+ if wait_until_extra:
+ wait_for_ssh_or_ping(
+ server, clients, tenant_network,
+ validatable, validation_resources,
+ wait_until_extra, False)
except Exception:
with excutils.save_and_reraise_exception():
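The connectivity wait that used to live in a closure inside create_test_server() is now the module-level helper compute.wait_for_ssh_or_ping(), so tests can re-check an existing server after an operation such as unrescue, as the rescue tests above do. A minimal sketch of a direct call, assuming the server dict and the validation resources come from an earlier create_test_server(validatable=True, ...) call; the class and method names are hypothetical:

    from tempest.api.compute import base
    from tempest.common import compute
    from tempest import config

    CONF = config.CONF


    class WaitHelperExample(base.BaseV2ComputeTest):
        """Hypothetical sketch of calling the new public wait helper."""

        def _recheck_connectivity(self, server, validation_resources):
            # Positional arguments: server, clients, tenant_network,
            # validatable, validation_resources, wait_until, set_floatingip.
            # The final True (re)associates the floating IP before the SSH
            # wait, matching how the rescue tests call the helper.
            if CONF.validation.run_validation:
                tenant_network = self.get_tenant_network()
                compute.wait_for_ssh_or_ping(
                    server, self.os_primary, tenant_network,
                    True, validation_resources, 'SSHABLE', True)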
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index e62f24a..bd03037 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -145,6 +145,8 @@
irrelevant-files: *tempest-irrelevant-files-3
- devstack-plugin-ceph-tempest-py3:
irrelevant-files: *tempest-irrelevant-files
+ - tempest-full-centos-9-stream:
+ irrelevant-files: *tempest-irrelevant-files
experimental:
jobs:
- tempest-with-latest-microversion
diff --git a/zuul.d/tempest-specific.yaml b/zuul.d/tempest-specific.yaml
index 7d28e5c..a4a4b67 100644
--- a/zuul.d/tempest-specific.yaml
+++ b/zuul.d/tempest-specific.yaml
@@ -85,7 +85,7 @@
- job:
name: tempest-full-centos-9-stream
parent: tempest-full-py3-centos-8-stream
- voting: false
+ voting: true
nodeset: devstack-single-node-centos-9-stream
- job: