Merge "Revert "Patch to fix test_rebuild_volume_backed_server"" into mcp/caracal
diff --git a/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml b/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml
new file mode 100644
index 0000000..0a0b78e
--- /dev/null
+++ b/releasenotes/notes/add-boot-from-volume-option-312d02c0c84f2092.yaml
@@ -0,0 +1,7 @@
+---
+features:
+ - A new config option ``boot_from_volume`` in the
+ ``compute-feature-enabled`` group, which specifies whether
+ nova allows booting instances from volume. This functionality
+ is not available with some hypervisors and cinder backends,
+ such as ironic and ceph.
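
The release note above implies a matching oslo.config registration in tempest/config.py, which this diff does not show. A minimal sketch, assuming a BoolOpt whose default is True:

    from oslo_config import cfg

    # Hypothetical registration for the option described in the release
    # note; the exact default and help text are assumptions.
    boot_from_volume_opt = cfg.BoolOpt(
        'boot_from_volume',
        default=True,
        help='Does the test environment support booting instances '
             'from volume? Not available with some hypervisors and '
             'cinder backends such as ironic and ceph.')

Tests consume it as CONF.compute_feature_enabled.boot_from_volume, as the test_create_server.py hunk further down does.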
diff --git a/requirements.txt b/requirements.txt
index 6e66046..83410e2 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -23,3 +23,5 @@
debtcollector>=1.2.0 # Apache-2.0
defusedxml>=0.7.1 # PSFL
fasteners>=0.16.0 # Apache-2.0
+tenacity>=4.4.0 # Apache-2.0
+websocket-client # LGPLv2+
diff --git a/tempest/api/compute/admin/test_auto_allocate_network.py b/tempest/api/compute/admin/test_auto_allocate_network.py
index e8011a6..7cfbeb0 100644
--- a/tempest/api/compute/admin/test_auto_allocate_network.py
+++ b/tempest/api/compute/admin/test_auto_allocate_network.py
@@ -37,8 +37,6 @@
calls to Neutron to automatically allocate the network topology.
"""
- force_tenant_isolation = True
-
min_microversion = '2.37'
max_microversion = 'latest'
@@ -53,12 +51,6 @@
'auto-allocated-topology extension is not available')
@classmethod
- def setup_credentials(cls):
- # Do not create network resources for these tests.
- cls.set_network_resources()
- super(AutoAllocateNetworkTest, cls).setup_credentials()
-
- @classmethod
def setup_clients(cls):
super(AutoAllocateNetworkTest, cls).setup_clients()
cls.networks_client = cls.os_primary.networks_client
diff --git a/tempest/api/compute/admin/test_create_server.py b/tempest/api/compute/admin/test_create_server.py
index 293e284..e24259f 100644
--- a/tempest/api/compute/admin/test_create_server.py
+++ b/tempest/api/compute/admin/test_create_server.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import re
+
import testtools
from tempest.api.compute import base
@@ -22,6 +24,7 @@
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -136,3 +139,213 @@
servers_client=self.client)
disks_num_eph = len(linux_client.get_disks().split('\n'))
self.assertEqual(disks_num + 1, disks_num_eph)
+
+
+class ServersTestUEFI(base.BaseV2ComputeAdminTest):
+ """Test creating server with UEFI firmware type"""
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(ServersTestUEFI, cls).setup_credentials()
+
+ @classmethod
+ def setup_clients(cls):
+ super(ServersTestUEFI, cls).setup_clients()
+ cls.client = cls.servers_client
+
+ @testtools.skipIf(
+ CONF.compute_feature_enabled.barbican_integration_enabled,
+ "Not supported when barbican integration enabled.")
+ @decorators.idempotent_id('94feb6c3-d07e-b3b9-def8-64fd082d9b21')
+ def test_created_server_uefi(self):
+ # create custom image with uefi type
+ custom_img = self.create_image_with_custom_property(
+ hw_machine_type='q35',
+ hw_firmware_type='uefi',
+ )
+ # create the server and wait for it to become ready
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ image_id=custom_img, validatable=True,
+ validation_resources=validation_resources, wait_until='SSHABLE')
+ # check for the UEFI boot loader in the server's console log
+ uefi_boot_loader = "UEFI Misc Device"
+ console_log = self.client.get_console_output(server['id'])['output']
+ self.assertTrue(console_log, "Console output was empty.")
+ self.assertIn(uefi_boot_loader, console_log)
+
+
+class WindowsServersBaseTest(base.BaseV2ComputeAdminTest):
+ """Test Windows OS guest servers"""
+
+ image_id = None
+ flavor_id = None
+
+ @classmethod
+ def skip_checks(cls):
+ super(WindowsServersBaseTest, cls).skip_checks()
+
+ if not (cls.image_id and cls.flavor_id):
+ skip_msg = ("Environment is not prepared for testing "
+ "Windows servers")
+ raise cls.skipException(skip_msg)
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(WindowsServersBaseTest, cls).setup_credentials()
+
+ @classmethod
+ def setup_clients(cls):
+ super(WindowsServersBaseTest, cls).setup_clients()
+ cls.client = cls.servers_client
+
+ def _test_create_server(self):
+ # Create the server and wait for it to become ready
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ self.create_test_server(
+ image_id=self.image_id,
+ flavor=self.flavor_id,
+ validatable=True,
+ validation_resources=validation_resources,
+ wait_until='PINGABLE')
+
+ def _test_live_migration(self):
+ block_migration = (CONF.compute_feature_enabled.
+ block_migration_for_live_migration)
+ disk_over_commit = False
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ server_id = self.create_test_server(
+ image_id=self.image_id,
+ flavor=self.flavor_id,
+ validatable=True,
+ validation_resources=validation_resources,
+ wait_until='PINGABLE')['id']
+ source_host = self.get_host_for_server(server_id)
+ if not CONF.compute_feature_enabled.can_migrate_between_any_hosts:
+ # do not specify a host so that the scheduler will pick one
+ destination_host = None
+ else:
+ destination_host = self.get_host_other_than(server_id)
+ self.admin_servers_client.live_migrate_server(
+ server_id,
+ host=destination_host,
+ block_migration=block_migration,
+ disk_over_commit=disk_over_commit)
+ waiters.wait_for_server_status(self.client,
+ server_id, 'ACTIVE')
+ destination_host = self.get_host_for_server(server_id)
+ self.assertNotEqual(source_host, destination_host)
+
+ def _test_cold_migration(self):
+ # Run as admin to allow migrating the vTPM secret
+ validation_resources = self.get_class_validation_resources(
+ self.os_admin)
+ server_id = self.create_test_server(
+ clients=self.os_admin,
+ image_id=self.image_id,
+ flavor=self.flavor_id,
+ validatable=True,
+ validation_resources=validation_resources,
+ wait_until='PINGABLE')['id']
+ source_host = self.get_host_for_server(server_id)
+ self.admin_servers_client.migrate_server(server_id)
+ waiters.wait_for_server_status(self.admin_servers_client,
+ server_id, 'VERIFY_RESIZE')
+ self.admin_servers_client.confirm_resize_server(server_id)
+ waiters.wait_for_server_status(self.admin_servers_client,
+ server_id, 'ACTIVE')
+ destination_host = self.get_host_for_server(server_id)
+ self.assertNotEqual(source_host, destination_host)
+
+
+class WindowsServers10Test(WindowsServersBaseTest):
+
+ image_id = CONF.compute.windows10_image_ref
+ flavor_id = CONF.compute.windows10_flavor_ref
+
+ @decorators.idempotent_id('4d54bcfa-08d3-48eb-b7a1-3568db4fc607')
+ def test_create_server(self):
+ self._test_create_server()
+
+ @decorators.attr(type='multinode')
+ @decorators.idempotent_id('6c22fcb1-4c3e-4bf6-b8c7-c3e2322cf5ff')
+ @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
+ 'Live migration is not available.')
+ def test_live_migration(self):
+ self._test_live_migration()
+
+ @decorators.attr(type='multinode')
+ @decorators.idempotent_id('96d67c40-fd4d-4286-a3c7-880d9eb77a95')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ def test_cold_migration(self):
+ self._test_cold_migration()
+
+
+class WindowsServers11Test(WindowsServersBaseTest):
+
+ image_id = CONF.compute.windows11_image_ref
+ flavor_id = CONF.compute.windows11_flavor_ref
+
+ @decorators.idempotent_id('1cff7fea-f251-4a05-a667-9b946913a3c5')
+ def test_create_server(self):
+ self._test_create_server()
+
+ @decorators.attr(type=['negative'])
+ @decorators.idempotent_id('9afd991e-0478-41ca-b5cf-bf32b10ae5a7')
+ @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
+ 'Live migration is not available.')
+ def test_live_migration_with_vtpm_negative(self):
+ """Test live migrating instance with vTPM should not be supported"""
+ self.assertRaises(lib_exc.BadRequest, self._test_live_migration)
+
+ @decorators.attr(type='multinode')
+ @decorators.idempotent_id('7da88453-cc6d-4fef-b893-b4ae8f40767d')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ def test_cold_migration(self):
+ self._test_cold_migration()
+
+
+class ServersTestVGPU(base.BaseV2ComputeAdminTest):
+ """Test creating server with vGPU flavor"""
+
+ @classmethod
+ def setup_credentials(cls):
+ cls.prepare_instance_network()
+ super(ServersTestVGPU, cls).setup_credentials()
+
+ @classmethod
+ def setup_clients(cls):
+ super(ServersTestVGPU, cls).setup_clients()
+ cls.client = cls.servers_client
+
+ @testtools.skipUnless(CONF.compute_feature_enabled.vgpu_flavor_ref,
+ 'vGPU flavor is not available.')
+ @testtools.skipUnless(CONF.compute.image_full_ref,
+ 'Current test requires full OS to be used.')
+ @decorators.idempotent_id('5c06d62b-d9c9-4cef-8b56-ef003af03519')
+ def test_create_server_vgpu(self):
+ # create the server and wait for it to become ready
+ validation_resources = self.get_class_validation_resources(
+ self.os_primary)
+ server = self.create_test_server(
+ image_id=CONF.compute.image_full_ref,
+ validatable=True,
+ validation_resources=validation_resources,
+ flavor=CONF.compute_feature_enabled.vgpu_flavor_ref)
+ # check that the vGPU PCI device appears in the lspci output
+ linux_client = remote_client.RemoteClient(
+ self.get_server_ip(server, validation_resources),
+ CONF.compute.image_full_username,
+ pkey=validation_resources['keypair']['private_key'],
+ server=server,
+ servers_client=self.client)
+ output = linux_client.exec_command('lspci')
+ self.assertTrue(re.search(
+ CONF.compute_feature_enabled.vgpu_lspci_pattern, output))
diff --git a/tempest/api/compute/admin/test_volume.py b/tempest/api/compute/admin/test_volume.py
index 2813d7a..b0f20f6 100644
--- a/tempest/api/compute/admin/test_volume.py
+++ b/tempest/api/compute/admin/test_volume.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common import waiters
from tempest import config
@@ -37,50 +39,13 @@
cls.prepare_instance_network()
super(BaseAttachSCSIVolumeTest, cls).setup_credentials()
- def _create_image_with_custom_property(self, **kwargs):
- """Wrapper utility that returns the custom image.
-
- Creates a new image by downloading the default image's bits and
- uploading them to a new image. Any kwargs are set as image properties
- on the new image.
-
- :param return image_id: The UUID of the newly created image.
- """
- image = self.admin_image_client.show_image(CONF.compute.image_ref)
- # NOTE(danms): We need to stream this, so chunked=True means we get
- # back a urllib3.HTTPResponse and have to carefully pass it to
- # store_image_file() to upload it in pieces.
- image_data_resp = self.admin_image_client.show_image_file(
- CONF.compute.image_ref, chunked=True)
- create_dict = {
- 'container_format': image['container_format'],
- 'disk_format': image['disk_format'],
- 'min_disk': image['min_disk'],
- 'min_ram': image['min_ram'],
- 'visibility': 'public',
- }
- if 'kernel_id' in image:
- create_dict['kernel_id'] = image['kernel_id']
- if 'ramdisk_id' in image:
- create_dict['ramdisk_id'] = image['ramdisk_id']
-
- create_dict.update(kwargs)
- try:
- new_image = self.admin_image_client.create_image(**create_dict)
- self.addCleanup(self.admin_image_client.wait_for_resource_deletion,
- new_image['id'])
- self.addCleanup(
- self.admin_image_client.delete_image, new_image['id'])
- self.admin_image_client.store_image_file(new_image['id'],
- image_data_resp)
- finally:
- image_data_resp.release_conn()
- return new_image['id']
-
class AttachSCSIVolumeTestJSON(BaseAttachSCSIVolumeTest):
"""Test attaching scsi volume to server"""
+ @testtools.skipIf(
+ CONF.compute_feature_enabled.barbican_integration_enabled,
+ "Not supported when barbican integration enabled.")
@decorators.idempotent_id('777e468f-17ca-4da4-b93d-b7dbf56c0494')
def test_attach_scsi_disk_with_config_drive(self):
"""Test the attach/detach volume with config drive/scsi disk
@@ -90,7 +55,7 @@
virtio-scsi mode with further asserting list volume attachments
in instance after attach and detach of the volume.
"""
- custom_img = self._create_image_with_custom_property(
+ custom_img = self.create_image_with_custom_property(
hw_scsi_model='virtio-scsi',
hw_disk_bus='scsi',
hw_cdrom_bus='scsi')
diff --git a/tempest/api/compute/admin/test_volume_swap.py b/tempest/api/compute/admin/test_volume_swap.py
index 9576b74..c091011 100644
--- a/tempest/api/compute/admin/test_volume_swap.py
+++ b/tempest/api/compute/admin/test_volume_swap.py
@@ -36,6 +36,10 @@
raise cls.skipException("Cinder is not available")
if not CONF.compute_feature_enabled.swap_volume:
raise cls.skipException("Swapping volumes is not supported.")
+ if CONF.compute_feature_enabled.attach_encrypted_volume:
+ raise cls.skipException(
+ 'Volume swap is not available for OS configurations '
+ 'with crypted volumes.')
def wait_for_server_volume_swap(self, server_id, old_volume_id,
new_volume_id):
@@ -163,6 +167,8 @@
super(TestMultiAttachVolumeSwap, cls).skip_checks()
if not CONF.compute_feature_enabled.volume_multiattach:
raise cls.skipException('Volume multi-attach is not available.')
+ if not CONF.volume.volume_type_multiattach:
+ raise cls.skipException('Multi-attach volume type is not defined')
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_volumes_negative.py b/tempest/api/compute/admin/test_volumes_negative.py
index 55c842f..235aea1 100644
--- a/tempest/api/compute/admin/test_volumes_negative.py
+++ b/tempest/api/compute/admin/test_volumes_negative.py
@@ -100,6 +100,8 @@
super(UpdateMultiattachVolumeNegativeTest, cls).skip_checks()
if not CONF.compute_feature_enabled.volume_multiattach:
raise cls.skipException('Volume multi-attach is not available.')
+ if not CONF.volume.volume_type_multiattach:
+ raise cls.skipException('Multi-attach volume type is not defined')
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7576d497-b7c6-44bd-9cc5-c5b4e50fec71')
diff --git a/tempest/api/compute/base.py b/tempest/api/compute/base.py
index 2557e47..313f73d 100644
--- a/tempest/api/compute/base.py
+++ b/tempest/api/compute/base.py
@@ -701,3 +701,37 @@
for target_host in hosts:
if source_host != target_host:
return target_host
+
+ def create_image_with_custom_property(self, base_image=None, **kwargs):
+ """Wrapper utility that returns the custom image.
+
+ Creates a new image by downloading the base image bits and
+ uploading them to a new image. Any kwargs are set as image properties
+ on the new image.
+
+ :returns: The UUID of the newly created image.
+ """
+ if base_image is None:
+ base_image = CONF.compute.image_ref
+ image = self.admin_image_client.show_image(base_image)
+ image_data_resp = self.admin_image_client.show_image_file(
+ base_image, chunked=True)
+ create_dict = {
+ 'container_format': image['container_format'],
+ 'disk_format': image['disk_format'],
+ 'min_disk': image['min_disk'],
+ 'min_ram': image['min_ram'],
+ 'visibility': 'public',
+ }
+ create_dict.update(kwargs)
+ try:
+ new_image = self.admin_image_client.create_image(**create_dict)
+ self.addCleanup(self.admin_image_client.wait_for_resource_deletion,
+ new_image['id'])
+ self.addCleanup(
+ self.admin_image_client.delete_image, new_image['id'])
+ self.admin_image_client.store_image_file(new_image['id'],
+ image_data_resp)
+ finally:
+ image_data_resp.release_conn()
+ return new_image['id']
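
For reference, the helper promoted into base.py here is consumed elsewhere in this patch like so (fragment from the UEFI test above; kwargs become properties on the derived image):

    # Build a derived image with extra image properties set.
    custom_img = self.create_image_with_custom_property(
        hw_machine_type='q35',
        hw_firmware_type='uefi',
    )

Keeping base_image optional and defaulting to CONF.compute.image_ref preserves the behaviour of the private helper removed from test_volume.py.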
diff --git a/tempest/api/compute/images/test_images.py b/tempest/api/compute/images/test_images.py
index 87cedae..ff0d458 100644
--- a/tempest/api/compute/images/test_images.py
+++ b/tempest/api/compute/images/test_images.py
@@ -133,6 +133,9 @@
self.addCleanup(self.client.delete_image, image['id'])
self.assertEqual(snapshot_name, image['name'])
+ @testtools.skipIf(
+ CONF.compute_feature_enabled.barbican_integration_enabled,
+ "Not supported when barbican integration enabled.")
@decorators.idempotent_id('f3cac456-e3fe-4183-a7a7-a59f7f017088')
def test_create_server_from_snapshot(self):
# Create one server normally
diff --git a/tempest/api/compute/servers/test_attach_interfaces.py b/tempest/api/compute/servers/test_attach_interfaces.py
index 8984d1d..688b31b 100644
--- a/tempest/api/compute/servers/test_attach_interfaces.py
+++ b/tempest/api/compute/servers/test_attach_interfaces.py
@@ -109,21 +109,24 @@
"""
port = self.ports_client.show_port(port_id)['port']
device_id = port['device_id']
+ dns_name = port.get('dns_name')
start = int(time.time())
# NOTE(mriedem): Nova updates the port's device_id to '' rather than
# None, but it's not contractual so handle Falsey either way.
- while device_id:
+ while any([device_id, dns_name]):
time.sleep(self.build_interval)
port = self.ports_client.show_port(port_id)['port']
device_id = port['device_id']
+ dns_name = port.get('dns_name')
timed_out = int(time.time()) - start >= self.build_timeout
- if device_id and timed_out:
- message = ('Port %s failed to detach (device_id %s) within '
- 'the required time (%s s).' %
- (port_id, device_id, self.build_timeout))
+ if any([device_id, dns_name]) and timed_out:
+ message = ('Port %s failed to detach (device_id %s, '
+ 'dns_name %s) within the required time (%s s).' %
+ (port_id, device_id or 'unset',
+ dns_name or 'unset', self.build_timeout))
raise lib_exc.TimeoutException(message)
return port
diff --git a/tempest/api/compute/servers/test_console.py b/tempest/api/compute/servers/test_console.py
new file mode 100644
index 0000000..0ebb268
--- /dev/null
+++ b/tempest/api/compute/servers/test_console.py
@@ -0,0 +1,319 @@
+# Copyright 2016-2017 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import ssl
+import struct
+import urllib.parse as urlparse
+import urllib3
+import websocket
+
+from tempest.api.compute import base
+from tempest.common import compute
+from tempest import config
+from tempest.lib import decorators
+
+CONF = config.CONF
+
+
+class ConsoleTestBase(base.BaseV2ComputeTest):
+ create_default_network = True
+
+ def setUp(self):
+ super(ConsoleTestBase, self).setUp()
+ self._websocket = None
+ self.server = self.create_test_server(wait_until="ACTIVE")
+ self.use_get_remote_console = False
+ if not self.is_requested_microversion_compatible("2.5"):
+ self.use_get_remote_console = True
+
+ def tearDown(self):
+ super(ConsoleTestBase, self).tearDown()
+ if self._websocket is not None:
+ self._websocket.close()
+ # NOTE(zhufl): server_check_teardown may raise an exception,
+ # which would prevent the remaining cleanup steps from being
+ # executed, so it should be called after super's tearDown.
+ self.server_check_teardown()
+
+ @property
+ def cert_params(self):
+ ssl_opt = {}
+ if CONF.identity.disable_ssl_certificate_validation:
+ ssl_opt["cert_reqs"] = ssl.CERT_NONE
+ else:
+ ssl_opt["ca_certs"] = CONF.identity.ca_certificates_file
+ return ssl_opt
+
+ def _validate_html(self, url, js_title):
+ """Verify we can connect to console and get back the javascript."""
+
+ resp = urllib3.PoolManager(**self.cert_params).request("GET", url)
+ # Make sure that the GET request was accepted by the console proxy
+ self.assertEqual(
+ resp.status,
+ 200,
+ "Got a Bad HTTP Response on the "
+ "initial call: " + str(resp.status),
+ )
+ # Do some basic validation to make sure it is an expected HTML document
+ resp_data = resp.data.decode()
+ # This is needed for cases like: <html lang="en">
+ self.assertRegex(
+ resp_data, "<html.*>", "Not a valid html document in the response."
+ )
+ self.assertIn(
+ "</html>", resp_data, "Not a valid html document in the response."
+ )
+ # Just try to make sure we got JavaScript back for the console; we
+ # won't actually use it, since we are not inside a browser
+ self.assertIn(
+ js_title,
+ resp_data,
+ "Not a valid console javascript html document.",
+ )
+ self.assertIn(
+ "<script",
+ resp_data,
+ "Not a valid console javascript html document.",
+ )
+
+ def _validate_websocket_upgrade(self):
+ """Verify that the websocket upgrade was successful.
+
+ Parses response and ensures that required response
+ fields are present and accurate.
+ (https://tools.ietf.org/html/rfc7231#section-6.2.2)
+ """
+
+ self.assertTrue(
+ self._websocket.response.startswith(
+ b"HTTP/1.1 101 Switching Protocols"
+ ),
+ "Incorrect HTTP return status code: {}".format(
+ str(self._websocket.response)
+ ),
+ )
+ _required_header = "upgrade: websocket"
+ _response = str(self._websocket.response).lower()
+ self.assertIn(
+ _required_header,
+ _response,
+ "Did not get the expected WebSocket HTTP Response.",
+ )
+
+ def _get_console_body(self, type, protocol, get_console):
+ if self.use_get_remote_console:
+ return self.servers_client.get_remote_console(
+ self.server["id"], type=type, protocol=protocol
+ )["remote_console"]
+ return getattr(self.servers_client, get_console)(
+ self.server["id"], type=type)["console"]
+
+ def _test_console_bad_token(self, type, protocol, get_console):
+ body = self._get_console_body(type, protocol, get_console)
+ self.assertEqual(type, body["type"])
+ # Do the WebSockify HTTP Request to console proxy with a bad token
+ parts = urlparse.urlparse(body["url"])
+ qparams = urlparse.parse_qs(parts.query)
+ if "path" in qparams:
+ qparams["path"] = urlparse.unquote(qparams["path"][0]).replace(
+ "token=", "token=bad"
+ )
+ elif "token" in qparams:
+ qparams["token"] = "bad" + qparams["token"][0]
+ new_query = urlparse.urlencode(qparams)
+ new_parts = urlparse.ParseResult(
+ parts.scheme,
+ parts.netloc,
+ parts.path,
+ parts.params,
+ new_query,
+ parts.fragment,
+ )
+ url = urlparse.urlunparse(new_parts)
+ self._websocket = compute.create_websocket(url)
+ # Make sure the console proxy rejected the connection and closed it
+ data = self._websocket.receive_frame()
+ self.assertTrue(
+ data is None or not data,
+ "The console proxy actually sent us some data, but we "
+ "expected it to close the connection.",
+ )
+
+
+class NoVNCConsoleTestJSON(ConsoleTestBase):
+ """Test novnc console"""
+
+ @classmethod
+ def skip_checks(cls):
+ super(NoVNCConsoleTestJSON, cls).skip_checks()
+ if not CONF.compute_feature_enabled.vnc_console:
+ raise cls.skipException("VNC Console feature is disabled.")
+
+ def _validate_rfb_negotiation(self):
+ """Verify we can connect to novnc and do the websocket connection."""
+ # Turn the Socket into a WebSocket to do the communication
+ data = self._websocket.receive_frame()
+ self.assertFalse(
+ data is None or not data,
+ "Token must be invalid because the connection closed.",
+ )
+ # Parse the RFB version from the data to make sure it is valid
+ # and belongs to the known supported RFB versions.
+ version = float(
+ "%d.%d" % (int(data[4:7], base=10), int(data[8:11], base=10))
+ )
+ # Add the max RFB versions supported
+ supported_versions = [3.3, 3.8]
+ self.assertIn(
+ version, supported_versions, "Bad RFB Version: " + str(version)
+ )
+ # Send our RFB version to the server
+ self._websocket.send_frame(data)
+ # Get the server authentication type and make sure None is supported
+ data = self._websocket.receive_frame()
+ self.assertIsNotNone(data, "Expected authentication type None.")
+ data_length = len(data)
+ if version == 3.3:
+ # For RFB 3.3: in the security handshake, rather than a two-way
+ # negotiation, the server decides the security type and sends a
+ # single word(4 bytes).
+ self.assertEqual(
+ data_length, 4, "Expected authentication type None."
+ )
+ self.assertIn(
+ 1,
+ [int(data[i]) for i in (0, 3)],
+ "Expected authentication type None.",
+ )
+ else:
+ self.assertGreaterEqual(
+ len(data), 2, "Expected authentication type None."
+ )
+ self.assertIn(
+ 1,
+ [int(data[i + 1]) for i in range(int(data[0]))],
+ "Expected authentication type None.",
+ )
+ # Send to the server that we only support authentication
+ # type None
+ self._websocket.send_frame(bytes((1,)))
+
+ # The server should send 4 bytes of 0's if security
+ # handshake succeeded
+ data = self._websocket.receive_frame()
+ self.assertEqual(
+ len(data), 4, "Server did not think security was successful."
+ )
+ self.assertEqual(
+ [int(i) for i in data],
+ [0, 0, 0, 0],
+ "Server did not think security was successful.",
+ )
+
+ # Say to leave the desktop as shared as part of client initialization
+ self._websocket.send_frame(bytes((1,)))
+ # Get the server initialization packet back and make sure it is the
+ # right structure where bytes 20-24 is the name length and
+ # 24-N is the name
+ data = self._websocket.receive_frame()
+ data_length = len(data) if data is not None else 0
+ self.assertFalse(
+ data_length <= 24 or
+ data_length != (struct.unpack(">L", data[20:24])[0] + 24),
+ "Server initialization was not the right format.",
+ )
+ # Since the rest of the data on the screen is arbitrary, we will
+ # close the socket and end our validation of the data at this point
+ # Assert that the latest check was false, meaning that the server
+ # initialization was the right format
+ self.assertFalse(
+ data_length <= 24 or
+ data_length != (struct.unpack(">L", data[20:24])[0] + 24)
+ )
+
+ @decorators.idempotent_id("c640fdff-8ab4-45a4-a5d8-7e6146cbd0dc")
+ def test_novnc(self):
+ """Test accessing novnc console of server"""
+ body = self._get_console_body("novnc", "vnc", "get_vnc_console")
+ self.assertEqual("novnc", body["type"])
+ # Do the initial HTTP Request to novncproxy to get the JavaScript
+ self._validate_html(body["url"], "noVNC")
+ # Do the WebSockify HTTP Request to novncproxy to do the RFB connection
+ self._websocket = compute.create_websocket(body["url"])
+ # Validate that we successfully connected and upgraded to Web Sockets
+ self._validate_websocket_upgrade()
+ # Validate the RFB Negotiation to determine if a valid VNC session
+ self._validate_rfb_negotiation()
+
+ @decorators.idempotent_id("f9c79937-addc-4aaa-9e0e-841eef02aeb7")
+ def test_novnc_bad_token(self):
+ """Test accessing novnc console with bad token
+
+ Do the WebSockify HTTP request to the novnc proxy with a bad token;
+ the novnc proxy should reject the connection and close it.
+ """
+ self._test_console_bad_token("novnc", "vnc", "get_vnc_console")
+
+
+class SpiceConsoleTestJSON(ConsoleTestBase):
+ """Test spice console"""
+
+ @classmethod
+ def skip_checks(cls):
+ super(SpiceConsoleTestJSON, cls).skip_checks()
+ if not CONF.compute_feature_enabled.spice_console:
+ raise cls.skipException("SPICE Console feature is disabled.")
+
+ def _validate_websocket_connection(self, body):
+ # Protocol Magic number UINT8[4] { 0x52, 0x45, 0x44, 0x51} // "REDQ"
+ spice_magic = b"REDQ"
+ scheme = {"https": "wss", "http": "ws"}
+
+ q = urlparse.urlparse(body["url"])
+ ws = websocket.WebSocket(sslopt=self.cert_params)
+ ws.connect(
+ f"{scheme[q.scheme]}://{q.netloc}/websockify", cookie=q.query,
+ subprotocols=["binary"]
+ )
+ ws.send_binary(b"\r\n\r\n")
+ opcode, data = ws.recv_data()
+ self.assertEqual(opcode, websocket.ABNF.OPCODE_BINARY)
+ self.assertTrue(data.startswith(spice_magic))
+
+ @decorators.idempotent_id("0914a681-72dd-4fad-8457-b45195373d3d")
+ def test_spice(self):
+ """Test accessing spice console of server"""
+ body = self._get_console_body(
+ "spice-html5", "spice", "get_spice_console"
+ )
+ self.assertEqual("spice-html5", body["type"])
+ # Do the initial HTTP Request to spiceproxy to get the JavaScript
+ self._validate_html(body["url"], "Spice Javascript client")
+ # Validate that we successfully connected to Web Sockets
+ self._validate_websocket_connection(body)
+
+ @decorators.idempotent_id("6f4b0690-d078-4a28-a2ce-33dafdfca7ac")
+ def test_spice_bad_token(self):
+ """Test accessing spice console with bad token
+
+ Do the WebSockify HTTP request to the spice proxy with a bad token;
+ the spice proxy should reject the connection and close it.
+ """
+ self._test_console_bad_token(
+ "spice-html5", "spice", "get_spice_console"
+ )
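
The RFB negotiation in NoVNCConsoleTestJSON parses the version out of the 12-byte ProtocolVersion greeting by fixed offsets; a standalone illustration with an assumed handshake value:

    # The server greets with e.g. b"RFB 003.008\n"; bytes 4:7 and 8:11
    # hold the zero-padded major and minor version numbers.
    data = b"RFB 003.008\n"
    version = float("%d.%d" % (int(data[4:7], base=10),
                               int(data[8:11], base=10)))
    assert version == 3.8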
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 6664e15..6f97b1f 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -182,6 +182,8 @@
if not utils.get_service_list()['volume']:
msg = "Volume service not enabled."
raise cls.skipException(msg)
+ if not CONF.compute_feature_enabled.boot_from_volume:
+ raise cls.skipException("Booting from volume is not enabled.")
class ServersTestFqdnHostnames(base.BaseV2ComputeTest):
diff --git a/tempest/api/compute/servers/test_novnc.py b/tempest/api/compute/servers/test_novnc.py
deleted file mode 100644
index 1308b19..0000000
--- a/tempest/api/compute/servers/test_novnc.py
+++ /dev/null
@@ -1,230 +0,0 @@
-# Copyright 2016-2017 OpenStack Foundation
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-import struct
-import urllib.parse as urlparse
-import urllib3
-
-from tempest.api.compute import base
-from tempest.common import compute
-from tempest import config
-from tempest.lib import decorators
-
-CONF = config.CONF
-
-
-class NoVNCConsoleTestJSON(base.BaseV2ComputeTest):
- """Test novnc console"""
-
- create_default_network = True
-
- @classmethod
- def skip_checks(cls):
- super(NoVNCConsoleTestJSON, cls).skip_checks()
- if not CONF.compute_feature_enabled.vnc_console:
- raise cls.skipException('VNC Console feature is disabled.')
-
- def setUp(self):
- super(NoVNCConsoleTestJSON, self).setUp()
- self._websocket = None
-
- def tearDown(self):
- super(NoVNCConsoleTestJSON, self).tearDown()
- if self._websocket is not None:
- self._websocket.close()
- # NOTE(zhufl): Because server_check_teardown will raise Exception
- # which will prevent other cleanup steps from being executed, so
- # server_check_teardown should be called after super's tearDown.
- self.server_check_teardown()
-
- @classmethod
- def setup_clients(cls):
- super(NoVNCConsoleTestJSON, cls).setup_clients()
- cls.client = cls.servers_client
-
- @classmethod
- def resource_setup(cls):
- super(NoVNCConsoleTestJSON, cls).resource_setup()
- cls.server = cls.create_test_server(wait_until="ACTIVE")
- cls.use_get_remote_console = False
- if not cls.is_requested_microversion_compatible('2.5'):
- cls.use_get_remote_console = True
-
- def _validate_novnc_html(self, vnc_url):
- """Verify we can connect to novnc and get back the javascript."""
- resp = urllib3.PoolManager().request('GET', vnc_url)
- # Make sure that the GET request was accepted by the novncproxy
- self.assertEqual(resp.status, 200, 'Got a Bad HTTP Response on the '
- 'initial call: ' + str(resp.status))
- # Do some basic validation to make sure it is an expected HTML document
- resp_data = resp.data.decode()
- # This is needed in the case of example: <html lang="en">
- self.assertRegex(resp_data, '<html.*>',
- 'Not a valid html document in the response.')
- self.assertIn('</html>', resp_data,
- 'Not a valid html document in the response.')
- # Just try to make sure we got JavaScript back for noVNC, since we
- # won't actually use it since not inside of a browser
- self.assertIn('noVNC', resp_data,
- 'Not a valid noVNC javascript html document.')
- self.assertIn('<script', resp_data,
- 'Not a valid noVNC javascript html document.')
-
- def _validate_rfb_negotiation(self):
- """Verify we can connect to novnc and do the websocket connection."""
- # Turn the Socket into a WebSocket to do the communication
- data = self._websocket.receive_frame()
- self.assertFalse(data is None or not data,
- 'Token must be invalid because the connection '
- 'closed.')
- # Parse the RFB version from the data to make sure it is valid
- # and belong to the known supported RFB versions.
- version = float("%d.%d" % (int(data[4:7], base=10),
- int(data[8:11], base=10)))
- # Add the max RFB versions supported
- supported_versions = [3.3, 3.8]
- self.assertIn(version, supported_versions,
- 'Bad RFB Version: ' + str(version))
- # Send our RFB version to the server
- self._websocket.send_frame(data)
- # Get the sever authentication type and make sure None is supported
- data = self._websocket.receive_frame()
- self.assertIsNotNone(data, 'Expected authentication type None.')
- data_length = len(data)
- if version == 3.3:
- # For RFB 3.3: in the security handshake, rather than a two-way
- # negotiation, the server decides the security type and sends a
- # single word(4 bytes).
- self.assertEqual(
- data_length, 4, 'Expected authentication type None.')
- self.assertIn(1, [int(data[i]) for i in (0, 3)],
- 'Expected authentication type None.')
- else:
- self.assertGreaterEqual(
- len(data), 2, 'Expected authentication type None.')
- self.assertIn(
- 1,
- [int(data[i + 1]) for i in range(int(data[0]))],
- 'Expected authentication type None.')
- # Send to the server that we only support authentication
- # type None
- self._websocket.send_frame(bytes((1,)))
-
- # The server should send 4 bytes of 0's if security
- # handshake succeeded
- data = self._websocket.receive_frame()
- self.assertEqual(
- len(data), 4,
- 'Server did not think security was successful.')
- self.assertEqual(
- [int(i) for i in data], [0, 0, 0, 0],
- 'Server did not think security was successful.')
-
- # Say to leave the desktop as shared as part of client initialization
- self._websocket.send_frame(bytes((1,)))
- # Get the server initialization packet back and make sure it is the
- # right structure where bytes 20-24 is the name length and
- # 24-N is the name
- data = self._websocket.receive_frame()
- data_length = len(data) if data is not None else 0
- self.assertFalse(data_length <= 24 or
- data_length != (struct.unpack(">L",
- data[20:24])[0] + 24),
- 'Server initialization was not the right format.')
- # Since the rest of the data on the screen is arbitrary, we will
- # close the socket and end our validation of the data at this point
- # Assert that the latest check was false, meaning that the server
- # initialization was the right format
- self.assertFalse(data_length <= 24 or
- data_length != (struct.unpack(">L",
- data[20:24])[0] + 24))
-
- def _validate_websocket_upgrade(self):
- """Verify that the websocket upgrade was successful.
-
- Parses response and ensures that required response
- fields are present and accurate.
- (https://tools.ietf.org/html/rfc7231#section-6.2.2)
- """
-
- self.assertTrue(
- self._websocket.response.startswith(b'HTTP/1.1 101 Switching '
- b'Protocols'),
- 'Incorrect HTTP return status code: {}'.format(
- str(self._websocket.response)
- )
- )
- _required_header = 'upgrade: websocket'
- _response = str(self._websocket.response).lower()
- self.assertIn(
- _required_header,
- _response,
- 'Did not get the expected WebSocket HTTP Response.'
- )
-
- @decorators.idempotent_id('c640fdff-8ab4-45a4-a5d8-7e6146cbd0dc')
- def test_novnc(self):
- """Test accessing novnc console of server"""
- if self.use_get_remote_console:
- body = self.client.get_remote_console(
- self.server['id'], console_type='novnc',
- protocol='vnc')['remote_console']
- else:
- body = self.client.get_vnc_console(self.server['id'],
- type='novnc')['console']
- self.assertEqual('novnc', body['type'])
- # Do the initial HTTP Request to novncproxy to get the NoVNC JavaScript
- self._validate_novnc_html(body['url'])
- # Do the WebSockify HTTP Request to novncproxy to do the RFB connection
- self._websocket = compute.create_websocket(body['url'])
- # Validate that we successfully connected and upgraded to Web Sockets
- self._validate_websocket_upgrade()
- # Validate the RFB Negotiation to determine if a valid VNC session
- self._validate_rfb_negotiation()
-
- @decorators.idempotent_id('f9c79937-addc-4aaa-9e0e-841eef02aeb7')
- def test_novnc_bad_token(self):
- """Test accessing novnc console with bad token
-
- Do the WebSockify HTTP Request to novnc proxy with a bad token,
- the novnc proxy should reject the connection and closed it.
- """
- if self.use_get_remote_console:
- body = self.client.get_remote_console(
- self.server['id'], console_type='novnc',
- protocol='vnc')['remote_console']
- else:
- body = self.client.get_vnc_console(self.server['id'],
- type='novnc')['console']
- self.assertEqual('novnc', body['type'])
- # Do the WebSockify HTTP Request to novncproxy with a bad token
- parts = urlparse.urlparse(body['url'])
- qparams = urlparse.parse_qs(parts.query)
- if 'path' in qparams:
- qparams['path'] = urlparse.unquote(qparams['path'][0]).replace(
- 'token=', 'token=bad')
- elif 'token' in qparams:
- qparams['token'] = 'bad' + qparams['token'][0]
- new_query = urlparse.urlencode(qparams)
- new_parts = urlparse.ParseResult(parts.scheme, parts.netloc,
- parts.path, parts.params, new_query,
- parts.fragment)
- url = urlparse.urlunparse(new_parts)
- self._websocket = compute.create_websocket(url)
- # Make sure the novncproxy rejected the connection and closed it
- data = self._websocket.receive_frame()
- self.assertTrue(data is None or not data,
- "The novnc proxy actually sent us some data, but we "
- "expected it to close the connection.")
diff --git a/tempest/api/compute/servers/test_server_rescue.py b/tempest/api/compute/servers/test_server_rescue.py
index 97c2774..5e82835 100644
--- a/tempest/api/compute/servers/test_server_rescue.py
+++ b/tempest/api/compute/servers/test_server_rescue.py
@@ -74,6 +74,7 @@
class ServerRescueTestJSONUnderV235(ServerRescueTestBase):
"""Test server rescue with compute microversion less than 2.36"""
+ min_microversion = '2.1'
max_microversion = '2.35'
# TODO(zhufl): After 2.35 we should switch to neutron client to create
@@ -123,6 +124,9 @@
if not CONF.compute_feature_enabled.stable_rescue:
msg = "Stable rescue not available."
raise cls.skipException(msg)
+ if CONF.compute_feature_enabled.barbican_integration_enabled:
+ msg = "Rescue not supported with barbican integration."
+ raise cls.skipException(msg)
@classmethod
def setup_credentials(cls):
diff --git a/tempest/api/compute/test_quotas.py b/tempest/api/compute/test_quotas.py
index 38ca53b..286e0a5 100644
--- a/tempest/api/compute/test_quotas.py
+++ b/tempest/api/compute/test_quotas.py
@@ -13,10 +13,15 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.api.compute import base
from tempest.common import tempest_fixtures as fixtures
+from tempest import config
from tempest.lib import decorators
+CONF = config.CONF
+
class QuotasTestJSON(base.BaseV2ComputeTest):
"""Test compute quotas"""
@@ -76,6 +81,8 @@
for quota in expected_quota_set:
self.assertIn(quota, quota_set.keys())
+ @testtools.skipIf(not CONF.auth.use_dynamic_credentials,
+ 'does not support static credentials')
@decorators.idempotent_id('cd65d997-f7e4-4966-a7e9-d5001b674fdc')
def test_compare_tenant_quotas_with_default_quotas(self):
"""Test tenants are created with the default compute quota values"""
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index 7ea8f09..ba95569 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -267,6 +267,12 @@
"""
server, validation_resources = self._create_server()
volume = self.create_volume()
+
+ if volume['multiattach']:
+ raise self.skipException(
+ "Attaching multiattach volumes is not supported "
+ "for shelved-offloaded instances.")
+
num_vol = self._count_volumes(server, validation_resources)
self._shelve_server(server, validation_resources)
attachment = self.attach_volume(server, volume)
@@ -277,8 +283,7 @@
# Get volume attachment of the server
volume_attachment = self.servers_client.show_volume_attachment(
- server['id'],
- attachment['id'])['volumeAttachment']
+ server['id'], attachment['id'])['volumeAttachment']
self.assertEqual(server['id'], volume_attachment['serverId'])
self.assertEqual(attachment['id'], volume_attachment['id'])
# Check the mountpoint is not None after unshelve server even in
@@ -298,6 +303,12 @@
"""
server, validation_resources = self._create_server()
volume = self.create_volume()
+
+ if volume['multiattach']:
+ raise self.skipException(
+ "Attaching multiattach volumes is not supported for "
+ "shelved-offloaded instances.")
+
num_vol = self._count_volumes(server, validation_resources)
self._shelve_server(server, validation_resources)
@@ -307,8 +318,8 @@
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
- # Unshelve the instance and check that we have the expected number of
- # volume(s)
+ # Unshelve the instance and check that we have
+ # the expected number of volume(s)
self._unshelve_server_and_check_volumes(
server, validation_resources, num_vol)
@@ -328,6 +339,8 @@
super(AttachVolumeMultiAttachTest, cls).skip_checks()
if not CONF.compute_feature_enabled.volume_multiattach:
raise cls.skipException('Volume multi-attach is not available.')
+ if not CONF.volume.volume_type_multiattach:
+ raise cls.skipException('Multi-attach volume type is not defined')
def _attach_volume_to_servers(self, volume, servers):
"""Attaches the given volume to the list of servers.
diff --git a/tempest/api/image/v2/admin/test_images.py b/tempest/api/image/v2/admin/test_images.py
index 2b1c4fb..1190c52 100644
--- a/tempest/api/image/v2/admin/test_images.py
+++ b/tempest/api/image/v2/admin/test_images.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import io
+import time
@@ -105,7 +106,7 @@
# NOTE(gmann): Skip if copy-image import method and multistore
# are not available.
if ('copy-image' not in available_import_methods or
- not available_stores):
+ len(available_stores) < 2):
raise self.skipException('Either copy-image import method or '
'multistore is not available')
uuid = data_utils.rand_uuid()
@@ -235,3 +236,45 @@
observed_image,
stores,
first_image_store_deleted)
+
+
+class ImageWebUploadAdminTest(base.BaseV2ImageAdminTest):
+ @classmethod
+ def skip_checks(cls):
+ super(ImageWebUploadAdminTest, cls).skip_checks()
+ enabled_methods = CONF.image_feature_enabled.enabled_import_methods
+ if "web-download" not in enabled_methods:
+ raise cls.skipException(
+ "Glance image upload via url feature disabled")
+
+ @decorators.idempotent_id('5b2ce43c-924c-4bae-bac0-f5d6ed69d72e')
+ def test_image_upload_via_url(self):
+ # Create image
+ image_name = data_utils.rand_name("image")
+ container_format = CONF.image.container_formats[0]
+ disk_format = CONF.image.disk_formats[0]
+ image = self.create_image(name=image_name,
+ container_format=container_format,
+ disk_format=disk_format,
+ visibility='private')
+ self.assertEqual('queued', image['status'])
+
+ # Upload image via url
+ image_uri = CONF.image.http_image
+ method = {"name": "web-download", "uri": image_uri}
+ self.admin_client.import_image(image_id=image["id"], method=method)
+
+ timeout = CONF.image.build_timeout
+ interval = CONF.image.build_interval
+
+ start_time = int(time.time())
+ while True:
+ body = self.admin_client.show_image(image['id'])
+ if body["status"] == "active":
+ break
+ if int(time.time()) - start_time >= timeout:
+ message = ('Image %(id)s failed to become active within '
+ 'the required time (%(timeout)s s).' %
+ {'id': image['id'], 'timeout': timeout})
+ raise lib_exc.TimeoutException(message)
+ time.sleep(interval)
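
requirements.txt above adds tenacity, which could express the hand-rolled wait loop in test_image_upload_via_url more declaratively. A sketch only, not what the patch does (note tenacity raises RetryError, not lib_exc.TimeoutException, when the stop condition fires):

    from tenacity import (retry, retry_if_result, stop_after_delay,
                          wait_fixed)

    @retry(retry=retry_if_result(lambda status: status != 'active'),
           stop=stop_after_delay(timeout),
           wait=wait_fixed(interval))
    def _wait_image_active():
        # Re-fetch the image until its status flips to 'active'.
        return self.admin_client.show_image(image['id'])['status']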
diff --git a/tempest/api/network/admin/test_routers.py b/tempest/api/network/admin/test_routers.py
index 216b15d..d8ef4a3 100644
--- a/tempest/api/network/admin/test_routers.py
+++ b/tempest/api/network/admin/test_routers.py
@@ -12,6 +12,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
+import time
import testtools
@@ -22,6 +23,7 @@
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
CONF = config.CONF
@@ -117,7 +119,30 @@
for k, v in exp_ext_gw_info.items():
self.assertEqual(v, actual_ext_gw_info[k])
+ def _wait_for_ports(self, router_id, timeout=30):
+ start = int(time.time())
+ list_body = self.admin_ports_client.list_ports(
+ network_id=CONF.network.public_network_id,
+ device_id=router_id,
+ device_owner="network:router_gateway")
+
+ while not len(list_body['ports']):
+ time.sleep(5)
+ list_body = self.admin_ports_client.list_ports(
+ network_id=CONF.network.public_network_id,
+ device_id=router_id,
+ device_owner="network:router_gateway")
+ timed_out = int(time.time()) - start >= timeout
+ if not len(list_body['ports']) and timed_out:
+ message = ('Router %s failed to attach ports within '
+ 'the required time (%s s).' %
+ (router_id, timeout))
+ raise lib_exc.TimeoutException(message)
+
def _verify_gateway_port(self, router_id):
+ # Workaround for PRODX-8489
+ if config.is_tungstenfabric_backend_enabled():
+ self._wait_for_ports(router_id)
list_body = self.admin_ports_client.list_ports(
network_id=CONF.network.public_network_id,
device_id=router_id,
diff --git a/tempest/api/network/base.py b/tempest/api/network/base.py
index 99742cc..fa30b40 100644
--- a/tempest/api/network/base.py
+++ b/tempest/api/network/base.py
@@ -19,6 +19,7 @@
from tempest import exceptions
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
+from tempest.lib.common import waiters as lib_waiters
from tempest.lib import exceptions as lib_exc
import tempest.test
@@ -224,6 +225,9 @@
test_utils.call_and_ignore_notfound_exc(
cls.routers_client.remove_router_interface, router['id'],
subnet_id=i['fixed_ips'][0]['subnet_id'])
+ lib_waiters.wait_router_interface_removed(
+ cls.ports_client, router['id'],
+ subnet_id=i['fixed_ips'][0]['subnet_id'])
cls.routers_client.delete_router(router['id'])
diff --git a/tempest/api/network/test_agent_management_negative.py b/tempest/api/network/test_agent_management_negative.py
index d1c02ce..36d44d5 100644
--- a/tempest/api/network/test_agent_management_negative.py
+++ b/tempest/api/network/test_agent_management_negative.py
@@ -14,11 +14,19 @@
# under the License.
from tempest.api.network import base
+from tempest.common import utils
from tempest.lib import decorators
class AgentManagementNegativeTest(base.BaseNetworkTest):
+ @classmethod
+ def skip_checks(cls):
+ super(AgentManagementNegativeTest, cls).skip_checks()
+ if not utils.is_extension_enabled('agent', 'network'):
+ msg = "agent extension not enabled."
+ raise cls.skipException(msg)
+
@decorators.idempotent_id('e335be47-b9a1-46fd-be30-0874c0b751e6')
@decorators.attr(type=['negative'])
def test_list_agents_non_admin(self):
diff --git a/tempest/api/network/test_security_groups.py b/tempest/api/network/test_security_groups.py
index c7f6b8f..e6d0f7e 100644
--- a/tempest/api/network/test_security_groups.py
+++ b/tempest/api/network/test_security_groups.py
@@ -191,10 +191,20 @@
protocol = 'tcp'
port_range_min = 77
port_range_max = 77
- self._create_verify_security_group_rule(sg_id, direction,
- self.ethertype, protocol,
- port_range_min,
- port_range_max)
+
+ if config.is_tungstenfabric_backend_enabled():
+ if self.ethertype == 'IPv6':
+ remote_ip_prefix = '::/0'
+ else:
+ remote_ip_prefix = '0.0.0.0/0'
+ self._create_verify_security_group_rule(
+ sg_id, direction, self.ethertype, protocol, port_range_min,
+ port_range_max, remote_ip_prefix=remote_ip_prefix)
+ else:
+ self._create_verify_security_group_rule(sg_id, direction,
+ self.ethertype, protocol,
+ port_range_min,
+ port_range_max)
@decorators.idempotent_id('c9463db8-b44d-4f52-b6c0-8dbda99f26ce')
def test_create_security_group_rule_with_icmp_type_code(self):
@@ -219,9 +229,18 @@
protocol = 'ipv6-icmp' if self._ip_version == 6 else 'icmp'
icmp_type_codes = [(3, 2), (3, 0), (8, 0), (0, 0), (11, None)]
for icmp_type, icmp_code in icmp_type_codes:
- self._create_verify_security_group_rule(sg_id, direction,
- self.ethertype, protocol,
- icmp_type, icmp_code)
+ if config.is_tungstenfabric_backend_enabled():
+ if self.ethertype == 'IPv6':
+ remote_ip_prefix = '::/0'
+ else:
+ remote_ip_prefix = '0.0.0.0/0'
+ self._create_verify_security_group_rule(
+ sg_id, direction, self.ethertype, protocol, icmp_type,
+ icmp_code, remote_ip_prefix=remote_ip_prefix)
+ else:
+ self._create_verify_security_group_rule(
+ sg_id, direction, self.ethertype, protocol, icmp_type,
+ icmp_code)
@decorators.idempotent_id('c2ed2deb-7a0c-44d8-8b4c-a5825b5c310b')
def test_create_security_group_rule_with_remote_group_id(self):
diff --git a/tempest/api/network/test_subnetpools_extensions.py b/tempest/api/network/test_subnetpools_extensions.py
index 689844b..f398062 100644
--- a/tempest/api/network/test_subnetpools_extensions.py
+++ b/tempest/api/network/test_subnetpools_extensions.py
@@ -45,6 +45,9 @@
if not utils.is_extension_enabled('subnet_allocation', 'network'):
msg = "subnet_allocation extension not enabled."
raise cls.skipException(msg)
+ if not utils.is_extension_enabled('default-subnetpools', 'network'):
+ msg = "default-subnetpools extension not enabled."
+ raise cls.skipException(msg)
@decorators.attr(type='smoke')
@decorators.idempotent_id('62595970-ab1c-4b7f-8fcc-fddfe55e9811')
diff --git a/tempest/api/network/test_tags.py b/tempest/api/network/test_tags.py
index bd3e360..af0a8b0 100644
--- a/tempest/api/network/test_tags.py
+++ b/tempest/api/network/test_tags.py
@@ -113,7 +113,10 @@
# NOTE(felipemonteiro): The supported resource names are plural. Use
# the singular case for the corresponding class resource object.
- SUPPORTED_RESOURCES = ['subnets', 'ports', 'routers', 'subnetpools']
+ if config.is_tungstenfabric_backend_enabled():
+ SUPPORTED_RESOURCES = ['subnets', 'ports', 'routers']
+ else:
+ SUPPORTED_RESOURCES = ['subnets', 'ports', 'routers', 'subnetpools']
@classmethod
def skip_checks(cls):
@@ -134,6 +137,9 @@
cls.port = cls.create_port(cls.network)
cls.router = cls.create_router()
+ if config.is_tungstenfabric_backend_enabled():
+ return
+
subnetpool_name = data_utils.rand_name(
cls.__name__ + '-Subnetpool', prefix=CONF.resource_name_prefix)
prefix = CONF.network.default_network
diff --git a/tempest/api/object_storage/test_container_quotas.py b/tempest/api/object_storage/test_container_quotas.py
index f055d19..d4bf0d7 100644
--- a/tempest/api/object_storage/test_container_quotas.py
+++ b/tempest/api/object_storage/test_container_quotas.py
@@ -88,9 +88,12 @@
nafter = self._get_bytes_used()
self.assertEqual(nbefore, nafter)
+ # NOTE(vsaienko): the quotas implementation on active/active rgw
+ # deployments has no coordination, so quota overflow might happen.
+ # Since this is upstream behaviour that we can't fix downstream,
+ # remove the test from smoke.
+ # Related-Prod: PRODX-11581
@decorators.idempotent_id('3a387039-697a-44fc-a9c0-935de31f426b')
@utils.requires_ext(extension='container_quotas', service='object')
- @decorators.attr(type="smoke")
def test_upload_too_many_objects(self):
"""Attempts to upload many objects that exceeds the count limit."""
for _ in range(QUOTA_COUNT):
diff --git a/tempest/api/volume/admin/test_multi_backend.py b/tempest/api/volume/admin/test_multi_backend.py
index 028bf1a..b45824e 100644
--- a/tempest/api/volume/admin/test_multi_backend.py
+++ b/tempest/api/volume/admin/test_multi_backend.py
@@ -30,8 +30,9 @@
raise cls.skipException("Cinder multi-backend feature disabled")
if len(set(CONF.volume.backend_names)) < 2:
- raise cls.skipException("Requires at least two different "
- "backend names")
+ raise cls.skipException(
+ "Requires at least two different "
+ "backend names")
@classmethod
def resource_setup(cls):
@@ -66,21 +67,34 @@
extra_specs = {spec_key_with_prefix: backend_name_key}
else:
extra_specs = {spec_key_without_prefix: backend_name_key}
- cls.create_volume_type(name=type_name,
- extra_specs=extra_specs)
+ cls.create_volume_type(
+ name=type_name, extra_specs=extra_specs)
+ # Pick up AZ from volume_type
+ services = cls.admin_volume_services_client.list_services()
+ vol_svrs = [
+ srv for srv in services.get("services")
+ if srv["binary"] == "cinder-volume" and
+ backend_name_key in srv["host"]
+ ]
+ vol_type_zone = vol_svrs[0]["zone"]
- params = {'name': vol_name, 'volume_type': type_name,
- 'size': CONF.volume.volume_size}
+ params = {
+ "name": vol_name,
+ "volume_type": type_name,
+ "size": CONF.volume.volume_size,
+ "availability_zone": vol_type_zone,
+ }
cls.volume = cls.create_volume(**params)
if with_prefix:
- cls.volume_id_list_with_prefix.append(cls.volume['id'])
+ cls.volume_id_list_with_prefix.append(cls.volume["id"])
else:
- cls.volume_id_list_without_prefix.append(
- cls.volume['id'])
- waiters.wait_for_volume_resource_status(cls.admin_volume_client,
- cls.volume['id'], 'available')
+ cls.volume_id_list_without_prefix.append(cls.volume["id"])
+ waiters.wait_for_volume_resource_status(
+ cls.admin_volume_client, cls.volume["id"], "available"
+ )
- @decorators.idempotent_id('c1a41f3f-9dad-493e-9f09-3ff197d477cc')
+ @decorators.idempotent_id("c1a41f3f-9dad-493e-9f09-3ff197d477cc")
def test_backend_name_reporting(self):
"""Test backend name reporting for volume when type is without prefix
@@ -92,7 +106,7 @@
for volume_id in self.volume_id_list_without_prefix:
self._test_backend_name_reporting_by_volume_id(volume_id)
- @decorators.idempotent_id('f38e647f-ab42-4a31-a2e7-ca86a6485215')
+ @decorators.idempotent_id("f38e647f-ab42-4a31-a2e7-ca86a6485215")
def test_backend_name_reporting_with_prefix(self):
"""Test backend name reporting for volume when type is with prefix
@@ -105,7 +119,7 @@
for volume_id in self.volume_id_list_with_prefix:
self._test_backend_name_reporting_by_volume_id(volume_id)
- @decorators.idempotent_id('46435ab1-a0af-4401-8373-f14e66b0dd58')
+ @decorators.idempotent_id("46435ab1-a0af-4401-8373-f14e66b0dd58")
def test_backend_name_distinction(self):
"""Test volume backend distinction when type is without prefix
@@ -116,7 +130,7 @@
"""
self._test_backend_name_distinction(self.volume_id_list_without_prefix)
- @decorators.idempotent_id('4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed')
+ @decorators.idempotent_id("4236305b-b65a-4bfc-a9d2-69cb5b2bf2ed")
def test_backend_name_distinction_with_prefix(self):
"""Test volume backend distinction when type is with prefix
@@ -128,28 +142,29 @@
self._test_backend_name_distinction(self.volume_id_list_with_prefix)
def _get_volume_host(self, volume_id):
- return self.admin_volume_client.show_volume(
- volume_id)['volume']['os-vol-host-attr:host']
+ return self.admin_volume_client.show_volume(volume_id)["volume"][
+ "os-vol-host-attr:host"
+ ]
def _test_backend_name_reporting_by_volume_id(self, volume_id):
# this test checks if os-vol-attr:host is populated correctly after
# the multi backend feature has been enabled
# if multi-backend is enabled: os-vol-attr:host should be like:
# host@backend_name
- volume = self.admin_volume_client.show_volume(volume_id)['volume']
+ volume = self.admin_volume_client.show_volume(volume_id)["volume"]
- volume1_host = volume['os-vol-host-attr:host']
- msg = ("multi-backend reporting incorrect values for volume %s" %
- volume_id)
+ volume1_host = volume["os-vol-host-attr:host"]
+ msg = ("multi-backend reporting incorrect values for volume %s"
+ % volume_id)
self.assertGreater(len(volume1_host.split("@")), 1, msg)
def _test_backend_name_distinction(self, volume_id_list):
# this test checks that the volumes created at setUp don't
# belong to the same backend (if they are, than the
# volume backend distinction is not working properly)
- volume_hosts = [self._get_volume_host(volume) for volume in
- volume_id_list]
+ volume_hosts = [self._get_volume_host(volume)
+ for volume in volume_id_list]
# assert that volumes are each created on separate hosts:
- msg = ("volumes %s were created in the same backend" % ", "
- .join(volume_hosts))
+ msg = "volumes %s were created in the same backend" % ", ".join(
+ volume_hosts)
self.assertCountEqual(volume_hosts, set(volume_hosts), msg)
diff --git a/tempest/api/volume/admin/test_volume_manage.py b/tempest/api/volume/admin/test_volume_manage.py
index 609ec15..3d7cb15 100644
--- a/tempest/api/volume/admin/test_volume_manage.py
+++ b/tempest/api/volume/admin/test_volume_manage.py
@@ -79,8 +79,11 @@
new_vol_id)['volume']
self.assertNotIn(new_vol_id, [org_vol_id])
self.assertEqual(new_vol_info['name'], new_vol_name)
- for key in ['size',
- 'volume_type',
- 'availability_zone',
- 'os-vol-host-attr:host']:
+ check_attrs = ['size',
+ 'volume_type',
+ 'availability_zone'
+ ]
+ if CONF.volume.storage_protocol != 'ceph':
+ check_attrs.append('os-vol-host-attr:host')
+ for key in check_attrs:
self.assertEqual(new_vol_info[key], org_vol_info[key])
diff --git a/tempest/api/volume/admin/test_volume_retype.py b/tempest/api/volume/admin/test_volume_retype.py
index 7c25f3d..c17e01c 100644
--- a/tempest/api/volume/admin/test_volume_retype.py
+++ b/tempest/api/volume/admin/test_volume_retype.py
@@ -63,9 +63,10 @@
src_vol = self.create_volume(volume_type=self.src_vol_type['name'],
snapshot_id=snapshot['id'])
- # Delete the snapshot
- self.snapshots_client.delete_snapshot(snapshot['id'])
- self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
+ if not CONF.volume_feature_enabled.volume_locked_by_snapshot:
+ # Delete the snapshot
+ self.snapshots_client.delete_snapshot(snapshot['id'])
+ self.snapshots_client.wait_for_resource_deletion(snapshot['id'])
return src_vol
@@ -179,10 +180,11 @@
keys_with_change = ('volume_type',)
# NOTE(vsaienko): with active-active cluster deployment volume
- # services registered with different hostname.
- if CONF.volume_feature_enabled.cluster_active_active:
- keys_with_change += ('os-vol-host-attr:host',)
- else:
+ # services are registered with different hostnames; since we don't
+ # know which service will process the request, the host might or
+ # might not change.
+ # TODO(vsaienko): Revisit this logic when
+ # https://bugs.launchpad.net/cinder/+bug/1874414 is fixed.
+ if not CONF.volume_feature_enabled.cluster_active_active:
keys_with_no_change += ('os-vol-host-attr:host',)
# Check the volume information after the retype
diff --git a/tempest/api/volume/test_volume_transfers.py b/tempest/api/volume/test_volume_transfers.py
index 62cb203..d4634e6 100644
--- a/tempest/api/volume/test_volume_transfers.py
+++ b/tempest/api/volume/test_volume_transfers.py
@@ -32,10 +32,23 @@
cls.alt_client = cls.os_alt.volume_transfers_client_latest
cls.alt_volumes_client = cls.os_alt.volumes_client_latest
cls.adm_volumes_client = cls.os_admin.volumes_client_latest
+ cls.volume_type_client = cls.os_admin.volume_types_client_latest
+ cls.encryption_client = cls.os_admin.encryption_types_client_latest
+
+ def _check_default_volume_type(self):
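+ # The transfer tests below create volumes of the default type; skip
+ # when that type is encrypted.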
+ default_volume_type = self.volume_type_client.\
+ show_default_volume_type()["volume_type"]["id"]
+ volume_encryption = self.encryption_client.show_encryption_type(
+ default_volume_type)
+ if volume_encryption and volume_encryption.get("provider"):
+ raise self.skipException("Not allowed to run this test with "
+ "encrypted volume")
@decorators.idempotent_id('4d75b645-a478-48b1-97c8-503f64242f1a')
def test_create_get_list_accept_volume_transfer(self):
"""Test creating, getting, listing and accepting of volume transfer"""
+ self._check_default_volume_type()
+
# Create a volume first
volume = self.create_volume()
self.addCleanup(self.delete_volume,
@@ -77,6 +90,8 @@
@decorators.idempotent_id('ab526943-b725-4c07-b875-8e8ef87a2c30')
def test_create_list_delete_volume_transfer(self):
"""Test creating, listing and deleting volume transfer"""
+ self._check_default_volume_type()
+
# Create a volume first
volume = self.create_volume()
self.addCleanup(self.delete_volume,
diff --git a/tempest/api/volume/test_volumes_filters.py b/tempest/api/volume/test_volumes_filters.py
new file mode 100644
index 0000000..74ba9cb
--- /dev/null
+++ b/tempest/api/volume/test_volumes_filters.py
@@ -0,0 +1,50 @@
+# Copyright 2021 Mirantis Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+
+import testtools
+
+from tempest.api.volume import base
+from tempest import config
+from tempest.lib import decorators
+
+
+CONF = config.CONF
+
+
+class VolumesFilter(base.BaseVolumeAdminTest):
+ @testtools.skipUnless(
+ "InstanceLocalityFilter" in CONF.volume.scheduler_default_filters,
+ "Cinder InstanceLocalityFilter is disabled",
+ )
+ @testtools.skipUnless(
+ CONF.volume_feature_enabled.instance_locality_enabled,
+ "InstanceLocalityFilter test is disabled",
+ )
+ @decorators.idempotent_id("5c13f4f7-5add-4fad-8ef7-dccca0f76295")
+ def test_instancelocalityfilter(self):
+ # 1. Create instance
+ # 2. Create volume by using local_to_instance hint
+ # 3. Compare server host and volume host are the same.
+ server = self.create_server()
+ server_host = self.admin_manager.servers_client.show_server(
+ server["id"])["server"]["OS-EXT-SRV-ATTR:host"]
+ volume = self.create_volume(hints={"local_to_instance": server["id"]})
+ fetched_volume = self.admin_volume_client.show_volume(volume["id"])[
+ "volume"]
+ self.assertEqual(
+ server_host,
+ fetched_volume["os-vol-host-attr:host"].split("@")[0],
+ "The fetched volume host is different "
+ "from the created instance host")
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index d8480df..0a3a412 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -264,6 +264,12 @@
@decorators.idempotent_id('449c4ed2-ecdd-47bb-98dc-072aeccf158c')
def test_reserve_volume_with_negative_volume_status(self):
"""Test reserving already reserved volume should fail"""
+
+ # Skip test if the volume has "multiattach" property
+ if self.volume['multiattach']:
+ raise self.skipException('Reserving multiattach volumes is not'
+ ' supported.')
+
# Mark volume as reserved.
self.volumes_client.reserve_volume(self.volume['id'])
# Mark volume which is marked as reserved before
diff --git a/tempest/common/credentials_factory.py b/tempest/common/credentials_factory.py
index 2d486a7..2e07c80 100644
--- a/tempest/common/credentials_factory.py
+++ b/tempest/common/credentials_factory.py
@@ -87,7 +87,8 @@
('create_networks', (CONF.auth.create_isolated_networks and not
CONF.network.shared_physical_network)),
('resource_prefix', 'tempest'),
- ('identity_admin_endpoint_type', endpoint_type)
+ ('identity_admin_endpoint_type', endpoint_type),
+ ('networking_timeout_409', CONF.network.timeout_409)
]))
@@ -122,8 +123,7 @@
:param name: When provided, it makes it possible to associate credential
artifacts back to the owner (test class).
:param network_resources: Dictionary of network resources to be allocated
- for each test account. Only valid for the dynamic
- credentials provider.
+ for each test account.
:param force_tenant_isolation: Always return a `DynamicCredentialProvider`,
regardless of the configuration.
:param identity_version: Use the specified identity API version, regardless
@@ -144,6 +144,9 @@
# Most params are not relevant for pre-created accounts
return preprov_creds.PreProvisionedCredentialProvider(
name=name,
+ network_resources=network_resources,
+ separate_projects_by_network_existence=(
+ CONF.auth.separate_projects_by_network_existence),
**get_preprov_provider_params(identity_version))
else:
raise exceptions.InvalidConfiguration(
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index ddc6047..36c2b8b 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -679,6 +679,19 @@
raise lib_exc.TimeoutException()
+def wait_for_cloudinit(ssh_client, timeout=60):
+ """Waits for cloud-init completed"""
+ start_time = int(time.time())
+ while int(time.time()) - start_time < timeout:
+ try:
+ ssh_client.check_cloudinit()
+ return
+ except Exception:
+ pass
+ time.sleep(5)
+ raise lib_exc.TimeoutException()
+
+
def wait_for_caching(client, cache_client, image_id):
"""Waits until image is cached"""
start = int(time.time())
diff --git a/tempest/config.py b/tempest/config.py
index 38e2a46..d57864d 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -110,6 +110,14 @@
"This must be set to 'all' if using the "
"[oslo_policy]/enforce_scope=true option for the "
"identity service."),
+ cfg.BoolOpt('separate_projects_by_network_existence',
+ default=False,
+ help="If use_dynamic_credentials is set to False and "
+ "separate_projects_by_network_existence is set to True "
+ "Tempest divides projects with networks and without "
+ "networks. To be compatible with old behavior the config "
+ "option is set to False and project are treated the same "
+ "regardless their network resources.")
]
identity_group = cfg.OptGroup(name='identity',
@@ -290,6 +298,15 @@
help="Valid secondary image reference to be used in tests. "
"This is a required option, but if only one image is "
"available duplicate the value of image_ref above"),
+ cfg.StrOpt('image_full_ref',
+ help="This is image with full OS like ubuntu/centos used"
+ "in some tests. When not set related tests will be "
+ "skipped"),
+ cfg.StrOpt('image_full_username',
+ default="ubuntu",
+ help="Username for image_full_ref authentication."),
+ cfg.StrOpt('image_full_flavor_ref',
+ help="Flavor to boot image_full_ref."),
cfg.StrOpt('certified_image_ref',
help="Valid image reference to be used in image certificate "
"validation tests when enabled. This image must also "
@@ -301,12 +318,24 @@
help="A list of trusted certificates to be used when the "
"image certificate validation compute feature is "
"enabled."),
+ cfg.StrOpt('windows10_image_ref',
+ default=None,
+ help="Valid image reference to be used in Windows 10 tests."),
+ cfg.StrOpt('windows11_image_ref',
+ default=None,
+ help="Valid image reference to be used in Windows 11 tests."),
cfg.StrOpt('flavor_ref',
default="1",
help="Valid primary flavor to use in tests."),
cfg.StrOpt('flavor_ref_alt',
default="2",
help='Valid secondary flavor to be used in tests.'),
+ cfg.StrOpt('windows10_flavor_ref',
+ default=None,
+ help="Valid flavor to be used for Windows 10 tests."),
+ cfg.StrOpt('windows11_flavor_ref',
+ default=None,
+ help="Valid flavor to be used for Windows 11 tests."),
cfg.IntOpt('build_interval',
default=1,
help="Time in seconds between build status checks."),
@@ -622,6 +651,21 @@
cfg.BoolOpt('unified_limits',
default=False,
help='Does the test environment support unified limits?'),
+ cfg.BoolOpt('boot_from_volume',
+ default=True,
+ help='Does the test environment support booting instances '
+ 'from volume? This depends on the hypervisor and the volume '
+ 'backend/type.'),
+ cfg.BoolOpt('barbican_integration_enabled',
+ default=False,
+ help='Does the test environment support Barbican '
+ 'integration?'),
+ cfg.StrOpt('vgpu_flavor_ref',
+ default=None,
+ help="Valid flavor to be used for vGPU tests."),
+ cfg.StrOpt('vgpu_lspci_pattern',
+ default="NVIDIA",
+ help="Pattern to search in lspci output to enaure VGPU is"
+ " present on host.")
]
@@ -705,6 +749,9 @@
cfg.BoolOpt('os_glance_reserved',
default=False,
help="Should we check that os_glance namespace is reserved"),
+ cfg.ListOpt('enabled_import_methods',
+ default=[],
+ help="List of enabled image import methods"),
cfg.BoolOpt('manage_locations',
default=False,
help=('Is show_multiple_locations enabled in glance. '
@@ -798,6 +845,10 @@
cfg.IntOpt('service_ports_number',
default=0,
help="Number of neutron service ports created per network"),
+ cfg.IntOpt('timeout_409',
+ default=120,
+ help="Total time in seconds to keep retrying a request that "
+ "returns HTTP 409 (Conflict)."),
]
network_feature_group = cfg.OptGroup(name='network-feature-enabled',
@@ -944,7 +995,7 @@
help='Type of key to use for ssh connections. '
'Valid types are rsa, ecdsa'),
cfg.FloatOpt('allowed_network_downtime',
- default=5.0,
+ default=10.0,
help="Allowed VM network connection downtime during live "
"migration, in seconds. "
"When the measured downtime exceeds this value, an "
@@ -1041,6 +1092,21 @@
"If both values are not specified, Tempest avoids tests "
"which require a microversion. Valid values are string "
"with format 'X.Y' or string 'latest'",),
+ cfg.ListOpt('scheduler_default_filters',
+ default=[],
+ help="The list of enabled scheduler filters.",),
+ cfg.StrOpt('volume_type_luks',
+ default='luks',
+ help="The name of volume type used by tests to create"
+ "volumes with luks encryption.",),
+ cfg.StrOpt('volume_type_luks_v2',
+ default='luks2',
+ help="The name of volume type used by tests to create"
+ "volumes with luks v2 encryption.",),
+ cfg.StrOpt('volume_type_cryptsetup',
+ default='cryptsetup',
+ help="The name of volume type used by tests to create"
+ "volumes with cryptsetup encryption.",),
]
volume_feature_group = cfg.OptGroup(name='volume-feature-enabled',
@@ -1093,7 +1159,19 @@
cfg.BoolOpt('cluster_active_active',
default=False,
help='The boolean flag to indicate if active-active mode '
- 'is used by volume backend.')
+ 'is used by volume backend.'),
+ cfg.BoolOpt('instance_locality_enabled',
+ default=False,
+ help='The boolean flag to run instance locality tests '
+ 'on environment.'),
+ cfg.ListOpt('supported_crypto_providers',
+ default=['luks'],
+ help='A list of enabled cryptoproviders for volumes'),
+ cfg.BoolOpt('volume_locked_by_snapshot',
+ default=False,
+ help='Whether volumes are locked by their snapshots, i.e. a '
+ 'volume can be removed only when no dependent snapshots '
+ 'exist.'),
]
@@ -1336,6 +1414,10 @@
help="The boolean flag to specify the type of environment. "
"Skip tests that cannot be run in production. "
"For example: create/delete TLDs in Designate tests."),
+ cfg.StrOpt('state_path',
+ help="The top-level directory for maintaining Tempest state. "
+ "For example store configuration files mounted do docker "
+ "containers."),
]
_opts = [
@@ -1629,6 +1711,17 @@
return _parameters
+def is_tungstenfabric_backend_enabled():
+ """Return True if TungstenFabric is used as a backend."""
+ try:
+ sdn = getattr(CONF, 'sdn')
+ service_name = getattr(sdn, 'service_name')
+ return service_name == 'tungstenfabric'
+ except cfg.NoSuchOptError:
+ return False
+
+
def _register_tempest_service_clients():
# Register tempest own service clients using the same mechanism used
# for external plugins.
diff --git a/tempest/lib/api_schema/response/compute/v2_1/servers.py b/tempest/lib/api_schema/response/compute/v2_1/servers.py
index 14e2d3b..e066f7b 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/servers.py
@@ -425,6 +425,8 @@
}
}
+get_spice_console = get_vnc_console
+
get_console_output = {
'status_code': [200],
'response_body': {
diff --git a/tempest/lib/api_schema/response/compute/v2_19/servers.py b/tempest/lib/api_schema/response/compute/v2_19/servers.py
index ba3d787..5d1f315 100644
--- a/tempest/lib/api_schema/response/compute/v2_19/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_19/servers.py
@@ -14,9 +14,12 @@
import copy
+from tempest.lib.api_schema.response.compute.v2_1 import servers
from tempest.lib.api_schema.response.compute.v2_16 import servers \
as serversv216
+
# Compute microversion 2.19:
# 1. New attributes in 'server' dict.
# 'description'
@@ -63,3 +66,4 @@
list_volume_attachments = copy.deepcopy(serversv216.list_volume_attachments)
show_instance_action = copy.deepcopy(serversv216.show_instance_action)
create_backup = copy.deepcopy(serversv216.create_backup)
+list_instance_actions = copy.deepcopy(servers.list_instance_actions)
diff --git a/tempest/lib/api_schema/response/compute/v2_58/servers.py b/tempest/lib/api_schema/response/compute/v2_58/servers.py
index 637b765..5c22c79 100644
--- a/tempest/lib/api_schema/response/compute/v2_58/servers.py
+++ b/tempest/lib/api_schema/response/compute/v2_58/servers.py
@@ -12,8 +12,10 @@
import copy
from tempest.lib.api_schema.response.compute.v2_1 import parameter_types
+from tempest.lib.api_schema.response.compute.v2_1 import servers
from tempest.lib.api_schema.response.compute.v2_57 import servers as servers257
+
# microversion 2.58 added updated_at to the response
show_instance_action = copy.deepcopy(servers257.show_instance_action)
show_instance_action['response_body']['properties']['instanceAction'][
@@ -21,6 +23,14 @@
show_instance_action['response_body']['properties']['instanceAction'][
'required'].append('updated_at')
+# microversion 2.58 added updated_at to the response
+list_instance_actions = copy.deepcopy(servers.list_instance_actions)
+list_instance_actions['response_body']['properties']['instanceActions'][
+ 'items']['properties'].update({'updated_at': parameter_types.date_time})
+list_instance_actions['response_body']['properties']['instanceActions'][
+ 'items']['required'].append('updated_at')
+
+
# Below are the unchanged schema in this microversion. We need
# to keep this schema in this file to have the generic way to select the
# right schema based on self.schema_versions_info mapping in service client.
diff --git a/tempest/lib/api_schema/response/volume/volume_types.py b/tempest/lib/api_schema/response/volume/volume_types.py
index 51b3a72..4d09bcd 100644
--- a/tempest/lib/api_schema/response/volume/volume_types.py
+++ b/tempest/lib/api_schema/response/volume/volume_types.py
@@ -31,8 +31,7 @@
'qos_specs_id': {'type': ['string', 'null'], 'format': 'uuid'}
},
'additionalProperties': False,
- 'required': ['name', 'is_public', 'description', 'id',
- 'os-volume-type-access:is_public']
+ 'required': ['name', 'is_public', 'description', 'id']
}
show_volume_type = {
diff --git a/tempest/lib/common/constants.py b/tempest/lib/common/constants.py
new file mode 100644
index 0000000..57fdd93
--- /dev/null
+++ b/tempest/lib/common/constants.py
@@ -0,0 +1,5 @@
+# Retry constants
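+# Used by the tenacity retries in dynamic_creds: wait RETRY_INITIAL_DELAY
+# seconds, adding RETRY_BACKOFF per attempt up to RETRY_MAX, and stop
+# after RETRY_ATTEMPTS attempts.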
+RETRY_ATTEMPTS = 30
+RETRY_INITIAL_DELAY = 1
+RETRY_BACKOFF = 3
+RETRY_MAX = 10
diff --git a/tempest/lib/common/cred_provider.py b/tempest/lib/common/cred_provider.py
index 2da206f..84f5264 100644
--- a/tempest/lib/common/cred_provider.py
+++ b/tempest/lib/common/cred_provider.py
@@ -13,6 +13,8 @@
# limitations under the License.
import abc
+import time
+
from oslo_log import log as logging
from tempest.lib import auth
@@ -133,11 +135,24 @@
name="default")
secgroups_to_delete = resp_body['security_groups']
for secgroup in secgroups_to_delete:
- try:
- security_group_client.delete_security_group(secgroup['id'])
- except exceptions.NotFound:
- LOG.warning('Security group %s, id %s not found for clean-up',
- secgroup['name'], secgroup['id'])
+ # Workaround for PRODX-4003
+ start_time = time.time()
+ while True:
+ try:
+ security_group_client.delete_security_group(secgroup['id'])
+ break
+ except exceptions.NotFound:
+ LOG.warning('Security group %s, id %s not found for '
+ 'clean-up', secgroup['name'], secgroup['id'])
+ break
+ except exceptions.Conflict:
+ LOG.warning('Conflict with state of security group %s, '
+ 'id %s.', secgroup['name'], secgroup['id'])
+ if time.time() - start_time > \
+ self.networking_timeout_409:
+ raise
+ else:
+ time.sleep(5)
class TestResources(object):
diff --git a/tempest/lib/common/dynamic_creds.py b/tempest/lib/common/dynamic_creds.py
index eb18aad..9eb3c22 100644
--- a/tempest/lib/common/dynamic_creds.py
+++ b/tempest/lib/common/dynamic_creds.py
@@ -16,11 +16,15 @@
import netaddr
from oslo_log import log as logging
+import tenacity
+from tempest.lib.common import constants as const
from tempest.lib.common import cred_client
from tempest.lib.common import cred_provider
from tempest.lib.common.utils import data_utils
+from tempest.lib.common import waiters as lib_waiters
from tempest.lib import exceptions as lib_exc
+
from tempest.lib.services import clients
LOG = logging.getLogger(__name__)
@@ -72,7 +76,8 @@
neutron_available=False, create_networks=True,
project_network_cidr=None, project_network_mask_bits=None,
public_network_id=None, resource_prefix=None,
- identity_admin_endpoint_type='public', identity_uri=None):
+ identity_admin_endpoint_type='public', identity_uri=None,
+ networking_timeout_409=120):
super(DynamicCredentialProvider, self).__init__(
identity_version=identity_version, identity_uri=identity_uri,
admin_role=admin_role, name=name,
@@ -117,6 +122,7 @@
self.roles_admin_client,
self.domains_admin_client,
self.creds_domain_name)
+ self.networking_timeout_409 = networking_timeout_409
def _get_admin_clients(self, endpoint_type):
"""Returns a tuple with instances of the following admin clients
@@ -533,6 +539,11 @@
del self._creds[creds_name]
return self.get_credentials(roles, scope=scope, by_role=True)
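+ # Router deletion may conflict with in-flight port cleanup; retry on
+ # HTTP 409 with an incrementing wait (1s + 3s per attempt, capped at
+ # 10s), for up to 30 attempts.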
+ @tenacity.retry(
+ retry=tenacity.retry_if_exception_type(lib_exc.Conflict),
+ wait=tenacity.wait_incrementing(
+ const.RETRY_INITIAL_DELAY, const.RETRY_BACKOFF, const.RETRY_MAX),
+ stop=tenacity.stop_after_attempt(const.RETRY_ATTEMPTS))
def _clear_isolated_router(self, router_id, router_name):
client = self.routers_admin_client
try:
@@ -574,6 +585,9 @@
client.remove_router_interface(
creds.router['id'],
subnet_id=creds.subnet['id'])
+ lib_waiters.wait_router_interface_removed(
+ self.ports_admin_client, creds.router['id'],
+ subnet_id=creds.subnet['id'])
except lib_exc.NotFound:
LOG.warning('router with name: %s not found for delete',
creds.router['name'])
diff --git a/tempest/lib/common/preprov_creds.py b/tempest/lib/common/preprov_creds.py
index 6d948cf..2938a9d 100644
--- a/tempest/lib/common/preprov_creds.py
+++ b/tempest/lib/common/preprov_creds.py
@@ -75,10 +75,13 @@
HASH_CRED_FIELDS = (set(auth.KeystoneV2Credentials.ATTRIBUTES) &
set(auth.KeystoneV3Credentials.ATTRIBUTES))
- def __init__(self, identity_version, test_accounts_file,
- accounts_lock_dir, name=None, credentials_domain=None,
- admin_role=None, object_storage_operator_role=None,
- object_storage_reseller_admin_role=None, identity_uri=None):
+ def __init__(
+ self, identity_version, test_accounts_file, accounts_lock_dir,
+ name=None, credentials_domain=None, admin_role=None,
+ object_storage_operator_role=None,
+ object_storage_reseller_admin_role=None, identity_uri=None,
+ network_resources=None,
+ separate_projects_by_network_existence=False):
super(PreProvisionedCredentialProvider, self).__init__(
identity_version=identity_version, name=name,
admin_role=admin_role, credentials_domain=credentials_domain,
@@ -90,9 +93,12 @@
raise lib_exc.InvalidCredentials("No accounts file specified")
self.hash_dict = self.get_hash_dict(
accounts, admin_role, object_storage_operator_role,
- object_storage_reseller_admin_role)
+ object_storage_reseller_admin_role, network_resources,
+ separate_projects_by_network_existence)
self.accounts_dir = accounts_lock_dir
self._creds = {}
+ self._used_projects_file = os.path.join(self.accounts_dir,
+ 'used_projects')
@classmethod
def _append_role(cls, role, account_hash, hash_dict):
@@ -110,14 +116,21 @@
return hash_dict
@classmethod
- def get_hash_dict(cls, accounts, admin_role,
- object_storage_operator_role=None,
- object_storage_reseller_admin_role=None):
+ def get_hash_dict(
+ cls, accounts, admin_role, object_storage_operator_role=None,
+ object_storage_reseller_admin_role=None, network_resources=None,
+ separate_projects_by_network_existence=False):
hash_dict = {'roles': {}, 'creds': {}, 'networks': {},
- 'scoped_roles': {}}
+ 'scoped_roles': {}, 'projects': {}}
+ tests_require_projects_with_networks = \
+ cls.do_tests_require_projects_with_networks(network_resources)
# Loop over the accounts read from the yaml file
for account in accounts:
+ if not cls.is_account_needed(
+ tests_require_projects_with_networks, account,
+ separate_projects_by_network_existence):
+ continue
roles = []
types = []
scope = None
@@ -180,6 +193,9 @@
'Unknown resource type %s, ignoring this field',
resource
)
+ if scope == 'project':
+ hash_dict = cls._append_project(account, temp_hash_key,
+ hash_dict)
return hash_dict
def is_multi_user(self):
@@ -196,19 +212,49 @@
return True
return False
+ def _process_project(self, hash_, used_projects=None, use=True):
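+ # Record (use=True) or release (use=False) the account's project in a
+ # shared file so parallel workers don't allocate two accounts from the
+ # same project.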
+ project = self.hash_dict['creds'][hash_].get('project_name')
+ if not project:
+ project = self.hash_dict['creds'][hash_].get('tenant_name')
+ if not project:
+ return
+ if used_projects is None:
+ used_projects = self._get_used_projects()
+ method = 'append' if use else 'remove'
+ getattr(used_projects, method)(project)
+ with open(self._used_projects_file, 'w') as file:
+ file.write('\n'.join(used_projects) + '\n')
+ return project
+
+ def _get_used_projects(self):
+ used_projects = []
+ try:
+ with open(self._used_projects_file) as file:
+ for line in file:
+ line = line.strip()
+ if line:
+ used_projects.append(line)
+ except FileNotFoundError:
+ pass
+ return used_projects
+
@lockutils.synchronized('test_accounts_io', external=True)
def _get_free_hash(self, hashes):
+ used_projects = self._get_used_projects()
+ hashes = self._exclude_used_projects(hashes, used_projects)
# Cast as a list because in some edge cases a set will be passed in
hashes = list(hashes)
if not os.path.isdir(self.accounts_dir):
os.mkdir(self.accounts_dir)
# Create File from first hash (since none are in use)
self._create_hash_file(hashes[0])
+ self._process_project(hashes[0], used_projects)
return hashes[0]
names = []
for _hash in hashes:
res = self._create_hash_file(_hash)
if res:
+ self._process_project(_hash, used_projects)
return _hash
else:
path = os.path.join(self.accounts_dir, _hash)
@@ -265,26 +311,31 @@
return temp_creds
def _get_creds(self, roles=None, scope=None):
- useable_hashes = self._get_match_hash_list(roles, scope)
- if not useable_hashes:
+ usable_hashes = self._get_match_hash_list(roles, scope)
+ if not usable_hashes:
msg = 'No users configured for type/roles %s' % roles
raise lib_exc.InvalidCredentials(msg)
- free_hash = self._get_free_hash(useable_hashes)
+ if scope == 'system':
+ free_hash = next(iter(usable_hashes))
+ else:
+ free_hash = self._get_free_hash(usable_hashes)
clean_creds = self._sanitize_creds(
self.hash_dict['creds'][free_hash])
- LOG.info('%s allocated creds:\n%s', self.name, clean_creds)
+ LOG.info('%s allocated creds for roles %s in scope %s:\n%s',
+ self.name, roles, scope, clean_creds)
return self._wrap_creds_with_network(free_hash)
@lockutils.synchronized('test_accounts_io', external=True)
def remove_hash(self, hash_string):
- hash_path = os.path.join(self.accounts_dir, hash_string)
- if not os.path.isfile(hash_path):
- LOG.warning('Expected an account lock file %s to remove, but '
- 'one did not exist', hash_path)
- else:
- os.remove(hash_path)
- if not os.listdir(self.accounts_dir):
- os.rmdir(self.accounts_dir)
+ if self._process_project(hash_string, use=False):
+ hash_path = os.path.join(self.accounts_dir, hash_string)
+ if not os.path.isfile(hash_path):
+ LOG.warning('Expected an account lock file %s to remove, but '
+ 'one did not exist', hash_path)
+ else:
+ os.remove(hash_path)
+ if not os.listdir(self.accounts_dir):
+ os.rmdir(self.accounts_dir)
def get_hash(self, creds):
for _hash in self.hash_dict['creds']:
@@ -310,18 +361,18 @@
# TODO(gmann): Remove this method in favor of get_project_member_creds()
# after the deprecation phase.
def get_primary_creds(self):
- if self._creds.get('primary'):
- return self._creds.get('primary')
- net_creds = self._get_creds()
- self._creds['primary'] = net_creds
- return net_creds
+ # NOTE(pas-ha) force os_primary and os_project_member
+ # to be exactly the same creds, otherwise they may be from
+ # different projects and fail some RBAC tests
+ return self.get_project_member_creds()
# TODO(gmann): Replace this method with more appropriate name.
# like get_project_alt_member_creds()
def get_alt_creds(self):
if self._creds.get('alt'):
return self._creds.get('alt')
- net_creds = self._get_creds()
+ # NOTE(pas-ha) use the same call as get_project_member_creds
+ net_creds = self._get_creds(['member'], scope='project')
self._creds['alt'] = net_creds
return net_creds
@@ -400,7 +451,7 @@
# TODO(gmann): Implement alt reader hash.
return
- def get_creds_by_roles(self, roles, force_new=False, scope=None):
+ def get_creds_by_roles(self, roles, force_new=True, scope=None):
roles = list(set(roles))
exist_creds = self._creds.get(str(roles).encode(
'utf-8'), None)
@@ -477,3 +528,47 @@
for attr in domain_fields.intersection(set(creds_dict.keys())):
creds_dict.pop(attr)
return creds_dict
+
+ @classmethod
+ def _append_project(cls, account, account_hash, hash_dict):
+ key_to_add = account.get('project_name') or account.get('tenant_name')
+ hash_dict['projects'].setdefault(key_to_add, [])
+ hash_dict['projects'][key_to_add].append(account_hash)
+ return hash_dict
+
+ def _exclude_used_projects(self, hashes, used_projects):
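+ # Drop hashes whose project is already allocated to another test.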
+ excluded_accounts = []
+ for project in used_projects:
+ if project in self.hash_dict['projects']:
+ excluded_accounts.extend(self.hash_dict['projects'][project])
+ return set(hashes) - set(excluded_accounts)
+
+ @staticmethod
+ def do_tests_require_projects_with_networks(network_resources):
+ """take projects with networks or projects without networks
+
+ :return: boolean value
+ """
+ if isinstance(network_resources, dict):
+ if any(network_resources.values()):
+ return True
+ return False
+
+ @staticmethod
+ def is_account_needed(tests_require_projects_with_networks, account,
+ separate_projects_by_network_existence=False):
+ """decides whether we need account for test class
+
+ :param tests_require_projects_with_networks: need projects with or
+ without network
+ :param account: dictionary which contains username, password, resources
+ :param separate_projects_by_network_existence: should we separate
+ accounts with and without networks
+ :return: boolean value
+ """
+ if ({'project_name', 'tenant_name'} & account.keys()) and \
+ separate_projects_by_network_existence:
+ is_network_in_resources = 'network' in account.get('resources', {})
+ return is_network_in_resources == \
+ tests_require_projects_with_networks
+ return True
diff --git a/tempest/lib/common/rest_client.py b/tempest/lib/common/rest_client.py
index a2f2931..4e1dc59 100644
--- a/tempest/lib/common/rest_client.py
+++ b/tempest/lib/common/rest_client.py
@@ -94,6 +94,7 @@
self.build_interval = build_interval
self.build_timeout = build_timeout
self.trace_requests = trace_requests
+ self.ca_certs = ca_certs
self._skip_path = False
self.general_header_lc = set(('cache-control', 'connection',
diff --git a/tempest/lib/common/utils/linux/remote_client.py b/tempest/lib/common/utils/linux/remote_client.py
index 662b452..2f019ce 100644
--- a/tempest/lib/common/utils/linux/remote_client.py
+++ b/tempest/lib/common/utils/linux/remote_client.py
@@ -31,35 +31,33 @@
return function(self, *args, **kwargs)
except Exception as e:
caller = test_utils.find_test_caller() or "not found"
- if not isinstance(e, tempest.lib.exceptions.SSHTimeout):
- message = ('Executing command on %(ip)s failed. '
- 'Error: %(error)s' % {'ip': self.ip_address,
- 'error': e})
- message = '(%s) %s' % (caller, message)
- LOG.error(message)
- raise
- else:
- try:
- original_exception = sys.exc_info()
- if self.server:
+ message = ('Executing command on %(ip)s failed. '
+ 'Error: %(error)s' % {'ip': self.ip_address,
+ 'error': e})
+ message = '(%s) %s' % (caller, message)
+ LOG.error(message)
+ try:
+ original_exception = sys.exc_info()
+ if self.server:
+ if isinstance(e, tempest.lib.exceptions.SSHTimeout):
msg = 'Caller: %s. Timeout trying to ssh to server %s'
LOG.debug(msg, caller, self.server)
- if self.console_output_enabled and self.servers_client:
- try:
- msg = 'Console log for server %s: %s'
- console_log = (
- self.servers_client.get_console_output(
- self.server['id'])['output'])
- LOG.debug(msg, self.server['id'], console_log)
- except Exception:
- msg = 'Could not get console_log for server %s'
- LOG.debug(msg, self.server['id'])
- # raise the original ssh timeout exception
- raise
- finally:
- # Delete the traceback to avoid circular references
- _, _, trace = original_exception
- del trace
+ if self.console_output_enabled and self.servers_client:
+ try:
+ msg = 'Console log for server %s: %s'
+ console_log = (
+ self.servers_client.get_console_output(
+ self.server['id'])['output'])
+ LOG.debug(msg, self.server['id'], console_log)
+ except Exception:
+ msg = 'Could not get console_log for server %s'
+ LOG.debug(msg, self.server['id'])
+ # raise the original ssh exception
+ raise
+ finally:
+ # Delete the traceback to avoid circular references
+ _, _, trace = original_exception
+ del trace
return wrapper
@@ -122,6 +120,17 @@
"""
self.ssh_client.test_connection_auth()
+ @debug_ssh
+ def check_cloudinit(self):
+ """Check cloud-init is completed
+
+ This method raises an Exception when the status is not 'done'.
+ """
+ out = self.ssh_client.exec_command("cloud-init status")
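+ # "cloud-init status" prints e.g. "status: done"; the second
+ # whitespace-separated token is the status.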
+ res = [s.strip() for s in out.split(' ')]
+ if len(res) < 2 or res[1] != "done":
+ raise ValueError("cloud-init is not done: {res}".format(res=res))
+
def ping_host(self, host, count=None, size=None, nic=None):
if count is None:
count = self.ping_count
diff --git a/tempest/lib/common/waiters.py b/tempest/lib/common/waiters.py
new file mode 100644
index 0000000..1f0bdc9
--- /dev/null
+++ b/tempest/lib/common/waiters.py
@@ -0,0 +1,32 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import time
+
+from tempest.lib import exceptions as lib_exc
+
+
+def wait_router_interface_removed(
+ ports_client, router_id, subnet_id, timeout=30, interval=3):
+ """Waits for router inface is removed"""
+ start_time = int(time.time())
+ while int(time.time()) - start_time < timeout:
+ try:
+ ports = ports_client.list_ports(
+ device_id=router_id,
+ fixed_ips=f"subnet_id={subnet_id}")['ports']
+ if not ports:
+ return
+ except Exception:
+ pass
+ time.sleep(interval)
+ raise lib_exc.TimeoutException()
diff --git a/tempest/lib/services/compute/servers_client.py b/tempest/lib/services/compute/servers_client.py
index 58008aa..27834da 100644
--- a/tempest/lib/services/compute/servers_client.py
+++ b/tempest/lib/services/compute/servers_client.py
@@ -721,6 +721,8 @@
resp, body = self.get("servers/%s/os-instance-actions" %
server_id)
body = json.loads(body)
+ # select the proper schema depending on the microversion
+ schema = self.get_schema(self.schema_versions_info)
self.validate_response(schema.list_instance_actions, resp, body)
return rest_client.ResponseBody(resp, body)
@@ -785,6 +787,16 @@
return self.action(server_id, "os-getVNCConsole",
schema.get_vnc_console, **kwargs)
+ def get_spice_console(self, server_id, **kwargs):
+ """Get URL of SPICE console.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/compute/#get-spice-console-os-getspiceconsole-action-deprecated
+ """
+ return self.action(server_id, "os-getSPICEConsole",
+ schema.get_spice_console, **kwargs)
+
def add_fixed_ip(self, server_id, **kwargs):
"""Add a fixed IP to server instance.
diff --git a/tempest/lib/services/image/v2/images_client.py b/tempest/lib/services/image/v2/images_client.py
index 0608d47..3963fee 100644
--- a/tempest/lib/services/image/v2/images_client.py
+++ b/tempest/lib/services/image/v2/images_client.py
@@ -55,6 +55,19 @@
body = json.loads(body)
return rest_client.ResponseBody(resp, body)
+ def import_image(self, image_id, **kwargs):
+ """Import image.
+
+ For a full list of available parameters, please refer to the official
+ API reference:
+ https://docs.openstack.org/api-ref/image/v2/#import-an-image
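+
+ Example (illustrative):
+ import_image(image_id, method={'name': 'glance-direct'})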
+ """
+ data = json.dumps(kwargs)
+ url = 'images/%s/import' % image_id
+ resp, body = self.post(url, data)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def deactivate_image(self, image_id):
"""Deactivate image.
diff --git a/tempest/lib/services/object_storage/object_client.py b/tempest/lib/services/object_storage/object_client.py
index 65e8227..c7ac80f 100644
--- a/tempest/lib/services/object_storage/object_client.py
+++ b/tempest/lib/services/object_storage/object_client.py
@@ -167,11 +167,14 @@
:param parsed_url: parsed url of the remote location
"""
context = None
- # If CONF.identity.disable_ssl_certificate_validation is true,
- # do not check ssl certification.
- if self.dscv:
- context = ssl._create_unverified_context()
if parsed_url.scheme == 'https':
+ # If CONF.identity.disable_ssl_certificate_validation is true,
+ # do not check ssl certification.
+ if self.dscv:
+ context = ssl._create_unverified_context()
+ else:
+ context = ssl.create_default_context(
+ cafile=self.ca_certs)
conn = httplib.HTTPSConnection(parsed_url.netloc,
context=context)
else:
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index c6f8973..65dc258 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -105,14 +105,17 @@
self.validate_response(schema.show_volume, resp, body)
return rest_client.ResponseBody(resp, body)
- def create_volume(self, **kwargs):
+ def create_volume(self, hints=None, **kwargs):
"""Creates a new Volume.
For a full list of available parameters, please refer to the official
API reference:
https://docs.openstack.org/api-ref/block-storage/v3/index.html#create-a-volume
"""
- post_body = json.dumps({'volume': kwargs})
+ obj = {'volume': kwargs}
+ if hints is not None:
+ obj['OS-SCH-HNT:scheduler_hints'] = hints
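+ # e.g. hints={'local_to_instance': server_id} asks the scheduler to
+ # place the volume on that instance's host (InstanceLocalityFilter).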
+ post_body = json.dumps(obj)
resp, body = self.post('volumes', post_body)
body = json.loads(body)
schema = self.get_schema(self.schema_versions_info)
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 5f30909..fa6f2b3 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -777,7 +777,20 @@
linux_client = remote_client.RemoteClient(
ip_address, username, pkey=private_key, password=password,
server=server, servers_client=self.servers_client)
- linux_client.validate_authentication()
+ try:
+ linux_client.validate_authentication()
+ except Exception as e:
+ message = ('Initializing SSH connection to %(ip)s failed. '
+ 'Error: %(error)s' % {'ip': ip_address,
+ 'error': e})
+ caller = test_utils.find_test_caller()
+ if caller:
+ message = '(%s) %s' % (caller, message)
+ LOG.exception(message)
+ servers = (server,) if server else None
+ self.log_console_output(servers=servers)
+ raise
+
return linux_client
def image_create(self, name='scenario-img', **kwargs):
@@ -1325,6 +1338,14 @@
return self.create_server(**create_kwargs)
+ def wait_for_cloud_init(
+ self, ip_address, server, private_key, username, timeout=60):
+ ssh_client = self.get_remote_client(ip_address,
+ private_key=private_key,
+ server=server,
+ username=username)
+ waiters.wait_for_cloudinit(ssh_client, timeout)
+
def create_volume_from_image(self, **kwargs):
"""Create volume from image.
@@ -1342,6 +1363,36 @@
prefix=CONF.resource_name_prefix, name=namestart)
return self.create_volume(name=name, imageRef=image_id, **kwargs)
+ def run_sync(self, ip_address, private_key=None, server=None,
+ username=None):
+ """Syncs server filesystem cached writes
+
+ This wrapper utility does ssh and syncs server's filesystem caches
+ to persistent storage.
+
+ :param ip_address: The floating IP or fixed IP of the remote server
+ :param private_key: The SSH private key to use for authentication
+ :param server: Server dict, used for debugging purposes
+ :param username: Name of the Linux account on the remote server
+ """
+
+ ssh_client = self.get_remote_client(ip_address,
+ private_key=private_key,
+ server=server,
+ username=username)
+
+ ssh_client.exec_command('sudo sh -c "sync"')
+
+
+class ScenarioTestWithNetwork(ScenarioTest):
+ """Base class for tests with default network"""
+
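+ # Requesting network resources here also lets credential providers
+ # pick projects that actually have networks.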
+ @classmethod
+ def setup_credentials(cls):
+ cls.set_network_resources(network=True, subnet=True,
+ dhcp=True, router=True)
+ super(ScenarioTestWithNetwork, cls).setup_credentials()
+
class NetworkScenarioTest(ScenarioTest):
"""Base class for network scenario tests.
@@ -1697,7 +1748,8 @@
return network, subnet, router
-class EncryptionScenarioTest(ScenarioTest):
+class EncryptionScenarioTest(ScenarioTestWithNetwork):
"""Base class for encryption scenario tests"""
@classmethod
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 753e64f..62b2823 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -54,12 +54,16 @@
@decorators.idempotent_id('79165fb4-5534-4b9d-8429-97ccffb8f86e')
@decorators.attr(type='slow')
+ @testtools.skipUnless(
+ 'luks' in CONF.volume_feature_enabled.supported_crypto_providers,
+ 'luks cryptoprovider is not supported.')
@utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_luks(self):
"""LUKs v1 decrypts volume through libvirt."""
- volume = self.create_encrypted_volume('luks',
- volume_type='luks',
- wait_until=None)
+ volume = self.create_encrypted_volume(
+ 'luks',
+ volume_type=CONF.volume.volume_type_luks,
+ wait_until=None)
server = self.launch_instance()
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
@@ -89,12 +93,18 @@
self.attach_detach_volume(server, volume)
@decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
+ @testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
+ 'Skip because ceph does not support Provider plain.')
@decorators.attr(type='slow')
+ @testtools.skipUnless(
+ 'plain' in CONF.volume_feature_enabled.supported_crypto_providers,
+ 'plain cryptoprovider is not supported.')
@utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_cryptsetup(self):
- volume = self.create_encrypted_volume('plain',
- volume_type='cryptsetup',
- wait_until=None)
+ volume = self.create_encrypted_volume(
+ 'plain',
+ volume_type=CONF.volume.volume_type_cryptsetup,
+ wait_until=None)
server = self.launch_instance()
waiters.wait_for_volume_resource_status(self.volumes_client,
volume['id'], 'available')
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 7b819e0..700ad3b 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -156,7 +156,7 @@
self.assertIn(self.router['id'],
seen_router_ids)
- def _create_server(self, network, port_id=None):
+ def _create_server(self, network, port_id=None, **kwargs):
keypair = self.create_keypair()
self.keypairs[keypair['name']] = keypair
security_groups = [
@@ -169,7 +169,8 @@
server = self.create_server(
networks=[network],
key_name=keypair['name'],
- security_groups=security_groups)
+ security_groups=security_groups,
+ **kwargs)
self.servers.append(server)
return server
@@ -240,6 +241,26 @@
network=self.new_net,
gateway_ip=None)
+ def _assign_nic_ip_address_by_port(self, ssh_client, port, device):
+ ip_output = ssh_client.exec_command('ip a')
+ ip_address = port['fixed_ips'][0]['ip_address']
+ ip_mask = CONF.network.project_network_mask_bits
+ # check if the address is not already in use, if not, set it
+ if ' ' + ip_address + '/' + str(ip_mask) not in ip_output:
+ try:
+ ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
+ ip_address, ip_mask, device))
+ ssh_client.exec_command("sudo ip link set %s up" % device)
+ except exceptions.SSHExecCommandFailed as exc:
+ if 'RTNETLINK answers: File exists' in str(exc):
+ LOG.debug(
+ 'IP address %(ip_address)s is already set in device '
+ '%(device)s\nPrevious "ip a" output: %(ip_output)s',
+ {'ip_address': ip_address, 'device': device,
+ 'ip_output': ip_output})
+ else:
+ raise exc
+
def _hotplug_server(self):
old_floating_ip, server = self.floating_ip_tuple
ip_address = old_floating_ip['floating_ip_address']
@@ -293,24 +314,7 @@
% CONF.network.build_timeout)
_, new_nic = self.diff_list[0]
- ip_output = ssh_client.exec_command('ip a')
- ip_address = new_port['fixed_ips'][0]['ip_address']
- ip_mask = CONF.network.project_network_mask_bits
- # check if the address is not already in use, if not, set it
- if ' ' + ip_address + '/' + str(ip_mask) not in ip_output:
- try:
- ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
- ip_address, ip_mask, new_nic))
- ssh_client.exec_command("sudo ip link set %s up" % new_nic)
- except exceptions.SSHExecCommandFailed as exc:
- if 'RTNETLINK answers: File exists' in str(exc):
- LOG.debug(
- 'IP address %(ip_address)s is already set in device '
- '%(device)s\nPrevious "ip a" output: %(ip_output)s',
- {'ip_address': ip_address, 'device': new_nic,
- 'ip_output': ip_output})
- else:
- raise exc
+ self._assign_nic_ip_address_by_port(ssh_client, new_port, new_nic)
def _get_server_nics(self, ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+)[@]?.*:')
@@ -394,6 +398,21 @@
router['id'], **kwargs)['router']
self.assertEqual(admin_state_up, router['admin_state_up'])
+ def _live_migrate_server(self, server, host_id=None):
+ src_host = self.get_host_for_server(server['id'])
+
+ self.os_adm.servers_client.live_migrate_server(
+ server_id=server['id'],
+ block_migration='auto',
+ host=host_id)
+
+ waiters.wait_for_server_status(
+ self.servers_client, server['id'], 'ACTIVE')
+
+ dst_host = self.get_host_for_server(server['id'])
+ self.assertNotEqual(src_host, dst_host,
+ "Live migration failed, source and destination "
+ "hosts are the same")
+
@decorators.attr(type='smoke')
@decorators.idempotent_id('f323b3ba-82f8-4db7-8ea6-6a895869ec49')
@utils.services('compute', 'network')
@@ -912,6 +931,7 @@
).format(ip_address=ip_address, nic=spoof_nic)
ssh_client.exec_command(cmd)
+ self._assign_nic_ip_address_by_port(ssh_client, spoof_port, spoof_nic)
new_mac = ssh_client.get_mac_address(nic=spoof_nic)
self.assertEqual(spoof_mac, new_mac)
@@ -922,3 +942,58 @@
security_groups=[])
self.check_remote_connectivity(ssh_client, dest=peer_address,
nic=spoof_nic, should_succeed=True)
+
+ @decorators.idempotent_id('463caa51-0967-4d6d-8ee9-11db1557c710')
+ @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
+ 'Live migration is not available.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_connectivity_between_vms_after_live_migration(self):
+ """Test the live-migration of the instances and ping
+
+ 1. Create server 1 and 2 on the same host
+ 2. Ping server 1 from server 2
+ 3. Live migrate server 1 to other host
+ 4. Ping server 1 from server 2
+ 5. Migrate back server 1 to the first host
+ 6. Ping server 1 from server 2
+ """
+
+ # Create server 1 with network, subnetwork, router, host
+ # and ping server 1
+ self._setup_network_and_servers()
+
+ server01 = self.servers[0]
+ hints = {'same_host': server01['id']}
+
+ # Create server 2 with network on the same host
+ self._create_new_network(create_gateway=True)
+ server02 = self._create_server(self.network,
+ scheduler_hints=hints)
+ server02_ip = [addr['addr'] for addr in
+ server02['addresses'][self.network['name']]]
+
+ # Check if both instances are on the same host
+ host01_id = self.get_host_for_server(server01['id'])
+ host02_id = self.get_host_for_server(server02['id'])
+
+ self.assertEqual(host01_id, host02_id,
+ "Created servers are on different hosts")
+
+ # Check ping between servers before live migration
+ self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+ server02_ip, should_connect=True)
+
+ # Live migrate server 1 to the new host
+ self._live_migrate_server(server=server01)
+
+ # Check ping between servers after live migration
+ self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+ server02_ip, should_connect=True)
+
+ # Migrate back server 1 to the first host, wait for status Active
+ self._live_migrate_server(server=server01, host_id=host01_id)
+
+ # Check ping between servers after live migration
+ self._check_server_connectivity(self.floating_ip_tuple.floating_ip,
+ server02_ip, should_connect=True)
diff --git a/tempest/scenario/test_server_basic_ops.py b/tempest/scenario/test_server_basic_ops.py
index 3830fbc..ad86d0f 100644
--- a/tempest/scenario/test_server_basic_ops.py
+++ b/tempest/scenario/test_server_basic_ops.py
@@ -27,7 +27,7 @@
CONF = config.CONF
-class TestServerBasicOps(manager.ScenarioTest):
+class TestServerBasicOps(manager.ScenarioTestWithNetwork):
"""The test suite for server basic operations
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index d04cb9a..db8f533 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -41,6 +41,11 @@
super(TestSnapshotPattern, cls).skip_checks()
if not CONF.compute_feature_enabled.snapshot:
raise cls.skipException("Snapshotting is not available.")
+ if not all([CONF.compute.image_full_ref,
+ CONF.compute.image_full_username,
+ CONF.compute.image_full_flavor_ref]):
+ raise cls.skipException(
+ "Test requires image_full_* options to be set.")
@decorators.idempotent_id('608e604b-1d63-4a82-8e3e-91bc665c90b4')
@decorators.attr(type='slow')
@@ -51,16 +56,20 @@
# prepare for booting an instance
keypair = self.create_keypair()
security_group = self.create_security_group()
+ username = CONF.compute.image_full_username
# boot an instance and create a timestamp file in it
server = self.create_server(
key_name=keypair['name'],
- security_groups=[{'name': security_group['name']}])
+ security_groups=[{'name': security_group['name']}],
+ flavor=CONF.compute.image_full_flavor_ref,
+ image_id=CONF.compute.image_full_ref)
instance_ip = self.get_server_ip(server)
timestamp = self.create_timestamp(instance_ip,
private_key=keypair['private_key'],
- server=server)
+ server=server,
+ username=username)
# snapshot the instance
snapshot_image = self.create_server_snapshot(server=server)
@@ -74,13 +83,15 @@
server_from_snapshot = self.create_server(
image_id=snapshot_image['id'],
key_name=keypair['name'],
- security_groups=[{'name': security_group['name']}])
+ security_groups=[{'name': security_group['name']}],
+ flavor=CONF.compute.image_full_flavor_ref)
# check the existence of the timestamp file in the second instance
server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
timestamp2 = self.get_timestamp(server_from_snapshot_ip,
private_key=keypair['private_key'],
- server=server_from_snapshot)
+ server=server_from_snapshot,
+ username=username)
self.assertEqual(timestamp, timestamp2)
# snapshot the instance again
diff --git a/tempest/scenario/test_volume_backup_restore.py b/tempest/scenario/test_volume_backup_restore.py
index 07ca38a..ca563da 100644
--- a/tempest/scenario/test_volume_backup_restore.py
+++ b/tempest/scenario/test_volume_backup_restore.py
@@ -22,7 +22,7 @@
CONF = config.CONF
-class TestVolumeBackupRestore(manager.ScenarioTest):
+class TestVolumeBackupRestore(manager.ScenarioTestWithNetwork):
"""Test cinder backup and restore
This testcase verifies content preservation after backup and restore
diff --git a/tempest/scenario/test_volume_boot_pattern.py b/tempest/scenario/test_volume_boot_pattern.py
index 5e28ecd..1c3c8bf 100644
--- a/tempest/scenario/test_volume_boot_pattern.py
+++ b/tempest/scenario/test_volume_boot_pattern.py
@@ -54,6 +54,11 @@
'The public_network_id option must be specified.')
@testtools.skipUnless(CONF.volume_feature_enabled.snapshot,
'Cinder volume snapshots are disabled')
+ @testtools.skipUnless(all([
+ CONF.compute.image_full_ref,
+ CONF.compute.image_full_username,
+ CONF.compute.image_full_flavor_ref]),
+ 'Test requires image_full_* options to be set.')
@utils.services('compute', 'volume', 'image')
def test_volume_boot_pattern(self):
"""This test case attempts to reproduce the following steps:
@@ -67,27 +72,35 @@
* Boot an additional instance from the new snapshot based volume
* Check written content in the instance booted from snapshot
"""
-
LOG.info("Creating keypair and security group")
keypair = self.create_keypair()
security_group = self.create_security_group()
+ username = CONF.compute.image_full_username
# create an instance from volume
LOG.info("Booting instance 1 from volume")
- volume_origin = self.create_volume_from_image()
+ volume_origin = self.create_volume_from_image(
+ image_id=CONF.compute.image_full_ref)
instance_1st = self.boot_instance_from_resource(
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
- security_group=security_group)
+ security_group=security_group,
+ flavor=CONF.compute.image_full_flavor_ref)
LOG.info("Booted first instance: %s", instance_1st)
# write content to volume on instance
LOG.info("Setting timestamp in instance %s", instance_1st)
ip_instance_1st = self.get_server_ip(instance_1st)
+ self.wait_for_cloud_init(
+ ip_instance_1st,
+ private_key=keypair['private_key'],
+ server=instance_1st,
+ username=username)
timestamp = self.create_timestamp(ip_instance_1st,
private_key=keypair['private_key'],
- server=instance_1st)
+ server=instance_1st,
+ username=username)
# delete instance
LOG.info("Deleting first instance: %s", instance_1st)
@@ -98,17 +111,29 @@
source_id=volume_origin['id'],
source_type='volume',
keypair=keypair,
- security_group=security_group)
+ security_group=security_group,
+ flavor=CONF.compute.image_full_flavor_ref)
LOG.info("Booted second instance %s", instance_2nd)
# check the content of written file
LOG.info("Getting timestamp in instance %s", instance_2nd)
ip_instance_2nd = self.get_server_ip(instance_2nd)
+ self.wait_for_cloud_init(
+ ip_instance_2nd,
+ private_key=keypair['private_key'],
+ server=instance_2nd,
+ username=username)
timestamp2 = self.get_timestamp(ip_instance_2nd,
private_key=keypair['private_key'],
- server=instance_2nd)
+ server=instance_2nd,
+ username=username)
self.assertEqual(timestamp, timestamp2)
+ # Sync filesystem caches to persistent storage before doing snapshot
+ self.run_sync(ip_instance_2nd,
+ private_key=keypair['private_key'],
+ server=instance_2nd,
+ username=username)
# snapshot a volume
LOG.info("Creating snapshot from volume: %s", volume_origin['id'])
snapshot = self.create_volume_snapshot(volume_origin['id'], force=True)
@@ -119,19 +144,26 @@
size=snapshot['size'])
LOG.info("Booting third instance from snapshot")
server_from_snapshot = (
- self.boot_instance_from_resource(source_id=volume['id'],
- source_type='volume',
- keypair=keypair,
- security_group=security_group))
+ self.boot_instance_from_resource(
+ source_id=volume['id'],
+ source_type='volume', keypair=keypair,
+ security_group=security_group,
+ flavor=CONF.compute.image_full_flavor_ref))
LOG.info("Booted third instance %s", server_from_snapshot)
# check the content of written file
LOG.info("Logging into third instance to get timestamp: %s",
server_from_snapshot)
server_from_snapshot_ip = self.get_server_ip(server_from_snapshot)
+ self.wait_for_cloud_init(
+ server_from_snapshot_ip,
+ private_key=keypair['private_key'],
+ server=server_from_snapshot,
+ username=username)
timestamp3 = self.get_timestamp(server_from_snapshot_ip,
private_key=keypair['private_key'],
- server=server_from_snapshot)
+ server=server_from_snapshot,
+ username=username)
self.assertEqual(timestamp, timestamp3)
@decorators.idempotent_id('05795fb2-b2a7-4c9f-8fac-ff25aedb1489')
@@ -276,17 +308,27 @@
@decorators.idempotent_id('cb78919a-e553-4bab-b73b-10cf4d2eb125')
@testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
'Encrypted volume attach is not supported')
+ @testtools.skipUnless(
+ 'luks' in CONF.volume_feature_enabled.supported_crypto_providers,
+ 'luks cryptoprovider is not supported.')
@utils.services('compute', 'volume')
def test_boot_server_from_encrypted_volume_luks(self):
"""LUKs v1 decrypts volume through libvirt."""
- self._do_test_boot_server_from_encrypted_volume_luks('luks')
+ self._do_test_boot_server_from_encrypted_volume_luks(
+ CONF.volume.volume_type_luks
+ )
@decorators.idempotent_id('5ab6100f-1b31-4dd0-a774-68cfd837ef77')
@testtools.skipIf(CONF.volume.storage_protocol == 'ceph',
'Ceph only supports LUKSv2 if doing host attach.')
@testtools.skipUnless(CONF.compute_feature_enabled.attach_encrypted_volume,
'Encrypted volume attach is not supported')
+ @testtools.skipUnless(
+ 'luks2' in CONF.volume_feature_enabled.supported_crypto_providers,
+ 'luks2 crypto provider is not supported.')
@utils.services('compute', 'volume')
def test_boot_server_from_encrypted_volume_luksv2(self):
"""LUKs v2 decrypts volume through os-brick."""
- self._do_test_boot_server_from_encrypted_volume_luks('luks2')
+ self._do_test_boot_server_from_encrypted_volume_luks(
+ CONF.volume.volume_type_luks_v2
+ )
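Both LUKS tests now skip unless the matching provider appears in CONF.volume_feature_enabled.supported_crypto_providers, and take their volume type from configuration rather than a hard-coded string. A standalone sketch of the same gating pattern follows; only the CONF option names and the _do_test helper come from the hunks above, the class itself is illustrative:

    import testtools

    from tempest.api.compute import base
    from tempest import config

    CONF = config.CONF

    class EncryptedVolumeSketch(base.BaseV2ComputeAdminTest):

        @testtools.skipUnless(
            'luks2' in CONF.volume_feature_enabled.supported_crypto_providers,
            'luks2 crypto provider is not supported.')
        def test_boot_from_luks2_volume(self):
            # The volume type is deployment-specific, so read it from
            # config instead of hard-coding 'luks2'.
            self._do_test_boot_server_from_encrypted_volume_luks(
                CONF.volume.volume_type_luks_v2)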
diff --git a/tempest/serial_tests/api/admin/test_aggregates.py b/tempest/serial_tests/api/admin/test_aggregates.py
index cedeec0..ce54957 100644
--- a/tempest/serial_tests/api/admin/test_aggregates.py
+++ b/tempest/serial_tests/api/admin/test_aggregates.py
@@ -222,6 +222,9 @@
@decorators.idempotent_id('96be03c7-570d-409c-90f8-e4db3c646996')
def test_aggregate_add_host_create_server_with_az(self):
"""Test adding a host to the given aggregate and creating a server"""
+ if CONF.production:
+ raise self.skipException("Not allowed to run this test "
+ "on production environment")
self.useFixture(fixtures.LockFixture('availability_zone'))
az_name = data_utils.rand_name(
prefix=CONF.resource_name_prefix, name=self.az_name_prefix)
@@ -235,12 +238,20 @@
if agg['availability_zone']:
hosts_in_zone.extend(agg['hosts'])
hosts = [v for v in self.hosts_available if v not in hosts_in_zone]
- if not hosts:
+ hosts_available = []
+ for host in hosts:
+ hypervisor_servers = (
+ self.os_admin.hypervisor_client.list_servers_on_hypervisor(
+ host)["hypervisors"][0].get("servers", None))
+ if not hypervisor_servers:
+ hosts_available.append(host)
+ if not hosts_available:
raise self.skipException("All hosts are already in other "
- "availability zones, so can't add "
+ "availability zones or have running "
+ "instances, so can't add "
"host to aggregate. \nAggregates list: "
"%s" % aggregates)
- host = hosts[0]
+ host = hosts_available[0]
self.client.add_host(aggregate['id'], host=host)
self.addCleanup(self.client.remove_host, aggregate['id'], host=host)
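The hunk above narrows the candidate hosts to hypervisors with no servers on them, so adding a host to the test aggregate can no longer disturb running workloads. Extracted into a helper, the check would read as below; list_servers_on_hypervisor is the real admin client call from the hunk, while the helper itself is only a sketch:

    def _hosts_without_servers(self, hosts):
        """Return only hosts whose hypervisor reports no servers."""
        free_hosts = []
        for host in hosts:
            hypervisors = (
                self.os_admin.hypervisor_client.list_servers_on_hypervisor(
                    host)["hypervisors"])
            # The 'servers' key is missing or empty on an idle hypervisor.
            if not hypervisors[0].get("servers"):
                free_hosts.append(host)
        return free_hosts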
diff --git a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
index a831fe5..cc45297 100644
--- a/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
+++ b/tempest/serial_tests/scenario/test_aggregates_basic_ops.py
@@ -63,7 +63,11 @@
hosts_available = []
for host in svc_list:
if (host['state'] == 'up' and host['status'] == 'enabled'):
- hosts_available.append(host['host'])
+ hypervisor_servers = (
+ self.os_admin.hypervisor_client.list_servers_on_hypervisor(
+ host["host"])["hypervisors"][0].get("servers", None))
+ if not hypervisor_servers:
+ hosts_available.append(host["host"])
aggregates = self.aggregates_client.list_aggregates()['aggregates']
hosts_in_zone = []
for agg in aggregates:
@@ -72,7 +76,8 @@
hosts = [v for v in hosts_available if v not in hosts_in_zone]
if not hosts:
raise self.skipException("All hosts are already in other "
- "availability zones, so can't add "
+ "availability zones or have running "
+ "instances, so can't add "
"host to aggregate. \nAggregates list: "
"%s" % aggregates)
return hosts[0]
@@ -120,6 +125,9 @@
@decorators.attr(type='slow')
@utils.services('compute')
def test_aggregate_basic_ops(self):
+ if CONF.production:
+ raise self.skipException("Not allowed to run this test "
+ "on production environment")
self.useFixture(fixtures.LockFixture('availability_zone'))
az = 'foo_zone'
aggregate_name = data_utils.rand_name(
diff --git a/tempest/test_discover/test_discover.py b/tempest/test_discover/test_discover.py
index 679d58b..20896bb 100644
--- a/tempest/test_discover/test_discover.py
+++ b/tempest/test_discover/test_discover.py
@@ -24,8 +24,8 @@
suite = unittest.TestSuite()
base_path = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0]
base_path = os.path.split(base_path)[0]
- # Load local tempest tests
- for test_dir in ['api', 'scenario', 'serial_tests']:
+ # Load local parallel tempest tests
+ for test_dir in ['api', 'scenario']:
full_test_dir = os.path.join(base_path, 'tempest', test_dir)
if not pattern:
suite.addTests(loader.discover(full_test_dir,
@@ -33,17 +33,25 @@
else:
suite.addTests(loader.discover(full_test_dir, pattern=pattern,
top_level_dir=base_path))
-
- plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
- if not plugin_load_tests:
- return suite
-
# Load any installed plugin tests
- for plugin in plugin_load_tests:
- test_dir, top_path = plugin_load_tests[plugin]
- if not pattern:
- suite.addTests(loader.discover(test_dir, top_level_dir=top_path))
- else:
- suite.addTests(loader.discover(test_dir, pattern=pattern,
- top_level_dir=top_path))
+ plugin_load_tests = ext_plugins.get_plugin_load_tests_tuple()
+ if plugin_load_tests:
+ for plugin in plugin_load_tests:
+ test_dir, top_path = plugin_load_tests[plugin]
+ if not pattern:
+ suite.addTests(loader.discover(
+ test_dir, top_level_dir=top_path))
+ else:
+ suite.addTests(loader.discover(test_dir, pattern=pattern,
+ top_level_dir=top_path))
+ # Serial tests can block execution of tests that are loaded after
+ # them, so always load them last.
+ serial_test_dir = os.path.join(base_path, 'tempest', 'serial_tests')
+ if not pattern:
+ suite.addTests(loader.discover(serial_test_dir,
+ top_level_dir=base_path))
+ else:
+ suite.addTests(loader.discover(serial_test_dir, pattern=pattern,
+ top_level_dir=base_path))
+
return suite
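Since unittest.TestSuite executes tests in the order they were added, appending tempest/serial_tests after the parallel directories and the plugin suites is what guarantees the serial tests run last and cannot block tests loaded after them. Reduced to its essentials (plugin loading omitted), the ordering looks like:

    # Minimal sketch of the discovery order above: parallel suites first,
    # serial tests appended last so they run at the end.
    import os
    import unittest

    def build_suite(base_path, loader, pattern='test*.py'):
        suite = unittest.TestSuite()
        for test_dir in ['api', 'scenario']:
            full_dir = os.path.join(base_path, 'tempest', test_dir)
            suite.addTests(loader.discover(full_dir, pattern=pattern,
                                           top_level_dir=base_path))
        serial_dir = os.path.join(base_path, 'tempest', 'serial_tests')
        suite.addTests(loader.discover(serial_dir, pattern=pattern,
                                       top_level_dir=base_path))
        return suite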
diff --git a/tempest/tests/cmd/test_verify_tempest_config.py b/tempest/tests/cmd/test_verify_tempest_config.py
index fa43e58..ef81558 100644
--- a/tempest/tests/cmd/test_verify_tempest_config.py
+++ b/tempest/tests/cmd/test_verify_tempest_config.py
@@ -492,7 +492,9 @@
self.assertFalse(
verify_tempest_config.contains_version('v5.', ['v1.0', 'v2.0']))
- def test_check_service_availability(self):
+ @mock.patch('tempest.cmd.verify_tempest_config.CONF._config',
+ new_callable=fake_config.FakePrivate)
+ def test_check_service_availability(self, mock_config):
class FakeAuthProvider:
def get_auth(self):
return ('token',
@@ -505,7 +507,6 @@
class Fake_os:
auth_provider = FakeAuthProvider()
auth_version = 'v2'
- verify_tempest_config.CONF._config = fake_config.FakePrivate()
services = verify_tempest_config.check_service_availability(
Fake_os(), True)
self.assertEqual(
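Replacing the bare assignment to verify_tempest_config.CONF._config with a @mock.patch decorator means the private config object is restored automatically when the test returns, so the override can no longer leak into tests that run afterwards. A self-contained sketch of the pattern, with Conf/CONF as stand-ins for the real objects:

    import unittest
    from unittest import mock

    class Conf:
        _config = object()   # stands in for the real private config

    CONF = Conf()

    class ConfigPatchExample(unittest.TestCase):
        @mock.patch(__name__ + '.CONF._config', new_callable=dict)
        def test_with_isolated_config(self, mock_config):
            # Inside the test, CONF._config is the fake; afterwards the
            # original object is put back automatically.
            self.assertIs(CONF._config, mock_config)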
diff --git a/tempest/tests/common/test_credentials_factory.py b/tempest/tests/common/test_credentials_factory.py
index 8a1158d..bff3506 100644
--- a/tempest/tests/common/test_credentials_factory.py
+++ b/tempest/tests/common/test_credentials_factory.py
@@ -129,14 +129,14 @@
mock_preprov_provider_params.return_value = expected_params
expected_name = 'my_name'
expected_identity_version = 'identity_version'
- cf.get_credentials_provider(
- expected_name,
- force_tenant_isolation=False,
- identity_version=expected_identity_version)
+ cf.get_credentials_provider(expected_name,
+ identity_version=expected_identity_version)
mock_preprov_provider_params.assert_called_once_with(
expected_identity_version)
mock_preprov_credentials_provider_class.assert_called_once_with(
- name=expected_name, **expected_params)
+ name=expected_name, network_resources=None,
+ separate_projects_by_network_existence=False,
+ **expected_params)
def test_get_credentials_provider_preprov_no_file(self):
cfg.CONF.set_default('use_dynamic_credentials', False, group='auth')
diff --git a/tempest/tests/common/utils/linux/test_remote_client.py b/tempest/tests/common/utils/linux/test_remote_client.py
index 937f93a..5801f04 100644
--- a/tempest/tests/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/common/utils/linux/test_remote_client.py
@@ -180,9 +180,7 @@
def test_validate_debug_ssh_console(self):
self.assertRaises(lib_exc.SSHTimeout,
self.conn.validate_authentication)
- msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
- 'TestRemoteClientWithServer:test_validate_debug_ssh_console',
- self.server)
+ msg = 'Executing command on 127.0.0.1 failed.'
self.assertIn(msg, self.log.output)
self.assertIn('Console output for', self.log.output)
@@ -190,9 +188,7 @@
self.assertRaises(lib_exc.SSHTimeout,
self.conn.exec_command, 'fake command')
self.assertIn('fake command', self.log.output)
- msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
- 'TestRemoteClientWithServer:test_exec_command_debug_ssh_console',
- self.server)
+ msg = 'Executing command on 127.0.0.1 failed.'
self.assertIn(msg, self.log.output)
self.assertIn('Console output for', self.log.output)
@@ -204,9 +200,7 @@
def test_validate_debug_ssh_console(self):
self.assertRaises(lib_exc.SSHTimeout,
self.conn.validate_authentication)
- msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
- 'TestRemoteClientWithBrokenServer:test_validate_debug_ssh_console',
- self.server)
+ msg = 'Executing command on 127.0.0.1 failed.'
self.assertIn(msg, self.log.output)
msg = 'Could not get console_log for server %s' % self.server['id']
self.assertIn(msg, self.log.output)
@@ -215,10 +209,7 @@
self.assertRaises(lib_exc.SSHTimeout,
self.conn.exec_command, 'fake command')
self.assertIn('fake command', self.log.output)
- caller = ":".join(['TestRemoteClientWithBrokenServer',
- 'test_exec_command_debug_ssh_console'])
- msg = 'Caller: %s. Timeout trying to ssh to server %s' % (
- caller, self.server)
+ msg = 'Executing command on 127.0.0.1 failed.'
self.assertIn(msg, self.log.output)
msg = 'Could not get console_log for server %s' % self.server['id']
self.assertIn(msg, self.log.output)
diff --git a/tempest/tests/lib/common/test_dynamic_creds.py b/tempest/tests/lib/common/test_dynamic_creds.py
index d3d01c0..646ec9b 100644
--- a/tempest/tests/lib/common/test_dynamic_creds.py
+++ b/tempest/tests/lib/common/test_dynamic_creds.py
@@ -741,6 +741,7 @@
router_interface_mock = self.patch(
'tempest.lib.services.network.routers_client.RoutersClient.'
'add_router_interface')
+ self.patch('tempest.lib.common.waiters.wait_router_interface_removed')
creds.get_primary_creds()
creds.get_project_admin_creds()
router_interface_mock.assert_called_once_with('1234', subnet_id='1234')
diff --git a/tempest/tests/lib/common/test_preprov_creds.py b/tempest/tests/lib/common/test_preprov_creds.py
index f2131dc..04e6771 100644
--- a/tempest/tests/lib/common/test_preprov_creds.py
+++ b/tempest/tests/lib/common/test_preprov_creds.py
@@ -77,7 +77,10 @@
{'username': 'test_admin2', 'project_name': 'test_tenant12',
'password': 'p', 'roles': [admin_role]},
{'username': 'test_admin3', 'project_name': 'test_tenant13',
- 'password': 'p', 'types': ['admin']}]
+ 'password': 'p', 'types': ['admin']},
+ {'username': 'test_user14', 'project_name': 'test_tenant14',
+ 'password': 'p', 'roles': ['member']},
+ ]
def setUp(self):
super(TestPreProvisionedCredentials, self).setUp()
@@ -113,6 +116,17 @@
hash_list.append(temp_hash)
return hash_list
+ def _remove_hash(self, hash_list, hash_index):
+ test_account_class = preprov_creds.PreProvisionedCredentialProvider(
+ **self.fixed_params)
+ remove_mock = self.useFixture(fixtures.MockPatch('os.remove'))
+ rmdir_mock = self.useFixture(fixtures.MockPatch('os.rmdir'))
+ test_account_class.remove_hash(hash_list[hash_index])
+ hash_path = os.path.join(self.fixed_params['accounts_lock_dir'],
+ hash_list[hash_index])
+ return {'remove_mock': remove_mock, 'hash_path': hash_path,
+ 'rmdir_mock': rmdir_mock}
+
def test_get_hash(self):
# Test with all accounts to make sure we try all combinations
# and hide no race conditions
@@ -179,8 +193,12 @@
create=True) as open_mock:
test_account_class._get_free_hash(hash_list)
lock_path = os.path.join(self.fixed_params['accounts_lock_dir'],
- hash_list[0])
- open_mock.assert_called_once_with(lock_path, 'w')
+ list(set(hash_list))[0])
+ accounts_lock_dir_calls = [
+     mock.call(test_account_class._used_projects_file),
+     mock.call(lock_path, 'w'),
+     mock.call(test_account_class._used_projects_file, 'w')]
+ open_mock.assert_has_calls(accounts_lock_dir_calls, any_order=True)
mkdir_path = os.path.join(self.fixed_params['accounts_lock_dir'])
mkdir_mock.mock.assert_called_once_with(mkdir_path)
@@ -221,43 +239,33 @@
hash_list[3])
open_mock.assert_has_calls([mock.call(lock_path, 'w')])
- @mock.patch('oslo_concurrency.lockutils.lock')
- def test_remove_hash_last_account(self, lock_mock):
+ @mock.patch('tempest.lib.common.preprov_creds.'
+ 'PreProvisionedCredentialProvider._process_project')
+ def test_remove_hash_last_account(self, _process_project_mock):
hash_list = self._get_hash_list(self.test_accounts)
# Pretend the pseudo-lock is there
self.useFixture(
fixtures.MockPatch('os.path.isfile', return_value=True))
# Pretend the lock dir is empty
self.useFixture(fixtures.MockPatch('os.listdir', return_value=[]))
- test_account_class = preprov_creds.PreProvisionedCredentialProvider(
- **self.fixed_params)
- remove_mock = self.useFixture(fixtures.MockPatch('os.remove'))
- rmdir_mock = self.useFixture(fixtures.MockPatch('os.rmdir'))
- test_account_class.remove_hash(hash_list[2])
- hash_path = os.path.join(self.fixed_params['accounts_lock_dir'],
- hash_list[2])
+ result = self._remove_hash(hash_list, 2)
lock_path = self.fixed_params['accounts_lock_dir']
- remove_mock.mock.assert_called_once_with(hash_path)
- rmdir_mock.mock.assert_called_once_with(lock_path)
+ result['remove_mock'].mock.assert_called_once_with(result['hash_path'])
+ result['rmdir_mock'].mock.assert_called_once_with(lock_path)
- @mock.patch('oslo_concurrency.lockutils.lock')
- def test_remove_hash_not_last_account(self, lock_mock):
+ @mock.patch('tempest.lib.common.preprov_creds.'
+ 'PreProvisionedCredentialProvider._process_project')
+ def test_remove_hash_not_last_account(self, _process_project_mock):
hash_list = self._get_hash_list(self.test_accounts)
# Pretend the pseudo-lock is there
self.useFixture(fixtures.MockPatch(
'os.path.isfile', return_value=True))
- # Pretend the lock dir is empty
+ # Pretend the lock dir is not empty
self.useFixture(fixtures.MockPatch('os.listdir', return_value=[
hash_list[1], hash_list[4]]))
- test_account_class = preprov_creds.PreProvisionedCredentialProvider(
- **self.fixed_params)
- remove_mock = self.useFixture(fixtures.MockPatch('os.remove'))
- rmdir_mock = self.useFixture(fixtures.MockPatch('os.rmdir'))
- test_account_class.remove_hash(hash_list[2])
- hash_path = os.path.join(self.fixed_params['accounts_lock_dir'],
- hash_list[2])
- remove_mock.mock.assert_called_once_with(hash_path)
- rmdir_mock.mock.assert_not_called()
+ result = self._remove_hash(hash_list, 2)
+ result['remove_mock'].mock.assert_called_once_with(result['hash_path'])
+ result['rmdir_mock'].mock.assert_not_called()
def test_is_multi_user(self):
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
@@ -319,7 +327,7 @@
calls = get_free_hash_mock.mock.mock_calls
self.assertEqual(len(calls), 1)
args = calls[0][1][0]
- self.assertEqual(len(args), 10)
+ self.assertEqual(len(args), 11)
for i in admin_hashes:
self.assertNotIn(i, args)
@@ -334,7 +342,7 @@
'tempest.lib.common.preprov_creds.read_accounts_yaml',
return_value=test_accounts))
test_accounts_class = preprov_creds.PreProvisionedCredentialProvider(
- **self.fixed_params)
+ network_resources={"test": "test"}, **self.fixed_params)
with mock.patch('tempest.lib.services.network.networks_client.'
'NetworksClient.list_networks',
return_value={'networks': [{'name': 'network-2',
@@ -480,4 +488,8 @@
{'username': 'test_admin2', 'project_name': 'test_project12',
'domain_name': 'domain', 'password': 'p', 'roles': [admin_role]},
{'username': 'test_admin3', 'project_name': 'test_tenant13',
- 'domain_name': 'domain', 'password': 'p', 'types': ['admin']}]
+ 'domain_name': 'domain', 'password': 'p', 'types': ['admin']},
+ {'username': 'test_user14', 'project_name': 'test_tenant14',
+ 'domain_name': 'domain', 'password': 'p',
+ 'roles': ['member']},
+ ]
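The factory test earlier in this patch now expects the provider to receive network_resources and separate_projects_by_network_existence, and the accounts list grows a plain 'member' user, which is why the expected hash count moves from 10 to 11. How a caller might construct the provider with the new arguments, as a sketch only (the values are illustrative and fixed_params stands for the usual accounts-file settings):

    provider = preprov_creds.PreProvisionedCredentialProvider(
        name='my_run',
        network_resources={'network': True},          # illustrative
        separate_projects_by_network_existence=False,
        **fixed_params)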
diff --git a/tempest/tests/lib/common/utils/linux/test_remote_client.py b/tempest/tests/lib/common/utils/linux/test_remote_client.py
index df23e63..c41f178 100644
--- a/tempest/tests/lib/common/utils/linux/test_remote_client.py
+++ b/tempest/tests/lib/common/utils/linux/test_remote_client.py
@@ -15,6 +15,8 @@
from unittest import mock
+import fixtures
+
from tempest.lib.common import ssh
from tempest.lib.common.utils.linux import remote_client
from tempest.lib import exceptions as lib_exc
@@ -29,6 +31,12 @@
class TestRemoteClient(base.TestCase):
+ def setUp(self):
+ super(TestRemoteClient, self).setUp()
+ self.log = self.useFixture(fixtures.FakeLogger(
+ name='tempest.lib.common.utils.linux.remote_client',
+ level='DEBUG'))
+
@mock.patch.object(ssh.Client, 'exec_command', return_value='success')
def test_exec_command(self, mock_ssh_exec_command):
client = remote_client.RemoteClient('192.168.1.10', 'username')
@@ -50,9 +58,8 @@
client = remote_client.RemoteClient('192.168.1.10', 'username',
server=server)
self.assertRaises(lib_exc.SSHTimeout, client.exec_command, 'ls')
- mock_debug.assert_called_with(
- 'Caller: %s. Timeout trying to ssh to server %s',
- 'TestRemoteClient:test_debug_ssh_without_console', server)
+ msg = 'Executing command on 192.168.1.10 failed.'
+ self.assertIn(msg, self.log.output)
@mock.patch.object(remote_client.LOG, 'debug')
@mock.patch.object(ssh.Client, 'exec_command')
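With the log assertions now routed through a fixtures.FakeLogger installed in setUp, any message emitted on the remote_client logger can be matched as plain text instead of pinning the exact LOG.debug call signature. Standalone, the fixture behaves as below; only the logger name and level come from the hunk above, the rest is a sketch:

    import logging

    import fixtures
    import testtools

    class FakeLoggerExample(testtools.TestCase):
        def setUp(self):
            super().setUp()
            # Captures everything logged under the given name at DEBUG+.
            self.log = self.useFixture(fixtures.FakeLogger(
                name='tempest.lib.common.utils.linux.remote_client',
                level='DEBUG'))

        def test_captures_debug_output(self):
            logging.getLogger(
                'tempest.lib.common.utils.linux.remote_client').debug(
                'Executing command on 192.168.1.10 failed.')
            self.assertIn('failed.', self.log.output)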