Merge "Add scenario test: test instances with cinder volumes"
diff --git a/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
new file mode 100644
index 0000000..30a2278
--- /dev/null
+++ b/releasenotes/notes/add-volume_types_for_data_volume-config-option.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ A new config option, ``volume_types_for_data_volume``, is added to the
+ ``volume_feature_enabled`` section. It allows the user to specify, as a
+ comma-separated list, which volume types are used for the data volumes in
+ the new test ``test_instances_with_cinder_volumes_on_all_compute_nodes``.
+ By default, this option is set to None.
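+
+ For illustration, a deployment that wants the test to use two specific
+ data volume types (the type names below are only examples) would set
+ something like the following in tempest.conf::
+
+ [volume-feature-enabled]
+ volume_types_for_data_volume = lvmdriver-1,ceph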
diff --git a/tempest/config.py b/tempest/config.py
index 0a084ea..1445773 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -1078,7 +1078,11 @@
default=True,
help='Does the cloud support extending the size of a volume '
'which has snapshot? Some drivers do not support this '
- 'operation.')
+ 'operation.'),
+ cfg.StrOpt('volume_types_for_data_volume',
+ default=None,
+ help='Volume types used for data volumes. Multiple volume '
+ 'types can be assigned as a comma-separated list.'),
]
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 5f30909..d51e7e5 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -751,6 +751,31 @@
return rules
+ def create_and_add_security_group_to_server(self, server):
+ """Create a security group and add it to the server.
+
+ :param server: The server to add the security group to.
+ :return: None. Raises lib_exc.TimeoutException if the security
+ group does not show up on the server within the compute build
+ timeout.
+ """
+
+ secgroup = self.create_security_group()
+ self.servers_client.add_security_group(server['id'],
+ name=secgroup['name'])
+ self.addCleanup(self.servers_client.remove_security_group,
+ server['id'], name=secgroup['name'])
+
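+ # the group may not show up in the server details immediately,
+ # so poll until nova reports it as attached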
+ def wait_for_secgroup_add():
+ body = (self.servers_client.show_server(server['id'])
+ ['server'])
+ return {'name': secgroup['name']} in body['security_groups']
+
+ if not test_utils.call_until_true(wait_for_secgroup_add,
+ CONF.compute.build_timeout,
+ CONF.compute.build_interval):
+ msg = ('Timed out waiting for security group %s to be added '
+ 'to server %s' % (secgroup['id'], server['id']))
+ raise lib_exc.TimeoutException(msg)
+
def get_remote_client(self, ip_address, username=None, private_key=None,
server=None):
"""Get a SSH client to a remote server
@@ -1177,6 +1202,15 @@
self.assertIsNone(floating_ip['port_id'])
return floating_ip
+ def create_file(self, ip_address, path, private_key=None, server=None,
+ username=None):
+ """Create a file on a remote server"""
+ ssh_client = self.get_remote_client(ip_address,
+ private_key=private_key,
+ server=server,
+ username=username)
+ ssh_client.exec_command('sudo mkdir -p %s' % path)
+
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
private_key=None, server=None, username=None,
fs='vfat'):
diff --git a/tempest/scenario/test_instances_with_cinder_volumes.py b/tempest/scenario/test_instances_with_cinder_volumes.py
new file mode 100644
index 0000000..5f33b49
--- /dev/null
+++ b/tempest/scenario/test_instances_with_cinder_volumes.py
@@ -0,0 +1,225 @@
+# Copyright 2024 OpenStack Foundation
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from oslo_log import log as logging
+
+from tempest.common import utils
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+from tempest.scenario import manager
+
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TestInstancesWithCinderVolumes(manager.ScenarioTest):
+ """This is cinder volumes test.
+
+ Tests are below:
+ * test_instances_with_cinder_volumes_on_all_compute_nodes
+ """
+
+ compute_min_microversion = '2.60'
+
+ @decorators.idempotent_id('d0e3c1a3-4b0a-4b0e-8b0a-4b0e8b0a4b0e')
+ @decorators.attr(type=['slow', 'multinode'])
+ @utils.services('compute', 'volume', 'image', 'network')
+ def test_instances_with_cinder_volumes_on_all_compute_nodes(self):
+ """Test instances with cinder volumes launches on all compute nodes
+
+ Steps:
+ 1. Create an image
+ 2. Create a keypair
+ 3. Create a bootable volume from the image and of the given volume
+ type
+ 4. Boot an instance from the bootable volume on each available
+ compute node, up to CONF.compute.min_compute_nodes
+ 5. Create a data volume of each type in volume_types_for_data_volume
+ on every selected compute node, up to CONF.compute.min_compute_nodes.
+ The total number of data volumes is equal to
+ compute nodes * len(volume_types_for_data_volume)
+ 6. Attach volumes to the instances
+ 7. Assign floating IP to all instances
+ 8. Configure security group for ssh access to all instances
+ 9. Confirm ssh access to all instances
+ 10. Run a write test on all volumes through the ssh connection of
+ each instance
+ 11. Clean up the resources: instances, volumes, keypair and image
+ """
+ boot_volume_type = (CONF.volume.volume_type or
+ self.create_volume_type()['name'])
+
+ # create an image
+ image = self.image_create()
+
+ # create keypair
+ keypair = self.create_keypair()
+
+ # check all available zones for booting instances
+ available_zone = \
+ self.os_admin.availability_zone_client.list_availability_zones(
+ detail=True)['availabilityZoneInfo']
+
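+ # build the list of (zone, host) pairs for hosts that run
+ # nova-compute, are available and are not ironic nodes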
+ hosts = []
+ for zone in available_zone:
+ if zone['zoneState']['available']:
+ for host in zone['hosts']:
+ if 'nova-compute' in zone['hosts'][host] and \
+ zone['hosts'][host]['nova-compute']['available'] and \
+ not host.endswith('-ironic'):
+ hosts.append({'zone': zone['zoneName'],
+ 'host_name': host})
+
+ # fail if there are fewer hosts than the minimum number of instances
+ if len(hosts) < CONF.compute.min_compute_nodes:
+ raise exceptions.InvalidConfiguration(
+ "Host list %s is shorter than min_compute_nodes. " % hosts)
+
+ # get volume types
+ volume_types = []
+ if CONF.volume_feature_enabled.volume_types_for_data_volume:
+ types = CONF.volume_feature_enabled.volume_types_for_data_volume
+ volume_types = types.split(',')
+ else:
+ # no user-specified volume types; create two default ones
+ volume_types.append(self.create_volume_type()['name'])
+ volume_types.append(self.create_volume_type()['name'])
+
+ hosts_to_boot_servers = hosts[:CONF.compute.min_compute_nodes]
+ LOG.debug("List of hosts selected to boot servers %s: ",
+ hosts_to_boot_servers)
+
+ # create the data volumes up front (without waiting for each one)
+ # and save them in a list
+ created_volumes = []
+ for host in hosts_to_boot_servers:
+ for volume_type in volume_types:
+ created_volumes.append(
+ self.create_volume(volume_type=volume_type,
+ wait_until=None)
+ )
+
+ bootable_volumes = []
+ for host in hosts_to_boot_servers:
+ # create boot volume from image and of the given volume type
+ bootable_volumes.append(
+ self.create_volume(
+ imageRef=image, volume_type=boot_volume_type,
+ wait_until=None)
+ )
+
+ # boot server
+ servers = []
+
+ for bootable_volume in bootable_volumes:
+
+ # wait for bootable volumes to become available
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, bootable_volume['id'], 'available')
+
+ # create an instance from bootable volume
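+ # wait_until=None lets the servers on all hosts build in
+ # parallel; ACTIVE is checked later, before attaching volumes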
+ server = self.boot_instance_from_resource(
+ source_id=bootable_volume['id'],
+ source_type='volume',
+ keypair=keypair,
+ wait_until=None
+ )
+ servers.append(server)
+
+ start = 0
+ end = len(volume_types)
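+ # each server consumes its own window of len(volume_types) data
+ # volumes from created_volumes, which was filled host by host above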
+ for server in servers:
+ attached_volumes = []
+
+ # wait for server to become active
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ # attach volumes to the instances
+ for volume in created_volumes[start:end]:
+
+ # wait for volume to become available
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, volume['id'], 'available')
+
+ attached_volume = self.nova_volume_attach(server, volume)
+ attached_volumes.append(attached_volume)
+ LOG.debug("Attached volume %s to server %s",
+ attached_volume['id'], server['id'])
+
+ # assign floating ip
+ floating_ip = None
+ if (CONF.network_feature_enabled.floating_ips and
+ CONF.network.floating_network_name):
+ fip = self.create_floating_ip(server)
+ floating_ip = self.associate_floating_ip(
+ fip, server)
+ ssh_ip = floating_ip['floating_ip_address']
+ else:
+ ssh_ip = self.get_server_ip(server)
+
+ # create security group
+ self.create_and_add_security_group_to_server(server)
+
+ # confirm ssh access
+ self.linux_client = self.get_remote_client(
+ ssh_ip, private_key=keypair['private_key'],
+ server=server
+ )
+
+ # run write test on all volumes
+ for volume in attached_volumes:
+
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client, volume['id'], 'in-use')
+
+ # get the mount path
+ mount_path = f"/mnt/{volume['attachments'][0]['device'][5:]}"
+
+ # create the mount point directory on the server
+ self.create_file(ssh_ip, mount_path,
+ private_key=keypair['private_key'],
+ server=server)
+
+ # the attached device is reported as e.g. /dev/vdb; strip the
+ # leading '/dev/' (first 5 chars) to get the dev name
+ timestamp_before = self.create_timestamp(
+ ssh_ip, private_key=keypair['private_key'], server=server,
+ dev_name=volume['attachments'][0]['device'][5:],
+ mount_path=mount_path
+ )
+ timestamp_after = self.get_timestamp(
+ ssh_ip, private_key=keypair['private_key'], server=server,
+ dev_name=volume['attachments'][0]['device'][5:],
+ mount_path=mount_path
+ )
+ self.assertEqual(timestamp_before, timestamp_after)
+
+ # detach and delete the volume
+ self.nova_volume_detach(server, volume)
+ self.volumes_client.delete_volume(volume['id'])
+
+ if floating_ip:
+ # delete the floating IP; this should refresh the server
+ # addresses
+ self.disassociate_floating_ip(floating_ip)
+ waiters.wait_for_server_floating_ip(
+ self.servers_client, server, floating_ip,
+ wait_for_disassociate=True)
+
+ start += len(volume_types)
+ end += len(volume_types)
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 6372c6b..543be31 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -19,9 +19,7 @@
from tempest.common import utils
from tempest.common import waiters
from tempest import config
-from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
-from tempest.lib import exceptions
from tempest.scenario import manager
CONF = config.CONF
@@ -73,25 +71,6 @@
disks = self.linux_client.get_disks()
self.assertEqual(1, disks.count(CONF.compute.volume_device_name))
- def create_and_add_security_group_to_server(self, server):
- secgroup = self.create_security_group()
- self.servers_client.add_security_group(server['id'],
- name=secgroup['name'])
- self.addCleanup(self.servers_client.remove_security_group,
- server['id'], name=secgroup['name'])
-
- def wait_for_secgroup_add():
- body = (self.servers_client.show_server(server['id'])
- ['server'])
- return {'name': secgroup['name']} in body['security_groups']
-
- if not test_utils.call_until_true(wait_for_secgroup_add,
- CONF.compute.build_timeout,
- CONF.compute.build_interval):
- msg = ('Timed out waiting for adding security group %s to server '
- '%s' % (secgroup['id'], server['id']))
- raise exceptions.TimeoutException(msg)
-
@decorators.attr(type='slow')
@decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
@utils.services('compute', 'volume', 'image', 'network')