Merge "Add command to fix/generate idempotent IDs"
diff --git a/.zuul.yaml b/.zuul.yaml
index 0b120b2..d92f283 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -4,18 +4,23 @@
- tempest-plugin-jobs
check:
jobs:
+ - cinder-tempest-plugin-lvm-multiattach
- cinder-tempest-plugin-lvm-lio-barbican
- - cinder-tempest-plugin-lvm-lio-barbican-centos-8-stream:
+ - cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream:
voting: false
- cinder-tempest-plugin-lvm-tgt-barbican
- nova-ceph-multistore:
voting: false
- cinder-tempest-plugin-cbak-ceph
- cinder-tempest-plugin-cbak-s3
+        # As per the Tempest "Stable Branch Support Policy", Tempest only
+        # supports the "Maintained" stable branches, not those in "Extended
+        # Maintenance". The same policy applies to all tempest plugins, so
+        # only jobs for the currently maintained stable branches should be
+        # listed here.
+ - cinder-tempest-plugin-basic-yoga
- cinder-tempest-plugin-basic-xena
- cinder-tempest-plugin-basic-wallaby
- - cinder-tempest-plugin-basic-victoria
- - cinder-tempest-plugin-basic-ussuri
# Set this job to voting once we have some actual tests to run
- cinder-tempest-plugin-protection-functional:
voting: false
@@ -26,10 +31,9 @@
- cinder-tempest-plugin-cbak-ceph
experimental:
jobs:
+ - cinder-tempest-plugin-cbak-ceph-yoga
- cinder-tempest-plugin-cbak-ceph-xena
- cinder-tempest-plugin-cbak-ceph-wallaby
- - cinder-tempest-plugin-cbak-ceph-victoria
- - cinder-tempest-plugin-cbak-ceph-ussuri
- job:
name: cinder-tempest-plugin-protection-functional
@@ -52,6 +56,30 @@
- cinder-tempest-plugin
- job:
+ name: cinder-tempest-plugin-lvm-multiattach
+ description: |
+      This enables multiattach tests along with the standard tempest tests.
+ parent: devstack-tempest
+ required-projects:
+ - opendev.org/openstack/tempest
+ - opendev.org/openstack/cinder-tempest-plugin
+ - opendev.org/openstack/cinder
+ vars:
+ tempest_test_regex: '(^tempest\.(api|scenario)|(^cinder_tempest_plugin))'
+ tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+ # Temporarily exclude TestMultiAttachVolumeSwap until LP bug #1980816 is resolved.
+ tempest_exclude_regex: 'TestMultiAttachVolumeSwap'
+ tox_envlist: all
+ devstack_localrc:
+ ENABLE_VOLUME_MULTIATTACH: true
+ tempest_plugins:
+ - cinder-tempest-plugin
+ irrelevant-files:
+ - ^.*\.rst$
+ - ^doc/.*$
+ - ^releasenotes/.*$
+
+- job:
name: cinder-tempest-plugin-lvm-barbican-base-abstract
description: |
This is a base job for lvm with lio & tgt targets
@@ -80,6 +108,8 @@
# FIXME: 'creator' should be re-added by the barbican devstack plugin
              # but the value below overrides everything.
tempest_roles: member,creator
+ volume:
+ build_timeout: 300
volume-feature-enabled:
volume_revert: True
devstack_services:
@@ -96,7 +126,7 @@
description: |
This is a base job for lvm with lio & tgt targets
with cinderlib tests.
- branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train)).*$
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria)).*$
parent: cinder-tempest-plugin-lvm-barbican-base-abstract
roles:
- zuul: opendev.org/openstack/cinderlib
@@ -114,8 +144,14 @@
name: cinder-tempest-plugin-lvm-barbican-base
description: |
This is a base job for lvm with lio & tgt targets
- with cinderlib tests to run on stable/train testing.
- branches: stable/train
+      with cinderlib tests, to run on stable/train through stable/victoria.
+      Those stable branches pin tempest to 26.1.0 (set in the devstack
+      stackrc file), so we must use the compatible cinder-tempest-plugin
+      version, 1.3.0.
+ branches:
+ - stable/train
+ - stable/ussuri
+ - stable/victoria
parent: cinder-tempest-plugin-lvm-barbican-base-abstract
roles:
- zuul: opendev.org/openstack/cinderlib
@@ -150,6 +186,7 @@
Integration tests that runs with the ceph devstack plugin, py3
and enable the backup service.
vars:
+ configure_swap_size: 4096
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
@@ -159,6 +196,12 @@
c-bak: true
- job:
+ name: cinder-tempest-plugin-cbak-ceph-yoga
+ parent: cinder-tempest-plugin-cbak-ceph
+ nodeset: openstack-single-node-focal
+ override-checkout: stable/yoga
+
+- job:
name: cinder-tempest-plugin-cbak-ceph-xena
parent: cinder-tempest-plugin-cbak-ceph
nodeset: openstack-single-node-focal
@@ -170,18 +213,6 @@
nodeset: openstack-single-node-focal
override-checkout: stable/wallaby
-- job:
- name: cinder-tempest-plugin-cbak-ceph-victoria
- parent: cinder-tempest-plugin-cbak-ceph
- nodeset: openstack-single-node-focal
- override-checkout: stable/victoria
-
-- job:
- name: cinder-tempest-plugin-cbak-ceph-ussuri
- parent: cinder-tempest-plugin-cbak-ceph
- nodeset: openstack-single-node-bionic
- override-checkout: stable/ussuri
-
# variant for pre-Ussuri branches (no volume revert for Ceph),
# should this job be used on those branches
- job:
@@ -210,7 +241,15 @@
nodeset: devstack-single-node-centos-8-stream
description: |
      This job configures Cinder with LVM, LIO, barbican and
- runs tempest tests and cinderlib tests on CentOS 8.
+ runs tempest tests and cinderlib tests on CentOS Stream 8.
+
+- job:
+ name: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream
+ parent: cinder-tempest-plugin-lvm-lio-barbican
+ nodeset: devstack-single-node-centos-9-stream
+ description: |
+      This job configures Cinder with LVM, LIO, barbican and
+ runs tempest tests and cinderlib tests on CentOS Stream 9.
- job:
name: cinder-tempest-plugin-lvm-tgt-barbican
@@ -258,6 +297,12 @@
- ^releasenotes/.*$
- job:
+ name: cinder-tempest-plugin-basic-yoga
+ parent: cinder-tempest-plugin-basic
+ nodeset: openstack-single-node-focal
+ override-checkout: stable/yoga
+
+- job:
name: cinder-tempest-plugin-basic-xena
parent: cinder-tempest-plugin-basic
nodeset: openstack-single-node-focal
@@ -268,15 +313,3 @@
parent: cinder-tempest-plugin-basic
nodeset: openstack-single-node-focal
override-checkout: stable/wallaby
-
-- job:
- name: cinder-tempest-plugin-basic-victoria
- parent: cinder-tempest-plugin-basic
- nodeset: openstack-single-node-focal
- override-checkout: stable/victoria
-
-- job:
- name: cinder-tempest-plugin-basic-ussuri
- parent: cinder-tempest-plugin-basic
- nodeset: openstack-single-node-bionic
- override-checkout: stable/ussuri
diff --git a/README.rst b/README.rst
index 3fd608a..ad536b8 100644
--- a/README.rst
+++ b/README.rst
@@ -34,7 +34,7 @@
SYSLOG=False
LOG_COLOR=False
RECLONE=yes
- ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,g-reg,key
+ ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,key
ENABLED_SERVICES+=,mysql,n-api,n-cond,n-cpu,n-crt,n-sch,rabbit,tempest,placement-api
CINDER_ENABLED_BACKENDS=lvmdriver-1
CINDER_DEFAULT_VOLUME_TYPE=lvmdriver-1
diff --git a/cinder_tempest_plugin/api/volume/base.py b/cinder_tempest_plugin/api/volume/base.py
index 40bfb33..f948a93 100644
--- a/cinder_tempest_plugin/api/volume/base.py
+++ b/cinder_tempest_plugin/api/volume/base.py
@@ -72,7 +72,7 @@
def create_volume(cls, wait_until='available', **kwargs):
"""Wrapper utility that returns a test volume.
- :param wait_until: wait till volume status.
+        :param wait_until: status to wait for; None means do not wait.
"""
if 'size' not in kwargs:
kwargs['size'] = CONF.volume.volume_size
@@ -93,8 +93,9 @@
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.volumes_client.delete_volume,
volume['id'])
- waiters.wait_for_volume_resource_status(cls.volumes_client,
- volume['id'], wait_until)
+ if wait_until:
+ waiters.wait_for_volume_resource_status(cls.volumes_client,
+ volume['id'], wait_until)
return volume
@classmethod
@@ -199,3 +200,19 @@
cls.admin_volume_types_client.delete_volume_type, type_id)
test_utils.call_and_ignore_notfound_exc(
cls.admin_volume_types_client.wait_for_resource_deletion, type_id)
+
+
+class CreateMultipleResourceTest(BaseVolumeTest):
+
+    def _create_multiple_resource(self, callback, repeat_count=5,
+                                  **kwargs):
+        """Call callback(**kwargs) repeat_count times; return the IDs."""
+        res = []
+        for _ in range(repeat_count):
+            res.append(callback(**kwargs)['id'])
+        return res
+
+    def _wait_for_multiple_resources(self, callback, wait_list, **kwargs):
+        """Run the waiter callback for every resource ID in wait_list."""
+        for r in wait_list:
+            callback(resource_id=r, **kwargs)
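
For orientation, a minimal sketch of how a subclass might combine the new
wait_until=None mode of create_volume with these helpers (the class and test
names here are illustrative, not part of this change):

    from tempest.common import waiters

    from cinder_tempest_plugin.api.volume import base


    class CreateVolumesExample(base.CreateMultipleResourceTest):

        # A real test would also carry an @decorators.idempotent_id(...).
        def test_create_many_volumes(self):
            # Issue five create requests without waiting on any of them ...
            ids = self._create_multiple_resource(self.create_volume,
                                                 wait_until=None)
            # ... then wait for all of them to reach 'available' in one pass.
            self._wait_for_multiple_resources(
                waiters.wait_for_volume_resource_status, ids,
                client=self.volumes_client, status='available')
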
diff --git a/cinder_tempest_plugin/api/volume/test_create_from_image.py b/cinder_tempest_plugin/api/volume/test_create_from_image.py
index dc296c0..acb1943 100644
--- a/cinder_tempest_plugin/api/volume/test_create_from_image.py
+++ b/cinder_tempest_plugin/api/volume/test_create_from_image.py
@@ -23,64 +23,6 @@
CONF = config.CONF
-class VolumeFromImageTest(base.BaseVolumeTest):
-
- @classmethod
- def skip_checks(cls):
- super(VolumeFromImageTest, cls).skip_checks()
- if not CONF.service_available.glance:
- raise cls.skipException("Glance service is disabled")
-
- @classmethod
- def create_volume_no_wait(cls, **kwargs):
- """Returns a test volume.
-
- This does not wait for volume creation to finish,
- so that multiple operations can happen on the
- Cinder server in parallel.
- """
- if 'size' not in kwargs:
- kwargs['size'] = CONF.volume.volume_size
-
- if 'imageRef' in kwargs:
- image = cls.os_primary.image_client_v2.show_image(
- kwargs['imageRef'])
- min_disk = image['min_disk']
- kwargs['size'] = max(kwargs['size'], min_disk)
-
- if 'name' not in kwargs:
- name = data_utils.rand_name(cls.__name__ + '-Volume')
- kwargs['name'] = name
-
- volume = cls.volumes_client.create_volume(**kwargs)['volume']
- cls.addClassResourceCleanup(
- cls.volumes_client.wait_for_resource_deletion, volume['id'])
- cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
- cls.volumes_client.delete_volume,
- volume['id'])
-
- return volume
-
- @decorators.idempotent_id('8976a11b-1ddc-49b6-b66f-8c26adf3fa9e')
- def test_create_from_image_multiple(self):
- """Create a handful of volumes from the same image at once.
-
- The purpose of this test is to stress volume drivers,
- image download, the image cache, etc., within Cinder.
- """
-
- img_uuid = CONF.compute.image_ref
-
- vols = []
- for v in range(0, 5):
- vols.append(self.create_volume_no_wait(imageRef=img_uuid))
-
- for v in vols:
- waiters.wait_for_volume_resource_status(self.volumes_client,
- v['id'],
- 'available')
-
-
class VolumeAndVolumeTypeFromImageTest(base.BaseVolumeAdminTest):
# needs AdminTest as superclass to manipulate volume_types
diff --git a/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py b/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py
new file mode 100644
index 0000000..10a79f0
--- /dev/null
+++ b/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py
@@ -0,0 +1,105 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+from cinder_tempest_plugin.api.volume import base
+
+CONF = config.CONF
+
+
+class CreateVolumesFromSnapshotTest(base.CreateMultipleResourceTest):
+
+ @decorators.idempotent_id('3b879ad1-d861-4ad3-b2c8-c89162e867c3')
+ def test_create_multiple_volume_from_snapshot(self):
+ """Create multiple volumes from a snapshot."""
+
+ volume = self.create_volume()
+ snapshot = self.create_snapshot(volume_id=volume['id'])
+ kwargs_create = {"'snapshot_id": snapshot['id'], "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromSourceVolumeTest(base.CreateMultipleResourceTest):
+
+ @decorators.idempotent_id('b4a250d1-3ffd-4727-a2f5-9d858b298558')
+ def test_create_multiple_volume_from_source_volume(self):
+ """Create multiple volumes from a source volume.
+
+ The purpose of this test is to check the synchronization
+ of driver clone method with simultaneous requests.
+ """
+
+ volume = self.create_volume()
+ kwargs_create = {"'source_volid": volume['id'], "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromBackupTest(base.CreateMultipleResourceTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(CreateVolumesFromBackupTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.backup:
+ raise cls.skipException("Cinder backup feature disabled")
+
+ @decorators.idempotent_id('9db67083-bf1a-486c-8f77-3778467f39a1')
+ def test_create_multiple_volume_from_backup(self):
+ """Create multiple volumes from a backup."""
+
+ volume = self.create_volume()
+ backup = self.create_backup(volume_id=volume['id'])
+ kwargs_create = {"'backup_id": backup['id'], "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromImageTest(base.CreateMultipleResourceTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(CreateVolumesFromImageTest, cls).skip_checks()
+ if not CONF.service_available.glance:
+ raise cls.skipException("Glance service is disabled")
+
+ @decorators.idempotent_id('8976a11b-1ddc-49b6-b66f-8c26adf3fa9e')
+ def test_create_from_image_multiple(self):
+ """Create a handful of volumes from the same image at once.
+
+ The purpose of this test is to stress volume drivers,
+ image download, the image cache, etc., within Cinder.
+ """
+
+ img_uuid = CONF.compute.image_ref
+
+ kwargs_create = {"'imageRef": img_uuid, "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
diff --git a/cinder_tempest_plugin/scenario/manager.py b/cinder_tempest_plugin/scenario/manager.py
index 3b25bb1..8598ade 100644
--- a/cinder_tempest_plugin/scenario/manager.py
+++ b/cinder_tempest_plugin/scenario/manager.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import contextlib
+
from oslo_log import log
from tempest.common import waiters
@@ -55,20 +57,25 @@
if item not in disks_list_before_attach][0]
return volume_name
+    @contextlib.contextmanager
+    def mount_dev_path(self, ssh_client, dev_name, mount_path):
+        """Mount /dev/<dev_name> on mount_path for the with-block.
+
+        A no-op when dev_name is None.
+        """
+        if dev_name is not None:
+            ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
+                                                               mount_path))
+            try:
+                yield
+            finally:
+                # Unmount even if a command inside the with-block fails.
+                ssh_client.exec_command('sudo umount %s' % mount_path)
+        else:
+            yield
+
def _get_file_md5(self, ip_address, filename, dev_name=None,
mount_path='/mnt', private_key=None, server=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
server=server)
- if dev_name is not None:
- ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
- mount_path))
-
- md5_sum = ssh_client.exec_command(
- 'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
- if dev_name is not None:
- ssh_client.exec_command('sudo umount %s' % mount_path)
+ with self.mount_dev_path(ssh_client, dev_name, mount_path):
+ md5_sum = ssh_client.exec_command(
+ 'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
return md5_sum
def _count_files(self, ip_address, dev_name=None, mount_path='/mnt',
@@ -76,12 +83,9 @@
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
server=server)
- if dev_name is not None:
- ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
- mount_path))
- count = ssh_client.exec_command('sudo ls -l %s | wc -l' % mount_path)
- if dev_name is not None:
- ssh_client.exec_command('sudo umount %s' % mount_path)
+ with self.mount_dev_path(ssh_client, dev_name, mount_path):
+ count = ssh_client.exec_command(
+ 'sudo ls -l %s | wc -l' % mount_path)
# We subtract 2 from the count since `wc -l` also includes the count
# of new line character and while creating the filesystem, a
# lost+found folder is also created
@@ -100,17 +104,13 @@
private_key=private_key,
server=server)
- if dev_name is not None:
- ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
- mount_path))
- ssh_client.exec_command(
- 'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
- (mount_path, filename))
- md5 = ssh_client.exec_command(
- 'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
- ssh_client.exec_command('sudo sync')
- if dev_name is not None:
- ssh_client.exec_command('sudo umount %s' % mount_path)
+ with self.mount_dev_path(ssh_client, dev_name, mount_path):
+ ssh_client.exec_command(
+ 'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
+ (mount_path, filename))
+ md5 = ssh_client.exec_command(
+ 'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
+ ssh_client.exec_command('sudo sync')
return md5
def get_md5_from_file(self, instance, instance_ip, filename,
@@ -125,6 +125,40 @@
server=instance)
return count, md5_sum
+ def write_data_to_device(self, ip_address, out_dev, in_dev='/dev/urandom',
+ bs=1024, count=100, private_key=None,
+ server=None, sha_sum=False):
+ ssh_client = self.get_remote_client(
+ ip_address, private_key=private_key, server=server)
+
+ # Write data to device
+ write_command = (
+ 'sudo dd bs=%(bs)s count=%(count)s if=%(in_dev)s of=%(out_dev)s '
+ '&& sudo dd bs=%(bs)s count=%(count)s if=%(out_dev)s' %
+ {'bs': str(bs), 'count': str(count), 'in_dev': in_dev,
+ 'out_dev': out_dev})
+ if sha_sum:
+ # If we want to read sha1sum instead of the device data
+ write_command += ' | sha1sum | head -c 40'
+ data = ssh_client.exec_command(write_command)
+
+ return data
+
+ def read_data_from_device(self, ip_address, in_dev, bs=1024, count=100,
+ private_key=None, server=None, sha_sum=False):
+ ssh_client = self.get_remote_client(
+ ip_address, private_key=private_key, server=server)
+
+ # Read data from device
+ read_command = ('sudo dd bs=%(bs)s count=%(count)s if=%(in_dev)s' %
+ {'bs': bs, 'count': count, 'in_dev': in_dev})
+ if sha_sum:
+ # If we want to read sha1sum instead of the device data
+ read_command += ' | sha1sum | head -c 40'
+ data = ssh_client.exec_command(read_command)
+
+ return data
+
def _attach_and_get_volume_device_name(self, server, volume, instance_ip,
private_key):
ssh_client = self.get_remote_client(
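
For orientation, a rough sketch of how write_data_to_device and
read_data_from_device pair up for a cross-attachment integrity check, as the
multiattach scenario test below does (server and volume setup elided; the
IPs, servers and device paths are placeholders, and the device name can
differ between the two servers):

    # Write 100 x 1024 bytes of random data through the first attachment,
    # capturing a sha1sum of the data read back rather than the raw bytes.
    checksum_written = self.write_data_to_device(
        instance_ip_1, '/dev/vdb',
        private_key=self.keypair['private_key'], server=server_1,
        sha_sum=True)

    # Read the same region back through the second attachment and compare.
    checksum_read = self.read_data_from_device(
        instance_ip_2, '/dev/vdb',
        private_key=self.keypair['private_key'], server=server_2,
        sha_sum=True)

    self.assertEqual(checksum_written, checksum_read)
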
diff --git a/cinder_tempest_plugin/scenario/test_snapshots.py b/cinder_tempest_plugin/scenario/test_snapshots.py
index 5a9611f..99e1057 100644
--- a/cinder_tempest_plugin/scenario/test_snapshots.py
+++ b/cinder_tempest_plugin/scenario/test_snapshots.py
@@ -36,7 +36,7 @@
1) Create an instance with ephemeral disk
2) Create a volume, attach it to the instance and create a filesystem
on it and mount it
- 3) Mount the volume, create a file and write data into it, Unmount it
+        3) Create a file, write data into it, and unmount it
4) create snapshot
5) repeat 3 and 4 two more times (simply creating 3 snapshots)
@@ -93,41 +93,21 @@
# Detach the volume
self.nova_volume_detach(server, volume)
- # Create volume from snapshot, attach it to instance and check file
- # and contents for snap1
- volume_snap_1 = self.create_volume(snapshot_id=snapshot1['id'])
- volume_device_name, __ = self._attach_and_get_volume_device_name(
- server, volume_snap_1, instance_ip, self.keypair['private_key'])
- count_snap_1, md5_file_1 = self.get_md5_from_file(
- server, instance_ip, 'file1', dev_name=volume_device_name)
- # Detach the volume
- self.nova_volume_detach(server, volume_snap_1)
+ snap_map = {1: snapshot1, 2: snapshot2, 3: snapshot3}
+ file_map = {1: file1_md5, 2: file2_md5, 3: file3_md5}
- self.assertEqual(count_snap_1, 1)
- self.assertEqual(file1_md5, md5_file_1)
+        # Loop three times to check the data integrity of all 3 snapshots
+        for i in range(1, 4):
+            # Create a volume from the snapshot, attach it to the instance
+            # and check the file count and contents for that snapshot
+ volume_snap = self.create_volume(snapshot_id=snap_map[i]['id'])
+ volume_device_name, __ = self._attach_and_get_volume_device_name(
+ server, volume_snap, instance_ip, self.keypair['private_key'])
+ count_snap, md5_file = self.get_md5_from_file(
+ server, instance_ip, 'file' + str(i),
+ dev_name=volume_device_name)
+ # Detach the volume
+ self.nova_volume_detach(server, volume_snap)
- # Create volume from snapshot, attach it to instance and check file
- # and contents for snap2
- volume_snap_2 = self.create_volume(snapshot_id=snapshot2['id'])
- volume_device_name, __ = self._attach_and_get_volume_device_name(
- server, volume_snap_2, instance_ip, self.keypair['private_key'])
- count_snap_2, md5_file_2 = self.get_md5_from_file(
- server, instance_ip, 'file2', dev_name=volume_device_name)
- # Detach the volume
- self.nova_volume_detach(server, volume_snap_2)
-
- self.assertEqual(count_snap_2, 2)
- self.assertEqual(file2_md5, md5_file_2)
-
- # Create volume from snapshot, attach it to instance and check file
- # and contents for snap3
- volume_snap_3 = self.create_volume(snapshot_id=snapshot3['id'])
- volume_device_name, __ = self._attach_and_get_volume_device_name(
- server, volume_snap_3, instance_ip, self.keypair['private_key'])
- count_snap_3, md5_file_3 = self.get_md5_from_file(
- server, instance_ip, 'file3', dev_name=volume_device_name)
- # Detach the volume
- self.nova_volume_detach(server, volume_snap_3)
-
- self.assertEqual(count_snap_3, 3)
- self.assertEqual(file3_md5, md5_file_3)
+ self.assertEqual(count_snap, i)
+ self.assertEqual(file_map[i], md5_file)
diff --git a/cinder_tempest_plugin/scenario/test_volume_multiattach.py b/cinder_tempest_plugin/scenario/test_volume_multiattach.py
new file mode 100644
index 0000000..235cb25
--- /dev/null
+++ b/cinder_tempest_plugin/scenario/test_volume_multiattach.py
@@ -0,0 +1,136 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions as lib_exc
+
+from cinder_tempest_plugin.scenario import manager
+from tempest.scenario import manager as tempest_manager
+
+CONF = config.CONF
+
+
+class VolumeMultiattachTests(manager.ScenarioTest,
+ tempest_manager.EncryptionScenarioTest):
+
+ compute_min_microversion = '2.60'
+ compute_max_microversion = 'latest'
+
+ def setUp(self):
+ super(VolumeMultiattachTests, self).setUp()
+ self.keypair = self.create_keypair()
+ self.security_group = self.create_security_group()
+
+ @classmethod
+ def skip_checks(cls):
+ super(VolumeMultiattachTests, cls).skip_checks()
+ if not CONF.compute_feature_enabled.volume_multiattach:
+ raise cls.skipException('Volume multi-attach is not available.')
+
+ def _verify_attachment(self, volume_id, server_id):
+ volume = self.volumes_client.show_volume(volume_id)['volume']
+ server_ids = (
+ [attachment['server_id'] for attachment in volume['attachments']])
+ self.assertIn(server_id, server_ids)
+
+ @decorators.idempotent_id('e6604b85-5280-4f7e-90b5-186248fd3423')
+ def test_multiattach_data_integrity(self):
+
+ # Create an instance
+ server_1 = self.create_server(
+ key_name=self.keypair['name'],
+ security_groups=[{'name': self.security_group['name']}])
+
+ # Create multiattach type
+ multiattach_vol_type = self.create_volume_type(
+ extra_specs={'multiattach': "<is> True"})
+
+ # Create a multiattach volume
+ volume = self.create_volume(volume_type=multiattach_vol_type['id'])
+
+ # Create encrypted volume
+ encrypted_volume = self.create_encrypted_volume(
+ 'luks', volume_type='luks')
+
+ # Create a normal volume
+ simple_volume = self.create_volume()
+
+        # Attach the normal and encrypted volumes. (These volumes are not
+        # used in the current test but emulate a real-world scenario where
+        # different types of volumes are attached to the server.)
+ self.attach_volume(server_1, simple_volume)
+ self.attach_volume(server_1, encrypted_volume)
+
+ instance_ip = self.get_server_ip(server_1)
+
+        # Attach volume to instance and find its device name (e.g. /dev/vdb)
+ volume_device_name_inst_1, __ = (
+ self._attach_and_get_volume_device_name(
+ server_1, volume, instance_ip, self.keypair['private_key']))
+
+ out_device = '/dev/' + volume_device_name_inst_1
+
+ # This data is written from the first server and will be used to
+ # verify when reading data from second server
+ device_data_inst_1 = self.write_data_to_device(
+ instance_ip, out_device, private_key=self.keypair['private_key'],
+ server=server_1, sha_sum=True)
+
+ # Create another instance
+ server_2 = self.create_server(
+ key_name=self.keypair['name'],
+ security_groups=[{'name': self.security_group['name']}])
+
+ instance_2_ip = self.get_server_ip(server_2)
+
+        # Attach volume to instance and find its device name (e.g. /dev/vdc)
+ volume_device_name_inst_2, __ = (
+ self._attach_and_get_volume_device_name(
+ server_2, volume, instance_2_ip, self.keypair['private_key']))
+
+ in_device = '/dev/' + volume_device_name_inst_2
+
+ # Read data from volume device
+ device_data_inst_2 = self.read_data_from_device(
+ instance_2_ip, in_device, private_key=self.keypair['private_key'],
+ server=server_2, sha_sum=True)
+
+ self._verify_attachment(volume['id'], server_1['id'])
+ self._verify_attachment(volume['id'], server_2['id'])
+ self.assertEqual(device_data_inst_1, device_data_inst_2)
+
+ @decorators.idempotent_id('53514da8-f49c-4cda-8792-ff4a2fa69977')
+ def test_volume_multiattach_same_host_negative(self):
+ # Create an instance
+ server = self.create_server(
+ key_name=self.keypair['name'],
+ security_groups=[{'name': self.security_group['name']}])
+
+ # Create multiattach type
+ multiattach_vol_type = self.create_volume_type(
+ extra_specs={'multiattach': "<is> True"})
+
+ # Create an empty volume
+ volume = self.create_volume(volume_type=multiattach_vol_type['id'])
+
+ # Attach volume to instance
+ attachment = self.attach_volume(server, volume)
+
+ self.assertEqual(server['id'], attachment['serverId'])
+
+ # Try attaching the volume to the same instance
+ self.assertRaises(lib_exc.BadRequest, self.attach_volume, server,
+ volume)