Merge "Add multiattach tests"
diff --git a/.zuul.yaml b/.zuul.yaml
index 89d4277..8738ae2 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -6,17 +6,21 @@
jobs:
- cinder-tempest-plugin-lvm-multiattach
- cinder-tempest-plugin-lvm-lio-barbican
- - cinder-tempest-plugin-lvm-lio-barbican-centos-8-stream:
+ - cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream:
voting: false
- cinder-tempest-plugin-lvm-tgt-barbican
- nova-ceph-multistore:
voting: false
- cinder-tempest-plugin-cbak-ceph
- cinder-tempest-plugin-cbak-s3
+ # As per the Tempest "Stable Branch Support Policy", Tempest will only
+ # support the "Maintained" stable branches and not the "Extended Maintenance"
+ # branches; the same applies to all tempest plugins. Only jobs for the
+ # current releasable ("Maintained") stable branches should be listed
+ # here.
+ - cinder-tempest-plugin-basic-yoga
+ - cinder-tempest-plugin-basic-xena
- cinder-tempest-plugin-basic-wallaby
- - cinder-tempest-plugin-basic-victoria
- - cinder-tempest-plugin-basic-ussuri
- - cinder-tempest-plugin-basic-train
# Set this job to voting once we have some actual tests to run
- cinder-tempest-plugin-protection-functional:
voting: false
@@ -27,10 +31,9 @@
- cinder-tempest-plugin-cbak-ceph
experimental:
jobs:
+ - cinder-tempest-plugin-cbak-ceph-yoga
+ - cinder-tempest-plugin-cbak-ceph-xena
- cinder-tempest-plugin-cbak-ceph-wallaby
- - cinder-tempest-plugin-cbak-ceph-victoria
- - cinder-tempest-plugin-cbak-ceph-ussuri
- - cinder-tempest-plugin-cbak-ceph-train
- job:
name: cinder-tempest-plugin-protection-functional
@@ -103,6 +106,8 @@
# FIXME: 'creator' should be re-added by the barbican devstack plugin
# but the value below overrides everything.
tempest_roles: member,creator
+ volume:
+ build_timeout: 300
volume-feature-enabled:
volume_revert: True
devstack_services:
@@ -119,7 +124,7 @@
description: |
This is a base job for lvm with lio & tgt targets
with cinderlib tests.
- branches: ^(?!stable/(ocata|pike|queens|rocky|stein)).*$
+ branches: ^(?!stable/(ocata|pike|queens|rocky|stein|train|ussuri|victoria)).*$
parent: cinder-tempest-plugin-lvm-barbican-base-abstract
roles:
- zuul: opendev.org/openstack/cinderlib
@@ -137,6 +142,33 @@
name: cinder-tempest-plugin-lvm-barbican-base
description: |
This is a base job for lvm with lio & tgt targets
+ with cinderlib tests, to run on stable/train through stable/victoria.
+ Those stable branches use tempest 26.1.0 (which is set in the
+ devstack stackrc file), so we must use the compatible
+ cinder-tempest-plugin version 1.3.0.
+ branches:
+ - stable/train
+ - stable/ussuri
+ - stable/victoria
+ parent: cinder-tempest-plugin-lvm-barbican-base-abstract
+ roles:
+ - zuul: opendev.org/openstack/cinderlib
+ required-projects:
+ - opendev.org/openstack/cinderlib
+ - name: opendev.org/openstack/cinder-tempest-plugin
+ override-checkout: 1.3.0
+ run: playbooks/tempest-and-cinderlib-run.yaml
+ # Required to collect the tox-based logs of the cinderlib functional tests
+ post-run: playbooks/post-cinderlib.yaml
+ vars:
+ fetch_subunit_output_additional_dirs:
+ - "{{ ansible_user_dir }}/{{ zuul.projects['opendev.org/openstack/cinderlib'].src_dir }}"
+ tempest_test_exclude_list: '{{ ansible_user_dir }}/{{ zuul.projects["opendev.org/openstack/tempest"].src_dir }}/tools/tempest-integrated-gate-storage-exclude-list.txt'
+
+- job:
+ name: cinder-tempest-plugin-lvm-barbican-base
+ description: |
+ This is a base job for lvm with lio & tgt targets
branches: ^(?=stable/(ocata|pike|queens|rocky|stein)).*$
parent: cinder-tempest-plugin-lvm-barbican-base-abstract
required-projects:
@@ -152,6 +184,7 @@
Integration tests that runs with the ceph devstack plugin, py3
and enable the backup service.
vars:
+ configure_swap_size: 4096
devstack_local_conf:
test-config:
$TEMPEST_CONFIG:
@@ -161,29 +194,23 @@
c-bak: true
- job:
+ name: cinder-tempest-plugin-cbak-ceph-yoga
+ parent: cinder-tempest-plugin-cbak-ceph
+ nodeset: openstack-single-node-focal
+ override-checkout: stable/yoga
+
+- job:
+ name: cinder-tempest-plugin-cbak-ceph-xena
+ parent: cinder-tempest-plugin-cbak-ceph
+ nodeset: openstack-single-node-focal
+ override-checkout: stable/xena
+
+- job:
name: cinder-tempest-plugin-cbak-ceph-wallaby
parent: cinder-tempest-plugin-cbak-ceph
nodeset: openstack-single-node-focal
override-checkout: stable/wallaby
-- job:
- name: cinder-tempest-plugin-cbak-ceph-victoria
- parent: cinder-tempest-plugin-cbak-ceph
- nodeset: openstack-single-node-focal
- override-checkout: stable/victoria
-
-- job:
- name: cinder-tempest-plugin-cbak-ceph-ussuri
- parent: cinder-tempest-plugin-cbak-ceph
- nodeset: openstack-single-node-bionic
- override-checkout: stable/ussuri
-
-- job:
- name: cinder-tempest-plugin-cbak-ceph-train
- parent: cinder-tempest-plugin-cbak-ceph
- nodeset: openstack-single-node-bionic
- override-checkout: stable/train
-
# variant for pre-Ussuri branches (no volume revert for Ceph),
# should this job be used on those branches
- job:
@@ -212,7 +239,15 @@
nodeset: devstack-single-node-centos-8-stream
description: |
This job configures Cinder with LVM, LIO, barbican and
- runs tempest tests and cinderlib tests on CentOS 8.
+ runs tempest tests and cinderlib tests on CentOS Stream 8.
+
+- job:
+ name: cinder-tempest-plugin-lvm-lio-barbican-centos-9-stream
+ parent: cinder-tempest-plugin-lvm-lio-barbican
+ nodeset: devstack-single-node-centos-9-stream
+ description: |
+ This job configures Cinder with LVM, LIO, barbican and
+ runs tempest tests and cinderlib tests on CentOS Stream 9.
- job:
name: cinder-tempest-plugin-lvm-tgt-barbican
@@ -260,28 +295,19 @@
- ^releasenotes/.*$
- job:
+ name: cinder-tempest-plugin-basic-yoga
+ parent: cinder-tempest-plugin-basic
+ nodeset: openstack-single-node-focal
+ override-checkout: stable/yoga
+
+- job:
+ name: cinder-tempest-plugin-basic-xena
+ parent: cinder-tempest-plugin-basic
+ nodeset: openstack-single-node-focal
+ override-checkout: stable/xena
+
+- job:
name: cinder-tempest-plugin-basic-wallaby
parent: cinder-tempest-plugin-basic
nodeset: openstack-single-node-focal
override-checkout: stable/wallaby
-
-- job:
- name: cinder-tempest-plugin-basic-victoria
- parent: cinder-tempest-plugin-basic
- nodeset: openstack-single-node-focal
- override-checkout: stable/victoria
-
-- job:
- name: cinder-tempest-plugin-basic-ussuri
- parent: cinder-tempest-plugin-basic
- nodeset: openstack-single-node-bionic
- override-checkout: stable/ussuri
-
-- job:
- name: cinder-tempest-plugin-basic-train
- parent: cinder-tempest-plugin-basic
- nodeset: openstack-single-node-bionic
- override-checkout: stable/train
- vars:
- devstack_localrc:
- USE_PYTHON3: True
diff --git a/README.rst b/README.rst
index 0254938..3fd608a 100644
--- a/README.rst
+++ b/README.rst
@@ -35,7 +35,7 @@
LOG_COLOR=False
RECLONE=yes
ENABLED_SERVICES=c-api,c-bak,c-sch,c-vol,cinder,dstat,g-api,g-reg,key
- ENABLED_SERVICES+=,mysql,n-api,n-cond,n-cpu,n-crt,n-sch,rabbit,tempest
+ ENABLED_SERVICES+=,mysql,n-api,n-cond,n-cpu,n-crt,n-sch,rabbit,tempest,placement-api
CINDER_ENABLED_BACKENDS=lvmdriver-1
CINDER_DEFAULT_VOLUME_TYPE=lvmdriver-1
CINDER_VOLUME_CLEAR=none
diff --git a/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
index d1fa730..e5ded52 100644
--- a/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
+++ b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.common import waiters
from tempest import config
from tempest.lib import decorators
from tempest.lib import exceptions
@@ -41,19 +40,10 @@
def test_backup_crossproject_admin_negative(self):
# create vol as user
- volume = self.volumes_client.create_volume(
- size=CONF.volume.volume_size)['volume']
- waiters.wait_for_volume_resource_status(
- self.volumes_client,
- volume['id'], 'available')
+ volume = self.create_volume(size=CONF.volume.volume_size)
# create backup as user
- backup = self.backups_client.create_backup(
- volume_id=volume['id'])['backup']
- waiters.wait_for_volume_resource_status(
- self.backups_client,
- backup['id'], 'available')
-
+ self.create_backup(volume_id=volume['id'])
# try to create incremental backup as admin
self.assertRaises(
exceptions.BadRequest, self.admin_backups_client.create_backup,
@@ -63,18 +53,12 @@
def test_backup_crossproject_user_negative(self):
# create vol as user
- volume = self.volumes_client.create_volume(
- size=CONF.volume.volume_size)['volume']
- waiters.wait_for_volume_resource_status(
- self.volumes_client,
- volume['id'], 'available')
+ volume = self.create_volume(size=CONF.volume.volume_size)
# create backup as admin
- backup = self.admin_backups_client.create_backup(
- volume_id=volume['id'])['backup']
- waiters.wait_for_volume_resource_status(
- self.admin_backups_client,
- backup['id'], 'available')
+
+ self.create_backup(volume_id=volume['id'],
+ backup_client=self.admin_backups_client)
# try to create incremental backup as user
self.assertRaises(
@@ -85,25 +69,14 @@
def test_incremental_backup_respective_parents(self):
# create vol as user
- volume = self.volumes_client.create_volume(
- size=CONF.volume.volume_size)['volume']
- waiters.wait_for_volume_resource_status(
- self.volumes_client,
- volume['id'], 'available')
+ volume = self.create_volume(size=CONF.volume.volume_size)
# create backup as admin
- backup_adm = self.admin_backups_client.create_backup(
- volume_id=volume['id'])['backup']
- waiters.wait_for_volume_resource_status(
- self.admin_backups_client,
- backup_adm['id'], 'available')
+ backup_adm = self.create_backup(
+ volume_id=volume['id'], backup_client=self.admin_backups_client)
# create backup as user
- backup_usr = self.backups_client.create_backup(
- volume_id=volume['id'])['backup']
- waiters.wait_for_volume_resource_status(
- self.backups_client,
- backup_usr['id'], 'available')
+ backup_usr = self.create_backup(volume_id=volume['id'])
# refresh admin backup and assert no child backups
backup_adm = self.admin_backups_client.show_backup(
@@ -111,11 +84,8 @@
self.assertFalse(backup_adm['has_dependent_backups'])
# create incremental backup as admin
- backup_adm_inc = self.admin_backups_client.create_backup(
- volume_id=volume['id'], incremental=True)['backup']
- waiters.wait_for_volume_resource_status(
- self.admin_backups_client,
- backup_adm_inc['id'], 'available')
+ self.create_backup(volume_id=volume['id'], incremental=True,
+ backup_client=self.admin_backups_client)
# refresh user backup and assert no child backups
backup_usr = self.backups_client.show_backup(
@@ -128,11 +98,8 @@
self.assertTrue(backup_adm['has_dependent_backups'])
# create incremental backup as user
- backup_usr_inc = self.backups_client.create_backup(
- volume_id=volume['id'], incremental=True)['backup']
- waiters.wait_for_volume_resource_status(
- self.backups_client,
- backup_usr_inc['id'], 'available')
+ self.create_backup(volume_id=volume['id'],
+ incremental=True)
# refresh user backup and assert it has child backups
backup_usr = self.backups_client.show_backup(
diff --git a/cinder_tempest_plugin/api/volume/base.py b/cinder_tempest_plugin/api/volume/base.py
index 418fd33..f948a93 100644
--- a/cinder_tempest_plugin/api/volume/base.py
+++ b/cinder_tempest_plugin/api/volume/base.py
@@ -13,10 +13,10 @@
# License for the specific language governing permissions and limitations
# under the License.
-from tempest.api.volume import api_microversion_fixture
from tempest.common import compute
from tempest.common import waiters
from tempest import config
+from tempest.lib.common import api_microversion_fixture
from tempest.lib.common import api_version_utils
from tempest.lib.common.utils import data_utils
from tempest.lib.common.utils import test_utils
@@ -58,7 +58,7 @@
def setUp(self):
super(BaseVolumeTest, self).setUp()
self.useFixture(api_microversion_fixture.APIMicroversionFixture(
- self.request_microversion))
+ volume_microversion=self.request_microversion))
@classmethod
def resource_setup(cls):
@@ -72,7 +72,7 @@
def create_volume(cls, wait_until='available', **kwargs):
"""Wrapper utility that returns a test volume.
- :param wait_until: wait till volume status.
+ :param wait_until: volume status to wait for; None means do not wait.
"""
if 'size' not in kwargs:
kwargs['size'] = CONF.volume.volume_size
@@ -93,8 +93,9 @@
cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
cls.volumes_client.delete_volume,
volume['id'])
- waiters.wait_for_volume_resource_status(cls.volumes_client,
- volume['id'], wait_until)
+ if wait_until:
+ waiters.wait_for_volume_resource_status(cls.volumes_client,
+ volume['id'], wait_until)
return volume
@classmethod
@@ -199,3 +200,19 @@
cls.admin_volume_types_client.delete_volume_type, type_id)
test_utils.call_and_ignore_notfound_exc(
cls.admin_volume_types_client.wait_for_resource_deletion, type_id)
+
+
+class CreateMultipleResourceTest(BaseVolumeTest):
+
+ def _create_multiple_resource(self, callback, repeat_count=5,
+ **kwargs):
+ """Call callback(**kwargs) repeat_count times; return the ids."""
+ res = []
+ for _ in range(repeat_count):
+ res.append(callback(**kwargs)['id'])
+ return res
+
+ def _wait_for_multiple_resources(self, callback, wait_list, **kwargs):
+ """Call the waiter callback once per id in wait_list."""
+ for r in wait_list:
+ callback(resource_id=r, **kwargs)
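
For reference, the new tests added later in this patch combine the two helpers above roughly as follows (an illustrative sketch only, condensed from those tests; it assumes tempest's waiters module is imported and that snapshot is a previously created snapshot):

    # Kick off several create_volume() calls without waiting on any of them ...
    ids = self._create_multiple_resource(
        self.create_volume, snapshot_id=snapshot['id'], wait_until=None)
    # ... then poll each created volume until it reaches 'available'.
    self._wait_for_multiple_resources(
        waiters.wait_for_volume_resource_status, ids,
        client=self.volumes_client, status='available')
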
diff --git a/cinder_tempest_plugin/api/volume/test_create_from_image.py b/cinder_tempest_plugin/api/volume/test_create_from_image.py
index dc296c0..acb1943 100644
--- a/cinder_tempest_plugin/api/volume/test_create_from_image.py
+++ b/cinder_tempest_plugin/api/volume/test_create_from_image.py
@@ -23,64 +23,6 @@
CONF = config.CONF
-class VolumeFromImageTest(base.BaseVolumeTest):
-
- @classmethod
- def skip_checks(cls):
- super(VolumeFromImageTest, cls).skip_checks()
- if not CONF.service_available.glance:
- raise cls.skipException("Glance service is disabled")
-
- @classmethod
- def create_volume_no_wait(cls, **kwargs):
- """Returns a test volume.
-
- This does not wait for volume creation to finish,
- so that multiple operations can happen on the
- Cinder server in parallel.
- """
- if 'size' not in kwargs:
- kwargs['size'] = CONF.volume.volume_size
-
- if 'imageRef' in kwargs:
- image = cls.os_primary.image_client_v2.show_image(
- kwargs['imageRef'])
- min_disk = image['min_disk']
- kwargs['size'] = max(kwargs['size'], min_disk)
-
- if 'name' not in kwargs:
- name = data_utils.rand_name(cls.__name__ + '-Volume')
- kwargs['name'] = name
-
- volume = cls.volumes_client.create_volume(**kwargs)['volume']
- cls.addClassResourceCleanup(
- cls.volumes_client.wait_for_resource_deletion, volume['id'])
- cls.addClassResourceCleanup(test_utils.call_and_ignore_notfound_exc,
- cls.volumes_client.delete_volume,
- volume['id'])
-
- return volume
-
- @decorators.idempotent_id('8976a11b-1ddc-49b6-b66f-8c26adf3fa9e')
- def test_create_from_image_multiple(self):
- """Create a handful of volumes from the same image at once.
-
- The purpose of this test is to stress volume drivers,
- image download, the image cache, etc., within Cinder.
- """
-
- img_uuid = CONF.compute.image_ref
-
- vols = []
- for v in range(0, 5):
- vols.append(self.create_volume_no_wait(imageRef=img_uuid))
-
- for v in vols:
- waiters.wait_for_volume_resource_status(self.volumes_client,
- v['id'],
- 'available')
-
-
class VolumeAndVolumeTypeFromImageTest(base.BaseVolumeAdminTest):
# needs AdminTest as superclass to manipulate volume_types
diff --git a/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py b/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py
new file mode 100644
index 0000000..10a79f0
--- /dev/null
+++ b/cinder_tempest_plugin/api/volume/test_multiple_volume_from_resource.py
@@ -0,0 +1,105 @@
+# Copyright 2022 Red Hat, Inc.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+
+from cinder_tempest_plugin.api.volume import base
+
+CONF = config.CONF
+
+
+class CreateVolumesFromSnapshotTest(base.CreateMultipleResourceTest):
+
+ @decorators.idempotent_id('3b879ad1-d861-4ad3-b2c8-c89162e867c3')
+ def test_create_multiple_volume_from_snapshot(self):
+ """Create multiple volumes from a snapshot."""
+
+ volume = self.create_volume()
+ snapshot = self.create_snapshot(volume_id=volume['id'])
+ kwargs_create = {"snapshot_id": snapshot['id'], "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromSourceVolumeTest(base.CreateMultipleResourceTest):
+
+ @decorators.idempotent_id('b4a250d1-3ffd-4727-a2f5-9d858b298558')
+ def test_create_multiple_volume_from_source_volume(self):
+ """Create multiple volumes from a source volume.
+
+ The purpose of this test is to check the synchronization
+ of driver clone method with simultaneous requests.
+ """
+
+ volume = self.create_volume()
+ kwargs_create = {"source_volid": volume['id'], "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromBackupTest(base.CreateMultipleResourceTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(CreateVolumesFromBackupTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.backup:
+ raise cls.skipException("Cinder backup feature disabled")
+
+ @decorators.idempotent_id('9db67083-bf1a-486c-8f77-3778467f39a1')
+ def test_create_multiple_volume_from_backup(self):
+ """Create multiple volumes from a backup."""
+
+ volume = self.create_volume()
+ backup = self.create_backup(volume_id=volume['id'])
+ kwargs_create = {"backup_id": backup['id'], "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
+
+
+class CreateVolumesFromImageTest(base.CreateMultipleResourceTest):
+
+ @classmethod
+ def skip_checks(cls):
+ super(CreateVolumesFromImageTest, cls).skip_checks()
+ if not CONF.service_available.glance:
+ raise cls.skipException("Glance service is disabled")
+
+ @decorators.idempotent_id('8976a11b-1ddc-49b6-b66f-8c26adf3fa9e')
+ def test_create_from_image_multiple(self):
+ """Create a handful of volumes from the same image at once.
+
+ The purpose of this test is to stress volume drivers,
+ image download, the image cache, etc., within Cinder.
+ """
+
+ img_uuid = CONF.compute.image_ref
+
+ kwargs_create = {"imageRef": img_uuid, "wait_until": None}
+ res = self._create_multiple_resource(self.create_volume,
+ **kwargs_create)
+ kwargs_wait = {"client": self.volumes_client, "status": "available"}
+ self._wait_for_multiple_resources(
+ waiters.wait_for_volume_resource_status, res, **kwargs_wait)
diff --git a/cinder_tempest_plugin/api/volume/test_volume_unicode.py b/cinder_tempest_plugin/api/volume/test_volume_unicode.py
index ff6473a..026271b 100644
--- a/cinder_tempest_plugin/api/volume/test_volume_unicode.py
+++ b/cinder_tempest_plugin/api/volume/test_volume_unicode.py
@@ -72,13 +72,13 @@
@decorators.idempotent_id('332be44d-5418-4fb3-a8f0-a3587de6929f')
def test_snapshot_create_volume_description_non_ascii_code(self):
# Create a volume with non-ascii description
- description = u'\u05e7\u05d9\u05d9\u05e4\u05e9'
+ description = '\u05e7\u05d9\u05d9\u05e4\u05e9'
volume = self.create_volume(description=description)
vol_info = self.volumes_client.show_volume(volume['id'])['volume']
self.assertEqual(description, vol_info['description'])
# Create a snapshot with different non-ascii description
- description = u'\u4e2d\u56fd\u793e\u533a'
+ description = '\u4e2d\u56fd\u793e\u533a'
snapshot = self.create_snapshot(volume['id'], description=description)
snapshot_info = self.snapshots_client.show_snapshot(
snapshot['id'])['snapshot']
diff --git a/cinder_tempest_plugin/scenario/manager.py b/cinder_tempest_plugin/scenario/manager.py
index a2b5c6e..8598ade 100644
--- a/cinder_tempest_plugin/scenario/manager.py
+++ b/cinder_tempest_plugin/scenario/manager.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import contextlib
+
from oslo_log import log
from tempest.common import waiters
@@ -55,20 +57,25 @@
if item not in disks_list_before_attach][0]
return volume_name
+ @contextlib.contextmanager
+ def mount_dev_path(self, ssh_client, dev_name, mount_path):
+ """Mount /dev/<dev_name> at mount_path around the wrapped block.
+
+ No-op when dev_name is None; otherwise the device is unmounted
+ again once the block finishes.
+ """
+ if dev_name is not None:
+ ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
+ mount_path))
+ yield
+ ssh_client.exec_command('sudo umount %s' % mount_path)
+ else:
+ yield
+
def _get_file_md5(self, ip_address, filename, dev_name=None,
mount_path='/mnt', private_key=None, server=None):
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
server=server)
- if dev_name is not None:
- ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
- mount_path))
-
- md5_sum = ssh_client.exec_command(
- 'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
- if dev_name is not None:
- ssh_client.exec_command('sudo umount %s' % mount_path)
+ with self.mount_dev_path(ssh_client, dev_name, mount_path):
+ md5_sum = ssh_client.exec_command(
+ 'sudo md5sum %s/%s|cut -c 1-32' % (mount_path, filename))
return md5_sum
def _count_files(self, ip_address, dev_name=None, mount_path='/mnt',
@@ -76,12 +83,9 @@
ssh_client = self.get_remote_client(ip_address,
private_key=private_key,
server=server)
- if dev_name is not None:
- ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
- mount_path))
- count = ssh_client.exec_command('sudo ls -l %s | wc -l' % mount_path)
- if dev_name is not None:
- ssh_client.exec_command('sudo umount %s' % mount_path)
+ with self.mount_dev_path(ssh_client, dev_name, mount_path):
+ count = ssh_client.exec_command(
+ 'sudo ls -l %s | wc -l' % mount_path)
# We subtract 2 from the count since `wc -l` also includes the count
# of new line character and while creating the filesystem, a
# lost+found folder is also created
@@ -100,17 +104,13 @@
private_key=private_key,
server=server)
- if dev_name is not None:
- ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
- mount_path))
- ssh_client.exec_command(
- 'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
- (mount_path, filename))
- md5 = ssh_client.exec_command(
- 'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
- ssh_client.exec_command('sudo sync')
- if dev_name is not None:
- ssh_client.exec_command('sudo umount %s' % mount_path)
+ with self.mount_dev_path(ssh_client, dev_name, mount_path):
+ ssh_client.exec_command(
+ 'sudo dd bs=1024 count=100 if=/dev/urandom of=/%s/%s' %
+ (mount_path, filename))
+ md5 = ssh_client.exec_command(
+ 'sudo md5sum -b %s/%s|cut -c 1-32' % (mount_path, filename))
+ ssh_client.exec_command('sudo sync')
return md5
def get_md5_from_file(self, instance, instance_ip, filename,
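
For reference, the mount_dev_path() context manager introduced above wraps the mount/umount bracketing that _get_file_md5, _count_files and the file-writing helper previously duplicated. A minimal usage sketch (illustrative only; ip_address, dev_name, private_key and server come from the calling test):

    ssh_client = self.get_remote_client(ip_address,
                                        private_key=private_key,
                                        server=server)
    with self.mount_dev_path(ssh_client, dev_name, '/mnt'):
        # Commands run here see the volume mounted at /mnt; if dev_name is
        # None nothing is mounted, and otherwise the device is unmounted
        # again after the block.
        count = ssh_client.exec_command('sudo ls -l /mnt | wc -l')
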
diff --git a/cinder_tempest_plugin/scenario/test_snapshots.py b/cinder_tempest_plugin/scenario/test_snapshots.py
index 5a9611f..99e1057 100644
--- a/cinder_tempest_plugin/scenario/test_snapshots.py
+++ b/cinder_tempest_plugin/scenario/test_snapshots.py
@@ -36,7 +36,7 @@
1) Create an instance with ephemeral disk
2) Create a volume, attach it to the instance and create a filesystem
on it and mount it
- 3) Mount the volume, create a file and write data into it, Unmount it
+ 3) Create a file and write data into it, then unmount it
4) create snapshot
5) repeat 3 and 4 two more times (simply creating 3 snapshots)
@@ -93,41 +93,21 @@
# Detach the volume
self.nova_volume_detach(server, volume)
- # Create volume from snapshot, attach it to instance and check file
- # and contents for snap1
- volume_snap_1 = self.create_volume(snapshot_id=snapshot1['id'])
- volume_device_name, __ = self._attach_and_get_volume_device_name(
- server, volume_snap_1, instance_ip, self.keypair['private_key'])
- count_snap_1, md5_file_1 = self.get_md5_from_file(
- server, instance_ip, 'file1', dev_name=volume_device_name)
- # Detach the volume
- self.nova_volume_detach(server, volume_snap_1)
+ snap_map = {1: snapshot1, 2: snapshot2, 3: snapshot3}
+ file_map = {1: file1_md5, 2: file2_md5, 3: file3_md5}
- self.assertEqual(count_snap_1, 1)
- self.assertEqual(file1_md5, md5_file_1)
+ # Loop three times to check the data integrity of all 3 snapshots
+ for i in range(1, 4):
+ # Create a volume from the snapshot, attach it to the instance and
+ # check the file count and contents for that snapshot
+ volume_snap = self.create_volume(snapshot_id=snap_map[i]['id'])
+ volume_device_name, __ = self._attach_and_get_volume_device_name(
+ server, volume_snap, instance_ip, self.keypair['private_key'])
+ count_snap, md5_file = self.get_md5_from_file(
+ server, instance_ip, 'file' + str(i),
+ dev_name=volume_device_name)
+ # Detach the volume
+ self.nova_volume_detach(server, volume_snap)
- # Create volume from snapshot, attach it to instance and check file
- # and contents for snap2
- volume_snap_2 = self.create_volume(snapshot_id=snapshot2['id'])
- volume_device_name, __ = self._attach_and_get_volume_device_name(
- server, volume_snap_2, instance_ip, self.keypair['private_key'])
- count_snap_2, md5_file_2 = self.get_md5_from_file(
- server, instance_ip, 'file2', dev_name=volume_device_name)
- # Detach the volume
- self.nova_volume_detach(server, volume_snap_2)
-
- self.assertEqual(count_snap_2, 2)
- self.assertEqual(file2_md5, md5_file_2)
-
- # Create volume from snapshot, attach it to instance and check file
- # and contents for snap3
- volume_snap_3 = self.create_volume(snapshot_id=snapshot3['id'])
- volume_device_name, __ = self._attach_and_get_volume_device_name(
- server, volume_snap_3, instance_ip, self.keypair['private_key'])
- count_snap_3, md5_file_3 = self.get_md5_from_file(
- server, instance_ip, 'file3', dev_name=volume_device_name)
- # Detach the volume
- self.nova_volume_detach(server, volume_snap_3)
-
- self.assertEqual(count_snap_3, 3)
- self.assertEqual(file3_md5, md5_file_3)
+ self.assertEqual(count_snap, i)
+ self.assertEqual(file_map[i], md5_file)
diff --git a/cinder_tempest_plugin/services/consistencygroups_client.py b/cinder_tempest_plugin/services/consistencygroups_client.py
index a29a90a..cea99ed 100644
--- a/cinder_tempest_plugin/services/consistencygroups_client.py
+++ b/cinder_tempest_plugin/services/consistencygroups_client.py
@@ -14,10 +14,10 @@
# License for the specific language governing permissions and limitations
# under the License.
+import http.client as http_client
import time
from oslo_serialization import jsonutils as json
-from six.moves import http_client
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
diff --git a/requirements.txt b/requirements.txt
index 40ef3a4..4d75108 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,6 +4,5 @@
pbr!=2.1.0,>=2.0.0 # Apache-2.0
oslo.config>=5.1.0 # Apache-2.0
-six>=1.10.0 # MIT
oslo.serialization!=2.19.1,>=2.18.0 # Apache-2.0
tempest>=27.0.0 # Apache-2.0
diff --git a/setup.cfg b/setup.cfg
index 7866a06..3b246b5 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,11 +1,12 @@
[metadata]
name = cinder-tempest-plugin
summary = Tempest plugin tests for Cinder.
-description-file =
+description_file =
README.rst
author = OpenStack
-author-email = openstack-discuss@lists.openstack.org
-home-page = http://www.openstack.org/
+author_email = openstack-discuss@lists.openstack.org
+home_page = http://www.openstack.org/
+python_requires = >=3.6
classifier =
Environment :: OpenStack
Intended Audience :: Information Technology
@@ -13,10 +14,12 @@
License :: OSI Approved :: Apache Software License
Operating System :: POSIX :: Linux
Programming Language :: Python
+ Programming Language :: Python :: 3 :: Only
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
+ Programming Language :: Python :: 3.9
[files]
packages =