Merge "[goal] Migrate cinder-tempest-plugin jobs to focal"
diff --git a/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
new file mode 100644
index 0000000..d1fa730
--- /dev/null
+++ b/cinder_tempest_plugin/api/volume/admin/test_volume_backup.py
@@ -0,0 +1,140 @@
+# Copyright (C) 2020 Canonical Ltd.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import waiters
+from tempest import config
+from tempest.lib import decorators
+from tempest.lib import exceptions
+
+from cinder_tempest_plugin.api.volume import base
+
+CONF = config.CONF
+
+
+class VolumesBackupsTest(base.BaseVolumeAdminTest):
+ @classmethod
+ def setup_clients(cls):
+ super(VolumesBackupsTest, cls).setup_clients()
+ cls.admin_volume_client = cls.os_admin.volumes_client_latest
+ cls.backups_client = cls.os_primary.backups_client_latest
+ cls.volumes_client = cls.os_primary.volumes_client_latest
+
+ @classmethod
+ def skip_checks(cls):
+ super(VolumesBackupsTest, cls).skip_checks()
+ if not CONF.volume_feature_enabled.backup:
+ raise cls.skipException("Cinder backup feature disabled")
+
+ @decorators.idempotent_id('2daadb2e-409a-4ede-a6ce-6002ec324372')
+ def test_backup_crossproject_admin_negative(self):
+
+ # create vol as user
+ volume = self.volumes_client.create_volume(
+ size=CONF.volume.volume_size)['volume']
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client,
+ volume['id'], 'available')
+
+ # create backup as user
+ backup = self.backups_client.create_backup(
+ volume_id=volume['id'])['backup']
+ waiters.wait_for_volume_resource_status(
+ self.backups_client,
+ backup['id'], 'available')
+
+ # try to create incremental backup as admin
+ self.assertRaises(
+ exceptions.BadRequest, self.admin_backups_client.create_backup,
+ volume_id=volume['id'], incremental=True)
+
+ @decorators.idempotent_id('b9feb593-5809-4207-90d3-28e627730f13')
+ def test_backup_crossproject_user_negative(self):
+
+ # create vol as user
+ volume = self.volumes_client.create_volume(
+ size=CONF.volume.volume_size)['volume']
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client,
+ volume['id'], 'available')
+
+ # create backup as admin
+ backup = self.admin_backups_client.create_backup(
+ volume_id=volume['id'])['backup']
+ waiters.wait_for_volume_resource_status(
+ self.admin_backups_client,
+ backup['id'], 'available')
+
+ # try to create incremental backup as user
+ self.assertRaises(
+ exceptions.BadRequest, self.backups_client.create_backup,
+ volume_id=volume['id'], incremental=True)
+
+ @decorators.idempotent_id('ce15f528-bfc1-492d-81db-b6168b631587')
+ def test_incremental_backup_respective_parents(self):
+
+ # create vol as user
+ volume = self.volumes_client.create_volume(
+ size=CONF.volume.volume_size)['volume']
+ waiters.wait_for_volume_resource_status(
+ self.volumes_client,
+ volume['id'], 'available')
+
+ # create backup as admin
+ backup_adm = self.admin_backups_client.create_backup(
+ volume_id=volume['id'])['backup']
+ waiters.wait_for_volume_resource_status(
+ self.admin_backups_client,
+ backup_adm['id'], 'available')
+
+ # create backup as user
+ backup_usr = self.backups_client.create_backup(
+ volume_id=volume['id'])['backup']
+ waiters.wait_for_volume_resource_status(
+ self.backups_client,
+ backup_usr['id'], 'available')
+
+ # refresh admin backup and assert no child backups
+ backup_adm = self.admin_backups_client.show_backup(
+ backup_adm['id'])['backup']
+ self.assertFalse(backup_adm['has_dependent_backups'])
+
+ # create incremental backup as admin
+ backup_adm_inc = self.admin_backups_client.create_backup(
+ volume_id=volume['id'], incremental=True)['backup']
+ waiters.wait_for_volume_resource_status(
+ self.admin_backups_client,
+ backup_adm_inc['id'], 'available')
+
+ # refresh user backup and assert no child backups
+ backup_usr = self.backups_client.show_backup(
+ backup_usr['id'])['backup']
+ self.assertFalse(backup_usr['has_dependent_backups'])
+
+    # refresh admin backup and assert it has child backups
+ backup_adm = self.admin_backups_client.show_backup(
+ backup_adm['id'])['backup']
+ self.assertTrue(backup_adm['has_dependent_backups'])
+
+ # create incremental backup as user
+ backup_usr_inc = self.backups_client.create_backup(
+ volume_id=volume['id'], incremental=True)['backup']
+ waiters.wait_for_volume_resource_status(
+ self.backups_client,
+ backup_usr_inc['id'], 'available')
+
+    # refresh user backup and assert it has child backups
+ backup_usr = self.backups_client.show_backup(
+ backup_usr['id'])['backup']
+ self.assertTrue(backup_usr['has_dependent_backups'])
diff --git a/cinder_tempest_plugin/api/volume/base.py b/cinder_tempest_plugin/api/volume/base.py
index 675d0bc..418fd33 100644
--- a/cinder_tempest_plugin/api/volume/base.py
+++ b/cinder_tempest_plugin/api/volume/base.py
@@ -164,3 +164,38 @@
super(BaseVolumeAdminTest, cls).setup_clients()
cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
+ cls.admin_backups_client = cls.os_admin.backups_client_latest
+ cls.admin_volumes_client = cls.os_admin.volumes_client_latest
+
+ @classmethod
+ def create_volume_type(cls, name=None, **kwargs):
+ """Create a test volume-type"""
+
+ name = name or data_utils.rand_name(cls.__name__ + '-volume-type')
+ volume_type = cls.admin_volume_types_client.create_volume_type(
+ name=name, **kwargs)['volume_type']
+ cls.addClassResourceCleanup(cls._clear_volume_type, volume_type)
+ return volume_type
+
+ @classmethod
+ def _clear_volume_type(cls, volume_type):
+ # If image caching is enabled, we must delete the cached volume
+ # before cinder will allow us to delete the volume_type. This function
+ # solves that problem by taking the brute-force approach of deleting
+ # any volumes of this volume_type that exist *no matter what project
+ # they are in*. Since this won't happen until the teardown of the
+ # test class, that should be OK.
+ type_id = volume_type['id']
+ type_name = volume_type['name']
+
+ volumes = cls.admin_volumes_client.list_volumes(
+ detail=True, params={'all_tenants': 1})['volumes']
+ for volume in [v for v in volumes if v['volume_type'] == type_name]:
+ test_utils.call_and_ignore_notfound_exc(
+ cls.admin_volumes_client.delete_volume, volume['id'])
+ cls.admin_volumes_client.wait_for_resource_deletion(volume['id'])
+
+ test_utils.call_and_ignore_notfound_exc(
+ cls.admin_volume_types_client.delete_volume_type, type_id)
+ test_utils.call_and_ignore_notfound_exc(
+ cls.admin_volume_types_client.wait_for_resource_deletion, type_id)
diff --git a/cinder_tempest_plugin/api/volume/test_create_from_image.py b/cinder_tempest_plugin/api/volume/test_create_from_image.py
index 02fbd24..dc296c0 100644
--- a/cinder_tempest_plugin/api/volume/test_create_from_image.py
+++ b/cinder_tempest_plugin/api/volume/test_create_from_image.py
@@ -10,6 +10,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import io
+
from tempest.common import waiters
from tempest import config
from tempest.lib.common.utils import data_utils
@@ -77,3 +79,84 @@
waiters.wait_for_volume_resource_status(self.volumes_client,
v['id'],
'available')
+
+
+class VolumeAndVolumeTypeFromImageTest(base.BaseVolumeAdminTest):
+ # needs AdminTest as superclass to manipulate volume_types
+
+ @classmethod
+ def skip_checks(cls):
+ super(VolumeAndVolumeTypeFromImageTest, cls).skip_checks()
+ if not CONF.service_available.glance:
+ raise cls.skipException("Glance service is disabled")
+
+ @classmethod
+ def create_image_with_data(cls, **kwargs):
+ # we do this as a class method so we can use the
+ # addClassResourceCleanup functionality of tempest.test.BaseTestCase
+ images_client = cls.os_primary.image_client_v2
+ if 'min_disk' not in kwargs:
+ kwargs['min_disk'] = 1
+ response = images_client.create_image(**kwargs)
+ image_id = response['id']
+ cls.addClassResourceCleanup(
+ images_client.wait_for_resource_deletion, image_id)
+ cls.addClassResourceCleanup(
+ test_utils.call_and_ignore_notfound_exc,
+ images_client.delete_image, image_id)
+
+ # upload "data" to image
+ image_file = io.BytesIO(data_utils.random_bytes(size=1024))
+ images_client.store_image_file(image_id, image_file)
+
+ waiters.wait_for_image_status(images_client, image_id, 'active')
+ image = images_client.show_image(image_id)
+ return image
+
+ @decorators.idempotent_id('6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6')
+ def test_create_from_image_with_volume_type_image_property(self):
+ """Verify that the cinder_img_volume_type image property works.
+
+ When a volume is created from an image containing the
+ cinder_img_volume_type property and no volume_type is specified
+ in the volume-create request, the volume_type of the resulting
+ volume should be the one specified by the image property.
+ """
+
+ volume_type_meta = 'cinder_img_volume_type'
+ volume_type_name = 'vol-type-for-6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6'
+ description = ('Generic volume_type for test '
+ '6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6')
+ proto = CONF.volume.storage_protocol
+ vendor = CONF.volume.vendor_name
+ extra_specs = {"storage_protocol": proto,
+ "vendor_name": vendor}
+ kwargs = {'description': description,
+ 'extra_specs': extra_specs,
+ 'os-volume-type-access:is_public': True}
+ volume_type = self.create_volume_type(name=volume_type_name,
+ **kwargs)
+ # quick sanity check
+ self.assertEqual(volume_type_name, volume_type['name'])
+
+ # create an image in glance
+ kwargs = {'disk_format': 'raw',
+ 'container_format': 'bare',
+ 'name': ('image-for-test-'
+ '6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6'),
+ 'visibility': 'private',
+ volume_type_meta: volume_type_name}
+ image = self.create_image_with_data(**kwargs)
+ # quick sanity check
+ self.assertEqual(volume_type_name, image[volume_type_meta])
+
+ # create volume from image
+ kwargs = {'name': ('volume-for-test-'
+ '6e9266ff-a917-4dd5-aa4a-c36e59e7a2a6'),
+ 'imageRef': image['id']}
+ # this is the whole point of the test, so make sure this is true
+ self.assertNotIn('volume_type', kwargs)
+ volume = self.create_volume(**kwargs)
+
+ found_volume_type = volume['volume_type']
+ self.assertEqual(volume_type_name, found_volume_type)
diff --git a/cinder_tempest_plugin/exceptions.py b/cinder_tempest_plugin/exceptions.py
new file mode 100644
index 0000000..4825f19
--- /dev/null
+++ b/cinder_tempest_plugin/exceptions.py
@@ -0,0 +1,22 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.lib import exceptions
+
+
+class ConsistencyGroupException(exceptions.TempestException):
+ message = "Consistency group %(cg_id)s failed and is in ERROR status"
+
+
+class ConsistencyGroupSnapshotException(exceptions.TempestException):
+ message = ("Consistency group snapshot %(cgsnapshot_id)s failed and is "
+ "in ERROR status")
diff --git a/cinder_tempest_plugin/services/consistencygroups_client.py b/cinder_tempest_plugin/services/consistencygroups_client.py
index 10415d4..a29a90a 100644
--- a/cinder_tempest_plugin/services/consistencygroups_client.py
+++ b/cinder_tempest_plugin/services/consistencygroups_client.py
@@ -18,10 +18,11 @@
from oslo_serialization import jsonutils as json
from six.moves import http_client
-from tempest import exceptions
from tempest.lib.common import rest_client
from tempest.lib import exceptions as lib_exc
+from cinder_tempest_plugin import exceptions as volume_exc
+
class ConsistencyGroupsClient(rest_client.RestClient):
"""Client class to send CRUD Volume ConsistencyGroup API requests"""
@@ -137,14 +138,14 @@
body = self.show_consistencygroup(cg_id)['consistencygroup']
cg_status = body['status']
if cg_status == 'error':
- raise exceptions.ConsistencyGroupException(cg_id=cg_id)
+ raise volume_exc.ConsistencyGroupException(cg_id=cg_id)
if int(time.time()) - start >= self.build_timeout:
message = ('Consistency group %s failed to reach %s status '
'(current %s) within the required time (%s s).' %
(cg_id, status, cg_status,
self.build_timeout))
- raise exceptions.TimeoutException(message)
+ raise lib_exc.TimeoutException(message)
def wait_for_consistencygroup_deletion(self, cg_id):
"""Waits for consistency group deletion"""
@@ -155,7 +156,7 @@
except lib_exc.NotFound:
return
if int(time.time()) - start_time >= self.build_timeout:
- raise exceptions.TimeoutException
+ raise lib_exc.TimeoutException
time.sleep(self.build_interval)
def wait_for_cgsnapshot_status(self, cgsnapshot_id, status):
@@ -169,7 +170,7 @@
body = self.show_cgsnapshot(cgsnapshot_id)['cgsnapshot']
cgsnapshot_status = body['status']
if cgsnapshot_status == 'error':
- raise exceptions.ConsistencyGroupSnapshotException(
+ raise volume_exc.ConsistencyGroupSnapshotException(
cgsnapshot_id=cgsnapshot_id)
if int(time.time()) - start >= self.build_timeout:
@@ -178,7 +179,7 @@
'(%s s).' %
(cgsnapshot_id, status, cgsnapshot_status,
self.build_timeout))
- raise exceptions.TimeoutException(message)
+ raise lib_exc.TimeoutException(message)
def wait_for_cgsnapshot_deletion(self, cgsnapshot_id):
"""Waits for consistency group snapshot deletion"""
@@ -189,5 +190,5 @@
except lib_exc.NotFound:
return
if int(time.time()) - start_time >= self.build_timeout:
- raise exceptions.TimeoutException
+ raise lib_exc.TimeoutException
time.sleep(self.build_interval)