Migrate volume while attached to an instance
Test scenario with a bootable volume that is migrated via:

    cinder retype --migration-policy on-demand <volume_id> <new_volume_type>

This exercises cinder retype and migration as well as
nova swap_volume for a bootable volume.

Requires at least two Cinder backends, configured in
devstack with:

    CINDER_ENABLED_BACKENDS=lvm:lvmdriver-1,lvm:lvmdriver-2
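
In Tempest terms, the flow added below boils down to the following sketch
(condensed from the new test module; the helpers and clients are the ones
touched by this change, and the snippet is illustrative rather than a
standalone runnable test):

    src_type = self.create_volume_type(backend_name=backend_names[0])
    dst_type = self.create_volume_type(backend_name=backend_names[1])
    volume = self.create_volume(imageRef=CONF.compute.image_ref,
                                volume_type=src_type['name'])
    # boot an instance from the volume and write a timestamp over SSH, then:
    self.volumes_client.retype_volume(volume['id'],
                                      new_type=dst_type['name'],
                                      migration_policy='on-demand')
    waiters.wait_for_volume_retype(self.volumes_client, volume['id'],
                                   dst_type['name'])
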
Co-Authored-By: Erlon R. Cruz <erlon.cruz@fit-tecnologia.org.br>
Change-Id: I4ed9b1f30d4e4e595c44a0ce243c2463069833d7
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index fe648a0..8303caf 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -189,6 +189,25 @@
             raise lib_exc.TimeoutException(message)
 
 
+def wait_for_volume_retype(client, volume_id, new_volume_type):
+    """Waits for a Volume to have a new volume type."""
+    body = client.show_volume(volume_id)['volume']
+    current_volume_type = body['volume_type']
+    start = int(time.time())
+
+    while current_volume_type != new_volume_type:
+        time.sleep(client.build_interval)
+        body = client.show_volume(volume_id)['volume']
+        current_volume_type = body['volume_type']
+
+        if int(time.time()) - start >= client.build_timeout:
+            message = ('Volume %s failed to reach %s volume type (current %s) '
+                       'within the required time (%s s).' %
+                       (volume_id, new_volume_type, current_volume_type,
+                        client.build_timeout))
+            raise lib_exc.TimeoutException(message)
+
+
 def wait_for_snapshot_status(client, snapshot_id, status):
     """Waits for a Snapshot to reach a given status."""
     body = client.show_snapshot(snapshot_id)['snapshot']
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 46949ad..8c930c3 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -248,6 +248,27 @@
         volume = self.volumes_client.show_volume(volume['id'])['volume']
         return volume
 
+    def create_volume_type(self, client=None, name=None, backend_name=None):
+        if not client:
+            client = self.admin_volume_types_client
+        if not name:
+            class_name = self.__class__.__name__
+            name = data_utils.rand_name(class_name + '-volume-type')
+        randomized_name = data_utils.rand_name('scenario-type-' + name)
+
+        LOG.debug("Creating a volume type: %s on backend %s",
+                  randomized_name, backend_name)
+        extra_specs = {}
+        if backend_name:
+            extra_specs = {"volume_backend_name": backend_name}
+
+        body = client.create_volume_type(name=randomized_name,
+                                         extra_specs=extra_specs)
+        volume_type = body['volume_type']
+        self.assertIn('id', volume_type)
+        self.addCleanup(client.delete_volume_type, volume_type['id'])
+        return volume_type
+
     def _create_loginable_secgroup_rule(self, secgroup_id=None):
         _client = self.compute_security_groups_client
         _client_rules = self.compute_security_group_rules_client
@@ -1246,19 +1267,6 @@
             cls.admin_encryption_types_client =\
                 cls.os_adm.encryption_types_client
 
-    def create_volume_type(self, client=None, name=None):
-        if not client:
-            client = self.admin_volume_types_client
-        if not name:
-            name = 'generic'
-        randomized_name = data_utils.rand_name('scenario-type-' + name)
-        LOG.debug("Creating a volume type: %s", randomized_name)
-        body = client.create_volume_type(
-            name=randomized_name)['volume_type']
-        self.assertIn('id', body)
-        self.addCleanup(client.delete_volume_type, body['id'])
-        return body
-
     def create_encryption_type(self, client=None, type_id=None, provider=None,
                                key_size=None, cipher=None,
                                control_location=None):
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
new file mode 100644
index 0000000..dfda18d
--- /dev/null
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -0,0 +1,128 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from tempest.common import waiters
+from tempest import config
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+LOG = logging.getLogger(__name__)
+
+
+class TestVolumeMigrateRetypeAttached(manager.ScenarioTest):
+
+    """This test case attempts to reproduce the following steps:
+
+    * Create 2 volume types representing 2 different backends
+    * Create a bootable volume in Cinder from a Glance image, using
+      volume_type_1
+    * Boot an instance from the bootable volume
+    * Write to the volume
+    * Perform a cinder retype --on-demand of the volume to type of backend #2
+    * Check written content of migrated volume
+    """
+
+    credentials = ['primary', 'admin']
+
+    @classmethod
+    def setup_clients(cls):
+        super(TestVolumeMigrateRetypeAttached, cls).setup_clients()
+        if CONF.volume_feature_enabled.api_v1:
+            cls.admin_volume_types_client = cls.os_adm.volume_types_client
+        else:
+            cls.admin_volume_types_client = cls.os_adm.volume_types_v2_client
+
+    @classmethod
+    def skip_checks(cls):
+        super(TestVolumeMigrateRetypeAttached, cls).skip_checks()
+        if not CONF.volume_feature_enabled.multi_backend:
+            raise cls.skipException("Cinder multi-backend feature disabled")
+
+        if len(set(CONF.volume.backend_names)) < 2:
+            raise cls.skipException("Requires at least two different "
+                                    "backend names")
+
+    def _boot_instance_from_volume(self, vol_id, keypair, security_group):
+
+        key_name = keypair['name']
+        security_groups = [{'name': security_group['name']}]
+        block_device_mapping = [{'device_name': 'vda', 'volume_id': vol_id,
+                                 'delete_on_termination': False}]
+
+        return self.create_server(image_id='', wait_until='ACTIVE',
+                                  key_name=key_name,
+                                  security_groups=security_groups,
+                                  block_device_mapping=block_device_mapping)
+
+    def _create_volume_types(self):
+        backend_names = CONF.volume.backend_names
+
+        backend_source = backend_names[0]
+        backend_dest = backend_names[1]
+
+        source_body = self.create_volume_type(backend_name=backend_source)
+        dest_body = self.create_volume_type(backend_name=backend_dest)
+
+        LOG.info("Created Volume types: %(src)s -> %(src_backend)s, %(dst)s "
+                 "-> %(dst_backend)s", {'src': source_body['name'],
+                                        'src_backend': backend_source,
+                                        'dst': dest_body['name'],
+                                        'dst_backend': backend_dest})
+        return source_body['name'], dest_body['name']
+
+    def _volume_retype_with_migration(self, volume_id, new_volume_type):
+        migration_policy = 'on-demand'
+        self.volumes_client.retype_volume(
+            volume_id, new_type=new_volume_type,
+            migration_policy=migration_policy)
+        waiters.wait_for_volume_retype(self.volumes_client,
+                                       volume_id, new_volume_type)
+
+    @test.idempotent_id('deadd2c2-beef-4dce-98be-f86765ff311b')
+    @test.services('compute', 'volume')
+    def test_volume_migrate_attached(self):
+        LOG.info("Creating keypair and security group")
+        keypair = self.create_keypair()
+        security_group = self._create_security_group()
+
+        # create volume types
+        LOG.info("Creating Volume types")
+        source_type, dest_type = self._create_volume_types()
+
+        # create an instance from volume
+        LOG.info("Booting instance from volume")
+        volume_origin = self.create_volume(imageRef=CONF.compute.image_ref,
+                                           volume_type=source_type)
+
+        instance = self._boot_instance_from_volume(volume_origin['id'],
+                                                   keypair, security_group)
+
+        # write content to volume on instance
+        LOG.info("Setting timestamp in instance %s", instance['id'])
+        ip_instance = self.get_server_ip(instance)
+        timestamp = self.create_timestamp(ip_instance,
+                                          private_key=keypair['private_key'])
+
+        # retype volume with migration from backend #1 to backend #2
+        LOG.info("Retyping Volume %s to new type %s", volume_origin['id'],
+                 dest_type)
+        self._volume_retype_with_migration(volume_origin['id'], dest_type)
+
+        # check the content of written file
+        LOG.info("Getting timestamp in post-migrated instance %s",
+                 instance['id'])
+        timestamp2 = self.get_timestamp(ip_instance,
+                                        private_key=keypair['private_key'])
+        self.assertEqual(timestamp, timestamp2)
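
A possible follow-up check, not part of this change: besides comparing the
timestamps, the migration could be verified directly by asserting that the
volume's backing host moved to the destination backend. A rough sketch,
assuming an admin volumes client is available on the test class (e.g.
cls.os_adm.volumes_v2_client bound as cls.admin_volumes_client in
setup_clients) and that the admin view exposes the standard
'os-vol-host-attr:host' attribute:

    # Hypothetical extra assertion after _volume_retype_with_migration():
    # the volume host is usually reported as 'host@backend#pool', so the
    # destination backend name should appear in it.
    migrated = self.admin_volumes_client.show_volume(
        volume_origin['id'])['volume']
    self.assertIn(CONF.volume.backend_names[1],
                  migrated['os-vol-host-attr:host'])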