Merge "Revert "Refactor random naming""
diff --git a/releasenotes/notes/add-migrate-volume-and-list-hosts-to-v3-volume-client-library-ad3529260db58f00.yaml b/releasenotes/notes/add-migrate-volume-and-list-hosts-to-v3-volume-client-library-ad3529260db58f00.yaml
new file mode 100644
index 0000000..ca6a78d
--- /dev/null
+++ b/releasenotes/notes/add-migrate-volume-and-list-hosts-to-v3-volume-client-library-ad3529260db58f00.yaml
@@ -0,0 +1,8 @@
+---
+features:
+ - |
+ Add list host API support to the volume v3 client library.
+ This feature enables callers to list all hosts for a given project.
+ - |
+ Add migrate volume API support to the volume v3 client library.
+ This feature allows callers to migrate volumes between backends.
diff --git a/releasenotes/notes/deprecate-dns_servers-option-0xf2f297ee47a5ff.yaml b/releasenotes/notes/deprecate-dns_servers-option-0xf2f297ee47a5ff.yaml
new file mode 100644
index 0000000..30551cb
--- /dev/null
+++ b/releasenotes/notes/deprecate-dns_servers-option-0xf2f297ee47a5ff.yaml
@@ -0,0 +1,6 @@
+---
+deprecations:
+ - |
+ The config option ``CONF.network.dns_servers`` is no longer used
+ anywhere, so it is deprecated and will be removed in the future.
+
diff --git a/tempest/api/compute/admin/test_quotas.py b/tempest/api/compute/admin/test_quotas.py
index 12c7255..0060ffe 100644
--- a/tempest/api/compute/admin/test_quotas.py
+++ b/tempest/api/compute/admin/test_quotas.py
@@ -212,7 +212,7 @@
# 'danger' flag.
@decorators.idempotent_id('7932ab0f-5136-4075-b201-c0e2338df51a')
def test_update_default_quotas(self):
- LOG.debug("get the current 'default' quota class values")
+ # get the current 'default' quota class values
body = (self.adm_client.show_quota_class_set('default')
['quota_class_set'])
self.assertEqual('default', body.pop('id'))
@@ -224,9 +224,14 @@
# there is a real chance that we go from -1 (unlimited)
# to a very small number which causes issues.
body[quota] = default + 100
- LOG.debug("update limits for the default quota class set")
+ # update limits for the default quota class set
update_body = self.adm_client.update_quota_class_set(
'default', **body)['quota_class_set']
- LOG.debug("assert that the response has all of the changed values")
+ # assert that the response has all of the changed values
self.assertThat(update_body.items(),
matchers.ContainsAll(body.items()))
+ # check quota values are changed
+ show_body = self.adm_client.show_quota_class_set(
+ 'default')['quota_class_set']
+ self.assertThat(show_body.items(),
+ matchers.ContainsAll(body.items()))
diff --git a/tempest/api/identity/admin/v3/test_endpoint_groups.py b/tempest/api/identity/admin/v3/test_endpoint_groups.py
index 625568d..7d85dc9 100644
--- a/tempest/api/identity/admin/v3/test_endpoint_groups.py
+++ b/tempest/api/identity/admin/v3/test_endpoint_groups.py
@@ -69,6 +69,7 @@
@decorators.idempotent_id('7c69e7a1-f865-402d-a2ea-44493017315a')
def test_create_list_show_check_delete_endpoint_group(self):
service_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
filters = {'service_id': service_id}
@@ -129,6 +130,7 @@
# Creating an endpoint group so as to check update endpoint group
# with new values
service1_id = self._create_service()
+ self.addCleanup(self.services_client.delete_service, service1_id)
name = data_utils.rand_name('service_group')
description = data_utils.rand_name('description')
filters = {'service_id': service1_id}
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 0e86f05..77ec0f8 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -104,8 +104,8 @@
body = client.show_server(server_id)['server']
except lib_exc.NotFound:
return
- old_status = server_status = body['status']
- old_task_state = task_state = _get_task_state(body)
+ old_status = body['status']
+ old_task_state = _get_task_state(body)
start_time = int(time.time())
while True:
time.sleep(client.build_interval)
@@ -213,6 +213,31 @@
resource_name, resource_id, status, time.time() - start)
+def wait_for_volume_migration(client, volume_id, new_host):
+ """Waits for a Volume to move to a new host."""
+ body = client.show_volume(volume_id)['volume']
+ host = body['os-vol-host-attr:host']
+ migration_status = body['migration_status']
+ start = int(time.time())
+
+ # new_host is hostname@backend while current_host is hostname@backend#type
+ while migration_status != 'success' or new_host not in host:
+ time.sleep(client.build_interval)
+ body = client.show_volume(volume_id)['volume']
+ host = body['os-vol-host-attr:host']
+ migration_status = body['migration_status']
+
+ if migration_status == 'error':
+ message = ('volume %s failed to migrate.' % (volume_id))
+ raise lib_exc.TempestException(message)
+
+ if int(time.time()) - start >= client.build_timeout:
+ message = ('Volume %s failed to migrate to %s (current %s) '
+ 'within the required time (%s s).' %
+ (volume_id, new_host, host, client.build_timeout))
+ raise lib_exc.TimeoutException(message)
+
+
def wait_for_volume_retype(client, volume_id, new_volume_type):
"""Waits for a Volume to have a new volume type."""
body = client.show_volume(volume_id)['volume']
diff --git a/tempest/config.py b/tempest/config.py
index 24ae3ae..f692a4b 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -685,7 +685,10 @@
cfg.ListOpt('dns_servers',
default=["8.8.8.8", "8.8.4.4"],
help="List of dns servers which should be used"
- " for subnet creation"),
+ " for subnet creation",
+ deprecated_for_removal=True,
+ deprecated_reason="This config option is no longer "
+ "used anywhere, so it can be removed."),
cfg.StrOpt('port_vnic_type',
choices=[None, 'normal', 'direct', 'macvtap'],
help="vnic_type to use when launching instances"
diff --git a/tempest/lib/api_schema/response/compute/v2_1/volumes.py b/tempest/lib/api_schema/response/compute/v2_1/volumes.py
index c35dae9..d367f2a 100644
--- a/tempest/lib/api_schema/response/compute/v2_1/volumes.py
+++ b/tempest/lib/api_schema/response/compute/v2_1/volumes.py
@@ -50,7 +50,8 @@
# If it would come as empty array "[]" then,
# those elements can be defined as 'required'.
}
- }
+ },
+ 'os-vol-host-attr:host': {'type': 'string'},
},
'additionalProperties': False,
'required': ['id', 'status', 'displayName', 'availabilityZone',
diff --git a/tempest/lib/services/volume/v3/volumes_client.py b/tempest/lib/services/volume/v3/volumes_client.py
index fec2950..2dbdd11 100644
--- a/tempest/lib/services/volume/v3/volumes_client.py
+++ b/tempest/lib/services/volume/v3/volumes_client.py
@@ -35,6 +35,16 @@
return params
return urllib.urlencode(params)
+ def list_hosts(self):
+ """Lists summary info for all hosts that are not disabled.
+
+ https://developer.openstack.org/api-ref/block-storage/v3/index.html#list-all-hosts-for-a-project
+ """
+ resp, body = self.get('os-hosts')
+ body = json.loads(body)
+ self.expected_success(200, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def list_volumes(self, detail=False, params=None):
"""List all the volumes created.
@@ -55,6 +65,19 @@
self.expected_success(200, resp.status)
return rest_client.ResponseBody(resp, body)
+ def migrate_volume(self, volume_id, **kwargs):
+ """Migrate a volume to a new backend
+
+ For a full list of available parameters please refer to the official
+ API reference:
+
+ https://developer.openstack.org/api-ref/block-storage/v3/index.html#migrate-a-volume
+ """
+ post_body = json.dumps({'os-migrate_volume': kwargs})
+ resp, body = self.post('volumes/%s/action' % volume_id, post_body)
+ self.expected_success(202, resp.status)
+ return rest_client.ResponseBody(resp, body)
+
def show_volume(self, volume_id):
"""Returns the details of a single volume."""
url = "volumes/%s" % volume_id
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 7992585..f46c7e8 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -292,11 +292,14 @@
% CONF.network.build_timeout)
_, new_nic = self.diff_list[0]
- ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
- new_port['fixed_ips'][0]['ip_address'],
- CONF.network.project_network_mask_bits,
- new_nic))
- ssh_client.exec_command("sudo ip link set %s up" % new_nic)
+ ip_output = ssh_client.exec_command('ip a')
+ ip_address = new_port['fixed_ips'][0]['ip_address']
+ ip_mask = CONF.network.project_network_mask_bits
+ # only configure the address if it is not already set on the NIC
+ if ' ' + ip_address + '/' + str(ip_mask) not in ip_output:
+ ssh_client.exec_command("sudo ip addr add %s/%s dev %s" % (
+ ip_address, ip_mask, new_nic))
+ ssh_client.exec_command("sudo ip link set %s up" % new_nic)
def _get_server_nics(self, ssh_client):
reg = re.compile(r'(?P<num>\d+): (?P<nic_name>\w+)[@]?.*:')
diff --git a/tempest/scenario/test_volume_migrate_attached.py b/tempest/scenario/test_volume_migrate_attached.py
index c54bb38..106500e 100644
--- a/tempest/scenario/test_volume_migrate_attached.py
+++ b/tempest/scenario/test_volume_migrate_attached.py
@@ -33,6 +33,9 @@
* Write to the volume
* Perform a cinder retype --on-demand of the volume to type of backend #2
* Check written content of migrated volume
+ * Check the type of the volume has been updated.
+ * Check the volume is still in-use and the migration was successful.
+ * Check that the same volume is attached to the instance.
"""
credentials = ['primary', 'admin']
@@ -78,7 +81,8 @@
'src_backend': backend_source,
'dst': dest_body['name'],
'dst_backend': backend_dest})
- return source_body['name'], dest_body['name']
+ return ({'name': source_body['name'], 'host': backend_source},
+ {'name': dest_body['name'], 'host': backend_dest})
def _volume_retype_with_migration(self, volume_id, new_volume_type):
# NOTE: The 'on-demand' migration requires admin operation, so
@@ -93,7 +97,7 @@
@decorators.attr(type='slow')
@decorators.idempotent_id('deadd2c2-beef-4dce-98be-f86765ff311b')
@utils.services('compute', 'volume')
- def test_volume_migrate_attached(self):
+ def test_volume_retype_attached(self):
LOG.info("Creating keypair and security group")
keypair = self.create_keypair()
security_group = self._create_security_group()
@@ -104,11 +108,11 @@
# create an instance from volume
LOG.info("Booting instance from volume")
- volume_origin = self.create_volume(imageRef=CONF.compute.image_ref,
- volume_type=source_type)
+ volume_id = self.create_volume(imageRef=CONF.compute.image_ref,
+ volume_type=source_type['name'])['id']
- instance = self._boot_instance_from_volume(volume_origin['id'],
- keypair, security_group)
+ instance = self._boot_instance_from_volume(volume_id, keypair,
+ security_group)
# write content to volume on instance
LOG.info("Setting timestamp in instance %s", instance['id'])
@@ -118,9 +122,11 @@
server=instance)
# retype volume with migration from backend #1 to backend #2
- LOG.info("Retyping Volume %s to new type %s", volume_origin['id'],
- dest_type)
- self._volume_retype_with_migration(volume_origin['id'], dest_type)
+ LOG.info("Retyping Volume %s to new type %s", volume_id,
+ dest_type['name'])
+ # This method calls for the retype of the volume before calling a
+ # waiter that asserts that the volume type has changed successfully.
+ self._volume_retype_with_migration(volume_id, dest_type['name'])
# check the content of written file
LOG.info("Getting timestamp in postmigrated instance %s",
@@ -129,3 +135,82 @@
private_key=keypair['private_key'],
server=instance)
self.assertEqual(timestamp, timestamp2)
+
+ # Assert that the volume is on the new host, is still in-use and has a
+ # migration_status of success
+ volume = self.admin_volumes_client.show_volume(volume_id)['volume']
+ # dest_type is host@backend, os-vol-host-attr:host is host@backend#type
+ self.assertIn(dest_type['host'], volume['os-vol-host-attr:host'])
+ self.assertEqual('in-use', volume['status'])
+ self.assertEqual('success', volume['migration_status'])
+
+ # Assert that the same volume id is attached to the instance, ensuring
+ # the os-migrate_volume_completion Cinder API has been called.
+ attached_volumes = self.servers_client.list_volume_attachments(
+ instance['id'])['volumeAttachments']
+ self.assertEqual(volume_id, attached_volumes[0]['id'])
+
+ @decorators.attr(type='slow')
+ @decorators.idempotent_id('fe47b1ed-640e-4e3b-a090-200e25607362')
+ @utils.services('compute', 'volume')
+ def test_volume_migrate_attached(self):
+ LOG.info("Creating keypair and security group")
+ keypair = self.create_keypair()
+ security_group = self._create_security_group()
+
+ LOG.info("Creating volume")
+ # Create a unique volume type to avoid using the backend default
+ migratable_type = self.create_volume_type()['name']
+ volume_id = self.create_volume(imageRef=CONF.compute.image_ref,
+ volume_type=migratable_type)['id']
+ volume = self.admin_volumes_client.show_volume(volume_id)
+
+ LOG.info("Booting instance from volume")
+ instance = self._boot_instance_from_volume(volume_id, keypair,
+ security_group)
+
+ # Identify the source and destination hosts for the migration
+ src_host = volume['volume']['os-vol-host-attr:host']
+
+ # Select the first c-vol host that isn't hosting the volume as the dest
+ # host['host_name'] should take the format of host@backend.
+ # src_host should take the format of host@backend#type
+ hosts = self.admin_volumes_client.list_hosts()['hosts']
+ for host in hosts:
+ if (host['service'] == 'cinder-volume' and
+ not src_host.startswith(host['host_name'])):
+ dest_host = host['host_name']
+ break
+
+ ip_instance = self.get_server_ip(instance)
+ timestamp = self.create_timestamp(ip_instance,
+ private_key=keypair['private_key'],
+ server=instance)
+
+ LOG.info("Migrating Volume %s from host %s to host %s",
+ volume_id, src_host, dest_host)
+ self.admin_volumes_client.migrate_volume(volume_id, host=dest_host)
+
+ # This waiter asserts that the migration_status is success and that
+ # the volume has moved to the dest_host
+ waiters.wait_for_volume_migration(self.admin_volumes_client, volume_id,
+ dest_host)
+
+ # check the content of written file
+ LOG.info("Getting timestamp in postmigrated instance %s",
+ instance['id'])
+ timestamp2 = self.get_timestamp(ip_instance,
+ private_key=keypair['private_key'],
+ server=instance)
+ self.assertEqual(timestamp, timestamp2)
+
+ # Assert that the volume is in-use
+ volume = self.admin_volumes_client.show_volume(volume_id)['volume']
+ self.assertEqual('in-use', volume['status'])
+
+ # Assert that the same volume id is attached to the instance, ensuring
+ # the os-migrate_volume_completion Cinder API has been called
+ attached_volumes = self.servers_client.list_volume_attachments(
+ instance['id'])['volumeAttachments']
+ attached_volume_id = attached_volumes[0]['id']
+ self.assertEqual(volume_id, attached_volume_id)
diff --git a/tempest/tests/common/test_waiters.py b/tempest/tests/common/test_waiters.py
index 938d226..d56e8a4 100644
--- a/tempest/tests/common/test_waiters.py
+++ b/tempest/tests/common/test_waiters.py
@@ -148,3 +148,68 @@
list_interfaces.assert_has_calls([mock.call('server_id'),
mock.call('server_id')])
sleep.assert_called_once_with(client.build_interval)
+
+
+class TestVolumeWaiters(base.TestCase):
+ vol_migrating_src_host = {
+ 'volume': {'migration_status': 'migrating',
+ 'os-vol-host-attr:host': 'src_host@backend#type'}}
+ vol_migrating_dst_host = {
+ 'volume': {'migration_status': 'migrating',
+ 'os-vol-host-attr:host': 'dst_host@backend#type'}}
+ vol_migration_success = {
+ 'volume': {'migration_status': 'success',
+ 'os-vol-host-attr:host': 'dst_host@backend#type'}}
+ vol_migration_error = {
+ 'volume': {'migration_status': 'error',
+ 'os-vol-host-attr:host': 'src_host@backend#type'}}
+
+ def test_wait_for_volume_migration_timeout(self):
+ show_volume = mock.MagicMock(return_value=self.vol_migrating_src_host)
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', side_effect=[0., client.build_timeout + 1.])
+ self.patch('time.sleep')
+ self.assertRaises(lib_exc.TimeoutException,
+ waiters.wait_for_volume_migration,
+ client, mock.sentinel.volume_id, 'dst_host')
+
+ def test_wait_for_volume_migration_error(self):
+ show_volume = mock.MagicMock(side_effect=[
+ self.vol_migrating_src_host,
+ self.vol_migrating_src_host,
+ self.vol_migration_error])
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', return_value=0.)
+ self.patch('time.sleep')
+ self.assertRaises(lib_exc.TempestException,
+ waiters.wait_for_volume_migration,
+ client, mock.sentinel.volume_id, 'dst_host')
+
+ def test_wait_for_volume_migration_success_and_dst(self):
+ show_volume = mock.MagicMock(side_effect=[
+ self.vol_migrating_src_host,
+ self.vol_migrating_dst_host,
+ self.vol_migration_success])
+ client = mock.Mock(spec=volumes_client.VolumesClient,
+ resource_type="volume",
+ build_interval=1,
+ build_timeout=1,
+ show_volume=show_volume)
+ self.patch('time.time', return_value=0.)
+ self.patch('time.sleep')
+ waiters.wait_for_volume_migration(
+ client, mock.sentinel.volume_id, 'dst_host')
+
+ # Assert that we wait until migration_status is success and dst_host is
+ # part of the returned os-vol-host-attr:host.
+ show_volume.assert_has_calls([mock.call(mock.sentinel.volume_id),
+ mock.call(mock.sentinel.volume_id),
+ mock.call(mock.sentinel.volume_id)])
diff --git a/tools/tempest-plugin-sanity.sh b/tools/tempest-plugin-sanity.sh
index 703dce2..500ff02 100644
--- a/tools/tempest-plugin-sanity.sh
+++ b/tools/tempest-plugin-sanity.sh
@@ -46,7 +46,6 @@
# List of projects having tempest plugin stale or unmaintained for a long time
# (6 months or more)
# TODO(masayukig): Some of these can be removed from BLACKLIST in the future.
-# airship-tempest-plugin: https://review.openstack.org/#/c/634387/
# barbican-tempest-plugin: https://review.openstack.org/#/c/634631/
# intel-nfv-ci-tests: https://review.openstack.org/#/c/634640/
# networking-ansible: https://review.openstack.org/#/c/634647/
@@ -61,7 +60,6 @@
# valet: https://review.openstack.org/#/c/638339/
# vitrage-tempest-plugin: https://review.openstack.org/#/c/639003/
BLACKLIST="
-airship-tempest-plugin
barbican-tempest-plugin
intel-nfv-ci-tests
networking-ansible
@@ -94,7 +92,7 @@
# function to create virtualenv to perform sanity operation
function prepare_workspace() {
SANITY_DIR=$(pwd)
- virtualenv --clear "$SANITY_DIR"/.venv
+ virtualenv -p python3 --clear "$SANITY_DIR"/.venv
export TVENV="$SANITY_DIR/tools/with_venv.sh"
cd "$SANITY_DIR"
diff --git a/tox.ini b/tox.ini
index 230249f..9bee3dd 100644
--- a/tox.ini
+++ b/tox.ini
@@ -265,6 +265,7 @@
[testenv:plugin-sanity-check]
# perform tempest plugin sanity
+basepython = python3
whitelist_externals = bash
commands =
bash tools/tempest-plugin-sanity.sh