Merge "Drop EOL branches from job definitions"
diff --git a/playbooks/devstack-tempest.yaml b/playbooks/devstack-tempest.yaml
index 269999c..6d70bc3 100644
--- a/playbooks/devstack-tempest.yaml
+++ b/playbooks/devstack-tempest.yaml
@@ -20,6 +20,11 @@
include_role:
name: acl-devstack-files
+ - name: Set source and destination host
+ include_role:
+ name: set-src-dest-host
+ when: tempest_set_src_dest_host is defined and tempest_set_src_dest_host | bool
+
- name: Run tempest cleanup init-saved-state
include_role:
name: tempest-cleanup
diff --git a/releasenotes/notes/add-option-to-specify-source-host.yaml b/releasenotes/notes/add-option-to-specify-source-host.yaml
new file mode 100644
index 0000000..f8df40a
--- /dev/null
+++ b/releasenotes/notes/add-option-to-specify-source-host.yaml
@@ -0,0 +1,5 @@
+---
+features:
+  - Add new config options migration_source_host and migration_dest_host
+    in the compute section. If set, the source or destination host is
+    taken from the options; otherwise a host is chosen automatically.
diff --git a/roles/set-src-dest-host/defaults/main.yaml b/roles/set-src-dest-host/defaults/main.yaml
new file mode 100644
index 0000000..fea05c8
--- /dev/null
+++ b/roles/set-src-dest-host/defaults/main.yaml
@@ -0,0 +1 @@
+devstack_base_dir: /opt/stack
diff --git a/roles/set-src-dest-host/tasks/main.yaml b/roles/set-src-dest-host/tasks/main.yaml
new file mode 100644
index 0000000..78b7a2c
--- /dev/null
+++ b/roles/set-src-dest-host/tasks/main.yaml
@@ -0,0 +1,29 @@
+- name: Find out hostnames
+ set_fact:
+ devstack_hostnames: "{{ devstack_hostnames|default([]) + [hostvars[zj_item]['ansible_hostname'] | default('unknown')] }}"
+ loop: "{{ query('inventory_hostnames', 'all,!localhost') }}"
+ loop_control:
+ loop_var: zj_item
+ ignore_errors: yes # noqa ignore-errors
+
+- name: Found hostnames
+ debug:
+ msg: |
+ # Available hosts
+ {{ devstack_hostnames }}
+
+- name: Set migration_source_host in tempest.conf
+ become: true
+ community.general.ini_file:
+ path: "{{ devstack_base_dir }}/tempest/etc/tempest.conf"
+ section: compute
+ option: migration_source_host
+ value: "{{ devstack_hostnames[0] }}"
+
+- name: Set migration_dest_host in tempest.conf
+ become: true
+ community.general.ini_file:
+ path: "{{ devstack_base_dir }}/tempest/etc/tempest.conf"
+ section: compute
+ option: migration_dest_host
+ value: "{{ devstack_hostnames[1] }}"
diff --git a/tempest/api/image/v2/admin/test_image_caching.py b/tempest/api/image/v2/admin/test_image_caching.py
index 75369c9..333f946 100644
--- a/tempest/api/image/v2/admin/test_image_caching.py
+++ b/tempest/api/image/v2/admin/test_image_caching.py
@@ -37,13 +37,17 @@
# NOTE(abhishekk): As caching is enabled instance boot or volume
# boot or image download can also cache image, so we are going to
# maintain our caching information to avoid disturbing other tests
- self.cached_info = {}
+ self.cached_info = []
+ self.cached_info_remote = []
def tearDown(self):
# Delete all from cache/queue if we exit abruptly
for image_id in self.cached_info:
- self.os_admin.image_cache_client.cache_delete(
- image_id)
+ self.os_admin.image_cache_client.cache_delete(image_id)
+
+ for image_id in self.cached_info_remote:
+ self.os_admin.image_cache_client.cache_delete(image_id)
+
super(ImageCachingTest, self).tearDown()
@classmethod
@@ -75,19 +79,13 @@
image = self.client.show_image(image['id'])
return image
- def _assertCheckQueues(self, queued_images):
- for image in self.cached_info:
- if self.cached_info[image] == 'queued':
- self.assertIn(image, queued_images)
-
- def _assertCheckCache(self, cached_images):
+ def _assertCheckCache(self, cached_images, cached):
cached_list = []
for image in cached_images:
cached_list.append(image['image_id'])
- for image in self.cached_info:
- if self.cached_info[image] == 'cached':
- self.assertIn(image, cached_list)
+ for image in cached:
+ self.assertIn(image, cached_list)
@decorators.idempotent_id('4bf6adba-2f9f-47e9-a6d5-37f21ad4387c')
def test_image_caching_cycle(self):
@@ -97,10 +95,9 @@
self.assertRaises(lib_exc.Forbidden,
self.os_primary.image_cache_client.list_cache)
- # Check there is nothing is queued for cached by us
+ # Check there is nothing cached by us
output = self.os_admin.image_cache_client.list_cache()
- self._assertCheckQueues(output['queued_images'])
- self._assertCheckCache(output['cached_images'])
+ self._assertCheckCache(output['cached_images'], self.cached_info)
# Non-existing image should raise NotFound exception
self.assertRaises(lib_exc.NotFound,
@@ -122,12 +119,6 @@
# Queue image for caching
self.os_admin.image_cache_client.cache_queue(image['id'])
- self.cached_info[image['id']] = 'queued'
- # Verify that we have 1 image for queueing and 0 for caching
- output = self.os_admin.image_cache_client.list_cache()
- self._assertCheckQueues(output['queued_images'])
- self._assertCheckCache(output['cached_images'])
-
# Wait for image caching
LOG.info("Waiting for image %s to get cached", image['id'])
caching = waiters.wait_for_caching(
@@ -135,10 +126,9 @@
self.os_admin.image_cache_client,
image['id'])
- self.cached_info[image['id']] = 'cached'
- # verify that we have image in cache and not in queued
- self._assertCheckQueues(caching['queued_images'])
- self._assertCheckCache(caching['cached_images'])
+ self.cached_info.append(image['id'])
+ # verify that we have image cached
+ self._assertCheckCache(caching['cached_images'], self.cached_info)
# Verify that we can delete images from caching and queueing with
# api call.
@@ -152,4 +142,78 @@
self.os_admin.image_cache_client.cache_clear,
target="invalid")
# Remove all data from local information
- self.cached_info = {}
+ self.cached_info = []
+
+ @decorators.idempotent_id('0a6b7e10-bc30-4a41-91ff-69fb4f5e65f2')
+ def test_remote_and_self_cache(self):
+ """Test image cache works with self and remote glance service"""
+ if not CONF.image.alternate_image_endpoint:
+ raise self.skipException('No image_remote service to test '
+ 'against')
+
+        # Check there is nothing cached by us on the current and
+        # remote node
+ output = self.os_admin.image_cache_client.list_cache()
+ self._assertCheckCache(output['cached_images'], self.cached_info)
+
+ output = self.os_admin.cache_client_remote.list_cache()
+ self._assertCheckCache(output['cached_images'],
+ self.cached_info_remote)
+
+ # Create one image
+ image = self.image_create_and_upload(name='first',
+ container_format='bare',
+ disk_format='raw',
+ visibility='private')
+ self.assertEqual('active', image['status'])
+
+ # Queue image for caching on local node
+ self.os_admin.image_cache_client.cache_queue(image['id'])
+ # Wait for image caching
+ LOG.info("Waiting for image %s to get cached", image['id'])
+ caching = waiters.wait_for_caching(
+ self.client,
+ self.os_admin.image_cache_client,
+ image['id'])
+ self.cached_info.append(image['id'])
+ # verify that we have image in cache on local node
+ self._assertCheckCache(caching['cached_images'], self.cached_info)
+ # verify that we don't have anything cached on remote node
+ output = self.os_admin.cache_client_remote.list_cache()
+ self._assertCheckCache(output['cached_images'],
+ self.cached_info_remote)
+
+ # cache same image on remote node
+ self.os_admin.cache_client_remote.cache_queue(image['id'])
+ # Wait for image caching
+ LOG.info("Waiting for image %s to get cached", image['id'])
+ caching = waiters.wait_for_caching(
+ self.client,
+ self.os_admin.cache_client_remote,
+ image['id'])
+ self.cached_info_remote.append(image['id'])
+
+ # verify that we have image cached on remote node
+ output = self.os_admin.cache_client_remote.list_cache()
+ self._assertCheckCache(output['cached_images'],
+ self.cached_info_remote)
+
+        # Verify that we can delete the image from the remote cache and it
+        # is still present in the local cache
+ self.os_admin.cache_client_remote.cache_clear()
+ output = self.os_admin.cache_client_remote.list_cache()
+ self.assertEqual(0, len(output['queued_images']))
+ self.assertEqual(0, len(output['cached_images']))
+
+ output = self.os_admin.image_cache_client.list_cache()
+ self._assertCheckCache(output['cached_images'], self.cached_info)
+
+ # Delete image from local cache as well
+ self.os_admin.image_cache_client.cache_clear()
+ output = self.os_admin.image_cache_client.list_cache()
+ self.assertEqual(0, len(output['queued_images']))
+ self.assertEqual(0, len(output['cached_images']))
+
+ # Remove all data from local and remote information
+ self.cached_info = []
+ self.cached_info_remote = []
diff --git a/tempest/api/image/v2/test_images_negative.py b/tempest/api/image/v2/test_images_negative.py
index 80c01a5..f0b891f 100644
--- a/tempest/api/image/v2/test_images_negative.py
+++ b/tempest/api/image/v2/test_images_negative.py
@@ -58,7 +58,10 @@
def test_get_delete_deleted_image(self):
"""Get and delete the deleted image"""
# create and delete image
- image = self.client.create_image(name='test',
+ image_name = data_utils.rand_name(
+ prefix=CONF.resource_name_prefix,
+ name="test")
+ image = self.client.create_image(name=image_name,
container_format='bare',
disk_format='raw')
self.client.delete_image(image['id'])
@@ -111,7 +114,10 @@
@decorators.idempotent_id('ab980a34-8410-40eb-872b-f264752f46e5')
def test_delete_protected_image(self):
"""Create a protected image"""
- image = self.create_image(protected=True)
+ image_name = data_utils.rand_name(
+ prefix=CONF.resource_name_prefix,
+ name="test")
+ image = self.create_image(name=image_name, protected=True)
self.addCleanup(self.client.update_image, image['id'],
[dict(replace="/protected", value=False)])
@@ -132,7 +138,10 @@
if not CONF.image_feature_enabled.os_glance_reserved:
raise self.skipException('os_glance_reserved is not enabled')
- image = self.create_image(name='test',
+ image_name = data_utils.rand_name(
+ prefix=CONF.resource_name_prefix,
+ name="test")
+ image = self.create_image(name=image_name,
container_format='bare',
disk_format='raw')
self.assertRaises(lib_exc.Forbidden,
@@ -152,9 +161,12 @@
if not CONF.image_feature_enabled.os_glance_reserved:
raise self.skipException('os_glance_reserved is not enabled')
+ image_name = data_utils.rand_name(
+ prefix=CONF.resource_name_prefix,
+ name="test")
self.assertRaises(lib_exc.Forbidden,
self.create_image,
- name='test',
+ name=image_name,
container_format='bare',
disk_format='raw',
os_glance_foo='bar')
@@ -195,7 +207,10 @@
if 'web-download' not in self.available_import_methods:
raise self.skipException('Server does not support '
'web-download import method')
- image = self.client.create_image(name='test',
+ image_name = data_utils.rand_name(
+ prefix=CONF.resource_name_prefix,
+ name="test")
+ image = self.client.create_image(name=image_name,
container_format='bare',
disk_format='raw')
# Now try to get image details
diff --git a/tempest/clients.py b/tempest/clients.py
index 5b31cf8..5338ed4 100644
--- a/tempest/clients.py
+++ b/tempest/clients.py
@@ -104,6 +104,15 @@
service=CONF.image.alternate_image_endpoint,
endpoint_type=CONF.image.alternate_image_endpoint_type,
region=CONF.image.region)
+ # NOTE(abhishekk): If no alternate endpoint is configured,
+ # this client will work the same as the base
+ # self.image_cache_client. If your test needs to know if
+ # these are different, check the config option to see if
+ # the alternate_image_endpoint is set.
+ self.cache_client_remote = self.image_v2.ImageCacheClient(
+ service=CONF.image.alternate_image_endpoint,
+ endpoint_type=CONF.image.alternate_image_endpoint_type,
+ region=CONF.image.region)
def _set_compute_clients(self):
self.agents_client = self.compute.AgentsClient()
diff --git a/tempest/config.py b/tempest/config.py
index 0e3f465..8ed1ff6 100644
--- a/tempest/config.py
+++ b/tempest/config.py
@@ -395,6 +395,17 @@
'allow_availability_zone_fallback=False in cinder.conf), '
'the volume create request will fail and the instance '
'will fail the build request.'),
+ cfg.StrOpt('migration_source_host',
+ default=None,
+               help="Specify source host for live-migration, cold-migration"
+                    " and resize tests. If the option is not set, a host is"
+                    " chosen automatically."),
+ cfg.StrOpt('migration_dest_host',
+ default=None,
+               help="Specify destination host for live-migration and cold"
+                    " migration. If the option is not set, a host is chosen"
+                    " automatically."),
+
]
placement_group = cfg.OptGroup(name='placement',
diff --git a/tempest/lib/cli/base.py b/tempest/lib/cli/base.py
index c9cffd2..c308c30 100644
--- a/tempest/lib/cli/base.py
+++ b/tempest/lib/cli/base.py
@@ -374,12 +374,13 @@
:param merge_stderr: if True the stderr buffer is merged into stdout
:type merge_stderr: boolean
"""
- creds = ('--os-username %s --os-project-name %s --os-password %s '
+ creds = ('--os-username %s --os-password %s '
'--os-auth-url %s' %
(self.username,
- self.tenant_name,
self.password,
self.uri))
+ if self.tenant_name is not None:
+ creds += ' --os-project-name %s' % self.tenant_name
if self.identity_api_version:
if cmd not in self.CLIENTS_WITHOUT_IDENTITY_VERSION:
creds += ' --os-identity-api-version %s' % (
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index 7c986cc..5f30909 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -441,7 +441,7 @@
'container': container}
args.update(kwargs)
backup = self.backups_client.create_backup(volume_id=volume_id,
- **kwargs)['backup']
+ **args)['backup']
self.addCleanup(self.backups_client.delete_backup, backup['id'])
waiters.wait_for_volume_resource_status(self.backups_client,
backup['id'], 'available')
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 882afff..3a93f74 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -28,25 +28,12 @@
LOG = log.getLogger(__name__)
-class TestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
- """Check VM connectivity after some advanced instance operations executed:
-
- * Stop/Start an instance
- * Reboot an instance
- * Rebuild an instance
- * Pause/Unpause an instance
- * Suspend/Resume an instance
- * Resize an instance
- """
-
- @classmethod
- def setup_clients(cls):
- super(TestNetworkAdvancedServerOps, cls).setup_clients()
- cls.admin_servers_client = cls.os_admin.servers_client
+class BaseTestNetworkAdvancedServerOps(manager.NetworkScenarioTest):
+ """Base class for defining methods used in tests."""
@classmethod
def skip_checks(cls):
- super(TestNetworkAdvancedServerOps, cls).skip_checks()
+ super(BaseTestNetworkAdvancedServerOps, cls).skip_checks()
if not (CONF.network.project_networks_reachable or
CONF.network.public_network_id):
msg = ('Either project_networks_reachable must be "true", or '
@@ -56,26 +43,52 @@
raise cls.skipException("Floating ips are not available")
@classmethod
+ def setup_clients(cls):
+ super(BaseTestNetworkAdvancedServerOps, cls).setup_clients()
+ cls.admin_servers_client = cls.os_admin.servers_client
+ cls.sec_group_rules_client = \
+ cls.os_primary.security_group_rules_client
+ cls.sec_groups_client = cls.os_primary.security_groups_client
+ cls.keypairs_client = cls.os_primary.keypairs_client
+ cls.floating_ips_client = cls.os_primary.floating_ips_client
+ cls.servers_client = cls.os_primary.servers_client
+
+ @classmethod
def setup_credentials(cls):
# Create no network resources for these tests.
cls.set_network_resources()
- super(TestNetworkAdvancedServerOps, cls).setup_credentials()
+ super(BaseTestNetworkAdvancedServerOps, cls).setup_credentials()
- def _setup_server(self, keypair):
+ def _setup_server(self, keypair, host_spec=None):
security_groups = []
if utils.is_extension_enabled('security-group', 'network'):
- security_group = self.create_security_group()
+ sec_args = {
+ 'security_group_rules_client':
+ self.sec_group_rules_client,
+ 'security_groups_client':
+ self.sec_groups_client
+ }
+ security_group = self.create_security_group(**sec_args)
security_groups = [{'name': security_group['name']}]
network, _, _ = self.setup_network_subnet_with_router()
- server = self.create_server(
- networks=[{'uuid': network['id']}],
- key_name=keypair['name'],
- security_groups=security_groups)
+ server_args = {
+ 'networks': [{'uuid': network['id']}],
+ 'key_name': keypair['name'],
+ 'security_groups': security_groups,
+ }
+
+ if host_spec is not None:
+ server_args['host'] = host_spec
+ # by default, host can be specified by administrators only
+ server_args['clients'] = self.os_admin
+
+ server = self.create_server(**server_args)
return server
def _setup_network(self, server, keypair):
public_network_id = CONF.network.public_network_id
- floating_ip = self.create_floating_ip(server, public_network_id)
+ floating_ip = self.create_floating_ip(
+ server, public_network_id, client=self.floating_ips_client)
# Verify that we can indeed connect to the server before we mess with
# it's state
self._wait_server_status_and_check_network_connectivity(
@@ -107,6 +120,148 @@
self._check_network_connectivity(server, keypair, floating_ip,
username=username)
+ def _test_server_connectivity_resize(self, src_host=None):
+ resize_flavor = CONF.compute.flavor_ref_alt
+ keypair = self.create_keypair()
+ server = self._setup_server(keypair, src_host)
+ if src_host:
+ server_host = self.get_host_for_server(server['id'])
+ self.assertEqual(server_host, src_host)
+ floating_ip = self._setup_network(server, keypair)
+ self.servers_client.resize_server(server['id'],
+ flavor_ref=resize_flavor)
+ waiters.wait_for_server_status(self.servers_client, server['id'],
+ 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize_server(server['id'])
+ server = self.servers_client.show_server(server['id'])['server']
+ # Nova API > 2.46 no longer includes flavor.id, and schema check
+ # will cover whether 'id' should be in flavor
+ if server['flavor'].get('id'):
+ self.assertEqual(resize_flavor, server['flavor']['id'])
+ else:
+ flavor = self.flavors_client.show_flavor(resize_flavor)['flavor']
+ self.assertEqual(flavor['name'], server['flavor']['original_name'])
+ for key in ['ram', 'vcpus', 'disk']:
+ self.assertEqual(flavor[key], server['flavor'][key])
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ def _test_server_connectivity_cold_migration(self, source_host=None,
+ dest_host=None):
+ keypair = self.create_keypair(client=self.keypairs_client)
+ server = self._setup_server(keypair, source_host)
+ floating_ip = self._setup_network(server, keypair)
+ src_host = self.get_host_for_server(server['id'])
+ if source_host:
+ self.assertEqual(src_host, source_host)
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ self.admin_servers_client.migrate_server(
+ server['id'], host=dest_host)
+ waiters.wait_for_server_status(self.servers_client, server['id'],
+ 'VERIFY_RESIZE')
+ self.servers_client.confirm_resize_server(server['id'])
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+ dst_host = self.get_host_for_server(server['id'])
+ if dest_host:
+ self.assertEqual(dst_host, dest_host)
+ self.assertNotEqual(src_host, dst_host)
+
+ def _test_server_connectivity_live_migration(self, source_host=None,
+ dest_host=None,
+ migration=False):
+ keypair = self.create_keypair(client=self.keypairs_client)
+ server = self._setup_server(keypair, source_host)
+ floating_ip = self._setup_network(server, keypair)
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ block_migration = (CONF.compute_feature_enabled.
+ block_migration_for_live_migration)
+ src_host = self.get_host_for_server(server['id'])
+ if source_host:
+ self.assertEqual(src_host, source_host)
+
+ downtime_meter = net_downtime.NetDowntimeMeter(
+ floating_ip['floating_ip_address'])
+ self.useFixture(downtime_meter)
+
+ migration_kwargs = {'host': None, 'block_migration': block_migration}
+
+ # check if microversion is less than 2.25 because of
+ # disk_over_commit is depracted since compute api version 2.25
+ # if min_microversion is None, it runs on version < 2.25
+ if not migration and (CONF.compute.min_microversion is None or
+ CONF.compute.min_microversion < '2.25'):
+ migration_kwargs['disk_over_commit'] = False
+
+ if dest_host:
+ migration_kwargs['host'] = dest_host
+
+ self.admin_servers_client.live_migrate_server(
+ server['id'], **migration_kwargs)
+ waiters.wait_for_server_status(self.servers_client,
+ server['id'], 'ACTIVE')
+
+ dst_host = self.get_host_for_server(server['id'])
+ if dest_host:
+ self.assertEqual(dst_host, dest_host)
+
+ self.assertNotEqual(src_host, dst_host, 'Server did not migrate')
+
+ # we first wait until the VM replies pings again, then check the
+ # network downtime
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ downtime = downtime_meter.get_downtime()
+ self.assertIsNotNone(downtime)
+ LOG.debug("Downtime seconds measured with downtime_meter = %r",
+ downtime)
+ allowed_downtime = CONF.validation.allowed_network_downtime
+ self.assertLessEqual(
+ downtime, allowed_downtime,
+ "Downtime of {} seconds is higher than expected '{}'".format(
+ downtime, allowed_downtime))
+
+ def _test_server_connectivity_cold_migration_revert(self, source_host=None,
+ dest_host=None):
+ keypair = self.create_keypair(client=self.keypairs_client)
+ server = self._setup_server(keypair, source_host)
+ floating_ip = self._setup_network(server, keypair)
+ src_host = self.get_host_for_server(server['id'])
+ if source_host:
+ self.assertEqual(src_host, source_host)
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+
+ self.admin_servers_client.migrate_server(
+ server['id'], host=dest_host)
+ waiters.wait_for_server_status(self.servers_client, server['id'],
+ 'VERIFY_RESIZE')
+ if dest_host:
+ self.assertEqual(dest_host,
+ self.get_host_for_server(server['id']))
+ self.servers_client.revert_resize_server(server['id'])
+ self._wait_server_status_and_check_network_connectivity(
+ server, keypair, floating_ip)
+ dst_host = self.get_host_for_server(server['id'])
+
+ self.assertEqual(src_host, dst_host)
+
+
+class TestNetworkAdvancedServerOps(BaseTestNetworkAdvancedServerOps):
+ """Check VM connectivity after some advanced instance operations executed:
+
+ * Stop/Start an instance
+ * Reboot an instance
+ * Rebuild an instance
+ * Pause/Unpause an instance
+ * Suspend/Resume an instance
+ """
+
@decorators.idempotent_id('61f1aa9a-1573-410e-9054-afa557cab021')
@decorators.attr(type='slow')
@utils.services('compute', 'network')
@@ -190,27 +345,7 @@
@decorators.attr(type='slow')
@utils.services('compute', 'network')
def test_server_connectivity_resize(self):
- resize_flavor = CONF.compute.flavor_ref_alt
- keypair = self.create_keypair()
- server = self._setup_server(keypair)
- floating_ip = self._setup_network(server, keypair)
- self.servers_client.resize_server(server['id'],
- flavor_ref=resize_flavor)
- waiters.wait_for_server_status(self.servers_client, server['id'],
- 'VERIFY_RESIZE')
- self.servers_client.confirm_resize_server(server['id'])
- server = self.servers_client.show_server(server['id'])['server']
- # Nova API > 2.46 no longer includes flavor.id, and schema check
- # will cover whether 'id' should be in flavor
- if server['flavor'].get('id'):
- self.assertEqual(resize_flavor, server['flavor']['id'])
- else:
- flavor = self.flavors_client.show_flavor(resize_flavor)['flavor']
- self.assertEqual(flavor['name'], server['flavor']['original_name'])
- for key in ['ram', 'vcpus', 'disk']:
- self.assertEqual(flavor[key], server['flavor'][key])
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
+ self._test_server_connectivity_resize()
@decorators.idempotent_id('a4858f6c-401e-4155-9a49-d5cd053d1a2f')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
@@ -221,22 +356,7 @@
@decorators.attr(type=['slow', 'multinode'])
@utils.services('compute', 'network')
def test_server_connectivity_cold_migration(self):
- keypair = self.create_keypair()
- server = self._setup_server(keypair)
- floating_ip = self._setup_network(server, keypair)
- src_host = self.get_host_for_server(server['id'])
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
-
- self.admin_servers_client.migrate_server(server['id'])
- waiters.wait_for_server_status(self.servers_client, server['id'],
- 'VERIFY_RESIZE')
- self.servers_client.confirm_resize_server(server['id'])
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
- dst_host = self.get_host_for_server(server['id'])
-
- self.assertNotEqual(src_host, dst_host)
+ self._test_server_connectivity_cold_migration()
@decorators.idempotent_id('03fd1562-faad-11e7-9ea0-fa163e65f5ce')
@testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
@@ -247,52 +367,7 @@
@decorators.attr(type=['slow', 'multinode'])
@utils.services('compute', 'network')
def test_server_connectivity_live_migration(self):
- keypair = self.create_keypair()
- server = self._setup_server(keypair)
- floating_ip = self._setup_network(server, keypair)
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
-
- block_migration = (CONF.compute_feature_enabled.
- block_migration_for_live_migration)
- old_host = self.get_host_for_server(server['id'])
-
- downtime_meter = net_downtime.NetDowntimeMeter(
- floating_ip['floating_ip_address'])
- self.useFixture(downtime_meter)
-
- migration_kwargs = {'host': None, 'block_migration': block_migration}
-
- # check if microversion is less than 2.25 because of
- # disk_over_commit is depracted since compute api version 2.25
- # if min_microversion is None, it runs on version < 2.25
- if (CONF.compute.min_microversion is None or
- CONF.compute.min_microversion < 2.25):
- migration_kwargs['disk_over_commit'] = False
-
- self.admin_servers_client.live_migrate_server(
- server['id'], **migration_kwargs)
-
- waiters.wait_for_server_status(self.servers_client,
- server['id'], 'ACTIVE')
-
- new_host = self.get_host_for_server(server['id'])
- self.assertNotEqual(old_host, new_host, 'Server did not migrate')
-
- # we first wait until the VM replies pings again, then check the
- # network downtime
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
-
- downtime = downtime_meter.get_downtime()
- self.assertIsNotNone(downtime)
- LOG.debug("Downtime seconds measured with downtime_meter = %r",
- downtime)
- allowed_downtime = CONF.validation.allowed_network_downtime
- self.assertLessEqual(
- downtime, allowed_downtime,
- "Downtime of {} seconds is higher than expected '{}'".format(
- downtime, allowed_downtime))
+ self._test_server_connectivity_live_migration()
@decorators.idempotent_id('25b188d7-0183-4b1e-a11d-15840c8e2fd6')
@testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
@@ -303,19 +378,95 @@
@decorators.attr(type=['slow', 'multinode'])
@utils.services('compute', 'network')
def test_server_connectivity_cold_migration_revert(self):
- keypair = self.create_keypair()
- server = self._setup_server(keypair)
- floating_ip = self._setup_network(server, keypair)
- src_host = self.get_host_for_server(server['id'])
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
+ self._test_server_connectivity_cold_migration_revert()
- self.admin_servers_client.migrate_server(server['id'])
- waiters.wait_for_server_status(self.servers_client, server['id'],
- 'VERIFY_RESIZE')
- self.servers_client.revert_resize_server(server['id'])
- self._wait_server_status_and_check_network_connectivity(
- server, keypair, floating_ip)
- dst_host = self.get_host_for_server(server['id'])
- self.assertEqual(src_host, dst_host)
+class TestNetworkAdvancedServerMigrationWithHost(
+ BaseTestNetworkAdvancedServerOps):
+
+ """Check VM connectivity with specifying source and destination hosts:
+
+ * Resize an instance by creating server on configured source host
+ * Migrate server by creating it on configured source host and migrate it
+ - Cold Migration
+ - Cold Migration with revert
+ - Live Migration
+ """
+ credentials = ['primary', 'admin']
+ compute_min_microversion = "2.74"
+
+ @classmethod
+ def skip_checks(cls):
+ super(TestNetworkAdvancedServerMigrationWithHost, cls).skip_checks()
+ if not (CONF.compute.migration_source_host or
+ CONF.compute.migration_dest_host):
+ raise cls.skipException("migration_source_host or "
+ "migration_dest_host is required")
+ if (CONF.compute.migration_source_host and
+ CONF.compute.migration_dest_host and
+ CONF.compute.migration_source_host ==
+ CONF.compute.migration_dest_host):
+ raise cls.skipException("migration_source_host and "
+ "migration_dest_host must be different")
+
+ @classmethod
+ def setup_clients(cls):
+ super(BaseTestNetworkAdvancedServerOps, cls).setup_clients()
+ cls.sec_group_rules_client = \
+ cls.os_admin.security_group_rules_client
+ cls.sec_groups_client = cls.os_admin.security_groups_client
+ cls.keypairs_client = cls.os_admin.keypairs_client
+ cls.floating_ips_client = cls.os_admin.floating_ips_client
+ cls.servers_client = cls.os_admin.servers_client
+ cls.admin_servers_client = cls.os_admin.servers_client
+
+ @decorators.idempotent_id('06e23934-79ae-11ee-b962-0242ac120002')
+ @testtools.skipUnless(CONF.compute_feature_enabled.resize,
+ 'Resize is not available.')
+ @decorators.attr(type='slow')
+ @utils.services('compute', 'network')
+ def test_server_connectivity_resize(self):
+ source_host = CONF.compute.migration_source_host
+ self._test_server_connectivity_resize(src_host=source_host)
+
+ @decorators.idempotent_id('14f0c9e6-79ae-11ee-b962-0242ac120002')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @decorators.attr(type=['slow', 'multinode'])
+ @utils.services('compute', 'network')
+ def test_server_connectivity_cold_migration(self):
+ source_host = CONF.compute.migration_source_host
+ dest_host = CONF.compute.migration_dest_host
+ self._test_server_connectivity_cold_migration(
+ source_host=source_host, dest_host=dest_host)
+
+ @decorators.idempotent_id('1c13933e-79ae-11ee-b962-0242ac120002')
+ @testtools.skipUnless(CONF.compute_feature_enabled.live_migration,
+ 'Live migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @decorators.attr(type=['slow', 'multinode'])
+ @utils.services('compute', 'network')
+ def test_server_connectivity_live_migration(self):
+ source_host = CONF.compute.migration_source_host
+ dest_host = CONF.compute.migration_dest_host
+ self._test_server_connectivity_live_migration(
+ source_host=source_host, dest_host=dest_host, migration=True)
+
+ @decorators.idempotent_id('2627789a-79ae-11ee-b962-0242ac120002')
+ @testtools.skipUnless(CONF.compute_feature_enabled.cold_migration,
+ 'Cold migration is not available.')
+ @testtools.skipUnless(CONF.compute.min_compute_nodes > 1,
+ 'Less than 2 compute nodes, skipping multinode '
+ 'tests.')
+ @decorators.attr(type=['slow', 'multinode'])
+ @utils.services('compute', 'network')
+ def test_server_connectivity_cold_migration_revert(self):
+ source_host = CONF.compute.migration_source_host
+ dest_host = CONF.compute.migration_dest_host
+ self._test_server_connectivity_cold_migration_revert(
+ source_host=source_host, dest_host=dest_host)
diff --git a/zuul.d/integrated-gate.yaml b/zuul.d/integrated-gate.yaml
index a08fe92..86a460a 100644
--- a/zuul.d/integrated-gate.yaml
+++ b/zuul.d/integrated-gate.yaml
@@ -238,6 +238,7 @@
# requests to services and can cause more oom issues. To avoid the
# oom issue, setting the concurrency to 4 in this job.
tempest_concurrency: 4
+ tempest_set_src_dest_host: true
devstack_localrc:
USE_PYTHON3: true
devstack_plugins: