Merge "Move volume v2 service client unit tests to v3 dir"
diff --git a/tempest/api/compute/admin/test_flavors_microversions.py b/tempest/api/compute/admin/test_flavors_microversions.py
index 9f014e6..31b9217 100644
--- a/tempest/api/compute/admin/test_flavors_microversions.py
+++ b/tempest/api/compute/admin/test_flavors_microversions.py
@@ -33,14 +33,14 @@
disk=10,
id=flavor_id)['id']
# Checking show API response schema
- self.flavors_client.show_flavor(new_flavor_id)['flavor']
+ self.flavors_client.show_flavor(new_flavor_id)
# Checking update API response schema
self.admin_flavors_client.update_flavor(new_flavor_id,
- description='new')['flavor']
+ description='new')
# Checking list details API response schema
- self.flavors_client.list_flavors(detail=True)['flavors']
+ self.flavors_client.list_flavors(detail=True)
# Checking list API response schema
- self.flavors_client.list_flavors()['flavors']
+ self.flavors_client.list_flavors()
class FlavorsV261TestJSON(FlavorsV255TestJSON):
diff --git a/tempest/api/compute/admin/test_floating_ips_bulk.py b/tempest/api/compute/admin/test_floating_ips_bulk.py
index 72d09ed..2d7e1a7 100644
--- a/tempest/api/compute/admin/test_floating_ips_bulk.py
+++ b/tempest/api/compute/admin/test_floating_ips_bulk.py
@@ -25,6 +25,8 @@
CONF = config.CONF
+# TODO(stephenfin): Remove this test class once the nova queens branch goes
+# into extended maintenance mode.
class FloatingIPsBulkAdminTestJSON(base.BaseV2ComputeAdminTest):
"""Tests Floating IPs Bulk APIs that require admin privileges.
@@ -32,6 +34,7 @@
content/ext-os-floating-ips-bulk.html
"""
max_microversion = '2.35'
+ depends_on_nova_network = True
@classmethod
def setup_clients(cls):
diff --git a/tempest/api/compute/admin/test_live_migration.py b/tempest/api/compute/admin/test_live_migration.py
index bc38144..8350f7c 100644
--- a/tempest/api/compute/admin/test_live_migration.py
+++ b/tempest/api/compute/admin/test_live_migration.py
@@ -66,7 +66,8 @@
kwargs = dict()
block_migration = getattr(self, 'block_migration', None)
if self.block_migration is None:
- kwargs['disk_over_commit'] = False
+ if self.is_requested_microversion_compatible('2.24'):
+ kwargs['disk_over_commit'] = False
block_migration = (CONF.compute_feature_enabled.
block_migration_for_live_migration and
not volume_backed)
diff --git a/tempest/api/compute/admin/test_live_migration_negative.py b/tempest/api/compute/admin/test_live_migration_negative.py
index deabbc2..8327a3b 100644
--- a/tempest/api/compute/admin/test_live_migration_negative.py
+++ b/tempest/api/compute/admin/test_live_migration_negative.py
@@ -32,9 +32,10 @@
def _migrate_server_to(self, server_id, dest_host):
bmflm = CONF.compute_feature_enabled.block_migration_for_live_migration
- self.admin_servers_client.live_migrate_server(
- server_id, host=dest_host, block_migration=bmflm,
- disk_over_commit=False)
+ kwargs = dict(host=dest_host, block_migration=bmflm)
+ if self.is_requested_microversion_compatible('2.24'):
+ kwargs['disk_over_commit'] = False
+ self.admin_servers_client.live_migrate_server(server_id, **kwargs)
@decorators.attr(type=['negative'])
@decorators.idempotent_id('7fb7856e-ae92-44c9-861a-af62d7830bcb')
diff --git a/tempest/api/compute/admin/test_services_negative.py b/tempest/api/compute/admin/test_services_negative.py
index 993c8ec..d264829 100644
--- a/tempest/api/compute/admin/test_services_negative.py
+++ b/tempest/api/compute/admin/test_services_negative.py
@@ -74,8 +74,6 @@
@classmethod
def resource_setup(cls):
super(ServicesAdminNegativeV253TestJSON, cls).resource_setup()
- # Nova returns 400 if `binary` is not nova-compute.
- cls.binary = 'nova-compute'
cls.fake_service_id = data_utils.rand_uuid()
@decorators.attr(type=['negative'])
diff --git a/tempest/api/volume/admin/test_volume_services_negative.py b/tempest/api/volume/admin/test_volume_services_negative.py
index 6f3dbc6..3a863a1 100644
--- a/tempest/api/volume/admin/test_volume_services_negative.py
+++ b/tempest/api/volume/admin/test_volume_services_negative.py
@@ -23,10 +23,9 @@
@classmethod
def resource_setup(cls):
super(VolumeServicesNegativeTest, cls).resource_setup()
- cls.services = cls.admin_volume_services_client.list_services()[
- 'services']
- cls.host = cls.services[0]['host']
- cls.binary = cls.services[0]['binary']
+ services = cls.admin_volume_services_client.list_services()['services']
+ cls.host = services[0]['host']
+ cls.binary = services[0]['binary']
@decorators.attr(type='negative')
@decorators.idempotent_id('3246ce65-ba70-4159-aa3b-082c28e4b484')
diff --git a/tempest/api/volume/test_volumes_extend.py b/tempest/api/volume/test_volumes_extend.py
index 5d339c4..ac9a9c7 100644
--- a/tempest/api/volume/test_volumes_extend.py
+++ b/tempest/api/volume/test_volumes_extend.py
@@ -33,7 +33,7 @@
def test_volume_extend(self):
# Extend Volume Test.
volume = self.create_volume(image_ref=self.image_ref)
- extend_size = volume['size'] + 1
+ extend_size = volume['size'] * 2
self.volumes_client.extend_volume(volume['id'],
new_size=extend_size)
waiters.wait_for_volume_resource_status(self.volumes_client,
@@ -48,7 +48,7 @@
volume = self.create_volume()
self.create_snapshot(volume['id'])
- extend_size = volume['size'] + 1
+ extend_size = volume['size'] * 2
self.volumes_client.extend_volume(volume['id'], new_size=extend_size)
waiters.wait_for_volume_resource_status(self.volumes_client,
diff --git a/tempest/api/volume/test_volumes_negative.py b/tempest/api/volume/test_volumes_negative.py
index f139283..866bd87 100644
--- a/tempest/api/volume/test_volumes_negative.py
+++ b/tempest/api/volume/test_volumes_negative.py
@@ -103,21 +103,24 @@
def test_create_volume_with_nonexistent_volume_type(self):
# Should not be able to create volume with non-existent volume type
self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
- size='1', volume_type=data_utils.rand_uuid())
+ size=CONF.volume.volume_size,
+ volume_type=data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0c36f6ae-4604-4017-b0a9-34fdc63096f9')
def test_create_volume_with_nonexistent_snapshot_id(self):
# Should not be able to create volume with non-existent snapshot
self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
- size='1', snapshot_id=data_utils.rand_uuid())
+ size=CONF.volume.volume_size,
+ snapshot_id=data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('47c73e08-4be8-45bb-bfdf-0c4e79b88344')
def test_create_volume_with_nonexistent_source_volid(self):
# Should not be able to create volume with non-existent source volume
self.assertRaises(lib_exc.NotFound, self.volumes_client.create_volume,
- size='1', source_volid=data_utils.rand_uuid())
+ size=CONF.volume.volume_size,
+ source_volid=data_utils.rand_uuid())
@decorators.attr(type=['negative'])
@decorators.idempotent_id('0186422c-999a-480e-a026-6a665744c30c')
diff --git a/tempest/api/volume/test_volumes_snapshots_negative.py b/tempest/api/volume/test_volumes_snapshots_negative.py
index ea5f036..0453c0a 100644
--- a/tempest/api/volume/test_volumes_snapshots_negative.py
+++ b/tempest/api/volume/test_volumes_snapshots_negative.py
@@ -50,7 +50,7 @@
@decorators.idempotent_id('677863d1-34f9-456d-b6ac-9924f667a7f4')
def test_volume_from_snapshot_decreasing_size(self):
# Creates a volume from a snapshot, passing a size different from the source
- src_size = CONF.volume.volume_size + 1
+ src_size = CONF.volume.volume_size * 2
src_vol = self.create_volume(size=src_size)
src_snap = self.create_snapshot(src_vol['id'])
@@ -58,7 +58,7 @@
# Destination volume smaller than source
self.assertRaises(lib_exc.BadRequest,
self.volumes_client.create_volume,
- size=src_size - 1,
+ size=CONF.volume.volume_size,
snapshot_id=src_snap['id'])
@decorators.attr(type=['negative'])
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index c5d41a0..9db7f92 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -543,7 +543,7 @@
volume['id'], 'available')
def ping_ip_address(self, ip_address, should_succeed=True,
- ping_timeout=None, mtu=None):
+ ping_timeout=None, mtu=None, server=None):
timeout = ping_timeout or CONF.validation.ping_timeout
cmd = ['ping', '-c1', '-w1']
@@ -577,12 +577,16 @@
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'result': 'expected' if result else 'unexpected'
})
+ if server:
+ self._log_console_output([server])
return result
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True,
+ extra_msg="",
+ server=None,
mtu=None):
"""Check server connectivity
@@ -592,43 +596,36 @@
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if it succeeds
+ :param extra_msg: Message to help with debugging if ``ping_ip_address``
+ fails
+ :param server: The server whose console to log for debugging
:param mtu: network MTU to use for connectivity validation
:raises: AssertionError if the result of the connectivity check does
not match the value of the should_connect param
"""
+ LOG.debug('checking network connections to IP %s with user: %s',
+ ip_address, username)
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
+ if extra_msg:
+ msg = "%s\n%s" % (extra_msg, msg)
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect,
- mtu=mtu),
+ mtu=mtu, server=server),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
- self.get_remote_client(ip_address, username, private_key)
-
- def check_public_network_connectivity(self, ip_address, username,
- private_key, should_connect=True,
- msg=None, servers=None, mtu=None):
- # The target login is assumed to have been configured for
- # key-based authentication by cloud-init.
- LOG.debug('checking network connections to IP %s with user: %s',
- ip_address, username)
- try:
- self.check_vm_connectivity(ip_address,
- username,
- private_key,
- should_connect=should_connect,
- mtu=mtu)
- except Exception:
- ex_msg = 'Public network connectivity check failed'
- if msg:
- ex_msg += ": " + msg
- LOG.exception(ex_msg)
- self._log_console_output(servers)
- raise
+ try:
+ self.get_remote_client(ip_address, username, private_key,
+ server=server)
+ except Exception:
+ if not extra_msg:
+ extra_msg = 'Failed to ssh to %s' % ip_address
+ LOG.exception(extra_msg)
+ raise
def create_floating_ip(self, thing, pool_name=None):
"""Create a floating IP and associates to a server on Nova"""
diff --git a/tempest/scenario/test_network_advanced_server_ops.py b/tempest/scenario/test_network_advanced_server_ops.py
index 87ce951..b0e4669 100644
--- a/tempest/scenario/test_network_advanced_server_ops.py
+++ b/tempest/scenario/test_network_advanced_server_ops.py
@@ -90,9 +90,10 @@
floating_ip_addr = floating_ip['floating_ip_address']
# Check FloatingIP status before checking the connectivity
self.check_floating_ip_status(floating_ip, 'ACTIVE')
- self.check_public_network_connectivity(floating_ip_addr, username,
- private_key, should_connect,
- servers=[server])
+ self.check_vm_connectivity(floating_ip_addr, username,
+ private_key, should_connect,
+ 'Public network connectivity check failed',
+ server)
def _wait_server_status_and_check_network_connectivity(self, server,
keypair,
diff --git a/tempest/scenario/test_network_basic_ops.py b/tempest/scenario/test_network_basic_ops.py
index 8212e75..c1132cf 100644
--- a/tempest/scenario/test_network_basic_ops.py
+++ b/tempest/scenario/test_network_basic_ops.py
@@ -175,7 +175,7 @@
def _get_server_key(self, server):
return self.keypairs[server['key_name']]['private_key']
- def check_public_network_connectivity(
+ def _check_public_network_connectivity(
self, should_connect=True, msg=None,
should_check_floating_ip_status=True, mtu=None):
"""Verifies connectivty to a VM via public network and floating IP
@@ -199,13 +199,18 @@
if should_connect:
private_key = self._get_server_key(server)
floatingip_status = 'ACTIVE'
+
# Check FloatingIP Status before initiating a connection
if should_check_floating_ip_status:
self.check_floating_ip_status(floating_ip, floatingip_status)
- # call the common method in the parent class
- super(TestNetworkBasicOps, self).check_public_network_connectivity(
- ip_address, ssh_login, private_key, should_connect, msg,
- self.servers, mtu=mtu)
+
+ message = 'Public network connectivity check failed'
+ if msg:
+ message += '. Reason: %s' % msg
+
+ self.check_vm_connectivity(
+ ip_address, ssh_login, private_key, should_connect,
+ message, server, mtu=mtu)
def _disassociate_floating_ips(self):
floating_ip, _ = self.floating_ip_tuple
@@ -404,17 +409,17 @@
"""
self._setup_network_and_servers()
- self.check_public_network_connectivity(should_connect=True)
+ self._check_public_network_connectivity(should_connect=True)
self._check_network_internal_connectivity(network=self.network)
self._check_network_external_connectivity()
self._disassociate_floating_ips()
- self.check_public_network_connectivity(should_connect=False,
- msg="after disassociate "
- "floating ip")
+ self._check_public_network_connectivity(should_connect=False,
+ msg="after disassociate "
+ "floating ip")
self._reassociate_floating_ips()
- self.check_public_network_connectivity(should_connect=True,
- msg="after re-associate "
- "floating ip")
+ self._check_public_network_connectivity(should_connect=True,
+ msg="after re-associate "
+ "floating ip")
@decorators.idempotent_id('b158ea55-472e-4086-8fa9-c64ac0c6c1d0')
@testtools.skipUnless(utils.is_extension_enabled('net-mtu', 'network'),
@@ -425,10 +430,10 @@
"""Validate that network MTU sized frames fit through."""
self._setup_network_and_servers()
# first check that connectivity works in general for the instance
- self.check_public_network_connectivity(should_connect=True)
+ self._check_public_network_connectivity(should_connect=True)
# now that we checked general connectivity, test that full size frames
# can also pass between nodes
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=True, mtu=self.network['mtu'])
@decorators.idempotent_id('1546850e-fbaa-42f5-8b5f-03d8a6a95f15')
@@ -467,7 +472,7 @@
"""
self._setup_network_and_servers()
- self.check_public_network_connectivity(should_connect=True)
+ self._check_public_network_connectivity(should_connect=True)
self._check_network_internal_connectivity(network=self.network)
self._check_network_external_connectivity()
self._create_new_network(create_gateway=True)
@@ -502,7 +507,7 @@
"""
self._setup_network_and_servers()
- self.check_public_network_connectivity(should_connect=True)
+ self._check_public_network_connectivity(should_connect=True)
self._create_new_network()
self._hotplug_server()
self._check_network_internal_connectivity(network=self.new_net)
@@ -524,19 +529,19 @@
admin_state_up attribute of router to True
"""
self._setup_network_and_servers()
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=True, msg="before updating "
"admin_state_up of router to False")
self._update_router_admin_state(self.router, False)
# TODO(alokmaurya): Remove should_check_floating_ip_status=False check
# once bug 1396310 is fixed
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=False, msg="after updating "
"admin_state_up of router to False",
should_check_floating_ip_status=False)
self._update_router_admin_state(self.router, True)
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=True, msg="after updating "
"admin_state_up of router to True")
@@ -581,7 +586,7 @@
renew_timeout = CONF.network.build_timeout
self._setup_network_and_servers(dns_nameservers=[initial_dns_server])
- self.check_public_network_connectivity(should_connect=True)
+ self._check_public_network_connectivity(should_connect=True)
floating_ip, server = self.floating_ip_tuple
ip_address = floating_ip['floating_ip_address']
@@ -656,20 +661,20 @@
private_key=private_key,
server=server2)
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=True, msg="before updating "
"admin_state_up of instance port to False")
self.check_remote_connectivity(ssh_client, dest=server_pip,
should_succeed=True)
self.ports_client.update_port(port_id, admin_state_up=False)
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=False, msg="after updating "
"admin_state_up of instance port to False",
should_check_floating_ip_status=False)
self.check_remote_connectivity(ssh_client, dest=server_pip,
should_succeed=False)
self.ports_client.update_port(port_id, admin_state_up=True)
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=True, msg="after updating "
"admin_state_up of instance port to True")
self.check_remote_connectivity(ssh_client, dest=server_pip,
@@ -766,7 +771,7 @@
msg = "Rescheduling test does not apply to distributed routers."
raise self.skipException(msg)
- self.check_public_network_connectivity(should_connect=True)
+ self._check_public_network_connectivity(should_connect=True)
# remove resource from agents
hosting_agents = set(a["id"] for a in
@@ -783,7 +788,7 @@
'unscheduling router failed')
# verify resource is un-functional
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=False,
msg='after router unscheduling',
)
@@ -800,7 +805,7 @@
"target agent")
# verify resource is functional
- self.check_public_network_connectivity(
+ self._check_public_network_connectivity(
should_connect=True,
msg='After router rescheduling')
@@ -834,7 +839,7 @@
# Create server
self._setup_network_and_servers()
- self.check_public_network_connectivity(should_connect=True)
+ self._check_public_network_connectivity(should_connect=True)
self._create_new_network()
self._hotplug_server()
fip, server = self.floating_ip_tuple