Merge "Change way how second NIC in guest OS is configured" into mcp/xenial/rocky
diff --git a/tempest/api/compute/servers/test_create_server.py b/tempest/api/compute/servers/test_create_server.py
index 4f0dbad..846f3be 100644
--- a/tempest/api/compute/servers/test_create_server.py
+++ b/tempest/api/compute/servers/test_create_server.py
@@ -162,6 +162,12 @@
@classmethod
def skip_checks(cls):
super(ServersTestBootFromVolume, cls).skip_checks()
+ msg = None
if not utils.get_service_list()['volume']:
msg = "Volume service not enabled."
+ # These tests are not run upstream and do not work with Ironic
+ if (hasattr(CONF.service_available, 'ironic') and
+ CONF.service_available.ironic):
+ msg = "Volume tests are not available for ironic."
+ if msg is not None:
raise cls.skipException(msg)
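The guard above probes the config option instead of referencing it directly, since downstream-only options such as 'ironic', 'barbican' or 'contrail' may not be registered at all in an upstream tempest configuration; a minimal illustration of the pattern used throughout this change (the variable names are placeholders):

    from tempest import config

    CONF = config.CONF

    # hasattr()/getattr() keep the check safe when the option is not registered.
    ironic_enabled = (hasattr(CONF.service_available, 'ironic') and
                      CONF.service_available.ironic)
    barbican_enabled = getattr(CONF.service_available, 'barbican', False)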
diff --git a/tempest/api/compute/servers/test_server_actions.py b/tempest/api/compute/servers/test_server_actions.py
index f6494b5..901a391 100644
--- a/tempest/api/compute/servers/test_server_actions.py
+++ b/tempest/api/compute/servers/test_server_actions.py
@@ -608,6 +608,8 @@
@decorators.idempotent_id('77eba8e0-036e-4635-944b-f7a8f3b78dc9')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Barbican service is enabled; unshelve will not work')
@utils.services('image')
def test_shelve_unshelve_server(self):
if CONF.image_feature_enabled.api_v2:
diff --git a/tempest/api/compute/servers/test_servers_negative.py b/tempest/api/compute/servers/test_servers_negative.py
index 0c1c05c..602f3be 100644
--- a/tempest/api/compute/servers/test_servers_negative.py
+++ b/tempest/api/compute/servers/test_servers_negative.py
@@ -475,6 +475,8 @@
@decorators.idempotent_id('443e4f9b-e6bf-4389-b601-3a710f15fddd')
@testtools.skipUnless(CONF.compute_feature_enabled.shelve,
'Shelve is not available.')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Barbican service is enabled; unshelve will not work')
@decorators.attr(type=['negative'])
def test_shelve_shelved_server(self):
# shelve a shelved server.
diff --git a/tempest/api/compute/volumes/test_attach_volume.py b/tempest/api/compute/volumes/test_attach_volume.py
index caa445d..f1869a9 100644
--- a/tempest/api/compute/volumes/test_attach_volume.py
+++ b/tempest/api/compute/volumes/test_attach_volume.py
@@ -221,6 +221,8 @@
self.assertEqual(number_of_volumes, counted_volumes)
@decorators.idempotent_id('13a940b6-3474-4c3c-b03f-29b89112bfee')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Barbican service is enabled; unshelve will not work')
def test_attach_volume_shelved_or_offload_server(self):
# Create server, count number of volumes on it, shelve
# server and attach pre-created volume to shelved server
@@ -246,6 +248,8 @@
self.assertIsNotNone(volume_attachment['device'])
@decorators.idempotent_id('b54e86dd-a070-49c4-9c07-59ae6dae15aa')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Barbican service is enabled; unshelve will not work')
def test_detach_volume_shelved_or_offload_server(self):
# Count number of volumes on instance, shelve
# server and attach pre-created volume to shelved server
diff --git a/tempest/api/network/test_networks.py b/tempest/api/network/test_networks.py
index 7345fd1..f727f02 100644
--- a/tempest/api/network/test_networks.py
+++ b/tempest/api/network/test_networks.py
@@ -310,6 +310,9 @@
def test_create_delete_subnet_with_dhcp_enabled(self):
self._create_verify_delete_subnet(enable_dhcp=True)
+ @testtools.skipIf(getattr(CONF.service_available, 'contrail', False),
+ "Update of gateway is not supported in case of"
+ "Contrail")
@decorators.idempotent_id('3d3852eb-3009-49ec-97ac-5ce83b73010a')
def test_update_subnet_gw_dns_host_routes_dhcp(self):
network = self.create_network()
diff --git a/tempest/api/network/test_ports.py b/tempest/api/network/test_ports.py
index 246a5c3..8c8b0b1 100644
--- a/tempest/api/network/test_ports.py
+++ b/tempest/api/network/test_ports.py
@@ -300,6 +300,9 @@
for port in ports:
self.assertEqual(sorted(fields), sorted(port.keys()))
+ @testtools.skipIf(getattr(CONF.service_available, 'contrail', False),
+ "Contrail backend don't allow removing of fixed ip from "
+ "a port.")
@decorators.idempotent_id('63aeadd4-3b49-427f-a3b1-19ca81f06270')
def test_create_update_port_with_second_ip(self):
# Create a network with two subnets
diff --git a/tempest/common/waiters.py b/tempest/common/waiters.py
index 0e86f05..9fa2e7f 100644
--- a/tempest/common/waiters.py
+++ b/tempest/common/waiters.py
@@ -14,6 +14,7 @@
import time
from oslo_log import log as logging
+import six
from tempest.common import image as common_image
from tempest import config
@@ -265,13 +266,16 @@
def wait_for_interface_status(client, server_id, port_id, status):
- """Waits for an interface to reach a given status."""
+ """Waits for an interface to reach a (one of) given status(es)."""
+ if isinstance(status, six.string_types):
+ status = [status]
+
body = (client.show_interface(server_id, port_id)
['interfaceAttachment'])
interface_status = body['port_state']
start = int(time.time())
- while(interface_status != status):
+ while(interface_status not in status):
time.sleep(client.build_interval)
body = (client.show_interface(server_id, port_id)
['interfaceAttachment'])
@@ -279,8 +283,8 @@
timed_out = int(time.time()) - start >= client.build_timeout
- if interface_status != status and timed_out:
- message = ('Interface %s failed to reach %s status '
+ if interface_status not in status and timed_out:
+ message = ('Interface %s failed to reach any of %s statuses '
'(current %s) within the required time (%s s).' %
(port_id, status, interface_status,
client.build_timeout))
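A minimal usage sketch of the updated wait_for_interface_status, which now accepts either a single status string or a list of acceptable statuses (server['id'] and port_id below are placeholder values from a calling test):

    from tempest.common import waiters

    # A single status still works; the waiter wraps it into a one-element list.
    waiters.wait_for_interface_status(
        self.os_admin.interfaces_client, server['id'], port_id, 'ACTIVE')

    # With Ironic, a port may legitimately stay DOWN, so accept both states.
    waiters.wait_for_interface_status(
        self.os_admin.interfaces_client, server['id'], port_id,
        ['ACTIVE', 'DOWN'])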
diff --git a/tempest/scenario/manager.py b/tempest/scenario/manager.py
index cdc30b9..8d25bff 100644
--- a/tempest/scenario/manager.py
+++ b/tempest/scenario/manager.py
@@ -83,7 +83,6 @@
if CONF.service_available.cinder:
cls.volumes_client = cls.os_primary.volumes_client_latest
cls.snapshots_client = cls.os_primary.snapshots_client_latest
- cls.backups_client = cls.os_primary.backups_client_latest
# ## Test functions library
#
@@ -94,10 +93,6 @@
if not client:
client = self.ports_client
name = data_utils.rand_name(self.__class__.__name__)
- if CONF.network.port_vnic_type and 'binding:vnic_type' not in kwargs:
- kwargs['binding:vnic_type'] = CONF.network.port_vnic_type
- if CONF.network.port_profile and 'binding:profile' not in kwargs:
- kwargs['binding:profile'] = CONF.network.port_profile
result = client.create_port(
name=name,
network_id=network_id,
@@ -146,18 +141,13 @@
vnic_type = CONF.network.port_vnic_type
profile = CONF.network.port_profile
- # If vnic_type or profile are configured create port for
+ # If vnic_type is configured, create a port for
# every network
- if vnic_type or profile:
+ if vnic_type:
ports = []
- create_port_body = {}
- if vnic_type:
- create_port_body['binding:vnic_type'] = vnic_type
-
- if profile:
- create_port_body['binding:profile'] = profile
-
+ create_port_body = {'binding:vnic_type': vnic_type,
+ 'binding:profile': profile}
if kwargs:
# Convert security group names to security group ids
# to pass to create_port
@@ -249,37 +239,6 @@
volume = self.volumes_client.show_volume(volume['id'])['volume']
return volume
- def create_backup(self, volume_id, name=None, description=None,
- force=False, snapshot_id=None, incremental=False,
- container=None):
-
- name = name or data_utils.rand_name(
- self.__class__.__name__ + "-backup")
- kwargs = {'name': name,
- 'description': description,
- 'force': force,
- 'snapshot_id': snapshot_id,
- 'incremental': incremental,
- 'container': container}
- backup = self.backups_client.create_backup(volume_id=volume_id,
- **kwargs)['backup']
- self.addCleanup(self.backups_client.delete_backup, backup['id'])
- waiters.wait_for_volume_resource_status(self.backups_client,
- backup['id'], 'available')
- return backup
-
- def restore_backup(self, backup_id):
- restore = self.backups_client.restore_backup(backup_id)['restore']
- self.addCleanup(self.volumes_client.delete_volume,
- restore['volume_id'])
- waiters.wait_for_volume_resource_status(self.backups_client,
- backup_id, 'available')
- waiters.wait_for_volume_resource_status(self.volumes_client,
- restore['volume_id'],
- 'available')
- self.assertEqual(backup_id, restore['backup_id'])
- return restore
-
def create_volume_snapshot(self, volume_id, name=None, description=None,
metadata=None, force=False):
name = name or data_utils.rand_name(
@@ -295,15 +254,15 @@
self.addCleanup(self.snapshots_client.delete_snapshot, snapshot['id'])
waiters.wait_for_volume_resource_status(self.snapshots_client,
snapshot['id'], 'available')
- snapshot = self.snapshots_client.show_snapshot(
- snapshot['id'])['snapshot']
return snapshot
def create_volume_type(self, client=None, name=None, backend_name=None):
if not client:
- client = self.os_admin.volume_types_client_latest
- randomized_name = name or data_utils.rand_name(
- 'volume-type-' + self.__class__.__name__)
+ client = self.os_admin.volume_types_v2_client
+ if not name:
+ class_name = self.__class__.__name__
+ name = data_utils.rand_name(class_name + '-volume-type')
+ randomized_name = data_utils.rand_name('scenario-type-' + name)
LOG.debug("Creating a volume type: %s on backend %s",
randomized_name, backend_name)
@@ -447,9 +406,7 @@
disk_format=img_disk_format,
properties=img_properties)
except IOError:
- LOG.warning(
- "A(n) %s image was not found. Retrying with uec image.",
- img_disk_format)
+ LOG.debug("A qcow2 image was not found. Try to get a uec image.")
kernel = self._image_create('scenario-aki', 'aki', aki_img_path)
ramdisk = self._image_create('scenario-ari', 'ari', ari_img_path)
properties = {'kernel_id': kernel, 'ramdisk_id': ramdisk}
@@ -547,7 +504,7 @@
volume['id'], 'available')
def ping_ip_address(self, ip_address, should_succeed=True,
- ping_timeout=None, mtu=None, server=None):
+ ping_timeout=None, mtu=None):
timeout = ping_timeout or CONF.validation.ping_timeout
cmd = ['ping', '-c1', '-w1']
@@ -581,16 +538,12 @@
'caller': caller, 'ip': ip_address, 'timeout': timeout,
'result': 'expected' if result else 'unexpected'
})
- if server:
- self._log_console_output([server])
return result
def check_vm_connectivity(self, ip_address,
username=None,
private_key=None,
should_connect=True,
- extra_msg="",
- server=None,
mtu=None):
"""Check server connectivity
@@ -600,36 +553,43 @@
:param should_connect: True/False indicates positive/negative test
positive - attempt ping and ssh
negative - attempt ping and fail if succeed
- :param extra_msg: Message to help with debugging if ``ping_ip_address``
- fails
- :param server: The server whose console to log for debugging
:param mtu: network MTU to use for connectivity validation
:raises: AssertError if the result of the connectivity check does
not match the value of the should_connect param
"""
- LOG.debug('checking network connections to IP %s with user: %s',
- ip_address, username)
if should_connect:
msg = "Timed out waiting for %s to become reachable" % ip_address
else:
msg = "ip address %s is reachable" % ip_address
- if extra_msg:
- msg = "%s\n%s" % (extra_msg, msg)
self.assertTrue(self.ping_ip_address(ip_address,
should_succeed=should_connect,
- mtu=mtu, server=server),
+ mtu=mtu),
msg=msg)
if should_connect:
# no need to check ssh for negative connectivity
- try:
- self.get_remote_client(ip_address, username, private_key,
- server=server)
- except Exception:
- if not extra_msg:
- extra_msg = 'Failed to ssh to %s' % ip_address
- LOG.exception(extra_msg)
- raise
+ self.get_remote_client(ip_address, username, private_key)
+
+ def check_public_network_connectivity(self, ip_address, username,
+ private_key, should_connect=True,
+ msg=None, servers=None, mtu=None):
+ # The target login is assumed to have been configured for
+ # key-based authentication by cloud-init.
+ LOG.debug('checking network connections to IP %s with user: %s',
+ ip_address, username)
+ try:
+ self.check_vm_connectivity(ip_address,
+ username,
+ private_key,
+ should_connect=should_connect,
+ mtu=mtu)
+ except Exception:
+ ex_msg = 'Public network connectivity check failed'
+ if msg:
+ ex_msg += ": " + msg
+ LOG.exception(ex_msg)
+ self._log_console_output(servers)
+ raise
def create_floating_ip(self, thing, pool_name=None):
"""Create a floating IP and associates to a server on Nova"""
@@ -646,10 +606,9 @@
return floating_ip
def create_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None):
ssh_client = self.get_remote_client(ip_address,
- private_key=private_key,
- server=server)
+ private_key=private_key)
if dev_name is not None:
ssh_client.make_fs(dev_name)
ssh_client.exec_command('sudo mount /dev/%s %s' % (dev_name,
@@ -663,10 +622,9 @@
return timestamp
def get_timestamp(self, ip_address, dev_name=None, mount_path='/mnt',
- private_key=None, server=None):
+ private_key=None):
ssh_client = self.get_remote_client(ip_address,
- private_key=private_key,
- server=server)
+ private_key=private_key)
if dev_name is not None:
ssh_client.mount(dev_name, mount_path)
timestamp = ssh_client.exec_command('sudo cat %s/timestamp'
@@ -812,16 +770,17 @@
return subnet
def _get_server_port_id_and_ip4(self, server, ip_addr=None):
- if ip_addr:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'],
- fixed_ips='ip_address=%s' % ip_addr)['ports']
- else:
- ports = self.os_admin.ports_client.list_ports(
- device_id=server['id'])['ports']
+ ports = self.os_admin.ports_client.list_ports(
+ device_id=server['id'], fixed_ip=ip_addr)['ports']
# A port can have more than one IP address in some cases.
- # If the network is dual-stack (IPv4 + IPv6), this port is associated
- # with 2 subnets
+ # If the network is dual-stack (IPv4 + IPv6), this port
+ # is associated with 2 subnets
+ port_map = [{"id": p["id"],
+ "ip": fxip["ip_address"],
+ "status": p["status"]}
+ for p in ports
+ for fxip in p["fixed_ips"]
+ if netutils.is_valid_ipv4(fxip["ip_address"])]
p_status = ['ACTIVE']
# NOTE(vsaienko) With Ironic, instances live on separate hardware
# servers. Neutron does not bind ports for Ironic instances, as a
@@ -829,22 +788,31 @@
# TODO(vsaienko) remove once bug: #1599836 is resolved.
if getattr(CONF.service_available, 'ironic', False):
p_status.append('DOWN')
- port_map = [(p["id"], fxip["ip_address"])
- for p in ports
- for fxip in p["fixed_ips"]
- if (netutils.is_valid_ipv4(fxip["ip_address"]) and
- p['status'] in p_status)]
- inactive = [p for p in ports if p['status'] != 'ACTIVE']
+ # TODO(pas-ha) add a new waiter that waits for all (or at least
+ # one of) the ports on an instance at once, and use it here
+ for pm in port_map:
+ try:
+ port = waiters.wait_for_interface_status(
+ self.os_admin.interfaces_client,
+ server['id'], pm["id"], p_status)
+ pm["status"] = port['port_state']
+ except lib_exc.TimeoutException:
+ # NOTE(pas-ha) the server might have several IPv4 ports and
+ # at least one of them needs to be in an appropriate state,
+ # so just ignore timeouts here and deal with them later
+ pass
+ port_map = [p for p in port_map if p["status"] in p_status]
+ inactive = [p for p in port_map if p["status"] != 'ACTIVE']
if inactive:
- LOG.warning("Instance has ports that are not ACTIVE: %s", inactive)
-
+ LOG.warning("Instance has port that are not ACTIVE: %s", inactive)
self.assertNotEmpty(port_map,
"No IPv4 addresses found in: %s" % ports)
self.assertEqual(len(port_map), 1,
"Found multiple IPv4 addresses: %s. "
"Unable to determine which port to target."
% port_map)
- return port_map[0]
+
+ return port_map[0]["id"], port_map[0]["ip"]
def _get_network_by_name(self, network_name):
net = self.os_admin.networks_client.list_networks(
@@ -1208,9 +1176,9 @@
@classmethod
def setup_clients(cls):
super(EncryptionScenarioTest, cls).setup_clients()
- cls.admin_volume_types_client = cls.os_admin.volume_types_client_latest
+ cls.admin_volume_types_client = cls.os_admin.volume_types_v2_client
cls.admin_encryption_types_client =\
- cls.os_admin.encryption_types_client_latest
+ cls.os_admin.encryption_types_v2_client
def create_encryption_type(self, client=None, type_id=None, provider=None,
key_size=None, cipher=None,
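A rough sketch of how the check_public_network_connectivity helper added above might be driven from a scenario test; floating_ip, keypair and server are placeholder objects and the msg text is illustrative:

    self.check_public_network_connectivity(
        ip_address=floating_ip['floating_ip_address'],
        username=CONF.validation.image_ssh_user,
        private_key=keypair['private_key'],
        should_connect=True,
        msg='after associating the floating IP',
        servers=[server])
    # On failure the helper logs the exception and the console output of the
    # given servers before re-raising, so callers no longer pass extra_msg or
    # server into check_vm_connectivity.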
diff --git a/tempest/scenario/test_encrypted_cinder_volumes.py b/tempest/scenario/test_encrypted_cinder_volumes.py
index 8c210d5..e2e54d2 100644
--- a/tempest/scenario/test_encrypted_cinder_volumes.py
+++ b/tempest/scenario/test_encrypted_cinder_volumes.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
@@ -54,6 +56,8 @@
@decorators.idempotent_id('79165fb4-5534-4b9d-8429-97ccffb8f86e')
@decorators.attr(type='slow')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Image Signature Verification enabled')
@utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_luks(self):
server = self.launch_instance()
@@ -63,6 +67,8 @@
@decorators.idempotent_id('cbc752ed-b716-4717-910f-956cce965722')
@decorators.attr(type='slow')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Image Signature Verification enabled')
@utils.services('compute', 'volume', 'image')
def test_encrypted_cinder_volumes_cryptsetup(self):
server = self.launch_instance()
diff --git a/tempest/scenario/test_minimum_basic.py b/tempest/scenario/test_minimum_basic.py
index 2b35e45..5ecef44 100644
--- a/tempest/scenario/test_minimum_basic.py
+++ b/tempest/scenario/test_minimum_basic.py
@@ -13,6 +13,8 @@
# License for the specific language governing permissions and limitations
# under the License.
+import testtools
+
from tempest.common import custom_matchers
from tempest.common import utils
from tempest.common import waiters
@@ -99,6 +101,8 @@
return address
@decorators.idempotent_id('bdbb5441-9204-419d-a225-b4fdbfb1a1a8')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Image Signature Verification enabled')
@utils.services('compute', 'volume', 'image', 'network')
def test_minimum_basic_scenario(self):
image = self.glance_image_create()
diff --git a/tempest/scenario/test_server_multinode.py b/tempest/scenario/test_server_multinode.py
index fdf875c..16202a1 100644
--- a/tempest/scenario/test_server_multinode.py
+++ b/tempest/scenario/test_server_multinode.py
@@ -33,6 +33,9 @@
if CONF.compute.min_compute_nodes < 2:
raise cls.skipException(
"Less than 2 compute nodes, skipping multinode tests.")
+ if getattr(CONF.service_available, 'ironic', False):
+ raise cls.skipException(
+ "Does not work for hybrid cloud")
@decorators.idempotent_id('9cecbe35-b9d4-48da-a37e-7ce70aa43d30')
@decorators.attr(type='smoke')
diff --git a/tempest/scenario/test_shelve_instance.py b/tempest/scenario/test_shelve_instance.py
index d6b6d14..49c92bd 100644
--- a/tempest/scenario/test_shelve_instance.py
+++ b/tempest/scenario/test_shelve_instance.py
@@ -80,6 +80,8 @@
@decorators.idempotent_id('1164e700-0af0-4a4c-8792-35909a88743c')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Barbican service is enabled; unshelve will not work')
@utils.services('compute', 'network', 'image')
def test_shelve_instance(self):
self._create_server_then_shelve_and_unshelve()
@@ -88,6 +90,8 @@
@decorators.idempotent_id('c1b6318c-b9da-490b-9c67-9339b627271f')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Barbican service is enabled; unshelve will not work')
@utils.services('compute', 'volume', 'network', 'image')
def test_shelve_volume_backed_instance(self):
self._create_server_then_shelve_and_unshelve(boot_from_volume=True)
diff --git a/tempest/scenario/test_snapshot_pattern.py b/tempest/scenario/test_snapshot_pattern.py
index a33d4d4..5b62242 100644
--- a/tempest/scenario/test_snapshot_pattern.py
+++ b/tempest/scenario/test_snapshot_pattern.py
@@ -44,6 +44,8 @@
@decorators.attr(type='slow')
@testtools.skipUnless(CONF.network.public_network_id,
'The public_network_id option must be specified.')
+ @testtools.skipIf(getattr(CONF.service_available, 'barbican', False),
+ 'Image Signature Verification enabled')
@utils.services('compute', 'network', 'image')
def test_snapshot_pattern(self):
# prepare for booting an instance