Merge "Test multiple boot interfaces as part of one CI job"
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index 8fbfbd5..5a71b84 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -33,13 +33,13 @@
"is expected to be available")
ironic_scope_enforcement = cfg.BoolOpt('ironic',
- default=False,
+ default=True,
help='Whether or not ironic is '
'expected to enforce auth '
'scope.')
inspector_scope_enforcement = cfg.BoolOpt('ironic_inspector',
- default=False,
+ default=True,
help='Whether or not '
'ironic-inspector is expected '
'to enforce auth scope.')
@@ -238,6 +238,17 @@
cfg.StrOpt('default_boot_option',
# No good default here, we need to actually set it.
help="The default boot option the testing nodes are using."),
+ cfg.BoolOpt("rebuild_remote_dhcpless",
+ default=True,
+ help="If we should issue a rebuild request when testing "
+ "dhcpless virtual media deployments. This may be useful "
+ "if bug 2032377 is not fixed in the agent ramdisk."),
+ cfg.StrOpt("public_subnet_id",
+ help="The public subnet ID where routers will be bound for "
+ "testing purposes with the dhcp-less test scenario."),
+ cfg.StrOpt("public_subnet_ip",
+ help="The public subnet IP to bind the public router to for "
+ "dhcp-less testing.")
]
BaremetalFeaturesGroup = [
@@ -258,6 +269,14 @@
default=False,
help="Defines if in-band RAID can be built in deploy time "
"(possible starting with Victoria)."),
+ cfg.BoolOpt('dhcpless_vmedia',
+ default=False,
+ help="Defines if it is possible to execute DHCP-Less "
+ "deployment of baremetal nodes through virtual media. "
+ "This test requires full OS images with configuration "
+ "support for embedded network metadata through glean "
+ "or cloud-init, and thus cannot be executed with "
+ "most default job configurations."),
]
BaremetalIntrospectionGroup = [
diff --git a/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py b/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py
index db06a9e..628a075 100644
--- a/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py
+++ b/ironic_tempest_plugin/services/baremetal/v1/json/baremetal_client.py
@@ -261,7 +261,7 @@
"""
node = {}
- for field in ('resource_class', 'name', 'description'):
+ for field in ('resource_class', 'name', 'description', 'shard'):
if kwargs.get(field):
node[field] = kwargs[field]
@@ -518,7 +518,8 @@
# TODO(dtantsur): maintenance is set differently
# in newer API versions.
'maintenance',
- 'description')
+ 'description',
+ 'shard')
if not patch:
patch = self._make_patch(node_attributes, **kwargs)
diff --git a/ironic_tempest_plugin/tests/api/admin/test_nodes.py b/ironic_tempest_plugin/tests/api/admin/test_nodes.py
index 8e8e217..d6d1833 100644
--- a/ironic_tempest_plugin/tests/api/admin/test_nodes.py
+++ b/ironic_tempest_plugin/tests/api/admin/test_nodes.py
@@ -413,6 +413,184 @@
self.client.vif_detach(self.node['uuid'], self.nport_id)
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('628350f8-4498-4204-b546-f3c01b93c7e3')
+ def test_vif_already_attached_on_internal_info(self):
+ """Negative test for duplicated attachment/detachment of VIFs.
+
+ Test steps:
+ 1) Create chassis and node in setUp.
+ 2) Create port for the node.
+ 3) Attach VIF to the node.
+ 4) Try to attach the same VIF to the node.
+ 5) Detach VIF from the node.
+ 6) Try to detach the same VIF from the node.
+ """
+ self.useFixture(
+ api_microversion_fixture.APIMicroversionFixture('1.28'))
+ _, self.port = self.create_port(self.node['uuid'],
+ data_utils.rand_mac_address())
+ self.client.vif_attach(self.node['uuid'], self.nport_id)
+ _, body = self.client.vif_list(self.node['uuid'])
+ self.assertEqual({'vifs': [{'id': self.nport_id}]}, body)
+ self.assertRaises(lib_exc.Conflict, self.client.vif_attach,
+ self.node['uuid'], self.nport_id)
+ self.client.vif_detach(self.node['uuid'], self.nport_id)
+ self.assertRaises(lib_exc.BadRequest, self.client.vif_detach,
+ self.node['uuid'], self.nport_id)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('ec0c14a4-6853-4907-9091-755c9c6c152f')
+ def test_vif_already_attached_with_portgroups(self):
+ """Negative test: try duplicated attachment of VIFs with port groups.
+
+ Test steps:
+ 1) Create chassis and node in setUp.
+ 2) Create port for the node.
+ 3) Create port group for the node.
+ 4) Plug port into port group.
+ 5) Attach VIF to the node.
+ 6) Try to attach the same VIF to the node.
+ 7) Check that VIF was not attached to port when portgroup is busy.
+ 8) Detach VIF from the node.
+ 9) Check there is no VIF data in port group internal info.
+ """
+ _, self.port = self.create_port(self.node['uuid'],
+ data_utils.rand_mac_address())
+ _, self.portgroup = self.create_portgroup(
+ self.node['uuid'], address=data_utils.rand_mac_address())
+
+ patch = [{'path': '/portgroup_uuid',
+ 'op': 'add',
+ 'value': self.portgroup['uuid']}]
+ self.client.update_port(self.port['uuid'], patch)
+ self.client.vif_attach(self.node['uuid'], self.nport_id)
+ self.assertRaises(lib_exc.Conflict, self.client.vif_attach,
+ self.node['uuid'], self.nport_id)
+
+ _, port = self.client.show_port(self.port['uuid'])
+ self.assertNotIn('tenant_vif_port_id', port['internal_info'])
+
+ self.client.vif_detach(self.node['uuid'], self.nport_id)
+ _, portgroup = self.client.show_portgroup(self.portgroup['uuid'])
+ self.assertNotIn('tenant_vif_port_id', portgroup['internal_info'])
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('91e08d6a-0438-4171-b404-bc86b0bc8861')
+ def test_vif_attach_no_free_port(self):
+ """Negative test for VIF attachment attempt with no free ports.
+
+ Test steps:
+ 1) Create chassis and node in setUp.
+ 2) Create port for the node.
+ 3) Attach VIF to the node.
+ 4) Try to attach new VIF to the same node with port.
+ 5) Check that VIF still attached with original port.
+ """
+ _, self.port = self.create_port(self.node['uuid'],
+ data_utils.rand_mac_address())
+ self.client.vif_attach(self.node['uuid'], self.nport_id)
+ self.assertRaises(lib_exc.BadRequest, self.client.vif_attach,
+ self.node['uuid'], 'test-vif-new')
+ _, port = self.client.show_port(self.port['uuid'])
+ self.assertEqual(self.nport_id,
+ port['internal_info']['tenant_vif_port_id'])
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('85b610cd-5ba8-49a7-8ce2-5e364056fd29')
+ def test_vif_attach_no_port(self):
+ """Negative test for VIF attachment attempt with no ports."""
+ self.assertRaises(lib_exc.BadRequest,
+ self.client.vif_attach,
+ self.node['uuid'], self.nport_id)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('481777b8-8f0d-4932-af3c-1a8e166949f3')
+ def test_vif_attach_with_empty_portgroup(self):
+ """Negative test for VIF attachment attempt to empty port group.
+
+ Test steps:
+ 1) Create chassis and node in setUp.
+ 2) Create a port group for the node.
+ 3) Try to attach VIF to the node.
+ 4) Check that VIF info did not get to port group when there is no port.
+ """
+ _, self.portgroup = self.create_portgroup(
+ self.node['uuid'], address=data_utils.rand_mac_address())
+ self.assertRaises(lib_exc.BadRequest, self.client.vif_attach,
+ self.node['uuid'], self.nport_id)
+ _, portgroup = self.client.show_portgroup(self.portgroup['uuid'])
+ self.assertNotIn('tenant_vif_port_id', portgroup['internal_info'])
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('5572fecc-bc22-461a-a891-7f74e2e936bd')
+ def test_vif_attach_port_not_in_portgroup(self):
+ """Negative test for VIF attachment to the port not in port group.
+
+ Test steps:
+ 1) Create chassis and node in setUp.
+ 2) Create a port group for the node.
+ 3) Create a port for the node.
+ 4) Attach VIF to the node.
+ 5) Check that VIF was attached to the port.
+ 6) Check that VIF was NOT attached to the portgroup.
+ """
+ _, self.portgroup = self.create_portgroup(
+ self.node['uuid'], address=data_utils.rand_mac_address())
+ _, self.port = self.create_port(self.node['uuid'],
+ data_utils.rand_mac_address())
+ self.client.vif_attach(self.node['uuid'], self.nport_id)
+ _, port = self.client.show_port(self.port['uuid'])
+ self.assertEqual(self.nport_id,
+ port['internal_info']['tenant_vif_port_id'])
+ _, portgroup = self.client.show_portgroup(self.portgroup['uuid'])
+ self.assertNotIn('tenant_vif_port_id', portgroup['internal_info'])
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('3affca81-9f3f-4dab-ad3d-77c892d8d0d7')
+ def test_vif_attach_node_doesnt_exist(self):
+ """Negative test to try to attach VIF to not-existing node."""
+ self.assertRaises(lib_exc.NotFound,
+ self.client.vif_attach,
+ data_utils.rand_uuid(),
+ self.nport_id)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('9290e1f9-7e75-4e12-aea7-3649348e7f36')
+ def test_vif_attach_no_args(self):
+ """Negative test for VIF attachment with lack of arguments."""
+ self.assertRaises(lib_exc.BadRequest,
+ self.client.vif_attach,
+ self.node['uuid'], '')
+ self.assertRaises(lib_exc.BadRequest,
+ self.client.vif_attach,
+ '', '')
+ self.assertRaises(lib_exc.BadRequest,
+ self.client.vif_attach,
+ '', self.nport_id)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('da036225-47b0-43b7-9586-0d6390bd3cd9')
+ def test_vif_detach_not_existing(self):
+ """Negative test for VIF detachment of not existing VIF."""
+ self.assertRaises(lib_exc.BadRequest,
+ self.client.vif_detach,
+ self.node['uuid'], self.nport_id)
+
+ @decorators.attr(type='negative')
+ @decorators.idempotent_id('ff3c9ce2-4979-4a54-a860-fab088a6669f')
+ def test_vif_detach_no_args(self):
+ """Negative test for VIF detachment with lack of arguments."""
+ self.assertRaises(lib_exc.BadRequest,
+ self.client.vif_detach,
+ self.node['uuid'], '')
+ self.assertRaises(lib_exc.NotFound,
+ self.client.vif_detach,
+ '', '')
+ self.assertRaises(lib_exc.NotFound,
+ self.client.vif_detach,
+ '', self.nport_id)
+
class TestHardwareInterfaces(base.BaseBaremetalTest):
diff --git a/ironic_tempest_plugin/tests/api/admin/test_shards.py b/ironic_tempest_plugin/tests/api/admin/test_shards.py
new file mode 100644
index 0000000..ffe6914
--- /dev/null
+++ b/ironic_tempest_plugin/tests/api/admin/test_shards.py
@@ -0,0 +1,128 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest import config
+from tempest.lib import decorators
+
+from ironic_tempest_plugin.tests.api import base
+
+CONF = config.CONF
+
+
+class TestAddShardsToNode(base.BaseBaremetalTest):
+ """Tests for baremetal shards."""
+
+ min_microversion = '1.82'
+
+ def setUp(self):
+ super(TestAddShardsToNode, self).setUp()
+ # the minimum API version is pinned via min_microversion above
+ _, self.chassis = self.create_chassis()
+
+ @decorators.idempotent_id('6f1e241d-4386-4730-b9ff-28c6a3dcad31')
+ def test_add_shard_to_node_at_create(self):
+ shard = 'at-create'
+
+ _, body = self.create_node(self.chassis['uuid'], shard=shard)
+ self.assertEqual(shard, body['shard'])
+
+ @decorators.idempotent_id('2eb91d29-e0a5-472b-aeb8-ef6d98eb0f3c')
+ def test_add_shard_to_node_post_create(self):
+ shard = 'post-create'
+
+ _, node = self.create_node(self.chassis['uuid'])
+ _, before = self.client.show_node(node['uuid'])
+ self.assertIsNone(before['shard'])
+
+ self.client.update_node(node['uuid'], shard=shard)
+
+ _, after = self.client.show_node(node['uuid'])
+ self.assertEqual(shard, after['shard'])
+
+
+class TestNodeShardQueries(base.BaseBaremetalTest):
+ """Tests for baremetal shards."""
+
+ min_microversion = '1.82'
+
+ def setUp(self):
+ super(TestNodeShardQueries, self).setUp()
+ _, self.chassis = self.create_chassis()
+ _, bad_node = self.create_node(self.chassis['uuid'], shard='bad')
+ _, none_node = self.create_node(self.chassis['uuid']) # shard=None
+ self.bad_node_id = bad_node['uuid']
+ self.none_node_id = none_node['uuid']
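+ # These two nodes act as negative controls: one in an unrelated
+ # shard and one with no shard at all.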
+
+ def _setup_nodes(self, good_shard, num=2):
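+ """Create ``num`` nodes in ``good_shard`` and return their UUIDs."""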
+ good_node_ids = []
+ for i in range(num):
+ _, node = self.create_node(self.chassis['uuid'], shard=good_shard)
+ good_node_ids.append(node['uuid'])
+
+ return good_node_ids
+
+ @decorators.idempotent_id('df74c989-6972-4104-a8d6-bd8e8d811353')
+ def test_show_all_nodes(self):
+ """Validate unfiltered API query will return nodes with a shard."""
+ shard = "oneshardtest"
+ good_node_ids = self._setup_nodes(shard)
+
+ _, fetched_nodes = self.client.list_nodes()
+ fetched_node_ids = [node['uuid'] for node in fetched_nodes['nodes']]
+
+ for node_id in good_node_ids:
+ self.assertIn(node_id, fetched_node_ids)
+
+ @decorators.idempotent_id('9ec111ca-e659-4b4e-b8a4-a0e2b95e58a7')
+ def test_only_show_requested_shard(self):
+ """Validate filtering nodes by shard."""
+ shard = "oneshardtest"
+ good_node_ids = self._setup_nodes(shard)
+
+ _, fetched_nodes = self.client.list_nodes(shard=shard)
+ fetched_node_ids = [node['uuid'] for node in fetched_nodes['nodes']]
+
+ self.assertCountEqual(good_node_ids, fetched_node_ids)
+
+ @decorators.idempotent_id('6a0a0561-bb11-4f45-b934-4b2a60e63a02')
+ def test_only_show_multiple_requested_shards(self):
+ """Validate filtering nodes by multiple shards."""
+ shard = "multishardtest"
+ second_shard = "multishardtest2"
+ good_node_ids = self._setup_nodes(shard)
+ _, second_shard_node = self.create_node(
+ self.chassis['uuid'], shard=second_shard)
+ good_node_ids.append(second_shard_node['uuid'])
+
+ _, fetched_nodes = self.client.list_nodes(
+ shard=','.join([shard, second_shard]))
+ fetched_node_ids = [node['uuid'] for node in fetched_nodes['nodes']]
+
+ self.assertCountEqual(good_node_ids, fetched_node_ids)
+ self.assertNotIn(self.bad_node_id, fetched_node_ids)
+ self.assertNotIn(self.none_node_id, fetched_node_ids)
+
+ @decorators.idempotent_id('f7a2eeb7-d16e-480c-b698-3448491c73a1')
+ def test_show_sharded_nodes(self):
+ _, fetched_nodes = self.client.list_nodes(sharded=True)
+ fetched_node_ids = [node['uuid'] for node in fetched_nodes['nodes']]
+
+ # NOTE(JayF): All other nodes under test are sharded
+ self.assertNotIn(self.none_node_id, fetched_node_ids)
+
+ @decorators.idempotent_id('8e2f1e06-7e1d-4f4d-8a3f-30a49a48c4a9')
+ def test_show_unsharded_nodes(self):
+ _, fetched_nodes = self.client.list_nodes(sharded=False)
+ fetched_node_ids = [node['uuid'] for node in fetched_nodes['nodes']]
+
+ self.assertIn(self.none_node_id, fetched_node_ids)
+ self.assertNotIn(self.bad_node_id, fetched_node_ids)
diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
index f943522..a66d7d6 100644
--- a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
+++ b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
@@ -15,6 +15,7 @@
# License for the specific language governing permissions and limitations
# under the License.
+import ipaddress
import random
from oslo_utils import uuidutils
@@ -265,8 +266,66 @@
raise lib_exc.TimeoutException(msg)
@classmethod
+ def gen_config_drive_net_info(cls, node_id, n_port):
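+ """Build a config drive ``network_data`` dict for a node.
+
+ Assembles a minimal network_data payload (links, networks and
+ services) for the port carrying the tenant VIF, so an image with
+ glean or cloud-init support can configure its interface without
+ DHCP.
+ """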
+ # Find the port with the vif.
+ use_port = None
+ _, body = cls.baremetal_client.list_node_ports(node_id)
+ for port in body['ports']:
+ _, p = cls.baremetal_client.show_port(port['uuid'])
+ if 'tenant_vif_port_id' in p['internal_info']:
+ use_port = p
+ break
+ if not use_port:
+ m = ('Unable to determine the proper MAC address to use in '
+ 'the network config for the virtual media port test.')
+ raise lib_exc.InvalidConfiguration(m)
+ vif_mac_address = use_port['address']
+ if CONF.validation.ip_version_for_ssh == 4:
+ ip_version = "ipv4"
+ else:
+ ip_version = "ipv6"
+ ip_address = n_port['fixed_ips'][0]['ip_address']
+ subnet_id = n_port['fixed_ips'][0]['subnet_id']
+ subnet = cls.os_primary.subnets_client.show_subnet(
+ subnet_id).get('subnet')
+ ip_netmask = str(ipaddress.ip_network(subnet.get('cidr')).netmask)
+ if ip_version == "ipv4":
+ route = [{
+ "netmask": "0.0.0.0",
+ "network": "0.0.0.0",
+ "gateway": subnet.get('gateway_ip'),
+ }]
+ else:
+ # The network_data format has no clean way to express an
+ # IPv6 default route: ::/0 would make both the network and
+ # the netmask "::". Leave routes empty and rely on router
+ # advertisements instead.
+ route = []
+
+ return {
+ "links": [{"id": "port-test",
+ "type": "vif",
+ "ethernet_mac_address": vif_mac_address}],
+ "networks": [
+ {
+ "id": "network0",
+ "type": ip_version,
+ "link": "port-test",
+ "ip_address": ip_address,
+ "netmask": ip_netmask,
+ "network_id": "network0",
+ "routes": route
+ }
+ ],
+ "services": []
+ }
+
+ @classmethod
def boot_node(cls, image_ref=None, image_checksum=None,
- boot_option=None):
+ boot_option=None, config_drive_networking=False,
+ fallback_network=None):
"""Boot ironic node.
The following actions are executed:
@@ -282,7 +341,13 @@
:param boot_option: The default boot option to utilize. If not
specified, the ironic deployment default shall
be utilized.
+ :param config_drive_networking: Whether to load the configuration
+ drive with network_data values.
+ :param fallback_network: Network to use if a usable network cannot
+ be detected automatically.
"""
+ config_drive = {}
+
if image_ref is None:
image_ref = cls.image_ref
if image_checksum is None:
@@ -291,8 +356,22 @@
boot_option = cls.boot_option
network, subnet, router = cls.create_networks()
- n_port = cls.create_neutron_port(network_id=network['id'])
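+ # create_networks() may not return a usable network in every job
+ # configuration; if it did not, the port creation below raises
+ # TypeError and we fall back to the caller-supplied network.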
+ try:
+ n_port = cls.create_neutron_port(network_id=network['id'])
+
+ except TypeError:
+ if fallback_network:
+ n_port = cls.create_neutron_port(
+ network_id=fallback_network)
+ else:
+ raise
cls.vif_attach(node_id=cls.node['uuid'], vif_id=n_port['id'])
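+ # Only build a config drive when DHCP-less networking was
+ # requested; otherwise the deployment keeps relying on DHCP.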
+ config_drive = None
+ if config_drive_networking:
+ config_drive = {}
+ config_drive['network_data'] = cls.gen_config_drive_net_info(
+ cls.node['uuid'], n_port)
+
patch = [{'path': '/instance_info/image_source',
'op': 'add',
'value': image_ref}]
@@ -308,9 +387,15 @@
patch.append({'path': '/instance_info/capabilities',
'op': 'add',
'value': {'boot_option': boot_option}})
- # TODO(vsaienko) add testing for custom configdrive
cls.update_node(cls.node['uuid'], patch=patch)
- cls.set_node_provision_state(cls.node['uuid'], 'active')
+
+ if not config_drive:
+ cls.set_node_provision_state(cls.node['uuid'], 'active')
+ else:
+ cls.set_node_provision_state(
+ cls.node['uuid'], 'active',
+ configdrive=config_drive)
+
cls.wait_power_state(cls.node['uuid'],
bm.BaremetalPowerStates.POWER_ON)
cls.wait_provisioning_state(cls.node['uuid'],
@@ -332,7 +417,12 @@
cls.detach_all_vifs_from_node(node_id, force_delete=force_delete)
if cls.delete_node or force_delete:
- cls.set_node_provision_state(node_id, 'deleted')
+ node_state = cls.get_node(node_id)['provision_state']
+ if node_state != bm.BaremetalProvisionStates.AVAILABLE:
+ # Check the state before making the call, so complex tests can
+ # drive the node back to an available state themselves before
+ # the test exits, without a redundant unprovision here.
+ cls.set_node_provision_state(node_id, 'deleted')
# NOTE(vsaienko) We expect here fast switching from deleted to
# available as automated cleaning is disabled so poll status
# each 1s.
@@ -595,9 +685,16 @@
'Partitioned images are not supported with multitenancy.')
@classmethod
- def set_node_to_active(cls, image_ref=None, image_checksum=None):
- cls.boot_node(image_ref, image_checksum)
- if CONF.validation.connect_method == 'floating':
+ def set_node_to_active(cls, image_ref=None, image_checksum=None,
+ fallback_network=None,
+ config_drive_networking=None,
+ method_to_get_ip=None):
+ cls.boot_node(image_ref, image_checksum,
+ fallback_network=fallback_network,
+ config_drive_networking=config_drive_networking)
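+ # A caller-supplied helper takes precedence over the configured
+ # connect_method; the DHCP-less scenario passes get_server_ip
+ # here instead of allocating a floating IP.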
+ if method_to_get_ip:
+ cls.node_ip = method_to_get_ip(cls.node['uuid'])
+ elif CONF.validation.connect_method == 'floating':
cls.node_ip = cls.add_floatingip_to_node(cls.node['uuid'])
elif CONF.validation.connect_method == 'fixed':
cls.node_ip = cls.get_server_ip(cls.node['uuid'])
@@ -644,11 +741,7 @@
cls.update_node_driver(cls.node['uuid'], cls.driver, **boot_kwargs)
@classmethod
- def resource_cleanup(cls):
- if CONF.validation.connect_method == 'floating':
- if cls.node_ip:
- cls.cleanup_floating_ip(cls.node_ip)
-
+ def cleanup_vif_attachments(cls):
vifs = cls.get_node_vifs(cls.node['uuid'])
# Remove ports before deleting node, to catch regression for cases
# when a user did this prior to unprovisioning the node.
@@ -657,6 +750,18 @@
cls.ports_client.delete_port(vif)
except lib_exc.NotFound:
pass
+
+ @classmethod
+ def resource_cleanup(cls):
+ if CONF.validation.connect_method == 'floating':
+ if cls.node_ip:
+ try:
+ cls.cleanup_floating_ip(cls.node_ip)
+ except IndexError:
+ # There is no fip to actually remove in this case.
+ pass
+
+ cls.cleanup_vif_attachments()
cls.terminate_node(cls.node['uuid'])
cls.unreserve_node(cls.node)
base.reset_baremetal_api_microversion()
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_advanced_ops.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_advanced_ops.py
new file mode 100644
index 0000000..7db9ae1
--- /dev/null
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_advanced_ops.py
@@ -0,0 +1,149 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from ironic_tempest_plugin.tests.scenario import \
+ baremetal_standalone_manager as bsm
+
+CONF = config.CONF
+
+
+class BaremetalRedfishDHCPLessDeploy(bsm.BaremetalStandaloneScenarioTest):
+
+ api_microversion = '1.59' # Ussuri for redfish-virtual-media
+ driver = 'redfish'
+ deploy_interface = 'direct'
+ boot_interface = 'redfish-virtual-media'
+ image_ref = CONF.baremetal.whole_disk_image_ref
+ image_checksum = CONF.baremetal.whole_disk_image_checksum
+ wholedisk_image = True
+
+ @classmethod
+ def skip_checks(cls):
+ super(BaremetalRedfishDHCPLessDeploy, cls).skip_checks()
+ if not CONF.baremetal_feature_enabled.dhcpless_vmedia:
+ raise cls.skipException("This test requires a full OS image to "
+ "be deployed, and thus must be "
+ "explicitly enabled for testing.")
+
+ if (not CONF.baremetal.public_subnet_id
+ or not CONF.baremetal.public_subnet_ip):
+ raise cls.skipException(
+ "This test requires a public sunbet ID, and public subnet "
+ "IP to use on that subnet to execute. Please see the "
+ "baremetal configuration options public_subnet_id "
+ "and public_subnet_ip respectively, and populate with "
+ "appropriate values to execute this test.")
+
+ def create_tenant_network(self, clients, tenant_cidr, ip_version):
+ # NOTE(TheJulia): self.create_network is an internal method
+ # which just gets the info, doesn't actually create a network.
+ network = self.create_network(
+ networks_client=self.os_admin.networks_client,
+ project_id=clients.credentials.project_id,
+ shared=True)
+
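+ # The router's external gateway is pinned to the pre-configured
+ # public subnet and IP (public_subnet_id / public_subnet_ip).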
+ router = self.get_router(
+ client=clients.routers_client,
+ project_id=clients.credentials.tenant_id,
+ external_gateway_info={
+ 'network_id': CONF.network.public_network_id,
+ 'external_fixed_ips': [
+ {'subnet_id': CONF.baremetal.public_subnet_id,
+ 'ip_address': CONF.baremetal.public_subnet_ip}]
+ })
+ result = clients.subnets_client.create_subnet(
+ name=data_utils.rand_name('subnet'),
+ network_id=network['id'],
+ tenant_id=clients.credentials.tenant_id,
+ ip_version=CONF.validation.ip_version_for_ssh,
+ cidr=tenant_cidr, enable_dhcp=False)
+ subnet = result['subnet']
+ clients.routers_client.add_router_interface(router['id'],
+ subnet_id=subnet['id'])
+ self.addCleanup(clients.subnets_client.delete_subnet, subnet['id'])
+ self.addCleanup(clients.routers_client.remove_router_interface,
+ router['id'], subnet_id=subnet['id'])
+ return network, subnet, router
+
+ def deploy_vmedia_dhcpless(self, rebuild=False):
+ """Helper to facilitate vmedia testing.
+
+ * Create Network/router without DHCP
+ * Set provisioning_network for this node.
+ * Set cleanup to undo the provisioning network setup.
+ * Launch instance.
+ * Requirement: Instance OS image supports network config from
+ network_data embedded in the OS. i.e. a real image, not
+ cirros.
+ * If so enabled, rebuild the node and verify the rebuild completed.
+ * Via cleanup: Teardown Network/Router
+ """
+
+ # Get the latest state for the node.
+ self.node = self.get_node(self.node['uuid'])
+ prior_prov_net = self.node['driver_info'].get('provisioning_network')
+
+ ip_version = CONF.validation.ip_version_for_ssh
+ tenant_cidr = '10.0.6.0/24'
+ if ip_version == 6:
+ tenant_cidr = 'fd00:33::/64'
+
+ network, subnet, router = self.create_tenant_network(
+ self.os_admin, tenant_cidr, ip_version=ip_version)
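+ # Use the DHCP-free tenant network as the node's provisioning
+ # network for this test, and restore the original setting (or
+ # remove it) on cleanup.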
+ if prior_prov_net:
+ self.update_node(self.node['uuid'],
+ [{'op': 'replace',
+ 'path': '/driver_info/provisioning_network',
+ 'value': network['id']}])
+ self.addCleanup(self.update_node,
+ self.node['uuid'],
+ [{'op': 'replace',
+ 'path': '/driver_info/provisioning_network',
+ 'value': prior_prov_net}])
+ else:
+ self.update_node(self.node['uuid'],
+ [{'op': 'add',
+ 'path': '/driver_info/provisioning_network',
+ 'value': network['id']}])
+ self.addCleanup(self.update_node,
+ self.node['uuid'],
+ [{'op': 'remove',
+ 'path': '/driver_info/provisioning_network'}])
+
+ self.set_node_to_active(self.image_ref, self.image_checksum,
+ fallback_network=network['id'],
+ config_drive_networking=True,
+ method_to_get_ip=self.get_server_ip)
+
+ # node_ip is set by the prior call to set_node_to_active
+ self.assertTrue(self.ping_ip_address(self.node_ip))
+
+ if rebuild:
+ self.set_node_provision_state(self.node['uuid'], 'rebuild')
+ self.wait_provisioning_state(self.node['uuid'], 'active',
+ timeout=CONF.baremetal.active_timeout,
+ interval=30)
+ # Assert we were able to ping after rebuilding.
+ self.assertTrue(self.ping_ip_address(self.node_ip))
+ # Force delete so we remove the vifs
+ self.terminate_node(self.node['uuid'], force_delete=True)
+
+ @decorators.idempotent_id('1f420ef3-99bd-46c7-b859-ce9c2892697f')
+ @utils.services('image', 'network')
+ def test_ip_access_to_server(self):
+ self.deploy_vmedia_dhcpless(
+ rebuild=CONF.baremetal.rebuild_remote_dhcpless)
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_ramdisk_iso.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_ramdisk_iso.py
index 9f8ff13..f273499 100644
--- a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_ramdisk_iso.py
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_ramdisk_iso.py
@@ -22,7 +22,7 @@
CONF = config.CONF
-class BaremetalRamdiskBootIsoIPXE(bsm.BaremetalStandaloneScenarioTest):
+class BaremetalRamdiskBootIsoIPMIIPXE(bsm.BaremetalStandaloneScenarioTest):
driver = 'ipmi'
boot_interface = 'ipxe'
@@ -45,7 +45,8 @@
self.boot_and_verify_ramdisk_node(self.image_ref, iso=True)
-class BaremetalRamdiskBootIsoVMedia(bsm.BaremetalStandaloneScenarioTest):
+class BaremetalRamdiskBootIsoRedfishVMedia(
+ bsm.BaremetalStandaloneScenarioTest):
driver = 'redfish'
boot_interface = 'redfish-virtual-media'
@@ -66,3 +67,26 @@
@utils.services('image', 'network')
def test_ramdisk_boot(self):
self.boot_and_verify_ramdisk_node(self.image_ref, iso=True)
+
+
+class BaremetalRamdiskBootIsoSNMPIPXE(bsm.BaremetalStandaloneScenarioTest):
+
+ driver = 'snmp'
+ boot_interface = 'ipxe'
+ delete_node = False
+ deploy_interface = 'ramdisk'
+ api_microversion = '1.66'
+ image_ref = CONF.baremetal.ramdisk_iso_image_ref
+ wholedisk_image = False
+
+ @classmethod
+ def skip_checks(cls):
+ super().skip_checks()
+ if not cls.image_ref:
+ raise cls.skipException('Skipping ramdisk ISO booting as '
+ 'no ramdisk_iso_image_ref is defined.')
+
+ @decorators.idempotent_id('2859d115-9266-4461-9286-79b146e65dc9')
+ @utils.services('image', 'network')
+ def test_ramdisk_boot(self):
+ self.boot_and_verify_ramdisk_node(self.image_ref, iso=True)
diff --git a/setup.py b/setup.py
index 566d844..cd35c3c 100644
--- a/setup.py
+++ b/setup.py
@@ -13,17 +13,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
-# In python < 2.7.4, a lazy loading of package `pbr` will break
-# setuptools if some other modules registered functions in `atexit`.
-# solution from: http://bugs.python.org/issue15881#msg170215
-try:
- import multiprocessing # noqa
-except ImportError:
- pass
-
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)