Merge "Replaced all punctuation with an underscore in the node resource class."
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index 9e96863..1dce427 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -282,6 +282,10 @@
"support for embedded network metadata through glean "
"or cloud-init, and thus cannot be executed with "
"most default job configurations."),
+ cfg.BoolOpt('trunks_supported',
+ default=False,
+ help="Define if trunks are supported by networking driver "
+ "with baremetal nodes."),
]
BaremetalIntrospectionGroup = [
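
The new option is read from the baremetal_feature_enabled group, as the trunk
scenario test below does. A minimal sketch, assuming the standard tempest.conf
layout, of how a deployment opts in and how a test can guard on the flag;
skip_unless_trunks_supported is a hypothetical helper used only for
illustration:

    # tempest.conf (assumed layout):
    #   [baremetal_feature_enabled]
    #   trunks_supported = True
    from tempest import config

    CONF = config.CONF

    def skip_unless_trunks_supported(test):
        # Hypothetical helper mirroring the guard used in the trunk
        # scenario test below; skips on deployments whose networking
        # driver cannot trunk baremetal ports.
        if not CONF.baremetal_feature_enabled.trunks_supported:
            raise test.skipException(
                'Trunks with baremetal are not supported.')
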
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_basic_ops.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_basic_ops.py
index e16bfad..09da6d3 100644
--- a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_basic_ops.py
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_basic_ops.py
@@ -135,6 +135,7 @@
image_ref = CONF.baremetal.whole_disk_image_ref
wholedisk_image = True
+ @decorators.unstable_test(bug='2101021')
@decorators.idempotent_id('cde532cc-81ba-4489-b374-b4a85cc203eb')
@utils.services('image', 'network')
def test_ip_access_to_server(self):
diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py
index 0d15fd4..e3a9a58 100644
--- a/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py
+++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py
@@ -50,6 +50,7 @@
TEST_RESCUE_MODE = False
image_ref = None
wholedisk_image = None
+ auto_lease = False
@classmethod
def skip_checks(cls):
@@ -213,13 +214,25 @@
output = client.exec_command(cmd).rstrip()
self.assertEqual(success_string, output)
+ def validate_lessee(self):
+ iinfo = self.node.get('instance_info')
+ dii = self.node.get('driver_internal_info', {})
+ if 'automatic_lessee' in dii and iinfo:
+ # NOTE(JayF): automatic_lessee in driver_internal_info tells us
+ # Ironic set the lessee, so it must match the instance project_id.
+ self.assertEqual(iinfo['project_id'], self.node['lessee'])
+
def baremetal_server_ops(self):
self.add_keypair()
self.instance, self.node = self.boot_instance(image_id=self.image_ref)
self.validate_image()
self.validate_ports()
self.validate_scheduling()
+ self.validate_lessee()
ip_address = self.get_server_ip(self.instance)
+ self.check_vm_connectivity(ip_address=ip_address,
+ private_key=self.keypair['private_key'],
+ server=self.instance)
vm_client = self.get_remote_client(ip_address, server=self.instance)
# We expect the ephemeral partition to be mounted on /mnt and to have
@@ -261,6 +274,7 @@
def test_baremetal_server_ops_wholedisk_image(self):
self.image_ref = CONF.baremetal.whole_disk_image_ref
self.wholedisk_image = True
+ self.auto_lease = True
self.baremetal_server_ops()
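
For reference, validate_lessee() above compares fields of the node record. An
illustrative sketch, with made-up values, of the node shape it expects when
Ironic sets the lessee automatically:

    # Made-up values; validate_lessee() asserts node['lessee'] equals
    # instance_info['project_id'] whenever automatic_lessee is present
    # in driver_internal_info and instance_info is populated.
    node = {
        'driver_internal_info': {'automatic_lessee': True},
        'instance_info': {'project_id': '9f1c0e8d'},
        'lessee': '9f1c0e8d',
    }
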
diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
index 086bf21..1afd667 100644
--- a/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
+++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
@@ -13,14 +13,17 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import log as logging
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from ironic_tempest_plugin import manager
from ironic_tempest_plugin.tests.scenario import baremetal_manager
+LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -50,13 +53,16 @@
'a minimum of 2') % CONF.baremetal.available_nodes
raise cls.skipException(msg)
- def create_tenant_network(self, clients, tenant_cidr):
+ def create_tenant_network(self, clients, tenant_cidr, create_router=True):
network = self.create_network(
networks_client=clients.networks_client,
project_id=clients.credentials.project_id)
- router = self.get_router(
- client=clients.routers_client,
- project_id=clients.credentials.tenant_id)
+
+ router = None
+ if create_router:
+ router = self.get_router(
+ client=clients.routers_client,
+ project_id=clients.credentials.tenant_id)
result = clients.subnets_client.create_subnet(
name=data_utils.rand_name('subnet'),
@@ -65,25 +71,39 @@
ip_version=4,
cidr=tenant_cidr)
subnet = result['subnet']
- clients.routers_client.add_router_interface(router['id'],
- subnet_id=subnet['id'])
+ if create_router:
+ clients.routers_client.add_router_interface(router['id'],
+ subnet_id=subnet['id'])
self.addCleanup(clients.subnets_client.delete_subnet, subnet['id'])
- self.addCleanup(clients.routers_client.remove_router_interface,
- router['id'], subnet_id=subnet['id'])
+
+ if create_router:
+ self.addCleanup(clients.routers_client.remove_router_interface,
+ router['id'], subnet_id=subnet['id'])
return network, subnet, router
def verify_l3_connectivity(self, source_ip, private_key,
- destination_ip, conn_expected=True):
+ destination_ip, conn_expected=True, timeout=15):
remote = self.get_remote_client(source_ip, private_key=private_key)
remote.validate_authentication()
+ output = remote.exec_command('ip route')
+ LOG.debug("Routing table on %s is %s", source_ip, output)
+
cmd = 'ping %s -c4 -w4 || exit 0' % destination_ip
success_substring = " bytes from %s" % destination_ip
- output = remote.exec_command(cmd)
- if conn_expected:
- self.assertIn(success_substring, output)
- else:
- self.assertNotIn(success_substring, output)
+
+ def ping_remote():
+ output = remote.exec_command(cmd)
+ LOG.debug("Got output %s while pinging %s", output, destination_ip)
+ if conn_expected:
+ return success_substring in output
+ else:
+ return success_substring not in output
+
+ # NOTE(vsaienko): we may lose a couple of pings due to missing ARP
+ # entries, so retry several times to get stable output.
+ res = test_utils.call_until_true(ping_remote, timeout, 1)
+ self.assertTrue(res)
def multitenancy_check(self, use_vm=False):
tenant_cidr = '10.0.100.0/24'
@@ -98,6 +118,7 @@
clients=self.os_primary,
keypair=keypair,
net_id=network['id'],
+ fixed_ip='10.0.100.101',
)
fixed_ip1 = instance1['addresses'][network['name']][0]['addr']
floating_ip1 = self.create_floating_ip(
@@ -112,7 +133,8 @@
clients=self.os_alt,
key_name=alt_keypair['name'],
flavor=CONF.compute.flavor_ref_alt,
- networks=[{'uuid': alt_network['id']}]
+ networks=[{'uuid': alt_network['id'],
+ 'fixed_ip': '10.0.100.102'}],
)
else:
# Create BM
@@ -120,6 +142,7 @@
keypair=alt_keypair,
clients=self.os_alt,
net_id=alt_network['id'],
+ fixed_ip='10.0.100.102',
)
fixed_ip2 = alt_instance['addresses'][alt_network['name']][0]['addr']
alt_floating_ip = self.create_floating_ip(
@@ -165,3 +188,137 @@
self.skipTest('Compute service Nova is disabled,'
' VM is required to run this test')
self.multitenancy_check(use_vm=True)
+
+ @decorators.idempotent_id('6891929f-a254-43b1-bd97-6ea3ec74d6a9')
+ @utils.services('compute', 'image', 'network')
+ def test_baremetal_vm_multitenancy_trunk(self):
+ """Check Trunk scenario for two baremetal servers
+
+
+ fipA -- RouterA -- NetworkA (10.0.100.0/24)
+ |
+ |eth0
+ eth0:instanceA
+ |eth0.vlan_id
+ |
+ NetworkB(10.0.101.0/24)
+ |
+ |eth0
+ instanceB
+
+
+ * Create instanceA in networkA with fipA, using a trunk port plugged
+ into networkA as the parent (native VLAN/untagged) and networkB as
+ a VLAN subport
+ * Create instanceB in networkB
+ * Verify connectivity from instanceA to instanceB fails
+ * Assign an IP address to the subport inside instanceA. This step is
+ only needed until Nova configdrive support for trunks is implemented.
+ * Verify connectivity from instanceA to instanceB succeeds
+ * Remove the subport from instanceA
+ * Verify connectivity from instanceA to instanceB fails
+ * Add the subport back to instanceA
+ * Verify connectivity from instanceA to instanceB succeeds
+ """
+
+ if not CONF.baremetal_feature_enabled.trunks_supported:
+ msg = 'Trunks with baremetal are not supported.'
+ raise self.skipException(msg)
+
+ tenant_a_cidr = '10.0.100.0/24'
+ tenant_b_cidr = '10.0.101.0/24'
+
+ keypair = self.create_keypair()
+ networkA, subnetA, routerA = self.create_tenant_network(
+ self.os_primary, tenant_a_cidr)
+ networkB, subnetB, _ = self.create_tenant_network(
+ self.os_primary, tenant_b_cidr, create_router=False)
+ portB = self.create_port(network_id=networkB["id"])
+
+ parent_port = self.create_port(network_id=networkA["id"])
+ subport = self.create_port(network_id=networkB["id"])
+ subports = [{'port_id': subport['id'], 'segmentation_type': 'inherit'}]
+ trunk = self.os_primary.trunks_client.create_trunk(
+ name="test-trunk", port_id=parent_port['id'],
+ sub_ports=subports)['trunk']
+ self.addCleanup(self.os_primary.trunks_client.delete_trunk,
+ trunk['id'])
+
+ # Create instanceB first: it has no FIP, so we cannot check whether
+ # it has booted, and creating it first gives it more time than
+ # instanceA to boot.
+ instanceB, nodeB = self.boot_instance(
+ clients=self.os_primary,
+ keypair=keypair,
+ networks=[{'port': portB['id']}]
+ )
+
+ instanceA, nodeA = self.boot_instance(
+ clients=self.os_primary,
+ keypair=keypair,
+ networks=[{'port': parent_port['id']}]
+ )
+
+ floating_ipA = self.create_floating_ip(
+ instanceA,
+ )['floating_ip_address']
+
+ fixed_ipB = instanceB['addresses'][networkB['name']][0]['addr']
+
+ self.check_vm_connectivity(ip_address=floating_ipA,
+ private_key=keypair['private_key'],
+ server=instanceA)
+ ssh_client = self.get_remote_client(floating_ipA,
+ private_key=keypair['private_key'])
+
+ # TODO(vsaienko): once cloud-init support for trunks is implemented,
+ # add validation of network_data.json and drop the manual IP
+ # assignment below.
+
+ self.verify_l3_connectivity(
+ floating_ipA,
+ keypair['private_key'],
+ fixed_ipB,
+ conn_expected=False
+ )
+ vlan_id = trunk['sub_ports'][0]['segmentation_id']
+ subport_ip = subport['fixed_ips'][0]['ip_address']
+
+ interface_name = ssh_client.exec_command(
+ "sudo ip route | awk '/default/ {print $5}'").rstrip()
+ cmds = [
+ f"sudo ip link add link {interface_name} name "
+ f"{interface_name}.{vlan_id} type vlan id {vlan_id}",
+ f"sudo ip addr add {subport_ip}/24 dev {interface_name}.{vlan_id}",
+ f"sudo ip link set dev {interface_name}.{vlan_id} up"]
+
+ for cmd in cmds:
+ ssh_client.exec_command(cmd)
+
+ self.verify_l3_connectivity(
+ floating_ipA,
+ keypair['private_key'],
+ fixed_ipB,
+ conn_expected=True
+ )
+
+ self.os_primary.trunks_client.delete_subports_from_trunk(
+ trunk['id'], trunk['sub_ports'])
+ self.verify_l3_connectivity(
+ floating_ipA,
+ keypair['private_key'],
+ fixed_ipB,
+ conn_expected=False
+ )
+ self.os_primary.trunks_client.add_subports_to_trunk(
+ trunk['id'], trunk['sub_ports'])
+
+ # NOTE(vsaienko): it may take some time for the network driver to
+ # set up VLANs as this is an asynchronous operation.
+ self.verify_l3_connectivity(
+ floating_ipA,
+ keypair['private_key'],
+ fixed_ipB,
+ conn_expected=True
+ )
+ self.terminate_instance(instance=instanceA)
+ self.terminate_instance(instance=instanceB)
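
The connectivity checks above tolerate transient ping loss by polling until the
expected result appears. A standalone sketch of the same retry pattern built on
tempest's call_until_true helper; wait_for_ping is a hypothetical name used
only for illustration:

    from tempest.lib.common.utils import test_utils

    def wait_for_ping(remote, destination_ip, timeout=15, interval=1):
        # `remote` is an SSH client exposing exec_command(), as returned
        # by get_remote_client(); mirrors the retry loop used in
        # verify_l3_connectivity().
        cmd = 'ping %s -c4 -w4 || exit 0' % destination_ip
        expected = ' bytes from %s' % destination_ip

        def _ping_ok():
            return expected in remote.exec_command(cmd)

        # call_until_true(func, duration, sleep_for) retries func() until
        # it returns True or `duration` seconds elapse.
        return test_utils.call_until_true(_ping_ok, timeout, interval)
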
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index 4d9da27..1c99350 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -6,60 +6,31 @@
check:
jobs:
# NOTE(dtantsur): keep N-3 and older non-voting for these jobs.
- - ironic-standalone
- - ironic-standalone-2024.2
- - ironic-standalone-2024.1
- - ironic-standalone-2023.2:
- voting: false
- ironic-tempest-functional-python3
- ironic-tempest-functional-python3-2024.2
- ironic-tempest-functional-python3-2024.1
- - ironic-tempest-functional-python3-2023.2:
- voting: false
- - ironic-tempest-functional-rbac-scope-enforced-2024.2
- - ironic-tempest-functional-rbac-scope-enforced-2024.1
- - ironic-tempest-functional-rbac-scope-enforced-2023.2:
- voting: false
- ironic-standalone-anaconda
- ironic-standalone-anaconda-2024.2
- ironic-standalone-anaconda-2024.1
- - ironic-standalone-anaconda-2023.2:
- voting: false
- ironic-standalone-redfish
- ironic-standalone-redfish-2024.2
- ironic-standalone-redfish-2024.1
- - ironic-standalone-redfish-2023.2:
- voting: false
# NOTE(dtantsur): inspector is deprecated and rarely sees any changes,
# no point in running many jobs
- ironic-inspector-tempest
- - ironic-inspector-tempest-2024.2:
- voting: false
- - ironic-inspector-tempest-2024.1:
- voting: false
# NOTE(dtantsur): these jobs cover rarely changed tests and are quite
# unstable, so keep them non-voting.
- # NOTE(TheJulia): Except this first one so we can validate fixes to
- # the base tests as we make them.
- - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode
+ - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode:
+ voting: false
- ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-2024.2:
voting: false
- - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-2024.1:
- voting: false
- - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-2023.2:
- voting: false
- ironic-inspector-tempest-discovery:
voting: false
gate:
jobs:
- - ironic-standalone
- - ironic-standalone-2024.2
- - ironic-standalone-2024.1
- ironic-tempest-functional-python3
- ironic-tempest-functional-python3-2024.2
- ironic-tempest-functional-python3-2024.1
- - ironic-tempest-functional-rbac-scope-enforced-2024.2
- - ironic-tempest-functional-rbac-scope-enforced-2024.1
- ironic-standalone-anaconda
- ironic-standalone-anaconda-2024.2
- ironic-standalone-anaconda-2024.1