Add tests to validate trunks

Extend the baremetal multitenancy scenario with a test that checks
the trunks extension (VLAN-aware VMs). The test is skipped unless the
new trunks_supported option is enabled.

Related-Prod: RODX-49858
Related-Bug: #1653968
Change-Id: Ibd3fa7e2729a3442cd636db0580cf8f152913960
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index 8213b61..182ac88 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -303,6 +303,10 @@
"support for embedded network metadata through glean "
"or cloud-init, and thus cannot be executed with "
"most default job configurations."),
+ cfg.BoolOpt('trunks_supported',
+ default=False,
+                help="Defines whether trunks are supported by the "
+                     "networking driver used with baremetal nodes."),
]
BaremetalIntrospectionGroup = [
diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
index 086bf21..b347431 100644
--- a/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
+++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
@@ -13,14 +13,17 @@
# License for the specific language governing permissions and limitations
# under the License.
+from oslo_log import log as logging
from tempest.common import utils
from tempest import config
from tempest.lib.common.utils import data_utils
+from tempest.lib.common.utils import test_utils
from tempest.lib import decorators
from ironic_tempest_plugin import manager
from ironic_tempest_plugin.tests.scenario import baremetal_manager
+LOG = logging.getLogger(__name__)
CONF = config.CONF
@@ -50,13 +53,16 @@
'a minimum of 2') % CONF.baremetal.available_nodes
raise cls.skipException(msg)
- def create_tenant_network(self, clients, tenant_cidr):
+ def create_tenant_network(self, clients, tenant_cidr, create_router=True):
network = self.create_network(
networks_client=clients.networks_client,
project_id=clients.credentials.project_id)
- router = self.get_router(
- client=clients.routers_client,
- project_id=clients.credentials.tenant_id)
+
+ router = None
+ if create_router:
+ router = self.get_router(
+ client=clients.routers_client,
+ project_id=clients.credentials.tenant_id)
result = clients.subnets_client.create_subnet(
name=data_utils.rand_name('subnet'),
@@ -65,25 +71,39 @@
ip_version=4,
cidr=tenant_cidr)
subnet = result['subnet']
- clients.routers_client.add_router_interface(router['id'],
- subnet_id=subnet['id'])
+ if create_router:
+ clients.routers_client.add_router_interface(router['id'],
+ subnet_id=subnet['id'])
self.addCleanup(clients.subnets_client.delete_subnet, subnet['id'])
- self.addCleanup(clients.routers_client.remove_router_interface,
- router['id'], subnet_id=subnet['id'])
+
+ if create_router:
+ self.addCleanup(clients.routers_client.remove_router_interface,
+ router['id'], subnet_id=subnet['id'])
return network, subnet, router
def verify_l3_connectivity(self, source_ip, private_key,
- destination_ip, conn_expected=True):
+ destination_ip, conn_expected=True, timeout=15):
remote = self.get_remote_client(source_ip, private_key=private_key)
remote.validate_authentication()
+ output = remote.exec_command('ip route')
+ LOG.debug("Routing table on %s is %s", source_ip, output)
+
cmd = 'ping %s -c4 -w4 || exit 0' % destination_ip
success_substring = " bytes from %s" % destination_ip
- output = remote.exec_command(cmd)
- if conn_expected:
- self.assertIn(success_substring, output)
- else:
- self.assertNotIn(success_substring, output)
+
+ def ping_remote():
+ output = remote.exec_command(cmd)
+ LOG.debug("Got output %s while pinging %s", output, destination_ip)
+ if conn_expected:
+ return success_substring in output
+ else:
+ return success_substring not in output
+
+        # NOTE(vsaienko): we may lose a couple of pings due to missing ARP
+        # entries, so retry several times to get a stable result.
+ res = test_utils.call_until_true(ping_remote, timeout, 1)
+ self.assertTrue(res)
def multitenancy_check(self, use_vm=False):
tenant_cidr = '10.0.100.0/24'
@@ -165,3 +185,137 @@
self.skipTest('Compute service Nova is disabled,'
' VM is required to run this test')
self.multitenancy_check(use_vm=True)
+
+ @decorators.idempotent_id('6891929f-a254-43b1-bd97-6ea3ec74d6a9')
+ @utils.services('compute', 'image', 'network')
+ def test_baremetal_vm_multitenancy_trunk(self):
+ """Check Trunk scenario for two baremetal servers
+
+
+ fipA -- RouterA -- NetworkA (10.0.100.0/24)
+ |
+ |eth0
+ eth0:instanceA
+ |eth0.vlan_id
+ |
+ NetworkB(10.0.101.0/24)
+ |
+ |eth0
+ instanceB
+
+
+        * Create instanceA in networkA with FIPA and a trunk port plugged
+          into networkA as the parent (native VLAN/untagged) and networkB
+          as a VLAN subport
+        * Create instanceB in networkB
+        * Verify that connectivity from instanceA to instanceB fails
+        * Assign the subport IP address inside instanceA. This step is only
+          needed until Nova config-drive support for trunks is implemented.
+        * Verify that connectivity from instanceA to instanceB succeeds
+        * Remove the subport from instanceA's trunk
+        * Verify that connectivity from instanceA to instanceB fails
+        * Add the subport back to the trunk
+        * Verify that connectivity from instanceA to instanceB succeeds
+ """
+
+ if not CONF.baremetal_feature_enabled.trunks_supported:
+ msg = 'Trunks with baremetal are not supported.'
+ raise self.skipException(msg)
+
+ tenant_a_cidr = '10.0.100.0/24'
+ tenant_b_cidr = '10.0.101.0/24'
+
+ keypair = self.create_keypair()
+ networkA, subnetA, routerA = self.create_tenant_network(
+ self.os_primary, tenant_a_cidr)
+ networkB, subnetB, _ = self.create_tenant_network(
+ self.os_primary, tenant_b_cidr, create_router=False)
+        portB = self.create_port(network_id=networkB["id"])
+
+        parent_port = self.create_port(network_id=networkA["id"])
+        subport = self.create_port(network_id=networkB["id"])
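+        # With segmentation_type 'inherit' the subport inherits its VLAN
+        # segmentation details from networkB rather than carrying an
+        # explicit segmentation_id.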
+        subports = [{'port_id': subport['id'],
+                     'segmentation_type': 'inherit'}]
+        trunk = self.os_primary.trunks_client.create_trunk(
+            name="test-trunk", port_id=parent_port['id'],
+            sub_ports=subports)['trunk']
+        self.addCleanup(self.os_primary.trunks_client.delete_trunk,
+                        trunk['id'])
+
+        # Create instanceB first: it has no FIP, so we do not check that it
+        # booted, and starting it first gives it more time than instanceA
+        # to come up.
+        instanceB, nodeB = self.boot_instance(
+            clients=self.os_primary,
+            keypair=keypair,
+            networks=[{'port': portB['id']}]
+        )
+
+        instanceA, nodeA = self.boot_instance(
+            clients=self.os_primary,
+            keypair=keypair,
+            networks=[{'port': parent_port['id']}]
+        )
+
+        floating_ipA = self.create_floating_ip(
+            instanceA,
+        )['floating_ip_address']
+
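+        # instanceB has no floating IP, so it is reached from instanceA via
+        # its fixed address on networkB.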
+        fixed_ipB = instanceB['addresses'][networkB['name']][0]['addr']
+
+        self.check_vm_connectivity(ip_address=floating_ipA,
+                                   private_key=keypair['private_key'],
+                                   server=instanceA)
+        ssh_client = self.get_remote_client(
+            floating_ipA, private_key=keypair['private_key'])
+
+        # TODO(vsaienko): once cloud-init support for trunks is implemented,
+        # add validation of network_data.json and drop the manual IP
+        # assignment below.
+
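+        # The subport VLAN is not configured inside instanceA yet, so
+        # instanceB on networkB must be unreachable.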
+        self.verify_l3_connectivity(
+            floating_ipA,
+            keypair['private_key'],
+            fixed_ipB,
+            conn_expected=False
+        )
+        vlan_id = trunk['sub_ports'][0]['segmentation_id']
+        subport_ip = subport['fixed_ips'][0]['ip_address']
+
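+        # Plumb the subport manually inside the guest: create a VLAN
+        # subinterface on the NIC that carries the default route and
+        # assign the subport's fixed IP to it.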
+        interface_name = ssh_client.exec_command(
+            "sudo ip route | awk '/default/ {print $5}'").rstrip()
+        cmds = [
+            f"sudo ip link add link {interface_name} name "
+            f"{interface_name}.{vlan_id} type vlan id {vlan_id}",
+            f"sudo ip addr add {subport_ip}/24 dev {interface_name}.{vlan_id}",
+            f"sudo ip link set dev {interface_name}.{vlan_id} up"]
+
+        for cmd in cmds:
+            ssh_client.exec_command(cmd)
+
+        self.verify_l3_connectivity(
+            floating_ipA,
+            keypair['private_key'],
+            fixed_ipB,
+            conn_expected=True
+        )
+
+        self.os_primary.trunks_client.delete_subports_from_trunk(
+            trunk['id'], trunk['sub_ports'])
+        self.verify_l3_connectivity(
+            floating_ipA,
+            keypair['private_key'],
+            fixed_ipB,
+            conn_expected=False
+        )
+        self.os_primary.trunks_client.add_subports_to_trunk(
+            trunk['id'], trunk['sub_ports'])
+
+        # NOTE(vsaienko): it may take some time for the network driver to
+        # set up VLANs, as this is an asynchronous operation.
+        self.verify_l3_connectivity(
+            floating_ipA,
+            keypair['private_key'],
+            fixed_ipB,
+            conn_expected=True
+        )
+        self.terminate_instance(instance=instanceA)
+        self.terminate_instance(instance=instanceB)