Add test to verify ironic multitenancy

  * Create two tenants with own networks.
  * Boot 2 baremetal instances in the same IP subnet in
    different tenants.
  * Verify there is no L3 connectivity between the instances' fixed IPs
  * Verify L3 connectivity between the instances' floating IPs

Co-Authored-By: Vasyl Saienko <vsaienko@mirantis.com>

Change-Id: I4fe31ecae3393abc2779a5e80e348899f9113f1b
Related-Bug: 1520230
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index e39f8f8..00e9a35 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -81,4 +81,7 @@
                     "min_microversion and max_microversion. "
                     "If both values are None, Tempest avoids tests which "
                     "require a microversion."),
+    cfg.BoolOpt('use_provision_network',
+                default=False,
+                help="Whether the Ironic/Neutron tenant isolation is enabled")
 ]
diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_manager.py
index a56c941..3f7c044 100644
--- a/ironic_tempest_plugin/tests/scenario/baremetal_manager.py
+++ b/ironic_tempest_plugin/tests/scenario/baremetal_manager.py
@@ -145,38 +145,65 @@
             dest = self.get_remote_client(self.instance)
         dest.validate_authentication()
 
-    def boot_instance(self):
-        self.instance = self.create_server(
-            key_name=self.keypair['name'])
+    def boot_instance(self, clients=None, keypair=None,
+                      net_id=None, fixed_ip=None):
+        if clients is None:
+            servers_client = self.servers_client
+        else:
+            servers_client = clients.servers_client
+        if keypair is None:
+            keypair = self.keypair
 
-        self.wait_node(self.instance['id'])
-        self.node = self.get_node(instance_id=self.instance['id'])
+        if any([net_id, fixed_ip]):
+            network = {}
+            if net_id:
+                network['uuid'] = net_id
+            if fixed_ip:
+                network['fixed_ip'] = fixed_ip
+            instance = self.create_server(
+                key_name=keypair['name'],
+                networks=[network],
+                clients=clients
+            )
+        else:
+            instance = self.create_server(
+                key_name=keypair['name'],
+                clients=clients
+            )
 
-        self.wait_power_state(self.node['uuid'], BaremetalPowerStates.POWER_ON)
+        self.wait_node(instance['id'])
+        node = self.get_node(instance_id=instance['id'])
+
+        self.wait_power_state(node['uuid'], BaremetalPowerStates.POWER_ON)
 
         self.wait_provisioning_state(
-            self.node['uuid'],
+            node['uuid'],
             [BaremetalProvisionStates.DEPLOYWAIT,
              BaremetalProvisionStates.ACTIVE],
             timeout=CONF.baremetal.deploywait_timeout)
 
-        self.wait_provisioning_state(self.node['uuid'],
+        self.wait_provisioning_state(node['uuid'],
                                      BaremetalProvisionStates.ACTIVE,
                                      timeout=CONF.baremetal.active_timeout,
                                      interval=30)
 
-        waiters.wait_for_server_status(self.servers_client,
-                                       self.instance['id'], 'ACTIVE')
-        self.node = self.get_node(instance_id=self.instance['id'])
-        self.instance = (self.servers_client.show_server(self.instance['id'])
-                         ['server'])
+        waiters.wait_for_server_status(servers_client,
+                                       instance['id'], 'ACTIVE')
+        node = self.get_node(instance_id=instance['id'])
+        instance = servers_client.show_server(instance['id'])['server']
 
-    def terminate_instance(self):
-        self.servers_client.delete_server(self.instance['id'])
-        self.wait_power_state(self.node['uuid'],
+        return instance, node
+
+    def terminate_instance(self, instance, servers_client=None):
+        if servers_client is None:
+            servers_client = self.servers_client
+
+        node = self.get_node(instance_id=instance['id'])
+        servers_client.delete_server(instance['id'])
+        self.wait_power_state(node['uuid'],
                               BaremetalPowerStates.POWER_OFF)
         self.wait_provisioning_state(
-            self.node['uuid'],
+            node['uuid'],
             [BaremetalProvisionStates.NOSTATE,
              BaremetalProvisionStates.AVAILABLE],
             timeout=CONF.baremetal.unprovision_timeout,
diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py
index 0564c10..a95b3d4 100644
--- a/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py
+++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_basic_ops.py
@@ -110,7 +110,7 @@
     @test.services('baremetal', 'compute', 'image', 'network')
     def test_baremetal_server_ops(self):
         self.add_keypair()
-        self.boot_instance()
+        self.instance, self.node = self.boot_instance()
         self.validate_ports()
         ip_address = self.get_server_ip(self.instance)
         self.get_remote_client(ip_address).validate_authentication()
@@ -125,4 +125,4 @@
             self.create_timestamp(
                 ip_address, private_key=self.keypair['private_key'])
 
-        self.terminate_instance()
+        self.terminate_instance(self.instance)
diff --git a/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
new file mode 100644
index 0000000..551c885
--- /dev/null
+++ b/ironic_tempest_plugin/tests/scenario/test_baremetal_multitenancy.py
@@ -0,0 +1,155 @@
+#
+# Copyright (c) 2015 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_log import log as logging
+
+from ironic_tempest_plugin.tests.scenario import baremetal_manager
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.scenario import manager
+from tempest import test
+
+CONF = config.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class BaremetalMultitenancy(baremetal_manager.BaremetalScenarioTest,
+                            manager.NetworkScenarioTest):
+    """Check L2 isolation of baremetal instances in different tenants:
+
+    * Create a keypair, network, subnet and router for the primary tenant
+    * Boot one instance in each tenant's network using that tenant's keypair
+    * Associate a floating IP with each instance
+    * Verify there is no L3 connectivity between instances of different tenants
+    * Verify connectivity between the instances' floating IPs
+    * Delete both instances
+    """
+
+    credentials = ['primary', 'alt', 'admin']
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaremetalMultitenancy, cls).skip_checks()
+        if not CONF.baremetal.use_provision_network:
+            msg = 'Ironic/Neutron tenant isolation is not configured.'
+            raise cls.skipException(msg)
+
+    def create_tenant_network(self, clients, tenant_cidr):
+        network = self._create_network(
+            networks_client=clients.networks_client,
+            tenant_id=clients.credentials.tenant_id)
+        router = self._get_router(
+            client=clients.routers_client,
+            tenant_id=clients.credentials.tenant_id)
+
+        result = clients.subnets_client.create_subnet(
+            name=data_utils.rand_name('subnet'),
+            network_id=network['id'],
+            tenant_id=clients.credentials.tenant_id,
+            ip_version=4,
+            cidr=tenant_cidr)
+        subnet = result['subnet']
+        clients.routers_client.add_router_interface(router['id'],
+                                                    subnet_id=subnet['id'])
+        self.addCleanup(clients.subnets_client.delete_subnet, subnet['id'])
+        self.addCleanup(clients.routers_client.remove_router_interface,
+                        router['id'], subnet_id=subnet['id'])
+
+        return network, subnet, router
+
+    def verify_l3_connectivity(self, source_ip, private_key,
+                               destination_ip, conn_expected=True):
+        remote = self.get_remote_client(source_ip, private_key=private_key)
+        remote.validate_authentication()
+
+        cmd = 'ping %s -c4 -w4 || exit 0' % destination_ip
+        success_substring = "64 bytes from %s" % destination_ip
+        output = remote.exec_command(cmd)
+        if conn_expected:
+            self.assertIn(success_substring, output)
+        else:
+            self.assertNotIn(success_substring, output)
+
+    @test.idempotent_id('26e2f145-2a8e-4dc7-8457-7f2eb2c6749d')
+    @test.services('baremetal', 'compute', 'image', 'network')
+    def test_baremetal_multitenancy(self):
+
+        tenant_cidr = '10.0.100.0/24'
+        fixed_ip1 = '10.0.100.3'
+        fixed_ip2 = '10.0.100.5'
+        keypair = self.create_keypair()
+        network, subnet, router = self.create_tenant_network(
+            self.manager, tenant_cidr)
+
+        # Boot an instance in the primary tenant network and verify
+        # connectivity to it via its floating IP
+        instance1, node1 = self.boot_instance(
+            clients=self.manager,
+            keypair=keypair,
+            net_id=network['id'],
+            fixed_ip=fixed_ip1
+        )
+        floating_ip1 = self.create_floating_ip(
+            instance1,
+        )['floating_ip_address']
+        self.check_vm_connectivity(ip_address=floating_ip1,
+                                   private_key=keypair['private_key'])
+
+        # Boot an instance in the alt tenant network and ensure there is no
+        # L3 connectivity between instances of the different tenants
+        alt_keypair = self.create_keypair(self.alt_manager.keypairs_client)
+        alt_network, alt_subnet, alt_router = self.create_tenant_network(
+            self.alt_manager, tenant_cidr)
+
+        alt_instance, alt_node = self.boot_instance(
+            keypair=alt_keypair,
+            clients=self.alt_manager,
+            net_id=alt_network['id'],
+            fixed_ip=fixed_ip2
+        )
+        alt_floating_ip = self.create_floating_ip(
+            alt_instance,
+            client=self.alt_manager.floating_ips_client
+        )['floating_ip_address']
+
+        self.check_vm_connectivity(ip_address=alt_floating_ip,
+                                   private_key=alt_keypair['private_key'])
+
+        self.verify_l3_connectivity(
+            alt_floating_ip,
+            alt_keypair['private_key'],
+            fixed_ip1,
+            conn_expected=False
+        )
+
+        self.verify_l3_connectivity(
+            floating_ip1,
+            keypair['private_key'],
+            fixed_ip2,
+            conn_expected=False
+        )
+
+        self.verify_l3_connectivity(
+            floating_ip1,
+            keypair['private_key'],
+            alt_floating_ip,
+            conn_expected=True
+        )
+
+        self.terminate_instance(
+            instance=alt_instance,
+            servers_client=self.alt_manager.servers_client)
+        self.terminate_instance(instance=instance1)