Merge "Add test for dhcp-less vmedia based deployment"
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index 8fbfbd5..e1a7c5e 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -238,6 +238,17 @@
     cfg.StrOpt('default_boot_option',
                # No good default here, we need to actually set it.
                help="The default boot option the testing nodes are using."),
+    cfg.BoolOpt("rebuild_remote_dhcpless",
+                default=True,
+                help="If we should issue a rebuild request when testing "
+                     "dhcpless virtual media deployments. This may be useful "
+                     "if bug 2032377 is not fixed in the agent ramdisk."),
+    cfg.StrOpt("public_subnet_id",
+               help="The public subnet ID where routers will be bound for "
+                    "testing purposes with the dhcp-less test scenario."),
+    cfg.StrOpt("public_subnet_ip",
+               help="The public subnet IP to bind the public router to for "
+                    "dhcp-less testing.")
 ]
 
 BaremetalFeaturesGroup = [
@@ -258,6 +269,14 @@
                 default=False,
                 help="Defines if in-band RAID can be built in deploy time "
                      "(possible starting with Victoria)."),
+    cfg.BoolOpt('dhcpless_vmedia',
+                default=False,
+                help="Defines if it is possible to execute DHCP-Less "
+                     "deployment of baremetal nodes through virtual media. "
+                     "This test requires full OS images with configuration "
+                     "support for embedded network metadata through glean "
+                     "or cloud-init, and thus cannot be executed with "
+                     "most default job configurations."),
 ]
 
 BaremetalIntrospectionGroup = [
diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
index 1ad1485..04e2e5a 100644
--- a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
+++ b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
@@ -15,6 +15,7 @@
 #    License for the specific language governing permissions and limitations
 #    under the License.
 
+import ipaddress
 import random
 
 from oslo_utils import uuidutils
@@ -265,8 +266,66 @@
             raise lib_exc.TimeoutException(msg)
 
     @classmethod
+    def gen_config_drive_net_info(cls, node_id, n_port):
+        # Find the port with the vif.
+        use_port = None
+        _, body = cls.baremetal_client.list_node_ports(node_id)
+        for port in body['ports']:
+            _, p = cls.baremetal_client.show_port(port['uuid'])
+            if 'tenant_vif_port_id' in p['internal_info']:
+                use_port = p
+                break
+        if not use_port:
+            m = ('Unable to determine proper mac address to use for config '
+                 'to apply for the virtual media port test.')
+            raise lib_exc.InvalidConfiguration(m)
+        vif_mac_address = use_port['address']
+        if CONF.validation.ip_version_for_ssh == 4:
+            ip_version = "ipv4"
+        else:
+            ip_version = "ipv6"
+        ip_address = n_port['fixed_ips'][0]['ip_address']
+        subnet_id = n_port['fixed_ips'][0]['subnet_id']
+        subnet = cls.os_primary.subnets_client.show_subnet(
+            subnet_id).get('subnet')
+        ip_netmask = str(ipaddress.ip_network(subnet.get('cidr')).netmask)
+        if ip_version == "ipv4":
+            route = [{
+                "netmask": "0.0.0.0",
+                "network": "0.0.0.0",
+                "gateway": subnet.get('gateway_ip'),
+            }]
+        else:
+            # Eh... the data structure doesn't really allow for
+            # this to be easy since a default route with v6
+            # is just referred to as ::/0
+            # so network and netmask would be ::, which is
+            # semi-mind-breaking. Anyway, route advertisers are
+            # expected in this case.
+            route = []
+
+        return {
+            "links": [{"id": "port-test",
+                       "type": "vif",
+                       "ethernet_mac_address": vif_mac_address}],
+            "networks": [
+                {
+                    "id": "network0",
+                    "type": ip_version,
+                    "link": "port-test",
+                    "ip_address": ip_address,
+                    "netmask": ip_netmask,
+                    "network_id": "network0",
+                    "routes": route
+                }
+            ],
+            "services": []
+        }
+
+    @classmethod
     def boot_node(cls, image_ref=None, image_checksum=None,
-                  boot_option=None):
+                  boot_option=None, config_drive_networking=False,
+                  fallback_network=None):
         """Boot ironic node.
 
         The following actions are executed:
@@ -282,7 +341,13 @@
         :param boot_option: The defaut boot option to utilize. If not
                             specified, the ironic deployment default shall
                             be utilized.
+        :param config_drive_networking: If we should load configuration drive
+            with network_data values.
+        :param fallback_network: Network to use if we are not able to detect
+            a network for use.
         """
+        config_drive = {}
+
         if image_ref is None:
             image_ref = cls.image_ref
         if image_checksum is None:
@@ -291,8 +356,22 @@
             boot_option = cls.boot_option
 
         network, subnet, router = cls.create_networks()
-        n_port = cls.create_neutron_port(network_id=network['id'])
+        try:
+            n_port = cls.create_neutron_port(network_id=network['id'])
+
+        except TypeError:
+            if fallback_network:
+                n_port = cls.create_neutron_port(
+                    network_id=fallback_network)
+            else:
+                raise
         cls.vif_attach(node_id=cls.node['uuid'], vif_id=n_port['id'])
+        config_drive = None
+        if config_drive_networking:
+            config_drive = {}
+            config_drive['network_data'] = cls.gen_config_drive_net_info(
+                cls.node['uuid'], n_port)
+
         patch = [{'path': '/instance_info/image_source',
                   'op': 'add',
                   'value': image_ref}]
@@ -308,9 +387,15 @@
             patch.append({'path': '/instance_info/capabilities',
                           'op': 'add',
                           'value': {'boot_option': boot_option}})
-        # TODO(vsaienko) add testing for custom configdrive
         cls.update_node(cls.node['uuid'], patch=patch)
-        cls.set_node_provision_state(cls.node['uuid'], 'active')
+
+        if not config_drive:
+            cls.set_node_provision_state(cls.node['uuid'], 'active')
+        else:
+            cls.set_node_provision_state(
+                cls.node['uuid'], 'active',
+                configdrive=config_drive)
+
         cls.wait_power_state(cls.node['uuid'],
                              bm.BaremetalPowerStates.POWER_ON)
         cls.wait_provisioning_state(cls.node['uuid'],
@@ -332,7 +417,12 @@
         cls.detach_all_vifs_from_node(node_id, force_delete=force_delete)
 
         if cls.delete_node or force_delete:
-            cls.set_node_provision_state(node_id, 'deleted')
+            node_state = cls.get_node(node_id)['provision_state']
+            if node_state != bm.BaremetalProvisionStates.AVAILABLE:
+                # Check the state before making the call, to permit tests to
+                # drive node into a clean state before exiting the test, which
+                # is needed for some tests because of complex tests.
+                cls.set_node_provision_state(node_id, 'deleted')
             # NOTE(vsaienko) We expect here fast switching from deleted to
             # available as automated cleaning is disabled so poll status
             # each 1s.
@@ -579,9 +669,16 @@
                 'Partitioned images are not supported with multitenancy.')
 
     @classmethod
-    def set_node_to_active(cls, image_ref=None, image_checksum=None):
-        cls.boot_node(image_ref, image_checksum)
-        if CONF.validation.connect_method == 'floating':
+    def set_node_to_active(cls, image_ref=None, image_checksum=None,
+                           fallback_network=None,
+                           config_drive_networking=None,
+                           method_to_get_ip=None):
+        cls.boot_node(image_ref, image_checksum,
+                      fallback_network=fallback_network,
+                      config_drive_networking=config_drive_networking)
+        if method_to_get_ip:
+            cls.node_ip = method_to_get_ip(cls.node['uuid'])
+        elif CONF.validation.connect_method == 'floating':
             cls.node_ip = cls.add_floatingip_to_node(cls.node['uuid'])
         elif CONF.validation.connect_method == 'fixed':
             cls.node_ip = cls.get_server_ip(cls.node['uuid'])
@@ -622,11 +719,7 @@
         cls.update_node_driver(cls.node['uuid'], cls.driver, **boot_kwargs)
 
     @classmethod
-    def resource_cleanup(cls):
-        if CONF.validation.connect_method == 'floating':
-            if cls.node_ip:
-                cls.cleanup_floating_ip(cls.node_ip)
-
+    def cleanup_vif_attachments(cls):
         vifs = cls.get_node_vifs(cls.node['uuid'])
         # Remove ports before deleting node, to catch regression for cases
         # when user did this prior unprovision node.
@@ -635,6 +728,18 @@
                 cls.ports_client.delete_port(vif)
             except lib_exc.NotFound:
                 pass
+
+    @classmethod
+    def resource_cleanup(cls):
+        if CONF.validation.connect_method == 'floating':
+            if cls.node_ip:
+                try:
+                    cls.cleanup_floating_ip(cls.node_ip)
+                except IndexError:
+                    # There is no fip to actually remove in this case.
+                    pass
+
+        cls.cleanup_vif_attachments()
         cls.terminate_node(cls.node['uuid'])
         cls.unreserve_node(cls.node)
         base.reset_baremetal_api_microversion()
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_advanced_ops.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_advanced_ops.py
new file mode 100644
index 0000000..7db9ae1
--- /dev/null
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_advanced_ops.py
@@ -0,0 +1,149 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from tempest.common import utils
+from tempest import config
+from tempest.lib.common.utils import data_utils
+from tempest.lib import decorators
+
+from ironic_tempest_plugin.tests.scenario import \
+    baremetal_standalone_manager as bsm
+
+CONF = config.CONF
+
+
+class BaremetalRedfishDHCPLessDeploy(bsm.BaremetalStandaloneScenarioTest):
+
+    api_microversion = '1.59'  # Ussuri for redfish-virtual-media
+    driver = 'redfish'
+    deploy_interface = 'direct'
+    boot_interface = 'redfish-virtual-media'
+    image_ref = CONF.baremetal.whole_disk_image_ref
+    image_checksum = CONF.baremetal.whole_disk_image_checksum
+    wholedisk_image = True
+
+    @classmethod
+    def skip_checks(cls):
+        super(BaremetalRedfishDHCPLessDeploy, cls).skip_checks()
+        if not CONF.baremetal_feature_enabled.dhcpless_vmedia:
+            raise cls.skipException("This test requires a full OS image to "
+                                    "be deployed, and thus must be "
+                                    "explicitly enabled for testing.")
+
+        if (not CONF.baremetal.public_subnet_id
+                or not CONF.baremetal.public_subnet_ip):
+            raise cls.skipException(
+                "This test requires a public subnet ID, and public subnet "
+                "IP to use on that subnet to execute. Please see the "
+                "baremetal configuration options public_subnet_id "
+                "and public_subnet_ip respectively, and populate with "
+                "appropriate values to execute this test.")
+
+    def create_tenant_network(self, clients, tenant_cidr, ip_version):
+        # NOTE(TheJulia): self.create_network is an internal method
+        # which just gets the info, doesn't actually create a network.
+        network = self.create_network(
+            networks_client=self.os_admin.networks_client,
+            project_id=clients.credentials.project_id,
+            shared=True)
+
+        router = self.get_router(
+            client=clients.routers_client,
+            project_id=clients.credentials.tenant_id,
+            external_gateway_info={
+                'network_id': CONF.network.public_network_id,
+                'external_fixed_ips': [
+                    {'subnet_id': CONF.baremetal.public_subnet_id,
+                     'ip_address': CONF.baremetal.public_subnet_ip}]
+            })
+        result = clients.subnets_client.create_subnet(
+            name=data_utils.rand_name('subnet'),
+            network_id=network['id'],
+            tenant_id=clients.credentials.tenant_id,
+            ip_version=CONF.validation.ip_version_for_ssh,
+            cidr=tenant_cidr, enable_dhcp=False)
+        subnet = result['subnet']
+        clients.routers_client.add_router_interface(router['id'],
+                                                    subnet_id=subnet['id'])
+        self.addCleanup(clients.subnets_client.delete_subnet, subnet['id'])
+        self.addCleanup(clients.routers_client.remove_router_interface,
+                        router['id'], subnet_id=subnet['id'])
+        return network, subnet, router
+
+    def deploy_vmedia_dhcpless(self, rebuild=False):
+        """Helper to facilitate vmedia testing.
+
+        * Create Network/router without DHCP
+        * Set provisioning_network for this node.
+        * Set cleanup to undo the provisioning network setup.
+        * Launch instance.
+          * Requirement: Instance OS image supports network config from
+            network_data embedded in the OS. i.e. a real image, not
+            cirros.
+        * If so enabled, rebuild the node, Verify rebuild completed.
+        * Via cleanup: Teardown Network/Router
+        """
+
+        # Get the latest state for the node.
+        self.node = self.get_node(self.node['uuid'])
+        prior_prov_net = self.node['driver_info'].get('provisioning_network')
+
+        ip_version = CONF.validation.ip_version_for_ssh
+        tenant_cidr = '10.0.6.0/24'
+        if ip_version == 6:
+            tenant_cidr = 'fd00:33::/64'
+
+        network, subnet, router = self.create_tenant_network(
+            self.os_admin, tenant_cidr, ip_version=ip_version)
+        if prior_prov_net:
+            self.update_node(self.node['uuid'],
+                             [{'op': 'replace',
+                               'path': '/driver_info/provisioning_network',
+                               'value': network['id']}])
+            self.addCleanup(self.update_node,
+                            self.node['uuid'],
+                            [{'op': 'replace',
+                              'path': '/driver_info/provisioning_network',
+                              'value': prior_prov_net}])
+        else:
+            self.update_node(self.node['uuid'],
+                             [{'op': 'add',
+                               'path': '/driver_info/provisioning_network',
+                               'value': network['id']}])
+            self.addCleanup(self.update_node,
+                            self.node['uuid'],
+                            [{'op': 'remove',
+                              'path': '/driver_info/provisioning_network'}])
+
+        self.set_node_to_active(self.image_ref, self.image_checksum,
+                                fallback_network=network['id'],
+                                config_drive_networking=True,
+                                method_to_get_ip=self.get_server_ip)
+
+        # node_ip is set by the prior call to set_node_to_active
+        self.assertTrue(self.ping_ip_address(self.node_ip))
+
+        if rebuild:
+            self.set_node_provision_state(self.node['uuid'], 'rebuild')
+            self.wait_provisioning_state(self.node['uuid'], 'active',
+                                         timeout=CONF.baremetal.active_timeout,
+                                         interval=30)
+            # Assert we were able to ping after rebuilding.
+            self.assertTrue(self.ping_ip_address(self.node_ip))
+        # Force delete so we remove the vifs
+        self.terminate_node(self.node['uuid'], force_delete=True)
+
+    @decorators.idempotent_id('1f420ef3-99bd-46c7-b859-ce9c2892697f')
+    @utils.services('image', 'network')
+    def test_ip_access_to_server(self):
+        self.deploy_vmedia_dhcpless(
+            rebuild=CONF.baremetal.rebuild_remote_dhcpless)