Merge "Test software RAID in deploy-time on Victoria and newer"
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index 90d55fc..0bafc10 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -170,6 +170,10 @@
                 help="Defines if software RAID is enabled (available "
                      "starting with Train). Requires at least two disks "
                      "on testing nodes."),
+    cfg.BoolOpt('deploy_time_raid',
+                default=False,
+                help="Defines if in-band RAID can be built at deploy time "
+                     "(possible starting with Victoria)."),
 ]
 
 BaremetalIntrospectionGroup = [
diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
index d33f51d..bad3767 100644
--- a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
+++ b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
@@ -572,32 +572,53 @@
         self.assertTrue(self.ping_ip_address(self.node_ip,
                                              should_succeed=should_succeed))
 
-    def build_raid_and_verify_node(self, config=None, clean_steps=None):
+    def build_raid_and_verify_node(self, config=None, deploy_time=False):
         config = config or self.raid_config
-        clean_steps = clean_steps or [
-            {
-                "interface": "raid",
-                "step": "delete_configuration"
-            },
-            # NOTE(dtantsur): software RAID building fails if any
-            # partitions exist on holder devices.
-            {
-                "interface": "deploy",
-                "step": "erase_devices_metadata"
-            },
-            {
-                "interface": "raid",
-                "step": "create_configuration"
-            }
-        ]
-
-        self.baremetal_client.set_node_raid_config(self.node['uuid'], config)
-        self.manual_cleaning(self.node, clean_steps=clean_steps)
+        if deploy_time:
+            steps = [
+                {
+                    "interface": "deploy",
+                    "step": "erase_devices_metadata",
+                    "priority": 98,
+                    "args": {},
+                },
+                {
+                    "interface": "raid",
+                    "step": "apply_configuration",
+                    "priority": 97,
+                    "args": {"raid_config": config},
+                }
+            ]
+            self.baremetal_client.create_deploy_template(
+                'CUSTOM_RAID', steps=steps)
+            self.baremetal_client.add_node_trait(self.node['uuid'],
+                                                 'CUSTOM_RAID')
+        else:
+            steps = [
+                {
+                    "interface": "raid",
+                    "step": "delete_configuration"
+                },
+                {
+                    "interface": "deploy",
+                    "step": "erase_devices_metadata",
+                },
+                {
+                    "interface": "raid",
+                    "step": "create_configuration",
+                }
+            ]
+            self.baremetal_client.set_node_raid_config(self.node['uuid'],
+                                                       config)
+            self.manual_cleaning(self.node, clean_steps=steps)
 
         # NOTE(dtantsur): this is not required, but it allows us to check that
         # the RAID device was in fact created and is used for deployment.
         patch = [{'path': '/properties/root_device',
                   'op': 'add', 'value': {'name': '/dev/md0'}}]
+        if deploy_time:
+            patch.append({'path': '/instance_info/traits',
+                          'op': 'add', 'value': ['CUSTOM_RAID']})
         self.update_node(self.node['uuid'], patch=patch)
         # NOTE(dtantsur): apparently cirros cannot boot from md devices :(
         # So we only move the node to active (verifying deployment).
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
index c068fbe..47e5f63 100644
--- a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
@@ -87,7 +87,7 @@
     wholedisk_image = True
     deploy_interface = 'iscsi'
     raid_interface = 'agent'
-    api_microversion = '1.31'
+    api_microversion = '1.55'
     # Software RAID is always local boot
     boot_option = 'local'
 
@@ -112,7 +112,8 @@
     @decorators.idempotent_id('7ecba4f7-98b8-4ea1-b95e-3ec399f46798')
     @utils.services('image', 'network')
     def test_software_raid(self):
-        self.build_raid_and_verify_node()
+        self.build_raid_and_verify_node(
+            deploy_time=CONF.baremetal_feature_enabled.deploy_time_raid)
         # NOTE(TheJulia): tearing down/terminating the instance does not
         # remove the root device hint, so it is best for us to go ahead
         # and remove it before exiting the test.
@@ -134,7 +135,7 @@
     wholedisk_image = True
     deploy_interface = 'direct'
     raid_interface = 'agent'
-    api_microversion = '1.31'
+    api_microversion = '1.55'
     # Software RAID is always local boot
     boot_option = 'local'
 
@@ -160,7 +161,8 @@
     @decorators.idempotent_id('125361ac-0eb3-4d79-8be2-a91936aa3f46')
     @utils.services('image', 'network')
     def test_software_raid(self):
-        self.build_raid_and_verify_node()
+        self.build_raid_and_verify_node(
+            deploy_time=CONF.baremetal_feature_enabled.deploy_time_raid)
         # NOTE(TheJulia): tearing down/terminating the instance does not
         # remove the root device hint, so it is best for us to go ahead
         # and remove it before exiting the test.
diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml
index b402cc1..b3cd832 100644
--- a/zuul.d/project.yaml
+++ b/zuul.d/project.yaml
@@ -21,16 +21,15 @@
         - ironic-inspector-tempest-train
         - ironic-inspector-tempest-stein:
             voting: false
-        # NOTE(dtantsur): these jobs cover rarely changed tests and are quite
-        # unstable, so keep them non-voting.
-        - ironic-standalone-redfish:
-            voting: false
+        - ironic-standalone-redfish
         - ironic-standalone-redfish-ussuri:
             voting: false
         - ironic-standalone-redfish-train:
             voting: false
         - ironic-standalone-redfish-stein:
             voting: false
+        # NOTE(dtantsur): these jobs cover rarely changed tests and are quite
+        # unstable, so keep them non-voting.
         - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode:
             voting: false
         - ironic-tempest-ipa-wholedisk-direct-tinyipa-multinode-ussuri:
@@ -58,4 +57,5 @@
         - ironic-inspector-tempest
         - ironic-inspector-tempest-ussuri
         - ironic-inspector-tempest-train
+        - ironic-standalone-redfish
         - ironic-inspector-tempest-discovery