Merge "Add iDRAC RAID cleaning steps tests"
diff --git a/ironic_tempest_plugin/config.py b/ironic_tempest_plugin/config.py
index d5c1ebf..c812300 100644
--- a/ironic_tempest_plugin/config.py
+++ b/ironic_tempest_plugin/config.py
@@ -79,6 +79,9 @@
'publicURL', 'adminURL', 'internalURL'],
help="The endpoint type to use for the baremetal provisioning"
" service"),
+ cfg.StrOpt('root_device_name',
+ default='/dev/md0',
+ help="Root device name to be used for node deployment"),
cfg.IntOpt('deploywait_timeout',
default=15,
help="Timeout for Ironic node to reach the "
@@ -134,6 +137,8 @@
cfg.StrOpt('ramdisk_iso_image_ref',
help=("UUID (or url) of an ISO image for the ramdisk boot "
"tests.")),
+ cfg.StrOpt('storage_inventory_file',
+ help="Path to storage inventory file for RAID cleaning tests."),
cfg.ListOpt('enabled_drivers',
default=['fake', 'pxe_ipmitool', 'agent_ipmitool'],
help="List of Ironic enabled drivers."),
diff --git a/ironic_tempest_plugin/exceptions.py b/ironic_tempest_plugin/exceptions.py
index ac08d54..50a4468 100644
--- a/ironic_tempest_plugin/exceptions.py
+++ b/ironic_tempest_plugin/exceptions.py
@@ -23,3 +23,7 @@
class HypervisorUpdateTimeout(exceptions.TempestException):
message = "Hypervisor stats update time out"
+
+
+class RaidCleaningInventoryValidationFailed(exceptions.TempestException):
+ message = "RAID cleaning storage inventory validation failed"
diff --git a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
index e4d8717..27568a3 100644
--- a/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
+++ b/ironic_tempest_plugin/tests/scenario/baremetal_standalone_manager.py
@@ -642,7 +642,8 @@
self.assertTrue(self.ping_ip_address(self.node_ip,
should_succeed=should_succeed))
- def build_raid_and_verify_node(self, config=None, deploy_time=False):
+ def build_raid_and_verify_node(self, config=None, deploy_time=False,
+ erase_device_metadata=True):
config = config or self.raid_config
if deploy_time:
steps = [
@@ -671,14 +672,14 @@
"step": "delete_configuration"
},
{
- "interface": "deploy",
- "step": "erase_devices_metadata",
- },
- {
"interface": "raid",
"step": "create_configuration",
}
]
+ if erase_device_metadata:
+ steps.insert(1, {
+ "interface": "deploy",
+ "step": "erase_devices_metadata"})
self.baremetal_client.set_node_raid_config(self.node['uuid'],
config)
self.manual_cleaning(self.node, clean_steps=steps)
@@ -686,12 +687,14 @@
# The node has been changed; whatever happens at this point, we need
# to back out the raid configuration.
if not deploy_time:
- self.addCleanup(self.remove_raid_configuration, self.node)
+ self.addCleanup(self.remove_raid_configuration, self.node,
+ erase_device_metadata=erase_device_metadata)
# NOTE(dtantsur): this is not required, but it allows us to check that
# the RAID device was in fact created and is used for deployment.
patch = [{'path': '/properties/root_device',
- 'op': 'add', 'value': {'name': '/dev/md0'}}]
+ 'op': 'add', 'value': {
+ 'name': CONF.baremetal.root_device_name}}]
if deploy_time:
patch.append({'path': '/instance_info/traits',
'op': 'add', 'value': ['CUSTOM_RAID']})
@@ -707,18 +710,18 @@
'op': 'remove'}]
self.update_node(self.node['uuid'], patch=patch)
- def remove_raid_configuration(self, node):
+ def remove_raid_configuration(self, node, erase_device_metadata=True):
self.baremetal_client.set_node_raid_config(node['uuid'], {})
steps = [
{
"interface": "raid",
"step": "delete_configuration",
- },
- {
- "interface": "deploy",
- "step": "erase_devices_metadata",
}
]
+ if erase_device_metadata:
+ steps.append({
+ "interface": "deploy",
+ "step": "erase_devices_metadata"})
self.manual_cleaning(node, clean_steps=steps)
def rescue_unrescue(self):
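
For reference, with the changes above build_raid_and_verify_node submits one of
two clean-step lists to manual_cleaning, depending on the new flag (the step
dicts are exactly those in the code):

    # erase_device_metadata=True (the default) -- unchanged behaviour:
    steps = [
        {"interface": "raid", "step": "delete_configuration"},
        {"interface": "deploy", "step": "erase_devices_metadata"},
        {"interface": "raid", "step": "create_configuration"},
    ]

    # erase_device_metadata=False, as passed by the iDRAC tests below:
    steps = [
        {"interface": "raid", "step": "delete_configuration"},
        {"interface": "raid", "step": "create_configuration"},
    ]

remove_raid_configuration mirrors this: it appends the erase_devices_metadata
step after delete_configuration only when the flag is true.
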
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/storage_inventory_schema.json b/ironic_tempest_plugin/tests/scenario/ironic_standalone/storage_inventory_schema.json
new file mode 100644
index 0000000..6e15b01
--- /dev/null
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/storage_inventory_schema.json
@@ -0,0 +1,101 @@
+{
+ "$schema": "http://json-schema.org/draft-07/schema#",
+ "title": "Storage inventory JSON schema",
+ "type": "object",
+ "properties": {
+ "storage_inventory": {
+ "type": "object",
+ "properties": {
+ "controllers": {
+ "type": ["array", "null"],
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "description": "The unique identifier for this storage controller.",
+ "type": "string",
+ "minLength": 1
+ },
+ "serial_number": {
+ "description": "The serial number for this storage controller.",
+ "type": ["string", "null"]
+ },
+ "manufacturer": {
+ "description": "The manufacturer of this storage controller.",
+ "type": ["string", "null"]
+ },
+ "model": {
+ "description": "The model of the storage controller.",
+ "type": ["string", "null"]
+ },
+ "supported_device_protocols": {
+ "description": "The protocols that the storage controller can use to communicate with attached devices.",
+ "type": ["array", "null"],
+ "items": {
+ "type": "string",
+ "enum": ["sas", "sata", "scsi"]
+ },
+ "minItems": 1
+ },
+ "supported_raid_types": {
+ "description": "The set of RAID types supported by the storage controller.",
+ "type": ["array", "null"],
+ "items": {
+ "type": "string",
+ "enum": ["JBOD", "0", "1", "2", "5", "6", "1+0", "5+0", "6+0"]
+ },
+ "minItems": 1
+ },
+ "drives": {
+ "type": "array",
+ "items": {
+ "type": "object",
+ "properties": {
+ "id": {
+ "description": "The unique identifier for the physical drive.",
+ "type": "string",
+ "minLength": 1
+ },
+ "size_gb": {
+ "description": "The size in GiB of the physical drive.",
+ "type": ["number", "null"],
+ "minimum": 0
+ },
+ "model": {
+ "description": "The model for the physical drive.",
+ "type": ["string", "null"]
+ },
+ "media_type": {
+ "description": "The media type for the physical drive.",
+ "enum": ["hdd", "ssd", null]
+ },
+ "serial_number": {
+ "description": "The serial number for the physical drive.",
+ "type": ["string", "null"]
+ },
+ "protocol": {
+ "description": "The protocol that this drive currently uses to communicate to the storage controller.",
+ "enum": ["sas", "sata", "scsi", null]
+ }
+ },
+ "required": ["id", "size_gb", "model", "media_type", "serial_number", "protocol"],
+ "additionalProperties": false
+ }
+ }
+ },
+ "required": ["id", "serial_number", "manufacturer", "model", "supported_device_protocols", "supported_raid_types"],
+ "additionalProperties": false,
+ "dependencies": {
+ "drives": ["id"]
+ }
+ },
+ "minItems": 1
+ }
+ },
+ "required": ["controllers"],
+ "additionalProperties": false
+ }
+ },
+ "required": ["storage_inventory"],
+ "additionalProperties": false
+}
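
A storage inventory file that validates against this schema might look like the
following sketch; the controller and drive identifiers are illustrative
iDRAC-style values, and note that the RAID 1+0 test below additionally expects
a controller supporting "1+0" with at least four drives:

    {
      "storage_inventory": {
        "controllers": [
          {
            "id": "RAID.Integrated.1-1",
            "serial_number": null,
            "manufacturer": "DELL",
            "model": "PERC H740P Mini",
            "supported_device_protocols": ["sas", "sata"],
            "supported_raid_types": ["JBOD", "0", "1", "5", "1+0"],
            "drives": [
              {
                "id": "Disk.Bay.0:Enclosure.Internal.0-1:RAID.Integrated.1-1",
                "size_gb": 558,
                "model": "ST600MM0088",
                "media_type": "hdd",
                "serial_number": "EXAMPLE001",
                "protocol": "sas"
              },
              {
                "id": "Disk.Bay.1:Enclosure.Internal.0-1:RAID.Integrated.1-1",
                "size_gb": 558,
                "model": "ST600MM0088",
                "media_type": "hdd",
                "serial_number": "EXAMPLE002",
                "protocol": "sas"
              }
            ]
          }
        ]
      }
    }
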
diff --git a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
index 13fcd9b..e8bcd14 100644
--- a/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
+++ b/ironic_tempest_plugin/tests/scenario/ironic_standalone/test_cleaning.py
@@ -15,11 +15,17 @@
# License for the specific language governing permissions and limitations
# under the License.
+import json
+import os
+
+import jsonschema
+from jsonschema import exceptions as json_schema_exc
from oslo_log import log as logging
from tempest.common import utils
from tempest import config
from tempest.lib import decorators
+from ironic_tempest_plugin import exceptions
from ironic_tempest_plugin.tests.scenario import \
baremetal_standalone_manager as bsm
@@ -227,3 +233,122 @@
management_interface = 'idrac-wsman'
power_interface = 'idrac-wsman'
+
+
+class BaremetalIdracRaidCleaning(bsm.BaremetalStandaloneScenarioTest):
+
+ mandatory_attr = ['driver', 'raid_interface']
+ image_ref = CONF.baremetal.whole_disk_image_ref
+ wholedisk_image = True
+ storage_inventory_info = None
+ driver = 'idrac'
+ api_microversion = '1.31' # to set raid_interface
+ delete_node = False
+
+ @classmethod
+ def skip_checks(cls):
+ """Validates the storage information passed in file using JSON schema.
+
+ :raises: skipException if,
+ 1) storage inventory path is not provided in tempest execution
+ file.
+ 2) storage inventory file is not found on given path.
+ :raises: RaidCleaningInventoryValidationFailed if,
+ validation of the storage inventory fails.
+ """
+ super(BaremetalIdracRaidCleaning, cls).skip_checks()
+ storage_inventory = CONF.baremetal.storage_inventory_file
+ if not storage_inventory:
+ raise cls.skipException("Storage inventory file path missing "
+ "in tempest configuration file. "
+ "Skipping Test case.")
+ try:
+ with open(storage_inventory, 'r') as storage_invent_fobj:
+ cls.storage_inventory_info = json.load(storage_invent_fobj)
+ except IOError:
+ msg = ("Storage Inventory file %(inventory)s is not found. "
+ "Skipping Test Case." %
+ {'inventory': storage_inventory})
+ raise cls.skipException(msg)
+ storage_inventory_schema = os.path.join(os.path.dirname(
+ __file__), 'storage_inventory_schema.json')
+ with open(storage_inventory_schema, 'r') as storage_schema_fobj:
+ schema = json.load(storage_schema_fobj)
+ try:
+ jsonschema.validate(cls.storage_inventory_info, schema)
+ except json_schema_exc.ValidationError as e:
+ error_msg = ("Storage Inventory validation error: %(error)s " %
+ {'error': e})
+ raise exceptions.RaidCleaningInventoryValidationFailed(error_msg)
+
+ def _validate_raid_type_and_drives_count(self, raid_type,
+ minimum_drives_required):
+ for controller in (self.storage_inventory_info[
+ 'storage_inventory']['controllers']):
+ supported_raid_types = controller['supported_raid_types']
+ drives_count = len(controller['drives'])
+ if (raid_type in supported_raid_types and
+ minimum_drives_required <= drives_count):
+ return controller
+ error_msg = ("No Controller present in storage inventory which "
+ "supports RAID type %(raid_type)s "
+ "and has at least %(disk_count)s drives." %
+ {'raid_type': raid_type,
+ 'disk_count': minimum_drives_required})
+ raise exceptions.RaidCleaningInventoryValidationFailed(error_msg)
+
+ @decorators.idempotent_id('8a908a3c-f2af-48fb-8553-9163715aa403')
+ @utils.services('image', 'network')
+ def test_hardware_raid(self):
+ controller = self._validate_raid_type_and_drives_count(
+ raid_type='1', minimum_drives_required=2)
+ raid_config = {
+ "logical_disks": [
+ {
+ "size_gb": 40,
+ "raid_level": "1",
+ "controller": controller['id']
+ }
+ ]
+ }
+ self.build_raid_and_verify_node(
+ config=raid_config,
+ deploy_time=CONF.baremetal_feature_enabled.deploy_time_raid,
+ erase_device_metadata=False)
+ self.remove_root_device_hint()
+ self.terminate_node(self.node['uuid'], force_delete=True)
+
+ @decorators.idempotent_id('92fe534d-77f1-422d-84e4-e30fe9e3d928')
+ @utils.services('image', 'network')
+ def test_raid_cleaning_max_size_raid_10(self):
+ controller = self._validate_raid_type_and_drives_count(
+ raid_type='1+0', minimum_drives_required=4)
+ physical_disks = [pdisk['id'] for pdisk in controller['drives']]
+ raid_config = {
+ "logical_disks": [
+ {
+ "size_gb": "MAX",
+ "raid_level": "1+0",
+ "controller": controller['id'],
+ "physical_disks": physical_disks
+ }
+ ]
+ }
+ self.build_raid_and_verify_node(
+ config=raid_config,
+ deploy_time=CONF.baremetal_feature_enabled.deploy_time_raid,
+ erase_device_metadata=False)
+ self.remove_root_device_hint()
+ self.terminate_node(self.node['uuid'], force_delete=True)
+
+
+class BaremetalIdracRedfishRaidCleaning(
+ BaremetalIdracRaidCleaning):
+ raid_interface = 'idrac-redfish'
+
+
+class BaremetalIdracWSManRaidCleaning(
+ BaremetalIdracRaidCleaning):
+ raid_interface = 'idrac-wsman'
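
With storage_inventory_file (and, where needed, root_device_name) configured,
the new classes run like any other standalone scenario test, for example
(regex shortened for readability):

    tempest run --regex ironic_standalone.test_cleaning.BaremetalIdracRedfishRaidCleaning

The WSMan variant is selected the same way with BaremetalIdracWSManRaidCleaning.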