curtin-dev team mailing list archive
Re: [Merge] ~gyurco/curtin:imsm into curtin:master
Review: Needs Fixing
Thanks for working on this.
I've provided some feedback below in the code. In addition to what we have here, this will also need updates to curtin/block/schemas.py, as we're adding new fields to the raid structure; something along these lines:
diff --git a/curtin/block/schemas.py b/curtin/block/schemas.py
index 9e2c41fe..4dc2f0a7 100644
--- a/curtin/block/schemas.py
+++ b/curtin/block/schemas.py
@@ -316,12 +316,14 @@ RAID = {
'preserve': {'$ref': '#/definitions/preserve'},
'ptable': {'$ref': '#/definitions/ptable'},
'spare_devices': {'$ref': '#/definitions/devices'},
+ 'container': {'$ref': '#/definitions/id'},
'type': {'const': 'raid'},
'raidlevel': {
'type': ['integer', 'string'],
'oneOf': [
{'enum': [0, 1, 4, 5, 6, 10]},
- {'enum': ['raid0', 'linear', '0',
+ {'enum': ['container',
+ 'raid0', 'linear', '0',
'raid1', 'mirror', 'stripe', '1',
'raid4', '4',
'raid5', '5',
Diff comments:
> diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
> index 116ee81..ce1399e 100644
> --- a/curtin/block/clear_holders.py
> +++ b/curtin/block/clear_holders.py
> @@ -165,11 +165,17 @@ def shutdown_mdadm(device):
> """
>
> blockdev = block.sysfs_to_devpath(device)
> + query = mdadm.mdadm_query_detail(blockdev)
> +
> + if query.get('MD_CONTAINER'):
This looks like it should be an mdadm method:
mdadm_is_container()
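A minimal sketch of what that helper could look like, assuming it sits next to mdadm_query_detail() in curtin/block/mdadm.py; the name comes from this review and the body just wraps the MD_CONTAINER lookup from the hunk above:

    def mdadm_is_container(devpath):
        """Return True if mdadm --detail --export reports an MD_CONTAINER
        parent for devpath, i.e. the array lives inside a container."""
        return bool(mdadm_query_detail(devpath).get('MD_CONTAINER'))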
> + LOG.info('Array is in a container, skip discovering raid devices and spares for %s', device)
> + md_devs = ()
> + else:
> + LOG.info('Discovering raid devices and spares for %s', device)
> + md_devs = (
> + mdadm.md_get_devices_list(blockdev) +
> + mdadm.md_get_spares_list(blockdev))
>
> - LOG.info('Discovering raid devices and spares for %s', device)
> - md_devs = (
> - mdadm.md_get_devices_list(blockdev) +
> - mdadm.md_get_spares_list(blockdev))
> mdadm.set_sync_action(blockdev, action="idle")
> mdadm.set_sync_action(blockdev, action="frozen")
Do these functions work against a multi-volume container?
>
> @@ -186,7 +192,7 @@ def shutdown_mdadm(device):
> LOG.debug('Non-fatal error writing to array device %s, '
> 'proceeding with shutdown: %s', blockdev, e)
>
> - LOG.info('Removing raid array members: %s', md_devs)
> + LOG.info('Removing raid array members: ', md_devs)
Not needed for this branch
> for mddev in md_devs:
> try:
> mdadm.fail_device(blockdev, mddev)
> @@ -198,7 +204,7 @@ def shutdown_mdadm(device):
> LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev)
> mdadm.mdadm_stop(blockdev)
>
> - LOG.debug('Wiping mdadm member devices: %s' % md_devs)
> + LOG.debug('Wiping mdadm member devices: ', md_devs)
Same.
> for mddev in md_devs:
> mdadm.zero_device(mddev, force=True)
>
> diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
> index 32b467c..97160e1 100644
> --- a/curtin/block/mdadm.py
> +++ b/curtin/block/mdadm.py
> @@ -145,7 +146,7 @@ def mdadm_assemble(md_devname=None, devices=[], spares=[], scan=False,
> udev.udevadm_settle()
>
>
> -def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name="",
> +def mdadm_create(md_devname, raidlevel, devices, spares=None, container_devcnt=None, md_name="",
I think we can drop container_devcnt; raidlevel=container tells us what we're creating.
For min_devices, if raidlevel=container is set then we need at most 1 device, so we can
adjust the md_minimum_devices method to return 1 for raidlevel=container. When creating
a raid from a container, let's pass the container device, matching the yaml:
# create the container
- type: raid
  id: disk_raid_container0
  raidlevel: container
  metadata: imsm
  name: /dev/md/imsm0
  devices:
    - /dev/nvme0n1
    - /dev/nvme1n1
    - /dev/nvme2n1
    - /dev/nvme3n1

mdadm_create(md_devname="/dev/md/imsm0", raidlevel=container,
             devices=[d1, d2, d3, d4], metadata=imsm)
# create a raid from the container; here block_meta extracts the device name
# from the container id, and mdadm_create will examine the container and count
# the devices. This lets the mdadm module use mdadm tools to confirm that the
# container device really is a container and that the device count is accurate,
# and the caller of mdadm_create doesn't need to also understand
# storage-config ids.
- type: raid
  id: md126
  name: /dev/md126
  raidlevel: 5
  container: disk_raid_container0

mdadm_create(md_devname="/dev/md126", raidlevel=5, devices=[],
             container=/dev/md/imsm0)
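For reference, a sketch of how md_minimum_devices() could treat the new level under that suggestion; the thresholds for the other levels here are illustrative assumptions, not copied from curtin's current table:

    def md_minimum_devices(raidlevel):
        # a bare container only needs a single member device
        if raidlevel in ['container']:
            return 1
        if raidlevel in [0, 'raid0', '0', 'linear', 'stripe',
                         1, 'raid1', '1', 'mirror']:
            return 2
        if raidlevel in [4, 'raid4', '4', 5, 'raid5', '5',
                         6, 'raid6', '6', 10, 'raid10', '10']:
            return 3
        return -1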
> metadata=None):
> LOG.debug('mdadm_create: ' +
> 'md_name=%s raidlevel=%s ' % (md_devname, raidlevel) +
> @@ -159,8 +160,9 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name="",
> raise ValueError('Invalid raidlevel: [{}]'.format(raidlevel))
>
> min_devices = md_minimum_devices(raidlevel)
> - if len(devices) < min_devices:
> - err = 'Not enough devices for raidlevel: ' + str(raidlevel)
> + devcnt = len(devices) if not container_devcnt else container_devcnt
> + if devcnt < min_devices:
> + err = 'Not enough devices (' + str(devcnt) + ') for raidlevel: ' + str(raidlevel)
See the above comment: I suggest we push 'container' into md_minimum_devices, and when creating a raid inside a container,
mdadm_create will examine the container device to verify the device count for the selected raidlevel.
> err += ' minimum devices needed: ' + str(min_devices)
> raise ValueError(err)
>
> @@ -171,19 +173,25 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name="",
> (hostname, _err) = util.subp(["hostname", "-s"], rcs=[0], capture=True)
>
> cmd = ["mdadm", "--create", md_devname, "--run",
> - "--metadata=%s" % metadata,
> "--homehost=%s" % hostname.strip(),
> - "--level=%s" % raidlevel,
> - "--raid-devices=%s" % len(devices)]
> + "--raid-devices=%s" % devcnt]
> +
> + if raidlevel == 'container' or not container_devcnt:
> + cmd.append("--metadata=%s" % metadata)
> + if raidlevel != 'container':
> + cmd.append("--level=%s" % raidlevel)
We can simplify this to just:
    if raidlevel != 'container':
        cmd.extend(['--metadata=%s' % metadata, '--level=%s' % raidlevel])
> +
> if md_name:
> cmd.append("--name=%s" % md_name)
>
> for device in devices:
> - holders = get_holders(device)
> - if len(holders) > 0:
> - LOG.warning('Detected holders during mdadm creation: %s', holders)
> - raise OSError('Failed to remove holders from %s', device)
> - zero_device(device)
> + if not container_devcnt:
> + # clear non-container devices
> + holders = get_holders(device)
> + if len(holders) > 0:
> + LOG.warning('Detected holders during mdadm creation: %s', holders)
> + raise OSError('Failed to remove holders from %s', device)
> + zero_device(device)
I think if you drop this hunk, it will do what we want. When we create a container, devices holds a list of block devices, and we want to
walk each device and check that it's not being claimed by some other layered block device.
In the case that we're creating a raid *from* a container, if the container creation was successful, then the underlying devices (nvme1, 2, 3, 4, for example)
will already have references to the /dev/md/imsm0 device, and our devices= list is empty anyhow since we create from the container device.
> cmd.append(device)
>
> if spares:
> @@ -603,6 +611,11 @@ def __mdadm_detail_to_dict(input):
> # start after the first newline
> remainder = input[input.find('\n')+1:]
>
> + # keep only the first section (imsm)
s/imsm/metadata=imsm/
> + arraysection = remainder.find('\n[');
no trailing semi-colon needed
> + if arraysection != -1:
> + remainder = remainder[:arraysection]
> +
> # FIXME: probably could do a better regex to match the LHS which
> # has one, two or three words
> rem = r'(\w+|\w+\ \w+|\w+\ \w+\ \w+)\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)'
> diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
> index dee73b1..f061d12 100644
> --- a/curtin/commands/block_meta.py
> +++ b/curtin/commands/block_meta.py
> @@ -1486,21 +1486,32 @@ def raid_handler(info, storage_config):
> raidlevel = info.get('raidlevel')
> spare_devices = info.get('spare_devices')
> md_devname = block.md_path(info.get('name'))
> + container = info.get('container')
> + metadata = info.get('metadata')
> preserve = config.value_as_boolean(info.get('preserve'))
> - if not devices:
> - raise ValueError("devices for raid must be specified")
> + if not devices and not container:
> + raise ValueError("devices or container for raid must be specified")
> if raidlevel not in ['linear', 'raid0', 0, 'stripe', 'raid1', 1, 'mirror',
> - 'raid4', 4, 'raid5', 5, 'raid6', 6, 'raid10', 10]:
> + 'raid4', 4, 'raid5', 5, 'raid6', 6, 'raid10', 10, 'container']:
> raise ValueError("invalid raidlevel '%s'" % raidlevel)
> - if raidlevel in ['linear', 'raid0', 0, 'stripe']:
> + if raidlevel in ['linear', 'raid0', 0, 'stripe', 'container']:
> if spare_devices:
> raise ValueError("spareunsupported in raidlevel '%s'" % raidlevel)
>
> LOG.debug('raid: cfg: %s', util.json_dumps(info))
> - device_paths = list(get_path_to_storage_volume(dev, storage_config) for
> - dev in devices)
> - LOG.debug('raid: device path mapping: %s',
> - list(zip(devices, device_paths)))
> +
> + devcnt = None
> + if container:
> + parent = storage_config.get(container)
> + if not parent:
> + raise ValueError("container with id '%s' not found" % parant)
> + device_paths = [parent.get('name')]
> + devcnt = len(parent.get('devices'))
> + else:
> + device_paths = list(get_path_to_storage_volume(dev, storage_config) for
> + dev in devices)
> + LOG.debug('raid: device path mapping: {}'.format(
> + zip(devices, device_paths)))
Here I'd like to do:
    container_device = None
    if container:
        # determine the /dev/md/XXX path for the container device
        container_device = get_path_to_storage_volume(container, storage_config)
        devices = storage_config.get(container, {}).get('devices', [])
    device_paths = list(get_path_to_storage_volume(dev, storage_config) for
                        dev in devices)
    LOG.debug('raid: device path mapping: {}'.format(
        list(zip(devices, device_paths))))
>
> spare_device_paths = []
> if spare_devices:
> @@ -1517,8 +1528,8 @@ def raid_handler(info, storage_config):
>
> if create_raid:
> mdadm.mdadm_create(md_devname, raidlevel,
> - device_paths, spare_device_paths,
> - info.get('mdname', ''))
> + device_paths, spare_device_paths, devcnt,
> + info.get('mdname', ''), metadata)
Drop devcnt, and instead we want at the end of the params:
container=container_device
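i.e. roughly this (a sketch only; it assumes mdadm_create grows a trailing container keyword, which isn't its current signature):

    mdadm.mdadm_create(md_devname, raidlevel,
                       device_paths, spare_device_paths,
                       info.get('mdname', ''), metadata,
                       container=container_device)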
>
> wipe_mode = info.get('wipe')
> if wipe_mode:
> diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py
> index dba0f74..d5d89ca 100644
> --- a/tests/unittests/test_block_mdadm.py
> +++ b/tests/unittests/test_block_mdadm.py
> @@ -315,6 +333,70 @@ class TestBlockMdadmExamine(CiTestCase):
> self.mock_util.subp.assert_has_calls(expected_calls)
> self.assertEqual(data, {})
>
> + def test_mdadm_examine_no_export_imsm(self):
> + self.mock_util.subp.return_value = ("""/dev/nvme0n1:
> + Magic : Intel Raid ISM Cfg Sig.
> + Version : 1.3.00
> + Orig Family : 6f8c68e3
> + Family : 6f8c68e3
> + Generation : 00000112
> + Attributes : All supported
> + UUID : 7ec12162:ee5cd20b:0ac8b069:cfbd93ec
> + Checksum : 4a5cebe2 correct
> + MPB Sectors : 2
> + Disks : 4
> + RAID Devices : 1
Nice, we'll want to use the Disks field to determine the container device count that gets passed to create; see the sketch after the quoted diff below.
> +
> + Disk03 Serial : LJ910504Q41P0FGN
> + State : active
> + Id : 00000000
> + Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
> +
> +[126]:
> + UUID : f9792759:7f61d0c7:e7313d5a:2e7c2e22
> + RAID Level : 5
> + Members : 4
> + Slots : [UUUU]
> + Failed disk : none
> + This Slot : 3
> + Sector Size : 512
> + Array Size : 5860540416 (2794.52 GiB 3000.60 GB)
> + Per Dev Size : 1953515520 (931.51 GiB 1000.20 GB)
> + Sector Offset : 0
> + Num Stripes : 7630912
> + Chunk Size : 128 KiB
> + Reserved : 0
> + Migrate State : idle
> + Map State : normal
> + Dirty State : dirty
> + RWH Policy : off
> +
> + Disk00 Serial : LJ91040H2Y1P0FGN
> + State : active
> + Id : 00000003
> + Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
> +
> + Disk01 Serial : LJ916308CZ1P0FGN
> + State : active
> + Id : 00000002
> + Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
> +
> + Disk02 Serial : LJ916308RF1P0FGN
> + State : active
> + Id : 00000001
> + Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
> + """, "") # mdadm --examine /dev/nvme0n1
> +
> + device = "/dev/nvme0n1"
> + data = mdadm.mdadm_examine(device, export=False)
> +
> + expected_calls = [
> + call(["mdadm", "--examine", device], capture=True),
> + ]
> + self.mock_util.subp.assert_has_calls(expected_calls)
> + self.assertEqual(data['uuid'],
> + '7ec12162:ee5cd20b:0ac8b069:cfbd93ec')
> +
>
> class TestBlockMdadmStop(CiTestCase):
> def setUp(self):
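Following up on the Disks comment above, a hedged sketch of how that count could be read back, mirroring the mdadm_examine() call exercised by this test. The lowercased key name ('Disks : 4' becoming data['disks']) is an assumption based on __mdadm_detail_to_dict()'s handling of the other fields, and whether the same works directly against the container node is left open:

    from curtin.block import mdadm

    # member-disk count recorded in the imsm metadata of one member device
    examine = mdadm.mdadm_examine('/dev/nvme0n1', export=False)
    container_devcnt = int(examine.get('disks', 0))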
--
https://code.launchpad.net/~gyurco/curtin/+git/curtin/+merge/390307
Your team curtin developers is requested to review the proposed merge of ~gyurco/curtin:imsm into curtin:master.