curtin-dev team mailing list archive
-
curtin-dev team
-
Mailing list archive
-
Message #01630
Re: [Merge] ~mwhudson/curtin:reformat-vs-preserve into curtin:master
This looks great. Two comments:
1) let's update doc/topics/storage.rst with the comment you added in the disk handler around what preserve/wipe means for existing compound devices.
2) In the vmtest, create a partition table on top of the raid that is different from the final ptable in the storage config
Diff comments:
> diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
> index 8cb2784..e5cf659 100644
> --- a/curtin/commands/block_meta.py
> +++ b/curtin/commands/block_meta.py
> @@ -583,7 +583,17 @@ def disk_handler(info, storage_config):
> 'Invalid partition table type: %s in %s' % (ptable, info))
>
> disk = get_path_to_storage_volume(info.get('id'), storage_config)
> - if config.value_as_boolean(info.get('preserve')):
> + # For disks, 'preserve' is what indicates whether the partition
> + # table should be reused or recreated but for compound devices
> + # such as raids, it indicates if the raid should be created or
> + # assumed to already exist. So to allow a pre-existing raid to get
> + # a new partition table, we use presence of 'wipe' field to
> + # indicate if the disk should be reformatted or not.
This is well said. I think we should include this as an update to doc/topics/storage.rst under the preserve section under Disk.
> + if info['type'] == 'disk':
> + preserve_ptable = config.value_as_boolean(info.get('preserve'))
> + else:
> + preserve_ptable = not config.value_as_boolean(info.get('wipe'))
> + if preserve_ptable:
> # Handle preserve flag, verifying if ptable specified in config
> if ptable and ptable != PTABLE_UNSUPPORTED:
> current_ptable = block.get_part_table_type(disk)
> diff --git a/examples/tests/partition-existing-raid.yaml b/examples/tests/partition-existing-raid.yaml
> new file mode 100644
> index 0000000..38b749c
> --- /dev/null
> +++ b/examples/tests/partition-existing-raid.yaml
> @@ -0,0 +1,112 @@
> +showtrace: true
> +
> +bucket:
> + - &setup |
> + parted /dev/disk/by-id/virtio-disk-b --script -- \
> + mklabel gpt \
> + mkpart primary 1GiB 9GiB \
> + set 1 boot on
> + parted /dev/disk/by-id/virtio-disk-c --script -- \
> + mklabel gpt \
> + mkpart primary 1GiB 9GiB \
> + set 1 boot on
> + udevadm settle
> + mdadm --create --metadata 1.2 --level 1 -n 2 /dev/md1 --assume-clean \
> + /dev/disk/by-id/virtio-disk-b-part1 /dev/disk/by-id/virtio-disk-c-part1
We should also create a partition table, say msdos, on top of the raid device
so we can verify that we've installed a "new" partition table on the existing raid.
> + udevadm settle
> + mdadm --stop /dev/md1
> + udevadm settle
> +
> +# Create a RAID now to test curtin's reuse of existing RAIDs.
> +early_commands:
> + 00-setup-raid: [sh, -exuc, *setup]
> +
> +storage:
> + config:
> + - type: disk
> + id: id_disk0
> + serial: disk-a
> + ptable: gpt
> + wipe: superblock
> + - type: disk
> + id: id_disk1
> + serial: disk-b
> + ptable: gpt
> + preserve: true
> + - type: disk
> + id: id_disk2
> + serial: disk-c
> + ptable: gpt
> + preserve: true
> + - type: partition
> + id: id_disk0_part1
> + device: id_disk0
> + flag: boot
> + number: 1
> + size: 512M
> + - type: partition
> + id: id_disk0_part2
> + device: id_disk0
> + number: 2
> + size: 3G
> + - type: partition
> + id: id_disk0_part3
> + device: id_disk0
> + number: 3
> + size: 3G
> + - type: partition
> + id: id_disk1_part1
> + device: id_disk1
> + flag: boot
> + number: 1
> + size: 8G
> + preserve: true
> + - type: partition
> + id: id_disk2_part1
> + device: id_disk2
> + flag: boot
> + number: 1
> + size: 8G
> + preserve: true
> + - type: raid
> + id: raid-md1
> + name: md1
> + raidlevel: raid1
> + devices:
> + - id_disk1_part1
> + - id_disk2_part1
> + spare_devices: []
> + metadata: 1.2
> + preserve: true
> + wipe: superblock
> + ptable: gpt
> + - type: partition
> + id: id_raid1_part1
> + device: raid-md1
> + number: 1
> + size: 7G
> + - type: format
> + id: id_efi_format
> + volume: id_disk0_part1
> + fstype: fat32
> + - type: format
> + id: id_root_format
> + volume: id_disk0_part2
> + fstype: ext4
> + - type: format
> + id: id_raid-md1_format
> + volume: id_raid1_part1
> + fstype: ext4
> + - type: mount
> + device: id_root_format
> + id: id_root_mount
> + path: /
> + - type: mount
> + id: id_efi_mount
> + device: id_efi_format
> + path: /boot/efi
> + - type: mount
> + id: id_raid-md1_mount
> + device: id_raid-md1_format
> + path: /srv
> + version: 1
> diff --git a/tests/vmtests/test_preserve_raid.py b/tests/vmtests/test_preserve_raid.py
> index 7fc6daa..6154fa7 100644
> --- a/tests/vmtests/test_preserve_raid.py
> +++ b/tests/vmtests/test_preserve_raid.py
> @@ -37,4 +37,35 @@ class GroovyTestPreserveRAID(relbase.groovy, TestPreserveRAID):
> __test__ = True
>
>
> +class TestPartitionExistingRAID(VMBaseClass):
> + """ Test that curtin can reuse a RAID. """
> + conf_file = "examples/tests/partition-existing-raid.yaml"
> + extra_disks = ['10G', '10G', '10G']
> + uefi = True
> + extra_collect_scripts = []
> +
> + def test_existing_exists(self):
> + pass
Let's confirm that the raid partition table is in the format we expect (gpt) vs what the pre-existing raid content had (I suggest we place a msdos ptable).
> +
> +
> +class BionicTestPartitionExistingRAID(
> + relbase.bionic, TestPartitionExistingRAID):
> + __test__ = True
> +
> +
> +class FocalTestPartitionExistingRAID(
> + relbase.focal, TestPartitionExistingRAID):
> + __test__ = True
> +
> +
> +class HirsuteTestPartitionExistingRAID(
> + relbase.hirsute, TestPartitionExistingRAID):
> + __test__ = True
> +
> +
> +class GroovyTestPartitionExistingRAID(
> + relbase.groovy, TestPartitionExistingRAID):
> + __test__ = True
> +
> +
> # vi: ts=4 expandtab syntax=python
--
https://code.launchpad.net/~mwhudson/curtin/+git/curtin/+merge/404623
Your team curtin developers is subscribed to branch curtin:master.
References