← Back to team overview

curtin-dev team mailing list archive

[Merge] ~mwhudson/curtin:v2-partition-ops into curtin:master

 

Michael Hudson-Doyle has proposed merging ~mwhudson/curtin:v2-partition-ops into curtin:master.

Commit message:
block_meta: implement v2 partitioning

v2 partitioning respects supplied offsets and deletes
unreferenced partitions.

Requested reviews:
  curtin developers (curtin-dev)

For more details, see:
https://code.launchpad.net/~mwhudson/curtin/+git/curtin/+merge/414079

This change is missing a few things:

1) doc changes
2) dasd support
3) changing at least one vmtest to use v2 config

But it's a start!
-- 
Your team curtin developers is requested to review the proposed merge of ~mwhudson/curtin:v2-partition-ops into curtin:master.
diff --git a/curtin/commands/block_meta_v2.py b/curtin/commands/block_meta_v2.py
index 25f777b..677cd25 100644
--- a/curtin/commands/block_meta_v2.py
+++ b/curtin/commands/block_meta_v2.py
@@ -1,17 +1,253 @@
 # This file is part of curtin. See LICENSE file for copyright and license info.
 
+import os
+from typing import Optional
+
+import attr
+
+from curtin import (block, util)
 from curtin.commands.block_meta import (
     disk_handler as disk_handler_v1,
+    get_path_to_storage_volume,
     partition_handler as partition_handler_v1,
+    partition_verify_sfdisk,
+    )
+from curtin.log import LOG
+from curtin.storage_config import (
+    GPT_GUID_TO_CURTIN_MAP,
     )
 
 
+@attr.s(auto_attribs=True)
+class PartTableEntry:
+    """One partition in an sfdisk script: rendered as a line of input."""
+
+    number: int
+    # start/size are in 512-byte sectors (see SECTOR_BYTES below).
+    start: int
+    size: int
+    type: str
+    uuid: Optional[str]
+    bootable: bool = False
+
+    def render(self):
+        """Render this entry as one line of sfdisk script input."""
+        r = f'{self.number}: '
+        for a in 'start', 'size', 'type', 'uuid':
+            v = getattr(self, a)
+            # Attributes left as None are omitted so sfdisk applies its
+            # own defaults.
+            if v is not None:
+                r += f' {a}={v}'
+        if self.bootable:
+            r += ' bootable'
+        return r
+
+
+ONE_MB_BYTES = 1 << 20
+# All partition arithmetic in this module is done in 512-byte sectors.
+SECTOR_BYTES = 512
+ONE_MB_SECTORS = ONE_MB_BYTES // SECTOR_BYTES
+
+
+def align_up(size, block_size):
+    # Round size up to a multiple of block_size. The bit trick requires
+    # block_size to be a power of two (true for ONE_MB_SECTORS).
+    return (size + block_size - 1) & ~(block_size - 1)
+
+
+def align_down(size, block_size):
+    # Round size down to a multiple of block_size (power of two only).
+    return size & ~(block_size - 1)
+
+
+# Inverse of GPT_GUID_TO_CURTIN_MAP: look up the GPT type GUID for a
+# curtin partition flag name.
+FLAG_TO_GUID = {
+    flag: guid for (guid, (flag, typecode)) in GPT_GUID_TO_CURTIN_MAP.items()
+    }
+
+
+class SFDiskPartTable:
+    """Builds sfdisk script input for a whole partition table.
+
+    Subclasses provide the `label` attribute ('gpt' or 'dos') and an
+    add() method that appends a PartTableEntry for a config action.
+    """
+
+    def __init__(self):
+        self.entries = []
+
+    def render(self):
+        # sfdisk script format: a "label:" header line, a blank line,
+        # then one line per partition.
+        r = ['label: ' + self.label, ''] + [e.render() for e in self.entries]
+        return '\n'.join(r)
+
+    def apply(self, device):
+        """Write the rendered table to device and re-read partitions."""
+        sfdisk_script = self.render()
+        LOG.debug("sfdisk input:\n---\n%s\n---\n", sfdisk_script)
+        # --no-tell: presumably defers the kernel re-read so the explicit
+        # partprobe below picks up the finished table -- TODO confirm.
+        util.subp(
+            ['sfdisk', '--lock', '--no-tell', device],
+            data=sfdisk_script.encode('ascii'))
+        util.subp(['partprobe', device])
+
+
+class GPTPartTable(SFDiskPartTable):
+
+    label = 'gpt'
+
+    def add(self, action):
+        """Append a partition for a storage config action; return its entry.
+
+        An explicit 'offset' in the action is honoured; otherwise the
+        partition is placed after the highest-numbered partition that
+        precedes it, aligned up to 1MiB (or at 1MiB if it is first).
+        """
+        number = action.get('number', len(self.entries) + 1)
+        if 'offset' in action:
+            start = int(util.human2bytes(action['offset'])) // SECTOR_BYTES
+        else:
+            # Find the entry with the largest number not exceeding ours,
+            # i.e. the partition this one should follow on disk.
+            prev = None
+            for entry in self.entries:
+                if entry.number > number:
+                    continue
+                if prev is None or prev.number < entry.number:
+                    prev = entry
+            if prev is None:
+                start = ONE_MB_SECTORS
+            else:
+                start = align_up(prev.start + prev.size, ONE_MB_SECTORS)
+        size = int(util.human2bytes(action['size'])) // SECTOR_BYTES
+        uuid = action.get('uuid')
+        # Map the curtin flag (e.g. 'boot') to a GPT type GUID; None
+        # leaves the type to sfdisk's default.
+        type = FLAG_TO_GUID.get(action.get('flag'))
+        entry = PartTableEntry(number, start, size, type, uuid)
+        self.entries.append(entry)
+        return entry
+
+
+class DOSPartTable(SFDiskPartTable):
+
+    label = 'dos'
+    _extended = None
+
+    def add(self, action):
+        flag = action.get('flag', None)
+        start = action.get('offset', None)
+        if start is not None:
+            start = int(util.human2bytes(start)) // SECTOR_BYTES
+        if flag == 'logical':
+            if self._extended is None:
+                raise Exception("logical partition without extended partition")
+            prev = None
+            for entry in reversed(self.entries):
+                if entry.number > 4:
+                    prev = entry
+                    break
+            # The number of an logical partition cannot be specified (so the
+            # 'number' from the action is completely ignored here) as the
+            # partitions are numbered by the order they are found in the linked
+            # list of logical partitions. sfdisk just cares that we put a
+            # number > 4 here, in fact we could "number" every logical
+            # partition as "5" but it's not hard to put the number that the
+            # partition will end up getting into the sfdisk input.
+            if prev is None:
+                number = 5
+                if start is None:
+                    start = align_up(
+                        self._extended.start + ONE_MB_SECTORS, ONE_MB_SECTORS)
+            else:
+                number = prev.number + 1
+                if start is None:
+                    start = align_up(
+                        prev.start + prev.size + ONE_MB_SECTORS,
+                        ONE_MB_SECTORS)
+        else:
+            number = action.get('number', len(self.entries) + 1)
+            if number > 4:
+                raise Exception(
+                    "primary partition cannot have number %s" % (number,))
+            if start is None:
+                prev = None
+                for entry in self.entries:
+                    if entry.number > number:
+                        continue
+                    if prev is None or prev.number < entry.number:
+                        prev = entry
+                if prev is None:
+                    start = ONE_MB_SECTORS
+                else:
+                    start = align_up(prev.start + prev.size, ONE_MB_SECTORS)
+        size = int(util.human2bytes(action['size'])) // SECTOR_BYTES
+        FLAG_TO_TYPE = {
+            'extended': 'extended',
+            'boot': 'uefi',
+            'swap': 'swap',
+            'lvm': 'lvm',
+            'raid': 'raid',
+            }
+        type = FLAG_TO_TYPE.get(flag)
+        if flag == 'boot':
+            bootable = True
+        else:
+            bootable = None
+        entry = PartTableEntry(
+            number, start, size, type, uuid=None, bootable=bootable)
+        if flag == 'extended':
+            self._extended = entry
+        self.entries.append(entry)
+        return entry
+
+
+def _find_part_info(sfdisk_info, offset):
+    """Return the partition from sfdisk_info starting at offset (sectors).
+
+    Raises if no existing partition starts at exactly that offset; used
+    to match a preserved action against the on-disk partition.
+    """
+    for part in sfdisk_info['partitions']:
+        if part['start'] == offset:
+            return part
+    else:
+        # for/else: only reached when no partition matched.
+        raise Exception(
+            "could not find existing partition by offset")
+
+
+def _wipe_for_action(action):
+    """Return the wipe mode to apply for a partition action, or None."""
+    # If a wipe action is specified, do that.
+    if 'wipe' in action:
+        return action['wipe']
+    # Existing partitions are left alone by default.
+    if action.get('preserve', False):
+        return None
+    # New partitions are wiped by default apart from extended partitions, where
+    # it would destroy the EBR.
+    if action.get('flag') == 'extended':
+        return None
+    return 'superblock'
+
+
 def disk_handler_v2(info, storage_config, handlers):
+    """Handle a v2 disk action: build and apply the whole partition table.
+
+    For gpt/msdos tables, all partition actions for this disk are
+    collected, rendered into a single sfdisk script and applied at once;
+    partition_handler_v2 is then a no-op. Unknown ptable values fall
+    back to the v1 per-partition path.
+    """
     disk_handler_v1(info, storage_config, handlers)
 
+    part_actions = []
+
+    for action in storage_config.values():
+        if action['type'] == 'partition' and action['device'] == info['id']:
+            part_actions.append(action)
+
+    table_cls = {
+        'msdos': DOSPartTable,
+        'gpt': GPTPartTable,
+        }.get(info.get('ptable'))
+
+    if table_cls is None:
+        # Table type not handled by v2 yet; use the v1 handler per
+        # partition instead.
+        for action in part_actions:
+            partition_handler_v1(action, storage_config, handlers)
+        return
+
+    disk = get_path_to_storage_volume(info.get('id'), storage_config)
+    table = table_cls()
+    preserved_offsets = set()
+    wipes = {}
+
+    sfdisk_info = block.sfdisk_info(disk)
+    for action in part_actions:
+        entry = table.add(action)
+        if action.get('preserve', False):
+            # A preserved partition must already exist at the offset the
+            # new table places it at, and must match the action.
+            part_info = _find_part_info(sfdisk_info, entry.start)
+            partition_verify_sfdisk(action, sfdisk_info['label'], part_info)
+            preserved_offsets.add(entry.start)
+        wipes[entry.start] = _wipe_for_action(action)
+
+    # Do a superblock wipe of any partitions that are being deleted.
+    for kname, nr, offset, sz in block.sysfs_partition_data(disk):
+        offset_sectors = offset // SECTOR_BYTES
+        if offset_sectors not in preserved_offsets:
+            block.wipe_volume(block.kname_to_path(kname), 'superblock')
+
+    table.apply(disk)
+
+    # Wipe the new partitions as needed.
+    # NOTE(review): wipes is indexed by the sysfs-reported offset, which
+    # assumes every partition in the applied table came from an action
+    # above -- confirm a stray offset cannot KeyError here.
+    for kname, number, offset, size in block.sysfs_partition_data(disk):
+        offset_sectors = offset // SECTOR_BYTES
+        mode = wipes[offset_sectors]
+        if mode is not None:
+            block.wipe_volume(block.kname_to_path(kname), mode)
+
 
 def partition_handler_v2(info, storage_config, handlers):
-    partition_handler_v1(info, storage_config, handlers)
+    # Nothing to do here: disk_handler_v2 builds and applies the whole
+    # partition table in one go (and itself falls back to the v1
+    # partition handler for table types it does not support).
+    pass
 
 
 # vi: ts=4 expandtab syntax=python
diff --git a/tests/integration/test_block_meta.py b/tests/integration/test_block_meta.py
index 72cb19a..f729e81 100644
--- a/tests/integration/test_block_meta.py
+++ b/tests/integration/test_block_meta.py
@@ -65,6 +65,7 @@ class StorageConfigBuilder:
         if create:
             with open(path, "wb") as f:
                 f.write(b"\0" * int(util.human2bytes(size)))
+        return action
 
     def add_part(self, *, size, **kw):
         if self.cur_image is None:
@@ -77,6 +78,7 @@ class StorageConfigBuilder:
             }
         action.update(**kw)
         self.config.append(action)
+        return action
 
     def set_preserve(self):
         for action in self.config:
@@ -113,18 +115,18 @@ class TestBlockMeta(IntegrationTestCase):
         img = self.tmp_path('image.img')
         config = StorageConfigBuilder(version=version)
         config.add_image(path=img, size='100M', ptable=ptable)
-        config.add_part(size=psize, number=1)
-        config.add_part(size=psize, number=2)
+        p1 = config.add_part(size=psize, number=1)
+        p2 = config.add_part(size=psize, number=2)
         self.run_bm(config.render())
 
         with loop_dev(img) as dev:
             self.assertEqual(
                 summarize_partitions(dev), [
-                    PartData(
-                        number=1, offset=1 << 20, size=psize),
-                    PartData(
-                        number=2, offset=(1 << 20) + psize, size=psize),
+                    PartData(number=1, offset=1 << 20,           size=psize),
+                    PartData(number=2, offset=(1 << 20) + psize, size=psize),
                 ])
+        p1['offset'] = 1 << 20
+        p2['offset'] = (1 << 20) + psize
         config.set_preserve()
         self.run_bm(config.render())
 
@@ -140,10 +142,40 @@ class TestBlockMeta(IntegrationTestCase):
     def test_default_offsets_msdos_v2(self):
         self._test_default_offsets('msdos', 2)
 
-    def _test_non_default_numbering(self, ptable):
+    def _test_specified_offsets(self, ptable, version):
+        # Partitions with explicit offsets must be created exactly there
+        # (leaving a deliberate gap between 1 and 3), and a preserve run
+        # against the result must succeed without changes.
+        psize = 20 << 20
+        img = self.tmp_path('image.img')
+        config = StorageConfigBuilder(version=version)
+        config.add_image(path=img, size='100M', ptable=ptable)
+        config.add_part(size=psize, number=1, offset=psize)
+        config.add_part(size=psize, number=2, offset=psize * 3)
+        self.run_bm(config.render())
+
+        with loop_dev(img) as dev:
+            self.assertEqual(
+                summarize_partitions(dev), [
+                    PartData(number=1, offset=psize,   size=psize),
+                    PartData(number=2, offset=psize*3, size=psize),
+                ])
+        config.set_preserve()
+        self.run_bm(config.render())
+
+    # The DONT_ prefix keeps the v1 variants from running -- presumably
+    # because v1 partitioning does not respect supplied offsets (per the
+    # commit message, that is what v2 adds) -- TODO confirm.
+    def DONT_test_specified_offsets_gpt_v1(self):
+        self._test_specified_offsets('gpt', 1)
+
+    def DONT_test_specified_offsets_msdos_v1(self):
+        self._test_specified_offsets('msdos', 1)
+
+    def test_specified_offsets_gpt_v2(self):
+        self._test_specified_offsets('gpt', 2)
+
+    def test_specified_offsets_msdos_v2(self):
+        self._test_specified_offsets('msdos', 2)
+
+    def _test_non_default_numbering(self, ptable, version):
         psize = 40 << 20
         img = self.tmp_path('image.img')
-        config = StorageConfigBuilder(version=1)
+        config = StorageConfigBuilder(version=version)
         config.add_image(path=img, size='100M', ptable=ptable)
         config.add_part(size=psize, number=1)
         config.add_part(size=psize, number=4)
@@ -152,21 +184,25 @@ class TestBlockMeta(IntegrationTestCase):
         with loop_dev(img) as dev:
             self.assertEqual(
                 summarize_partitions(dev), [
-                    PartData(
-                        number=1, offset=1 << 20, size=psize),
-                    PartData(
-                        number=4, offset=(1 << 20) + psize, size=psize),
+                    PartData(number=1, offset=1 << 20,           size=psize),
+                    PartData(number=4, offset=(1 << 20) + psize, size=psize),
                 ])
 
-    def test_non_default_numbering_gpt(self):
-        self._test_non_default_numbering('gpt')
+    def test_non_default_numbering_gpt_v1(self):
+        self._test_non_default_numbering('gpt', 1)
+
+    def BROKEN_test_non_default_numbering_msdos_v1(self):
+        self._test_non_default_numbering('msdos', 2)
 
-    def BROKEN_test_non_default_numbering_msdos(self):
-        self._test_non_default_numbering('msdos')
+    def test_non_default_numbering_gpt_v2(self):
+        self._test_non_default_numbering('gpt', 2)
 
-    def test_logical(self):
+    def test_non_default_numbering_msdos_v2(self):
+        self._test_non_default_numbering('msdos', 2)
+
+    def _test_logical(self, version):
         img = self.tmp_path('image.img')
-        config = StorageConfigBuilder(version=1)
+        config = StorageConfigBuilder(version=version)
         config.add_image(path=img, size='100M', ptable='msdos')
         config.add_part(size='50M', number=1, flag='extended')
         config.add_part(size='10M', number=5, flag='logical')
@@ -177,8 +213,8 @@ class TestBlockMeta(IntegrationTestCase):
             self.assertEqual(
                 summarize_partitions(dev), [
                     # extended partitions get a strange size in sysfs
-                    PartData(number=1, offset=1 << 20, size=1 << 10),
-                    PartData(number=5, offset=2 << 20, size=10 << 20),
+                    PartData(number=1, offset=1 << 20,  size=1 << 10),
+                    PartData(number=5, offset=2 << 20,  size=10 << 20),
                     # part 5 takes us to 12 MiB offset, curtin leaves a 1 MiB
                     # gap.
                     PartData(number=6, offset=13 << 20, size=10 << 20),
@@ -187,6 +223,148 @@ class TestBlockMeta(IntegrationTestCase):
             p1kname = block.partition_kname(block.path_to_kname(dev), 1)
             self.assertTrue(block.is_extended_partition('/dev/' + p1kname))
 
+    # Run the extended/logical-partition layout test under both config
+    # versions.
+    def test_logical_v1(self):
+        self._test_logical(1)
+
+    def test_logical_v2(self):
+        self._test_logical(2)
+
+    def _test_replace_partition(self, ptable):
+        # Create two partitions, then re-run preserving partition 1 and
+        # replacing partition 2 with a larger one at the same offset.
+        psize = 20 << 20
+        img = self.tmp_path('image.img')
+        config = StorageConfigBuilder(version=2)
+        config.add_image(path=img, size='100M', ptable=ptable)
+        config.add_part(size=psize, number=1)
+        config.add_part(size=psize, number=2)
+        self.run_bm(config.render())
+
+        with loop_dev(img) as dev:
+            self.assertEqual(
+                summarize_partitions(dev), [
+                    PartData(number=1, offset=1 << 20,           size=psize),
+                    PartData(number=2, offset=(1 << 20) + psize, size=psize),
+                ])
+
+        config = StorageConfigBuilder(version=2)
+        config.add_image(path=img, size='100M', ptable=ptable, preserve=True)
+        config.add_part(size=psize, number=1, offset=1 << 20, preserve=True)
+        # Partition 2 is not preserved: it should be recreated at the
+        # same (default) offset but at twice the size.
+        config.add_part(size=psize*2, number=2)
+        self.run_bm(config.render())
+
+        with loop_dev(img) as dev:
+            self.assertEqual(
+                summarize_partitions(dev), [
+                    PartData(number=1, offset=1 << 20,           size=psize),
+                    PartData(number=2, offset=(1 << 20) + psize, size=2*psize),
+                ])
+
+    def test_replace_partition_gpt_v2(self):
+        self._test_replace_partition('gpt')
+
+    def test_replace_partition_msdos_v2(self):
+        self._test_replace_partition('msdos')
+
+    def test_delete_logical_partition(self):
+        # The test case that resulted in a lot of hair-pulling:
+        # deleting a logical partition renumbers any later partitions
+        # (so you cannot stably refer to partitions by number!)
+        psize = 20 << 20
+        img = self.tmp_path('image.img')
+        config = StorageConfigBuilder(version=2)
+        config.add_image(path=img, size='100M', ptable='msdos')
+        config.add_part(size='90M', number=1, flag='extended')
+        config.add_part(size=psize, number=5, flag='logical')
+        config.add_part(size=psize, number=6, flag='logical')
+        self.run_bm(config.render())
+
+        with loop_dev(img) as dev:
+            self.assertEqual(
+                summarize_partitions(dev), [
+                    PartData(number=1, offset=1 << 20,           size=1 << 10),
+                    PartData(number=5, offset=(2 << 20),         size=psize),
+                    PartData(number=6, offset=(3 << 20) + psize, size=psize),
+                ])
+
+        config = StorageConfigBuilder(version=2)
+        config.add_image(path=img, size='100M', ptable='msdos', preserve=True)
+        config.add_part(size='90M', number=1, flag='extended', preserve=True)
+        # The preserved partition is the former number 6 (identified by
+        # its offset); after the other logical partition is deleted it
+        # becomes number 5.
+        config.add_part(
+            size=psize, number=5, flag='logical', offset=(3 << 20) + psize,
+            preserve=True)
+        self.run_bm(config.render())
+
+        with loop_dev(img) as dev:
+            self.assertEqual(
+                summarize_partitions(dev), [
+                    PartData(number=1, offset=1 << 20,           size=1 << 10),
+                    PartData(number=5, offset=(3 << 20) + psize, size=psize),
+                ])
+
+    def _test_wiping(self, ptable):
+        # Test wiping behaviour.
+        #
+        # Partitions that should be (superblock, i.e. first and last
+        # megabyte) wiped:
+        #
+        # 1) New partitions
+        # 2) Partitions that are being removed, i.e. no longer present
+        # 3) Preserved partitions with an explicit wipe
+        #
+        # Partitions that should not be wiped:
+        #
+        # 4) Preserved partitions with no wipe field.
+        #
+        # We test this by creating some partitions with block-meta,
+        # writing content to them, then running block-meta again, with
+        # each partition matching one of the conditions above.
+        ONE_MEG_BYTES = 1 << 20
+
+        img = self.tmp_path('image.img')
+        config = StorageConfigBuilder(version=2)
+        config.add_image(path=img, size='30M', ptable=ptable)
+        config.add_part(size='5M', number=1, offset='5M')
+        config.add_part(size='5M', number=2, offset='10M')
+        config.add_part(size='5M', number=3, offset='15M')
+        config.add_part(size='5M', number=4, offset='20M')
+        self.run_bm(config.render())
+
+        # Fill each partition with a recognizable byte (its number) so
+        # wiping is detectable afterwards.
+        part_offset_sizes = {}
+        with loop_dev(img) as dev:
+            for kname, number, offset, size in block.sysfs_partition_data(dev):
+                content = bytes([number])
+                with open(block.kname_to_path(kname), 'wb') as fp:
+                    fp.write(content*size)
+                part_offset_sizes[number] = (offset, size)
+
+        config = StorageConfigBuilder(version=2)
+        config.add_image(path=img, size='30M', ptable=ptable, preserve=True)
+        config.add_part(size='5M', number=1, offset='5M')
+        # Partition 2 is being deleted.
+        config.add_part(
+            size='5M', number=3, offset='15M', preserve=True,
+            wipe='superblock')
+        config.add_part(size='5M', number=4, offset='20M', preserve=True)
+        self.run_bm(config.render())
+
+        # Wiped regions read as zeros; only partition 4 keeps its bytes.
+        expected_content = {1: {0}, 2: {0}, 3: {0}, 4: {4}}
+
+        with loop_dev(img) as dev:
+            with open(dev, 'rb') as fp:
+                for nr, (offset, size) in part_offset_sizes.items():
+                    expected = expected_content[nr]
+                    fp.seek(offset)
+                    first = set(fp.read(ONE_MEG_BYTES))
+                    fp.seek(offset + size - ONE_MEG_BYTES)
+                    last = set(fp.read(ONE_MEG_BYTES))
+                    self.assertEqual(first, expected)
+                    self.assertEqual(last, expected)
+
+    def test_wiping_gpt(self):
+        self._test_wiping('gpt')
+
+    def test_wiping_msdos(self):
+        self._test_wiping('msdos')
+
     def test_raw_image(self):
         img = self.tmp_path('image.img')
         config = StorageConfigBuilder(version=1)

Follow ups