curtin-dev team mailing list archive
-
curtin-dev team
-
Mailing list archive
-
Message #02168
[Merge] ~mwhudson/curtin:v2-zero-before-partitioning into curtin:master
Michael Hudson-Doyle has proposed merging ~mwhudson/curtin:v2-zero-before-partitioning into curtin:master.
Commit message:
block_meta_v2: zero start of partitions before they are created
Hooray for regression tests for bug 1718699.
Requested reviews:
curtin developers (curtin-dev)
For more details, see:
https://code.launchpad.net/~mwhudson/curtin/+git/curtin/+merge/416002
I've based this on v2-sector-size which is on its way to landing so Launchpad will probably show too much diff to start with.
--
Your team curtin developers is requested to review the proposed merge of ~mwhudson/curtin:v2-zero-before-partitioning into curtin:master.
diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
index ca0bc10..f37ebee 100644
--- a/curtin/block/__init__.py
+++ b/curtin/block/__init__.py
@@ -993,19 +993,12 @@ def sysfs_partition_data(blockdev=None, sysfs_path=None):
else:
raise ValueError("Blockdev and sysfs_path cannot both be None")
- # queue property is only on parent devices, ie, we can't read
- # /sys/class/block/vda/vda1/queue/* as queue is only on the
- # parent device
sysfs_prefix = sysfs_path
(parent, partnum) = get_blockdev_for_partition(blockdev)
if partnum:
sysfs_prefix = sys_block_path(parent)
partnum = int(partnum)
- block_size = int(util.load_file(os.path.join(
- sysfs_prefix, 'queue/logical_block_size')))
- unit = block_size
-
ptdata = []
for part_sysfs in get_sysfs_partitions(sysfs_prefix):
data = {}
@@ -1015,8 +1008,12 @@ def sysfs_partition_data(blockdev=None, sysfs_path=None):
continue
data[sfile] = int(util.load_file(dfile))
if partnum is None or data['partition'] == partnum:
- ptdata.append((path_to_kname(part_sysfs), data['partition'],
- data['start'] * unit, data['size'] * unit,))
+ ptdata.append((
+ path_to_kname(part_sysfs),
+ data['partition'],
+ data['start'] * SECTOR_SIZE_BYTES,
+ data['size'] * SECTOR_SIZE_BYTES,
+ ))
return ptdata
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index fbbfeeb..cdf30c5 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -553,6 +553,7 @@ DEVS = set()
def image_handler(info, storage_config, handlers):
path = info['path']
size = int(util.human2bytes(info['size']))
+ sector_size = str(int(util.human2bytes(info.get('sector_size', 512))))
if info.get('preserve', False):
actual_size = os.stat(path).st_size
if size != actual_size:
@@ -571,7 +572,7 @@ def image_handler(info, storage_config, handlers):
raise
try:
dev = util.subp([
- 'losetup', '--show', '--find', path],
+ 'losetup', '--show', '--sector-size', sector_size, '--find', path],
capture=True)[0].strip()
except BaseException:
if os.path.exists(path) and not info.get('preserve'):
diff --git a/curtin/commands/block_meta_v2.py b/curtin/commands/block_meta_v2.py
index 4d000f0..6948fd7 100644
--- a/curtin/commands/block_meta_v2.py
+++ b/curtin/commands/block_meta_v2.py
@@ -39,8 +39,6 @@ class PartTableEntry:
ONE_MIB_BYTES = 1 << 20
-SECTOR_BYTES = 512
-ONE_MIB_SECTORS = ONE_MIB_BYTES // SECTOR_BYTES
def align_up(size, block_size):
@@ -65,8 +63,20 @@ class SFDiskPartTable:
label = None
- def __init__(self):
+ def __init__(self, sector_bytes):
self.entries = []
+ self._sector_bytes = sector_bytes
+ if ONE_MIB_BYTES % sector_bytes != 0:
+ raise Exception(
+ f"sector_bytes {sector_bytes} does not divide 1MiB, cannot "
+ "continue!")
+ self.one_mib_sectors = ONE_MIB_BYTES // sector_bytes
+
+ def bytes2sectors(self, amount):
+ return int(util.human2bytes(amount)) // self._sector_bytes
+
+ def sectors2bytes(self, amount):
+ return amount * self._sector_bytes
def render(self):
r = ['label: ' + self.label, ''] + [e.render() for e in self.entries]
@@ -94,14 +104,14 @@ class GPTPartTable(SFDiskPartTable):
def add(self, action):
number = action.get('number', len(self.entries) + 1)
if 'offset' in action:
- start = int(util.human2bytes(action['offset'])) // SECTOR_BYTES
+ start = self.bytes2sectors(action['offset'])
else:
if self.entries:
prev = self.entries[-1]
- start = align_up(prev.start + prev.size, ONE_MIB_SECTORS)
+ start = align_up(prev.start + prev.size, self.one_mib_sectors)
else:
- start = ONE_MIB_SECTORS
- size = int(util.human2bytes(action['size'])) // SECTOR_BYTES
+ start = self.one_mib_sectors
+ size = self.bytes2sectors(action['size'])
uuid = action.get('uuid')
type = FLAG_TO_GUID.get(action.get('flag'))
entry = PartTableEntry(number, start, size, type, uuid)
@@ -118,7 +128,7 @@ class DOSPartTable(SFDiskPartTable):
flag = action.get('flag', None)
start = action.get('offset', None)
if start is not None:
- start = int(util.human2bytes(start)) // SECTOR_BYTES
+ start = self.bytes2sectors(start)
if flag == 'logical':
if self._extended is None:
raise Exception("logical partition without extended partition")
@@ -138,14 +148,14 @@ class DOSPartTable(SFDiskPartTable):
number = 5
if start is None:
start = align_up(
- self._extended.start + ONE_MIB_SECTORS,
- ONE_MIB_SECTORS)
+ self._extended.start + self.one_mib_sectors,
+ self.one_mib_sectors)
else:
number = prev.number + 1
if start is None:
start = align_up(
- prev.start + prev.size + ONE_MIB_SECTORS,
- ONE_MIB_SECTORS)
+ prev.start + prev.size + self.one_mib_sectors,
+ self.one_mib_sectors)
else:
number = action.get('number', len(self.entries) + 1)
if number > 4:
@@ -157,10 +167,12 @@ class DOSPartTable(SFDiskPartTable):
if entry.number <= 4:
prev = entry
if prev is None:
- start = ONE_MIB_SECTORS
+ start = self.one_mib_sectors
else:
- start = align_up(prev.start + prev.size, ONE_MIB_SECTORS)
- size = int(util.human2bytes(action['size'])) // SECTOR_BYTES
+ start = align_up(
+ prev.start + prev.size,
+ self.one_mib_sectors)
+ size = self.bytes2sectors(action['size'])
type = FLAG_TO_MBR_TYPE.get(flag)
if flag == 'boot':
bootable = True
@@ -217,7 +229,9 @@ def disk_handler_v2(info, storage_config, handlers):
return
disk = get_path_to_storage_volume(info.get('id'), storage_config)
- table = table_cls()
+ (sector_size, _) = block.get_blockdev_sector_size(disk)
+
+ table = table_cls(sector_size)
preserved_offsets = set()
wipes = {}
@@ -234,11 +248,22 @@ def disk_handler_v2(info, storage_config, handlers):
part_info = _find_part_info(sfdisk_info, entry.start)
partition_verify_sfdisk(action, sfdisk_info['label'], part_info)
preserved_offsets.add(entry.start)
- wipes[entry.start] = _wipe_for_action(action)
+ wipe = wipes[entry.start] = _wipe_for_action(action)
+ if wipe is not None:
+ # We do a quick wipe of where any new partitions will be,
+ # because if there is bcache or other metadata there, this
+ # can cause the partition to be used by a storage
+ # subsystem and preventing the exclusive open done by the
+ # wipe_volume call below. See
+ # https://bugs.launchpad.net/curtin/+bug/1718699 for all
+ # the gory details.
+ wipe_offset = table.bytes2sectors(entry.start)
+ LOG.debug('Wiping 1M on %s at offset %s', disk, wipe_offset)
+ block.zero_file_at_offsets(disk, [wipe_offset])
# Do a superblock wipe of any partitions that are being deleted.
for kname, nr, offset, sz in block.sysfs_partition_data(disk):
- offset_sectors = offset // SECTOR_BYTES
+ offset_sectors = table.bytes2sectors(offset)
if offset_sectors not in preserved_offsets:
block.wipe_volume(block.kname_to_path(kname), 'superblock')
@@ -246,7 +271,7 @@ def disk_handler_v2(info, storage_config, handlers):
# Wipe the new partitions as needed.
for kname, number, offset, size in block.sysfs_partition_data(disk):
- offset_sectors = offset // SECTOR_BYTES
+ offset_sectors = table.bytes2sectors(offset)
mode = wipes[offset_sectors]
if mode is not None:
block.wipe_volume(block.kname_to_path(kname), mode)
diff --git a/tests/integration/test_block_meta.py b/tests/integration/test_block_meta.py
index a415c20..0c74cd6 100644
--- a/tests/integration/test_block_meta.py
+++ b/tests/integration/test_block_meta.py
@@ -19,10 +19,11 @@ class IntegrationTestCase(CiTestCase):
@contextlib.contextmanager
-def loop_dev(image):
- dev = util.subp(
- ['losetup', '--show', '--find', '--partscan', image],
- capture=True, decode='ignore')[0].strip()
+def loop_dev(image, sector_size=512):
+ dev = util.subp([
+ 'losetup', '--show', '--find', '--partscan',
+ '--sector-size', str(sector_size), image,
+ ], capture=True, decode='ignore')[0].strip()
try:
udev.udevadm_trigger([dev])
yield dev
@@ -112,17 +113,18 @@ class TestBlockMeta(IntegrationTestCase):
]
util.subp(cmd, env=cmd_env, **kwargs)
- def _test_default_offsets(self, ptable, version):
+ def _test_default_offsets(self, ptable, version, sector_size=512):
psize = 40 << 20
img = self.tmp_path('image.img')
config = StorageConfigBuilder(version=version)
- config.add_image(path=img, size='200M', ptable=ptable)
+ config.add_image(
+ path=img, size='200M', ptable=ptable, sector_size=sector_size)
p1 = config.add_part(size=psize, number=1)
p2 = config.add_part(size=psize, number=2)
p3 = config.add_part(size=psize, number=3)
self.run_bm(config.render())
- with loop_dev(img) as dev:
+ with loop_dev(img, sector_size) as dev:
self.assertEqual(
summarize_partitions(dev), [
PartData(number=1, offset=1 << 20, size=psize),
@@ -147,6 +149,18 @@ class TestBlockMeta(IntegrationTestCase):
def test_default_offsets_msdos_v2(self):
self._test_default_offsets('msdos', 2)
+ def test_default_offsets_gpt_v1_4k(self):
+ self._test_default_offsets('gpt', 1, 4096)
+
+ def test_default_offsets_msdos_v1_4k(self):
+ self._test_default_offsets('msdos', 1, 4096)
+
+ def test_default_offsets_gpt_v2_4k(self):
+ self._test_default_offsets('gpt', 2, 4096)
+
+ def test_default_offsets_msdos_v2_4k(self):
+ self._test_default_offsets('msdos', 2, 4096)
+
def _test_specified_offsets(self, ptable, version):
psize = 20 << 20
img = self.tmp_path('image.img')
Follow ups