curtin-dev team mailing list archive
-
curtin-dev team
-
Mailing list archive
-
Message #02494
[Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
Michael Hudson-Doyle has proposed merging ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master.
Commit message:
block_meta: record which devices were handled by each action
Requested reviews:
curtin developers (curtin-dev)
For more details, see:
https://code.launchpad.net/~mwhudson/curtin/+git/curtin/+merge/429404
--
Your team curtin developers is requested to review the proposed merge of ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master.
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index 5614883..3c2223e 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -17,6 +17,7 @@ from curtin.udev import (compose_udev_equality, udevadm_settle,
udevadm_trigger, udevadm_info)
import glob
+import json
import os
import platform
import string
@@ -427,7 +428,6 @@ def get_poolname(info, storage_config):
return poolname
-
def get_path_to_storage_volume(volume, storage_config):
# Get path to block device for volume. Volume param should refer to id of
# volume in storage config
@@ -550,7 +550,7 @@ def get_path_to_storage_volume(volume, storage_config):
DEVS = set()
-def image_handler(info, storage_config, handlers):
+def image_handler(info, storage_config, context):
path = info['path']
size = int(util.human2bytes(info['size']))
sector_size = str(int(util.human2bytes(info.get('sector_size', 512))))
@@ -578,12 +578,12 @@ def image_handler(info, storage_config, handlers):
if os.path.exists(path) and not info.get('preserve'):
os.unlink(path)
raise
- info['dev'] = dev
+ context.id_to_device[info['id']] = info['dev'] = dev
DEVS.add(dev)
- handlers['disk'](info, storage_config, handlers)
+ context.handlers['disk'](info, storage_config, context)
-def dasd_handler(info, storage_config, handlers):
+def dasd_handler(info, storage_config, context):
""" Prepare the specified dasd device per configuration
params: info: dictionary of configuration, required keys are:
@@ -628,7 +628,7 @@ def dasd_handler(info, storage_config, handlers):
"Dasd %s failed to format" % dasd_device.devname)
-def disk_handler(info, storage_config, handlers):
+def disk_handler(info, storage_config, context):
_dos_names = ['dos', 'msdos']
ptable = info.get('ptable')
if ptable and ptable not in PTABLES_VALID:
@@ -636,6 +636,7 @@ def disk_handler(info, storage_config, handlers):
'Invalid partition table type: %s in %s' % (ptable, info))
disk = get_path_to_storage_volume(info.get('id'), storage_config)
+ context.id_to_device[info['id']] = disk
# For disks, 'preserve' is what indicates whether the partition
# table should be reused or recreated but for compound devices
# such as raids, it indicates if the raid should be created or
@@ -849,7 +850,7 @@ def partition_verify_fdasd(disk_path, partnumber, info):
raise RuntimeError("dasd partitions do not support flags")
-def partition_handler(info, storage_config, handlers):
+def partition_handler(info, storage_config, context):
device = info.get('device')
size = info.get('size')
flag = info.get('flag')
@@ -863,6 +864,8 @@ def partition_handler(info, storage_config, handlers):
disk = get_path_to_storage_volume(device, storage_config)
partnumber = determine_partition_number(info.get('id'), storage_config)
disk_kname = block.path_to_kname(disk)
+ part_path = block.dev_path(block.partition_kname(disk_kname, partnumber))
+ context.id_to_device[info['id']] = part_path
# consider the disks logical sector size when calculating sectors
try:
@@ -938,8 +941,6 @@ def partition_handler(info, storage_config, handlers):
# Handle preserve flag
create_partition = True
if config.value_as_boolean(info.get('preserve')):
- part_path = block.dev_path(
- block.partition_kname(disk_kname, partnumber))
if disk_ptable == 'vtoc':
partition_verify_fdasd(disk, partnumber, info)
else:
@@ -1038,7 +1039,7 @@ def partition_handler(info, storage_config, handlers):
make_dname(info.get('id'), storage_config)
-def format_handler(info, storage_config, handlers):
+def format_handler(info, storage_config, context):
volume = info.get('volume')
if not volume:
raise ValueError("volume must be specified for partition '%s'" %
@@ -1294,7 +1295,7 @@ def mount_apply(fdata, target=None, fstab=None):
LOG.info("fstab not in environment, so not writing")
-def mount_handler(info, storage_config, handlers):
+def mount_handler(info, storage_config, context):
""" Handle storage config type: mount
info = {
@@ -1330,7 +1331,7 @@ def lvm_volgroup_verify(vg_name, device_paths):
verify_volgroup_members(vg_name, device_paths)
-def lvm_volgroup_handler(info, storage_config, handlers):
+def lvm_volgroup_handler(info, storage_config, context):
devices = info.get('devices')
device_paths = []
name = info.get('name')
@@ -1391,7 +1392,7 @@ def lvm_partition_verify(lv_name, vg_name, info):
verify_lv_size(lv_name, info['size'])
-def lvm_partition_handler(info, storage_config, handlers):
+def lvm_partition_handler(info, storage_config, context):
volgroup = storage_config[info['volgroup']]['name']
name = info['name']
if not volgroup:
@@ -1428,9 +1429,11 @@ def lvm_partition_handler(info, storage_config, handlers):
# refresh lvmetad
lvm.lvm_scan()
+ lv_path = get_path_to_storage_volume(info['id'], storage_config)
+ context.id_to_device[info['id']] = lv_path
+
wipe_mode = info.get('wipe', 'superblock')
if wipe_mode and create_lv:
- lv_path = get_path_to_storage_volume(info['id'], storage_config)
LOG.debug('Wiping logical volume %s mode=%s', lv_path, wipe_mode)
block.wipe_volume(lv_path, mode=wipe_mode, exclusive=False)
@@ -1453,7 +1456,7 @@ def dm_crypt_verify(dmcrypt_dev, volume_path):
verify_blkdev_used(dmcrypt_dev, volume_path)
-def dm_crypt_handler(info, storage_config, handlers):
+def dm_crypt_handler(info, storage_config, context):
state = util.load_command_environment(strict=True)
volume = info.get('volume')
keysize = info.get('keysize')
@@ -1462,6 +1465,7 @@ def dm_crypt_handler(info, storage_config, handlers):
if not dm_name:
dm_name = info.get('id')
dmcrypt_dev = os.path.join("/dev", "mapper", dm_name)
+ context.id_to_device[info['id']] = dmcrypt_dev
preserve = config.value_as_boolean(info.get('preserve'))
if not volume:
raise ValueError("volume for cryptsetup to operate on must be \
@@ -1595,12 +1599,13 @@ def raid_verify(md_devname, raidlevel, device_paths, spare_paths, container):
md_devname, raidlevel, device_paths, spare_paths, container)
-def raid_handler(info, storage_config, handlers):
+def raid_handler(info, storage_config, context):
state = util.load_command_environment(strict=True)
devices = info.get('devices')
raidlevel = info.get('raidlevel')
spare_devices = info.get('spare_devices')
md_devname = block.md_path(info.get('name'))
+ context.id_to_device[info['id']] = md_devname
container = info.get('container')
metadata = info.get('metadata')
preserve = config.value_as_boolean(info.get('preserve'))
@@ -1677,7 +1682,7 @@ def raid_handler(info, storage_config, handlers):
# If ptable is specified, call disk_handler on this mdadm device to create
# the table
if info.get('ptable'):
- handlers['disk'](info, storage_config, handlers)
+ context.handlers['disk'](info, storage_config, context)
def verify_bcache_cachedev(cachedev):
@@ -1744,7 +1749,7 @@ def bcache_verify(cachedev, backingdev, cache_mode):
return True
-def bcache_handler(info, storage_config, handlers):
+def bcache_handler(info, storage_config, context):
backing_device = get_path_to_storage_volume(info.get('backing_device'),
storage_config)
cache_device = get_path_to_storage_volume(info.get('cache_device'),
@@ -1777,6 +1782,8 @@ def bcache_handler(info, storage_config, handlers):
if create_bcache and backing_device:
bcache_dev = bcache.create_backing_device(backing_device, cache_device,
cache_mode, cset_uuid)
+ # Not sure what to do in the preserve case here.
+ context.id_to_device[info['id']] = bcache_dev
if cache_mode and not backing_device:
raise ValueError("cache mode specified which can only be set on "
@@ -1792,13 +1799,13 @@ def bcache_handler(info, storage_config, handlers):
make_dname(info.get('id'), storage_config)
if info.get('ptable'):
- handlers['disk'](info, storage_config, handlers)
+ context.handlers['disk'](info, storage_config, context)
LOG.debug('Finished bcache creation for backing %s or caching %s',
backing_device, cache_device)
-def zpool_handler(info, storage_config, handlers):
+def zpool_handler(info, storage_config, context):
"""
Create a zpool based in storage_configuration
"""
@@ -1837,7 +1844,7 @@ def zpool_handler(info, storage_config, handlers):
zfs_properties=fs_properties)
-def zfs_handler(info, storage_config, handlers):
+def zfs_handler(info, storage_config, context):
"""
Create a zfs filesystem
"""
@@ -1980,6 +1987,13 @@ def zfsroot_update_storage_config(storage_config):
return ret
+class BlockMetaContext:
+
+ def __init__(self, handlers):
+ self.handlers = handlers
+ self.id_to_device = {}
+
+
def meta_clear(devices, report_prefix=''):
""" Run clear_holders on specified list of devices.
@@ -2045,8 +2059,10 @@ def meta_custom(args):
# set up reportstack
stack_prefix = state.get('report_stack_prefix', '')
+ context = BlockMetaContext(command_handlers)
+
for item_id, command in storage_config_dict.items():
- handler = command_handlers.get(command['type'])
+ handler = context.handlers.get(command['type'])
if not handler:
raise ValueError("unknown command type '%s'" % command['type'])
with events.ReportEventStack(
@@ -2054,12 +2070,17 @@ def meta_custom(args):
description="configuring %s: %s" % (command['type'],
command['id'])):
try:
- handler(command, storage_config_dict, command_handlers)
+ handler(command, storage_config_dict, context)
except Exception as error:
LOG.error("An error occured handling '%s': %s - %s" %
(item_id, type(error).__name__, error))
raise
+ device_map_path = cfg['storage'].get('device_map_path')
+ if device_map_path is not None:
+ with open(device_map_path, 'w') as fp:
+ json.dump(context.id_to_device, fp)
+
if args.testmode:
util.subp(['losetup', '--detach'] + list(DEVS))
diff --git a/curtin/commands/block_meta_v2.py b/curtin/commands/block_meta_v2.py
index 10647c5..2cb5bc4 100644
--- a/curtin/commands/block_meta_v2.py
+++ b/curtin/commands/block_meta_v2.py
@@ -400,8 +400,8 @@ def partition_verify_sfdisk_v2(part_action, label, sfdisk_part_info,
verify_ptable_flag(devpath, expected_flag, label, sfdisk_part_info)
-def disk_handler_v2(info, storage_config, handlers):
- disk_handler_v1(info, storage_config, handlers)
+def disk_handler_v2(info, storage_config, context):
+ disk_handler_v1(info, storage_config, context)
part_actions = []
@@ -416,7 +416,7 @@ def disk_handler_v2(info, storage_config, handlers):
if table_cls is None:
for action in part_actions:
- partition_handler_v1(action, storage_config, handlers)
+ partition_handler_v1(action, storage_config, context)
return
disk = get_path_to_storage_volume(info.get('id'), storage_config)
@@ -493,8 +493,9 @@ def disk_handler_v2(info, storage_config, handlers):
make_dname(action['id'], storage_config)
-def partition_handler_v2(info, storage_config, handlers):
- pass
+def partition_handler_v2(info, storage_config, context):
+ context.id_to_device[info['id']] = get_path_to_storage_volume(
+ info.get('id'), storage_config)
# vi: ts=4 expandtab syntax=python
diff --git a/tests/integration/test_block_meta.py b/tests/integration/test_block_meta.py
index 053bd7b..fb14a03 100644
--- a/tests/integration/test_block_meta.py
+++ b/tests/integration/test_block_meta.py
@@ -255,12 +255,28 @@ class TestBlockMeta(IntegrationTestCase):
psize = 40 << 20
img = self.tmp_path('image.img')
config = StorageConfigBuilder(version=version)
- config.add_image(
+ disk_action = config.add_image(
path=img, size='200M', ptable=ptable, sector_size=sector_size)
p1 = config.add_part(size=psize, number=1)
p2 = config.add_part(size=psize, number=2)
p3 = config.add_part(size=psize, number=3)
- self.run_bm(config.render())
+ c = config.render()
+
+ # Request that curtin dump the device node path for each action
+ dmp = c['storage']['device_map_path'] = self.tmp_path('map.json')
+
+ self.run_bm(c)
+
+ # We can't check a whole lot about the device map, but we can
+ # check all actions are in the map and each action should be
+    # /dev/loopXXpX where /dev/loopXX is the device for the image.
+ with open(dmp) as fp:
+ device_map = json.load(fp)
+ image_device = device_map[disk_action['id']]
+ for action in c['storage']['config']:
+ self.assertIn(action['id'], device_map)
+ self.assertTrue(
+ device_map[action['id']].startswith(image_device))
with loop_dev(img, sector_size) as dev:
self.assertEqual(
diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
index fcba94a..7898e4b 100644
--- a/tests/unittests/test_commands_block_meta.py
+++ b/tests/unittests/test_commands_block_meta.py
@@ -22,6 +22,9 @@ def random_uuid():
return uuid.uuid4()
+empty_context = block_meta.BlockMetaContext({})
+
+
class TestGetPathToStorageVolume(CiTestCase):
def setUp(self):
@@ -1558,8 +1561,10 @@ class TestLvmPartitionHandler(CiTestCase):
self.assertEqual(int, type(lv_size))
expected_size_str = "%sB" % util.human2bytes(lv_size)
- block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config, {})
+ block_meta.lvm_partition_handler(
+ self.storage_config['lvm-part1'],
+ self.storage_config,
+ empty_context)
call_name, call_args, call_kwargs = self.m_subp.mock_calls[0]
# call_args is an n-tuple of arg list
@@ -1573,7 +1578,7 @@ class TestLvmPartitionHandler(CiTestCase):
self.m_getpath.return_value = devpath
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.m_wipe.assert_called_with(devpath, mode='superblock',
exclusive=False)
@@ -1587,7 +1592,7 @@ class TestLvmPartitionHandler(CiTestCase):
wipe_mode = 'zero'
self.storage_config['lvm-part1']['wipe'] = wipe_mode
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.m_wipe.assert_called_with(devpath, mode=wipe_mode,
exclusive=False)
@@ -1596,7 +1601,7 @@ class TestLvmPartitionHandler(CiTestCase):
m_verify.return_value = True
self.storage_config['lvm-part1']['preserve'] = True
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual(0, self.m_distro.lsb_release.call_count)
self.assertEqual(0, self.m_subp.call_count)
@@ -1606,7 +1611,7 @@ class TestLvmPartitionHandler(CiTestCase):
self.m_lvm.get_lv_size_bytes.return_value = 1073741824.0
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual([call('vg1')],
self.m_lvm.get_lvols_in_volgroup.call_args_list)
self.assertEqual([call('lv1')],
@@ -1619,8 +1624,10 @@ class TestLvmPartitionHandler(CiTestCase):
self.m_lvm.get_lvols_in_volgroup.return_value = []
with self.assertRaises(RuntimeError):
- block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config, {})
+ block_meta.lvm_partition_handler(
+ self.storage_config['lvm-part1'],
+ self.storage_config,
+ empty_context)
self.assertEqual([call('vg1')],
self.m_lvm.get_lvols_in_volgroup.call_args_list)
@@ -1634,8 +1641,10 @@ class TestLvmPartitionHandler(CiTestCase):
self.m_lvm.get_lv_size_bytes.return_value = 0.0
with self.assertRaises(RuntimeError):
- block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config, {})
+ block_meta.lvm_partition_handler(
+ self.storage_config['lvm-part1'],
+ self.storage_config,
+ empty_context)
self.assertEqual([call('vg1')],
self.m_lvm.get_lvols_in_volgroup.call_args_list)
self.assertEqual([call('lv1')],
@@ -1703,7 +1712,7 @@ class TestDmCryptHandler(CiTestCase):
self.m_getpath.return_value = volume_path
info = self.storage_config['dmcrypt0']
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(info, self.storage_config, empty_context)
expected_calls = [
call(['cryptsetup', '--cipher', self.cipher,
'--key-size', self.keysize,
@@ -1721,7 +1730,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
del info['dm_name']
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(info, self.storage_config, empty_context)
expected_calls = [
call(['cryptsetup', '--cipher', self.cipher,
'--key-size', self.keysize,
@@ -1745,7 +1754,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
volume_name = "%s:%s" % (volume_byid, info['dm_name'])
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(info, self.storage_config, empty_context)
expected_calls = [
call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
'--sector-size', '4096', '--name', info['dm_name'],
@@ -1780,7 +1789,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
volume_name = "%s:%s" % (volume_byid, info['dm_name'])
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(info, self.storage_config, empty_context)
expected_calls = [
call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
'--sector-size', '4096', '--name', info['dm_name'],
@@ -1817,7 +1826,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
volume_name = "%s:%s" % (volume_byid, info['dm_name'])
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(info, self.storage_config, empty_context)
expected_calls = [
call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
'--sector-size', '4096', '--name', info['dm_name'],
@@ -1844,7 +1853,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(info, self.storage_config, empty_context)
self.assertEqual(0, self.m_subp.call_count)
self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
@@ -1865,7 +1874,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(info, self.storage_config, empty_context)
self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
@patch('curtin.commands.block_meta.os.path.exists')
@@ -1877,7 +1886,8 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
with self.assertRaises(RuntimeError):
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(
+ info, self.storage_config, empty_context)
@patch('curtin.commands.block_meta.os.path.exists')
def test_dm_crypt_preserve_raises_exception_if_wrong_dev_used(self, m_ex):
@@ -1895,7 +1905,8 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
with self.assertRaises(RuntimeError):
- block_meta.dm_crypt_handler(info, self.storage_config, {})
+ block_meta.dm_crypt_handler(
+ info, self.storage_config, empty_context)
class TestRaidHandler(CiTestCase):
@@ -1993,7 +2004,7 @@ class TestRaidHandler(CiTestCase):
self.storage_config['mddevice']['name'] = param
try:
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config, {})
+ self.storage_config, empty_context)
except ValueError:
if param in ['bad/path']:
continue
@@ -2015,7 +2026,7 @@ class TestRaidHandler(CiTestCase):
md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual([call(md_devname, 5, devices, [], None, '', None)],
self.m_mdadm.mdadm_create.call_args_list)
@@ -2029,7 +2040,7 @@ class TestRaidHandler(CiTestCase):
self.m_getpath.side_effect = iter(devices)
self.storage_config['mddevice']['preserve'] = True
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
self.assertEqual(
[call(md_devname, 5, devices, [], None)],
@@ -2046,7 +2057,7 @@ class TestRaidHandler(CiTestCase):
del self.storage_config['mddevice']['devices']
self.storage_config['mddevice']['container'] = self.random_string()
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
self.assertEqual(
[call(md_devname, 5, [], [], devices[0])],
@@ -2062,7 +2073,7 @@ class TestRaidHandler(CiTestCase):
self.m_mdadm.md_check.return_value = True
self.storage_config['mddevice']['preserve'] = True
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
self.assertEqual([call(md_devname, 5, devices, [], None)],
self.m_mdadm.md_check.call_args_list)
@@ -2077,7 +2088,7 @@ class TestRaidHandler(CiTestCase):
self.m_mdadm.md_check.side_effect = iter([ValueError(), None])
self.storage_config['mddevice']['preserve'] = True
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
self.m_mdadm.md_check.call_args_list)
@@ -2095,7 +2106,7 @@ class TestRaidHandler(CiTestCase):
self.storage_config['mddevice']['preserve'] = True
with self.assertRaises(ValueError):
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
self.m_mdadm.md_check.call_args_list)
@@ -2188,7 +2199,7 @@ class TestBcacheHandler(CiTestCase):
self.m_bcache.create_cache_device.return_value = cset_uuid
block_meta.bcache_handler(self.storage_config['id_bcache0'],
- self.storage_config, {})
+ self.storage_config, empty_context)
self.assertEqual([call(caching_device)],
self.m_bcache.create_cache_device.call_args_list)
self.assertEqual([
@@ -2311,7 +2322,8 @@ class TestPartitionHandler(CiTestCase):
self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
self.m_block.get_blockdev_sector_size.return_value = (512, 512)
m_ex_part.return_value = 'disk-sda-part-2'
- block_meta.partition_handler(logical_part, self.storage_config, {})
+ block_meta.partition_handler(
+ logical_part, self.storage_config, empty_context)
m_ex_part.assert_called_with('sda', self.storage_config)
def test_part_handler_raise_exception_missing_extended_part(self):
@@ -2331,7 +2343,8 @@ class TestPartitionHandler(CiTestCase):
self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
self.m_block.get_blockdev_sector_size.return_value = (512, 512)
with self.assertRaises(RuntimeError):
- block_meta.partition_handler(logical_part, self.storage_config, {})
+ block_meta.partition_handler(
+ logical_part, self.storage_config, empty_context)
@patch('curtin.commands.block_meta.partition_verify_fdasd')
def test_part_hander_reuse_vtoc(self, m_verify_fdasd):
@@ -2358,7 +2371,7 @@ class TestPartitionHandler(CiTestCase):
m_verify_fdasd.return_value = True
devpath = self.m_getpath.return_value = self.random_string()
- block_meta.partition_handler(sconfig[1], oconfig, {})
+ block_meta.partition_handler(sconfig[1], oconfig, empty_context)
m_verify_fdasd.assert_has_calls([call(devpath, 1, sconfig[1])])
@@ -2421,7 +2434,7 @@ class TestMultipathPartitionHandler(CiTestCase):
m_part_info.return_value = (2048, 2048)
part2 = self.storage_config['disk-sda-part-2']
- block_meta.partition_handler(part2, self.storage_config, {})
+ block_meta.partition_handler(part2, self.storage_config, empty_context)
expected_calls = [
call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',
@@ -2450,7 +2463,7 @@ class TestMultipathPartitionHandler(CiTestCase):
m_part_info.return_value = (2048, 2048)
part2 = self.storage_config['disk-sda-part-2']
- block_meta.partition_handler(part2, self.storage_config, {})
+ block_meta.partition_handler(part2, self.storage_config, empty_context)
expected_calls = [
call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',
Follow ups
-
[Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Server Team CI bot, 2022-09-08
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Server Team CI bot, 2022-09-07
-
[Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Michael Hudson-Doyle, 2022-09-07
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Dan Bungert, 2022-09-07
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Michael Hudson-Doyle, 2022-09-07
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Dan Bungert, 2022-09-07
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Michael Hudson-Doyle, 2022-09-07
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Dan Bungert, 2022-09-06
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Server Team CI bot, 2022-09-05
-
Re: [Merge] ~mwhudson/curtin:FR-2657-record-block-device-path into curtin:master
From: Server Team CI bot, 2022-09-05