← Back to team overview

curtin-dev team mailing list archive

[Merge] ~mwhudson/curtin:pass-handlers-dict-to-handlers into curtin:master

 

Michael Hudson-Doyle has proposed merging ~mwhudson/curtin:pass-handlers-dict-to-handlers into curtin:master.

Commit message:
block_meta: pass handlers dict to all handlers

When we have different handlers for v2 and v1, we will need the
invocation of e.g. disk_handler from raid_handler to go to the right
version.



Requested reviews:
  curtin developers (curtin-dev)

For more details, see:
https://code.launchpad.net/~mwhudson/curtin/+git/curtin/+merge/412496
-- 
Your team curtin developers is requested to review the proposed merge of ~mwhudson/curtin:pass-handlers-dict-to-handlers into curtin:master.
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index 5c3226d..0917bdf 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -550,7 +550,7 @@ def get_path_to_storage_volume(volume, storage_config):
 DEVS = set()
 
 
-def image_handler(info, storage_config):
+def image_handler(info, storage_config, handlers):
     path = info['path']
     if os.path.exists(path):
         os.unlink(path)
@@ -566,10 +566,10 @@ def image_handler(info, storage_config):
         raise
     info['dev'] = dev
     DEVS.add(dev)
-    disk_handler(info, storage_config)
+    handlers['disk'](info, storage_config, handlers)
 
 
-def dasd_handler(info, storage_config):
+def dasd_handler(info, storage_config, handlers):
     """ Prepare the specified dasd device per configuration
 
     params: info: dictionary of configuration, required keys are:
@@ -614,7 +614,7 @@ def dasd_handler(info, storage_config):
                 "Dasd %s failed to format" % dasd_device.devname)
 
 
-def disk_handler(info, storage_config):
+def disk_handler(info, storage_config, handlers):
     _dos_names = ['dos', 'msdos']
     ptable = info.get('ptable')
     if ptable and ptable not in PTABLES_VALID:
@@ -673,7 +673,6 @@ def disk_handler(info, storage_config):
     if info.get('name'):
         make_dname(info.get('id'), storage_config)
 
-
 def getnumberoflogicaldisks(device, storage_config):
     logicaldisks = 0
     for key, item in storage_config.items():
@@ -843,7 +842,7 @@ def partition_verify_fdasd(disk_path, partnumber, info):
         raise RuntimeError("dasd partitions do not support flags")
 
 
-def partition_handler(info, storage_config):
+def partition_handler(info, storage_config, handlers):
     device = info.get('device')
     size = info.get('size')
     flag = info.get('flag')
@@ -1030,7 +1029,7 @@ def partition_handler(info, storage_config):
         make_dname(info.get('id'), storage_config)
 
 
-def format_handler(info, storage_config):
+def format_handler(info, storage_config, handlers):
     volume = info.get('volume')
     if not volume:
         raise ValueError("volume must be specified for partition '%s'" %
@@ -1280,7 +1279,7 @@ def mount_apply(fdata, target=None, fstab=None):
         LOG.info("fstab not in environment, so not writing")
 
 
-def mount_handler(info, storage_config):
+def mount_handler(info, storage_config, handlers):
     """ Handle storage config type: mount
 
     info = {
@@ -1316,7 +1315,7 @@ def lvm_volgroup_verify(vg_name, device_paths):
     verify_volgroup_members(vg_name, device_paths)
 
 
-def lvm_volgroup_handler(info, storage_config):
+def lvm_volgroup_handler(info, storage_config, handlers):
     devices = info.get('devices')
     device_paths = []
     name = info.get('name')
@@ -1377,7 +1376,7 @@ def lvm_partition_verify(lv_name, vg_name, info):
         verify_lv_size(lv_name, info['size'])
 
 
-def lvm_partition_handler(info, storage_config):
+def lvm_partition_handler(info, storage_config, handlers):
     volgroup = storage_config[info['volgroup']]['name']
     name = info['name']
     if not volgroup:
@@ -1439,7 +1438,7 @@ def dm_crypt_verify(dmcrypt_dev, volume_path):
     verify_blkdev_used(dmcrypt_dev, volume_path)
 
 
-def dm_crypt_handler(info, storage_config):
+def dm_crypt_handler(info, storage_config, handlers):
     state = util.load_command_environment(strict=True)
     volume = info.get('volume')
     keysize = info.get('keysize')
@@ -1581,7 +1580,7 @@ def raid_verify(md_devname, raidlevel, device_paths, spare_paths, container):
         md_devname, raidlevel, device_paths, spare_paths, container)
 
 
-def raid_handler(info, storage_config):
+def raid_handler(info, storage_config, handlers):
     state = util.load_command_environment(strict=True)
     devices = info.get('devices')
     raidlevel = info.get('raidlevel')
@@ -1663,7 +1662,7 @@ def raid_handler(info, storage_config):
     # If ptable is specified, call disk_handler on this mdadm device to create
     # the table
     if info.get('ptable'):
-        disk_handler(info, storage_config)
+        handlers['disk'](info, storage_config, handlers)
 
 
 def verify_bcache_cachedev(cachedev):
@@ -1730,7 +1729,7 @@ def bcache_verify(cachedev, backingdev, cache_mode):
     return True
 
 
-def bcache_handler(info, storage_config):
+def bcache_handler(info, storage_config, handlers):
     backing_device = get_path_to_storage_volume(info.get('backing_device'),
                                                 storage_config)
     cache_device = get_path_to_storage_volume(info.get('cache_device'),
@@ -1778,13 +1777,13 @@ def bcache_handler(info, storage_config):
         make_dname(info.get('id'), storage_config)
 
     if info.get('ptable'):
-        disk_handler(info, storage_config)
+        handlers['disk'](info, storage_config, handlers)
 
     LOG.debug('Finished bcache creation for backing %s or caching %s',
               backing_device, cache_device)
 
 
-def zpool_handler(info, storage_config):
+def zpool_handler(info, storage_config, handlers):
     """
     Create a zpool based in storage_configuration
     """
@@ -1823,7 +1822,7 @@ def zpool_handler(info, storage_config):
                      zfs_properties=fs_properties)
 
 
-def zfs_handler(info, storage_config):
+def zfs_handler(info, storage_config, handlers):
     """
     Create a zfs filesystem
     """
@@ -2029,7 +2028,7 @@ def meta_custom(args):
                 description="configuring %s: %s" % (command['type'],
                                                     command['id'])):
             try:
-                handler(command, storage_config_dict)
+                handler(command, storage_config_dict, command_handlers)
             except Exception as error:
                 LOG.error("An error occured handling '%s': %s - %s" %
                           (item_id, type(error).__name__, error))
diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
index c3f8d14..b1e1f20 100644
--- a/tests/unittests/test_commands_block_meta.py
+++ b/tests/unittests/test_commands_block_meta.py
@@ -371,7 +371,7 @@ class TestBlockMeta(CiTestCase):
         holders = ['md1']
         self.mock_get_holders.return_value = holders
 
-        block_meta.disk_handler(info, self.storage_config)
+        block_meta.disk_handler(info, self.storage_config, {})
 
         print("clear_holders: %s" % self.mock_clear_holders.call_args_list)
         print("assert_clear: %s" % self.mock_assert_clear.call_args_list)
@@ -394,7 +394,7 @@ class TestBlockMeta(CiTestCase):
         self.mock_block_sys_block_path.return_value = '/sys/class/block/xxx'
         self.mock_block_sector_size.return_value = (512, 512)
 
-        block_meta.partition_handler(part_info, self.storage_config)
+        block_meta.partition_handler(part_info, self.storage_config, {})
         part_offset = 2048 * 512
         self.mock_block_zero_file.assert_called_with(disk_kname, [part_offset],
                                                      exclusive=False)
@@ -421,7 +421,7 @@ class TestBlockMeta(CiTestCase):
         }
         self.mock_get_volume_type.return_value = 'part'
 
-        block_meta.mount_handler(mount_info, self.storage_config)
+        block_meta.mount_handler(mount_info, self.storage_config, {})
         options = 'defaults'
         comment = "# / was on /wark/xxx during curtin installation"
         expected = "%s\n%s %s %s %s 0 1\n" % (comment,
@@ -449,7 +449,7 @@ class TestBlockMeta(CiTestCase):
         }
         self.mock_get_volume_type.return_value = 'part'
 
-        block_meta.mount_handler(mount_info, self.storage_config)
+        block_meta.mount_handler(mount_info, self.storage_config, {})
         options = 'ro'
         comment = "# /readonly was on /wark/xxx during curtin installation"
         expected = "%s\n%s %s %s %s 0 1\n" % (comment,
@@ -478,7 +478,7 @@ class TestBlockMeta(CiTestCase):
         }
         self.mock_get_volume_type.return_value = 'part'
 
-        block_meta.mount_handler(mount_info, self.storage_config)
+        block_meta.mount_handler(mount_info, self.storage_config, {})
         options = 'defaults'
         comment = "# /readonly was on /wark/xxx during curtin installation"
         expected = "%s\n%s %s %s %s 0 1\n" % (comment,
@@ -509,7 +509,7 @@ class TestBlockMeta(CiTestCase):
         }
         self.mock_get_volume_type.return_value = 'part'
 
-        block_meta.mount_handler(mount_info, self.storage_config)
+        block_meta.mount_handler(mount_info, self.storage_config, {})
         options = 'defaults'
         comment = "# /readonly was on /wark/xxx during curtin installation"
         expected = "#curtin-test\n%s\n%s %s %s %s 0 1\n" % (comment,
@@ -542,7 +542,7 @@ class TestZpoolHandler(CiTestCase):
         m_getpath.return_value = disk_path
         m_block.disk_to_byid_path.return_value = None
         m_util.load_command_environment.return_value = {'target': 'mytarget'}
-        block_meta.zpool_handler(info, storage_config)
+        block_meta.zpool_handler(info, storage_config, {})
         m_zfs.zpool_create.assert_called_with(
             info['pool'], [disk_path],
             mountpoint="/",
@@ -1256,7 +1256,7 @@ class TestDasdHandler(CiTestCase):
         m_dasd_devname.return_value = disk_path
         m_getpath.return_value = disk_path
         m_dasd_needf.side_effect = [True, False]
-        block_meta.dasd_handler(info, storage_config)
+        block_meta.dasd_handler(info, storage_config, {})
         m_dasd_format.assert_called_with(blksize=4096, layout='cdl',
                                          set_label='cloudimg-rootfs',
                                          mode='quick')
@@ -1278,7 +1278,7 @@ class TestDasdHandler(CiTestCase):
         disk_path = "/wark/dasda"
         m_getpath.return_value = disk_path
         m_dasd_needf.side_effect = [False, False]
-        block_meta.dasd_handler(info, storage_config)
+        block_meta.dasd_handler(info, storage_config, {})
         self.assertEqual(0, m_dasd_format.call_count)
 
     @patch('curtin.commands.block_meta.dasd.DasdDevice.format')
@@ -1298,7 +1298,7 @@ class TestDasdHandler(CiTestCase):
         disk_path = "/wark/dasda"
         m_getpath.return_value = disk_path
         m_dasd_needf.side_effect = [False, False]
-        block_meta.dasd_handler(info, storage_config)
+        block_meta.dasd_handler(info, storage_config, {})
         self.assertEqual(1, m_dasd_needf.call_count)
         self.assertEqual(0, m_dasd_format.call_count)
 
@@ -1321,7 +1321,7 @@ class TestDasdHandler(CiTestCase):
         m_getpath.return_value = disk_path
         m_dasd_needf.side_effect = [True, False]
         with self.assertRaises(ValueError):
-            block_meta.dasd_handler(info, storage_config)
+            block_meta.dasd_handler(info, storage_config, {})
         self.assertEqual(1, m_dasd_needf.call_count)
         self.assertEqual(0, m_dasd_format.call_count)
 
@@ -1344,7 +1344,7 @@ class TestDiskHandler(CiTestCase):
         m_getpath.return_value = disk_path
         m_block.get_part_table_type.return_value = 'vtoc'
         m_getpath.return_value = disk_path
-        block_meta.disk_handler(info, storage_config)
+        block_meta.disk_handler(info, storage_config, {})
         m_getpath.assert_called_with(info['id'], storage_config)
         m_block.get_part_table_type.assert_called_with(disk_path)
 
@@ -1360,7 +1360,7 @@ class TestDiskHandler(CiTestCase):
         m_getpath.return_value = disk_path
         m_block.get_part_table_type.return_value = self.random_string()
         m_getpath.return_value = disk_path
-        block_meta.disk_handler(info, storage_config)
+        block_meta.disk_handler(info, storage_config, {})
         m_getpath.assert_called_with(info['id'], storage_config)
         self.assertEqual(0, m_block.get_part_table_type.call_count)
 
@@ -1376,7 +1376,7 @@ class TestDiskHandler(CiTestCase):
         m_getpath.return_value = disk_path
         m_block.get_part_table_type.return_value = 'gpt'
         m_getpath.return_value = disk_path
-        block_meta.disk_handler(info, storage_config)
+        block_meta.disk_handler(info, storage_config, {})
         m_getpath.assert_called_with(info['id'], storage_config)
         self.assertEqual(0, m_block.get_part_table_type.call_count)
 
@@ -1394,7 +1394,7 @@ class TestDiskHandler(CiTestCase):
         m_block.get_part_table_type.return_value = None
         m_getpath.return_value = disk_path
         with self.assertRaises(ValueError):
-            block_meta.disk_handler(info, storage_config)
+            block_meta.disk_handler(info, storage_config, {})
         m_getpath.assert_called_with(info['id'], storage_config)
         m_block.get_part_table_type.assert_called_with(disk_path)
 
@@ -1406,7 +1406,7 @@ class TestDiskHandler(CiTestCase):
         info = {'ptable': 'vtoc', 'type': 'disk', 'id': 'disk-foobar'}
         path = m_getpath.return_value = self.random_string()
         m_get_holders.return_value = []
-        block_meta.disk_handler(info, OrderedDict())
+        block_meta.disk_handler(info, OrderedDict(), {})
         m_subp.assert_called_once_with(['fdasd', '-c', '/dev/null', path])
 
 
@@ -1453,7 +1453,7 @@ class TestLvmVolgroupHandler(CiTestCase):
         self.m_getpath.side_effect = iter(devices)
 
         block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
-                                        self.storage_config)
+                                        self.storage_config, {})
 
         self.assertEqual([call(['vgcreate', '--force', '--zero=y', '--yes',
                                 'vg1'] + devices,  capture=True)],
@@ -1469,7 +1469,7 @@ class TestLvmVolgroupHandler(CiTestCase):
 
         self.storage_config['lvm-volgroup1']['preserve'] = True
         block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
-                                        self.storage_config)
+                                        self.storage_config, {})
 
         self.assertEqual(0, self.m_subp.call_count)
         self.assertEqual(1, self.m_lvm.lvm_scan.call_count)
@@ -1482,7 +1482,7 @@ class TestLvmVolgroupHandler(CiTestCase):
         self.storage_config['lvm-volgroup1']['preserve'] = True
 
         block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
-                                        self.storage_config)
+                                        self.storage_config, {})
 
         self.assertEqual(1, self.m_lvm.activate_volgroups.call_count)
         self.assertEqual([call('vg1')],
@@ -1499,7 +1499,7 @@ class TestLvmVolgroupHandler(CiTestCase):
 
         with self.assertRaises(RuntimeError):
             block_meta.lvm_volgroup_handler(
-                self.storage_config['lvm-volgroup1'], self.storage_config)
+                self.storage_config['lvm-volgroup1'], self.storage_config, {})
 
         self.assertEqual(1, self.m_lvm.activate_volgroups.call_count)
         self.assertEqual([call('vg1')],
@@ -1550,7 +1550,7 @@ class TestLvmPartitionHandler(CiTestCase):
         expected_size_str = "%sB" % util.human2bytes(lv_size)
 
         block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
-                                         self.storage_config)
+                                         self.storage_config, {})
 
         call_name, call_args, call_kwargs = self.m_subp.mock_calls[0]
         # call_args is an n-tuple of arg list
@@ -1564,7 +1564,7 @@ class TestLvmPartitionHandler(CiTestCase):
         self.m_getpath.return_value = devpath
 
         block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
-                                         self.storage_config)
+                                         self.storage_config, {})
         self.m_wipe.assert_called_with(devpath, mode='superblock',
                                        exclusive=False)
 
@@ -1578,7 +1578,7 @@ class TestLvmPartitionHandler(CiTestCase):
         wipe_mode = 'zero'
         self.storage_config['lvm-part1']['wipe'] = wipe_mode
         block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
-                                         self.storage_config)
+                                         self.storage_config, {})
         self.m_wipe.assert_called_with(devpath, mode=wipe_mode,
                                        exclusive=False)
 
@@ -1587,7 +1587,7 @@ class TestLvmPartitionHandler(CiTestCase):
         m_verify.return_value = True
         self.storage_config['lvm-part1']['preserve'] = True
         block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
-                                         self.storage_config)
+                                         self.storage_config, {})
         self.assertEqual(0, self.m_distro.lsb_release.call_count)
         self.assertEqual(0, self.m_subp.call_count)
 
@@ -1597,7 +1597,7 @@ class TestLvmPartitionHandler(CiTestCase):
         self.m_lvm.get_lv_size_bytes.return_value = 1073741824.0
 
         block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
-                                         self.storage_config)
+                                         self.storage_config, {})
         self.assertEqual([call('vg1')],
                          self.m_lvm.get_lvols_in_volgroup.call_args_list)
         self.assertEqual([call('lv1')],
@@ -1611,7 +1611,7 @@ class TestLvmPartitionHandler(CiTestCase):
 
         with self.assertRaises(RuntimeError):
             block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
-                                             self.storage_config)
+                                             self.storage_config, {})
 
             self.assertEqual([call('vg1')],
                              self.m_lvm.get_lvols_in_volgroup.call_args_list)
@@ -1626,7 +1626,7 @@ class TestLvmPartitionHandler(CiTestCase):
 
         with self.assertRaises(RuntimeError):
             block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
-                                             self.storage_config)
+                                             self.storage_config, {})
             self.assertEqual([call('vg1')],
                              self.m_lvm.get_lvols_in_volgroup.call_args_list)
             self.assertEqual([call('lv1')],
@@ -1694,7 +1694,7 @@ class TestDmCryptHandler(CiTestCase):
         self.m_getpath.return_value = volume_path
 
         info = self.storage_config['dmcrypt0']
-        block_meta.dm_crypt_handler(info, self.storage_config)
+        block_meta.dm_crypt_handler(info, self.storage_config, {})
         expected_calls = [
             call(['cryptsetup', '--cipher', self.cipher,
                   '--key-size', self.keysize,
@@ -1712,7 +1712,7 @@ class TestDmCryptHandler(CiTestCase):
         info = self.storage_config['dmcrypt0']
         del info['dm_name']
 
-        block_meta.dm_crypt_handler(info, self.storage_config)
+        block_meta.dm_crypt_handler(info, self.storage_config, {})
         expected_calls = [
             call(['cryptsetup', '--cipher', self.cipher,
                   '--key-size', self.keysize,
@@ -1736,7 +1736,7 @@ class TestDmCryptHandler(CiTestCase):
 
         info = self.storage_config['dmcrypt0']
         volume_name = "%s:%s" % (volume_byid, info['dm_name'])
-        block_meta.dm_crypt_handler(info, self.storage_config)
+        block_meta.dm_crypt_handler(info, self.storage_config, {})
         expected_calls = [
             call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
                   '--sector-size', '4096', '--name', info['dm_name'],
@@ -1771,7 +1771,7 @@ class TestDmCryptHandler(CiTestCase):
 
         info = self.storage_config['dmcrypt0']
         volume_name = "%s:%s" % (volume_byid, info['dm_name'])
-        block_meta.dm_crypt_handler(info, self.storage_config)
+        block_meta.dm_crypt_handler(info, self.storage_config, {})
         expected_calls = [
             call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
                   '--sector-size', '4096', '--name', info['dm_name'],
@@ -1808,7 +1808,7 @@ class TestDmCryptHandler(CiTestCase):
 
         info = self.storage_config['dmcrypt0']
         volume_name = "%s:%s" % (volume_byid, info['dm_name'])
-        block_meta.dm_crypt_handler(info, self.storage_config)
+        block_meta.dm_crypt_handler(info, self.storage_config, {})
         expected_calls = [
             call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
                   '--sector-size', '4096', '--name', info['dm_name'],
@@ -1835,7 +1835,7 @@ class TestDmCryptHandler(CiTestCase):
 
         info = self.storage_config['dmcrypt0']
         info['preserve'] = True
-        block_meta.dm_crypt_handler(info, self.storage_config)
+        block_meta.dm_crypt_handler(info, self.storage_config, {})
 
         self.assertEqual(0, self.m_subp.call_count)
         self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
@@ -1856,7 +1856,7 @@ class TestDmCryptHandler(CiTestCase):
 
         info = self.storage_config['dmcrypt0']
         info['preserve'] = True
-        block_meta.dm_crypt_handler(info, self.storage_config)
+        block_meta.dm_crypt_handler(info, self.storage_config, {})
         self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
 
     @patch('curtin.commands.block_meta.os.path.exists')
@@ -1868,7 +1868,7 @@ class TestDmCryptHandler(CiTestCase):
         info = self.storage_config['dmcrypt0']
         info['preserve'] = True
         with self.assertRaises(RuntimeError):
-            block_meta.dm_crypt_handler(info, self.storage_config)
+            block_meta.dm_crypt_handler(info, self.storage_config, {})
 
     @patch('curtin.commands.block_meta.os.path.exists')
     def test_dm_crypt_preserve_raises_exception_if_wrong_dev_used(self, m_ex):
@@ -1886,7 +1886,7 @@ class TestDmCryptHandler(CiTestCase):
         info = self.storage_config['dmcrypt0']
         info['preserve'] = True
         with self.assertRaises(RuntimeError):
-            block_meta.dm_crypt_handler(info, self.storage_config)
+            block_meta.dm_crypt_handler(info, self.storage_config, {})
 
 
 class TestRaidHandler(CiTestCase):
@@ -1984,7 +1984,7 @@ class TestRaidHandler(CiTestCase):
             self.storage_config['mddevice']['name'] = param
             try:
                 block_meta.raid_handler(self.storage_config['mddevice'],
-                                        self.storage_config)
+                                        self.storage_config, {})
             except ValueError:
                 if param in ['bad/path']:
                     continue
@@ -2006,7 +2006,7 @@ class TestRaidHandler(CiTestCase):
         md_devname = '/dev/' + self.storage_config['mddevice']['name']
         self.m_getpath.side_effect = iter(devices)
         block_meta.raid_handler(self.storage_config['mddevice'],
-                                self.storage_config)
+                                self.storage_config, {})
         self.assertEqual([call(md_devname, 5, devices, [], None, '', None)],
                          self.m_mdadm.mdadm_create.call_args_list)
 
@@ -2020,7 +2020,7 @@ class TestRaidHandler(CiTestCase):
         self.m_getpath.side_effect = iter(devices)
         self.storage_config['mddevice']['preserve'] = True
         block_meta.raid_handler(self.storage_config['mddevice'],
-                                self.storage_config)
+                                self.storage_config, {})
         self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
         self.assertEqual(
             [call(md_devname, 5, devices, [], None)],
@@ -2037,7 +2037,7 @@ class TestRaidHandler(CiTestCase):
         del self.storage_config['mddevice']['devices']
         self.storage_config['mddevice']['container'] = self.random_string()
         block_meta.raid_handler(self.storage_config['mddevice'],
-                                self.storage_config)
+                                self.storage_config, {})
         self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
         self.assertEqual(
             [call(md_devname, 5, [], [], devices[0])],
@@ -2053,7 +2053,7 @@ class TestRaidHandler(CiTestCase):
         self.m_mdadm.md_check.return_value = True
         self.storage_config['mddevice']['preserve'] = True
         block_meta.raid_handler(self.storage_config['mddevice'],
-                                self.storage_config)
+                                self.storage_config, {})
         self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
         self.assertEqual([call(md_devname, 5, devices, [], None)],
                          self.m_mdadm.md_check.call_args_list)
@@ -2068,7 +2068,7 @@ class TestRaidHandler(CiTestCase):
         self.m_mdadm.md_check.side_effect = iter([ValueError(), None])
         self.storage_config['mddevice']['preserve'] = True
         block_meta.raid_handler(self.storage_config['mddevice'],
-                                self.storage_config)
+                                self.storage_config, {})
         self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
         self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
                          self.m_mdadm.md_check.call_args_list)
@@ -2086,7 +2086,7 @@ class TestRaidHandler(CiTestCase):
         self.storage_config['mddevice']['preserve'] = True
         with self.assertRaises(ValueError):
             block_meta.raid_handler(self.storage_config['mddevice'],
-                                    self.storage_config)
+                                    self.storage_config, {})
         self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
         self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
                          self.m_mdadm.md_check.call_args_list)
@@ -2179,7 +2179,7 @@ class TestBcacheHandler(CiTestCase):
         self.m_bcache.create_cache_device.return_value = cset_uuid
 
         block_meta.bcache_handler(self.storage_config['id_bcache0'],
-                                  self.storage_config)
+                                  self.storage_config, {})
         self.assertEqual([call(caching_device)],
                          self.m_bcache.create_cache_device.call_args_list)
         self.assertEqual([
@@ -2302,7 +2302,7 @@ class TestPartitionHandler(CiTestCase):
         self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
         self.m_block.get_blockdev_sector_size.return_value = (512, 512)
         m_ex_part.return_value = 'disk-sda-part-2'
-        block_meta.partition_handler(logical_part, self.storage_config)
+        block_meta.partition_handler(logical_part, self.storage_config, {})
         m_ex_part.assert_called_with('sda', self.storage_config)
 
     def test_part_handler_raise_exception_missing_extended_part(self):
@@ -2322,7 +2322,7 @@ class TestPartitionHandler(CiTestCase):
         self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
         self.m_block.get_blockdev_sector_size.return_value = (512, 512)
         with self.assertRaises(RuntimeError):
-            block_meta.partition_handler(logical_part, self.storage_config)
+            block_meta.partition_handler(logical_part, self.storage_config, {})
 
     @patch('curtin.commands.block_meta.partition_verify_fdasd')
     def test_part_hander_reuse_vtoc(self, m_verify_fdasd):
@@ -2349,7 +2349,7 @@ class TestPartitionHandler(CiTestCase):
         m_verify_fdasd.return_value = True
         devpath = self.m_getpath.return_value = self.random_string()
 
-        block_meta.partition_handler(sconfig[1], oconfig)
+        block_meta.partition_handler(sconfig[1], oconfig, {})
 
         m_verify_fdasd.assert_has_calls([call(devpath, 1, sconfig[1])])
 
@@ -2412,7 +2412,7 @@ class TestMultipathPartitionHandler(CiTestCase):
         m_part_info.return_value = (2048, 2048)
 
         part2 = self.storage_config['disk-sda-part-2']
-        block_meta.partition_handler(part2, self.storage_config)
+        block_meta.partition_handler(part2, self.storage_config, {})
 
         expected_calls = [
             call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',
@@ -2441,7 +2441,7 @@ class TestMultipathPartitionHandler(CiTestCase):
         m_part_info.return_value = (2048, 2048)
 
         part2 = self.storage_config['disk-sda-part-2']
-        block_meta.partition_handler(part2, self.storage_config)
+        block_meta.partition_handler(part2, self.storage_config, {})
 
         expected_calls = [
             call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',

Follow ups