curtin-dev team mailing list archive
Message #01573
[Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master
Michael Hudson-Doyle has proposed merging ~mwhudson/curtin:mdadm-check-container into curtin:master.
Commit message:
mdadm: improvements to raid verification
Fix the verification of raid containers, members of containers and raid0 devices (which don't have a degraded or sync_action status at all).
I've also tried to make the verification functions consistent about raising ValueError on failure -- previously there was some confusion between raising an error and returning True / False.
Requested reviews:
curtin developers (curtin-dev)
For more details, see:
https://code.launchpad.net/~mwhudson/curtin/+git/curtin/+merge/402384
--
Your team curtin developers is requested to review the proposed merge of ~mwhudson/curtin:mdadm-check-container into curtin:master.
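
For readers skimming the diff, here is a minimal sketch, assuming only what the commit message above says, of the convention this branch moves to: verification helpers raise ValueError on failure instead of returning True/False, and the caller reacts to a failure by assembling the array and checking once more. The helpers below (check_level, verify_with_retry, read_level, assemble) are hypothetical stand-ins, not curtin functions; the real counterparts in the diff are mdadm.md_check, verify_md_components and mdadm.mdadm_assemble.

# Minimal sketch, not curtin code; all names are illustrative stand-ins.

def check_level(md_devname, expected_level, actual_level):
    """Raise ValueError if the array level does not match expectations."""
    if actual_level != expected_level:
        raise ValueError(
            "raid device %s should have level %r but has level %r"
            % (md_devname, expected_level, actual_level))
    # Success is signalled by returning None, not by returning True.


def verify_with_retry(md_devname, expected_level, read_level, assemble):
    """Check the array; on failure, assemble it and check exactly once more."""
    try:
        check_level(md_devname, expected_level, read_level(md_devname))
    except ValueError:
        assemble(md_devname)
        # A second failure propagates to the caller unchanged.
        check_level(md_devname, expected_level, read_level(md_devname))

This mirrors the shape of verify_md_components after the change: because the helper no longer returns a boolean, the caller drops the check_ok bookkeeping visible in the removed lines.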
diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
index a6ac970..a5dfc9f 100644
--- a/curtin/block/mdadm.py
+++ b/curtin/block/mdadm.py
@@ -505,9 +505,9 @@ def md_sysfs_attr_path(md_devname, attrname):
return os.path.join(sysmd, attrname)
-def md_sysfs_attr(md_devname, attrname):
+def md_sysfs_attr(md_devname, attrname, default=''):
""" Return the attribute str of an md device found under the 'md' dir """
- attrdata = ''
+ attrdata = default
if not valid_mdname(md_devname):
raise ValueError('Invalid md devicename: [{}]'.format(md_devname))
@@ -645,45 +645,6 @@ def md_device_key_dev(devname):
return 'MD_DEVICE_' + dev_short(devname) + '_DEV'
-def __upgrade_detail_dict(detail):
- ''' This method attempts to convert mdadm --detail output into
- a KEY=VALUE output the same as mdadm --detail --export from mdadm v3.3
- '''
- # if the input already has MD_UUID, it's already been converted
- if 'MD_UUID' in detail:
- return detail
-
- md_detail = {
- 'MD_LEVEL': detail['raid_level'],
- 'MD_DEVICES': detail['raid_devices'],
- 'MD_METADATA': detail['version'],
- 'MD_NAME': detail['name'].split()[0],
- }
-
- # exmaine has ARRAY UUID
- if 'array_uuid' in detail:
- md_detail.update({'MD_UUID': detail['array_uuid']})
- # query,detail has UUID
- elif 'uuid' in detail:
- md_detail.update({'MD_UUID': detail['uuid']})
-
- device = detail['device']
-
- # MD_DEVICE_vdc1_DEV=/dev/vdc1
- md_detail.update({md_device_key_dev(device): device})
-
- if 'device_role' in detail:
- role = detail['device_role']
- if role != 'spare':
- # device_role = Active device 1
- role = role.split()[-1]
-
- # MD_DEVICE_vdc1_ROLE=spare
- md_detail.update({md_device_key_role(device): role})
-
- return md_detail
-
-
def md_read_run_mdadm_map():
'''
md1 1.2 59beb40f:4c202f67:088e702b:efdf577a /dev/md1
@@ -719,8 +680,6 @@ def md_check_array_uuid(md_devname, md_uuid):
'%s -> %s != %s' % (mduuid_path, mdlink_devname, md_devname))
raise ValueError(err)
- return True
-
def md_get_uuid(md_devname):
valid_mdname(md_devname)
@@ -741,13 +700,24 @@ def _compare_devlist(expected, found):
" Missing: {} Extra: {}".format(missing, extra))
-def md_check_raidlevel(raidlevel):
+def md_check_raidlevel(md_devname, detail, raidlevel):
# Validate raidlevel against what curtin supports configuring
if raidlevel not in VALID_RAID_LEVELS:
err = ('Invalid raidlevel: ' + raidlevel +
' Must be one of: ' + str(VALID_RAID_LEVELS))
raise ValueError(err)
- return True
+ # normalize raidlevel to the values mdadm prints.
+ if isinstance(raidlevel, int) or len(raidlevel) <= 2:
+ raidlevel = 'raid' + str(raidlevel)
+ elif raidlevel == 'stripe':
+ raidlevel = 'raid0'
+ elif raidlevel == 'mirror':
+ raidlevel = 'raid1'
+ actual_level = detail.get("MD_LEVEL")
+ if actual_level != raidlevel:
+ raise ValueError(
+ "raid device %s should have level %r but has level %r" % (
+ md_devname, raidlevel, actual_level))
def md_block_until_in_sync(md_devname):
@@ -770,24 +740,24 @@ def md_check_array_state(md_devname):
# check array state
writable = md_check_array_state_rw(md_devname)
- degraded = md_sysfs_attr(md_devname, 'degraded')
- sync_action = md_sysfs_attr(md_devname, 'sync_action')
+ # Raid 0 arrays do not have degraded or sync_action sysfs
+ # attributes.
+ degraded = md_sysfs_attr(md_devname, 'degraded', None)
+ sync_action = md_sysfs_attr(md_devname, 'sync_action', None)
if not writable:
raise ValueError('Array not in writable state: ' + md_devname)
- if degraded != "0":
+ if degraded is not None and degraded != "0":
raise ValueError('Array in degraded state: ' + md_devname)
- if sync_action != "idle":
+ if sync_action is not None and sync_action != "idle":
raise ValueError('Array syncing, not idle state: ' + md_devname)
- return True
-
def md_check_uuid(md_devname):
md_uuid = md_get_uuid(md_devname)
if not md_uuid:
raise ValueError('Failed to get md UUID from device: ' + md_devname)
- return md_check_array_uuid(md_devname, md_uuid)
+ md_check_array_uuid(md_devname, md_uuid)
def md_check_devices(md_devname, devices):
@@ -833,26 +803,35 @@ def md_check_array_membership(md_devname, devices):
raise ValueError(err)
-def md_check(md_devname, raidlevel, devices=[], spares=[]):
+def md_check(md_devname, raidlevel, devices, spares, container):
''' Check passed in variables from storage configuration against
the system we're running upon.
'''
LOG.debug('RAID validation: ' +
- 'name={} raidlevel={} devices={} spares={}'.format(md_devname,
- raidlevel,
- devices,
- spares))
+ 'name={} raidlevel={} devices={} spares={} container={}'.format(
+ md_devname, raidlevel, devices, spares, container))
assert_valid_devpath(md_devname)
- md_check_array_state(md_devname)
- md_check_raidlevel(raidlevel)
+ detail = mdadm_query_detail(md_devname)
+
+ if raidlevel != "container":
+ md_check_array_state(md_devname)
+ md_check_raidlevel(md_devname, detail, raidlevel)
md_check_uuid(md_devname)
- md_check_devices(md_devname, devices)
- md_check_spares(md_devname, spares)
- md_check_array_membership(md_devname, devices + spares)
+ if container is None:
+ md_check_devices(md_devname, devices)
+ md_check_spares(md_devname, spares)
+ md_check_array_membership(md_devname, devices + spares)
+ else:
+ if 'MD_CONTAINER' not in detail:
+ raise ValueError("%s is not in a container" % (
+ md_devname))
+ actual_container = os.path.realpath(detail['MD_CONTAINER'])
+ if actual_container != container:
+ raise ValueError("%s is in container %r, not %r" % (
+ md_devname, actual_container, container))
LOG.debug('RAID array OK: ' + md_devname)
- return True
def md_is_in_container(md_devname):
diff --git a/curtin/block/schemas.py b/curtin/block/schemas.py
index 3923321..d846505 100644
--- a/curtin/block/schemas.py
+++ b/curtin/block/schemas.py
@@ -308,9 +308,13 @@ RAID = {
'title': 'curtin storage configuration for a RAID.',
'description': ('Declarative syntax for specifying RAID.'),
'definitions': definitions,
- 'required': ['id', 'type', 'name', 'raidlevel', 'devices'],
+ 'required': ['id', 'type', 'name', 'raidlevel'],
'type': 'object',
'additionalProperties': False,
+ 'oneOf': [
+ {'required': ['devices']},
+ {'required': ['container']},
+ ],
'properties': {
'id': {'$ref': '#/definitions/id'},
'devices': {'$ref': '#/definitions/devices'},
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index 5a087a3..bb5e236 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -1496,24 +1496,22 @@ def dm_crypt_handler(info, storage_config):
so not writing crypttab")
-def verify_md_components(md_devname, raidlevel, device_paths, spare_paths):
+def verify_md_components(md_devname, raidlevel, device_paths, spare_paths,
+ container):
# check if the array is already up, if not try to assemble
- check_ok = mdadm.md_check(md_devname, raidlevel, device_paths,
- spare_paths)
- if not check_ok:
+ try:
+ mdadm.md_check(md_devname, raidlevel, device_paths,
+ spare_paths, container)
+ except ValueError:
LOG.info("assembling preserved raid for %s", md_devname)
mdadm.mdadm_assemble(md_devname, device_paths, spare_paths)
- check_ok = mdadm.md_check(md_devname, raidlevel, device_paths,
- spare_paths)
- msg = ('Verifying %s raid composition, found raid is %s'
- % (md_devname, 'OK' if check_ok else 'not OK'))
- LOG.debug(msg)
- if not check_ok:
- raise RuntimeError(msg)
+ mdadm.md_check(md_devname, raidlevel, device_paths,
+ spare_paths, container)
-def raid_verify(md_devname, raidlevel, device_paths, spare_paths):
- verify_md_components(md_devname, raidlevel, device_paths, spare_paths)
+def raid_verify(md_devname, raidlevel, device_paths, spare_paths, container):
+ verify_md_components(
+ md_devname, raidlevel, device_paths, spare_paths, container)
def raid_handler(info, storage_config):
@@ -1556,7 +1554,9 @@ def raid_handler(info, storage_config):
create_raid = True
if preserve:
- raid_verify(md_devname, raidlevel, device_paths, spare_device_paths)
+ raid_verify(
+ md_devname, raidlevel, device_paths, spare_device_paths,
+ container_dev)
LOG.debug('raid %s already present, skipping create', md_devname)
create_raid = False
diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py
index b04cf82..74396d8 100644
--- a/tests/unittests/test_block_mdadm.py
+++ b/tests/unittests/test_block_mdadm.py
@@ -942,8 +942,8 @@ class TestBlockMdadmMdHelpers(CiTestCase):
devname = '/dev/md0'
md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
mock_os.path.realpath.return_value = devname
- rv = mdadm.md_check_array_uuid(devname, md_uuid)
- self.assertTrue(rv)
+ # "assertNotRaises"
+ mdadm.md_check_array_uuid(devname, md_uuid)
@patch('curtin.block.mdadm.os')
def test_md_check_array_uuid_mismatch(self, mock_os):
@@ -970,43 +970,87 @@ class TestBlockMdadmMdHelpers(CiTestCase):
def test_md_check_raid_level(self):
for rl in mdadm.VALID_RAID_LEVELS:
- self.assertTrue(mdadm.md_check_raidlevel(rl))
+ if isinstance(rl, int) or len(rl) <= 2:
+ el = 'raid%s' % (rl,)
+ elif rl == 'stripe':
+ el = 'raid0'
+ elif rl == 'mirror':
+ el = 'raid1'
+ else:
+ el = rl
+ # "assertNotRaises"
+ mdadm.md_check_raidlevel('md0', {'MD_LEVEL': el}, rl)
def test_md_check_raid_level_bad(self):
bogus = '27'
self.assertTrue(bogus not in mdadm.VALID_RAID_LEVELS)
with self.assertRaises(ValueError):
- mdadm.md_check_raidlevel(bogus)
+ mdadm.md_check_raidlevel('md0', {}, bogus)
@patch('curtin.block.mdadm.md_sysfs_attr')
def test_md_check_array_state(self, mock_attr):
mdname = '/dev/md0'
- mock_attr.side_effect = [
- 'clean', # array_state
- '0', # degraded
- 'idle', # sync_action
- ]
- self.assertTrue(mdadm.md_check_array_state(mdname))
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'clean'
+ elif attrname == 'degraded':
+ return '0'
+ elif attrname == 'sync_action':
+ return 'idle'
+
+ mock_attr.side_effect = mock_attr_impl
+ # "assertNotRaises"
+ mdadm.md_check_array_state(mdname)
+
+ @patch('curtin.block.mdadm.md_sysfs_attr')
+ def test_md_check_array_state_raid0(self, mock_attr):
+ # Raid 0 arrays do not have a degraded or sync_action sysfs
+ # attribute.
+ mdname = '/dev/md0'
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'clean'
+ elif attrname == 'degraded':
+ return default
+ elif attrname == 'sync_action':
+ return default
+
+ mock_attr.side_effect = mock_attr_impl
+ # "assertNotRaises"
+ mdadm.md_check_array_state(mdname)
@patch('curtin.block.mdadm.md_sysfs_attr')
def test_md_check_array_state_norw(self, mock_attr):
mdname = '/dev/md0'
- mock_attr.side_effect = [
- 'suspended', # array_state
- '0', # degraded
- 'idle', # sync_action
- ]
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'suspended'
+ elif attrname == 'degraded':
+ return '0'
+ elif attrname == 'sync_action':
+ return 'idle'
+
+ mock_attr.side_effect = mock_attr_impl
with self.assertRaises(ValueError):
mdadm.md_check_array_state(mdname)
@patch('curtin.block.mdadm.md_sysfs_attr')
def test_md_check_array_state_degraded(self, mock_attr):
mdname = '/dev/md0'
- mock_attr.side_effect = [
- 'clean', # array_state
- '1', # degraded
- 'idle', # sync_action
- ]
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'clean'
+ elif attrname == 'degraded':
+ return '1'
+ elif attrname == 'sync_action':
+ return 'idle'
+
+ mock_attr.side_effect = mock_attr_impl
+
with self.assertRaises(ValueError):
mdadm.md_check_array_state(mdname)
@@ -1039,8 +1083,8 @@ class TestBlockMdadmMdHelpers(CiTestCase):
mock_guuid.return_value = '93a73e10:427f280b:b7076c02:204b8f7a'
mock_ckuuid.return_value = True
- rv = mdadm.md_check_uuid(mdname)
- self.assertTrue(rv)
+ # "assertNotRaises"
+ mdadm.md_check_uuid(mdname)
@patch('curtin.block.mdadm.md_check_array_uuid')
@patch('curtin.block.mdadm.md_get_uuid')
@@ -1152,6 +1196,7 @@ class TestBlockMdadmMdHelpers(CiTestCase):
with self.assertRaises(ValueError):
mdadm.md_check_array_membership(mdname, devices)
+ @patch('curtin.block.mdadm.mdadm_query_detail')
@patch('curtin.block.mdadm.md_check_array_membership')
@patch('curtin.block.mdadm.md_check_spares')
@patch('curtin.block.mdadm.md_check_devices')
@@ -1159,7 +1204,7 @@ class TestBlockMdadmMdHelpers(CiTestCase):
@patch('curtin.block.mdadm.md_check_raidlevel')
@patch('curtin.block.mdadm.md_check_array_state')
def test_md_check_all_good(self, mock_array, mock_raid, mock_uuid,
- mock_dev, mock_spare, mock_member):
+ mock_dev, mock_spare, mock_member, mock_detail):
md_devname = '/dev/md0'
raidlevel = 1
devices = ['/dev/vda', '/dev/vdb']
@@ -1171,16 +1216,143 @@ class TestBlockMdadmMdHelpers(CiTestCase):
mock_dev.return_value = None
mock_spare.return_value = None
mock_member.return_value = None
+ detail = {'MD_NAME': 'foo'}
+ mock_detail.return_value = detail
- mdadm.md_check(md_devname, raidlevel, devices=devices, spares=spares)
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=None)
mock_array.assert_has_calls([call(md_devname)])
- mock_raid.assert_has_calls([call(raidlevel)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
mock_uuid.assert_has_calls([call(md_devname)])
mock_dev.assert_has_calls([call(md_devname, devices)])
mock_spare.assert_has_calls([call(md_devname, spares)])
mock_member.assert_has_calls([call(md_devname, devices + spares)])
+ @patch('curtin.block.mdadm.os.path.realpath')
+ @patch('curtin.block.mdadm.mdadm_query_detail')
+ @patch('curtin.block.mdadm.md_check_array_membership')
+ @patch('curtin.block.mdadm.md_check_spares')
+ @patch('curtin.block.mdadm.md_check_devices')
+ @patch('curtin.block.mdadm.md_check_uuid')
+ @patch('curtin.block.mdadm.md_check_raidlevel')
+ @patch('curtin.block.mdadm.md_check_array_state')
+ def test_md_check_all_good_container(self, mock_array, mock_raid,
+ mock_uuid, mock_dev, mock_spare,
+ mock_member, mock_detail,
+ mock_realpath):
+ md_devname = '/dev/md0'
+ raidlevel = 1
+ devices = ['/dev/vda', '/dev/vdb']
+ spares = ['/dev/vdc']
+
+ mock_array.return_value = None
+ mock_raid.return_value = None
+ mock_uuid.return_value = None
+ mock_dev.return_value = None
+ mock_spare.return_value = None
+ mock_member.return_value = None
+ container_name = self.random_string()
+ container_dev = self.random_string()
+ detail = {'MD_CONTAINER': container_name}
+ mock_detail.return_value = detail
+
+ def realpath_impl(path):
+ if path == container_name:
+ return container_dev
+ else:
+ self.fail("unexpected realpath arg %r" % (path,))
+
+ mock_realpath.side_effect = realpath_impl
+
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=container_dev)
+
+ mock_array.assert_has_calls([call(md_devname)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
+ mock_uuid.assert_has_calls([call(md_devname)])
+ mock_dev.assert_has_calls([])
+ mock_spare.assert_has_calls([])
+ mock_member.assert_has_calls([])
+
+ @patch('curtin.block.mdadm.mdadm_query_detail')
+ @patch('curtin.block.mdadm.md_check_array_membership')
+ @patch('curtin.block.mdadm.md_check_spares')
+ @patch('curtin.block.mdadm.md_check_devices')
+ @patch('curtin.block.mdadm.md_check_uuid')
+ @patch('curtin.block.mdadm.md_check_raidlevel')
+ @patch('curtin.block.mdadm.md_check_array_state')
+ def test_md_check_all_no_container(self, mock_array, mock_raid,
+ mock_uuid, mock_dev, mock_spare,
+ mock_member, mock_detail):
+ md_devname = '/dev/md0'
+ raidlevel = 1
+ devices = ['/dev/vda', '/dev/vdb']
+ spares = ['/dev/vdc']
+
+ mock_array.return_value = None
+ mock_raid.return_value = None
+ mock_uuid.return_value = None
+ mock_dev.return_value = None
+ mock_spare.return_value = None
+ mock_member.return_value = None
+ container_name = self.random_string()
+ detail = {}
+
+ mock_detail.return_value = detail
+
+ with self.assertRaises(ValueError):
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=container_name)
+
+ mock_array.assert_has_calls([call(md_devname)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
+ mock_uuid.assert_has_calls([call(md_devname)])
+ mock_dev.assert_has_calls([])
+ mock_spare.assert_has_calls([])
+ mock_member.assert_has_calls([])
+
+ @patch('curtin.block.mdadm.mdadm_query_detail')
+ @patch('curtin.block.mdadm.md_check_array_membership')
+ @patch('curtin.block.mdadm.md_check_spares')
+ @patch('curtin.block.mdadm.md_check_devices')
+ @patch('curtin.block.mdadm.md_check_uuid')
+ @patch('curtin.block.mdadm.md_check_raidlevel')
+ @patch('curtin.block.mdadm.md_check_array_state')
+ def test_md_check_all_wrong_container(self, mock_array, mock_raid,
+ mock_uuid, mock_dev, mock_spare,
+ mock_member, mock_detail):
+ md_devname = '/dev/md0'
+ raidlevel = 1
+ devices = ['/dev/vda', '/dev/vdb']
+ spares = ['/dev/vdc']
+
+ mock_array.return_value = None
+ mock_raid.return_value = None
+ mock_uuid.return_value = None
+ mock_dev.return_value = None
+ mock_spare.return_value = None
+ mock_member.return_value = None
+ container_name = self.random_string()
+ detail = {'MD_CONTAINER': container_name + '1'}
+
+ mock_detail.return_value = detail
+
+ with self.assertRaises(ValueError):
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=container_name)
+
+ mock_array.assert_has_calls([call(md_devname)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
+ mock_uuid.assert_has_calls([call(md_devname)])
+ mock_dev.assert_has_calls([])
+ mock_spare.assert_has_calls([])
+ mock_member.assert_has_calls([])
+
def test_md_check_all_good_devshort(self):
md_devname = 'md0'
raidlevel = 1
@@ -1189,7 +1361,7 @@ class TestBlockMdadmMdHelpers(CiTestCase):
with self.assertRaises(ValueError):
mdadm.md_check(md_devname, raidlevel, devices=devices,
- spares=spares)
+ spares=spares, container=None)
def test_md_present(self):
mdname = 'md0'
diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
index cd396f2..48a3217 100644
--- a/tests/unittests/test_commands_block_meta.py
+++ b/tests/unittests/test_commands_block_meta.py
@@ -2008,12 +2008,32 @@ class TestRaidHandler(CiTestCase):
devices = [self.random_string(), self.random_string(),
self.random_string()]
+ md_devname = '/dev/' + self.storage_config['mddevice']['name']
+ self.m_getpath.side_effect = iter(devices)
+ self.storage_config['mddevice']['preserve'] = True
+ block_meta.raid_handler(self.storage_config['mddevice'],
+ self.storage_config)
+ self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
+ self.assertEqual(
+ [call(md_devname, 5, devices, [], None)],
+ m_verify.call_args_list)
+
+ @patch('curtin.commands.block_meta.raid_verify')
+ def test_raid_handler_preserves_existing_device_container(self, m_verify):
+ """ raid_handler preserves existing device. """
+
+ devices = [self.random_string()]
+ md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
- m_verify.return_value = True
self.storage_config['mddevice']['preserve'] = True
+ del self.storage_config['mddevice']['devices']
+ self.storage_config['mddevice']['container'] = self.random_string()
block_meta.raid_handler(self.storage_config['mddevice'],
self.storage_config)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
+ self.assertEqual(
+ [call(md_devname, 5, [], [], devices[0])],
+ m_verify.call_args_list)
def test_raid_handler_preserve_verifies_md_device(self):
""" raid_handler preserve verifies existing raid device. """
@@ -2027,7 +2047,7 @@ class TestRaidHandler(CiTestCase):
block_meta.raid_handler(self.storage_config['mddevice'],
self.storage_config)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
- self.assertEqual([call(md_devname, 5, devices, [])],
+ self.assertEqual([call(md_devname, 5, devices, [], None)],
self.m_mdadm.md_check.call_args_list)
def test_raid_handler_preserve_verifies_md_device_after_assemble(self):
@@ -2037,12 +2057,12 @@ class TestRaidHandler(CiTestCase):
self.random_string()]
md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
- self.m_mdadm.md_check.side_effect = iter([False, True])
+ self.m_mdadm.md_check.side_effect = iter([ValueError(), None])
self.storage_config['mddevice']['preserve'] = True
block_meta.raid_handler(self.storage_config['mddevice'],
self.storage_config)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
- self.assertEqual([call(md_devname, 5, devices, [])] * 2,
+ self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
self.m_mdadm.md_check.call_args_list)
self.assertEqual([call(md_devname, devices, [])],
self.m_mdadm.mdadm_assemble.call_args_list)
@@ -2054,13 +2074,13 @@ class TestRaidHandler(CiTestCase):
self.random_string()]
md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
- self.m_mdadm.md_check.side_effect = iter([False, False])
+ self.m_mdadm.md_check.side_effect = iter([ValueError(), ValueError()])
self.storage_config['mddevice']['preserve'] = True
- with self.assertRaises(RuntimeError):
+ with self.assertRaises(ValueError):
block_meta.raid_handler(self.storage_config['mddevice'],
self.storage_config)
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
- self.assertEqual([call(md_devname, 5, devices, [])] * 2,
+ self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
self.m_mdadm.md_check.call_args_list)
self.assertEqual([call(md_devname, devices, [])],
self.m_mdadm.mdadm_assemble.call_args_list)
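
As a closing illustration of what the schemas.py change permits, here is a hedged sketch of the two allowed shapes of a RAID storage-config entry, written as Python dicts of the kind the schema validates. All ids, names and device references are invented for illustration and are not taken from this merge proposal.

# Illustrative only; values are made up.  The new 'oneOf' clause requires a
# raid entry to carry exactly one of 'devices' or 'container'.
raid_with_devices = {
    'id': 'md0',
    'type': 'raid',
    'name': 'md0',
    'raidlevel': 1,
    'devices': ['disk1p1', 'disk2p1'],
}

raid_in_container = {
    'id': 'md126',
    'type': 'raid',
    'name': 'md126',
    'raidlevel': 1,
    # Instead of listing member devices, this entry names the id of another
    # raid entry whose raidlevel is 'container'; md_check then verifies the
    # MD_CONTAINER reported by mdadm instead of the member device list.
    'container': 'imsm_container',
}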
Follow ups
- [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Server Team CI bot, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Server Team CI bot, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Server Team CI bot, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Server Team CI bot, 2021-05-10)
- [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-10)
- [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-10)
- [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-10)
- [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Ryan Harper, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Server Team CI bot, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-10)
- [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-10)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Dan Bungert, 2021-05-07)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Ryan Harper, 2021-05-07)
- [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Michael Hudson-Doyle, 2021-05-07)
- Re: [Merge] ~mwhudson/curtin:mdadm-check-container into curtin:master (From: Server Team CI bot, 2021-05-07)