
curtin-dev team mailing list archive

[Merge] ~gyurco/curtin:imsm into curtin:master


György Szombathelyi has proposed merging ~gyurco/curtin:imsm into curtin:master.

Commit message:
Support imsm external metadata RAID containers LP: #1893661

Requested reviews:
  curtin developers (curtin-dev)

For more details, see:
https://code.launchpad.net/~gyurco/curtin/+git/curtin/+merge/390307
-- 
Your team curtin developers is requested to review the proposed merge of ~gyurco/curtin:imsm into curtin:master.
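
For orientation before the diff: with this change an imsm setup is described
by two storage-config items, a container entry plus a member array that
references it by id. Below is a minimal sketch of the pair as raid_handler()
would receive them; every id, name and disk reference is invented for the
example.

    # Illustrative only: an imsm container and a raid5 volume inside it,
    # expressed as the config dicts raid_handler() consumes.
    imsm_container = {
        'id': 'raid-container0',
        'type': 'raid',
        'name': 'imsm0',
        'raidlevel': 'container',   # new: accepted raidlevel
        'metadata': 'imsm',         # new: forwarded to mdadm --metadata
        'devices': ['nvme-disk0', 'nvme-disk1', 'nvme-disk2'],
    }
    member_volume = {
        'id': 'raid-volume0',
        'type': 'raid',
        'name': 'md126',
        'raidlevel': 5,
        'container': 'raid-container0',   # new: replaces a devices list
    }
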
diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
index 116ee81..ce1399e 100644
--- a/curtin/block/clear_holders.py
+++ b/curtin/block/clear_holders.py
@@ -165,11 +165,17 @@ def shutdown_mdadm(device):
     """
 
     blockdev = block.sysfs_to_devpath(device)
+    query = mdadm.mdadm_query_detail(blockdev)
+
+    if query.get('MD_CONTAINER'):
+        LOG.info('Array is in a container, skipping discovery of raid '
+                 'devices and spares for %s', device)
+        md_devs = ()
+    else:
+        LOG.info('Discovering raid devices and spares for %s', device)
+        md_devs = (
+            mdadm.md_get_devices_list(blockdev) +
+            mdadm.md_get_spares_list(blockdev))
 
-    LOG.info('Discovering raid devices and spares for %s', device)
-    md_devs = (
-        mdadm.md_get_devices_list(blockdev) +
-        mdadm.md_get_spares_list(blockdev))
     mdadm.set_sync_action(blockdev, action="idle")
     mdadm.set_sync_action(blockdev, action="frozen")
 
@@ -186,7 +192,7 @@ def shutdown_mdadm(device):
         LOG.debug('Non-fatal error writing to array device %s, '
                   'proceeding with shutdown: %s', blockdev, e)
 
     LOG.info('Removing raid array members: %s', md_devs)
     for mddev in md_devs:
         try:
             mdadm.fail_device(blockdev, mddev)
@@ -198,7 +204,7 @@ def shutdown_mdadm(device):
     LOG.debug('using mdadm.mdadm_stop on dev: %s', blockdev)
     mdadm.mdadm_stop(blockdev)
 
-    LOG.debug('Wiping mdadm member devices: %s' % md_devs)
+    LOG.debug('Wiping mdadm member devices: %s', md_devs)
     for mddev in md_devs:
         mdadm.zero_device(mddev, force=True)
 
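Context for the clear_holders change above: an array that lives inside an
external-metadata container carries an MD_CONTAINER key in
'mdadm --detail --export' output, which mdadm_query_detail() returns as a
dict. A hedged illustration, with invented values:

    # Illustrative: a plausible mdadm_query_detail() result for an array
    # that is a member of an imsm container; values are made up.
    query = {
        'MD_LEVEL': 'raid5',
        'MD_DEVICES': '4',
        'MD_CONTAINER': '/dev/md/imsm0',  # present only for container members
        'MD_UUID': 'f9792759:7f61d0c7:e7313d5a:2e7c2e22',
    }
    # When MD_CONTAINER is present there are no per-member devices or
    # spares for shutdown_mdadm() to fail, remove and wipe individually.
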
diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
index 32b467c..97160e1 100644
--- a/curtin/block/mdadm.py
+++ b/curtin/block/mdadm.py
@@ -26,6 +26,7 @@ from curtin.log import LOG
 
 NOSPARE_RAID_LEVELS = [
     'linear', 'raid0', '0', 0,
+    'container'
 ]
 
 SPARE_RAID_LEVELS = [
@@ -145,7 +146,7 @@ def mdadm_assemble(md_devname=None, devices=[], spares=[], scan=False,
     udev.udevadm_settle()
 
 
-def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name="",
+def mdadm_create(md_devname, raidlevel, devices, spares=None,
+                 container_devcnt=None, md_name="",
                  metadata=None):
     LOG.debug('mdadm_create: ' +
               'md_name=%s raidlevel=%s ' % (md_devname, raidlevel) +
@@ -159,8 +160,9 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name="",
         raise ValueError('Invalid raidlevel: [{}]'.format(raidlevel))
 
     min_devices = md_minimum_devices(raidlevel)
-    if len(devices) < min_devices:
-        err = 'Not enough devices for raidlevel: ' + str(raidlevel)
+    devcnt = len(devices) if not container_devcnt else container_devcnt
+    if devcnt < min_devices:
+        err = ('Not enough devices (' + str(devcnt) + ') for raidlevel: ' +
+               str(raidlevel))
         err += ' minimum devices needed: ' + str(min_devices)
         raise ValueError(err)
 
@@ -171,19 +173,25 @@ def mdadm_create(md_devname, raidlevel, devices, spares=None, md_name="",
     (hostname, _err) = util.subp(["hostname", "-s"], rcs=[0], capture=True)
 
     cmd = ["mdadm", "--create", md_devname, "--run",
-           "--metadata=%s" % metadata,
            "--homehost=%s" % hostname.strip(),
-           "--level=%s" % raidlevel,
-           "--raid-devices=%s" % len(devices)]
+           "--raid-devices=%s" % devcnt]
+
+    if raidlevel == 'container' or not container_devcnt:
+        cmd.append("--metadata=%s" % metadata)
+    if raidlevel != 'container':
+        cmd.append("--level=%s" % raidlevel)
+
     if md_name:
         cmd.append("--name=%s" % md_name)
 
     for device in devices:
-        holders = get_holders(device)
-        if len(holders) > 0:
-            LOG.warning('Detected holders during mdadm creation: %s', holders)
-            raise OSError('Failed to remove holders from %s', device)
-        zero_device(device)
+        if not container_devcnt:
+            # clear non-container devices
+            holders = get_holders(device)
+            if len(holders) > 0:
+                LOG.warning('Detected holders during mdadm creation: %s', holders)
+                raise OSError('Failed to remove holders from %s', device)
+            zero_device(device)
         cmd.append(device)
 
     if spares:
@@ -508,7 +516,7 @@ def md_sysfs_attr(md_devname, attrname):
 
 
 def md_raidlevel_short(raidlevel):
-    if isinstance(raidlevel, int) or raidlevel in ['linear', 'stripe']:
+    if isinstance(raidlevel, int) or raidlevel in ['linear', 'stripe', 'container']:
         return raidlevel
 
     return int(raidlevel.replace('raid', ''))
@@ -517,7 +525,7 @@ def md_raidlevel_short(raidlevel):
 def md_minimum_devices(raidlevel):
     ''' return the minimum number of devices for a given raid level '''
     rl = md_raidlevel_short(raidlevel)
-    if rl in [0, 1, 'linear', 'stripe']:
+    if rl in [0, 1, 'linear', 'stripe', 'container']:
         return 2
     if rl in [5]:
         return 3
@@ -603,6 +611,11 @@ def __mdadm_detail_to_dict(input):
     # start after the first newline
     remainder = input[input.find('\n')+1:]
 
+    # imsm detail output contains per-volume sections such as '[126]:';
+    # keep only the first, container-level section
+    arraysection = remainder.find('\n[')
+    if arraysection != -1:
+        remainder = remainder[:arraysection]
+
     #  FIXME: probably could do a better regex to match the LHS which
     #         has one, two or three words
     rem = r'(\w+|\w+\ \w+|\w+\ \w+\ \w+)\ \:\ ([a-zA-Z0-9\-\.,: \(\)=\']+)'
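To make the two mdadm_create() paths above concrete, here are hedged example
calls (normally driven by raid_handler rather than called directly; the
device names are invented):

    from curtin.block import mdadm

    # 1. Create the imsm container from whole disks: --metadata=imsm is
    #    emitted, --level is omitted, and the member disks are checked for
    #    holders and zeroed as before.
    mdadm.mdadm_create('/dev/md/imsm0', 'container',
                       ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1'],
                       metadata='imsm')

    # 2. Create a raid5 volume inside the container: the only "device" is
    #    the container node, container_devcnt carries the real member count
    #    for the minimum-device check and --raid-devices, and --metadata is
    #    omitted because the container already fixes it.
    mdadm.mdadm_create('/dev/md126', 'raid5', ['/dev/md/imsm0'],
                       container_devcnt=3)
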
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index dee73b1..f061d12 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -1486,21 +1486,32 @@ def raid_handler(info, storage_config):
     raidlevel = info.get('raidlevel')
     spare_devices = info.get('spare_devices')
     md_devname = block.md_path(info.get('name'))
+    container = info.get('container')
+    metadata = info.get('metadata')
     preserve = config.value_as_boolean(info.get('preserve'))
-    if not devices:
-        raise ValueError("devices for raid must be specified")
+    if not devices and not container:
+        raise ValueError("devices or container for raid must be specified")
     if raidlevel not in ['linear', 'raid0', 0, 'stripe', 'raid1', 1, 'mirror',
-                         'raid4', 4, 'raid5', 5, 'raid6', 6, 'raid10', 10]:
+                         'raid4', 4, 'raid5', 5, 'raid6', 6, 'raid10', 10, 'container']:
         raise ValueError("invalid raidlevel '%s'" % raidlevel)
-    if raidlevel in ['linear', 'raid0', 0, 'stripe']:
+    if raidlevel in ['linear', 'raid0', 0, 'stripe', 'container']:
         if spare_devices:
             raise ValueError("spareunsupported in raidlevel '%s'" % raidlevel)
 
     LOG.debug('raid: cfg: %s', util.json_dumps(info))
-    device_paths = list(get_path_to_storage_volume(dev, storage_config) for
-                        dev in devices)
-    LOG.debug('raid: device path mapping: %s',
-              list(zip(devices, device_paths)))
+
+    devcnt = None
+    if container:
+        parent = storage_config.get(container)
+        if not parent:
+            raise ValueError("container with id '%s' not found" % parant)
+        device_paths = [parent.get('name')]
+        devcnt = len(parent.get('devices'))
+    else:
+        device_paths = list(get_path_to_storage_volume(dev, storage_config)
+                            for dev in devices)
+        LOG.debug('raid: device path mapping: %s',
+                  list(zip(devices, device_paths)))
 
     spare_device_paths = []
     if spare_devices:
@@ -1517,8 +1528,8 @@ def raid_handler(info, storage_config):
 
     if create_raid:
         mdadm.mdadm_create(md_devname, raidlevel,
-                           device_paths, spare_device_paths,
-                           info.get('mdname', ''))
+                           device_paths, spare_device_paths, devcnt,
+                           info.get('mdname', ''), metadata)
 
     wipe_mode = info.get('wipe')
     if wipe_mode:
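Tracing the new container branch in raid_handler() with the invented ids
from the earlier sketch (a hedged walk-through, not real handler output):

    # member entry: raidlevel=5, container='raid-container0', no devices
    parent = storage_config.get('raid-container0')
    device_paths = [parent.get('name')]   # the container node stands in
                                          # for the device list
    devcnt = len(parent.get('devices'))   # 3, taken from the container
    # ...ending in:
    # mdadm_create(md_devname, 5, device_paths, [], devcnt, '', None)
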
diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py
index dba0f74..d5d89ca 100644
--- a/tests/unittests/test_block_mdadm.py
+++ b/tests/unittests/test_block_mdadm.py
@@ -120,10 +120,12 @@ class TestBlockMdadmCreate(CiTestCase):
         side_effects.append(("", ""))  # mdadm create
         # build command how mdadm_create does
         cmd = (["mdadm", "--create", md_devname, "--run",
-                "--metadata=%s" % metadata,
-                "--homehost=%s" % hostname, "--level=%s" % raidlevel,
-                "--raid-devices=%s" % len(devices)] +
-               devices)
+                "--homehost=%s" % hostname,
+                "--raid-devices=%s" % len(devices),
+                "--metadata=%s" % metadata])
+        if raidlevel != 'container':
+            cmd += ["--level=%s" % raidlevel]
+        cmd += devices
         if spares:
             cmd += ["--spare-devices=%s" % len(spares)] + spares
 
@@ -228,6 +230,22 @@ class TestBlockMdadmCreate(CiTestCase):
                            devices=devices, spares=spares)
         self.mock_util.subp.assert_has_calls(expected_calls)
 
+    def test_mdadm_create_imsm_container(self):
+        md_devname = "/dev/md/imsm"
+        raidlevel = 'container'
+        devices = ['/dev/nvme0n1', '/dev/nvme1n1', '/dev/nvme2n1']
+        metadata = 'imsm'
+        spares = []
+        (side_effects, expected_calls) = self.prepare_mock(md_devname,
+                                                           raidlevel,
+                                                           devices,
+                                                           spares,
+                                                           metadata)
+
+        self.mock_util.subp.side_effect = side_effects
+        mdadm.mdadm_create(md_devname=md_devname, raidlevel=raidlevel,
+                           devices=devices, spares=spares, metadata=metadata)
+        self.mock_util.subp.assert_has_calls(expected_calls)
+
 
 class TestBlockMdadmExamine(CiTestCase):
     def setUp(self):
@@ -315,6 +333,70 @@ class TestBlockMdadmExamine(CiTestCase):
         self.mock_util.subp.assert_has_calls(expected_calls)
         self.assertEqual(data, {})
 
+    def test_mdadm_examine_no_export_imsm(self):
+        self.mock_util.subp.return_value = ("""/dev/nvme0n1:
+          Magic : Intel Raid ISM Cfg Sig.
+        Version : 1.3.00
+    Orig Family : 6f8c68e3
+         Family : 6f8c68e3
+     Generation : 00000112
+     Attributes : All supported
+           UUID : 7ec12162:ee5cd20b:0ac8b069:cfbd93ec
+       Checksum : 4a5cebe2 correct
+    MPB Sectors : 2
+          Disks : 4
+   RAID Devices : 1
+
+  Disk03 Serial : LJ910504Q41P0FGN
+          State : active
+             Id : 00000000
+    Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
+
+[126]:
+           UUID : f9792759:7f61d0c7:e7313d5a:2e7c2e22
+     RAID Level : 5
+        Members : 4
+          Slots : [UUUU]
+    Failed disk : none
+      This Slot : 3
+    Sector Size : 512
+     Array Size : 5860540416 (2794.52 GiB 3000.60 GB)
+   Per Dev Size : 1953515520 (931.51 GiB 1000.20 GB)
+  Sector Offset : 0
+    Num Stripes : 7630912
+     Chunk Size : 128 KiB
+       Reserved : 0
+  Migrate State : idle
+      Map State : normal
+    Dirty State : dirty
+     RWH Policy : off
+
+  Disk00 Serial : LJ91040H2Y1P0FGN
+          State : active
+             Id : 00000003
+    Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
+
+  Disk01 Serial : LJ916308CZ1P0FGN
+          State : active
+             Id : 00000002
+    Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
+
+  Disk02 Serial : LJ916308RF1P0FGN
+          State : active
+             Id : 00000001
+    Usable Size : 1953514766 (931.51 GiB 1000.20 GB)
+        """, "")   # mdadm --examine /dev/nvme0n1
+
+        device = "/dev/nvme0n1"
+        data = mdadm.mdadm_examine(device, export=False)
+
+        expected_calls = [
+            call(["mdadm", "--examine", device], capture=True),
+        ]
+        self.mock_util.subp.assert_has_calls(expected_calls)
+        self.assertEqual(data['uuid'],
+                         '7ec12162:ee5cd20b:0ac8b069:cfbd93ec')
+
 
 class TestBlockMdadmStop(CiTestCase):
     def setUp(self):
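The imsm dump in the new examine test contains a per-volume section headed
'[126]:'; the __mdadm_detail_to_dict() change earlier in this diff cuts the
text at the first such header, so only the container-level fields (Magic,
Version, UUID, ...) are parsed. A standalone sketch of that truncation, with
an invented helper name:

    def first_section(detail_text):
        """Drop everything from the first per-volume header, e.g. '[126]:'."""
        cut = detail_text.find('\n[')
        return detail_text if cut == -1 else detail_text[:cut]

    dump = ("UUID : 7ec12162:ee5cd20b:0ac8b069:cfbd93ec\n"
            "\n[126]:\n     RAID Level : 5\n")
    assert first_section(dump) == "UUID : 7ec12162:ee5cd20b:0ac8b069:cfbd93ec\n"
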
diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
index 25e9e79..6136050 100644
--- a/tests/unittests/test_clear_holders.py
+++ b/tests/unittests/test_clear_holders.py
@@ -238,6 +238,7 @@ class TestClearHolders(CiTestCase):
         mock_mdadm.md_present.return_value = False
         mock_mdadm.md_get_devices_list.return_value = devices
         mock_mdadm.md_get_spares_list.return_value = spares
+        mock_mdadm.mdadm_query_detail.return_value = {}
 
         clear_holders.shutdown_mdadm(self.test_syspath)
 
@@ -256,6 +257,37 @@ class TestClearHolders(CiTestCase):
         mock_mdadm.md_present.assert_called_with(self.test_blockdev)
         self.assertTrue(mock_log.debug.called)
 
+    @mock.patch('curtin.block.wipe_volume')
+    @mock.patch('curtin.block.path_to_kname')
+    @mock.patch('curtin.block.sysfs_to_devpath')
+    @mock.patch('curtin.block.clear_holders.time')
+    @mock.patch('curtin.block.clear_holders.util')
+    @mock.patch('curtin.block.clear_holders.LOG')
+    @mock.patch('curtin.block.clear_holders.mdadm')
+    def test_shutdown_mdadm_in_container(self, mock_mdadm, mock_log,
+                                         mock_util, mock_time, mock_sysdev,
+                                         mock_path, mock_wipe):
+        """test clear_holders.shutdown_mdadm"""
+        devices = ['/dev/wda1', '/dev/wda2']
+        spares = ['/dev/wdb1']
+        md_devs = (devices + spares)
+        mock_sysdev.return_value = self.test_blockdev
+        mock_path.return_value = self.test_blockdev
+        mock_mdadm.md_present.return_value = False
+        mock_mdadm.md_get_devices_list.return_value = devices
+        mock_mdadm.md_get_spares_list.return_value = spares
+        mock_mdadm.mdadm_query_detail.return_value = {
+            'MD_CONTAINER': '/dev/md/imsm0'}
+
+        clear_holders.shutdown_mdadm(self.test_syspath)
+
+        mock_wipe.assert_called_with(self.test_blockdev, exclusive=False,
+                                     mode='superblock', strict=True)
+        mock_mdadm.set_sync_action.assert_has_calls([
+                mock.call(self.test_blockdev, action="idle"),
+                mock.call(self.test_blockdev, action="frozen")])
+        mock_mdadm.mdadm_stop.assert_called_with(self.test_blockdev)
+        mock_mdadm.md_present.assert_called_with(self.test_blockdev)
+        self.assertTrue(mock_log.debug.called)
+
     @mock.patch('curtin.block.clear_holders.os')
     @mock.patch('curtin.block.clear_holders.time')
     @mock.patch('curtin.block.clear_holders.util')
@@ -271,6 +303,7 @@ class TestClearHolders(CiTestCase):
         mock_mdadm.md_present.return_value = True
         mock_util.subp.return_value = ("", "")
         mock_os.path.exists.return_value = True
+        mock_mdadm.mdadm_query_detail.return_value = {}
 
         with self.assertRaises(OSError):
             clear_holders.shutdown_mdadm(self.test_syspath)
@@ -295,6 +328,7 @@ class TestClearHolders(CiTestCase):
         mock_block.path_to_kname.return_value = self.test_blockdev
         mock_mdadm.md_present.return_value = True
         mock_os.path.exists.return_value = False
+        mock_mdadm.mdadm_query_detail.return_value = {}
 
         with self.assertRaises(OSError):
             clear_holders.shutdown_mdadm(self.test_syspath)
diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
index d954296..98be573 100644
--- a/tests/unittests/test_commands_block_meta.py
+++ b/tests/unittests/test_commands_block_meta.py
@@ -1892,7 +1892,7 @@ class TestRaidHandler(CiTestCase):
         self.m_getpath.side_effect = iter(devices)
         block_meta.raid_handler(self.storage_config['mddevice'],
                                 self.storage_config)
-        self.assertEqual([call(md_devname, 5, devices, [], '')],
+        self.assertEqual([call(md_devname, 5, devices, [], None, '', None)],
                          self.m_mdadm.mdadm_create.call_args_list)
 
     @patch('curtin.commands.block_meta.raid_verify')
