curtin-dev team mailing list archive
-
curtin-dev team
-
Mailing list archive
-
Message #03250
[Merge] ~ogayot/curtin:nvme-o-tcp-storageconfig into curtin:master
Olivier Gayot has proposed merging ~ogayot/curtin:nvme-o-tcp-storageconfig into curtin:master.
Requested reviews:
Server Team CI bot (server-team-bot): continuous-integration
curtin developers (curtin-dev)
For more details, see:
https://code.launchpad.net/~ogayot/curtin/+git/curtin/+merge/458446
This MP adds partial support for NVMe over TCP.
In the storage configuration, NVMe drives can now have a "nvme_controller" property, holding the identifier of an existing nvme_controller object (which is new), e.g.,
```
- type: disk
id: disk-nvme0n1
path: /dev/nvme0n1
nvme_controller: nvme-controller-nvme0
- type: disk
id: disk-nvme1n1
path: /dev/nvme1n1
nvme_controller: nvme-controller-nvme1
- type: nvme_controller
id: nvme-controller-nvme0
transport: pcie
- type: nvme_controller
id: nvme-controller-nvme1
transport: tcp
tcp_port: 4420
tcp_addr: 1.2.3.4
```
In the presence of a nvme_controller section having transport=tcp in the storage config, curtin will install nvme-stas (and nvme-cli) and configure the service so that the drives can be discovered and made available when the target system boots.
Current limitations:
* For the target system to boot correctly, we only support placing non-critical partitions (e.g., /home) on remote storage. For the next iteration, the plan is to support placing the rootfs (i.e., /) on remote NVMe drives, while preserving the /boot and /boot/efi partitions on local storage.
* If a nvme_controller section is present in the storage configuration, curtin will end up installing nvme-stas and nvme-cli on the target system, even if the nvme_controller section denotes the use of PCIe (local storage).
* Curtin itself will not automatically append the _netdev option if a given mount uses remote storage, so we would expect the storage configuration to specify the option, e.g.:
```
- type: mount
path: /home
device: ...
options: defaults,_netdev
id: mount-2
```
--
Your team curtin developers is requested to review the proposed merge of ~ogayot/curtin:nvme-o-tcp-storageconfig into curtin:master.
diff --git a/curtin/block/deps.py b/curtin/block/deps.py
index 8a310b6..e5370b6 100644
--- a/curtin/block/deps.py
+++ b/curtin/block/deps.py
@@ -69,6 +69,7 @@ def detect_required_packages_mapping(osfamily=DISTROS.debian):
'lvm_partition': ['lvm2'],
'lvm_volgroup': ['lvm2'],
'ntfs': ['ntfs-3g'],
+ 'nvme_controller': ['nvme-cli', 'nvme-stas'],
'raid': ['mdadm'],
'reiserfs': ['reiserfsprogs'],
'xfs': ['xfsprogs'],
@@ -89,6 +90,7 @@ def detect_required_packages_mapping(osfamily=DISTROS.debian):
'lvm_partition': ['lvm2'],
'lvm_volgroup': ['lvm2'],
'ntfs': [],
+ 'nvme_controller': [],
'raid': ['mdadm'],
'reiserfs': [],
'xfs': ['xfsprogs'],
@@ -109,6 +111,7 @@ def detect_required_packages_mapping(osfamily=DISTROS.debian):
'lvm_partition': ['lvm2'],
'lvm_volgroup': ['lvm2'],
'ntfs': [],
+ 'nvme_controller': [],
'raid': ['mdadm'],
'reiserfs': [],
'xfs': ['xfsprogs'],
diff --git a/curtin/block/schemas.py b/curtin/block/schemas.py
index 6a5c5b4..503e870 100644
--- a/curtin/block/schemas.py
+++ b/curtin/block/schemas.py
@@ -144,6 +144,7 @@ DISK = {
'minimum': 0,
'maximum': 1
},
+ 'nvme_controller': {'$ref': '#/definitions/ref_id'},
},
}
DM_CRYPT = {
@@ -275,6 +276,23 @@ MOUNT = {
'pattern': r'[0-9]'},
},
}
+NVME = {
+ '$schema': 'http://json-schema.org/draft-07/schema#',
+ 'name': 'CURTIN-NVME',
+ 'title': 'curtin storage configuration for NVMe controllers',
+ 'description': ('Declarative syntax for specifying NVMe controllers.'),
+ 'definitions': definitions,
+ 'required': ['id', 'type', 'transport'],
+ 'type': 'object',
+ 'additionalProperties': False,
+ 'properties': {
+ 'id': {'$ref': '#/definitions/id'},
+ 'type': {'const': 'nvme_controller'},
+ 'transport': {'type': 'string'},
+ 'tcp_port': {'type': 'integer'},
+ 'tcp_addr': {'type': 'string'},
+ },
+}
PARTITION = {
'$schema': 'http://json-schema.org/draft-07/schema#',
'name': 'CURTIN-PARTITION',
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index ebae27c..8f1011b 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -2018,6 +2018,11 @@ def zpool_handler(info, storage_config, context):
zfs_properties=fs_properties)
+def nvme_controller_handler(info, storage_config, context):
+ '''Handle the NVMe Controller storage section. This is currently a no-op,
+ the section is handled in curthooks.'''
+
+
def zfs_handler(info, storage_config, context):
"""
Create a zfs filesystem
@@ -2207,6 +2212,7 @@ def meta_custom(args):
'bcache': bcache_handler,
'zfs': zfs_handler,
'zpool': zpool_handler,
+ 'nvme_controller': nvme_controller_handler,
}
if args.testmode:
diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py
index 4be2cb4..971499d 100644
--- a/curtin/commands/curthooks.py
+++ b/curtin/commands/curthooks.py
@@ -1,14 +1,16 @@
# This file is part of curtin. See LICENSE file for copyright and license info.
import copy
+import contextlib
import glob
import os
+import pathlib
import platform
import re
import sys
import shutil
import textwrap
-from typing import List, Tuple
+from typing import List, Set, Tuple
from curtin import config
from curtin import block
@@ -1498,6 +1500,56 @@ def configure_mdadm(cfg, state_etcd, target, osfamily=DISTROS.debian):
data=None, target=target)
+def get_nvme_stas_controller_directives(cfg) -> Set[str]:
+ """Parse the storage configuration and return a set of "controller ="
+ directives to write in the [Controllers] section of a nvme-stas
+ configuration file."""
+ directives = set()
+ if 'storage' not in cfg or not isinstance(cfg['storage'], dict):
+ return directives
+ storage = cfg['storage']
+ if 'config' not in storage or storage['config'] == 'disabled':
+ return directives
+ config = storage['config']
+ for item in config:
+ if item['type'] != 'nvme_controller':
+ continue
+ if item['transport'] != 'tcp':
+ continue
+ controller_props = {
+ 'transport': 'tcp',
+ 'traddr': item["tcp_addr"],
+ 'trsvcid': item["tcp_port"],
+ }
+
+ props_str = ';'.join([f'{k}={v}' for k, v in controller_props.items()])
+ directives.add(f'controller = {props_str}')
+
+ return directives
+
+
+def configure_nvme_stas(cfg, target):
+ """If any NVMe controller using the TCP transport is present in the storage
+ configuration, create a nvme-stas configuration so that the remote drives
+ can be made available at boot."""
+ controllers = get_nvme_stas_controller_directives(cfg)
+
+ if not controllers:
+ return
+
+ LOG.info('NVMe-over-TCP configuration found, writing nvme-stas configuration')
+ target = pathlib.Path(target)
+ stas_dir = target / 'etc' / 'stas'
+ stas_dir.mkdir(parents=True, exist_ok=True)
+ with (stas_dir / 'stafd-curtin.conf').open('w', encoding='utf-8') as fh:
+ print('[Controllers]', file=fh)
+ for controller in controllers:
+ print(controller, file=fh)
+
+ with contextlib.suppress(FileNotFoundError):
+ (stas_dir / 'stafd.conf').replace(stas_dir / '.stafd.conf.bak')
+ (stas_dir / 'stafd.conf').symlink_to('stafd-curtin.conf')
+
def handle_cloudconfig(cfg, base_dir=None):
"""write cloud-init configuration files into base_dir.
@@ -1760,6 +1812,12 @@ def builtin_curthooks(cfg, target, state):
description="configuring raid (mdadm) service"):
configure_mdadm(cfg, state_etcd, target, osfamily=osfamily)
+ with events.ReportEventStack(
+ name=stack_prefix + '/configuring-nvme-stas-service',
+ reporting_enabled=True, level="INFO",
+ description="configuring NVMe STorage Appliance Services"):
+ configure_nvme_stas(cfg, target)
+
if osfamily == DISTROS.debian:
with events.ReportEventStack(
name=stack_prefix + '/installing-kernel',
diff --git a/curtin/storage_config.py b/curtin/storage_config.py
index af7b6f3..469db41 100644
--- a/curtin/storage_config.py
+++ b/curtin/storage_config.py
@@ -50,6 +50,7 @@ STORAGE_CONFIG_TYPES = {
'bcache': StorageConfig(type='bcache', schema=schemas.BCACHE),
'dasd': StorageConfig(type='dasd', schema=schemas.DASD),
'disk': StorageConfig(type='disk', schema=schemas.DISK),
+ 'nvme_controller': StorageConfig(type='nvme_controller', schema=schemas.NVME),
'dm_crypt': StorageConfig(type='dm_crypt', schema=schemas.DM_CRYPT),
'format': StorageConfig(type='format', schema=schemas.FORMAT),
'lvm_partition': StorageConfig(type='lvm_partition',
@@ -159,12 +160,13 @@ def _stype_to_deps(stype):
depends_keys = {
'bcache': {'backing_device', 'cache_device'},
'dasd': set(),
- 'disk': set(),
+ 'disk': {'nvme_controller'},
'dm_crypt': {'volume'},
'format': {'volume'},
'lvm_partition': {'volgroup'},
'lvm_volgroup': {'devices'},
'mount': {'device'},
+ 'nvme_controller': set(),
'partition': {'device'},
'raid': {'devices', 'spare_devices', 'container'},
'zfs': {'pool'},
@@ -184,6 +186,7 @@ def _stype_to_order_key(stype):
'lvm_partition': {'name'},
'lvm_volgroup': {'name'},
'mount': {'path'},
+ 'nvme_controller': default_sort,
'partition': {'number'},
'raid': default_sort,
'zfs': {'volume'},
@@ -204,7 +207,7 @@ def _validate_dep_type(source_id, dep_key, dep_id, sconfig):
'bcache': {'bcache', 'disk', 'dm_crypt', 'lvm_partition',
'partition', 'raid'},
'dasd': {},
- 'disk': {'dasd'},
+ 'disk': {'dasd', 'nvme_controller'},
'dm_crypt': {'bcache', 'disk', 'dm_crypt', 'lvm_partition',
'partition', 'raid'},
'format': {'bcache', 'disk', 'dm_crypt', 'lvm_partition',
@@ -212,6 +215,7 @@ def _validate_dep_type(source_id, dep_key, dep_id, sconfig):
'lvm_partition': {'lvm_volgroup'},
'lvm_volgroup': {'bcache', 'disk', 'dm_crypt', 'partition', 'raid'},
'mount': {'format'},
+ 'nvme_controller': {},
'partition': {'bcache', 'disk', 'raid', 'partition'},
'raid': {'bcache', 'disk', 'dm_crypt', 'lvm_partition',
'partition', 'raid'},
@@ -231,7 +235,7 @@ def _validate_dep_type(source_id, dep_key, dep_id, sconfig):
if source_type not in depends:
raise ValueError('Invalid source_type: %s' % source_type)
if dep_type not in depends:
- raise ValueError('Invalid type in depedency: %s' % dep_type)
+ raise ValueError('Invalid type in dependency: %s' % dep_type)
source_deps = depends[source_type]
result = dep_type in source_deps
@@ -753,6 +757,11 @@ class BlockdevParser(ProbertParser):
entry['ptable'] = ptype
else:
entry['ptable'] = schemas._ptable_unsupported
+
+ match = re.fullmatch(r'/dev/(?P<ctrler>nvme\d+)n\d', devname)
+ if match is not None:
+ entry['nvme_controller'] = f'nvme-controller-{match["ctrler"]}'
+
return entry
if entry['type'] == 'partition':
@@ -1174,6 +1183,39 @@ class MountParser(ProbertParser):
return (configs, errors)
+class NVMeParser(ProbertParser):
+
+ probe_data_key = 'nvme'
+
+ def asdict(self, ctrler_id: str, ctrler_props):
+ action = {
+ 'type': 'nvme_controller',
+ 'id': f'nvme-controller-{ctrler_id}',
+ 'transport': ctrler_props['NVME_TRTYPE'],
+ }
+ if action['transport'] == 'tcp':
+ action['tcp_addr'] = ctrler_props['NVME_TRADDR']
+ action['tcp_port'] = int(ctrler_props['NVME_TRSVCID'])
+
+ return action
+
+ def parse(self):
+ """ parse probert 'nvme' data format """
+
+ errors = []
+ configs = []
+ for ctrler_id, ctrler_props in self.class_data.items():
+ entry = self.asdict(ctrler_id, ctrler_props)
+ if entry:
+ try:
+ validate_config(entry)
+ except ValueError as e:
+ errors.append(e)
+ continue
+ configs.append(entry)
+ return configs, errors
+
+
class ZfsParser(ProbertParser):
probe_data_key = 'zfs'
@@ -1318,6 +1360,7 @@ def extract_storage_config(probe_data, strict=False):
'lvm': LvmParser,
'raid': RaidParser,
'mount': MountParser,
+ 'nvme': NVMeParser,
'zfs': ZfsParser,
}
configs = []
@@ -1339,11 +1382,12 @@ def extract_storage_config(probe_data, strict=False):
raids = [cfg for cfg in configs if cfg.get('type') == 'raid']
dmcrypts = [cfg for cfg in configs if cfg.get('type') == 'dm_crypt']
mounts = [cfg for cfg in configs if cfg.get('type') == 'mount']
+ nvmes = [cfg for cfg in configs if cfg.get('type') == 'nvme_controller']
bcache = [cfg for cfg in configs if cfg.get('type') == 'bcache']
zpool = [cfg for cfg in configs if cfg.get('type') == 'zpool']
zfs = [cfg for cfg in configs if cfg.get('type') == 'zfs']
- ordered = (dasd + disk + part + format + lvols + lparts + raids +
+ ordered = (nvmes + dasd + disk + part + format + lvols + lparts + raids +
dmcrypts + mounts + bcache + zpool + zfs)
final_config = {'storage': {'version': 2, 'config': ordered}}
diff --git a/tests/data/probert_storage_bogus_wwn.json b/tests/data/probert_storage_bogus_wwn.json
index b3211fd..d817515 100644
--- a/tests/data/probert_storage_bogus_wwn.json
+++ b/tests/data/probert_storage_bogus_wwn.json
@@ -1254,5 +1254,48 @@
"bcache": {
"backing": {},
"caching": {}
+ },
+ "nvme": {
+ "nvme0": {
+ "DEVNAME": "/dev/nvme0",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1c.4/0000:04:00.0/nvme/nvme0",
+ "MAJOR": "238",
+ "MINOR": "0",
+ "NVME_TRTYPE": "pcie",
+ "SUBSYSTEM": "nvme",
+ "attrs": {
+ "address": "0000:04:00.0",
+ "cntlid": "5",
+ "cntrltype": "io",
+ "dctype": "none",
+ "dev": "238:0",
+ "device": null,
+ "firmware_rev": "2B2QEXM7",
+ "hmb": "1",
+ "kato": "0",
+ "model": "SAMSUNG SSD 970 EVO Plus 500GB",
+ "numa_node": "0",
+ "power/async": "disabled",
+ "power/autosuspend_delay_ms": null,
+ "power/control": "auto",
+ "power/pm_qos_latency_tolerance_us": "100000",
+ "power/runtime_active_kids": "0",
+ "power/runtime_active_time": "0",
+ "power/runtime_enabled": "disabled",
+ "power/runtime_status": "unsupported",
+ "power/runtime_suspended_time": "0",
+ "power/runtime_usage": "0",
+ "queue_count": "9",
+ "rescan_controller": null,
+ "reset_controller": null,
+ "serial": "S4EVNJ0N203359W",
+ "sqsize": "1023",
+ "state": "live",
+ "subsysnqn": "nqn.1994-11.com.samsung:nvme:970M.2:S4EVNJ0N203359W",
+ "subsystem": "nvme",
+ "transport": "pcie",
+ "uevent": "MAJOR=238\nMINOR=0\nDEVNAME=nvme0\nNVME_TRTYPE=pcie"
+ }
+ }
}
}
diff --git a/tests/data/probert_storage_nvme_multipath.json b/tests/data/probert_storage_nvme_multipath.json
index 56a761d..9718368 100644
--- a/tests/data/probert_storage_nvme_multipath.json
+++ b/tests/data/probert_storage_nvme_multipath.json
@@ -306,5 +306,48 @@
"uevent": "MAJOR=259\nMINOR=4\nDEVNAME=nvme0n1p3\nDEVTYPE=partition\nPARTN=3"
}
}
+ },
+ "nvme": {
+ "nvme0": {
+ "DEVNAME": "/dev/nvme0",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1d.0/0000:03:00.0/nvme/nvme0",
+ "MAJOR": "238",
+ "MINOR": "0",
+ "NVME_TRTYPE": "pcie",
+ "SUBSYSTEM": "nvme",
+ "attrs": {
+ "address": "0000:03:00.0",
+ "cntlid": "5",
+ "cntrltype": "io",
+ "dctype": "none",
+ "dev": "238:0",
+ "device": null,
+ "firmware_rev": "GPJA0B3Q",
+ "hmb": "1",
+ "kato": "0",
+ "model": "SAMSUNG MZPLL3T2HAJQ-00005",
+ "numa_node": "0",
+ "power/async": "disabled",
+ "power/autosuspend_delay_ms": null,
+ "power/control": "auto",
+ "power/pm_qos_latency_tolerance_us": "100000",
+ "power/runtime_active_kids": "0",
+ "power/runtime_active_time": "0",
+ "power/runtime_enabled": "disabled",
+ "power/runtime_status": "unsupported",
+ "power/runtime_suspended_time": "0",
+ "power/runtime_usage": "0",
+ "queue_count": "9",
+ "rescan_controller": null,
+ "reset_controller": null,
+ "serial": "S4CCNE0M300015",
+ "sqsize": "1023",
+ "state": "live",
+ "subsysnqn": "nqn.1994-11.com.samsung:nvme:MZPLL3T2HAJQ-00005M.2:S64DMZ0T351601T ",
+ "subsystem": "nvme",
+ "transport": "pcie",
+ "uevent": "MAJOR=238\nMINOR=0\nDEVNAME=nvme0\nNVME_TRTYPE=pcie"
+ }
+ }
}
}
diff --git a/tests/data/probert_storage_nvme_uuid.json b/tests/data/probert_storage_nvme_uuid.json
index c54239b..d93dffc 100644
--- a/tests/data/probert_storage_nvme_uuid.json
+++ b/tests/data/probert_storage_nvme_uuid.json
@@ -306,5 +306,48 @@
"uevent": "MAJOR=259\nMINOR=4\nDEVNAME=nvme0n1p3\nDEVTYPE=partition\nPARTN=3"
}
}
+ },
+ "nvme": {
+ "nvme0": {
+ "DEVNAME": "/dev/nvme0",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1d.0/0000:03:00.0/nvme/nvme0",
+ "MAJOR": "238",
+ "MINOR": "0",
+ "NVME_TRTYPE": "pcie",
+ "SUBSYSTEM": "nvme",
+ "attrs": {
+ "address": "0000:03:00.0",
+ "cntlid": "5",
+ "cntrltype": "io",
+ "dctype": "none",
+ "dev": "238:0",
+ "device": null,
+ "firmware_rev": "GPJA0B3Q",
+ "hmb": "1",
+ "kato": "0",
+ "model": "SAMSUNG MZPLL3T2HAJQ-00005",
+ "numa_node": "0",
+ "power/async": "disabled",
+ "power/autosuspend_delay_ms": null,
+ "power/control": "auto",
+ "power/pm_qos_latency_tolerance_us": "100000",
+ "power/runtime_active_kids": "0",
+ "power/runtime_active_time": "0",
+ "power/runtime_enabled": "disabled",
+ "power/runtime_status": "unsupported",
+ "power/runtime_suspended_time": "0",
+ "power/runtime_usage": "0",
+ "queue_count": "9",
+ "rescan_controller": null,
+ "reset_controller": null,
+ "serial": "S4CCNE0M300015",
+ "sqsize": "1023",
+ "state": "live",
+ "subsysnqn": "nqn.1994-11.com.samsung:nvme:MZPLL3T2HAJQ-00005M.2:S64DMZ0T351601T ",
+ "subsystem": "nvme",
+ "transport": "pcie",
+ "uevent": "MAJOR=238\nMINOR=0\nDEVNAME=nvme0\nNVME_TRTYPE=pcie"
+ }
+ }
}
}
diff --git a/tests/unittests/test_curthooks.py b/tests/unittests/test_curthooks.py
index 0728260..73b8973 100644
--- a/tests/unittests/test_curthooks.py
+++ b/tests/unittests/test_curthooks.py
@@ -2017,6 +2017,81 @@ class TestCurthooksGrubDebconf(CiTestCase):
self.m_debconf.assert_called_with(expectedcfg, target)
+class TestCurthooksNVMeStas(CiTestCase):
+ def test_get_nvme_stas_controller_directives__no_nvme_controller(self):
+ self.assertFalse(curthooks.get_nvme_stas_controller_directives({
+ "storage": {
+ "config": [
+ {"type": "partition"},
+ {"type": "mount"},
+ {"type": "disk"},
+ ],
+ },
+ }))
+
+ def test_get_nvme_stas_controller_directives__pcie_controller(self):
+ self.assertFalse(curthooks.get_nvme_stas_controller_directives({
+ "storage": {
+ "config": [
+ {"type": "nvme_controller", "transport": "pcie"},
+ ],
+ },
+ }))
+
+ def test_get_nvme_stas_controller_directives__tcp_controller(self):
+ expected = {"controller = transport=tcp;traddr=1.2.3.4;trsvcid=1111"}
+
+ result = curthooks.get_nvme_stas_controller_directives({
+ "storage": {
+ "config": [
+ {
+ "type": "nvme_controller",
+ "transport": "tcp",
+ "tcp_addr": "1.2.3.4",
+ "tcp_port": "1111",
+ },
+ ],
+ },
+ })
+ self.assertEqual(expected, result)
+
+ def test_get_nvme_stas_controller_directives__three_nvme_controllers(self):
+ expected = {"controller = transport=tcp;traddr=1.2.3.4;trsvcid=1111",
+ "controller = transport=tcp;traddr=4.5.6.7;trsvcid=1212"}
+
+ result = curthooks.get_nvme_stas_controller_directives({
+ "storage": {
+ "config": [
+ {
+ "type": "nvme_controller",
+ "transport": "tcp",
+ "tcp_addr": "1.2.3.4",
+ "tcp_port": "1111",
+ }, {
+ "type": "nvme_controller",
+ "transport": "tcp",
+ "tcp_addr": "4.5.6.7",
+ "tcp_port": "1212",
+ }, {
+ "type": "nvme_controller",
+ "transport": "pcie",
+ },
+ ],
+ },
+ })
+ self.assertEqual(expected, result)
+
+ def test_get_nvme_stas_controller_directives__empty_conf(self):
+ self.assertFalse(curthooks.get_nvme_stas_controller_directives({}))
+ self.assertFalse(curthooks.get_nvme_stas_controller_directives({"storage": False}))
+ self.assertFalse(curthooks.get_nvme_stas_controller_directives({"storage": {}}))
+ self.assertFalse(curthooks.get_nvme_stas_controller_directives({
+ "storage": {
+ "config": "disabled",
+ },
+ }))
+
+
class TestUefiFindGrubDeviceIds(CiTestCase):
def _sconfig(self, cfg):
diff --git a/tests/unittests/test_storage_config.py b/tests/unittests/test_storage_config.py
index caaac29..7b0f68c 100644
--- a/tests/unittests/test_storage_config.py
+++ b/tests/unittests/test_storage_config.py
@@ -1087,6 +1087,7 @@ class TestExtractStorageConfig(CiTestCase):
'serial': 'SAMSUNG MZPLL3T2HAJQ-00005_S4CCNE0M300015',
'type': 'disk',
'wwn': 'eui.344343304d3000150025384500000004',
+ 'nvme_controller': 'nvme-controller-nvme0',
}
self.assertEqual(1, len(disks))
self.assertEqual(expected_dict, disks[0])
@@ -1104,6 +1105,7 @@ class TestExtractStorageConfig(CiTestCase):
'serial': 'SAMSUNG MZPLL3T2HAJQ-00005_S4CCNE0M300015',
'type': 'disk',
'wwn': 'uuid.344343304d3000150025384500000004',
+ 'nvme_controller': 'nvme-controller-nvme0',
}
self.assertEqual(1, len(disks))
self.assertEqual(expected_dict, disks[0])