curtin-dev team mailing list archive
-
curtin-dev team
-
Mailing list archive
-
Message #01984
[Merge] ~dbungert/curtin:ubuntu/devel into curtin:ubuntu/devel
Dan Bungert has proposed merging ~dbungert/curtin:ubuntu/devel into curtin:ubuntu/devel.
Commit message:
21.3 merge for Jammy
Requested reviews:
curtin developers (curtin-dev)
For more details, see:
https://code.launchpad.net/~dbungert/curtin/+git/curtin/+merge/412821
--
Your team curtin developers is requested to review the proposed merge of ~dbungert/curtin:ubuntu/devel into curtin:ubuntu/devel.
diff --git a/Makefile b/Makefile
index 187132c..c04c4cf 100644
--- a/Makefile
+++ b/Makefile
@@ -70,6 +70,12 @@ vmtest-deps:
sync-images:
@$(CWD)/tools/vmtest-sync-images
+integration-deps:
+ @$(CWD)/tools/vmtest-create-static-images
+
+integration: integration-deps
+ $(PYTHON3) -m pytest tests/integration
+
clean:
rm -rf doc/_build
diff --git a/bin/curtin b/bin/curtin
index 793fbcb..89cb319 100755
--- a/bin/curtin
+++ b/bin/curtin
@@ -4,7 +4,7 @@
PY3OR2_MAIN="curtin"
PY3OR2_MCHECK="curtin.deps.check"
PY3OR2_PYTHONS=${PY3OR2_PYTHONS:-"python3:python"}
-PYTHON=${PY3OR2_PYTHON}
+PYTHON=${PYTHON:-"$PY3OR2_PYTHON"}
PY3OR2_DEBUG=${PY3OR2_DEBUG:-0}
debug() {
diff --git a/curtin/__init__.py b/curtin/__init__.py
index 0178a7b..8a3e850 100644
--- a/curtin/__init__.py
+++ b/curtin/__init__.py
@@ -36,8 +36,10 @@ FEATURES = [
'HAS_VERSION_MODULE',
# uefi_reoder has fallback support if BootCurrent is missing
'UEFI_REORDER_FALLBACK_SUPPORT',
+ # fstabs by default are output with passno = 1 if not nodev
+ 'FSTAB_DEFAULT_FSCK_ON_BLK'
]
-__version__ = "21.2"
+__version__ = "21.3"
# vi: ts=4 expandtab syntax=python
diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
index 1b33002..ca0bc10 100644
--- a/curtin/block/__init__.py
+++ b/curtin/block/__init__.py
@@ -62,7 +62,8 @@ def dev_path(devname):
convert device name to path in /dev
"""
if devname.startswith('/dev/'):
- return devname
+ # it could be something like /dev/mapper/mpatha-part2
+ return os.path.realpath(devname)
else:
return '/dev/' + devname
@@ -131,7 +132,8 @@ def partition_kname(disk_kname, partition_number):
os.path.realpath('%s-part%s' % (disk_link,
partition_number)))
- for dev_type in ['bcache', 'nvme', 'mmcblk', 'cciss', 'mpath', 'md']:
+ for dev_type in ['bcache', 'nvme', 'mmcblk', 'cciss', 'mpath', 'md',
+ 'loop']:
if disk_kname.startswith(dev_type):
partition_number = "p%s" % partition_number
break
@@ -225,6 +227,10 @@ def _lsblock_pairs_to_dict(lines):
cur = {}
for tok in toks:
k, v = tok.split("=", 1)
+ if k == 'MAJ_MIN':
+ k = 'MAJ:MIN'
+ else:
+ k = k.replace('_', '-')
cur[k] = v
# use KNAME, as NAME may include spaces and other info,
# for example, lvm decices may show 'dm0 lvm1'
@@ -1203,7 +1209,7 @@ def wipe_file(path, reader=None, buflen=4 * 1024 * 1024, exclusive=True):
fp.write(pbuf)
-def quick_zero(path, partitions=True, exclusive=True, strict=False):
+def quick_zero(path, partitions=True, exclusive=True):
"""
zero 1M at front, 1M at end, and 1M at front
if this is a block device and partitions is true, then
@@ -1227,11 +1233,11 @@ def quick_zero(path, partitions=True, exclusive=True, strict=False):
for (pt, kname, ptnum) in pt_names:
LOG.debug('Wiping path: dev:%s kname:%s partnum:%s',
pt, kname, ptnum)
- quick_zero(pt, partitions=False, strict=strict)
+ quick_zero(pt, partitions=False)
LOG.debug("wiping 1M on %s at offsets %s", path, offsets)
return zero_file_at_offsets(path, offsets, buflen=buflen, count=count,
- exclusive=exclusive, strict=strict)
+ exclusive=exclusive)
def zero_file_at_offsets(path, offsets, buflen=1024, count=1024, strict=False,
@@ -1286,7 +1292,7 @@ def zero_file_at_offsets(path, offsets, buflen=1024, count=1024, strict=False,
fp.write(buf)
-def wipe_volume(path, mode="superblock", exclusive=True, strict=False):
+def wipe_volume(path, mode="superblock", exclusive=True):
"""wipe a volume/block device
:param path: a path to a block device
@@ -1299,7 +1305,6 @@ def wipe_volume(path, mode="superblock", exclusive=True, strict=False):
volume and beginning and end of any partitions that are
known to be on this device.
:param exclusive: boolean to control how path is opened
- :param strict: boolean to control when to raise errors on write failures
"""
if mode == "pvremove":
# We need to use --force --force in case it's already in a volgroup and
@@ -1317,9 +1322,9 @@ def wipe_volume(path, mode="superblock", exclusive=True, strict=False):
with open("/dev/urandom", "rb") as reader:
wipe_file(path, reader=reader.read, exclusive=exclusive)
elif mode == "superblock":
- quick_zero(path, partitions=False, exclusive=exclusive, strict=strict)
+ quick_zero(path, partitions=False, exclusive=exclusive)
elif mode == "superblock-recursive":
- quick_zero(path, partitions=True, exclusive=exclusive, strict=strict)
+ quick_zero(path, partitions=True, exclusive=exclusive)
else:
raise ValueError("wipe mode %s not supported" % mode)
diff --git a/curtin/block/clear_holders.py b/curtin/block/clear_holders.py
index c182d91..ba02657 100644
--- a/curtin/block/clear_holders.py
+++ b/curtin/block/clear_holders.py
@@ -332,7 +332,7 @@ def wipe_superblock(device):
time.sleep(wait)
-def _wipe_superblock(blockdev, exclusive=True, strict=True):
+def _wipe_superblock(blockdev, exclusive=True):
""" No checks, just call wipe_volume """
retries = [1, 3, 5, 7]
@@ -341,8 +341,7 @@ def _wipe_superblock(blockdev, exclusive=True, strict=True):
LOG.debug('wiping %s attempt %s/%s',
blockdev, attempt + 1, len(retries))
try:
- block.wipe_volume(blockdev, mode='superblock',
- exclusive=exclusive, strict=strict)
+ block.wipe_volume(blockdev, mode='superblock', exclusive=exclusive)
LOG.debug('successfully wiped device %s on attempt %s/%s',
blockdev, attempt + 1, len(retries))
return
diff --git a/curtin/block/lvm.py b/curtin/block/lvm.py
index bd0f1aa..333b46f 100644
--- a/curtin/block/lvm.py
+++ b/curtin/block/lvm.py
@@ -81,7 +81,7 @@ def activate_volgroups(multipath=False):
"""
cmd = ['vgchange', '--activate=y']
if multipath:
- # only operate on mp devices
+ # only operate on mp devices or encrypted volumes
mp_filter = generate_multipath_dev_mapper_filter()
cmd.extend(['--config', 'devices{ %s }' % mp_filter])
@@ -100,12 +100,14 @@ def _generate_multipath_filter(accept=None):
def generate_multipath_dev_mapper_filter():
- return _generate_multipath_filter(accept=['/dev/mapper/mpath.*'])
+ return _generate_multipath_filter(
+ accept=['/dev/mapper/mpath.*', '/dev/mapper/dm_crypt-.*'])
def generate_multipath_dm_uuid_filter():
- return _generate_multipath_filter(
- accept=['/dev/disk/by-id/dm-uuid-.*mpath-.*'])
+ return _generate_multipath_filter(accept=[
+ '/dev/disk/by-id/dm-uuid-.*mpath-.*',
+ '/dev/disk/by-id/.*dm_crypt-.*'])
def lvm_scan(activate=True, multipath=False):
@@ -126,8 +128,9 @@ def lvm_scan(activate=True, multipath=False):
release = 'xenial'
if multipath:
- # only operate on mp devices
- mponly = 'devices{ filter = [ "a|/dev/mapper/mpath.*|", "r|.*|" ] }'
+ # only operate on mp devices or encrypted volumes
+ mponly = 'devices{ filter = [ "a|%s|", "a|%s|", "r|.*|" ] }' % (
+ '/dev/mapper/mpath.*', '/dev/mapper/dm_crypt-.*')
for cmd in [['pvscan'], ['vgscan']]:
if release != 'precise' and lvmetad_running():
diff --git a/curtin/block/mdadm.py b/curtin/block/mdadm.py
index a6ac970..7bb6b96 100644
--- a/curtin/block/mdadm.py
+++ b/curtin/block/mdadm.py
@@ -505,9 +505,9 @@ def md_sysfs_attr_path(md_devname, attrname):
return os.path.join(sysmd, attrname)
-def md_sysfs_attr(md_devname, attrname):
+def md_sysfs_attr(md_devname, attrname, default=''):
""" Return the attribute str of an md device found under the 'md' dir """
- attrdata = ''
+ attrdata = default
if not valid_mdname(md_devname):
raise ValueError('Invalid md devicename: [{}]'.format(md_devname))
@@ -645,45 +645,6 @@ def md_device_key_dev(devname):
return 'MD_DEVICE_' + dev_short(devname) + '_DEV'
-def __upgrade_detail_dict(detail):
- ''' This method attempts to convert mdadm --detail output into
- a KEY=VALUE output the same as mdadm --detail --export from mdadm v3.3
- '''
- # if the input already has MD_UUID, it's already been converted
- if 'MD_UUID' in detail:
- return detail
-
- md_detail = {
- 'MD_LEVEL': detail['raid_level'],
- 'MD_DEVICES': detail['raid_devices'],
- 'MD_METADATA': detail['version'],
- 'MD_NAME': detail['name'].split()[0],
- }
-
- # exmaine has ARRAY UUID
- if 'array_uuid' in detail:
- md_detail.update({'MD_UUID': detail['array_uuid']})
- # query,detail has UUID
- elif 'uuid' in detail:
- md_detail.update({'MD_UUID': detail['uuid']})
-
- device = detail['device']
-
- # MD_DEVICE_vdc1_DEV=/dev/vdc1
- md_detail.update({md_device_key_dev(device): device})
-
- if 'device_role' in detail:
- role = detail['device_role']
- if role != 'spare':
- # device_role = Active device 1
- role = role.split()[-1]
-
- # MD_DEVICE_vdc1_ROLE=spare
- md_detail.update({md_device_key_role(device): role})
-
- return md_detail
-
-
def md_read_run_mdadm_map():
'''
md1 1.2 59beb40f:4c202f67:088e702b:efdf577a /dev/md1
@@ -719,8 +680,6 @@ def md_check_array_uuid(md_devname, md_uuid):
'%s -> %s != %s' % (mduuid_path, mdlink_devname, md_devname))
raise ValueError(err)
- return True
-
def md_get_uuid(md_devname):
valid_mdname(md_devname)
@@ -741,13 +700,24 @@ def _compare_devlist(expected, found):
" Missing: {} Extra: {}".format(missing, extra))
-def md_check_raidlevel(raidlevel):
+def md_check_raidlevel(md_devname, detail, raidlevel):
# Validate raidlevel against what curtin supports configuring
if raidlevel not in VALID_RAID_LEVELS:
err = ('Invalid raidlevel: ' + raidlevel +
' Must be one of: ' + str(VALID_RAID_LEVELS))
raise ValueError(err)
- return True
+ # normalize raidlevel to the values mdadm prints.
+ if isinstance(raidlevel, int) or len(raidlevel) <= 2:
+ raidlevel = 'raid' + str(raidlevel)
+ elif raidlevel == 'stripe':
+ raidlevel = 'raid0'
+ elif raidlevel == 'mirror':
+ raidlevel = 'raid1'
+ actual_level = detail.get("MD_LEVEL")
+ if actual_level != raidlevel:
+ raise ValueError(
+ "raid device %s should have level %r but has level %r" % (
+ md_devname, raidlevel, actual_level))
def md_block_until_in_sync(md_devname):
@@ -770,24 +740,25 @@ def md_check_array_state(md_devname):
# check array state
writable = md_check_array_state_rw(md_devname)
- degraded = md_sysfs_attr(md_devname, 'degraded')
- sync_action = md_sysfs_attr(md_devname, 'sync_action')
+ # Raid 0 arrays do not have degraded or sync_action sysfs
+ # attributes.
+ degraded = md_sysfs_attr(md_devname, 'degraded', None)
+ sync_action = md_sysfs_attr(md_devname, 'sync_action', None)
if not writable:
raise ValueError('Array not in writable state: ' + md_devname)
- if degraded != "0":
+ if degraded is not None and degraded != "0":
raise ValueError('Array in degraded state: ' + md_devname)
- if sync_action != "idle":
- raise ValueError('Array syncing, not idle state: ' + md_devname)
-
- return True
+ if degraded is not None and sync_action not in ("idle", "resync"):
+ raise ValueError(
+ 'Array is %s, not idle: %s' % (sync_action, md_devname))
def md_check_uuid(md_devname):
md_uuid = md_get_uuid(md_devname)
if not md_uuid:
raise ValueError('Failed to get md UUID from device: ' + md_devname)
- return md_check_array_uuid(md_devname, md_uuid)
+ md_check_array_uuid(md_devname, md_uuid)
def md_check_devices(md_devname, devices):
@@ -833,26 +804,35 @@ def md_check_array_membership(md_devname, devices):
raise ValueError(err)
-def md_check(md_devname, raidlevel, devices=[], spares=[]):
+def md_check(md_devname, raidlevel, devices, spares, container):
''' Check passed in variables from storage configuration against
the system we're running upon.
'''
LOG.debug('RAID validation: ' +
- 'name={} raidlevel={} devices={} spares={}'.format(md_devname,
- raidlevel,
- devices,
- spares))
+ 'name={} raidlevel={} devices={} spares={} container={}'.format(
+ md_devname, raidlevel, devices, spares, container))
assert_valid_devpath(md_devname)
- md_check_array_state(md_devname)
- md_check_raidlevel(raidlevel)
+ detail = mdadm_query_detail(md_devname)
+
+ if raidlevel != "container":
+ md_check_array_state(md_devname)
+ md_check_raidlevel(md_devname, detail, raidlevel)
md_check_uuid(md_devname)
- md_check_devices(md_devname, devices)
- md_check_spares(md_devname, spares)
- md_check_array_membership(md_devname, devices + spares)
+ if container is None:
+ md_check_devices(md_devname, devices)
+ md_check_spares(md_devname, spares)
+ md_check_array_membership(md_devname, devices + spares)
+ else:
+ if 'MD_CONTAINER' not in detail:
+ raise ValueError("%s is not in a container" % (
+ md_devname))
+ actual_container = os.path.realpath(detail['MD_CONTAINER'])
+ if actual_container != container:
+ raise ValueError("%s is in container %r, not %r" % (
+ md_devname, actual_container, container))
LOG.debug('RAID array OK: ' + md_devname)
- return True
def md_is_in_container(md_devname):
diff --git a/curtin/block/multipath.py b/curtin/block/multipath.py
index e317184..0f9170e 100644
--- a/curtin/block/multipath.py
+++ b/curtin/block/multipath.py
@@ -65,11 +65,10 @@ def is_mpath_device(devpath, info=None):
def is_mpath_member(devpath, info=None):
""" Check if a device is a multipath member (a path), returns boolean. """
result = False
- try:
- util.subp(['multipath', '-c', devpath], capture=True)
+ if not info:
+ info = udev.udevadm_info(devpath)
+ if info.get("DM_MULTIPATH_DEVICE_PATH") == "1":
result = True
- except util.ProcessExecutionError:
- pass
LOG.debug('%s is multipath device member? %s', devpath, result)
return result
@@ -81,7 +80,7 @@ def is_mpath_partition(devpath, info=None):
if devpath.startswith('/dev/dm-'):
if not info:
info = udev.udevadm_info(devpath)
- if 'DM_PART' in udev.udevadm_info(devpath):
+ if 'DM_PART' in info and 'DM_MPATH' in info:
result = True
LOG.debug("%s is multipath device partition? %s", devpath, result)
diff --git a/curtin/block/schemas.py b/curtin/block/schemas.py
index 4dc2f0a..84a5279 100644
--- a/curtin/block/schemas.py
+++ b/curtin/block/schemas.py
@@ -29,11 +29,6 @@ definitions = {
'type': 'string',
'pattern': _uuid_pattern,
},
- 'fstype': {
- 'type': 'string',
- 'oneOf': [
- {'pattern': r'^__.*__$'}, # XXX: Accept vmtest values?
- {'enum': _fstypes}]},
'params': {
'type': 'object',
'patternProperties': {
@@ -141,7 +136,7 @@ DISK = {
'type': 'string',
'oneOf': [
{'pattern': r'^0x(\d|[a-zA-Z])+'},
- {'pattern': r'^(nvme|eui)\.([-0-9a-zA-Z])+'}],
+ {'pattern': r'^(nvme|eui|uuid)\.([-0-9a-zA-Z])+'}],
},
'grub_device': {
'type': ['boolean', 'integer'],
@@ -187,11 +182,20 @@ FORMAT = {
'preserve': {'$ref': '#/definitions/preserve'},
'uuid': {'$ref': '#/definitions/uuid'}, # XXX: This is not used
'type': {'const': 'format'},
- 'fstype': {'$ref': '#/definitions/fstype'},
+ 'fstype': {'type': 'string'},
'label': {'type': 'string'},
'volume': {'$ref': '#/definitions/ref_id'},
'extra_options': {'type': 'array', 'items': {'type': 'string'}},
- }
+ },
+ 'anyOf': [
+ # XXX: Accept vmtest values?
+ {'properties': {'fstype': {'pattern': r'^__.*__$'}}},
+ {'properties': {'fstype': {'enum': _fstypes}}},
+ {
+ 'properties': {'preserve': {'enum': [True]}},
+ 'required': ['preserve'] # this looks redundant but isn't
+ }
+ ]
}
LVM_PARTITION = {
'$schema': 'http://json-schema.org/draft-07/schema#',
@@ -262,6 +266,10 @@ MOUNT = {
],
},
'spec': {'type': 'string'}, # XXX: Tighten this to fstab fs_spec
+ 'freq': {'type': ['integer', 'string'],
+ 'pattern': r'[0-9]'},
+ 'passno': {'type': ['integer', 'string'],
+ 'pattern': r'[0-9]'},
},
}
PARTITION = {
@@ -304,9 +312,13 @@ RAID = {
'title': 'curtin storage configuration for a RAID.',
'description': ('Declarative syntax for specifying RAID.'),
'definitions': definitions,
- 'required': ['id', 'type', 'name', 'raidlevel', 'devices'],
+ 'required': ['id', 'type', 'name', 'raidlevel'],
'type': 'object',
'additionalProperties': False,
+ 'oneOf': [
+ {'required': ['devices']},
+ {'required': ['container']},
+ ],
'properties': {
'id': {'$ref': '#/definitions/id'},
'devices': {'$ref': '#/definitions/devices'},
@@ -315,6 +327,7 @@ RAID = {
'metadata': {'type': ['string', 'number']},
'preserve': {'$ref': '#/definitions/preserve'},
'ptable': {'$ref': '#/definitions/ptable'},
+ 'wipe': {'$ref': '#/definitions/wipe'},
'spare_devices': {'$ref': '#/definitions/devices'},
'container': {'$ref': '#/definitions/id'},
'type': {'const': 'raid'},
diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py
index e7d84c0..9ea2d30 100644
--- a/curtin/commands/apt_config.py
+++ b/curtin/commands/apt_config.py
@@ -11,6 +11,8 @@ import os
import re
import sys
+from aptsources.sourceslist import SourceEntry
+
from curtin.log import LOG
from curtin import (config, distro, gpg, paths, util)
@@ -186,9 +188,11 @@ def mirrorurl_to_apt_fileprefix(mirror):
return string
-def rename_apt_lists(new_mirrors, target=None):
+def rename_apt_lists(new_mirrors, target=None, arch=None):
"""rename_apt_lists - rename apt lists to preserve old cache data"""
- default_mirrors = get_default_mirrors(distro.get_architecture(target))
+ if arch is None:
+ arch = distro.get_architecture(target)
+ default_mirrors = get_default_mirrors(arch)
pre = paths.target_path(target, APT_LISTS)
for (name, omirror) in default_mirrors.items():
@@ -211,22 +215,40 @@ def rename_apt_lists(new_mirrors, target=None):
LOG.warn("Failed to rename apt list:", exc_info=True)
-def mirror_to_placeholder(tmpl, mirror, placeholder):
- """ mirror_to_placeholder
- replace the specified mirror in a template with a placeholder string
- Checks for existance of the expected mirror and warns if not found
- """
- if mirror not in tmpl:
- if mirror.endswith("/") and mirror[:-1] in tmpl:
- LOG.debug("mirror_to_placeholder: '%s' did not exist in tmpl, "
- "did without a trailing /. Accomodating.", mirror)
- mirror = mirror[:-1]
- else:
- LOG.warn("Expected mirror '%s' not found in: %s", mirror, tmpl)
- return tmpl.replace(mirror, placeholder)
+def update_default_mirrors(entries, mirrors, target, arch=None):
+ """replace existing default repos with the configured mirror"""
+
+ if arch is None:
+ arch = distro.get_architecture(target)
+ defaults = get_default_mirrors(arch)
+ mirrors_replacement = {
+ defaults['PRIMARY']: mirrors["MIRROR"],
+ defaults['SECURITY']: mirrors["SECURITY"],
+ }
+ # allow original file URIs without the trailing slash to match mirror
+ # specifications that have it
+ noslash = {}
+ for key in mirrors_replacement.keys():
+ if key[-1] == '/':
+ noslash[key[:-1]] = mirrors_replacement[key]
-def map_known_suites(suite):
+ mirrors_replacement.update(noslash)
+
+ for entry in entries:
+ entry.uri = mirrors_replacement.get(entry.uri, entry.uri)
+ return entries
+
+
+def update_mirrors(entries, mirrors):
+ """perform template replacement of mirror placeholders with configured
+ values"""
+ for entry in entries:
+ entry.uri = util.render_string(entry.uri, mirrors)
+ return entries
+
+
+def map_known_suites(suite, release):
"""there are a few default names which will be auto-extended.
This comes at the inability to use those names literally as suites,
but on the other hand increases readability of the cfg quite a lot"""
@@ -236,83 +258,101 @@ def map_known_suites(suite):
'proposed': '$RELEASE-proposed',
'release': '$RELEASE'}
try:
- retsuite = mapping[suite]
+ template_suite = mapping[suite]
except KeyError:
- retsuite = suite
- return retsuite
+ template_suite = suite
+ return util.render_string(template_suite, {'RELEASE': release})
+
+def commentify(entry):
+ # handle commenting ourselves - it handles lines with
+ # options better
+ return SourceEntry('# ' + str(entry))
-def disable_suites(disabled, src, release):
+
+def disable_suites(disabled, entries, release):
"""reads the config for suites to be disabled and removes those
from the template"""
if not disabled:
- return src
+ return entries
- retsrc = src
+ suites_to_disable = []
for suite in disabled:
- suite = map_known_suites(suite)
- releasesuite = util.render_string(suite, {'RELEASE': release})
- LOG.debug("Disabling suite %s as %s", suite, releasesuite)
+ release_suite = map_known_suites(suite, release)
+ LOG.debug("Disabling suite %s as %s", suite, release_suite)
+ suites_to_disable.append(release_suite)
- newsrc = ""
- for line in retsrc.splitlines(True):
- if line.startswith("#"):
- newsrc += line
- continue
+ output = []
+ for entry in entries:
+ if not entry.disabled and entry.dist in suites_to_disable:
+ entry = commentify(entry)
+ output.append(entry)
+ return output
- # sources.list allow options in cols[1] which can have spaces
- # so the actual suite can be [2] or later. example:
- # deb [ arch=amd64,armel k=v ] http://example.com/debian
- cols = line.split()
- if len(cols) > 1:
- pcol = 2
- if cols[1].startswith("["):
- for col in cols[1:]:
- pcol += 1
- if col.endswith("]"):
- break
- if cols[pcol] == releasesuite:
- line = '# suite disabled by curtin: %s' % line
- newsrc += line
- retsrc = newsrc
+def disable_components(disabled, entries):
+ """reads the config for components to be disabled and remove those
+ from the entries"""
+ if not disabled:
+ return entries
+
+ # purposefully skip disabling the main component
+ comps_to_disable = {comp for comp in disabled if comp != 'main'}
+
+ output = []
+ for entry in entries:
+ if not entry.disabled and comps_to_disable.intersection(entry.comps):
+ output.append(commentify(entry))
+ entry.comps = [comp for comp in entry.comps
+ if comp not in comps_to_disable]
+ if entry.comps:
+ output.append(entry)
+ else:
+ output.append(entry)
+ return output
+
- return retsrc
+def update_dist(entries, release):
+ for entry in entries:
+ entry.dist = util.render_string(entry.dist, {'RELEASE': release})
+ return entries
-def generate_sources_list(cfg, release, mirrors, target=None):
+def entries_to_str(entries):
+ return ''.join([str(entry) + '\n' for entry in entries])
+
+
+def generate_sources_list(cfg, release, mirrors, target=None, arch=None):
""" generate_sources_list
create a source.list file based on a custom or default template
by replacing mirrors and release in the template
"""
- default_mirrors = get_default_mirrors(distro.get_architecture(target))
aptsrc = "/etc/apt/sources.list"
- params = {'RELEASE': release}
- for k in mirrors:
- params[k] = mirrors[k]
tmpl = cfg.get('sources_list', None)
+ from_file = False
if tmpl is None:
LOG.info("No custom template provided, fall back to modify"
"mirrors in %s on the target system", aptsrc)
tmpl = util.load_file(paths.target_path(target, aptsrc))
- # Strategy if no custom template was provided:
- # - Only replacing mirrors
- # - no reason to replace "release" as it is from target anyway
- # - The less we depend upon, the more stable this is against changes
- # - warn if expected original content wasn't found
- tmpl = mirror_to_placeholder(tmpl, default_mirrors['PRIMARY'],
- "$MIRROR")
- tmpl = mirror_to_placeholder(tmpl, default_mirrors['SECURITY'],
- "$SECURITY")
+ from_file = True
+
+ entries = [SourceEntry(line) for line in tmpl.splitlines(True)]
+ if from_file:
+ # when loading from an existing file, we also replace default
+ # URIs with configured mirrors
+ entries = update_default_mirrors(entries, mirrors, target, arch)
+
+ entries = update_mirrors(entries, mirrors)
+ entries = update_dist(entries, release)
+ entries = disable_suites(cfg.get('disable_suites'), entries, release)
+ entries = disable_components(cfg.get('disable_components'), entries)
+ output = entries_to_str(entries)
orig = paths.target_path(target, aptsrc)
if os.path.exists(orig):
os.rename(orig, orig + ".curtin.old")
-
- rendered = util.render_string(tmpl, params)
- disabled = disable_suites(cfg.get('disable_suites'), rendered, release)
- util.write_file(paths.target_path(target, aptsrc), disabled, mode=0o644)
+ util.write_file(paths.target_path(target, aptsrc), output, mode=0o644)
def apply_preserve_sources_list(target):
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index cf6bc02..1913cb4 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -27,7 +27,7 @@ import time
FstabData = namedtuple(
"FstabData", ('spec', 'path', 'fstype', 'options', 'freq', 'passno',
'device'))
-FstabData.__new__.__defaults__ = (None, None, None, "", "0", "0", None)
+FstabData.__new__.__defaults__ = (None, None, None, "", "0", "-1", None)
SIMPLE = 'simple'
@@ -72,6 +72,8 @@ CMD_ARGUMENTS = (
'choices': ['ext4', 'ext3'], 'default': None}),
('--umount', {'help': 'unmount any mounted filesystems before exit',
'action': 'store_true', 'default': False}),
+ ('--testmode', {'help': 'enable some test actions',
+ 'action': 'store_true', 'default': False}),
('mode', {'help': 'meta-mode to use',
'choices': [CUSTOM, SIMPLE, SIMPLE_BOOT]}),
)
@@ -81,7 +83,10 @@ CMD_ARGUMENTS = (
@logged_time("BLOCK_META")
def block_meta(args):
# main entry point for the block-meta command.
- state = util.load_command_environment(strict=True)
+ if args.testmode:
+ state = {}
+ else:
+ state = util.load_command_environment(strict=True)
cfg = config.load_command_config(args, state)
dd_images = util.get_dd_images(cfg.get('sources', {}))
@@ -99,7 +104,8 @@ def block_meta(args):
args.devices = devices
LOG.debug('clearing devices=%s', devices)
- meta_clear(devices, state.get('report_stack_prefix', ''))
+ if devices:
+ meta_clear(devices, state.get('report_stack_prefix', ''))
# dd-images requires use of meta_simple
if len(dd_images) > 0 and args.force_mode is False:
@@ -145,7 +151,15 @@ def write_image_to_disk(source, dev):
extractor[source['type']] + '| dd bs=4M of="$2"'),
'--', source['uri'], devnode])
util.subp(['partprobe', devnode])
+
+ udevadm_trigger([devnode])
+ try:
+ lvm.activate_volgroups()
+ except util.ProcessExecutionError:
+ # partial vg may not come up due to missing members, that's OK
+ pass
udevadm_settle()
+
# Images from MAAS have well-known/required paths present
# on the rootfs partition. Use these values to select the
# root (target) partition to complete installation.
@@ -517,6 +531,9 @@ def get_path_to_storage_volume(volume, storage_config):
volume_path = block.kname_to_path(bcache_kname)
LOG.debug('got bcache volume path %s', volume_path)
+ elif vol.get('type') == 'image':
+ volume_path = vol['dev']
+
else:
raise NotImplementedError("cannot determine the path to storage \
volume '%s' with type '%s'" % (volume, vol.get('type')))
@@ -530,7 +547,29 @@ def get_path_to_storage_volume(volume, storage_config):
return volume_path
-def dasd_handler(info, storage_config):
+DEVS = set()
+
+
+def image_handler(info, storage_config, handlers):
+ path = info['path']
+ if os.path.exists(path):
+ os.unlink(path)
+ try:
+ with open(path, 'wb') as fp:
+ fp.truncate(int(util.human2bytes(info['size'])))
+ dev = util.subp([
+ 'losetup', '--show', '--find', path],
+ capture=True)[0].strip()
+ except BaseException:
+ if os.path.exists(path):
+ os.unlink(path)
+ raise
+ info['dev'] = dev
+ DEVS.add(dev)
+ handlers['disk'](info, storage_config, handlers)
+
+
+def dasd_handler(info, storage_config, handlers):
""" Prepare the specified dasd device per configuration
params: info: dictionary of configuration, required keys are:
@@ -575,7 +614,7 @@ def dasd_handler(info, storage_config):
"Dasd %s failed to format" % dasd_device.devname)
-def disk_handler(info, storage_config):
+def disk_handler(info, storage_config, handlers):
_dos_names = ['dos', 'msdos']
ptable = info.get('ptable')
if ptable and ptable not in PTABLES_VALID:
@@ -583,7 +622,18 @@ def disk_handler(info, storage_config):
'Invalid partition table type: %s in %s' % (ptable, info))
disk = get_path_to_storage_volume(info.get('id'), storage_config)
- if config.value_as_boolean(info.get('preserve')):
+ # For disks, 'preserve' is what indicates whether the partition
+ # table should be reused or recreated but for compound devices
+ # such as raids, it indicates if the raid should be created or
+ # assumed to already exist. So to allow a pre-existing raid to get
+ # a new partition table, we use presence of 'wipe' field to
+ # indicate if the disk should be reformatted or not.
+ if info['type'] == 'disk':
+ preserve_ptable = config.value_as_boolean(info.get('preserve'))
+ else:
+ preserve_ptable = config.value_as_boolean(info.get('preserve')) \
+ and not config.value_as_boolean(info.get('wipe'))
+ if preserve_ptable:
# Handle preserve flag, verifying if ptable specified in config
if ptable and ptable != PTABLE_UNSUPPORTED:
current_ptable = block.get_part_table_type(disk)
@@ -591,8 +641,9 @@ def disk_handler(info, storage_config):
if current_ptable not in PTABLES_SUPPORTED:
raise ValueError(
"disk '%s' does not have correct partition table or "
- "cannot be read, but preserve is set to true. "
- "cannot continue installation." % info.get('id'))
+ "cannot be read, but preserve is set to true (or wipe is "
+ "not set). cannot continue installation." %
+ info.get('id'))
LOG.info("disk '%s' marked to be preserved, so keeping partition "
"table" % disk)
else:
@@ -714,12 +765,7 @@ def verify_exists(devpath):
raise RuntimeError("Device %s does not exist" % devpath)
-def verify_size(devpath, expected_size_bytes, sfdisk_info=None):
- if not sfdisk_info:
- sfdisk_info = block.sfdisk_info(devpath)
-
- part_info = block.get_partition_sfdisk_info(devpath,
- sfdisk_info=sfdisk_info)
+def verify_size(devpath, expected_size_bytes, part_info):
(found_type, _code) = ptable_uuid_to_flag_entry(part_info.get('type'))
if found_type == 'extended':
found_size_bytes = int(part_info['size']) * 512
@@ -733,30 +779,25 @@ def verify_size(devpath, expected_size_bytes, sfdisk_info=None):
raise RuntimeError(msg)
-def verify_ptable_flag(devpath, expected_flag, sfdisk_info=None):
+def verify_ptable_flag(devpath, expected_flag, label, part_info):
if (expected_flag not in SGDISK_FLAGS.keys()) and (expected_flag not in
MSDOS_FLAGS.keys()):
raise RuntimeError(
'Cannot verify unknown partition flag: %s' % expected_flag)
- if not sfdisk_info:
- sfdisk_info = block.sfdisk_info(devpath)
-
- entry = block.get_partition_sfdisk_info(devpath, sfdisk_info=sfdisk_info)
- LOG.debug("Device %s ptable entry: %s", devpath, util.json_dumps(entry))
found_flag = None
- if (sfdisk_info['label'] in ('dos', 'msdos')):
+ if (label in ('dos', 'msdos')):
if expected_flag == 'boot':
- found_flag = 'boot' if entry.get('bootable') is True else None
+ found_flag = 'boot' if part_info.get('bootable') is True else None
elif expected_flag == 'extended':
- (found_flag, _code) = ptable_uuid_to_flag_entry(entry['type'])
+ (found_flag, _code) = ptable_uuid_to_flag_entry(part_info['type'])
elif expected_flag == 'logical':
(_parent, partnumber) = block.get_blockdev_for_partition(devpath)
found_flag = 'logical' if int(partnumber) > 4 else None
# gpt and msdos primary partitions look up flag by entry['type']
if found_flag is None:
- (found_flag, _code) = ptable_uuid_to_flag_entry(entry['type'])
+ (found_flag, _code) = ptable_uuid_to_flag_entry(part_info['type'])
msg = (
'Verifying %s partition flag, expecting %s, found %s' % (
devpath, expected_flag, found_flag))
@@ -765,16 +806,13 @@ def verify_ptable_flag(devpath, expected_flag, sfdisk_info=None):
raise RuntimeError(msg)
-def partition_verify_sfdisk(devpath, info):
- verify_exists(devpath)
- sfdisk_info = block.sfdisk_info(devpath)
- if not sfdisk_info:
- raise RuntimeError('Failed to extract sfdisk info from %s' % devpath)
- verify_size(devpath, int(util.human2bytes(info['size'])),
- sfdisk_info=sfdisk_info)
- expected_flag = info.get('flag')
+def partition_verify_sfdisk(part_action, label, sfdisk_part_info):
+ devpath = sfdisk_part_info['node']
+ verify_size(
+ devpath, int(util.human2bytes(part_action['size'])), sfdisk_part_info)
+ expected_flag = part_action.get('flag')
if expected_flag:
- verify_ptable_flag(devpath, info['flag'], sfdisk_info=sfdisk_info)
+ verify_ptable_flag(devpath, expected_flag, label, sfdisk_part_info)
def partition_verify_fdasd(disk_path, partnumber, info):
@@ -792,7 +830,7 @@ def partition_verify_fdasd(disk_path, partnumber, info):
raise RuntimeError("dasd partitions do not support flags")
-def partition_handler(info, storage_config):
+def partition_handler(info, storage_config, handlers):
device = info.get('device')
size = info.get('size')
flag = info.get('flag')
@@ -881,12 +919,14 @@ def partition_handler(info, storage_config):
# Handle preserve flag
create_partition = True
if config.value_as_boolean(info.get('preserve')):
+ part_path = block.dev_path(
+ block.partition_kname(disk_kname, partnumber))
if disk_ptable == 'vtoc':
partition_verify_fdasd(disk, partnumber, info)
else:
- part_path = block.dev_path(
- block.partition_kname(disk_kname, partnumber))
- partition_verify_sfdisk(part_path, info)
+ sfdisk_info = block.sfdisk_info(disk)
+ part_info = block.get_partition_sfdisk_info(part_path, sfdisk_info)
+ partition_verify_sfdisk(info, sfdisk_info['label'], part_info)
LOG.debug(
'%s partition %s already present, skipping create',
disk, partnumber)
@@ -979,7 +1019,7 @@ def partition_handler(info, storage_config):
make_dname(info.get('id'), storage_config)
-def format_handler(info, storage_config):
+def format_handler(info, storage_config, handlers):
volume = info.get('volume')
if not volume:
raise ValueError("volume must be specified for partition '%s'" %
@@ -1020,7 +1060,7 @@ def mount_data(info, storage_config):
fstype = info.get('fstype')
path = info.get('path')
freq = str(info.get('freq', 0))
- passno = str(info.get('passno', 0))
+ passno = str(info.get('passno', -1))
# turn empty options into "defaults", which works in fstab and mount -o.
if not info.get('options'):
@@ -1114,6 +1154,28 @@ def get_volume_spec(device_path):
return devlinks[0] if len(devlinks) else device_path
+def proc_filesystems_passno(fstype):
+ """Examine /proc/filesystems - is this fstype listed and marked nodev?
+
+ :param fstype: a filesystem name such as ext2 or tmpfs
+ :return passno for fstype - nodev fs get 0, else 1"""
+
+ if fstype in ('swap', 'none'):
+ return "0"
+ with open('/proc/filesystems', 'r') as procfs:
+ for line in procfs.readlines():
+ tokens = line.strip('\n').split('\t')
+ if len(tokens) < 2:
+ continue
+
+ devstatus, curfs = tokens[:2]
+ if curfs != fstype:
+ continue
+
+ return "0" if devstatus == 'nodev' else "1"
+ return "1"
+
+
def fstab_line_for_data(fdata):
"""Return a string representing fdata in /etc/fstab format.
@@ -1151,8 +1213,12 @@ def fstab_line_for_data(fdata):
else:
comment = None
+ passno = fdata.passno
+ if int(passno) < 0:
+ passno = proc_filesystems_passno(fdata.fstype)
+
entry = ' '.join((spec, path, fdata.fstype, options,
- fdata.freq, fdata.passno)) + "\n"
+ fdata.freq, passno)) + "\n"
line = '\n'.join([comment, entry] if comment else [entry])
return line
@@ -1203,7 +1269,7 @@ def mount_apply(fdata, target=None, fstab=None):
LOG.info("fstab not in environment, so not writing")
-def mount_handler(info, storage_config):
+def mount_handler(info, storage_config, handlers):
""" Handle storage config type: mount
info = {
@@ -1239,7 +1305,7 @@ def lvm_volgroup_verify(vg_name, device_paths):
verify_volgroup_members(vg_name, device_paths)
-def lvm_volgroup_handler(info, storage_config):
+def lvm_volgroup_handler(info, storage_config, handlers):
devices = info.get('devices')
device_paths = []
name = info.get('name')
@@ -1300,7 +1366,7 @@ def lvm_partition_verify(lv_name, vg_name, info):
verify_lv_size(lv_name, info['size'])
-def lvm_partition_handler(info, storage_config):
+def lvm_partition_handler(info, storage_config, handlers):
volgroup = storage_config[info['volgroup']]['name']
name = info['name']
if not volgroup:
@@ -1324,7 +1390,7 @@ def lvm_partition_handler(info, storage_config):
cmd = ["lvcreate", volgroup, "--name", name, "--zero=y"]
release = distro.lsb_release()['codename']
if release not in ['precise', 'trusty']:
- cmd.extend(["--wipesignatures=y"])
+ cmd.extend(["--wipesignatures=y", "--yes"])
if info.get('size'):
size = util.human2bytes(info["size"])
@@ -1362,7 +1428,7 @@ def dm_crypt_verify(dmcrypt_dev, volume_path):
verify_blkdev_used(dmcrypt_dev, volume_path)
-def dm_crypt_handler(info, storage_config):
+def dm_crypt_handler(info, storage_config, handlers):
state = util.load_command_environment(strict=True)
volume = info.get('volume')
keysize = info.get('keysize')
@@ -1470,27 +1536,41 @@ def dm_crypt_handler(info, storage_config):
so not writing crypttab")
-def verify_md_components(md_devname, raidlevel, device_paths, spare_paths):
+def verify_md_components(md_devname, raidlevel, device_paths, spare_paths,
+ container):
# check if the array is already up, if not try to assemble
- check_ok = mdadm.md_check(md_devname, raidlevel, device_paths,
- spare_paths)
- if not check_ok:
+ errors = []
+ check_ok = False
+ try:
+ mdadm.md_check(md_devname, raidlevel, device_paths,
+ spare_paths, container)
+ check_ok = True
+ except ValueError as err1:
+ errors.append(err1)
LOG.info("assembling preserved raid for %s", md_devname)
mdadm.mdadm_assemble(md_devname, device_paths, spare_paths)
- check_ok = mdadm.md_check(md_devname, raidlevel, device_paths,
- spare_paths)
- msg = ('Verifying %s raid composition, found raid is %s'
+ try:
+ mdadm.md_check(md_devname, raidlevel, device_paths,
+ spare_paths, container)
+ check_ok = True
+ except ValueError as err2:
+ errors.append(err2)
+
+ msg = ('Verified %s raid composition, raid is %s'
% (md_devname, 'OK' if check_ok else 'not OK'))
LOG.debug(msg)
if not check_ok:
- raise RuntimeError(msg)
+ for err in errors:
+ LOG.error("Error checking raid %s: %s", md_devname, err)
+ raise ValueError(msg)
-def raid_verify(md_devname, raidlevel, device_paths, spare_paths):
- verify_md_components(md_devname, raidlevel, device_paths, spare_paths)
+def raid_verify(md_devname, raidlevel, device_paths, spare_paths, container):
+ verify_md_components(
+ md_devname, raidlevel, device_paths, spare_paths, container)
-def raid_handler(info, storage_config):
+def raid_handler(info, storage_config, handlers):
state = util.load_command_environment(strict=True)
devices = info.get('devices')
raidlevel = info.get('raidlevel')
@@ -1530,7 +1610,9 @@ def raid_handler(info, storage_config):
create_raid = True
if preserve:
- raid_verify(md_devname, raidlevel, device_paths, spare_device_paths)
+ raid_verify(
+ md_devname, raidlevel, device_paths, spare_device_paths,
+ container_dev)
LOG.debug('raid %s already present, skipping create', md_devname)
create_raid = False
@@ -1570,7 +1652,7 @@ def raid_handler(info, storage_config):
# If ptable is specified, call disk_handler on this mdadm device to create
# the table
if info.get('ptable'):
- disk_handler(info, storage_config)
+ handlers['disk'](info, storage_config, handlers)
def verify_bcache_cachedev(cachedev):
@@ -1637,7 +1719,7 @@ def bcache_verify(cachedev, backingdev, cache_mode):
return True
-def bcache_handler(info, storage_config):
+def bcache_handler(info, storage_config, handlers):
backing_device = get_path_to_storage_volume(info.get('backing_device'),
storage_config)
cache_device = get_path_to_storage_volume(info.get('cache_device'),
@@ -1685,13 +1767,13 @@ def bcache_handler(info, storage_config):
make_dname(info.get('id'), storage_config)
if info.get('ptable'):
- disk_handler(info, storage_config)
+ handlers['disk'](info, storage_config, handlers)
LOG.debug('Finished bcache creation for backing %s or caching %s',
backing_device, cache_device)
-def zpool_handler(info, storage_config):
+def zpool_handler(info, storage_config, handlers):
"""
Create a zpool based in storage_configuration
"""
@@ -1730,7 +1812,7 @@ def zpool_handler(info, storage_config):
zfs_properties=fs_properties)
-def zfs_handler(info, storage_config):
+def zfs_handler(info, storage_config, handlers):
"""
Create a zfs filesystem
"""
@@ -1913,7 +1995,11 @@ def meta_custom(args):
'zpool': zpool_handler,
}
- state = util.load_command_environment(strict=True)
+ if args.testmode:
+ command_handlers['image'] = image_handler
+ state = {}
+ else:
+ state = util.load_command_environment(strict=True)
cfg = config.load_command_config(args, state)
storage_config_dict = extract_storage_ordered_dict(cfg)
@@ -1932,12 +2018,15 @@ def meta_custom(args):
description="configuring %s: %s" % (command['type'],
command['id'])):
try:
- handler(command, storage_config_dict)
+ handler(command, storage_config_dict, command_handlers)
except Exception as error:
LOG.error("An error occured handling '%s': %s - %s" %
(item_id, type(error).__name__, error))
raise
+ if args.testmode:
+ util.subp(['losetup', '--detach'] + list(DEVS))
+
if args.umount:
util.do_umount(state['target'], recursive=True)
return 0
@@ -1963,10 +2052,14 @@ def meta_simple(args):
serial = i.get("serial")
if serial is None:
continue
- grub = i.get("grub_device")
- diskPath = block.lookup_disk(serial)
- if grub is True:
+ try:
+ diskPath = block.lookup_disk(serial)
+ except ValueError as err:
+ LOG.debug("Skipping disk '%s': %s", i.get("id"), err)
+ continue
+ if i.get("grub_device"):
devpath = diskPath
+ break
devices = args.devices
bootpt = get_bootpt_cfg(
@@ -2004,10 +2097,10 @@ def meta_simple(args):
LOG.warn("No non-removable, installable devices found. List "
"populated with removable devices allowed: %s",
devices)
- elif len(devices) == 0 and devpath:
- devices = [devpath]
- if len(devices) > 1:
+ if devpath is not None:
+ target = devpath
+ elif len(devices) > 1:
if args.devices is not None:
LOG.warn("'%s' mode but multiple devices given. "
"using first found", args.mode)
diff --git a/curtin/commands/curthooks.py b/curtin/commands/curthooks.py
index eaeb9c4..647a0bf 100644
--- a/curtin/commands/curthooks.py
+++ b/curtin/commands/curthooks.py
@@ -177,14 +177,9 @@ def setup_zipl(cfg, target):
# assuming that below gives the "/" rootfs
target_dev = block.get_devices_for_mp(target)[0]
- root_arg = None
- # not mapped rootfs, use UUID
- if 'mapper' in target_dev:
- root_arg = target_dev
- else:
- uuid = block.get_volume_uuid(target_dev)
- if uuid:
- root_arg = "UUID=%s" % uuid
+ # get preferred device path, according to https://wiki.ubuntu.com/FSTAB
+ from curtin.commands.block_meta import get_volume_spec
+ root_arg = get_volume_spec(target_dev)
if not root_arg:
msg = "Failed to identify root= for %s at %s." % (target, target_dev)
@@ -947,7 +942,6 @@ def copy_zkey_repository(zkey_repository, target,
def apply_networking(target, state):
netconf = state.get('network_config')
- interfaces = state.get('interfaces')
def is_valid_src(infile):
with open(infile, 'r') as fp:
@@ -961,11 +955,11 @@ def apply_networking(target, state):
apply_net.apply_net(target, network_state=None, network_config=netconf)
else:
LOG.debug("copying interfaces")
- copy_interfaces(interfaces, target)
+ copy_interfaces(state.get('interfaces'), target)
def copy_interfaces(interfaces, target):
- if not interfaces:
+ if not interfaces or not os.path.exists(interfaces):
LOG.warn("no interfaces file to copy!")
return
eni = os.path.sep.join([target, 'etc/network/interfaces'])
@@ -1293,8 +1287,9 @@ def install_missing_packages(cfg, target, osfamily=DISTROS.debian):
if distro.has_pkg_available(uefi_pkg_signed):
uefi_pkgs.append(uefi_pkg_signed)
- # AMD64 has shim-signed for SecureBoot support
- if arch == "amd64":
+ # amd64 and arm64 (since bionic) has shim-signed for
+ # SecureBoot support
+ if distro.has_pkg_available("shim-signed"):
uefi_pkgs.append("shim-signed")
else:
raise ValueError('Unknown grub2 package list for distro: %s' %
diff --git a/curtin/commands/extract.py b/curtin/commands/extract.py
index 069023f..bfcb076 100644
--- a/curtin/commands/extract.py
+++ b/curtin/commands/extract.py
@@ -1,5 +1,10 @@
# This file is part of curtin. See LICENSE file for copyright and license info.
+try:
+ from abc import ABC
+except ImportError:
+ ABC = object
+import abc
import os
import shutil
import sys
@@ -57,125 +62,106 @@ def extract_root_tgz_url(url, target):
'--', url, target])
-def extract_root_fsimage_url(url, target):
- path = _path_from_file_url(url)
- if path != url or os.path.isfile(path):
- return _extract_root_fsimage(path, target)
-
- wfp = tempfile.NamedTemporaryFile(suffix=".img", delete=False)
- wfp.close()
- try:
- url_helper.download(url, wfp.name, retries=3)
- return _extract_root_fsimage(wfp.name, target)
- finally:
- os.unlink(wfp.name)
-
-
-def _extract_root_fsimage(path, target):
- mp = tempfile.mkdtemp()
- try:
- util.subp(['mount', '-o', 'loop,ro', path, mp], capture=True)
- except util.ProcessExecutionError as e:
- LOG.error("Failed to mount '%s' for extraction: %s", path, e)
- os.rmdir(mp)
- raise e
- try:
- return copy_to_target(mp, target)
- finally:
- util.subp(['umount', mp])
- os.rmdir(mp)
-
-
-def extract_root_layered_fsimage_url(uri, target):
- ''' Build images list to consider from a layered structure
+def mount(device, mountpoint, options=None, type=None):
+ opts = []
+ if options is not None:
+ opts.extend(['-o', options])
+ if type is not None:
+ opts.extend(['-t', type])
+ util.subp(['mount'] + opts + [device, mountpoint], capture=True)
- uri: URI of the layer file
- target: Target file system to provision
- return: None
- '''
- path = _path_from_file_url(uri)
-
- image_stack = _get_image_stack(path)
- LOG.debug("Considering fsimages: '%s'", ",".join(image_stack))
-
- tmp_dir = None
- try:
- # Download every remote images if remote url
- if url_helper.urlparse(path).scheme != "":
- tmp_dir = tempfile.mkdtemp()
- image_stack = _download_layered_images(image_stack, tmp_dir)
-
- # Check that all images exists on disk and are not empty
- for img in image_stack:
- if not os.path.isfile(img) or os.path.getsize(img) <= 0:
- raise ValueError("Failed to use fsimage: '%s' doesn't exist " +
- "or is invalid", img)
-
- return _extract_root_layered_fsimage(image_stack, target)
- finally:
- if tmp_dir and os.path.exists(tmp_dir):
- shutil.rmtree(tmp_dir)
-
-
-def _download_layered_images(image_stack, tmp_dir):
- local_image_stack = []
- try:
- for img_url in image_stack:
- dest_path = os.path.join(tmp_dir,
- os.path.basename(img_url))
- url_helper.download(img_url, dest_path, retries=3)
- local_image_stack.append(dest_path)
- except url_helper.UrlError as e:
- LOG.error("Failed to download '%s'" % img_url)
- raise e
- return local_image_stack
-
-
-def _extract_root_layered_fsimage(image_stack, target):
- mp_base = tempfile.mkdtemp()
- mps = []
- try:
- # Create a mount point for each image file and mount the image
+def unmount(mountpoint):
+ util.subp(['umount', mountpoint], capture=True)
+
+
+class AbstractSourceHandler(ABC):
+ """Encapsulate setting up an installation source for copy_to_target.
+
+    A source handler sets up a curtin installation source (see
+ https://curtin.readthedocs.io/en/latest/topics/config.html#sources)
+ for copying to the target with copy_to_target.
+ """
+
+ @abc.abstractmethod
+ def setup(self):
+ """Set up the source for copying and return the path to it."""
+ pass
+
+ @abc.abstractmethod
+ def cleanup(self):
+ """Perform any necessary clean up of actions performed by setup."""
+ pass
+
+
+class LayeredSourceHandler(AbstractSourceHandler):
+
+ def __init__(self, image_stack):
+ self.image_stack = image_stack
+ self._tmpdir = None
+ self._mounts = []
+
+ def _download(self):
+ new_image_stack = []
+ for path in self.image_stack:
+ if url_helper.urlparse(path).scheme not in ["", "file"]:
+ new_path = os.path.join(self._tmpdir, os.path.basename(path))
+ url_helper.download(path, new_path, retries=3)
+ else:
+ new_path = _path_from_file_url(path)
+ new_image_stack.append(new_path)
+ self.image_stack = new_image_stack
+
+ def setup(self):
+ self._tmpdir = tempfile.mkdtemp()
try:
- for img in image_stack:
- mp = os.path.join(mp_base, os.path.basename(img) + ".dir")
+ self._download()
+ # Check that all images exists on disk and are not empty
+ for img in self.image_stack:
+ if not os.path.isfile(img) or os.path.getsize(img) <= 0:
+ raise ValueError(
+ ("Failed to use fsimage: '%s' doesn't exist " +
+ "or is invalid") % (img,))
+ for img in self.image_stack:
+ mp = os.path.join(
+ self._tmpdir, os.path.basename(img) + ".dir")
os.mkdir(mp)
- util.subp(['mount', '-o', 'loop,ro', img, mp], capture=True)
- mps.insert(0, mp)
- except util.ProcessExecutionError as e:
- LOG.error("Failed to mount '%s' for extraction: %s", img, e)
- raise e
-
- # Prepare
- if len(mps) == 1:
- root_dir = mps[0]
- else:
- # Multiple image files, merge them with an overlay and do the copy
- root_dir = os.path.join(mp_base, "root.dir")
- os.mkdir(root_dir)
- try:
- util.subp(['mount', '-t', 'overlay', 'overlay', '-o',
- 'lowerdir=' + ':'.join(mps), root_dir],
- capture=True)
- mps.append(root_dir)
- except util.ProcessExecutionError as e:
- LOG.error("overlay mount to %s failed: %s", root_dir, e)
- raise e
-
- copy_to_target(root_dir, target)
- finally:
- umount_err_mps = []
- for mp in reversed(mps):
- try:
- util.subp(['umount', mp], capture=True)
- except util.ProcessExecutionError as e:
- LOG.error("can't unmount %s: %e", mp, e)
- umount_err_mps.append(mp)
- if umount_err_mps:
- raise util.ProcessExecutionError(
- "Failed to umount: %s", ", ".join(umount_err_mps))
- shutil.rmtree(mp_base)
+ mount(img, mp, options='loop,ro')
+ self._mounts.append(mp)
+ if len(self._mounts) == 1:
+ root_dir = self._mounts[0]
+ else:
+ # Multiple image files, merge them with an overlay.
+ root_dir = os.path.join(self._tmpdir, "root.dir")
+ os.mkdir(root_dir)
+ mount(
+ 'overlay', root_dir, type='overlay',
+ options='lowerdir=' + ':'.join(reversed(self._mounts)))
+ self._mounts.append(root_dir)
+ return root_dir
+ except Exception:
+ self.cleanup()
+ raise
+
+ def cleanup(self):
+ for mount in reversed(self._mounts):
+ unmount(mount)
+ self._mounts = []
+ if self._tmpdir is not None:
+ shutil.rmtree(self._tmpdir)
+ self._tmpdir = None
+
+
+class TrivialSourceHandler(AbstractSourceHandler):
+
+ def __init__(self, path):
+ self.path = path
+
+ def setup(self):
+ return self.path
+
+ def cleanup(self):
+ pass
def _get_image_stack(uri):
@@ -186,21 +172,45 @@ def _get_image_stack(uri):
'''
image_stack = []
- root_dir = os.path.dirname(uri)
img_name = os.path.basename(uri)
- _, img_ext = os.path.splitext(img_name)
+ root_dir = uri[:-len(img_name)]
+ img_base, img_ext = os.path.splitext(img_name)
- img_parts = img_name.split('.')
- # Last item is the extension
- for i in img_parts[:-1]:
+ if not img_base:
+ return []
+
+ img_parts = img_base.split('.')
+ for i in range(len(img_parts)):
image_stack.append(
- os.path.join(
- root_dir,
- '.'.join(img_parts[0:img_parts.index(i)+1]) + img_ext))
+ root_dir + '.'.join(img_parts[0:i+1]) + img_ext)
return image_stack
+def get_handler_for_source(source):
+ """Return an AbstractSourceHandler for setting up `source`."""
+ if source['uri'].startswith("cp://"):
+ return TrivialSourceHandler(source['uri'][5:])
+ elif source['type'] == "fsimage":
+ return LayeredSourceHandler([source['uri']])
+ elif source['type'] == "fsimage-layered":
+ return LayeredSourceHandler(_get_image_stack(source['uri']))
+ else:
+ return None
+
+
+def extract_source(source, target):
+ handler = get_handler_for_source(source)
+ if handler is not None:
+ root_dir = handler.setup()
+ try:
+ copy_to_target(root_dir, target)
+ finally:
+ handler.cleanup()
+ else:
+ extract_root_tgz_url(source['uri'], target=target)
+
+
def copy_to_target(source, target):
if source.startswith("cp://"):
source = source[5:]
@@ -245,14 +255,7 @@ def extract(args):
source['uri']):
if source['type'].startswith('dd-'):
continue
- if source['uri'].startswith("cp://"):
- copy_to_target(source['uri'], target)
- elif source['type'] == "fsimage":
- extract_root_fsimage_url(source['uri'], target=target)
- elif source['type'] == "fsimage-layered":
- extract_root_layered_fsimage_url(source['uri'], target=target)
- else:
- extract_root_tgz_url(source['uri'], target=target)
+ extract_source(source, target)
if cfg.get('write_files'):
LOG.info("Applying write_files from config.")
diff --git a/curtin/commands/install.py b/curtin/commands/install.py
index a3471f6..0e20e41 100644
--- a/curtin/commands/install.py
+++ b/curtin/commands/install.py
@@ -140,7 +140,7 @@ class WorkingDir(object):
json.dump(config, fp)
# just touch these files to make sure they exist
- for f in (interfaces_f, config_f, fstab_f, netconf_f, netstate_f):
+ for f in (config_f, fstab_f, netconf_f, netstate_f):
with open(f, "ab") as fp:
pass
diff --git a/curtin/distro.py b/curtin/distro.py
index 82a4dd5..8b5fbf8 100644
--- a/curtin/distro.py
+++ b/curtin/distro.py
@@ -114,8 +114,19 @@ def _parse_redhat_release(release_file=None, target=None):
def get_distroinfo(target=None):
- variant_name = os_release(target=target)['ID']
- variant = name_to_distro(variant_name)
+ variant_os_release = os_release(target=target)
+ variant_name = variant_os_release['ID']
+ try:
+ variant = name_to_distro(variant_name)
+ except ValueError:
+ for variant_name in variant_os_release["ID_LIKE"].split():
+ try:
+ variant = name_to_distro(variant_name)
+ break
+ except ValueError:
+ pass
+ else:
+ raise ValueError("Unknown distro: %s" % variant_os_release['ID'])
family = DISTRO_TO_OSFAMILY.get(variant)
return DistroInfo(variant, family)
diff --git a/curtin/storage_config.py b/curtin/storage_config.py
index e6c33cc..405a1e2 100644
--- a/curtin/storage_config.py
+++ b/curtin/storage_config.py
@@ -7,7 +7,7 @@ import re
import yaml
from curtin.log import LOG
-from curtin.block import schemas
+from curtin.block import multipath, schemas
from curtin import config as curtin_config
from curtin import util
@@ -164,7 +164,7 @@ def _stype_to_deps(stype):
'lvm_volgroup': {'devices'},
'mount': {'device'},
'partition': {'device'},
- 'raid': {'devices', 'spare_devices'},
+ 'raid': {'devices', 'spare_devices', 'container'},
'zfs': {'pool'},
'zpool': {'vdevs'},
}
@@ -212,7 +212,7 @@ def _validate_dep_type(source_id, dep_key, dep_id, sconfig):
'mount': {'format'},
'partition': {'bcache', 'disk', 'raid', 'partition'},
'raid': {'bcache', 'disk', 'dm_crypt', 'lvm_partition',
- 'partition'},
+ 'partition', 'raid'},
'zfs': {'zpool'},
'zpool': {'disk', 'partition'},
}
@@ -453,72 +453,15 @@ class ProbertParser(object):
return None
- def is_mpath(self, blockdev):
- if blockdev.get('DM_MULTIPATH_DEVICE_PATH') == "1":
- return True
+ def is_mpath_member(self, blockdev):
+ return multipath.is_mpath_member(blockdev.get('DEVNAME', ''), blockdev)
- return bool('mpath-' in blockdev.get('DM_UUID', ''))
+ def is_mpath_device(self, blockdev):
+ return multipath.is_mpath_device(blockdev.get('DEVNAME', ''), blockdev)
- def get_mpath_name(self, blockdev):
- mpath_data = self.probe_data.get('multipath')
- if not mpath_data:
- return None
-
- bd_name = blockdev['DEVNAME']
- if blockdev['DEVTYPE'] == 'partition':
- bd_name = self.partition_parent_devname(blockdev)
- bd_name = os.path.basename(bd_name)
- for path in mpath_data.get('paths', []):
- if bd_name == path.get('device'):
- rv = path.get('multipath')
- return rv
-
- def find_mpath_member(self, blockdev):
- if blockdev.get('DM_MULTIPATH_DEVICE_PATH') == "1":
- # find all other DM_MULTIPATH_DEVICE_PATH devs with same serial
- serial = blockdev.get('ID_SERIAL')
- members = sorted([os.path.basename(dev['DEVNAME'])
- for dev in self.blockdev_data.values()
- if dev.get("ID_SERIAL", "") == serial and
- dev['DEVTYPE'] == blockdev['DEVTYPE']])
- # [/dev/sda, /dev/sdb]
- # [/dev/sda1, /dev/sda2, /dev/sdb1, /dev/sdb2]
-
- else:
- dm_mpath = blockdev.get('DM_MPATH')
- dm_uuid = blockdev.get('DM_UUID')
- dm_part = blockdev.get('DM_PART')
- dm_name = blockdev.get('DM_NAME')
-
- if dm_mpath:
- multipath = dm_mpath
- elif dm_name:
- multipath = dm_name
- else:
- # part1-mpath-30000000000000064
- # mpath-30000000000000064
- # mpath-36005076306ffd6b60000000000002406
- match = re.search(r'mpath\-([a-zA-Z]*|\d*)+$', dm_uuid)
- if not match:
- LOG.debug('Failed to extract multipath ID pattern from '
- 'DM_UUID value: "%s"', dm_uuid)
- return None
- # remove leading 'mpath-'
- multipath = match.group(0)[6:]
- mpath_data = self.probe_data.get('multipath')
- if not mpath_data:
- return None
- members = sorted([path['device'] for path in mpath_data['paths']
- if path['multipath'] == multipath])
-
- # append partition number if present
- if dm_part:
- members = [member + dm_part for member in members]
-
- if len(members):
- return members[0]
-
- return None
+ def is_mpath_partition(self, blockdev):
+ return multipath.is_mpath_partition(
+ blockdev.get('DEVNAME', ''), blockdev)
def blockdev_to_id(self, blockdev):
""" Examine a blockdev dictionary and return a tuple of curtin
@@ -539,21 +482,18 @@ class ProbertParser(object):
if 'DM_LV_NAME' in blockdev:
devtype = 'lvm-partition'
name = blockdev['DM_LV_NAME']
- elif self.is_mpath(blockdev):
- # extract a multipath member device
- member = self.find_mpath_member(blockdev)
- if member:
- name = member
- else:
- name = blockdev['DM_UUID']
- if 'DM_PART' in blockdev:
- devtype = 'partition'
+ elif self.is_mpath_device(blockdev):
+ devtype = 'mpath-disk'
+ name = blockdev['DM_NAME']
+ elif self.is_mpath_partition(blockdev):
+ devtype = 'mpath-partition'
+ name = '{}-part{}'.format(
+ blockdev['DM_MPATH'], blockdev['DM_PART'])
elif is_dmcrypt(blockdev):
devtype = 'dmcrypt'
name = blockdev['DM_NAME']
elif devname.startswith('/dev/md'):
- if 'MD_NAME' in blockdev:
- devtype = 'raid'
+ devtype = 'raid'
for key, val in {'name': name, 'devtype': devtype}.items():
if not val or val == 'MISSING':
@@ -681,10 +621,15 @@ class BlockdevParser(ProbertParser):
errors = []
for devname, data in self.blockdev_data.items():
- # skip composed devices here, except partitions
+ # skip composed devices here, except partitions and multipath
if data.get('DEVPATH', '').startswith('/devices/virtual/block'):
- if data.get('DEVTYPE', '') != "partition":
- continue
+ if not self.is_mpath_device(data):
+ if not self.is_mpath_partition(data):
+ if data.get('DEVTYPE', '') != "partition":
+ continue
+ # skip disks that are members of multipath devices
+ if self.is_mpath_member(data):
+ continue
entry = self.asdict(data)
if entry:
try:
@@ -698,7 +643,10 @@ class BlockdevParser(ProbertParser):
def valid_id(self, id_value):
# reject wwn=0x0+
if id_value.lower().startswith('0x'):
- return int(id_value, 16) > 0
+ try:
+ return int(id_value, 16) > 0
+ except ValueError:
+ return True
# accept non-empty (removing whitspace) strings
return len(''.join(id_value.split())) > 0
@@ -710,10 +658,16 @@ class BlockdevParser(ProbertParser):
blockdev attribute.
"""
uniq = {}
- source_keys = {
- 'wwn': ['ID_WWN_WITH_EXTENSION', 'ID_WWN'],
- 'serial': ['ID_SERIAL', 'ID_SERIAL_SHORT'],
- }
+ if self.is_mpath_device(blockdev):
+ source_keys = {
+ 'wwn': ['DM_WWN'],
+ 'serial': ['DM_SERIAL'], # only present with focal+
+ }
+ else:
+ source_keys = {
+ 'wwn': ['ID_WWN_WITH_EXTENSION', 'ID_WWN'],
+ 'serial': ['ID_SERIAL', 'ID_SERIAL_SHORT'],
+ }
for skey, id_keys in source_keys.items():
for id_key in id_keys:
if id_key in blockdev and skey not in uniq:
@@ -740,6 +694,10 @@ class BlockdevParser(ProbertParser):
storage config dictionary. This method
will return curtin storage types: disk, partition.
"""
+ dev_type = blockdev_data['DEVTYPE']
+ if self.is_mpath_partition(blockdev_data):
+ dev_type = 'partition'
+
# just disks and partitions
if blockdev_data['DEVTYPE'] not in ["disk", "partition"]:
return None
@@ -752,13 +710,13 @@ class BlockdevParser(ProbertParser):
devname = blockdev_data.get('DEVNAME')
entry = {
- 'type': blockdev_data['DEVTYPE'],
+ 'type': dev_type,
'id': self.blockdev_to_id(blockdev_data),
}
- if blockdev_data.get('DM_MULTIPATH_DEVICE_PATH') == "1":
- mpath_name = self.get_mpath_name(blockdev_data)
- if mpath_name:
- entry['multipath'] = mpath_name
+ if self.is_mpath_device(blockdev_data):
+ entry['multipath'] = blockdev_data['DM_NAME']
+ elif self.is_mpath_partition(blockdev_data):
+ entry['multipath'] = blockdev_data['DM_MPATH']
# default disks to gpt
if entry['type'] == 'disk':
@@ -796,8 +754,17 @@ class BlockdevParser(ProbertParser):
if entry['type'] == 'partition':
attrs = blockdev_data['attrs']
- entry['number'] = int(attrs['partition'])
- parent_devname = self.partition_parent_devname(blockdev_data)
+ if self.is_mpath_partition(blockdev_data):
+ entry['number'] = int(blockdev_data['DM_PART'])
+ parent_devname = self.lookup_devname(
+ '/dev/mapper/' + blockdev_data['DM_MPATH'])
+ if parent_devname is None:
+ raise ValueError(
+ "Cannot find parent mpath device %s for %s" % (
+ blockdev_data['DM_MPATH'], devname))
+ else:
+ entry['number'] = int(attrs['partition'])
+ parent_devname = self.partition_parent_devname(blockdev_data)
parent_blockdev = self.blockdev_data[parent_devname]
if 'ID_PART_TABLE_TYPE' not in parent_blockdev:
# Exclude the fake partition that the kernel creates
@@ -810,9 +777,7 @@ class BlockdevParser(ProbertParser):
if ptable:
part = None
for pentry in ptable['partitions']:
- node = pentry['node']
- node_p = node.replace(parent_devname, '')
- if node_p.replace('p', '') == attrs['partition']:
+ if self.lookup_devname(pentry['node']) == devname:
part = pentry
break
@@ -880,6 +845,9 @@ class FilesystemParser(ProbertParser):
errors.append(err)
continue
+ if self.is_mpath_member(blockdev_data):
+ continue
+
# no floppy, no cdrom
if blockdev_data['MAJOR'] in ["11", "2"]:
continue
@@ -887,21 +855,24 @@ class FilesystemParser(ProbertParser):
volume_id = self.blockdev_to_id(blockdev_data)
# don't capture non-filesystem usage
- if data['USAGE'] != "filesystem":
+ # crypto is just a disguised filesystem
+ if data['USAGE'] not in ("filesystem", "crypto"):
continue
- # ignore types that we cannot create
- if data.get('TYPE') not in schemas._fstypes:
+ entry = self.asdict(volume_id, data)
+ if not entry:
continue
- entry = self.asdict(volume_id, data)
- if entry:
- try:
- validate_config(entry)
- except ValueError as e:
- errors.append(e)
- continue
- configs.append(entry)
+ # allow types that we cannot create only if preserve == true
+ if data.get('TYPE') not in schemas._fstypes:
+ entry['preserve'] = True
+
+ try:
+ validate_config(entry)
+ except ValueError as e:
+ errors.append(e)
+ continue
+ configs.append(entry)
return (configs, errors)
def asdict(self, volume_id, fs_data):
@@ -1078,16 +1049,27 @@ class RaidParser(ProbertParser):
# FIXME, need to handle rich md_name values, rather than mdX
# LP: #1803933
raidname = os.path.basename(devname)
- return {'type': 'raid',
- 'id': 'raid-%s' % raidname,
- 'name': raidname,
- 'raidlevel': raid_data.get('raidlevel'),
- 'devices': sorted([
- self.blockdev_to_id(self.blockdev_data[dev])
- for dev in raid_data.get('devices')]),
- 'spare_devices': sorted([
- self.blockdev_to_id(self.blockdev_data[dev])
- for dev in raid_data.get('spare_devices')])}
+
+ action = {
+ 'type': 'raid',
+ 'id': self.blockdev_to_id(raid_data),
+ 'name': raidname,
+ 'raidlevel': raid_data.get('raidlevel'),
+ }
+
+ if 'MD_METADATA' in raid_data:
+ action['metadata'] = raid_data["MD_METADATA"]
+
+ if 'container' in raid_data:
+ action['container'] = self.blockdev_byid_to_devname(
+ raid_data['container'])
+ else:
+ for k in 'devices', 'spare_devices':
+ action[k] = sorted([
+ self.blockdev_byid_to_devname(dev)
+ for dev in raid_data.get(k, [])])
+
+ return action
def parse(self):
"""parse probert 'raid' data format.
diff --git a/curtin/swap.py b/curtin/swap.py
index 11e95c4..7a0128d 100644
--- a/curtin/swap.py
+++ b/curtin/swap.py
@@ -135,9 +135,10 @@ def setup_swapfile(target, fstab=None, swapfile=None, size=None, maxsize=None,
allocate_cmd = 'fallocate -l "${2}M" "$1"'
# fallocate uses IOCTLs to allocate space in a filesystem, however it's not
- # clear (from curtin's POV) that it creates non-sparse files as required by
- # mkswap so we'll skip fallocate for now and use dd.
- if fstype in ['btrfs', 'xfs']:
+ # clear (from curtin's POV) that it creates non-sparse files on btrfs or
+ # xfs as required by mkswap so we'll skip fallocate for now and use dd. It
+ # is also plain not supported on ext2 and ext3.
+ if fstype in ['btrfs', 'ext2', 'ext3', 'xfs']:
allocate_cmd = 'dd if=/dev/zero "of=$1" bs=1M "count=$2"'
mbsize = str(int(size / (2 ** 20)))
diff --git a/curtin/util.py b/curtin/util.py
index be063d7..5b66b55 100644
--- a/curtin/util.py
+++ b/curtin/util.py
@@ -518,13 +518,85 @@ def do_mount(src, target, opts=None):
return True
-def do_umount(mountpoint, recursive=False):
- # unmount mountpoint. if recursive, unmount all mounts under it.
- # return boolean indicating if mountpoint was previously mounted.
+def do_umount(mountpoint, recursive=False, private=False):
mp = os.path.abspath(mountpoint)
+ # unmount mountpoint.
+ #
+ # if recursive, unmount all mounts under it. if private (which
+ # implies recursive), mark all mountpoints private before
+ # unmounting.
+ #
+ # To explain the 'private' parameter, consider the following sequence:
+ #
+ # mkdir a a/b c
+ # mount --bind a c
+ # mount -t sysfs sysfs a/b
+ #
+ # "umount c" will fail with "mountpoint is busy" because the mount
+ # of a/b "propagates" into the subtree at c, i.e. creates a mount
+ # at "c/b". But if you run "umount -R c" the unmount of c/b will
+    # propagate back to a and unmount a/b (and as in practice "a/b"
+ # can be something like driver specific mounts in /sys, this would
+ # be Bad(tm)). So what we do here is iterate over the mountpoints
+ # under `mountpoint` and mark them as "private" doing any
+ # unmounting, which means the unmounts do not propagate to any
+ # mount tree they were cloned from.
+ #
+ # So why not do this always? Well! Several systemd services (like
+ # udevd) run in private mount namespaces, set up so that mount
+ # operations on the default namespace propagate into the service's
+ # namespace (but not the other way). This means if you do this:
+ #
+ # mount /dev/sda2 /tmp/my-mount
+ # mount --make-private /tmp/my-mount
+ # umount /tmp/my-mount
+ #
+ # then the mount operation propagates into udevd's mount namespace
+ # but the unmount operation does not and so /dev/sda2 remains
+ # mounted, which causes problems down the road.
+ #
+ # It would be nice to not have to have the caller care about all
+ # this detail. In particular imagine the target system is set up
+ # at /target and has host directories bind-mounted into it, so the
+ # mount tree looks something like this:
+ #
+ # /dev/vda2 is mounted at /target
+ # /dev/vda1 is mounted at /target/boot
+ # /sys is bind-mounted at /target/sys
+ #
+ # And for whatever reason, a mount has appeared at /sys/foo since
+ # this was setup, so there is now an additional mount at
+ # /target/sys/foo.
+ #
+ # What I would like is to be able to run something like "curtin
+ # unmount /target" and have curtin figure out that the mountpoint
+ # at /target/sys/foo should be made private before unmounting and
+ # the others should not. But I don't know how to do that.
+ #
+ # See "Shared subtree operations" in mount(8) for more on all
+ # this.
+ #
+ # You might also think we could replace all this with a call to
+ # "mount --make-rprivate" followed by a call to "umount
+ # --recursive" but that would fail in the case where `mountpoint`
+ # is not actually a mount point, and not doing that is actually
+ # relied on by other parts of curtin.
+ #
+ # Related bug reports:
+ # https://bugs.launchpad.net/maas/+bug/1928839
+ # https://bugs.launchpad.net/subiquity/+bug/1934775
+ #
+ # return boolean indicating if mountpoint was previously mounted.
ret = False
- for line in reversed(load_file("/proc/mounts", decode=True).splitlines()):
- curmp = line.split()[1]
+ mountpoints = [
+ line.split()[1]
+ for line in load_file("/proc/mounts", decode=True).splitlines()]
+ if private:
+ recursive = True
+ for curmp in mountpoints:
+ if curmp == mp or curmp.startswith(mp + os.path.sep):
+ subp(['mount', '--make-private', curmp])
+ for curmp in reversed(mountpoints):
if curmp == mp or (recursive and curmp.startswith(mp + os.path.sep)):
subp(['umount', curmp])
if curmp == mp:
@@ -695,7 +767,7 @@ class ChrootableTarget(object):
log_call(subp, ['udevadm', 'settle'])
for p in reversed(self.umounts):
- do_umount(p)
+ do_umount(p, private=True)
rconf = paths.target_path(self.target, "/etc/resolv.conf")
if self.sys_resolvconf and self.rconf_d:
diff --git a/debian/changelog b/debian/changelog
index 64ddb71..b8e9bb5 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,71 @@
+curtin (21.3-0ubuntu1) jammy; urgency=medium
+
+ * New upstream release.
+ - version to 21.3 [Dan Bungert] (LP: #1953410)
+ - distro: handle ID_LIKE [Dan Bungert] (LP: #1934513)
+ - block_meta: pass handlers dict to all handlers [Michael Hudson-Doyle]
+ - block_meta: refactor partition verification slightly
+ [Michael Hudson-Doyle]
+ - block-meta: support using a raw image containing LVM volumes
+ [Alexsander de Souza]
+ - lsblk: adjust output to match old format [Dan Bungert]
+ - curtin.commands.extract: Factor preparing sources for copying into a
+ class [Michael Hudson-Doyle]
+ - apt_config: tweaks to allow some functionality in partial tree as
+ non-root [Michael Hudson-Doyle]
+ - Add integration tests for some partitioning operations
+ [Michael Hudson-Doyle]
+ - command/apt: disable_components [Dan Bungert]
+ - commands/apt: use python-apt for sources.list [Dan Bungert]
+ - curthooks: always install shim-signed if available (when UEFI booted)
+ [Michael Hudson-Doyle]
+ - tox,control: fix jenkins CI jobs [Dan Bungert]
+ - tests: update to demonstrate python-apt functional [Dan Bungert]
+ - block:lvm: search encrypted volumes for LVM
+ [Lukas Märdian] (LP: #1940687)
+ - mdadm: allow installation to a syncing array
+ [Michael Hudson-Doyle] (LP: #1939563)
+ - vmtests: s/Groovy/Impish [Dan Bungert]
+ - tweak making mountpoints private some more [Michael Hudson-Doyle]
+ - curthooks: do not unconditionally copy e/n/interfaces to target
+ [Michael Hudson-Doyle]
+ - block: handle /dev/mapper/* in dev_path() [Lukas Märdian]
+ - curthooks: do not add lvm devices filter when / is mutipathed
+ [Michael Hudson-Doyle] (LP: #1895192)
+ - Extend the 'format' schema for unknown fstypes [Dan Bungert]
+ - move making mounts recursively private into do_unmount
+ [Michael Hudson-Doyle]
+ - disk_handler: fix partitioning a new RAID [Michael Hudson-Doyle]
+ - curthooks:setup_zipl: use proper device path in root= arg for cmdline
+ [Lukas Märdian]
+ - fix tearing down ChrootableTarget when mounts appear while it is set up
+ [Michael Hudson-Doyle] (LP: #1928839, #1934775)
+ - Don't override PYTHON env var in bin/curtin [Dan Bungert]
+ - disk_handler: check wipe field when deciding whether to reformat raids
+ [Michael Hudson-Doyle] (LP: #1932976)
+ - storage_config: properly handle raid containers [Michael Hudson-Doyle]
+ - pylintrc: explicitly list the DISTROS generated-members [Paride Legovini]
+ - block_meta: make preserve: true on a raid in a container work
+ [Michael Hudson-Doyle]
+ - Fix NVMe validation for namespaces with UUID
+ [Ryan Norwood] (LP: #1925399)
+ - meta_simple: handle multiple disks in storage config with dd-image
+ [Dan Bungert] (LP: #1925722)
+ - Use /proc/filesystems to decide passno
+ [Dan Bungert] (LP: #1717584, LP: #1785354)
+ - block_meta: fix wiping of existing dasd partition [Michael Hudson-Doyle]
+ - block_meta: pass --yes to lvcreate alongside --wipesignatures=y
+ [Michael Hudson-Doyle] (LP: #1923487)
+ - remove 'strict' arguments to block.wipe_volume and block.quick_zero
+ [Michael Hudson-Doyle] (LP: #1868177)
+ - swap: use dd to allocate swapfiles on ext2 and ext3
+ [Michael Hudson-Doyle] (LP: #1918990)
+ - storage_config: return one type: disk action per multipathed disk
+ [Michael Hudson-Doyle] (LP: #1893818)
+ - vmtest/centos: handle different paths to grub config [Ryan Harper]
+
+ -- Dan Bungert <daniel.bungert@xxxxxxxxxxxxx> Mon, 06 Dec 2021 17:02:22 -0700
+
curtin (21.2-0ubuntu1) hirsute; urgency=medium
* New upstream release.
diff --git a/debian/control b/debian/control
index 9fddcbd..9f0b71d 100644
--- a/debian/control
+++ b/debian/control
@@ -6,6 +6,7 @@ Maintainer: Ubuntu Developers <ubuntu-devel-discuss@xxxxxxxxxxxxxxxx>
Build-Depends: debhelper (>= 7),
dh-python,
python3,
+ python3-apt,
python3-coverage,
python3-mock,
python3-nose,
@@ -49,6 +50,7 @@ Section: python
Architecture: all
Priority: extra
Depends: curtin-common (= ${binary:Version}),
+ python3-apt,
python3-oauthlib,
python3-yaml,
wget,
diff --git a/doc/topics/apt_source.rst b/doc/topics/apt_source.rst
index f996c53..cf0f8bd 100644
--- a/doc/topics/apt_source.rst
+++ b/doc/topics/apt_source.rst
@@ -35,6 +35,8 @@ Features
- disabling suites (=pockets)
+ - disabling components (multiverse, universe, restricted)
+
- per architecture mirror definition
diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst
index 75c5537..0f33ec0 100644
--- a/doc/topics/storage.rst
+++ b/doc/topics/storage.rst
@@ -542,6 +542,11 @@ One of ``device`` or ``spec`` must be present.
fstab entry will contain ``_netdev`` to indicate networking is
required to mount this filesystem.
+**freq**: *<dump(8) integer from 0-9 inclusive>*
+
+The ``freq`` key refers to the freq as defined in dump(8).
+Defaults to ``0`` if unspecified.
+
**fstype**: *<filesystem type>*
``fstype`` is only required if ``device`` is not present. It indicates
@@ -552,7 +557,7 @@ to ``/etc/fstab``
The ``options`` key will replace the default options value of ``defaults``.
-.. warning::
+.. warning::
The kernel and user-space utilities may differ between the install
environment and the runtime environment. Not all kernels and user-space
combinations will support all options. Providing options for a mount point
@@ -565,6 +570,14 @@ The ``options`` key will replace the default options value of ``defaults``.
If either of the environments (install or target) do not have support for
the provided options, the behavior is undefined.
+**passno**: *<fsck(8) non-negative integer, typically 0-2>*
+
+The ``passno`` key refers to the fs_passno as defined in fsck(8).
+If unspecified, ``curtin`` will default to 1 or 0, depending on whether that
+filesystem is considered to be a 'nodev' device per /proc/filesystems.
+Note that per systemd-fstab-generator(8), systemd interprets passno as a
+boolean.
+
**spec**: *<fs_spec>*
The ``spec`` attribute defines the fsspec as defined in fstab(5).
@@ -852,6 +865,10 @@ If ``wipe`` option is set to values other than 'superblock', curtin will
wipe contents of the assembled raid device. Curtin skips 'superblock` wipes
as it already clears raid data on the members before assembling the array.
+To allow a pre-existing (i.e. ``preserve=true``) raid to get a new partition
+table, set the ``wipe`` field to indicate the disk should be
+reformatted (this differs from disk actions, where the preserve field is
+used for this purpose but means something different for raid devices).
**Config Example**::
@@ -917,6 +934,7 @@ the bcache device. This includes checking that backing device and cache
device are enabled and bound correctly (backing device is cached by expected
cache device). If ``cache-mode`` is specified, verify that the mode matches.
+
**wipe**: *superblock, superblock-recursive, pvremove, zero, random*
If ``wipe`` option is set, curtin will wipe the contents of the bcache device.
diff --git a/examples/apt-source.yaml b/examples/apt-source.yaml
index 695c696..f0f7108 100644
--- a/examples/apt-source.yaml
+++ b/examples/apt-source.yaml
@@ -27,8 +27,8 @@ apt:
#
# This is an empty list by default, so nothing is disabled.
#
- # If given, those suites are removed from sources.list after all other
- # modifications have been made.
+ # If given, those suites are removed from sources.list after most other
+ # modifications have been made, but before component removal.
# Suites are even disabled if no other modification was made,
    # but not if preserve_sources_list is active.
    # There is a special alias “$RELEASE” as in the sources that will be replaced
@@ -45,12 +45,28 @@ apt:
# There is no harm in specifying a suite to be disabled that is not found in
# the source.list file (just a no-op then)
#
- # Note: Lines don’t get deleted, but disabled by being converted to a comment.
# The following example disables all usual defaults except $RELEASE-security.
# On top it disables a custom suite called "mysuite"
disable_suites: [$RELEASE-updates, backports, $RELEASE, mysuite]
- # 1.3 primary/security
+ # 1.3 disable_components
+ #
+ # This is an empty list by default, so nothing is disabled.
+ #
+ # If given, those components are removed from sources.list after all other
+ # modifications have been made.
+ # Components are even disabled if no other modification was made,
+  # but not if preserve_sources_list is active.
+ # The 'main' component is never disabled. Should that be desired, this can
+ # be achieved by way of a sources_list template.
+ #
+ # There is no harm in specifying a component to be disabled that is not found
+ # in the source.list file (just a no-op then)
+ #
+ # The following example disables all usual default components except main.
+ disable_components: [restricted, universe, multiverse]
+
+ # 1.4 primary/security
#
# define a custom (e.g. localized) mirror that will be used in sources.list
# and any custom sources entries for deb / deb-src lines.
@@ -90,7 +106,7 @@ apt:
# primary: http://archive.ubuntu.com/ubuntu
# security: http://security.ubuntu.com/ubuntu
- # 1.4 sources_list
+ # 1.5 sources_list
#
# Provide a custom template for rendering sources.list
# without one provided curtin will try to modify the sources.list it finds
@@ -105,7 +121,7 @@ apt:
deb $PRIMARY $RELEASE universe restricted
deb $SECURITY $RELEASE-security multiverse
- # 1.5 conf
+ # 1.6 conf
#
# Any apt config string that will be made available to apt
# see the APT.CONF(5) man page for details what can be specified
@@ -117,7 +133,7 @@ apt:
};
};
- # 1.6 (http_|ftp_|https_)proxy
+ # 1.7 (http_|ftp_|https_)proxy
#
# Proxies are the most common apt.conf option, so that for simplified use
# there is a shortcut for those. Those get automatically translated into the
@@ -129,7 +145,7 @@ apt:
ftp_proxy: ftp://[[user][:pass]@]host[:port]/
https_proxy: https://[[user][:pass]@]host[:port]/
- # 1.7 add_apt_repo_match
+ # 1.8 add_apt_repo_match
#
# 'source' entries in apt-sources that match this python regex
# expression will be passed to add-apt-repository
diff --git a/examples/tests/centos_defaults.yaml b/examples/tests/centos_defaults.yaml
index 85e1c03..94e2ae8 100644
--- a/examples/tests/centos_defaults.yaml
+++ b/examples/tests/centos_defaults.yaml
@@ -27,7 +27,7 @@ write_files:
[ -e "$ofile" ] || return 0
cp "$ofile" "$bk" || rerror "failed backup ($ofile -> $bk):" "$@";
}
-
+
update_grub1() {
local cfg="$1" r=""
[ -e "$cfg" ] ||
@@ -48,7 +48,7 @@ write_files:
{ rerror "failed to update grub1 cfg '$cfg'."; return; }
info "updated grub1 cfg '$cfg'."
}
-
+
update_grub2() {
local cfg="$1" defgrub="$2"
[ -e "$cfg" ] || { info "no grub2 config '$cfg'"; return 0; }
@@ -56,8 +56,8 @@ write_files:
sed -i -e '/kernel/n' -e '/console=/n' \
-e "s/root=\([^ ]*\)/root=\1 ${CONPARM}/" "$cfg" ||
{ rerror "failed to update grub2 '$cfg'"; return; }
-
- # update /etc/default/grub. any GRUB_CMDLINE_LINUX remove
+
+ # update /etc/default/grub. any GRUB_CMDLINE_LINUX remove
# any console= and add conparm at the beginning.
local var="GRUB_CMDLINE_LINUX" msg="updated grub2 '$cfg'."
if [ ! -e "$defgrub" ]; then
@@ -77,15 +77,21 @@ write_files:
update_grub1 "$grub1conf" || fail "failed update grub1"
update_grub2 "$grub2conf" "$grub2def" || fail "failed update grub2"
+
+# centos66 images include grub 0.97 which will detect vmtests' ephemeral disk
+# and the install disk which leaves grub configured with two disks. When
+# vmtest reboots into installed disk, there is only one disk and the grub
+# map is no longer valid. Here in 00_grub, we switch hd1 to hd0. MAAS
+# is not affected as their ephemeral image (iscsi or http) is not discovered
+# by grub and therefore the device.map doesn't contain a second device. Cent7
+# has grub2 which uses root by UUID.
+_update_grub_conf:
+ - &update_grub |
+ CONF="/boot/grub2/grub.cfg"
+ [ -f "${CONF}" ] || CONF="/boot/grub/grub.conf"
+ sed -i.curtin -e 's|(hd1,0)|(hd0,0)|g' ${CONF}
+
late_commands:
- # centos66 images include grub 0.97 which will detect vmtests' ephemeral disk
- # and the install disk which leaves grub configured with two disks. When
- # vmtest reboots into installed disk, there is only one disk and the grub
- # map is no longer valid. Here in 00_grub, we switch hd1 to hd0. MAAS
- # is not affected as their ephemeral image (iscsi or http) is not discovered
- # by grub and therefor the device.map doesn't contain a second device. Cent7
- # has grub2 which uses root by UUID
- 00_grub1_boot: [curtin, in-target, --, sed, -i.curtin, -e,
- 's|(hd1,0)|(hd0,0)|g', /boot/grub/grub.conf]
+ 00_grub1_boot: [curtin, in-target, --, 'sh', '-c', *update_grub]
# vmtest wants output to go to serial console so we update grub inside.
00_grub_serial: [curtin, in-target, --, '/root/curtin-send-console-to-serial']
diff --git a/examples/tests/filesystem_battery.yaml b/examples/tests/filesystem_battery.yaml
index 8166360..34657d4 100644
--- a/examples/tests/filesystem_battery.yaml
+++ b/examples/tests/filesystem_battery.yaml
@@ -106,20 +106,24 @@ storage:
path: "/my/tmpfs"
options: size=4194304
fstype: "tmpfs"
+ passno: 1
- id: ramfs1
type: mount
spec: "none"
path: "/my/ramfs"
fstype: "ramfs"
+ passno: 0
- id: bind1
fstype: "none"
options: "bind"
path: "/var/cache"
spec: "/my/bind-over-var-cache"
type: mount
+ freq: 3
- id: bind2
fstype: "none"
options: "bind,ro"
path: "/my/bind-ro-etc"
spec: "/etc"
type: mount
+ freq: 1
diff --git a/examples/tests/multipath-reuse.yaml b/examples/tests/multipath-reuse.yaml
new file mode 100644
index 0000000..24e193e
--- /dev/null
+++ b/examples/tests/multipath-reuse.yaml
@@ -0,0 +1,61 @@
+
+# The point of this test is to test installing to an existing
+# partitions of a multipathed disk
+
+bucket:
+ - &setup |
+ parted /dev/disk/by-id/dm-name-mpatha --script -- \
+ mklabel msdos \
+ mkpart primary ext4 1GiB 4GiB \
+ mkpart primary ext4 4GiB 5GiB \
+ set 1 boot on
+ udevadm settle
+
+early_commands:
+ 00-setup-msdos-ptable: [sh, -exuc, *setup]
+
+install:
+ unmount: disabled
+showtrace: true
+storage:
+ version: 1
+ config:
+ - id: sda
+ type: disk
+ ptable: msdos
+ serial: 'IPR-0 1234567890'
+ name: mpath_a
+ grub_device: true
+ multipath: mpatha
+ path: /dev/disk/by-id/dm-name-mpatha
+ preserve: true
+ - id: sda1
+ type: partition
+ number: 1
+ size: 3GB
+ device: sda
+ flag: boot
+ preserve: true
+ - id: sda2
+ type: partition
+ number: 2
+ size: 1GB
+ device: sda
+ preserve: true
+ - id: sda1_root
+ type: format
+ fstype: ext4
+ volume: sda1
+ - id: sda2_home
+ type: format
+ fstype: ext4
+ volume: sda2
+ - id: sda1_mount
+ type: mount
+ path: /
+ device: sda1_root
+ - id: sda2_mount
+ type: mount
+ path: /home
+ device: sda2_home
+ options: 'defaults,nofail'
diff --git a/examples/tests/partition-existing-raid.yaml b/examples/tests/partition-existing-raid.yaml
new file mode 100644
index 0000000..07cf8d2
--- /dev/null
+++ b/examples/tests/partition-existing-raid.yaml
@@ -0,0 +1,115 @@
+showtrace: true
+
+bucket:
+ - &setup |
+ parted /dev/disk/by-id/virtio-disk-b --script -- \
+ mklabel gpt \
+ mkpart primary 1GiB 9GiB \
+ set 1 boot on
+ parted /dev/disk/by-id/virtio-disk-c --script -- \
+ mklabel gpt \
+ mkpart primary 1GiB 9GiB \
+ set 1 boot on
+ udevadm settle
+ mdadm --create --metadata 1.2 --level 1 -n 2 /dev/md1 --assume-clean \
+ /dev/disk/by-id/virtio-disk-b-part1 /dev/disk/by-id/virtio-disk-c-part1
+ udevadm settle
+ parted /dev/md1 --script -- \
+ mklabel dos
+ udevadm settle
+ mdadm --stop /dev/md1
+ udevadm settle
+
+# Create a RAID now to test curtin's reuse of existing RAIDs.
+early_commands:
+ 00-setup-raid: [sh, -exuc, *setup]
+
+storage:
+ config:
+ - type: disk
+ id: id_disk0
+ serial: disk-a
+ ptable: gpt
+ wipe: superblock
+ - type: disk
+ id: id_disk1
+ serial: disk-b
+ ptable: gpt
+ preserve: true
+ - type: disk
+ id: id_disk2
+ serial: disk-c
+ ptable: gpt
+ preserve: true
+ - type: partition
+ id: id_disk0_part1
+ device: id_disk0
+ flag: boot
+ number: 1
+ size: 512M
+ - type: partition
+ id: id_disk0_part2
+ device: id_disk0
+ number: 2
+ size: 3G
+ - type: partition
+ id: id_disk0_part3
+ device: id_disk0
+ number: 3
+ size: 3G
+ - type: partition
+ id: id_disk1_part1
+ device: id_disk1
+ flag: boot
+ number: 1
+ size: 8G
+ preserve: true
+ - type: partition
+ id: id_disk2_part1
+ device: id_disk2
+ flag: boot
+ number: 1
+ size: 8G
+ preserve: true
+ - type: raid
+ id: raid-md1
+ name: md1
+ raidlevel: raid1
+ devices:
+ - id_disk1_part1
+ - id_disk2_part1
+ spare_devices: []
+ metadata: 1.2
+ preserve: true
+ wipe: superblock
+ ptable: gpt
+ - type: partition
+ id: id_raid1_part1
+ device: raid-md1
+ number: 1
+ size: 7G
+ - type: format
+ id: id_efi_format
+ volume: id_disk0_part1
+ fstype: fat32
+ - type: format
+ id: id_root_format
+ volume: id_disk0_part2
+ fstype: ext4
+ - type: format
+ id: id_raid-md1_format
+ volume: id_raid1_part1
+ fstype: ext4
+ - type: mount
+ device: id_root_format
+ id: id_root_mount
+ path: /
+ - type: mount
+ id: id_efi_mount
+ device: id_efi_format
+ path: /boot/efi
+ - type: mount
+ id: id_raid-md1_mount
+ device: id_raid-md1_format
+ path: /srv
+ version: 1
diff --git a/examples/tests/raid-partition-to-disk.yaml b/examples/tests/raid-partition-to-disk.yaml
index 9c16c26..d3dfd29 100644
--- a/examples/tests/raid-partition-to-disk.yaml
+++ b/examples/tests/raid-partition-to-disk.yaml
@@ -49,6 +49,13 @@ storage:
- disk-b
- disk-c
raidlevel: raid1
+ ptable: gpt
+
+ - type: partition
+ id: md1_part1
+ device: md1
+ number: 1
+ size: 5G
- type: format
id: id_efi_format
@@ -56,7 +63,7 @@ storage:
fstype: fat32
- type: format
id: id_root_format
- volume: md1
+ volume: md1_part1
fstype: ext4
- type: mount
diff --git a/examples/tests/uefi_basic.yaml b/examples/tests/uefi_basic.yaml
index 1f49fa9..91a72ae 100644
--- a/examples/tests/uefi_basic.yaml
+++ b/examples/tests/uefi_basic.yaml
@@ -1,6 +1,10 @@
showtrace: true
early_commands:
+ # Create a small (512KiB) partition to test the fix for LP: #1868177.
+ tinypartition: [
+ "parted", /dev/disk/by-id/virtio-disk-a, "--script", "mklabel", "gpt",
+ "mkpart", "primary", "4096s", "4096s", "5120s"]
# Recreate and test LP:1722322
# Make one disk dirty with an MBR and a storage configuration
# GPT and don't supply wipe: superblock. This will exercise
diff --git a/pylintrc b/pylintrc
index 67a4e01..1b5fa1a 100644
--- a/pylintrc
+++ b/pylintrc
@@ -7,7 +7,7 @@ jobs=0
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=DISTROS\.
+generated-members=redhat,centos,fedora,debian,suse,opensuse,sles,arch,ubuntu,rhel,freebsd,gentoo
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
diff --git a/tests/data/probert_storage_imsm.json b/tests/data/probert_storage_imsm.json
new file mode 100644
index 0000000..3c0d5bc
--- /dev/null
+++ b/tests/data/probert_storage_imsm.json
@@ -0,0 +1,1661 @@
+{
+ "network": {
+ "links": [
+ {
+ "addresses": [],
+ "bond": {
+ "is_master": false,
+ "is_slave": false,
+ "lacp_rate": null,
+ "master": null,
+ "mode": null,
+ "slaves": [],
+ "xmit_hash_policy": null
+ },
+ "bridge": {
+ "interfaces": [],
+ "is_bridge": false,
+ "is_port": false,
+ "options": {}
+ },
+ "netlink_data": {
+ "arptype": 1,
+ "family": 0,
+ "flags": 4099,
+ "ifindex": 2,
+ "is_vlan": false,
+ "name": "eno1"
+ },
+ "type": "eth",
+ "udev_data": {
+ "DEVPATH": "/devices/pci0000:16/0000:16:02.0/0000:17:00.0/net/eno1",
+ "ID_BUS": "pci",
+ "ID_MM_CANDIDATE": "1",
+ "ID_MODEL_FROM_DATABASE": "Ethernet Controller X710 for 10GbE SFP+ (Ethernet 10G 4P X710/I350 rNDC)",
+ "ID_MODEL_ID": "0x1572",
+ "ID_NET_DRIVER": "i40e",
+ "ID_NET_LABEL_ONBOARD": "NIC1",
+ "ID_NET_LINK_FILE": "/run/systemd/network/10-netplan-eno1.link",
+ "ID_NET_NAME": "eno1",
+ "ID_NET_NAME_MAC": "enxf8bc121ef9f0",
+ "ID_NET_NAME_ONBOARD": "eno1",
+ "ID_NET_NAME_PATH": "enp23s0f0",
+ "ID_NET_NAMING_SCHEME": "v245",
+ "ID_OUI_FROM_DATABASE": "Dell Inc.",
+ "ID_PATH": "pci-0000:17:00.0",
+ "ID_PATH_TAG": "pci-0000_17_00_0",
+ "ID_PCI_CLASS_FROM_DATABASE": "Network controller",
+ "ID_PCI_SUBCLASS_FROM_DATABASE": "Ethernet controller",
+ "ID_VENDOR_FROM_DATABASE": "Intel Corporation",
+ "ID_VENDOR_ID": "0x8086",
+ "IFINDEX": "2",
+ "INTERFACE": "eno1",
+ "SUBSYSTEM": "net",
+ "SYSTEMD_ALIAS": "/sys/subsystem/net/devices/eno1",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "7548285",
+ "attrs": {
+ "addr_assign_type": "0",
+ "addr_len": "6",
+ "address": "f8:bc:12:1e:f9:f0",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "carrier": "0",
+ "carrier_changes": "1",
+ "carrier_down_count": "1",
+ "carrier_up_count": "0",
+ "dev_id": "0x0",
+ "dev_port": "0",
+ "device": null,
+ "dormant": "0",
+ "duplex": "unknown",
+ "flags": "0x1003",
+ "gro_flush_timeout": "0",
+ "ifalias": "",
+ "ifindex": "2",
+ "iflink": "2",
+ "link_mode": "0",
+ "mtu": "1500",
+ "name_assign_type": "4",
+ "napi_defer_hard_irqs": "0",
+ "netdev_group": "0",
+ "operstate": "down",
+ "phys_port_id": "f8bc121ef9f0",
+ "phys_port_name": null,
+ "phys_switch_id": null,
+ "proto_down": "0",
+ "speed": "-1",
+ "subsystem": "net",
+ "testing": "0",
+ "tx_queue_len": "1000",
+ "type": "1",
+ "uevent": "INTERFACE=eno1\nIFINDEX=2"
+ }
+ }
+ },
+ {
+ "addresses": [
+ {
+ "address": "10.101.51.44/22",
+ "family": 2,
+ "scope": "global",
+ "source": "dhcp"
+ },
+ {
+ "address": "fe80::fabc:12ff:fe1e:fa10/64",
+ "family": 10,
+ "scope": "link",
+ "source": "static"
+ }
+ ],
+ "bond": {
+ "is_master": false,
+ "is_slave": false,
+ "lacp_rate": null,
+ "master": null,
+ "mode": null,
+ "slaves": [],
+ "xmit_hash_policy": null
+ },
+ "bridge": {
+ "interfaces": [],
+ "is_bridge": false,
+ "is_port": false,
+ "options": {}
+ },
+ "netlink_data": {
+ "arptype": 1,
+ "family": 0,
+ "flags": 69699,
+ "ifindex": 3,
+ "is_vlan": false,
+ "name": "eno3"
+ },
+ "type": "eth",
+ "udev_data": {
+ "DEVPATH": "/devices/pci0000:00/0000:00:1c.0/0000:01:00.0/net/eno3",
+ "ID_BUS": "pci",
+ "ID_MM_CANDIDATE": "1",
+ "ID_MODEL_FROM_DATABASE": "I350 Gigabit Network Connection",
+ "ID_MODEL_ID": "0x1521",
+ "ID_NET_DRIVER": "igb",
+ "ID_NET_LABEL_ONBOARD": "NIC3",
+ "ID_NET_LINK_FILE": "/run/systemd/network/10-netplan-eno3.link",
+ "ID_NET_NAME": "eno3",
+ "ID_NET_NAME_MAC": "enxf8bc121efa10",
+ "ID_NET_NAME_ONBOARD": "eno3",
+ "ID_NET_NAME_PATH": "enp1s0f0",
+ "ID_NET_NAMING_SCHEME": "v245",
+ "ID_OUI_FROM_DATABASE": "Dell Inc.",
+ "ID_PATH": "pci-0000:01:00.0",
+ "ID_PATH_TAG": "pci-0000_01_00_0",
+ "ID_PCI_CLASS_FROM_DATABASE": "Network controller",
+ "ID_PCI_SUBCLASS_FROM_DATABASE": "Ethernet controller",
+ "ID_VENDOR_FROM_DATABASE": "Intel Corporation",
+ "ID_VENDOR_ID": "0x8086",
+ "IFINDEX": "3",
+ "INTERFACE": "eno3",
+ "SUBSYSTEM": "net",
+ "SYSTEMD_ALIAS": "/sys/subsystem/net/devices/eno3",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "8317122",
+ "attrs": {
+ "addr_assign_type": "0",
+ "addr_len": "6",
+ "address": "f8:bc:12:1e:fa:10",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "carrier": "1",
+ "carrier_changes": "2",
+ "carrier_down_count": "1",
+ "carrier_up_count": "1",
+ "dev_id": "0x0",
+ "dev_port": "0",
+ "device": null,
+ "dormant": "0",
+ "duplex": "full",
+ "flags": "0x1003",
+ "gro_flush_timeout": "0",
+ "ifalias": "",
+ "ifindex": "3",
+ "iflink": "3",
+ "link_mode": "0",
+ "mtu": "1500",
+ "name_assign_type": "4",
+ "napi_defer_hard_irqs": "0",
+ "netdev_group": "0",
+ "operstate": "up",
+ "phys_port_id": null,
+ "phys_port_name": null,
+ "phys_switch_id": null,
+ "proto_down": "0",
+ "speed": "100",
+ "subsystem": "net",
+ "testing": "0",
+ "tx_queue_len": "1000",
+ "type": "1",
+ "uevent": "INTERFACE=eno3\nIFINDEX=3"
+ }
+ }
+ },
+ {
+ "addresses": [],
+ "bond": {
+ "is_master": false,
+ "is_slave": false,
+ "lacp_rate": null,
+ "master": null,
+ "mode": null,
+ "slaves": [],
+ "xmit_hash_policy": null
+ },
+ "bridge": {
+ "interfaces": [],
+ "is_bridge": false,
+ "is_port": false,
+ "options": {}
+ },
+ "netlink_data": {
+ "arptype": 1,
+ "family": 0,
+ "flags": 4099,
+ "ifindex": 4,
+ "is_vlan": false,
+ "name": "eno2"
+ },
+ "type": "eth",
+ "udev_data": {
+ "DEVPATH": "/devices/pci0000:16/0000:16:02.0/0000:17:00.1/net/eno2",
+ "ID_BUS": "pci",
+ "ID_MM_CANDIDATE": "1",
+ "ID_MODEL_FROM_DATABASE": "Ethernet Controller X710 for 10GbE SFP+ (Ethernet 10G X710 rNDC)",
+ "ID_MODEL_ID": "0x1572",
+ "ID_NET_DRIVER": "i40e",
+ "ID_NET_LABEL_ONBOARD": "NIC2",
+ "ID_NET_LINK_FILE": "/run/systemd/network/10-netplan-eno2.link",
+ "ID_NET_NAME": "eno2",
+ "ID_NET_NAME_MAC": "enxf8bc121ef9f2",
+ "ID_NET_NAME_ONBOARD": "eno2",
+ "ID_NET_NAME_PATH": "enp23s0f1",
+ "ID_NET_NAMING_SCHEME": "v245",
+ "ID_OUI_FROM_DATABASE": "Dell Inc.",
+ "ID_PATH": "pci-0000:17:00.1",
+ "ID_PATH_TAG": "pci-0000_17_00_1",
+ "ID_PCI_CLASS_FROM_DATABASE": "Network controller",
+ "ID_PCI_SUBCLASS_FROM_DATABASE": "Ethernet controller",
+ "ID_VENDOR_FROM_DATABASE": "Intel Corporation",
+ "ID_VENDOR_ID": "0x8086",
+ "IFINDEX": "4",
+ "INTERFACE": "eno2",
+ "SUBSYSTEM": "net",
+ "SYSTEMD_ALIAS": "/sys/subsystem/net/devices/eno2",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "7712938",
+ "attrs": {
+ "addr_assign_type": "0",
+ "addr_len": "6",
+ "address": "f8:bc:12:1e:f9:f2",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "carrier": "0",
+ "carrier_changes": "1",
+ "carrier_down_count": "1",
+ "carrier_up_count": "0",
+ "dev_id": "0x0",
+ "dev_port": "0",
+ "device": null,
+ "dormant": "0",
+ "duplex": "unknown",
+ "flags": "0x1003",
+ "gro_flush_timeout": "0",
+ "ifalias": "",
+ "ifindex": "4",
+ "iflink": "4",
+ "link_mode": "0",
+ "mtu": "1500",
+ "name_assign_type": "4",
+ "napi_defer_hard_irqs": "0",
+ "netdev_group": "0",
+ "operstate": "down",
+ "phys_port_id": "f8bc121ef9f2",
+ "phys_port_name": null,
+ "phys_switch_id": null,
+ "proto_down": "0",
+ "speed": "-1",
+ "subsystem": "net",
+ "testing": "0",
+ "tx_queue_len": "1000",
+ "type": "1",
+ "uevent": "INTERFACE=eno2\nIFINDEX=4"
+ }
+ }
+ },
+ {
+ "addresses": [],
+ "bond": {
+ "is_master": false,
+ "is_slave": false,
+ "lacp_rate": null,
+ "master": null,
+ "mode": null,
+ "slaves": [],
+ "xmit_hash_policy": null
+ },
+ "bridge": {
+ "interfaces": [],
+ "is_bridge": false,
+ "is_port": false,
+ "options": {}
+ },
+ "netlink_data": {
+ "arptype": 1,
+ "family": 0,
+ "flags": 4099,
+ "ifindex": 5,
+ "is_vlan": false,
+ "name": "eno4"
+ },
+ "type": "eth",
+ "udev_data": {
+ "DEVPATH": "/devices/pci0000:00/0000:00:1c.0/0000:01:00.1/net/eno4",
+ "ID_BUS": "pci",
+ "ID_MM_CANDIDATE": "1",
+ "ID_MODEL_FROM_DATABASE": "I350 Gigabit Network Connection",
+ "ID_MODEL_ID": "0x1521",
+ "ID_NET_DRIVER": "igb",
+ "ID_NET_LABEL_ONBOARD": "NIC4",
+ "ID_NET_LINK_FILE": "/run/systemd/network/10-netplan-eno4.link",
+ "ID_NET_NAME": "eno4",
+ "ID_NET_NAME_MAC": "enxf8bc121efa11",
+ "ID_NET_NAME_ONBOARD": "eno4",
+ "ID_NET_NAME_PATH": "enp1s0f1",
+ "ID_NET_NAMING_SCHEME": "v245",
+ "ID_OUI_FROM_DATABASE": "Dell Inc.",
+ "ID_PATH": "pci-0000:01:00.1",
+ "ID_PATH_TAG": "pci-0000_01_00_1",
+ "ID_PCI_CLASS_FROM_DATABASE": "Network controller",
+ "ID_PCI_SUBCLASS_FROM_DATABASE": "Ethernet controller",
+ "ID_VENDOR_FROM_DATABASE": "Intel Corporation",
+ "ID_VENDOR_ID": "0x8086",
+ "IFINDEX": "5",
+ "INTERFACE": "eno4",
+ "SUBSYSTEM": "net",
+ "SYSTEMD_ALIAS": "/sys/subsystem/net/devices/eno4",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "8073969",
+ "attrs": {
+ "addr_assign_type": "0",
+ "addr_len": "6",
+ "address": "f8:bc:12:1e:fa:11",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "carrier": "0",
+ "carrier_changes": "1",
+ "carrier_down_count": "1",
+ "carrier_up_count": "0",
+ "dev_id": "0x0",
+ "dev_port": "0",
+ "device": null,
+ "dormant": "0",
+ "duplex": "unknown",
+ "flags": "0x1003",
+ "gro_flush_timeout": "0",
+ "ifalias": "",
+ "ifindex": "5",
+ "iflink": "5",
+ "link_mode": "0",
+ "mtu": "1500",
+ "name_assign_type": "4",
+ "napi_defer_hard_irqs": "0",
+ "netdev_group": "0",
+ "operstate": "down",
+ "phys_port_id": null,
+ "phys_port_name": null,
+ "phys_switch_id": null,
+ "proto_down": "0",
+ "speed": "-1",
+ "subsystem": "net",
+ "testing": "0",
+ "tx_queue_len": "1000",
+ "type": "1",
+ "uevent": "INTERFACE=eno4\nIFINDEX=5"
+ }
+ }
+ },
+ {
+ "addresses": [
+ {
+ "address": "127.0.0.1/8",
+ "family": 2,
+ "scope": "host",
+ "source": "static"
+ },
+ {
+ "address": "::1/128",
+ "family": 10,
+ "scope": "host",
+ "source": "static"
+ }
+ ],
+ "bond": {
+ "is_master": false,
+ "is_slave": false,
+ "lacp_rate": null,
+ "master": null,
+ "mode": null,
+ "slaves": [],
+ "xmit_hash_policy": null
+ },
+ "bridge": {
+ "interfaces": [],
+ "is_bridge": false,
+ "is_port": false,
+ "options": {}
+ },
+ "netlink_data": {
+ "arptype": 772,
+ "family": 0,
+ "flags": 65609,
+ "ifindex": 1,
+ "is_vlan": false,
+ "name": "lo"
+ },
+ "type": "lo",
+ "udev_data": {
+ "DEVPATH": "/devices/virtual/net/lo",
+ "ID_MM_CANDIDATE": "1",
+ "ID_NET_LINK_FILE": "/usr/lib/systemd/network/99-default.link",
+ "IFINDEX": "1",
+ "INTERFACE": "lo",
+ "SUBSYSTEM": "net",
+ "USEC_INITIALIZED": "5306867",
+ "attrs": {
+ "addr_assign_type": "0",
+ "addr_len": "6",
+ "address": "00:00:00:00:00:00",
+ "broadcast": "00:00:00:00:00:00",
+ "carrier": "1",
+ "carrier_changes": "0",
+ "carrier_down_count": "0",
+ "carrier_up_count": "0",
+ "dev_id": "0x0",
+ "dev_port": "0",
+ "dormant": "0",
+ "duplex": null,
+ "flags": "0x9",
+ "gro_flush_timeout": "0",
+ "ifalias": "",
+ "ifindex": "1",
+ "iflink": "1",
+ "link_mode": "0",
+ "mtu": "65536",
+ "name_assign_type": null,
+ "napi_defer_hard_irqs": "0",
+ "netdev_group": "0",
+ "operstate": "unknown",
+ "phys_port_id": null,
+ "phys_port_name": null,
+ "phys_switch_id": null,
+ "proto_down": "0",
+ "speed": null,
+ "subsystem": "net",
+ "testing": "0",
+ "tx_queue_len": "1000",
+ "type": "772",
+ "uevent": "INTERFACE=lo\nIFINDEX=1"
+ }
+ }
+ }
+ ],
+ "routes": [
+ {
+ "dst": "default",
+ "family": 2,
+ "ifindex": 3,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "10.101.48.0/22",
+ "family": 2,
+ "ifindex": 3,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "169.254.0.0/16",
+ "family": 2,
+ "ifindex": 3,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "10.101.48.0",
+ "family": 2,
+ "ifindex": 3,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "10.101.51.44",
+ "family": 2,
+ "ifindex": 3,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "10.101.51.255",
+ "family": 2,
+ "ifindex": 3,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "127.0.0.0",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "127.0.0.0/8",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "127.0.0.1",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "127.255.255.255",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "::1",
+ "family": 10,
+ "ifindex": 1,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "fe80::/64",
+ "family": 10,
+ "ifindex": 3,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "::1",
+ "family": 10,
+ "ifindex": 1,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "fe80::fabc:12ff:fe1e:fa10",
+ "family": 10,
+ "ifindex": 3,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "ff00::/8",
+ "family": 10,
+ "ifindex": 3,
+ "table": 255,
+ "type": 5
+ }
+ ]
+ },
+ "storage": {
+ "bcache": {
+ "backing": {},
+ "caching": {}
+ },
+ "blockdev": {
+ "/dev/md126": {
+ "DEVLINKS": "/dev/disk/by-id/md-uuid-ac4bee3d:2607dd80:76f9390f:f2d72638 /dev/md/subvol",
+ "DEVNAME": "/dev/md126",
+ "DEVPATH": "/devices/virtual/block/md126",
+ "DEVTYPE": "disk",
+ "MAJOR": "9",
+ "MD_CONTAINER": "/dev/md/container",
+ "MD_DEVICES": "2",
+ "MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "MD_DEVICE_ev_nvme0n1_ROLE": "0",
+ "MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "MD_DEVICE_ev_nvme1n1_ROLE": "1",
+ "MD_DEVNAME": "subvol",
+ "MD_LEVEL": "raid0",
+ "MD_MEMBER": "0",
+ "MD_UUID": "ac4bee3d:2607dd80:76f9390f:f2d72638",
+ "MINOR": "126",
+ "SUBSYSTEM": "block",
+ "SYSTEMD_READY": "0",
+ "TAGS": ":systemd:",
+ "UDISKS_MD_CONTAINER": "/dev/md/container",
+ "UDISKS_MD_DEVICES": "2",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_ROLE": "0",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_ROLE": "1",
+ "UDISKS_MD_DEVNAME": "subvol",
+ "UDISKS_MD_LEVEL": "raid0",
+ "UDISKS_MD_MEMBER": "0",
+ "UDISKS_MD_UUID": "ac4bee3d:2607dd80:76f9390f:f2d72638",
+ "USEC_INITIALIZED": "9797274925",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "9:126",
+ "discard_alignment": "0",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "1",
+ "removable": "0",
+ "ro": "0",
+ "size": "214748364800",
+ "stat": " 579 0 33208 192 0 0 0 0 0 376 192 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=9\nMINOR=126\nDEVNAME=md126\nDEVTYPE=disk"
+ }
+ },
+ "/dev/md127": {
+ "DEVLINKS": "/dev/disk/by-id/md-uuid-00000000:00000000:00000000:00000000 /dev/md/container",
+ "DEVNAME": "/dev/md127",
+ "DEVPATH": "/devices/virtual/block/md127",
+ "DEVTYPE": "disk",
+ "MAJOR": "9",
+ "MD_DEVICES": "2",
+ "MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "MD_DEVICE_ev_nvme0n1_ROLE": "spare",
+ "MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "MD_DEVICE_ev_nvme1n1_ROLE": "spare",
+ "MD_DEVNAME": "container",
+ "MD_LEVEL": "container",
+ "MD_METADATA": "imsm",
+ "MD_UUID": "00000000:00000000:00000000:00000000",
+ "MINOR": "127",
+ "SUBSYSTEM": "block",
+ "SYSTEMD_READY": "0",
+ "TAGS": ":systemd:",
+ "UDISKS_MD_DEVICES": "2",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_ROLE": "spare",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_ROLE": "spare",
+ "UDISKS_MD_DEVNAME": "container",
+ "UDISKS_MD_LEVEL": "container",
+ "UDISKS_MD_METADATA": "imsm",
+ "UDISKS_MD_UUID": "00000000:00000000:00000000:00000000",
+ "USEC_INITIALIZED": "9783286242",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "9:127",
+ "discard_alignment": "0",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "1",
+ "removable": "0",
+ "ro": "0",
+ "size": "0",
+ "stat": " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=9\nMINOR=127\nDEVNAME=md127\nDEVTYPE=disk"
+ }
+ },
+ "/dev/nvme0n1": {
+ "DEVLINKS": "/dev/disk/by-id/nvme-A400_NVMe_SanDisk_256GB_171877421152 /dev/disk/by-id/nvme-eui.1718774211520001001b444a444677f0 /dev/disk/by-path/pci-0000:b2:05.5-pci-10000:01:00.0-nvme-1 /dev/disk/by-dname/nvme0n1",
+ "DEVNAME": "/dev/nvme0n1",
+ "DEVPATH": "/devices/pci0000:b2/0000:b2:05.5/pci10000:00/10000:00:02.0/10000:01:00.0/nvme/nvme0/nvme0n1",
+ "DEVTYPE": "disk",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_FS_TYPE": "isw_raid_member",
+ "ID_FS_USAGE": "raid",
+ "ID_FS_VERSION": "1.0.00",
+ "ID_MODEL": "A400 NVMe SanDisk 256GB",
+ "ID_PATH": "pci-0000:b2:05.5-pci-10000:01:00.0-nvme-1",
+ "ID_PATH_TAG": "pci-0000_b2_05_5-pci-10000_01_00_0-nvme-1",
+ "ID_REVISION": "A3550012",
+ "ID_SERIAL": "A400 NVMe SanDisk 256GB_171877421152",
+ "ID_SERIAL_SHORT": "171877421152",
+ "ID_WWN": "eui.1718774211520001001b444a444677f0",
+ "MAJOR": "259",
+ "MINOR": "1",
+ "MPATH_SBIN_PATH": "/sbin",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "7921733",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "259:1",
+ "device": null,
+ "discard_alignment": "0",
+ "eui": "00 1b 44 4a 44 46 77 f0",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "nguid": "17187742-1152-0001-001b-444a444677f0",
+ "nsid": "1",
+ "range": "0",
+ "removable": "0",
+ "ro": "0",
+ "size": "256060514304",
+ "stat": " 751 0 41085 256 9 3 68 0 0 748 256 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=259\nMINOR=1\nDEVNAME=nvme0n1\nDEVTYPE=disk",
+ "uuid": "17187742-1152-0001-001b-444a444677f0",
+ "wwid": "eui.1718774211520001001b444a444677f0"
+ }
+ },
+ "/dev/nvme1n1": {
+ "DEVLINKS": "/dev/disk/by-id/nvme-eui.1718774241910001001b444a444601b7 /dev/disk/by-id/nvme-A400_NVMe_SanDisk_256GB_171877424191 /dev/disk/by-dname/nvme1n1 /dev/disk/by-path/pci-0000:b2:05.5-pci-10000:02:00.0-nvme-1",
+ "DEVNAME": "/dev/nvme1n1",
+ "DEVPATH": "/devices/pci0000:b2/0000:b2:05.5/pci10000:00/10000:00:03.0/10000:02:00.0/nvme/nvme1/nvme1n1",
+ "DEVTYPE": "disk",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_FS_TYPE": "isw_raid_member",
+ "ID_FS_USAGE": "raid",
+ "ID_FS_VERSION": "1.0.00",
+ "ID_MODEL": "A400 NVMe SanDisk 256GB",
+ "ID_PATH": "pci-0000:b2:05.5-pci-10000:02:00.0-nvme-1",
+ "ID_PATH_TAG": "pci-0000_b2_05_5-pci-10000_02_00_0-nvme-1",
+ "ID_REVISION": "A3550012",
+ "ID_SERIAL": "A400 NVMe SanDisk 256GB_171877424191",
+ "ID_SERIAL_SHORT": "171877424191",
+ "ID_WWN": "eui.1718774241910001001b444a444601b7",
+ "MAJOR": "259",
+ "MINOR": "0",
+ "MPATH_SBIN_PATH": "/sbin",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "7917708",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "259:0",
+ "device": null,
+ "discard_alignment": "0",
+ "eui": "00 1b 44 4a 44 46 01 b7",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "nguid": "17187742-4191-0001-001b-444a444601b7",
+ "nsid": "1",
+ "range": "0",
+ "removable": "0",
+ "ro": "0",
+ "size": "256060514304",
+ "stat": " 581 12 30543 258 12 1 76 0 0 732 259 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=259\nMINOR=0\nDEVNAME=nvme1n1\nDEVTYPE=disk",
+ "uuid": "17187742-4191-0001-001b-444a444601b7",
+ "wwid": "eui.1718774241910001001b444a444601b7"
+ }
+ },
+ "/dev/sda": {
+ "DEVLINKS": "/dev/disk/by-dname/sda /dev/disk/by-path/pci-0000:00:11.5-ata-4 /dev/disk/by-id/scsi-SATA_TOSHIBA_DT01ACA1_Y6AKDK9MS /dev/disk/by-id/scsi-1ATA_TOSHIBA_DT01ACA100_Y6AKDK9MS /dev/disk/by-id/wwn-0x5000039febf22ff6 /dev/disk/by-id/scsi-0ATA_TOSHIBA_DT01ACA1_Y6AKDK9MS /dev/disk/by-id/ata-TOSHIBA_DT01ACA100_Y6AKDK9MS /dev/disk/by-id/scsi-35000039febf22ff6",
+ "DEVNAME": "/dev/sda",
+ "DEVPATH": "/devices/pci0000:00/0000:00:11.5/ata4/host3/target3:0:0/3:0:0:0/block/sda",
+ "DEVTYPE": "disk",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_MODEL": "TOSHIBA_DT01ACA1",
+ "ID_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "c46c7073-cde8-4c80-a200-340fac7bbf78",
+ "ID_PATH": "pci-0000:00:11.5-ata-4",
+ "ID_PATH_TAG": "pci-0000_00_11_5-ata-4",
+ "ID_REVISION": "A810",
+ "ID_SCSI": "1",
+ "ID_SCSI_INQUIRY": "1",
+ "ID_SERIAL": "TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "ID_SERIAL_SHORT": "Y6AKDK9MS",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "ID_WWN": "0x5000039febf22ff6",
+ "ID_WWN_WITH_EXTENSION": "0x5000039febf22ff6",
+ "MAJOR": "8",
+ "MINOR": "0",
+ "MPATH_SBIN_PATH": "/sbin",
+ "SCSI_IDENT_LUN_ATA": "TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "SCSI_IDENT_LUN_NAA_REG": "5000039febf22ff6",
+ "SCSI_IDENT_LUN_T10": "ATA_TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "SCSI_IDENT_LUN_VENDOR": "Y6AKDK9MS",
+ "SCSI_IDENT_SERIAL": "Y6AKDK9MS",
+ "SCSI_MODEL": "TOSHIBA_DT01ACA1",
+ "SCSI_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "SCSI_REVISION": "A810",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "8004918",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "8:0",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "16",
+ "removable": "0",
+ "ro": "0",
+ "size": "1000204886016",
+ "stat": " 20511 10015 1972534 403354 27098 33949 1614009 494315 0 305256 995343 0 0 0 0 10880 97673",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=0\nDEVNAME=sda\nDEVTYPE=disk"
+ },
+ "partitiontable": {
+ "device": "/dev/sda",
+ "firstlba": 34,
+ "id": "C46C7073-CDE8-4C80-A200-340FAC7BBF78",
+ "label": "gpt",
+ "lastlba": 1953525134,
+ "partitions": [
+ {
+ "node": "/dev/sda1",
+ "size": 1048576,
+ "start": 2048,
+ "type": "C12A7328-F81F-11D2-BA4B-00A0C93EC93B",
+ "uuid": "DC99182A-4C35-4B34-88F9-5F5FE3145232"
+ },
+ {
+ "node": "/dev/sda2",
+ "size": 1952474511,
+ "start": 1050624,
+ "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
+ "uuid": "A72A5EDB-0B7A-4FD6-A928-B02D79DC7C12"
+ }
+ ],
+ "unit": "sectors"
+ }
+ },
+ "/dev/sda1": {
+ "DEVLINKS": "/dev/disk/by-label/efi /dev/disk/by-dname/sda-part1 /dev/disk/by-id/scsi-1ATA_TOSHIBA_DT01ACA100_Y6AKDK9MS-part1 /dev/disk/by-id/scsi-0ATA_TOSHIBA_DT01ACA1_Y6AKDK9MS-part1 /dev/disk/by-id/scsi-SATA_TOSHIBA_DT01ACA1_Y6AKDK9MS-part1 /dev/disk/by-id/scsi-35000039febf22ff6-part1 /dev/disk/by-partuuid/dc99182a-4c35-4b34-88f9-5f5fe3145232 /dev/disk/by-id/wwn-0x5000039febf22ff6-part1 /dev/disk/by-uuid/AAE5-D3B4 /dev/disk/by-id/ata-TOSHIBA_DT01ACA100_Y6AKDK9MS-part1 /dev/disk/by-path/pci-0000:00:11.5-ata-4-part1",
+ "DEVNAME": "/dev/sda1",
+ "DEVPATH": "/devices/pci0000:00/0000:00:11.5/ata4/host3/target3:0:0/3:0:0:0/block/sda/sda1",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_FS_LABEL": "efi",
+ "ID_FS_LABEL_ENC": "efi",
+ "ID_FS_TYPE": "vfat",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "AAE5-D3B4",
+ "ID_FS_UUID_ENC": "AAE5-D3B4",
+ "ID_FS_VERSION": "FAT32",
+ "ID_MODEL": "TOSHIBA_DT01ACA1",
+ "ID_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "ID_PART_ENTRY_DISK": "8:0",
+ "ID_PART_ENTRY_NUMBER": "1",
+ "ID_PART_ENTRY_OFFSET": "2048",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "1048576",
+ "ID_PART_ENTRY_TYPE": "c12a7328-f81f-11d2-ba4b-00a0c93ec93b",
+ "ID_PART_ENTRY_UUID": "dc99182a-4c35-4b34-88f9-5f5fe3145232",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "c46c7073-cde8-4c80-a200-340fac7bbf78",
+ "ID_PATH": "pci-0000:00:11.5-ata-4",
+ "ID_PATH_TAG": "pci-0000_00_11_5-ata-4",
+ "ID_REVISION": "A810",
+ "ID_SCSI": "1",
+ "ID_SCSI_INQUIRY": "1",
+ "ID_SERIAL": "TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "ID_SERIAL_SHORT": "Y6AKDK9MS",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "ID_WWN": "0x5000039febf22ff6",
+ "ID_WWN_WITH_EXTENSION": "0x5000039febf22ff6",
+ "MAJOR": "8",
+ "MINOR": "1",
+ "PARTN": "1",
+ "SCSI_IDENT_LUN_ATA": "TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "SCSI_IDENT_LUN_NAA_REG": "5000039febf22ff6",
+ "SCSI_IDENT_LUN_T10": "ATA_TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "SCSI_IDENT_LUN_VENDOR": "Y6AKDK9MS",
+ "SCSI_IDENT_SERIAL": "Y6AKDK9MS",
+ "SCSI_MODEL": "TOSHIBA_DT01ACA1",
+ "SCSI_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "SCSI_REVISION": "A810",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "UDISKS_IGNORE": "1",
+ "USEC_INITIALIZED": "8108731",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "8:1",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "1",
+ "ro": "0",
+ "size": "536870912",
+ "start": "2048",
+ "stat": " 181 1013 18926 2661 1 0 1 0 0 2220 2661 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=1\nDEVNAME=sda1\nDEVTYPE=partition\nPARTN=1"
+ },
+ "partitiontable": {
+ "device": "/dev/sda1",
+ "id": "0x00000000",
+ "label": "dos",
+ "partitions": [],
+ "unit": "sectors"
+ }
+ },
+ "/dev/sda2": {
+ "DEVLINKS": "/dev/disk/by-id/scsi-0ATA_TOSHIBA_DT01ACA1_Y6AKDK9MS-part2 /dev/disk/by-id/ata-TOSHIBA_DT01ACA100_Y6AKDK9MS-part2 /dev/disk/by-partuuid/a72a5edb-0b7a-4fd6-a928-b02d79dc7c12 /dev/disk/by-dname/sda-part2 /dev/disk/by-id/scsi-SATA_TOSHIBA_DT01ACA1_Y6AKDK9MS-part2 /dev/disk/by-id/scsi-35000039febf22ff6-part2 /dev/disk/by-uuid/651d27d7-13f2-4ca6-a798-ca8deb8f4f1c /dev/disk/by-id/wwn-0x5000039febf22ff6-part2 /dev/disk/by-path/pci-0000:00:11.5-ata-4-part2 /dev/disk/by-label/root /dev/disk/by-id/scsi-1ATA_TOSHIBA_DT01ACA100_Y6AKDK9MS-part2",
+ "DEVNAME": "/dev/sda2",
+ "DEVPATH": "/devices/pci0000:00/0000:00:11.5/ata4/host3/target3:0:0/3:0:0:0/block/sda/sda2",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_FS_LABEL": "root",
+ "ID_FS_LABEL_ENC": "root",
+ "ID_FS_TYPE": "ext4",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "651d27d7-13f2-4ca6-a798-ca8deb8f4f1c",
+ "ID_FS_UUID_ENC": "651d27d7-13f2-4ca6-a798-ca8deb8f4f1c",
+ "ID_FS_VERSION": "1.0",
+ "ID_MODEL": "TOSHIBA_DT01ACA1",
+ "ID_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "ID_PART_ENTRY_DISK": "8:0",
+ "ID_PART_ENTRY_NUMBER": "2",
+ "ID_PART_ENTRY_OFFSET": "1050624",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "1952474511",
+ "ID_PART_ENTRY_TYPE": "0fc63daf-8483-4772-8e79-3d69d8477de4",
+ "ID_PART_ENTRY_UUID": "a72a5edb-0b7a-4fd6-a928-b02d79dc7c12",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "c46c7073-cde8-4c80-a200-340fac7bbf78",
+ "ID_PATH": "pci-0000:00:11.5-ata-4",
+ "ID_PATH_TAG": "pci-0000_00_11_5-ata-4",
+ "ID_REVISION": "A810",
+ "ID_SCSI": "1",
+ "ID_SCSI_INQUIRY": "1",
+ "ID_SERIAL": "TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "ID_SERIAL_SHORT": "Y6AKDK9MS",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "ID_WWN": "0x5000039febf22ff6",
+ "ID_WWN_WITH_EXTENSION": "0x5000039febf22ff6",
+ "MAJOR": "8",
+ "MINOR": "2",
+ "PARTN": "2",
+ "SCSI_IDENT_LUN_ATA": "TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "SCSI_IDENT_LUN_NAA_REG": "5000039febf22ff6",
+ "SCSI_IDENT_LUN_T10": "ATA_TOSHIBA_DT01ACA100_Y6AKDK9MS",
+ "SCSI_IDENT_LUN_VENDOR": "Y6AKDK9MS",
+ "SCSI_IDENT_SERIAL": "Y6AKDK9MS",
+ "SCSI_MODEL": "TOSHIBA_DT01ACA1",
+ "SCSI_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "SCSI_REVISION": "A810",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "8091231",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "8:2",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "2",
+ "ro": "0",
+ "size": "999666949632",
+ "start": "1050624",
+ "stat": " 20119 9002 1940791 387824 20845 33949 1614008 492383 0 293872 880207 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=2\nDEVNAME=sda2\nDEVTYPE=partition\nPARTN=2"
+ }
+ },
+ "/dev/sdb": {
+ "DEVLINKS": "/dev/disk/by-id/scsi-0ATA_TOSHIBA_DT01ACA1_Z6NVLPBMS /dev/disk/by-id/wwn-0x5000039fedcc17ce /dev/disk/by-path/pci-0000:00:11.5-ata-5 /dev/disk/by-id/scsi-35000039fedcc17ce /dev/disk/by-dname/sdb /dev/disk/by-id/scsi-SATA_TOSHIBA_DT01ACA1_Z6NVLPBMS /dev/disk/by-id/scsi-1ATA_TOSHIBA_DT01ACA100_Z6NVLPBMS /dev/disk/by-id/ata-TOSHIBA_DT01ACA100_Z6NVLPBMS",
+ "DEVNAME": "/dev/sdb",
+ "DEVPATH": "/devices/pci0000:00/0000:00:11.5/ata5/host4/target4:0:0/4:0:0:0/block/sdb",
+ "DEVTYPE": "disk",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_MODEL": "TOSHIBA_DT01ACA1",
+ "ID_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "ID_PATH": "pci-0000:00:11.5-ata-5",
+ "ID_PATH_TAG": "pci-0000_00_11_5-ata-5",
+ "ID_REVISION": "A810",
+ "ID_SCSI": "1",
+ "ID_SCSI_INQUIRY": "1",
+ "ID_SERIAL": "TOSHIBA_DT01ACA100_Z6NVLPBMS",
+ "ID_SERIAL_SHORT": "Z6NVLPBMS",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "ID_WWN": "0x5000039fedcc17ce",
+ "ID_WWN_WITH_EXTENSION": "0x5000039fedcc17ce",
+ "MAJOR": "8",
+ "MINOR": "16",
+ "MPATH_SBIN_PATH": "/sbin",
+ "SCSI_IDENT_LUN_ATA": "TOSHIBA_DT01ACA100_Z6NVLPBMS",
+ "SCSI_IDENT_LUN_NAA_REG": "5000039fedcc17ce",
+ "SCSI_IDENT_LUN_T10": "ATA_TOSHIBA_DT01ACA100_Z6NVLPBMS",
+ "SCSI_IDENT_LUN_VENDOR": "Z6NVLPBMS",
+ "SCSI_IDENT_SERIAL": "Z6NVLPBMS",
+ "SCSI_MODEL": "TOSHIBA_DT01ACA1",
+ "SCSI_MODEL_ENC": "TOSHIBA\\x20DT01ACA1",
+ "SCSI_REVISION": "A810",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "8444710",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "8:16",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "16",
+ "removable": "0",
+ "ro": "0",
+ "size": "1000204886016",
+ "stat": " 772 0 40882 25598 0 0 0 0 0 25956 25598 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=16\nDEVNAME=sdb\nDEVTYPE=disk"
+ }
+ },
+ "/dev/sdc": {
+ "DEVLINKS": "/dev/disk/by-dname/sdc /dev/disk/by-path/pci-0000:00:14.0-usb-0:1:1.0-scsi-0:0:0:0 /dev/disk/by-id/usb-SanDisk_Ultra_USB_3.0_4C530000021125101524-0:0",
+ "DEVNAME": "/dev/sdc",
+ "DEVPATH": "/devices/pci0000:00/0000:00:14.0/usb2/2-1/2-1:1.0/host6/target6:0:0/6:0:0:0/block/sdc",
+ "DEVTYPE": "disk",
+ "ID_BUS": "usb",
+ "ID_INSTANCE": "0:0",
+ "ID_MODEL": "Ultra_USB_3.0",
+ "ID_MODEL_ENC": "Ultra\\x20USB\\x203.0\\x20\\x20\\x20",
+ "ID_MODEL_ID": "5591",
+ "ID_PATH": "pci-0000:00:14.0-usb-0:1:1.0-scsi-0:0:0:0",
+ "ID_PATH_TAG": "pci-0000_00_14_0-usb-0_1_1_0-scsi-0_0_0_0",
+ "ID_REVISION": "1.00",
+ "ID_SCSI": "1",
+ "ID_SCSI_INQUIRY": "1",
+ "ID_SERIAL": "SanDisk_Ultra_USB_3.0_4C530000021125101524-0:0",
+ "ID_SERIAL_SHORT": "4C530000021125101524",
+ "ID_TYPE": "disk",
+ "ID_USB_DRIVER": "usb-storage",
+ "ID_USB_INTERFACES": ":080650:",
+ "ID_USB_INTERFACE_NUM": "00",
+ "ID_VENDOR": "SanDisk",
+ "ID_VENDOR_ENC": "SanDisk\\x20",
+ "ID_VENDOR_ID": "0781",
+ "MAJOR": "8",
+ "MINOR": "32",
+ "MPATH_SBIN_PATH": "/sbin",
+ "SCSI_MODEL": "Ultra_USB_3.0",
+ "SCSI_MODEL_ENC": "Ultra\\x20USB\\x203.0\\x20\\x20\\x20",
+ "SCSI_REVISION": "1.00",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "SanDisk",
+ "SCSI_VENDOR_ENC": "SanDisk\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "8372938",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "51",
+ "dev": "8:32",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "media_change",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "16",
+ "removable": "1",
+ "ro": "0",
+ "size": "15376318464",
+ "stat": " 668 0 37664 590 0 0 0 0 0 704 590 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=32\nDEVNAME=sdc\nDEVTYPE=disk"
+ }
+ },
+ "/dev/sr0": {
+ "DEVLINKS": "/dev/cdrom /dev/disk/by-path/pci-0000:00:17.0-ata-8 /dev/disk/by-id/wwn-0x5001480000000000 /dev/dvdrw /dev/cdrw /dev/disk/by-id/ata-HL-DT-ST_DVD+_-RW_GU90N_KZKGAU90643 /dev/dvd",
+ "DEVNAME": "/dev/sr0",
+ "DEVPATH": "/devices/pci0000:00/0000:00:17.0/ata14/host14/target14:0:0/14:0:0:0/block/sr0",
+ "DEVTYPE": "disk",
+ "ID_ATA": "1",
+ "ID_ATA_FEATURE_SET_PM": "1",
+ "ID_ATA_FEATURE_SET_PM_ENABLED": "1",
+ "ID_ATA_SATA": "1",
+ "ID_ATA_SATA_SIGNAL_RATE_GEN1": "1",
+ "ID_BUS": "ata",
+ "ID_CDROM": "1",
+ "ID_CDROM_CD": "1",
+ "ID_CDROM_CD_R": "1",
+ "ID_CDROM_CD_RW": "1",
+ "ID_CDROM_DVD": "1",
+ "ID_CDROM_DVD_PLUS_R": "1",
+ "ID_CDROM_DVD_PLUS_RW": "1",
+ "ID_CDROM_DVD_PLUS_R_DL": "1",
+ "ID_CDROM_DVD_R": "1",
+ "ID_CDROM_DVD_RAM": "1",
+ "ID_CDROM_DVD_RW": "1",
+ "ID_CDROM_MRW": "1",
+ "ID_CDROM_MRW_W": "1",
+ "ID_FOR_SEAT": "block-pci-0000_00_17_0-ata-8",
+ "ID_MODEL": "HL-DT-ST_DVD+_-RW_GU90N",
+ "ID_MODEL_ENC": "HL-DT-ST\\x20DVD+\\x2f-RW\\x20GU90N\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20\\x20",
+ "ID_PATH": "pci-0000:00:17.0-ata-8",
+ "ID_PATH_TAG": "pci-0000_00_17_0-ata-8",
+ "ID_REVISION": "A3C0",
+ "ID_SCSI": "1",
+ "ID_SCSI_INQUIRY": "1",
+ "ID_SERIAL": "HL-DT-ST_DVD+_-RW_GU90N_KZKGAU90643",
+ "ID_SERIAL_SHORT": "KZKGAU90643",
+ "ID_TYPE": "cd",
+ "ID_VENDOR": "HL-DT-ST",
+ "ID_VENDOR_ENC": "HL-DT-ST",
+ "ID_WWN": "0x5001480000000000",
+ "ID_WWN_WITH_EXTENSION": "0x5001480000000000",
+ "MAJOR": "11",
+ "MINOR": "0",
+ "SCSI_MODEL": "DVD+-RW_GU90N",
+ "SCSI_MODEL_ENC": "DVD+-RW\\x20GU90N\\x20\\x20\\x20",
+ "SCSI_REVISION": "A3C0",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "cd/dvd",
+ "SCSI_VENDOR": "HL-DT-ST",
+ "SCSI_VENDOR_ENC": "HL-DT-ST",
+ "SUBSYSTEM": "block",
+ "SYSTEMD_MOUNT_DEVICE_BOUND": "1",
+ "TAGS": ":uaccess:systemd:seat:",
+ "USEC_INITIALIZED": "10374322",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "119",
+ "dev": "11:0",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "media_change eject_request",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "1",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "1",
+ "removable": "1",
+ "ro": "0",
+ "size": "1073741312",
+ "stat": " 11 0 5 27 0 0 0 0 0 52 27 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=11\nMINOR=0\nDEVNAME=sr0\nDEVTYPE=disk"
+ }
+ }
+ },
+ "dasd": {},
+ "dmcrypt": {},
+ "filesystem": {
+ "/dev/sda1": {
+ "LABEL": "efi",
+ "LABEL_ENC": "efi",
+ "TYPE": "vfat",
+ "USAGE": "filesystem",
+ "UUID": "AAE5-D3B4",
+ "UUID_ENC": "AAE5-D3B4",
+ "VERSION": "FAT32"
+ },
+ "/dev/sda2": {
+ "LABEL": "root",
+ "LABEL_ENC": "root",
+ "TYPE": "ext4",
+ "USAGE": "filesystem",
+ "UUID": "651d27d7-13f2-4ca6-a798-ca8deb8f4f1c",
+ "UUID_ENC": "651d27d7-13f2-4ca6-a798-ca8deb8f4f1c",
+ "VERSION": "1.0"
+ }
+ },
+ "lvm": {},
+ "mount": [
+ {
+ "children": [
+ {
+ "children": [
+ {
+ "fstype": "securityfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "securityfs",
+ "target": "/sys/kernel/security"
+ },
+ {
+ "children": [
+ {
+ "fstype": "cgroup2",
+ "options": "rw,nosuid,nodev,noexec,relatime,nsdelegate",
+ "source": "cgroup2",
+ "target": "/sys/fs/cgroup/unified"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,xattr,name=systemd",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/systemd"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,cpu,cpuacct",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/cpu,cpuacct"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,net_cls,net_prio",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/net_cls,net_prio"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,devices",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/devices"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,rdma",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/rdma"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,perf_event",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/perf_event"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,pids",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/pids"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,freezer",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/freezer"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,blkio",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/blkio"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,hugetlb",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/hugetlb"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,memory",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/memory"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,cpuset",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/cpuset"
+ }
+ ],
+ "fstype": "tmpfs",
+ "options": "ro,nosuid,nodev,noexec,mode=755",
+ "source": "tmpfs",
+ "target": "/sys/fs/cgroup"
+ },
+ {
+ "fstype": "pstore",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "pstore",
+ "target": "/sys/fs/pstore"
+ },
+ {
+ "fstype": "efivarfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "efivarfs",
+ "target": "/sys/firmware/efi/efivars"
+ },
+ {
+ "fstype": "bpf",
+ "options": "rw,nosuid,nodev,noexec,relatime,mode=700",
+ "source": "none",
+ "target": "/sys/fs/bpf"
+ },
+ {
+ "fstype": "debugfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "debugfs",
+ "target": "/sys/kernel/debug"
+ },
+ {
+ "fstype": "tracefs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "tracefs",
+ "target": "/sys/kernel/tracing"
+ },
+ {
+ "fstype": "fusectl",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "fusectl",
+ "target": "/sys/fs/fuse/connections"
+ },
+ {
+ "fstype": "configfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "configfs",
+ "target": "/sys/kernel/config"
+ }
+ ],
+ "fstype": "sysfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "sysfs",
+ "target": "/sys"
+ },
+ {
+ "children": [
+ {
+ "fstype": "autofs",
+ "options": "rw,relatime,fd=28,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=21402",
+ "source": "systemd-1",
+ "target": "/proc/sys/fs/binfmt_misc"
+ }
+ ],
+ "fstype": "proc",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "proc",
+ "target": "/proc"
+ },
+ {
+ "children": [
+ {
+ "fstype": "devpts",
+ "options": "rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000",
+ "source": "devpts",
+ "target": "/dev/pts"
+ },
+ {
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev",
+ "source": "tmpfs",
+ "target": "/dev/shm"
+ },
+ {
+ "fstype": "mqueue",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "mqueue",
+ "target": "/dev/mqueue"
+ },
+ {
+ "fstype": "hugetlbfs",
+ "options": "rw,relatime,pagesize=2M",
+ "source": "hugetlbfs",
+ "target": "/dev/hugepages"
+ }
+ ],
+ "fstype": "devtmpfs",
+ "options": "rw,nosuid,noexec,relatime,size=3762756k,nr_inodes=940689,mode=755",
+ "source": "udev",
+ "target": "/dev"
+ },
+ {
+ "children": [
+ {
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,noexec,relatime,size=5120k",
+ "source": "tmpfs",
+ "target": "/run/lock"
+ },
+ {
+ "children": [
+ {
+ "fstype": "fuse.gvfsd-fuse",
+ "options": "rw,nosuid,nodev,relatime,user_id=1000,group_id=1000",
+ "source": "gvfsd-fuse",
+ "target": "/run/user/1000/gvfs"
+ }
+ ],
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,relatime,size=758920k,mode=700,uid=1000,gid=1000",
+ "source": "tmpfs",
+ "target": "/run/user/1000"
+ }
+ ],
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,noexec,relatime,size=758924k,mode=755",
+ "source": "tmpfs",
+ "target": "/run"
+ },
+ {
+ "fstype": "vfat",
+ "options": "rw,relatime,fmask=0022,dmask=0022,codepage=437,iocharset=iso8859-1,shortname=mixed,errors=remount-ro",
+ "source": "/dev/sda1",
+ "target": "/boot/efi"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop0",
+ "target": "/snap/core18/1997"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop3",
+ "target": "/snap/snap-store/518"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop1",
+ "target": "/snap/gnome-3-28-1804/145"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop2",
+ "target": "/snap/gnome-3-34-1804/60"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop4",
+ "target": "/snap/snapd/11588"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop6",
+ "target": "/snap/gnome-3-34-1804/66"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop5",
+ "target": "/snap/snap-store/498"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop8",
+ "target": "/snap/snapd/11841"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop9",
+ "target": "/snap/gtk-common-themes/1515"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop10",
+ "target": "/snap/gnome-3-28-1804/128"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop7",
+ "target": "/snap/gtk-common-themes/1514"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop11",
+ "target": "/snap/core18/2066"
+ }
+ ],
+ "fstype": "ext4",
+ "options": "rw,relatime",
+ "source": "/dev/sda2",
+ "target": "/"
+ }
+ ],
+ "multipath": {
+ "paths": [
+ {
+ "device": "sda",
+ "host_adapter": "[undef]",
+ "host_wwnn": "[undef]",
+ "host_wwpn": "[undef]",
+ "multipath": "[orphan]",
+ "serial": "Y6AKDK9MS",
+ "target_wwnn": "ata-4.00",
+ "target_wwpn": "[undef]"
+ },
+ {
+ "device": "sdb",
+ "host_adapter": "[undef]",
+ "host_wwnn": "[undef]",
+ "host_wwpn": "[undef]",
+ "multipath": "[orphan]",
+ "serial": "Z6NVLPBMS",
+ "target_wwnn": "ata-5.00",
+ "target_wwpn": "[undef]"
+ },
+ {
+ "device": "nvme0n1",
+ "host_adapter": "[undef]",
+ "host_wwnn": "[undef]",
+ "host_wwpn": "[undef]",
+ "multipath": "[orphan]",
+ "serial": "171877421152 ",
+ "target_wwnn": "[undef]",
+ "target_wwpn": "[undef]"
+ },
+ {
+ "device": "nvme1n1",
+ "host_adapter": "[undef]",
+ "host_wwnn": "[undef]",
+ "host_wwpn": "[undef]",
+ "multipath": "[orphan]",
+ "serial": "171877424191 ",
+ "target_wwnn": "[undef]",
+ "target_wwpn": "[undef]"
+ }
+ ]
+ },
+ "raid": {
+ "/dev/md126": {
+ "DEVLINKS": "/dev/disk/by-id/md-uuid-ac4bee3d:2607dd80:76f9390f:f2d72638 /dev/md/subvol",
+ "DEVNAME": "/dev/md126",
+ "DEVPATH": "/devices/virtual/block/md126",
+ "DEVTYPE": "disk",
+ "MAJOR": "9",
+ "MD_CONTAINER": "/dev/md/container",
+ "MD_DEVICES": "2",
+ "MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "MD_DEVICE_ev_nvme0n1_ROLE": "0",
+ "MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "MD_DEVICE_ev_nvme1n1_ROLE": "1",
+ "MD_DEVNAME": "subvol",
+ "MD_LEVEL": "raid0",
+ "MD_MEMBER": "0",
+ "MD_UUID": "ac4bee3d:2607dd80:76f9390f:f2d72638",
+ "MINOR": "126",
+ "SUBSYSTEM": "block",
+ "SYSTEMD_READY": "0",
+ "TAGS": ":systemd:",
+ "UDISKS_MD_CONTAINER": "/dev/md/container",
+ "UDISKS_MD_DEVICES": "2",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_ROLE": "0",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_ROLE": "1",
+ "UDISKS_MD_DEVNAME": "subvol",
+ "UDISKS_MD_LEVEL": "raid0",
+ "UDISKS_MD_MEMBER": "0",
+ "UDISKS_MD_UUID": "ac4bee3d:2607dd80:76f9390f:f2d72638",
+ "USEC_INITIALIZED": "9797274925",
+ "container": "/dev/md/container",
+ "raidlevel": "raid0",
+ "size": "214748364800"
+ },
+ "/dev/md127": {
+ "DEVLINKS": "/dev/disk/by-id/md-uuid-00000000:00000000:00000000:00000000 /dev/md/container",
+ "DEVNAME": "/dev/md127",
+ "DEVPATH": "/devices/virtual/block/md127",
+ "DEVTYPE": "disk",
+ "MAJOR": "9",
+ "MD_DEVICES": "2",
+ "MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "MD_DEVICE_ev_nvme0n1_ROLE": "spare",
+ "MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "MD_DEVICE_ev_nvme1n1_ROLE": "spare",
+ "MD_DEVNAME": "container",
+ "MD_LEVEL": "container",
+ "MD_METADATA": "imsm",
+ "MD_UUID": "00000000:00000000:00000000:00000000",
+ "MINOR": "127",
+ "SUBSYSTEM": "block",
+ "SYSTEMD_READY": "0",
+ "TAGS": ":systemd:",
+ "UDISKS_MD_DEVICES": "2",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_DEV": "/dev/nvme0n1",
+ "UDISKS_MD_DEVICE_ev_nvme0n1_ROLE": "spare",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_DEV": "/dev/nvme1n1",
+ "UDISKS_MD_DEVICE_ev_nvme1n1_ROLE": "spare",
+ "UDISKS_MD_DEVNAME": "container",
+ "UDISKS_MD_LEVEL": "container",
+ "UDISKS_MD_METADATA": "imsm",
+ "UDISKS_MD_UUID": "00000000:00000000:00000000:00000000",
+ "USEC_INITIALIZED": "9783286242",
+ "devices": [
+ "/dev/nvme0n1",
+ "/dev/nvme1n1"
+ ],
+ "raidlevel": "container",
+ "size": "0",
+ "spare_devices": []
+ }
+ },
+ "zfs": {
+ "zpools": {}
+ }
+ }
+}
diff --git a/tests/data/probert_storage_nvme_uuid.json b/tests/data/probert_storage_nvme_uuid.json
new file mode 100644
index 0000000..c54239b
--- /dev/null
+++ b/tests/data/probert_storage_nvme_uuid.json
@@ -0,0 +1,310 @@
+{
+ "blockdev": {
+ "/dev/sr0": {
+ "DEVLINKS": "/dev/disk/by-id/usb-0ea0_1111 /dev/dvd /dev/disk/by-path/pci-0000:00:14.0-usb-0:7.2:1.0-scsi-0:0:0:0 /dev/cdrom /dev/disk/by-uuid/2020-04-05-09-44-04-00 /dev/disk/by-label/Ubuntu-Server\\x2020.04\\x20LTS\\x20amd64",
+ "DEVNAME": "/dev/sr0",
+ "DEVPATH": "/devices/pci0000:00/0000:00:14.0/usb1/1-7/1-7.2/1-7.2:1.0/host14/target14:0:0/14:0:0:0/block/sr0",
+ "DEVTYPE": "disk",
+ "ID_BUS": "usb",
+ "ID_CDROM": "1",
+ "ID_CDROM_CD": "1",
+ "ID_CDROM_DVD": "1",
+ "ID_CDROM_MEDIA": "1",
+ "ID_CDROM_MEDIA_CD": "1",
+ "ID_CDROM_MEDIA_SESSION_COUNT": "1",
+ "ID_CDROM_MEDIA_TRACK_COUNT": "1",
+ "ID_CDROM_MEDIA_TRACK_COUNT_DATA": "1",
+ "ID_CDROM_MRW": "1",
+ "ID_CDROM_MRW_W": "1",
+ "ID_FOR_SEAT": "block-pci-0000_00_14_0-usb-0_7_2_1_0-scsi-0_0_0_0",
+ "ID_FS_BOOT_SYSTEM_ID": "EL\\x20TORITO\\x20SPECIFICATION",
+ "ID_FS_LABEL": "Ubuntu-Server_20.04_LTS_amd64",
+ "ID_FS_LABEL_ENC": "Ubuntu-Server\\x2020.04\\x20LTS\\x20amd64",
+ "ID_FS_TYPE": "iso9660",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "2020-04-05-09-44-04-00",
+ "ID_FS_UUID_ENC": "2020-04-05-09-44-04-00",
+ "ID_FS_VERSION": "Joliet Extension",
+ "ID_MODEL": "1111",
+ "ID_MODEL_ENC": "1111",
+ "ID_MODEL_ID": "1111",
+ "ID_PART_TABLE_TYPE": "dos",
+ "ID_PART_TABLE_UUID": "15e92274",
+ "ID_PATH": "pci-0000:00:14.0-usb-0:7.2:1.0-scsi-0:0:0:0",
+ "ID_PATH_TAG": "pci-0000_00_14_0-usb-0_7_2_1_0-scsi-0_0_0_0",
+ "ID_REVISION": "0200",
+ "ID_SCSI": "1",
+ "ID_SCSI_INQUIRY": "1",
+ "ID_SERIAL": "0ea0_1111",
+ "ID_TYPE": "generic",
+ "ID_USB_DRIVER": "usb-storage",
+ "ID_USB_INTERFACES": ":080550:",
+ "ID_USB_INTERFACE_NUM": "00",
+ "ID_VENDOR": "0ea0",
+ "ID_VENDOR_ENC": "0ea0",
+ "ID_VENDOR_ID": "0ea0",
+ "MAJOR": "11",
+ "MINOR": "0",
+ "SCSI_MODEL": "Virtual_CDROM",
+ "SCSI_MODEL_ENC": "Virtual\\x20CDROM\\x20\\x20\\x20",
+ "SCSI_REVISION": "3000",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "cd/dvd",
+ "SCSI_VENDOR": "IPMI",
+ "SCSI_VENDOR_ENC": "IPMI\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "SYSTEMD_MOUNT_DEVICE_BOUND": "1",
+ "TAGS": ":uaccess:systemd:seat:",
+ "USEC_INITIALIZED": "8087244",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "119",
+ "dev": "11:0",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "media_change eject_request",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "1",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "1",
+ "removable": "1",
+ "ro": "0",
+ "size": "963641344",
+ "stat": " 15299 95 1879276 1488515 0 0 0 0 0 62500 1457848 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=11\nMINOR=0\nDEVNAME=sr0\nDEVTYPE=disk"
+ },
+ "partitiontable": {
+ "label": "dos",
+ "id": "0x15e92274",
+ "device": "/dev/sr0",
+ "unit": "sectors",
+ "partitions": [
+ {
+ "node": "/dev/sr0p1",
+ "start": 0,
+ "size": 1882112,
+ "type": "0",
+ "bootable": true
+ },
+ {
+ "node": "/dev/sr0p2",
+ "start": 20464,
+ "size": 8000,
+ "type": "ef"
+ }
+ ]
+ }
+ },
+ "/dev/nvme0n1": {
+ "DEVLINKS": "/dev/disk/by-id/nvme-SAMSUNG_MZPLL3T2HAJQ-00005_S4CCNE0M300015 /dev/disk/by-id/nvme-uuid.344343304d3000150025384500000004",
+ "DEVNAME": "/dev/nvme0n1",
+ "DEVPATH": "/devices/virtual/nvme-subsystem/nvme-subsys0/nvme0n1",
+ "DEVTYPE": "disk",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_MODEL": "SAMSUNG MZPLL3T2HAJQ-00005",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "4bac57b7-307b-4b0e-a853-e0232c6fb955",
+ "ID_REVISION": "GPJA0B3Q",
+ "ID_SERIAL": "SAMSUNG MZPLL3T2HAJQ-00005_S4CCNE0M300015",
+ "ID_SERIAL_SHORT": "S4CCNE0M300015",
+ "ID_WWN": "uuid.344343304d3000150025384500000004",
+ "MAJOR": "259",
+ "MINOR": "1",
+ "MPATH_SBIN_PATH": "/sbin",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "5210525",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "259:1",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "nguid": "34434330-4d30-0015-0025-384500000004",
+ "nsid": "1",
+ "range": "0",
+ "removable": "0",
+ "ro": "0",
+ "size": "3200631791616",
+ "stat": " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=259\nMINOR=1\nDEVNAME=nvme0n1\nDEVTYPE=disk",
+ "uuid": "34434330-4d30-0015-0025-384500000004",
+ "wwid": "uuid.344343304d3000150025384500000004"
+ },
+ "partitiontable": {
+ "label": "gpt",
+ "id": "4BAC57B7-307B-4B0E-A853-E0232C6FB955",
+ "device": "/dev/nvme0n1",
+ "unit": "sectors",
+ "firstlba": 34,
+ "lastlba": 6251233934,
+ "partitions": [
+ {
+ "node": "/dev/nvme0n1p1",
+ "start": 2048,
+ "size": 2048,
+ "type": "21686148-6449-6E6F-744E-656564454649",
+ "uuid": "B6D4F123-1AD8-4893-9B39-0E48074EE38B"
+ },
+ {
+ "node": "/dev/nvme0n1p2",
+ "start": 4096,
+ "size": 48234496,
+ "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
+ "uuid": "76A1E7BC-4C68-4BC2-A0F5-7D1D07445FDB"
+ },
+ {
+ "node": "/dev/nvme0n1p3",
+ "start": 48238592,
+ "size": 6202995343,
+ "type": "0FC63DAF-8483-4772-8E79-3D69D8477DE4",
+ "uuid": "763B085C-8010-F646-A3D4-E25DE4B83C87"
+ }
+ ]
+ }
+ },
+ "/dev/nvme0n1p1": {
+ "DEVLINKS": "/dev/disk/by-id/nvme-SAMSUNG_MZPLL3T2HAJQ-00005_S4CCNE0M300015-part1 /dev/disk/by-partuuid/b6d4f123-1ad8-4893-9b39-0e48074ee38b /dev/disk/by-id/nvme-uuid.344343304d3000150025384500000004-part1",
+ "DEVNAME": "/dev/nvme0n1p1",
+ "DEVPATH": "/devices/virtual/nvme-subsystem/nvme-subsys0/nvme0n1/nvme0n1p1",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_MODEL": "SAMSUNG MZPLL3T2HAJQ-00005",
+ "ID_PART_ENTRY_DISK": "259:1",
+ "ID_PART_ENTRY_NUMBER": "1",
+ "ID_PART_ENTRY_OFFSET": "2048",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "2048",
+ "ID_PART_ENTRY_TYPE": "21686148-6449-6e6f-744e-656564454649",
+ "ID_PART_ENTRY_UUID": "b6d4f123-1ad8-4893-9b39-0e48074ee38b",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "4bac57b7-307b-4b0e-a853-e0232c6fb955",
+ "ID_REVISION": "GPJA0B3Q",
+ "ID_SCSI": "1",
+ "ID_SERIAL": "SAMSUNG MZPLL3T2HAJQ-00005_S4CCNE0M300015",
+ "ID_SERIAL_SHORT": "S4CCNE0M300015",
+ "ID_WWN": "uuid.344343304d3000150025384500000004",
+ "MAJOR": "259",
+ "MINOR": "2",
+ "PARTN": "1",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "5214260",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "259:2",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "1",
+ "ro": "0",
+ "size": "1048576",
+ "start": "2048",
+ "stat": " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=259\nMINOR=2\nDEVNAME=nvme0n1p1\nDEVTYPE=partition\nPARTN=1"
+ }
+ },
+ "/dev/nvme0n1p2": {
+ "DEVLINKS": "/dev/disk/by-id/nvme-SAMSUNG_MZPLL3T2HAJQ-00005_S4CCNE0M300015-part2 /dev/disk/by-uuid/25fa500f-e450-4a13-b51d-74eba6f1c915 /dev/disk/by-id/nvme-uuid.344343304d3000150025384500000004-part2 /dev/disk/by-partuuid/76a1e7bc-4c68-4bc2-a0f5-7d1d07445fdb",
+ "DEVNAME": "/dev/nvme0n1p2",
+ "DEVPATH": "/devices/virtual/nvme-subsystem/nvme-subsys0/nvme0n1/nvme0n1p2",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_FS_TYPE": "xfs",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "25fa500f-e450-4a13-b51d-74eba6f1c915",
+ "ID_FS_UUID_ENC": "25fa500f-e450-4a13-b51d-74eba6f1c915",
+ "ID_MODEL": "SAMSUNG MZPLL3T2HAJQ-00005",
+ "ID_PART_ENTRY_DISK": "259:1",
+ "ID_PART_ENTRY_NUMBER": "2",
+ "ID_PART_ENTRY_OFFSET": "4096",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "48234496",
+ "ID_PART_ENTRY_TYPE": "0fc63daf-8483-4772-8e79-3d69d8477de4",
+ "ID_PART_ENTRY_UUID": "76a1e7bc-4c68-4bc2-a0f5-7d1d07445fdb",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "4bac57b7-307b-4b0e-a853-e0232c6fb955",
+ "ID_REVISION": "GPJA0B3Q",
+ "ID_SCSI": "1",
+ "ID_SERIAL": "SAMSUNG MZPLL3T2HAJQ-00005_S4CCNE0M300015",
+ "ID_SERIAL_SHORT": "S4CCNE0M300015",
+ "ID_WWN": "uuid.344343304d3000150025384500000004",
+ "MAJOR": "259",
+ "MINOR": "3",
+ "PARTN": "2",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "5215731",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "259:3",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "2",
+ "ro": "0",
+ "size": "24696061952",
+ "start": "4096",
+ "stat": " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=259\nMINOR=3\nDEVNAME=nvme0n1p2\nDEVTYPE=partition\nPARTN=2"
+ }
+ },
+ "/dev/nvme0n1p3": {
+ "DEVLINKS": "/dev/disk/by-partuuid/763b085c-8010-f646-a3d4-e25de4b83c87 /dev/disk/by-id/nvme-uuid.344343304d3000150025384500000004-part3 /dev/disk/by-id/nvme-SAMSUNG_MZPLL3T2HAJQ-00005_S4CCNE0M300015-part3 /dev/disk/by-uuid/38d3b1d3-05db-441e-b71b-70cd058f7313",
+ "DEVNAME": "/dev/nvme0n1p3",
+ "DEVPATH": "/devices/virtual/nvme-subsystem/nvme-subsys0/nvme0n1/nvme0n1p3",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_FS_TYPE": "xfs",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "38d3b1d3-05db-441e-b71b-70cd058f7313",
+ "ID_FS_UUID_ENC": "38d3b1d3-05db-441e-b71b-70cd058f7313",
+ "ID_MODEL": "SAMSUNG MZPLL3T2HAJQ-00005",
+ "ID_PART_ENTRY_DISK": "259:1",
+ "ID_PART_ENTRY_NUMBER": "3",
+ "ID_PART_ENTRY_OFFSET": "48238592",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "6202995343",
+ "ID_PART_ENTRY_TYPE": "0fc63daf-8483-4772-8e79-3d69d8477de4",
+ "ID_PART_ENTRY_UUID": "763b085c-8010-f646-a3d4-e25de4b83c87",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "4bac57b7-307b-4b0e-a853-e0232c6fb955",
+ "ID_REVISION": "GPJA0B3Q",
+ "ID_SCSI": "1",
+ "ID_SERIAL": "SAMSUNG MZPLL3T2HAJQ-00005_S4CCNE0M300015",
+ "ID_SERIAL_SHORT": "S4CCNE0M300015",
+ "ID_WWN": "uuid.344343304d3000150025384500000004",
+ "MAJOR": "259",
+ "MINOR": "4",
+ "PARTN": "3",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "5215490",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "259:4",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "3",
+ "ro": "0",
+ "size": "3175933615616",
+ "start": "48238592",
+ "stat": " 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=259\nMINOR=4\nDEVNAME=nvme0n1p3\nDEVTYPE=partition\nPARTN=3"
+ }
+ }
+ }
+}
diff --git a/tests/data/probert_storage_win10_bitlocker.json b/tests/data/probert_storage_win10_bitlocker.json
new file mode 100644
index 0000000..2016a56
--- /dev/null
+++ b/tests/data/probert_storage_win10_bitlocker.json
@@ -0,0 +1,1042 @@
+{
+ "network": {
+ "links": [
+ {
+ "addresses": [
+ {
+ "address": "127.0.0.1/8",
+ "family": 2,
+ "scope": "host",
+ "source": "static"
+ },
+ {
+ "address": "::1/128",
+ "family": 10,
+ "scope": "host",
+ "source": "static"
+ }
+ ],
+ "bond": {
+ "is_master": false,
+ "is_slave": false,
+ "lacp_rate": null,
+ "master": null,
+ "mode": null,
+ "slaves": [],
+ "xmit_hash_policy": null
+ },
+ "bridge": {
+ "interfaces": [],
+ "is_bridge": false,
+ "is_port": false,
+ "options": {}
+ },
+ "netlink_data": {
+ "arptype": 772,
+ "family": 0,
+ "flags": 65609,
+ "ifindex": 1,
+ "is_vlan": false,
+ "name": "lo"
+ },
+ "type": "lo",
+ "udev_data": {
+ "DEVPATH": "/devices/virtual/net/lo",
+ "ID_MM_CANDIDATE": "1",
+ "ID_NET_LINK_FILE": "/usr/lib/systemd/network/99-default.link",
+ "ID_NET_NAME": "lo",
+ "IFINDEX": "1",
+ "INTERFACE": "lo",
+ "SUBSYSTEM": "net",
+ "USEC_INITIALIZED": "1082225",
+ "attrs": {
+ "addr_assign_type": "0",
+ "addr_len": "6",
+ "address": "00:00:00:00:00:00",
+ "broadcast": "00:00:00:00:00:00",
+ "carrier": "1",
+ "carrier_changes": "0",
+ "carrier_down_count": "0",
+ "carrier_up_count": "0",
+ "dev_id": "0x0",
+ "dev_port": "0",
+ "dormant": "0",
+ "duplex": null,
+ "flags": "0x9",
+ "gro_flush_timeout": "0",
+ "ifalias": "",
+ "ifindex": "1",
+ "iflink": "1",
+ "link_mode": "0",
+ "mtu": "65536",
+ "name_assign_type": null,
+ "napi_defer_hard_irqs": "0",
+ "netdev_group": "0",
+ "operstate": "unknown",
+ "phys_port_id": null,
+ "phys_port_name": null,
+ "phys_switch_id": null,
+ "proto_down": "0",
+ "speed": null,
+ "subsystem": "net",
+ "testing": "0",
+ "tx_queue_len": "1000",
+ "type": "772",
+ "uevent": "INTERFACE=lo\nIFINDEX=1"
+ }
+ }
+ },
+ {
+ "addresses": [
+ {
+ "address": "192.168.122.130/24",
+ "family": 2,
+ "scope": "global",
+ "source": "dhcp"
+ },
+ {
+ "address": "fe80::5054:ff:fe83:cb5f/64",
+ "family": 10,
+ "scope": "link",
+ "source": "static"
+ }
+ ],
+ "bond": {
+ "is_master": false,
+ "is_slave": false,
+ "lacp_rate": null,
+ "master": null,
+ "mode": null,
+ "slaves": [],
+ "xmit_hash_policy": null
+ },
+ "bridge": {
+ "interfaces": [],
+ "is_bridge": false,
+ "is_port": false,
+ "options": {}
+ },
+ "netlink_data": {
+ "arptype": 1,
+ "family": 0,
+ "flags": 69699,
+ "ifindex": 2,
+ "is_vlan": false,
+ "name": "enp1s0"
+ },
+ "type": "eth",
+ "udev_data": {
+ "DEVPATH": "/devices/pci0000:00/0000:00:02.0/0000:01:00.0/net/enp1s0",
+ "ID_BUS": "pci",
+ "ID_MM_CANDIDATE": "1",
+ "ID_MODEL_FROM_DATABASE": "82574L Gigabit Network Connection",
+ "ID_MODEL_ID": "0x10d3",
+ "ID_NET_DRIVER": "e1000e",
+ "ID_NET_LINK_FILE": "/usr/lib/systemd/network/99-default.link",
+ "ID_NET_NAME": "enp1s0",
+ "ID_NET_NAME_MAC": "enx52540083cb5f",
+ "ID_NET_NAME_PATH": "enp1s0",
+ "ID_NET_NAMING_SCHEME": "v247",
+ "ID_PATH": "pci-0000:01:00.0",
+ "ID_PATH_TAG": "pci-0000_01_00_0",
+ "ID_PCI_CLASS_FROM_DATABASE": "Network controller",
+ "ID_PCI_SUBCLASS_FROM_DATABASE": "Ethernet controller",
+ "ID_VENDOR_FROM_DATABASE": "Intel Corporation",
+ "ID_VENDOR_ID": "0x8086",
+ "IFINDEX": "2",
+ "INTERFACE": "enp1s0",
+ "SUBSYSTEM": "net",
+ "SYSTEMD_ALIAS": "/sys/subsystem/net/devices/enp1s0",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "1359667",
+ "attrs": {
+ "addr_assign_type": "0",
+ "addr_len": "6",
+ "address": "52:54:00:83:cb:5f",
+ "broadcast": "ff:ff:ff:ff:ff:ff",
+ "carrier": "1",
+ "carrier_changes": "2",
+ "carrier_down_count": "1",
+ "carrier_up_count": "1",
+ "dev_id": "0x0",
+ "dev_port": "0",
+ "device": null,
+ "dormant": "0",
+ "duplex": "full",
+ "flags": "0x1003",
+ "gro_flush_timeout": "0",
+ "ifalias": "",
+ "ifindex": "2",
+ "iflink": "2",
+ "link_mode": "0",
+ "mtu": "1500",
+ "name_assign_type": "4",
+ "napi_defer_hard_irqs": "0",
+ "netdev_group": "0",
+ "operstate": "up",
+ "phys_port_id": null,
+ "phys_port_name": null,
+ "phys_switch_id": null,
+ "proto_down": "0",
+ "speed": "1000",
+ "subsystem": "net",
+ "testing": "0",
+ "tx_queue_len": "1000",
+ "type": "1",
+ "uevent": "INTERFACE=enp1s0\nIFINDEX=2"
+ }
+ }
+ }
+ ],
+ "routes": [
+ {
+ "dst": "default",
+ "family": 2,
+ "ifindex": 2,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "192.168.122.0/24",
+ "family": 2,
+ "ifindex": 2,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "192.168.122.1",
+ "family": 2,
+ "ifindex": 2,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "127.0.0.0",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "127.0.0.0/8",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "127.0.0.1",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "127.255.255.255",
+ "family": 2,
+ "ifindex": 1,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "192.168.122.0",
+ "family": 2,
+ "ifindex": 2,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "192.168.122.130",
+ "family": 2,
+ "ifindex": 2,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "192.168.122.255",
+ "family": 2,
+ "ifindex": 2,
+ "table": 255,
+ "type": 3
+ },
+ {
+ "dst": "::1",
+ "family": 10,
+ "ifindex": 1,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "fe80::/64",
+ "family": 10,
+ "ifindex": 2,
+ "table": 254,
+ "type": 1
+ },
+ {
+ "dst": "::1",
+ "family": 10,
+ "ifindex": 1,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "fe80::5054:ff:fe83:cb5f",
+ "family": 10,
+ "ifindex": 2,
+ "table": 255,
+ "type": 2
+ },
+ {
+ "dst": "ff00::/8",
+ "family": 10,
+ "ifindex": 2,
+ "table": 255,
+ "type": 5
+ }
+ ]
+ },
+ "storage": {
+ "bcache": {
+ "backing": {},
+ "caching": {}
+ },
+ "blockdev": {
+ "/dev/sda": {
+ "DEVLINKS": "/dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_QM00001 /dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_QM00001 /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00001 /dev/disk/by-path/pci-0000:00:1f.2-ata-1 /dev/disk/by-path/pci-0000:00:1f.2-ata-1.0",
+ "DEVNAME": "/dev/sda",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda",
+ "DEVTYPE": "disk",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_MODEL": "QEMU_HARDDISK",
+ "ID_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "17edef9f-34bf-42c9-a15f-cee9aa2e1499",
+ "ID_PATH": "pci-0000:00:1f.2-ata-1.0",
+ "ID_PATH_ATA_COMPAT": "pci-0000:00:1f.2-ata-1",
+ "ID_PATH_TAG": "pci-0000_00_1f_2-ata-1_0",
+ "ID_REVISION": "2.5+",
+ "ID_SCSI": "1",
+ "ID_SCSI_SERIAL": "QM00001",
+ "ID_SERIAL": "QEMU_HARDDISK_QM00001",
+ "ID_SERIAL_SHORT": "QM00001",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "MAJOR": "8",
+ "MINOR": "0",
+ "MPATH_SBIN_PATH": "/sbin",
+ "SCSI_IDENT_LUN_ATA": "QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_T10": "ATA_QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_VENDOR": "QM00001",
+ "SCSI_IDENT_SERIAL": "QM00001",
+ "SCSI_MODEL": "QEMU_HARDDISK",
+ "SCSI_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "SCSI_REVISION": "2.5+",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "1859322",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "50",
+ "dev": "8:0",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "256",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "16",
+ "removable": "0",
+ "ro": "0",
+ "size": "85899345920",
+ "stat": " 1178 0 51786 111 1 0 0 0 0 224 111 0 0 0 0 1 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=0\nDEVNAME=sda\nDEVTYPE=disk"
+ }
+ },
+ "/dev/sda1": {
+ "DEVLINKS": "/dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_QM00001-part1 /dev/disk/by-uuid/54E4-27B3 /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00001-part1 /dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_QM00001-part1 /dev/disk/by-partuuid/9f8b9dd0-4c48-4e38-b1c6-d6c130a3b9b1 /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001-part1 /dev/disk/by-partlabel/EFI\\x20system\\x20partition /dev/disk/by-path/pci-0000:00:1f.2-ata-1-part1 /dev/disk/by-path/pci-0000:00:1f.2-ata-1.0-part1",
+ "DEVNAME": "/dev/sda1",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda1",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_FS_TYPE": "vfat",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "54E4-27B3",
+ "ID_FS_UUID_ENC": "54E4-27B3",
+ "ID_FS_VERSION": "FAT32",
+ "ID_MODEL": "QEMU_HARDDISK",
+ "ID_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "ID_PART_ENTRY_DISK": "8:0",
+ "ID_PART_ENTRY_FLAGS": "0x8000000000000000",
+ "ID_PART_ENTRY_NAME": "EFI\\x20system\\x20partition",
+ "ID_PART_ENTRY_NUMBER": "1",
+ "ID_PART_ENTRY_OFFSET": "2048",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "204800",
+ "ID_PART_ENTRY_TYPE": "c12a7328-f81f-11d2-ba4b-00a0c93ec93b",
+ "ID_PART_ENTRY_UUID": "9f8b9dd0-4c48-4e38-b1c6-d6c130a3b9b1",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "17edef9f-34bf-42c9-a15f-cee9aa2e1499",
+ "ID_PATH": "pci-0000:00:1f.2-ata-1.0",
+ "ID_PATH_ATA_COMPAT": "pci-0000:00:1f.2-ata-1",
+ "ID_PATH_TAG": "pci-0000_00_1f_2-ata-1_0",
+ "ID_REVISION": "2.5+",
+ "ID_SCSI": "1",
+ "ID_SCSI_SERIAL": "QM00001",
+ "ID_SERIAL": "QEMU_HARDDISK_QM00001",
+ "ID_SERIAL_SHORT": "QM00001",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "MAJOR": "8",
+ "MINOR": "1",
+ "PARTN": "1",
+ "PARTNAME": "EFI system partition",
+ "SCSI_IDENT_LUN_ATA": "QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_T10": "ATA_QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_VENDOR": "QM00001",
+ "SCSI_IDENT_SERIAL": "QM00001",
+ "SCSI_MODEL": "QEMU_HARDDISK",
+ "SCSI_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "SCSI_REVISION": "2.5+",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "UDISKS_IGNORE": "1",
+ "USEC_INITIALIZED": "1865155",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "8:1",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "1",
+ "ro": "0",
+ "size": "104857600",
+ "start": "2048",
+ "stat": " 204 0 12069 17 0 0 0 0 0 84 17 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=1\nDEVNAME=sda1\nDEVTYPE=partition\nPARTN=1\nPARTNAME=EFI system partition"
+ }
+ },
+ "/dev/sda2": {
+ "DEVLINKS": "/dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_QM00001-part2 /dev/disk/by-partlabel/Microsoft\\x20reserved\\x20partition /dev/disk/by-path/pci-0000:00:1f.2-ata-1.0-part2 /dev/disk/by-path/pci-0000:00:1f.2-ata-1-part2 /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00001-part2 /dev/disk/by-partuuid/55850c46-570d-4d08-9e84-1339e41b8746 /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001-part2 /dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_QM00001-part2",
+ "DEVNAME": "/dev/sda2",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda2",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_MODEL": "QEMU_HARDDISK",
+ "ID_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "ID_PART_ENTRY_DISK": "8:0",
+ "ID_PART_ENTRY_FLAGS": "0x8000000000000000",
+ "ID_PART_ENTRY_NAME": "Microsoft\\x20reserved\\x20partition",
+ "ID_PART_ENTRY_NUMBER": "2",
+ "ID_PART_ENTRY_OFFSET": "206848",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "32768",
+ "ID_PART_ENTRY_TYPE": "e3c9e316-0b5c-4db8-817d-f92df00215ae",
+ "ID_PART_ENTRY_UUID": "55850c46-570d-4d08-9e84-1339e41b8746",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "17edef9f-34bf-42c9-a15f-cee9aa2e1499",
+ "ID_PATH": "pci-0000:00:1f.2-ata-1.0",
+ "ID_PATH_ATA_COMPAT": "pci-0000:00:1f.2-ata-1",
+ "ID_PATH_TAG": "pci-0000_00_1f_2-ata-1_0",
+ "ID_REVISION": "2.5+",
+ "ID_SCSI": "1",
+ "ID_SCSI_SERIAL": "QM00001",
+ "ID_SERIAL": "QEMU_HARDDISK_QM00001",
+ "ID_SERIAL_SHORT": "QM00001",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "MAJOR": "8",
+ "MINOR": "2",
+ "PARTN": "2",
+ "PARTNAME": "Microsoft reserved partition",
+ "SCSI_IDENT_LUN_ATA": "QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_T10": "ATA_QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_VENDOR": "QM00001",
+ "SCSI_IDENT_SERIAL": "QM00001",
+ "SCSI_MODEL": "QEMU_HARDDISK",
+ "SCSI_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "SCSI_REVISION": "2.5+",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "UDISKS_IGNORE": "1",
+ "USEC_INITIALIZED": "1869681",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "8:2",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "2",
+ "ro": "0",
+ "size": "16777216",
+ "start": "206848",
+ "stat": " 296 0 8336 22 0 0 0 0 0 88 22 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=2\nDEVNAME=sda2\nDEVTYPE=partition\nPARTN=2\nPARTNAME=Microsoft reserved partition"
+ }
+ },
+ "/dev/sda3": {
+ "DEVLINKS": "/dev/disk/by-partlabel/Basic\\x20data\\x20partition /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001-part3 /dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_QM00001-part3 /dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_QM00001-part3 /dev/disk/by-path/pci-0000:00:1f.2-ata-1-part3 /dev/disk/by-path/pci-0000:00:1f.2-ata-1.0-part3 /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00001-part3 /dev/disk/by-partuuid/695bcd76-922c-4194-9097-17355ed02893",
+ "DEVNAME": "/dev/sda3",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda3",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_FS_TYPE": "BitLocker",
+ "ID_FS_USAGE": "crypto",
+ "ID_FS_VERSION": "2",
+ "ID_MODEL": "QEMU_HARDDISK",
+ "ID_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "ID_PART_ENTRY_DISK": "8:0",
+ "ID_PART_ENTRY_NAME": "Basic\\x20data\\x20partition",
+ "ID_PART_ENTRY_NUMBER": "3",
+ "ID_PART_ENTRY_OFFSET": "239616",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "166486126",
+ "ID_PART_ENTRY_TYPE": "ebd0a0a2-b9e5-4433-87c0-68b6b72699c7",
+ "ID_PART_ENTRY_UUID": "695bcd76-922c-4194-9097-17355ed02893",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "17edef9f-34bf-42c9-a15f-cee9aa2e1499",
+ "ID_PATH": "pci-0000:00:1f.2-ata-1.0",
+ "ID_PATH_ATA_COMPAT": "pci-0000:00:1f.2-ata-1",
+ "ID_PATH_TAG": "pci-0000_00_1f_2-ata-1_0",
+ "ID_REVISION": "2.5+",
+ "ID_SCSI": "1",
+ "ID_SCSI_SERIAL": "QM00001",
+ "ID_SERIAL": "QEMU_HARDDISK_QM00001",
+ "ID_SERIAL_SHORT": "QM00001",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "MAJOR": "8",
+ "MINOR": "3",
+ "PARTN": "3",
+ "PARTNAME": "Basic data partition",
+ "SCSI_IDENT_LUN_ATA": "QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_T10": "ATA_QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_VENDOR": "QM00001",
+ "SCSI_IDENT_SERIAL": "QM00001",
+ "SCSI_MODEL": "QEMU_HARDDISK",
+ "SCSI_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "SCSI_REVISION": "2.5+",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "USEC_INITIALIZED": "1867565",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "8:3",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "3",
+ "ro": "0",
+ "size": "85240896512",
+ "start": "239616",
+ "stat": " 120 0 2212 16 0 0 0 0 0 56 16 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=3\nDEVNAME=sda3\nDEVTYPE=partition\nPARTN=3\nPARTNAME=Basic data partition"
+ }
+ },
+ "/dev/sda4": {
+ "DEVLINKS": "/dev/disk/by-uuid/7448E57248E53410 /dev/disk/by-path/pci-0000:00:1f.2-ata-1-part4 /dev/disk/by-id/scsi-0ATA_QEMU_HARDDISK_QM00001-part4 /dev/disk/by-partuuid/9e09a65a-4ff5-4af1-9b2d-3b5010d0714c /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001-part4 /dev/disk/by-id/scsi-SATA_QEMU_HARDDISK_QM00001-part4 /dev/disk/by-path/pci-0000:00:1f.2-ata-1.0-part4 /dev/disk/by-id/scsi-1ATA_QEMU_HARDDISK_QM00001-part4",
+ "DEVNAME": "/dev/sda4",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1f.2/ata1/host0/target0:0:0/0:0:0:0/block/sda/sda4",
+ "DEVTYPE": "partition",
+ "DM_MULTIPATH_DEVICE_PATH": "0",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_FS_TYPE": "ntfs",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "7448E57248E53410",
+ "ID_FS_UUID_ENC": "7448E57248E53410",
+ "ID_MODEL": "QEMU_HARDDISK",
+ "ID_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "ID_PART_ENTRY_DISK": "8:0",
+ "ID_PART_ENTRY_FLAGS": "0x8000000000000001",
+ "ID_PART_ENTRY_NUMBER": "4",
+ "ID_PART_ENTRY_OFFSET": "166727680",
+ "ID_PART_ENTRY_SCHEME": "gpt",
+ "ID_PART_ENTRY_SIZE": "1040384",
+ "ID_PART_ENTRY_TYPE": "de94bba4-06d1-4d40-a16a-bfd50179d6ac",
+ "ID_PART_ENTRY_UUID": "9e09a65a-4ff5-4af1-9b2d-3b5010d0714c",
+ "ID_PART_TABLE_TYPE": "gpt",
+ "ID_PART_TABLE_UUID": "17edef9f-34bf-42c9-a15f-cee9aa2e1499",
+ "ID_PATH": "pci-0000:00:1f.2-ata-1.0",
+ "ID_PATH_ATA_COMPAT": "pci-0000:00:1f.2-ata-1",
+ "ID_PATH_TAG": "pci-0000_00_1f_2-ata-1_0",
+ "ID_REVISION": "2.5+",
+ "ID_SCSI": "1",
+ "ID_SCSI_SERIAL": "QM00001",
+ "ID_SERIAL": "QEMU_HARDDISK_QM00001",
+ "ID_SERIAL_SHORT": "QM00001",
+ "ID_TYPE": "disk",
+ "ID_VENDOR": "ATA",
+ "ID_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "MAJOR": "8",
+ "MINOR": "4",
+ "PARTN": "4",
+ "SCSI_IDENT_LUN_ATA": "QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_T10": "ATA_QEMU_HARDDISK_QM00001",
+ "SCSI_IDENT_LUN_VENDOR": "QM00001",
+ "SCSI_IDENT_SERIAL": "QM00001",
+ "SCSI_MODEL": "QEMU_HARDDISK",
+ "SCSI_MODEL_ENC": "QEMU\\x20HARDDISK\\x20\\x20\\x20",
+ "SCSI_REVISION": "2.5+",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "disk",
+ "SCSI_VENDOR": "ATA",
+ "SCSI_VENDOR_ENC": "ATA\\x20\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":systemd:",
+ "UDISKS_IGNORE": "1",
+ "USEC_INITIALIZED": "1872214",
+ "attrs": {
+ "alignment_offset": "0",
+ "dev": "8:4",
+ "discard_alignment": "0",
+ "inflight": " 0 0",
+ "partition": "4",
+ "ro": "0",
+ "size": "532676608",
+ "start": "166727680",
+ "stat": " 244 0 16632 29 0 0 0 0 0 72 29 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=8\nMINOR=4\nDEVNAME=sda4\nDEVTYPE=partition\nPARTN=4"
+ }
+ },
+ "/dev/sr0": {
+ "DEVLINKS": "/dev/disk/by-path/pci-0000:00:1f.2-ata-2.0 /dev/dvd /dev/cdrom /dev/disk/by-path/pci-0000:00:1f.2-ata-2 /dev/disk/by-label/Ubuntu\\x20custom /dev/disk/by-id/ata-QEMU_DVD-ROM_QM00003 /dev/disk/by-id/scsi-0QEMU_QEMU_DVD-ROM_QM00003 /dev/disk/by-uuid/2021-07-11-10-08-42-00 /dev/disk/by-id/scsi-1ATA_QEMU_DVD-ROM_QM00003",
+ "DEVNAME": "/dev/sr0",
+ "DEVPATH": "/devices/pci0000:00/0000:00:1f.2/ata2/host1/target1:0:0/1:0:0:0/block/sr0",
+ "DEVTYPE": "disk",
+ "ID_ATA": "1",
+ "ID_BUS": "ata",
+ "ID_CDROM": "1",
+ "ID_CDROM_DVD": "1",
+ "ID_CDROM_MEDIA": "1",
+ "ID_CDROM_MEDIA_DVD": "1",
+ "ID_CDROM_MEDIA_SESSION_COUNT": "1",
+ "ID_CDROM_MEDIA_STATE": "complete",
+ "ID_CDROM_MEDIA_TRACK_COUNT": "1",
+ "ID_CDROM_MEDIA_TRACK_COUNT_DATA": "1",
+ "ID_CDROM_MRW": "1",
+ "ID_CDROM_MRW_W": "1",
+ "ID_FOR_SEAT": "block-pci-0000_00_1f_2-ata-2_0",
+ "ID_FS_BOOT_SYSTEM_ID": "EL\\x20TORITO\\x20SPECIFICATION",
+ "ID_FS_LABEL": "Ubuntu_custom",
+ "ID_FS_LABEL_ENC": "Ubuntu\\x20custom",
+ "ID_FS_TYPE": "iso9660",
+ "ID_FS_USAGE": "filesystem",
+ "ID_FS_UUID": "2021-07-11-10-08-42-00",
+ "ID_FS_UUID_ENC": "2021-07-11-10-08-42-00",
+ "ID_MODEL": "QEMU_DVD-ROM",
+ "ID_MODEL_ENC": "QEMU\\x20DVD-ROM\\x20\\x20\\x20\\x20",
+ "ID_PART_TABLE_TYPE": "dos",
+ "ID_PATH": "pci-0000:00:1f.2-ata-2.0",
+ "ID_PATH_ATA_COMPAT": "pci-0000:00:1f.2-ata-2",
+ "ID_PATH_TAG": "pci-0000_00_1f_2-ata-2_0",
+ "ID_REVISION": "2.5+",
+ "ID_SCSI": "1",
+ "ID_SERIAL": "QEMU_DVD-ROM_QM00003",
+ "ID_TYPE": "cd/dvd",
+ "ID_VENDOR": "QEMU",
+ "ID_VENDOR_ENC": "QEMU\\x20\\x20\\x20\\x20",
+ "MAJOR": "11",
+ "MINOR": "0",
+ "SCSI_IDENT_LUN_ATA": "QEMU_DVD-ROM_QM00003",
+ "SCSI_IDENT_LUN_T10": "ATA_QEMU_DVD-ROM_QM00003",
+ "SCSI_IDENT_LUN_VENDOR": "QM00003",
+ "SCSI_MODEL": "QEMU_DVD-ROM",
+ "SCSI_MODEL_ENC": "QEMU\\x20DVD-ROM\\x20\\x20\\x20\\x20",
+ "SCSI_REVISION": "2.5+",
+ "SCSI_TPGS": "0",
+ "SCSI_TYPE": "cd/dvd",
+ "SCSI_VENDOR": "QEMU",
+ "SCSI_VENDOR_ENC": "QEMU\\x20\\x20\\x20\\x20",
+ "SUBSYSTEM": "block",
+ "TAGS": ":seat:systemd:uaccess:",
+ "USEC_INITIALIZED": "2100445",
+ "attrs": {
+ "alignment_offset": "0",
+ "bdi": null,
+ "capability": "119",
+ "dev": "11:0",
+ "device": null,
+ "discard_alignment": "0",
+ "events": "media_change eject_request",
+ "events_async": "",
+ "events_poll_msecs": "-1",
+ "ext_range": "1",
+ "hidden": "0",
+ "inflight": " 0 0",
+ "range": "1",
+ "removable": "1",
+ "ro": "0",
+ "size": "1303957504",
+ "stat": " 11868 44 2455445 1276 0 0 0 0 0 5060 1276 0 0 0 0 0 0",
+ "subsystem": "block",
+ "uevent": "MAJOR=11\nMINOR=0\nDEVNAME=sr0\nDEVTYPE=disk"
+ },
+ "partitiontable": {
+ "device": "/dev/sr0",
+ "id": "0x00000000",
+ "label": "dos",
+ "partitions": [
+ {
+ "bootable": true,
+ "node": "/dev/sr0p1",
+ "size": 2536024,
+ "start": 64,
+ "type": "83"
+ },
+ {
+ "node": "/dev/sr0p2",
+ "size": 10040,
+ "start": 2536088,
+ "type": "ef"
+ }
+ ],
+ "sectorsize": 2048,
+ "unit": "sectors"
+ }
+ }
+ },
+ "dasd": {},
+ "dmcrypt": {},
+ "filesystem": {
+ "/dev/sda1": {
+ "TYPE": "vfat",
+ "USAGE": "filesystem",
+ "UUID": "54E4-27B3",
+ "UUID_ENC": "54E4-27B3",
+ "VERSION": "FAT32"
+ },
+ "/dev/sda3": {
+ "TYPE": "BitLocker",
+ "USAGE": "crypto",
+ "VERSION": "2"
+ },
+ "/dev/sda4": {
+ "TYPE": "ntfs",
+ "USAGE": "filesystem",
+ "UUID": "7448E57248E53410",
+ "UUID_ENC": "7448E57248E53410"
+ },
+ "/dev/sr0": {
+ "BOOT_SYSTEM_ID": "EL\\x20TORITO\\x20SPECIFICATION",
+ "LABEL": "Ubuntu_custom",
+ "LABEL_ENC": "Ubuntu\\x20custom",
+ "TYPE": "iso9660",
+ "USAGE": "filesystem",
+ "UUID": "2021-07-11-10-08-42-00",
+ "UUID_ENC": "2021-07-11-10-08-42-00"
+ }
+ },
+ "lvm": {},
+ "mount": [
+ {
+ "children": [
+ {
+ "children": [
+ {
+ "fstype": "securityfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "securityfs",
+ "target": "/sys/kernel/security"
+ },
+ {
+ "children": [
+ {
+ "fstype": "cgroup2",
+ "options": "rw,nosuid,nodev,noexec,relatime,nsdelegate",
+ "source": "cgroup2",
+ "target": "/sys/fs/cgroup/unified"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,xattr,name=systemd",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/systemd"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,hugetlb",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/hugetlb"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,devices",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/devices"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,net_cls,net_prio",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/net_cls,net_prio"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,blkio",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/blkio"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,pids",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/pids"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,cpu,cpuacct",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/cpu,cpuacct"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,memory",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/memory"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,rdma",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/rdma"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,perf_event",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/perf_event"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,cpuset",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/cpuset"
+ },
+ {
+ "fstype": "cgroup",
+ "options": "rw,nosuid,nodev,noexec,relatime,freezer",
+ "source": "cgroup",
+ "target": "/sys/fs/cgroup/freezer"
+ }
+ ],
+ "fstype": "tmpfs",
+ "options": "ro,nosuid,nodev,noexec,size=4096k,nr_inodes=1024,mode=755,inode64",
+ "source": "tmpfs",
+ "target": "/sys/fs/cgroup"
+ },
+ {
+ "fstype": "pstore",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "pstore",
+ "target": "/sys/fs/pstore"
+ },
+ {
+ "fstype": "efivarfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "efivarfs",
+ "target": "/sys/firmware/efi/efivars"
+ },
+ {
+ "fstype": "bpf",
+ "options": "rw,nosuid,nodev,noexec,relatime,mode=700",
+ "source": "none",
+ "target": "/sys/fs/bpf"
+ },
+ {
+ "fstype": "tracefs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "tracefs",
+ "target": "/sys/kernel/tracing"
+ },
+ {
+ "fstype": "debugfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "debugfs",
+ "target": "/sys/kernel/debug"
+ },
+ {
+ "fstype": "fusectl",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "fusectl",
+ "target": "/sys/fs/fuse/connections"
+ },
+ {
+ "fstype": "configfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "configfs",
+ "target": "/sys/kernel/config"
+ }
+ ],
+ "fstype": "sysfs",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "sysfs",
+ "target": "/sys"
+ },
+ {
+ "children": [
+ {
+ "fstype": "autofs",
+ "options": "rw,relatime,fd=29,pgrp=1,timeout=0,minproto=5,maxproto=5,direct,pipe_ino=16121",
+ "source": "systemd-1",
+ "target": "/proc/sys/fs/binfmt_misc"
+ }
+ ],
+ "fstype": "proc",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "proc",
+ "target": "/proc"
+ },
+ {
+ "children": [
+ {
+ "fstype": "devpts",
+ "options": "rw,nosuid,noexec,relatime,gid=5,mode=620,ptmxmode=000",
+ "source": "devpts",
+ "target": "/dev/pts"
+ },
+ {
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,inode64",
+ "source": "tmpfs",
+ "target": "/dev/shm"
+ },
+ {
+ "fstype": "mqueue",
+ "options": "rw,nosuid,nodev,noexec,relatime",
+ "source": "mqueue",
+ "target": "/dev/mqueue"
+ },
+ {
+ "fstype": "hugetlbfs",
+ "options": "rw,relatime,pagesize=2M",
+ "source": "hugetlbfs",
+ "target": "/dev/hugepages"
+ }
+ ],
+ "fstype": "devtmpfs",
+ "options": "rw,nosuid,relatime,size=8148668k,nr_inodes=2037167,mode=755,inode64",
+ "source": "udev",
+ "target": "/dev"
+ },
+ {
+ "children": [
+ {
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,noexec,relatime,size=5120k,inode64",
+ "source": "tmpfs",
+ "target": "/run/lock"
+ },
+ {
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,relatime,size=1636852k,nr_inodes=409213,mode=700,uid=999,gid=999,inode64",
+ "source": "tmpfs",
+ "target": "/run/user/999"
+ }
+ ],
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,noexec,relatime,size=1636856k,mode=755,inode64",
+ "source": "tmpfs",
+ "target": "/run"
+ },
+ {
+ "fstype": "iso9660",
+ "options": "ro,noatime,nojoliet,check=s,map=n,blocksize=2048",
+ "source": "/dev/sr0",
+ "target": "/cdrom"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,noatime",
+ "source": "/dev/loop0",
+ "target": "/rofs"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,relatime",
+ "source": "/dev/loop3",
+ "target": "/usr/lib/modules"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,relatime",
+ "source": "/dev/loop0",
+ "target": "/media/filesystem"
+ },
+ {
+ "fstype": "tmpfs",
+ "options": "rw,nosuid,nodev,relatime,inode64",
+ "source": "tmpfs",
+ "target": "/tmp"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop4",
+ "target": "/snap/snapd/12398"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop5",
+ "target": "/snap/subiquity/x1"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop6",
+ "target": "/snap/core18/2074"
+ },
+ {
+ "fstype": "squashfs",
+ "options": "ro,nodev,relatime",
+ "source": "/dev/loop7",
+ "target": "/snap/core20/1026"
+ }
+ ],
+ "fstype": "overlay",
+ "options": "rw,relatime,lowerdir=/jnstaller.squashfs:/installer.squashfs:/filesystem.squashfs,upperdir=/cow/upper,workdir=/cow/work",
+ "source": "/cow",
+ "target": "/"
+ }
+ ],
+ "multipath": {},
+ "raid": {},
+ "zfs": {
+ "zpools": {}
+ }
+ }
+}
diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py
new file mode 100644
index 0000000..5e43585
--- /dev/null
+++ b/tests/integration/__init__.py
@@ -0,0 +1,3 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+# This directory contains tests that require root to run, essentially.
diff --git a/tests/integration/test_block_meta.py b/tests/integration/test_block_meta.py
new file mode 100644
index 0000000..bd602b2
--- /dev/null
+++ b/tests/integration/test_block_meta.py
@@ -0,0 +1,208 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+from collections import namedtuple
+import contextlib
+import sys
+import yaml
+import os
+
+from curtin import block, udev, util
+
+from tests.unittests.helpers import CiTestCase
+from tests.integration.webserv import ImageServer
+
+
+class IntegrationTestCase(CiTestCase):
+ # Base class for integration tests: unlike unit tests these are
+ # expected to shell out to real external tools, so real subprocess
+ # calls are permitted by the CiTestCase machinery.
+ allowed_subp = True
+
+
+# Context manager: attach `image` to a free loop device with partition
+# scanning enabled, trigger udev so the device nodes settle, and yield
+# the loop device path printed by `losetup --show`.
+@contextlib.contextmanager
+def loop_dev(image):
+ dev = util.subp(
+ ['losetup', '--show', '--find', '--partscan', image],
+ capture=True, decode='ignore')[0].strip()
+ try:
+ udev.udevadm_trigger([dev])
+ yield dev
+ finally:
+ # always detach, even if the body raised, so loop devices
+ # are not leaked between tests
+ util.subp(['losetup', '--detach', dev])
+
+
+# One partition as reported by block.sysfs_partition_data(), minus the
+# leading kname field: (partition number, offset, size).
+PartData = namedtuple("PartData", ('number', 'offset', 'size'))
+
+
+def summarize_partitions(dev):
+ # Return a sorted list of PartData for every partition on `dev`,
+ # suitable for direct equality comparison in the tests below.
+ # We don't care about the kname
+ return sorted(
+ [PartData(*d[1:]) for d in block.sysfs_partition_data(dev)])
+
+
+class StorageConfigBuilder:
+ # Incrementally builds a curtin storage config (a list of action
+ # dicts) for use with `block-meta custom`. Action ids are generated
+ # as 'id<N>' from the current config length, so they are unique.
+
+ def __init__(self):
+ # self.config: the ordered list of storage actions built so far
+ # self.cur_image: id of the most recently added image action, used
+ # as the default device for subsequent partitions
+ self.config = []
+ self.cur_image = None
+
+ def render(self):
+ # Wrap the accumulated actions in the top-level config shape
+ # that curtin expects.
+ return {
+ 'storage': {
+ 'config': self.config,
+ },
+ }
+
+ def add_image(self, *, path, size, create=False, **kw):
+ # Append an 'image' action for the file at `path`. Extra keyword
+ # args (e.g. ptable=) are merged into the action. With
+ # create=True the backing file is pre-allocated here as zeros
+ # rather than left for curtin to create.
+ action = {
+ 'type': 'image',
+ 'id': 'id' + str(len(self.config)),
+ 'path': path,
+ 'size': size,
+ }
+ action.update(**kw)
+ self.cur_image = action['id']
+ self.config.append(action)
+ if create:
+ with open(path, "wb") as f:
+ f.write(b"\0" * int(util.human2bytes(size)))
+
+ def add_part(self, *, size, **kw):
+ # Append a 'partition' action targeting the most recent image.
+ # Raises if no image has been added yet.
+ if self.cur_image is None:
+ raise Exception("no current image")
+ action = {
+ 'type': 'partition',
+ 'id': 'id' + str(len(self.config)),
+ 'device': self.cur_image,
+ 'size': size,
+ }
+ action.update(**kw)
+ self.config.append(action)
+
+
+class TestBlockMeta(IntegrationTestCase):
+ # Integration tests for `curtin block-meta custom`: each test renders
+ # a storage config against a disk image file, then attaches the image
+ # to a loop device and checks the resulting partition layout.
+
+ def run_bm(self, config, *args, **kwargs):
+ # Dump `config` to YAML and invoke `python -m curtin block-meta
+ # --testmode custom` on it, passing the environment variables
+ # block-meta reads. Extra *args become extra CLI arguments; an
+ # 'env' kwarg is merged into the subprocess environment.
+ config_path = self.tmp_path('config.yaml')
+ with open(config_path, 'w') as fp:
+ yaml.dump(config, fp)
+
+ cmd_env = kwargs.pop('env', {})
+ cmd_env.update({
+ 'PATH': os.environ['PATH'],
+ 'CONFIG': config_path,
+ 'WORKING_DIR': '/tmp',
+ 'OUTPUT_FSTAB': self.tmp_path('fstab'),
+ 'OUTPUT_INTERFACES': '',
+ 'OUTPUT_NETWORK_STATE': '',
+ 'OUTPUT_NETWORK_CONFIG': '',
+ })
+
+ cmd = [
+ sys.executable, '-m', 'curtin', '--showtrace', '-vv',
+ '-c', config_path, 'block-meta', '--testmode', 'custom',
+ *args,
+ ]
+ util.subp(cmd, env=cmd_env, **kwargs)
+
+ def _test_default_offsets(self, ptable):
+ # Two consecutive partitions with no explicit offsets: expect the
+ # first at 1MiB and the second packed immediately after it.
+ psize = 40 << 20
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder()
+ config.add_image(path=img, size='100M', ptable=ptable)
+ config.add_part(size=psize, number=1)
+ config.add_part(size=psize, number=2)
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(
+ number=1, offset=1 << 20, size=psize),
+ PartData(
+ number=2, offset=(1 << 20) + psize, size=psize),
+ ])
+
+ def test_default_offsets_gpt(self):
+ self._test_default_offsets('gpt')
+
+ def test_default_offsets_msdos(self):
+ self._test_default_offsets('msdos')
+
+ def _test_non_default_numbering(self, ptable):
+ # Partition numbers need not be consecutive (1 then 4); layout on
+ # disk should still be contiguous.
+ psize = 40 << 20
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder()
+ config.add_image(path=img, size='100M', ptable=ptable)
+ config.add_part(size=psize, number=1)
+ config.add_part(size=psize, number=4)
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(
+ number=1, offset=1 << 20, size=psize),
+ PartData(
+ number=4, offset=(1 << 20) + psize, size=psize),
+ ])
+
+ def test_non_default_numbering_gpt(self):
+ self._test_non_default_numbering('gpt')
+
+ def BROKEN_test_non_default_numbering_msdos(self):
+ # NOTE: prefixed BROKEN_ so the runner skips it — non-consecutive
+ # numbering on msdos tables does not currently pass.
+ self._test_non_default_numbering('msdos')
+
+ def test_logical(self):
+ # msdos extended partition containing two logical partitions
+ # (numbers 5 and 6 by convention).
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder()
+ config.add_image(path=img, size='100M', ptable='msdos')
+ config.add_part(size='50M', number=1, flag='extended')
+ config.add_part(size='10M', number=5, flag='logical')
+ config.add_part(size='10M', number=6, flag='logical')
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ # extended partitions get a strange size in sysfs
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=2 << 20, size=10 << 20),
+ # part 5 takes us to 12 MiB offset, curtin leaves a 1 MiB
+ # gap.
+ PartData(number=6, offset=13 << 20, size=10 << 20),
+ ])
+
+ p1kname = block.partition_kname(block.path_to_kname(dev), 1)
+ self.assertTrue(block.is_extended_partition('/dev/' + p1kname))
+
+ def test_raw_image(self):
+ # Fetch a raw disk image over HTTP (local ImageServer serving the
+ # vmtest image dir) and dd it onto the target via a 'dd-raw'
+ # source; block-meta mounts the result at TARGET_MOUNT_POINT.
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder()
+ config.add_image(path=img, size='2G', ptable='gpt', create=True)
+
+ curtin_cfg = config.render()
+ server = ImageServer()
+ try:
+ server.start()
+ sources = {
+ 'sources': {
+ '00': {
+ 'uri': server.base_url + '/static/lvm-disk.dd',
+ 'type': 'dd-raw',
+ },
+ },
+ }
+ curtin_cfg.update(**sources)
+ mnt_point = self.tmp_dir()
+ cmd_env = {
+ 'TARGET_MOUNT_POINT': mnt_point,
+ }
+ with loop_dev(img) as dev:
+ try:
+ self.run_bm(curtin_cfg, f'--devices={dev}', env=cmd_env)
+ finally:
+ # undo the mount and the device-mapper node set up by
+ # the dd'd LVM image so the loop device can detach
+ util.subp(['umount', mnt_point])
+ udev.udevadm_settle()
+ util.subp(
+ ['dmsetup', 'remove', '/dev/mapper/vmtests-root']
+ )
+ finally:
+ server.stop()
diff --git a/tests/integration/webserv.py b/tests/integration/webserv.py
new file mode 100644
index 0000000..f4ce4e4
--- /dev/null
+++ b/tests/integration/webserv.py
@@ -0,0 +1,53 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+import threading
+import socketserver
+from http.server import SimpleHTTPRequestHandler
+from tests.vmtests.image_sync import IMAGE_DIR
+
+
+# TCP server that handles each request in its own thread, so the image
+# server can serve concurrent downloads.
+class ThreadedTCPServer(socketserver.ThreadingMixIn, socketserver.TCPServer):
+ pass
+
+
+class ImageHTTPRequestHandler(SimpleHTTPRequestHandler):
+ # Static-file handler rooted at the vmtest image directory instead of
+ # the process's working directory.
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, directory=IMAGE_DIR, **kwargs)
+
+
+class ImageServer:
+ # Small HTTP server serving the vmtest image directory from a
+ # background daemon thread; start()/stop() are idempotent.
+ def __init__(self, host="localhost", port=0):
+ # port=0 asks the OS for any free port; the actual bound port is
+ # reported via base_url once the server is started.
+ self.bind_host = host
+ self.bind_port = port
+ self.server = None
+ self._running = False
+
+ def start(self, *args, **kwds):
+ # No-op if already running.
+ if self._running:
+ return
+
+ self.server = ThreadedTCPServer(
+ (self.bind_host, self.bind_port), ImageHTTPRequestHandler
+ )
+
+ server_thread = threading.Thread(target=self.server.serve_forever)
+
+ # exit the server thread when the main thread terminates
+ server_thread.daemon = True
+ server_thread.start()
+ self._running = True
+
+ def stop(self):
+ # No-op if not running; shuts down the serve_forever loop.
+ if not self._running:
+ return
+ if self.server:
+ self.server.shutdown()
+ self._running = False
+
+ @property
+ def base_url(self):
+ # Prefer the actually-bound address (resolves port=0 to the real
+ # port); falls back to the requested bind address pre-start.
+ (ip, port) = (self.bind_host, self.bind_port)
+ if self.server is not None:
+ ip, port = self.server.server_address
+
+ return f"http://{ip}:{port}"
diff --git a/tests/unittests/helpers.py b/tests/unittests/helpers.py
index 64a79ca..819e2c5 100644
--- a/tests/unittests/helpers.py
+++ b/tests/unittests/helpers.py
@@ -121,15 +121,16 @@ class CiTestCase(TestCase):
util.subp = _real_subp
super(CiTestCase, self).tearDown()
- def add_patch(self, target, attr, **kwargs):
+ def add_patch(self, target, attr=None, **kwargs):
"""Patches specified target object and sets it as attr on test
instance also schedules cleanup"""
- if 'autospec' not in kwargs:
+ if 'autospec' not in kwargs and 'new' not in kwargs:
kwargs['autospec'] = True
m = mock.patch(target, **kwargs)
p = m.start()
self.addCleanup(m.stop)
- setattr(self, attr, p)
+ if attr is not None:
+ setattr(self, attr, p)
def tmp_dir(self, dir=None, cleanup=True):
"""Return a full path to a temporary directory for the test run."""
diff --git a/tests/unittests/test_apt_source.py b/tests/unittests/test_apt_source.py
index 6556399..48fb820 100644
--- a/tests/unittests/test_apt_source.py
+++ b/tests/unittests/test_apt_source.py
@@ -12,6 +12,8 @@ import socket
import mock
from mock import call
+from aptsources.sourceslist import SourceEntry
+
from curtin import distro
from curtin import gpg
from curtin import util
@@ -63,6 +65,18 @@ class PseudoChrootableTarget(util.ChrootableTarget):
ChrootableTargetStr = "curtin.commands.apt_config.util.ChrootableTarget"
+def entryify(data):
+ return [SourceEntry(line) for line in data.splitlines()]
+
+
+def lineify(entries):
+ out = apt_config.entries_to_str(entries)
+ # the tests are written without the trailing newline,
+ # but we don't want to remove multiple of them
+ out = out[:-1] if len(out) > 0 and out[-1] == '\n' else out
+ return out
+
+
class TestAptSourceConfig(CiTestCase):
""" TestAptSourceConfig
Main Class to test apt configs
@@ -77,6 +91,7 @@ class TestAptSourceConfig(CiTestCase):
self.matcher = re.compile(ADD_APT_REPO_MATCH).search
self.add_patch('curtin.util.subp', 'm_subp')
self.m_subp.return_value = ('s390x', '')
+ self.target = self.tmp_dir()
@staticmethod
def _add_apt_sources(*args, **kwargs):
@@ -758,6 +773,7 @@ class TestAptSourceConfig(CiTestCase):
def test_disable_suites(self):
"""test_disable_suites - disable_suites with many configurations"""
release = "xenial"
+
orig = """deb http://ubuntu.com//ubuntu xenial main
deb http://ubuntu.com//ubuntu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
@@ -771,39 +787,38 @@ deb http://ubuntu.com//ubuntu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# single disable release suite
disabled = ["$RELEASE"]
- expect = """\
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial main
+ expect = """# deb http://ubuntu.com//ubuntu xenial main
deb http://ubuntu.com//ubuntu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# single disable other suite
disabled = ["$RELEASE-updates"]
expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
+# deb http://ubuntu.com//ubuntu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# multi disable
disabled = ["$RELEASE-updates", "$RELEASE-security"]
expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+# deb http://ubuntu.com//ubuntu xenial-updates main
+# deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# multi line disable (same suite multiple times in input)
disabled = ["$RELEASE-updates", "$RELEASE-security"]
@@ -815,14 +830,14 @@ deb http://UBUNTU.com//ubuntu xenial-updates main
deb http://UBUNTU.COM//ubuntu xenial-updates main
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+# deb http://ubuntu.com//ubuntu xenial-updates main
+# deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
-# suite disabled by curtin: deb http://UBUNTU.com//ubuntu xenial-updates main
-# suite disabled by curtin: deb http://UBUNTU.COM//ubuntu xenial-updates main
+# deb http://UBUNTU.com//ubuntu xenial-updates main
+# deb http://UBUNTU.COM//ubuntu xenial-updates main
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# comment in input
disabled = ["$RELEASE-updates", "$RELEASE-security"]
@@ -835,15 +850,15 @@ deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://UBUNTU.COM//ubuntu xenial-updates main
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-updates main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+# deb http://ubuntu.com//ubuntu xenial-updates main
+# deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
#foo
-#deb http://UBUNTU.com//ubuntu xenial-updates main
-# suite disabled by curtin: deb http://UBUNTU.COM//ubuntu xenial-updates main
+# deb http://UBUNTU.com//ubuntu xenial-updates main
+# deb http://UBUNTU.COM//ubuntu xenial-updates main
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# single disable custom suite
disabled = ["foobar"]
@@ -854,9 +869,9 @@ deb http://ubuntu.com/ubuntu/ foobar main"""
expect = """deb http://ubuntu.com//ubuntu xenial main
deb http://ubuntu.com//ubuntu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
-# suite disabled by curtin: deb http://ubuntu.com/ubuntu/ foobar main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+# deb http://ubuntu.com/ubuntu/ foobar main"""
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# single disable non existing suite
disabled = ["foobar"]
@@ -868,8 +883,8 @@ deb http://ubuntu.com/ubuntu/ notfoobar main"""
deb http://ubuntu.com//ubuntu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
deb http://ubuntu.com/ubuntu/ notfoobar main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# single disable suite with option
disabled = ["$RELEASE-updates"]
@@ -879,12 +894,12 @@ deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by curtin: deb [a=b] http://ubu.com//ubu xenial-updates main
+# deb [a=b] http://ubu.com//ubu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# single disable suite with more options and auto $RELEASE expansion
disabled = ["updates"]
@@ -894,13 +909,12 @@ deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
expect = """deb http://ubuntu.com//ubuntu xenial main
-# suite disabled by curtin: deb [a=b c=d] \
-http://ubu.com//ubu xenial-updates main
+# deb [a=b c=d] http://ubu.com//ubu xenial-updates main
deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
# single disable suite while options at others
disabled = ["$RELEASE-security"]
@@ -911,25 +925,167 @@ deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
expect = """deb http://ubuntu.com//ubuntu xenial main
deb [arch=foo] http://ubuntu.com//ubuntu xenial-updates main
-# suite disabled by curtin: deb http://ubuntu.com//ubuntu xenial-security main
+# deb http://ubuntu.com//ubuntu xenial-security main
deb-src http://ubuntu.com//ubuntu universe multiverse
deb http://ubuntu.com/ubuntu/ xenial-proposed main"""
- result = apt_config.disable_suites(disabled, orig, release)
- self.assertEqual(expect, result)
+ result = apt_config.disable_suites(disabled, entryify(orig), release)
+ self.assertEqual(expect, lineify(result))
def test_disable_suites_blank_lines(self):
"""test_disable_suites_blank_lines - ensure blank lines allowed"""
- lines = ["deb %(repo)s %(rel)s main universe",
- "",
- "deb %(repo)s %(rel)s-updates main universe",
- " # random comment",
- "#comment here",
- ""]
rel = "trusty"
- repo = 'http://example.com/mirrors/ubuntu'
- orig = "\n".join(lines) % {'repo': repo, 'rel': rel}
- self.assertEqual(
- orig, apt_config.disable_suites(["proposed"], orig, rel))
+
+ orig = """
+deb http://example.com/mirrors/ubuntu trusty main universe
+
+deb http://example.com/mirrors/ubuntu trusty-updates main universe
+
+deb http://example.com/mirrors/ubuntu trusty-proposed main universe
+
+#comment here"""
+ expect = """
+deb http://example.com/mirrors/ubuntu trusty main universe
+
+deb http://example.com/mirrors/ubuntu trusty-updates main universe
+
+# deb http://example.com/mirrors/ubuntu trusty-proposed main universe
+
+#comment here"""
+ disabled = ["proposed"]
+ result = apt_config.disable_suites(disabled, entryify(orig), rel)
+ self.assertEqual(expect, lineify(result))
+
+ def test_disable_components(self):
+ orig = """\
+deb http://ubuntu.com/ubuntu xenial main restricted universe multiverse
+deb http://ubuntu.com/ubuntu xenial-updates main restricted universe multiverse
+deb http://ubuntu.com/ubuntu xenial-security \
+main restricted universe multiverse
+deb-src http://ubuntu.com/ubuntu xenial main restricted universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed \
+main restricted universe multiverse"""
+ expect = orig
+
+ # no-op
+ disabled = []
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ # no-op 2
+ disabled = None
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ # we don't disable main
+ disabled = ('main', )
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ # nonsense
+ disabled = ('asdf', )
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ # free-only
+ expect = """\
+# deb http://ubuntu.com/ubuntu xenial main restricted universe multiverse
+deb http://ubuntu.com/ubuntu xenial main universe
+# deb http://ubuntu.com/ubuntu xenial-updates main restricted \
+universe multiverse
+deb http://ubuntu.com/ubuntu xenial-updates main universe
+# deb http://ubuntu.com/ubuntu xenial-security main restricted \
+universe multiverse
+deb http://ubuntu.com/ubuntu xenial-security main universe
+# deb-src http://ubuntu.com/ubuntu xenial main restricted universe multiverse
+deb-src http://ubuntu.com/ubuntu xenial main universe
+# deb http://ubuntu.com/ubuntu/ xenial-proposed main restricted \
+universe multiverse
+deb http://ubuntu.com/ubuntu/ xenial-proposed main universe"""
+ disabled = ('restricted', 'multiverse')
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ # skip line when this component is the last
+ orig = """\
+deb http://ubuntu.com/ubuntu xenial main universe multiverse
+deb http://ubuntu.com/ubuntu xenial-updates universe
+deb http://ubuntu.com/ubuntu xenial-security universe multiverse"""
+ expect = """\
+# deb http://ubuntu.com/ubuntu xenial main universe multiverse
+deb http://ubuntu.com/ubuntu xenial main
+# deb http://ubuntu.com/ubuntu xenial-updates universe
+# deb http://ubuntu.com/ubuntu xenial-security universe multiverse"""
+ disabled = ('universe', 'multiverse')
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ # comment everything
+ orig = """\
+deb http://ubuntu.com/ubuntu xenial-security universe multiverse"""
+ expect = """\
+# deb http://ubuntu.com/ubuntu xenial-security universe multiverse"""
+ disabled = ('universe', 'multiverse')
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ # double-hash comment
+ orig = """\
+
+## Major bug fix updates produced after the final release of the
+## distribution.
+
+deb http://archive.ubuntu.com/ubuntu/ impish-updates main restricted
+# deb http://archive.ubuntu.com/ubuntu/ impish-updates main restricted"""
+ expect = """\
+
+## Major bug fix updates produced after the final release of the
+## distribution.
+
+# deb http://archive.ubuntu.com/ubuntu/ impish-updates main restricted
+deb http://archive.ubuntu.com/ubuntu/ impish-updates main
+# deb http://archive.ubuntu.com/ubuntu/ impish-updates main restricted"""
+ disabled = ('restricted', )
+ result = apt_config.disable_components(disabled, entryify(orig))
+ self.assertEqual(expect, lineify(result))
+
+ @mock.patch("curtin.util.write_file")
+ @mock.patch("curtin.distro.get_architecture")
+ def test_generate_with_options(self, get_arch, write_file):
+ get_arch.return_value = "amd64"
+ orig = """deb http://ubuntu.com//ubuntu $RELEASE main
+# stuff things
+
+deb http://ubuntu.com//ubuntu $RELEASE-updates main
+deb http://ubuntu.com//ubuntu $RELEASE-security main
+deb-src http://ubuntu.com//ubuntu $RELEASE universe multiverse
+# deb http://ubuntu.com/ubuntu/ $RELEASE-proposed main
+deb [a=b] http://ubuntu.com/ubuntu/ $RELEASE-backports main
+"""
+ expect = """deb http://ubuntu.com//ubuntu xenial main
+# stuff things
+
+deb http://ubuntu.com//ubuntu xenial-updates main
+deb http://ubuntu.com//ubuntu xenial-security main
+deb-src http://ubuntu.com//ubuntu xenial universe multiverse
+# deb http://ubuntu.com/ubuntu/ xenial-proposed main
+# deb [a=b] http://ubuntu.com/ubuntu/ $RELEASE-backports main
+"""
+ # $RELEASE in backports doesn't get expanded because the line is
+ # considered invalid because of the options. So when the line
+ # gets commented out, it comments out the original line, not
+ # what we've modified it to.
+ rel = 'xenial'
+ mirrors = {'PRIMARY': 'http://ubuntu.com/ubuntu/'}
+
+ cfg = {
+ 'preserve_sources_list': False,
+ 'sources_list': orig,
+ 'disable_suites': ['backports'],
+ }
+
+ apt_config.generate_sources_list(cfg, rel, mirrors, self.target)
+ filepath = os.path.join(self.target, 'etc/apt/sources.list')
+ write_file.assert_called_with(filepath, expect, mode=0o644)
class TestDebconfSelections(CiTestCase):
diff --git a/tests/unittests/test_block.py b/tests/unittests/test_block.py
index d96a4a8..6d9b776 100644
--- a/tests/unittests/test_block.py
+++ b/tests/unittests/test_block.py
@@ -409,11 +409,11 @@ class TestWipeVolume(CiTestCase):
def test_wipe_superblock(self, mock_quick_zero):
block.wipe_volume(self.dev, mode='superblock')
mock_quick_zero.assert_called_with(self.dev, exclusive=True,
- partitions=False, strict=False)
+ partitions=False)
block.wipe_volume(self.dev, exclusive=True,
mode='superblock-recursive')
mock_quick_zero.assert_called_with(self.dev, exclusive=True,
- partitions=True, strict=False)
+ partitions=True)
@mock.patch('curtin.block.wipe_file')
def test_wipe_zero(self, mock_wipe_file):
@@ -615,6 +615,69 @@ class TestNonAscii(CiTestCase):
block.blkid()
+class TestLsblkNormalization(CiTestCase):
+ # In the Jammy timeframe, lsblk changed output such that column names
+ # that previously contained dashes now have underscores instead.
+# _lsblock_pairs_to_dict is expected to normalize this format to the dash
+# style.
+ # MAJ:MIN was also changed to MAJ_MIN
+ # impish, and expected format:
+ # ALIGNMENT="0" DISC-ALN="512" MAJ:MIN="252:0" ...
+ # jammy:
+ # ALIGNMENT="0" DISC_ALN="512" MAJ_MIN="252:0" ...
+ expected = {
+ 'vda': {
+ 'ALIGNMENT': "0",
+ 'DISC-ALN': "512",
+ 'DISC-GRAN': "512",
+ 'DISC-MAX': "2147483136",
+ 'DISC-ZERO': "0",
+ 'FSTYPE': "",
+ 'GROUP': "disk",
+ 'KNAME': "vda",
+ 'LABEL': "",
+ 'LOG-SEC': "512",
+ 'MAJ:MIN': "252:0",
+ 'MIN-IO': "512",
+ 'MODE': "brw-rw----",
+ 'MODEL': "",
+ 'MOUNTPOINT': "",
+ 'NAME': "vda",
+ 'OPT-IO': "0",
+ 'OWNER': "root",
+ 'PHY-SEC': "512",
+ 'RM': "0",
+ 'RO': "0",
+ 'ROTA': "1",
+ 'RQ-SIZE': "256",
+ 'SIZE': "12884901888",
+ 'STATE': "",
+ 'TYPE': "disk",
+ 'UUID': "",
+ 'device_path': '/dev/vda'
+ }
+ }
+
+ def test_lsblk_impish_style(self):
+ line = ('ALIGNMENT="0" DISC-ALN="512" DISC-GRAN="512" '
+ 'DISC-MAX="2147483136" DISC-ZERO="0" FSTYPE="" GROUP="disk" '
+ 'KNAME="vda" LABEL="" LOG-SEC="512" MAJ:MIN="252:0" '
+ 'MIN-IO="512" MODE="brw-rw----" MODEL="" MOUNTPOINT="" '
+ 'NAME="vda" OPT-IO="0" OWNER="root" PHY-SEC="512" RM="0" '
+ 'RO="0" ROTA="1" RQ-SIZE="256" SIZE="12884901888" STATE="" '
+ 'TYPE="disk" UUID=""')
+ self.assertEqual(self.expected, block._lsblock_pairs_to_dict(line))
+
+ def test_lsblk_jammy_style(self):
+ line = ('ALIGNMENT="0" DISC_ALN="512" DISC_GRAN="512" '
+ 'DISC_MAX="2147483136" DISC_ZERO="0" FSTYPE="" GROUP="disk" '
+ 'KNAME="vda" LABEL="" LOG_SEC="512" MAJ_MIN="252:0" '
+ 'MIN_IO="512" MODE="brw-rw----" MODEL="" MOUNTPOINT="" '
+ 'NAME="vda" OPT_IO="0" OWNER="root" PHY_SEC="512" RM="0" '
+ 'RO="0" ROTA="1" RQ_SIZE="256" SIZE="12884901888" STATE="" '
+ 'TYPE="disk" UUID=""')
+ self.assertEqual(self.expected, block._lsblock_pairs_to_dict(line))
+
+
class TestSlaveKnames(CiTestCase):
def setUp(self):
diff --git a/tests/unittests/test_block_lvm.py b/tests/unittests/test_block_lvm.py
index ff58b30..fc6130a 100644
--- a/tests/unittests/test_block_lvm.py
+++ b/tests/unittests/test_block_lvm.py
@@ -106,7 +106,8 @@ class TestBlockLvm(CiTestCase):
lvm.lvm_scan(multipath=True)
cmd_filter = [
'--config',
- 'devices{ filter = [ "a|/dev/mapper/mpath.*|", "r|.*|" ] }'
+ 'devices{ filter = [ "a|%s|", "a|%s|", "r|.*|" ] }' % (
+ '/dev/mapper/mpath.*', '/dev/mapper/dm_crypt-.*')
]
expected = [cmd + cmd_filter for cmd in cmds]
calls = [mock.call(cmd, capture=True) for cmd in expected]
@@ -117,12 +118,15 @@ class TestBlockLvm(CiTestCase):
class TestBlockLvmMultipathFilter(CiTestCase):
def test_generate_multipath_dev_mapper_filter(self):
- expected = 'filter = [ "a|/dev/mapper/mpath.*|", "r|.*|" ]'
+ expected = 'filter = [ "a|%s|", "a|%s|", "r|.*|" ]' % (
+ '/dev/mapper/mpath.*', '/dev/mapper/dm_crypt-.*')
self.assertEqual(expected, lvm.generate_multipath_dev_mapper_filter())
def test_generate_multipath_dm_uuid_filter(self):
expected = (
- 'filter = [ "a|/dev/disk/by-id/dm-uuid-.*mpath-.*|", "r|.*|" ]')
+ 'filter = [ "a|%s|", "a|%s|", "r|.*|" ]' % (
+ '/dev/disk/by-id/dm-uuid-.*mpath-.*',
+ '/dev/disk/by-id/.*dm_crypt-.*'))
self.assertEqual(expected, lvm.generate_multipath_dm_uuid_filter())
diff --git a/tests/unittests/test_block_mdadm.py b/tests/unittests/test_block_mdadm.py
index b04cf82..74396d8 100644
--- a/tests/unittests/test_block_mdadm.py
+++ b/tests/unittests/test_block_mdadm.py
@@ -942,8 +942,8 @@ class TestBlockMdadmMdHelpers(CiTestCase):
devname = '/dev/md0'
md_uuid = '93a73e10:427f280b:b7076c02:204b8f7a'
mock_os.path.realpath.return_value = devname
- rv = mdadm.md_check_array_uuid(devname, md_uuid)
- self.assertTrue(rv)
+ # "assertNotRaises"
+ mdadm.md_check_array_uuid(devname, md_uuid)
@patch('curtin.block.mdadm.os')
def test_md_check_array_uuid_mismatch(self, mock_os):
@@ -970,43 +970,87 @@ class TestBlockMdadmMdHelpers(CiTestCase):
def test_md_check_raid_level(self):
for rl in mdadm.VALID_RAID_LEVELS:
- self.assertTrue(mdadm.md_check_raidlevel(rl))
+ if isinstance(rl, int) or len(rl) <= 2:
+ el = 'raid%s' % (rl,)
+ elif rl == 'stripe':
+ el = 'raid0'
+ elif rl == 'mirror':
+ el = 'raid1'
+ else:
+ el = rl
+ # "assertNotRaises"
+ mdadm.md_check_raidlevel('md0', {'MD_LEVEL': el}, rl)
def test_md_check_raid_level_bad(self):
bogus = '27'
self.assertTrue(bogus not in mdadm.VALID_RAID_LEVELS)
with self.assertRaises(ValueError):
- mdadm.md_check_raidlevel(bogus)
+ mdadm.md_check_raidlevel('md0', {}, bogus)
@patch('curtin.block.mdadm.md_sysfs_attr')
def test_md_check_array_state(self, mock_attr):
mdname = '/dev/md0'
- mock_attr.side_effect = [
- 'clean', # array_state
- '0', # degraded
- 'idle', # sync_action
- ]
- self.assertTrue(mdadm.md_check_array_state(mdname))
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'clean'
+ elif attrname == 'degraded':
+ return '0'
+ elif attrname == 'sync_action':
+ return 'idle'
+
+ mock_attr.side_effect = mock_attr_impl
+ # "assertNotRaises"
+ mdadm.md_check_array_state(mdname)
+
+ @patch('curtin.block.mdadm.md_sysfs_attr')
+ def test_md_check_array_state_raid0(self, mock_attr):
+ # Raid 0 arrays do not have a degraded or sync_action sysfs
+ # attribute.
+ mdname = '/dev/md0'
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'clean'
+ elif attrname == 'degraded':
+ return default
+ elif attrname == 'sync_action':
+ return default
+
+ mock_attr.side_effect = mock_attr_impl
+ # "assertNotRaises"
+ mdadm.md_check_array_state(mdname)
@patch('curtin.block.mdadm.md_sysfs_attr')
def test_md_check_array_state_norw(self, mock_attr):
mdname = '/dev/md0'
- mock_attr.side_effect = [
- 'suspended', # array_state
- '0', # degraded
- 'idle', # sync_action
- ]
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'suspended'
+ elif attrname == 'degraded':
+ return '0'
+ elif attrname == 'sync_action':
+ return 'idle'
+
+ mock_attr.side_effect = mock_attr_impl
with self.assertRaises(ValueError):
mdadm.md_check_array_state(mdname)
@patch('curtin.block.mdadm.md_sysfs_attr')
def test_md_check_array_state_degraded(self, mock_attr):
mdname = '/dev/md0'
- mock_attr.side_effect = [
- 'clean', # array_state
- '1', # degraded
- 'idle', # sync_action
- ]
+
+ def mock_attr_impl(md_devname, attrname, default=''):
+ if attrname == 'array_state':
+ return 'clean'
+ elif attrname == 'degraded':
+ return '1'
+ elif attrname == 'sync_action':
+ return 'idle'
+
+ mock_attr.side_effect = mock_attr_impl
+
with self.assertRaises(ValueError):
mdadm.md_check_array_state(mdname)
@@ -1039,8 +1083,8 @@ class TestBlockMdadmMdHelpers(CiTestCase):
mock_guuid.return_value = '93a73e10:427f280b:b7076c02:204b8f7a'
mock_ckuuid.return_value = True
- rv = mdadm.md_check_uuid(mdname)
- self.assertTrue(rv)
+ # "assertNotRaises"
+ mdadm.md_check_uuid(mdname)
@patch('curtin.block.mdadm.md_check_array_uuid')
@patch('curtin.block.mdadm.md_get_uuid')
@@ -1152,6 +1196,7 @@ class TestBlockMdadmMdHelpers(CiTestCase):
with self.assertRaises(ValueError):
mdadm.md_check_array_membership(mdname, devices)
+ @patch('curtin.block.mdadm.mdadm_query_detail')
@patch('curtin.block.mdadm.md_check_array_membership')
@patch('curtin.block.mdadm.md_check_spares')
@patch('curtin.block.mdadm.md_check_devices')
@@ -1159,7 +1204,7 @@ class TestBlockMdadmMdHelpers(CiTestCase):
@patch('curtin.block.mdadm.md_check_raidlevel')
@patch('curtin.block.mdadm.md_check_array_state')
def test_md_check_all_good(self, mock_array, mock_raid, mock_uuid,
- mock_dev, mock_spare, mock_member):
+ mock_dev, mock_spare, mock_member, mock_detail):
md_devname = '/dev/md0'
raidlevel = 1
devices = ['/dev/vda', '/dev/vdb']
@@ -1171,16 +1216,143 @@ class TestBlockMdadmMdHelpers(CiTestCase):
mock_dev.return_value = None
mock_spare.return_value = None
mock_member.return_value = None
+ detail = {'MD_NAME': 'foo'}
+ mock_detail.return_value = detail
- mdadm.md_check(md_devname, raidlevel, devices=devices, spares=spares)
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=None)
mock_array.assert_has_calls([call(md_devname)])
- mock_raid.assert_has_calls([call(raidlevel)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
mock_uuid.assert_has_calls([call(md_devname)])
mock_dev.assert_has_calls([call(md_devname, devices)])
mock_spare.assert_has_calls([call(md_devname, spares)])
mock_member.assert_has_calls([call(md_devname, devices + spares)])
+ @patch('curtin.block.mdadm.os.path.realpath')
+ @patch('curtin.block.mdadm.mdadm_query_detail')
+ @patch('curtin.block.mdadm.md_check_array_membership')
+ @patch('curtin.block.mdadm.md_check_spares')
+ @patch('curtin.block.mdadm.md_check_devices')
+ @patch('curtin.block.mdadm.md_check_uuid')
+ @patch('curtin.block.mdadm.md_check_raidlevel')
+ @patch('curtin.block.mdadm.md_check_array_state')
+ def test_md_check_all_good_container(self, mock_array, mock_raid,
+ mock_uuid, mock_dev, mock_spare,
+ mock_member, mock_detail,
+ mock_realpath):
+ md_devname = '/dev/md0'
+ raidlevel = 1
+ devices = ['/dev/vda', '/dev/vdb']
+ spares = ['/dev/vdc']
+
+ mock_array.return_value = None
+ mock_raid.return_value = None
+ mock_uuid.return_value = None
+ mock_dev.return_value = None
+ mock_spare.return_value = None
+ mock_member.return_value = None
+ container_name = self.random_string()
+ container_dev = self.random_string()
+ detail = {'MD_CONTAINER': container_name}
+ mock_detail.return_value = detail
+
+ def realpath_impl(path):
+ if path == container_name:
+ return container_dev
+ else:
+ self.fail("unexpected realpath arg %r" % (path,))
+
+ mock_realpath.side_effect = realpath_impl
+
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=container_dev)
+
+ mock_array.assert_has_calls([call(md_devname)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
+ mock_uuid.assert_has_calls([call(md_devname)])
+ mock_dev.assert_has_calls([])
+ mock_spare.assert_has_calls([])
+ mock_member.assert_has_calls([])
+
+ @patch('curtin.block.mdadm.mdadm_query_detail')
+ @patch('curtin.block.mdadm.md_check_array_membership')
+ @patch('curtin.block.mdadm.md_check_spares')
+ @patch('curtin.block.mdadm.md_check_devices')
+ @patch('curtin.block.mdadm.md_check_uuid')
+ @patch('curtin.block.mdadm.md_check_raidlevel')
+ @patch('curtin.block.mdadm.md_check_array_state')
+ def test_md_check_all_no_container(self, mock_array, mock_raid,
+ mock_uuid, mock_dev, mock_spare,
+ mock_member, mock_detail):
+ md_devname = '/dev/md0'
+ raidlevel = 1
+ devices = ['/dev/vda', '/dev/vdb']
+ spares = ['/dev/vdc']
+
+ mock_array.return_value = None
+ mock_raid.return_value = None
+ mock_uuid.return_value = None
+ mock_dev.return_value = None
+ mock_spare.return_value = None
+ mock_member.return_value = None
+ container_name = self.random_string()
+ detail = {}
+
+ mock_detail.return_value = detail
+
+ with self.assertRaises(ValueError):
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=container_name)
+
+ mock_array.assert_has_calls([call(md_devname)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
+ mock_uuid.assert_has_calls([call(md_devname)])
+ mock_dev.assert_has_calls([])
+ mock_spare.assert_has_calls([])
+ mock_member.assert_has_calls([])
+
+ @patch('curtin.block.mdadm.mdadm_query_detail')
+ @patch('curtin.block.mdadm.md_check_array_membership')
+ @patch('curtin.block.mdadm.md_check_spares')
+ @patch('curtin.block.mdadm.md_check_devices')
+ @patch('curtin.block.mdadm.md_check_uuid')
+ @patch('curtin.block.mdadm.md_check_raidlevel')
+ @patch('curtin.block.mdadm.md_check_array_state')
+ def test_md_check_all_wrong_container(self, mock_array, mock_raid,
+ mock_uuid, mock_dev, mock_spare,
+ mock_member, mock_detail):
+ md_devname = '/dev/md0'
+ raidlevel = 1
+ devices = ['/dev/vda', '/dev/vdb']
+ spares = ['/dev/vdc']
+
+ mock_array.return_value = None
+ mock_raid.return_value = None
+ mock_uuid.return_value = None
+ mock_dev.return_value = None
+ mock_spare.return_value = None
+ mock_member.return_value = None
+ container_name = self.random_string()
+ detail = {'MD_CONTAINER': container_name + '1'}
+
+ mock_detail.return_value = detail
+
+ with self.assertRaises(ValueError):
+ mdadm.md_check(
+ md_devname, raidlevel, devices=devices, spares=spares,
+ container=container_name)
+
+ mock_array.assert_has_calls([call(md_devname)])
+ mock_raid.assert_has_calls([call(md_devname, detail, raidlevel)])
+ mock_uuid.assert_has_calls([call(md_devname)])
+ mock_dev.assert_has_calls([])
+ mock_spare.assert_has_calls([])
+ mock_member.assert_has_calls([])
+
def test_md_check_all_good_devshort(self):
md_devname = 'md0'
raidlevel = 1
@@ -1189,7 +1361,7 @@ class TestBlockMdadmMdHelpers(CiTestCase):
with self.assertRaises(ValueError):
mdadm.md_check(md_devname, raidlevel, devices=devices,
- spares=spares)
+ spares=spares, container=None)
def test_md_present(self):
mdname = 'md0'
diff --git a/tests/unittests/test_block_multipath.py b/tests/unittests/test_block_multipath.py
index 426be56..db767ab 100644
--- a/tests/unittests/test_block_multipath.py
+++ b/tests/unittests/test_block_multipath.py
@@ -1,7 +1,7 @@
import mock
from curtin.block import multipath
-from .helpers import CiTestCase, raise_pexec_error
+from .helpers import CiTestCase
# dmsetup uses tabs as separators
@@ -63,23 +63,40 @@ class TestMultipath(CiTestCase):
self.m_udev.udevadm_info.return_value = {'DM_UUID': 'lvm-vg-foo-lv1'}
self.assertFalse(multipath.is_mpath_device(self.random_string()))
- def test_is_mpath_member_true(self):
- """is_mpath_device returns false when DM_UUID doesnt start w/ mpath-"""
- self.assertTrue(multipath.is_mpath_member(self.random_string()))
-
def test_is_mpath_member_false(self):
- """is_mpath_member returns false if 'multipath -c <dev>' exits err."""
- self.m_subp.side_effect = raise_pexec_error
+ """is_mpath_member returns false if DM_MULTIPATH_DEVICE_PATH is not
+ present"""
+ self.m_udev.udevadm_info.return_value = {}
+ self.assertFalse(multipath.is_mpath_member(self.random_string()))
+
+ def test_is_mpath_member_false_2(self):
+ """is_mpath_member returns false if DM_MULTIPATH_DEVICE_PATH is not
+ '1'"""
+ self.m_udev.udevadm_info.return_value = {
+ "DM_MULTIPATH_DEVICE_PATH": "2",
+ }
self.assertFalse(multipath.is_mpath_member(self.random_string()))
+ def test_is_mpath_member_true(self):
+ """is_mpath_member returns true if DM_MULTIPATH_DEVICE_PATH is
+ '1'"""
+ self.m_udev.udevadm_info.return_value = {
+ "DM_MULTIPATH_DEVICE_PATH": "1",
+ }
+ self.assertTrue(multipath.is_mpath_member(self.random_string()))
+
def test_is_mpath_partition_true(self):
- """is_mpath_member returns true if 'multipath -c <dev>' exits 0."""
+ """is_mpath_partition returns true if udev info contains right keys."""
dm_device = "/dev/dm-" + self.random_string()
- self.m_udev.udevadm_info.return_value = {'DM_PART': '1'}
+ self.m_udev.udevadm_info.return_value = {
+ 'DM_PART': '1',
+ 'DM_MPATH': 'a',
+ }
self.assertTrue(multipath.is_mpath_partition(dm_device))
def test_is_mpath_partition_false(self):
- """is_mpath_member returns false if DM_PART is not present for dev."""
+ """is_mpath_partition returns false if DM_PART is not present for dev.
+ """
self.assertFalse(multipath.is_mpath_partition(self.random_string()))
def test_mpath_partition_to_mpath_id_and_partnumber(self):
diff --git a/tests/unittests/test_clear_holders.py b/tests/unittests/test_clear_holders.py
index d1c2590..48697b5 100644
--- a/tests/unittests/test_clear_holders.py
+++ b/tests/unittests/test_clear_holders.py
@@ -142,8 +142,7 @@ class TestClearHolders(CiTestCase):
# 1. wipe the bcache device contents
m_block.wipe_volume.assert_called_with(self.test_blockdev,
mode='superblock',
- exclusive=False,
- strict=True)
+ exclusive=False)
# 2. extract the backing device
m_bcache.get_backing_device.assert_called_with(self.test_blockdev)
m_bcache.sysfs_path.assert_has_calls([
@@ -243,7 +242,7 @@ class TestClearHolders(CiTestCase):
clear_holders.shutdown_mdadm(self.test_syspath)
mock_wipe.assert_called_with(self.test_blockdev, exclusive=False,
- mode='superblock', strict=True)
+ mode='superblock')
mock_mdadm.set_sync_action.assert_has_calls([
mock.call(self.test_blockdev, action="idle"),
mock.call(self.test_blockdev, action="frozen")])
@@ -281,7 +280,7 @@ class TestClearHolders(CiTestCase):
clear_holders.shutdown_mdadm(self.test_syspath)
mock_wipe.assert_called_with(self.test_blockdev, exclusive=False,
- mode='superblock', strict=True)
+ mode='superblock')
mock_mdadm.set_sync_action.assert_has_calls([
mock.call(self.test_blockdev, action="idle"),
mock.call(self.test_blockdev, action="frozen")])
@@ -363,7 +362,7 @@ class TestClearHolders(CiTestCase):
clear_holders.wipe_superblock(self.test_syspath)
mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
@mock.patch('curtin.block.clear_holders.multipath')
@mock.patch('curtin.block.clear_holders.is_swap_device')
@@ -391,7 +390,7 @@ class TestClearHolders(CiTestCase):
mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
mock_zfs.zpool_export.assert_called_with('fake_pool')
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
@mock.patch('curtin.block.clear_holders.multipath')
@mock.patch('curtin.block.clear_holders.is_swap_device')
@@ -420,7 +419,7 @@ class TestClearHolders(CiTestCase):
self.assertEqual(0, mock_zfs.device_to_poolname.call_count)
self.assertEqual(0, mock_zfs.zpool_list.call_count)
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
@mock.patch('curtin.block.clear_holders.udev')
@mock.patch('curtin.block.clear_holders.multipath')
@@ -454,7 +453,7 @@ class TestClearHolders(CiTestCase):
clear_holders.wipe_superblock(self.test_syspath)
mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
@mock.patch('curtin.block.clear_holders.multipath')
@mock.patch('curtin.block.clear_holders.is_swap_device')
@@ -482,7 +481,7 @@ class TestClearHolders(CiTestCase):
clear_holders.wipe_superblock(self.test_syspath)
mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
mock_block.get_sysfs_partitions.assert_has_calls(
[mock.call(self.test_syspath)] * 3)
mock_block.rescan_block_devices.assert_has_calls(
@@ -515,7 +514,7 @@ class TestClearHolders(CiTestCase):
clear_holders.wipe_superblock(self.test_syspath)
mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
mock_block.get_sysfs_partitions.assert_has_calls(
[mock.call(self.test_syspath)] * 3)
mock_block.rescan_block_devices.assert_has_calls(
@@ -577,7 +576,7 @@ class TestClearHolders(CiTestCase):
clear_holders.wipe_superblock(self.test_syspath)
mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
@mock.patch('curtin.block.clear_holders.zfs')
@mock.patch('curtin.block.clear_holders.get_holders')
@@ -614,7 +613,7 @@ class TestClearHolders(CiTestCase):
mock_block.sysfs_to_devpath.assert_called_with(self.test_syspath)
mock_mpath.remove_partition.assert_called_with('mpatha-part1')
mock_block.wipe_volume.assert_called_with(
- self.test_blockdev, exclusive=True, mode='superblock', strict=True)
+ self.test_blockdev, exclusive=True, mode='superblock')
@mock.patch('curtin.block.clear_holders.LOG')
@mock.patch('curtin.block.clear_holders.block')
diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
index 8cfd6af..3e22792 100644
--- a/tests/unittests/test_commands_block_meta.py
+++ b/tests/unittests/test_commands_block_meta.py
@@ -128,6 +128,8 @@ class TestBlockMetaSimple(CiTestCase):
'mock_block_get_root_device')
self.add_patch('curtin.block.is_valid_device',
'mock_block_is_valid_device')
+ self.add_patch('curtin.block.lvm.activate_volgroups',
+ 'mock_activate_volgroups')
# config
self.add_patch('curtin.config.load_command_config',
'mock_config_load')
@@ -153,6 +155,8 @@ class TestBlockMetaSimple(CiTestCase):
self.mock_block_get_dev_name_entry.assert_called_with(devname)
self.mock_subp.assert_has_calls([call(args=wget),
call(['partprobe', devnode]),
+ call(['udevadm', 'trigger', devnode]),
+ call(['udevadm', 'settle']),
call(['udevadm', 'settle'])])
paths = ["curtin", "system-data/var/lib/snapd", "snaps"]
self.mock_block_get_root_device.assert_called_with([devname],
@@ -176,6 +180,8 @@ class TestBlockMetaSimple(CiTestCase):
self.mock_block_get_dev_name_entry.assert_called_with(devname)
self.mock_subp.assert_has_calls([call(args=wget),
call(['partprobe', devnode]),
+ call(['udevadm', 'trigger', devnode]),
+ call(['udevadm', 'settle']),
call(['udevadm', 'settle'])])
paths = ["curtin", "system-data/var/lib/snapd", "snaps"]
self.mock_block_get_root_device.assert_called_with([devname],
@@ -201,7 +207,8 @@ class TestBlockMetaSimple(CiTestCase):
mock_write_image.return_value = devname
args = Namespace(target=self.target, devices=None, mode=None,
- boot_fstype=None, fstype=None, force_mode=False)
+ boot_fstype=None, fstype=None, force_mode=False,
+ testmode=True)
block_meta.block_meta(args)
@@ -209,6 +216,62 @@ class TestBlockMetaSimple(CiTestCase):
self.mock_subp.assert_has_calls(
[call(['mount', devname, self.target])])
+ @patch('curtin.commands.block_meta.meta_clear')
+ @patch('curtin.commands.block_meta.write_image_to_disk')
+ @patch('curtin.commands.block_meta.get_device_paths_from_storage_config')
+ @patch('curtin.block.lookup_disk', new=lambda s: "/dev/" + s[:-6])
+ def test_meta_simple_grub_device(self, mock_gdpfsc, mock_write_image,
+ mock_clear):
+ # grub_device can indicate which device to choose when multiple
+ # are available. Also override custom mode to use simple anyhow
+ # because that's how dd imaging works.
+ def _gdpfsc(cfg):
+ return [x[1]["path"] for x in cfg.items()]
+ mock_gdpfsc.side_effect = _gdpfsc
+ sources = {
+ 'unittest': {'type': 'dd-xz',
+ 'uri': 'http://myhost/curtin-unittest-dd.xz'}
+ }
+ devname = "fakevdd"
+ config = {
+ 'sources': sources,
+ 'storage': {
+ 'config': [{
+ 'id': 'fakevdc',
+ 'name': 'fakevdc',
+ 'type': 'disk',
+ 'wipe': 'superblock',
+ 'path': '/dev/fakevdc',
+ 'serial': 'fakevdcserial',
+ }, {
+ 'grub_device': True,
+ 'id': 'fakevdd',
+ 'name': 'fakevdd',
+ 'type': 'disk',
+ 'wipe': 'superblock',
+ 'path': '/dev/fakevdd',
+ 'serial': 'fakevddserial',
+ }]
+ }
+ }
+ self.mock_config_load.return_value = config
+ self.mock_load_env.return_value = {'target': self.target}
+ self.mock_block_is_valid_device.return_value = True
+
+ def _gdne(devname):
+ bname = devname.split('/dev/')[-1]
+ return (bname, "/dev/" + bname)
+ self.mock_block_get_dev_name_entry.side_effect = _gdne
+ mock_write_image.return_value = devname
+
+ args = Namespace(target=None, devices=None, mode='custom',
+ boot_fstype=None, fstype=None, force_mode=False,
+ testmode=True)
+
+ block_meta.block_meta(args)
+
+ mock_write_image.assert_called_with(sources.get('unittest'), devname)
+
class TestBlockMeta(CiTestCase):
@@ -308,7 +371,7 @@ class TestBlockMeta(CiTestCase):
holders = ['md1']
self.mock_get_holders.return_value = holders
- block_meta.disk_handler(info, self.storage_config)
+ block_meta.disk_handler(info, self.storage_config, {})
print("clear_holders: %s" % self.mock_clear_holders.call_args_list)
print("assert_clear: %s" % self.mock_assert_clear.call_args_list)
@@ -331,7 +394,7 @@ class TestBlockMeta(CiTestCase):
self.mock_block_sys_block_path.return_value = '/sys/class/block/xxx'
self.mock_block_sector_size.return_value = (512, 512)
- block_meta.partition_handler(part_info, self.storage_config)
+ block_meta.partition_handler(part_info, self.storage_config, {})
part_offset = 2048 * 512
self.mock_block_zero_file.assert_called_with(disk_kname, [part_offset],
exclusive=False)
@@ -358,10 +421,10 @@ class TestBlockMeta(CiTestCase):
}
self.mock_get_volume_type.return_value = 'part'
- block_meta.mount_handler(mount_info, self.storage_config)
+ block_meta.mount_handler(mount_info, self.storage_config, {})
options = 'defaults'
comment = "# / was on /wark/xxx during curtin installation"
- expected = "%s\n%s %s %s %s 0 0\n" % (comment,
+ expected = "%s\n%s %s %s %s 0 1\n" % (comment,
disk_info['path'],
mount_info['path'],
fs_info['fstype'], options)
@@ -386,10 +449,10 @@ class TestBlockMeta(CiTestCase):
}
self.mock_get_volume_type.return_value = 'part'
- block_meta.mount_handler(mount_info, self.storage_config)
+ block_meta.mount_handler(mount_info, self.storage_config, {})
options = 'ro'
comment = "# /readonly was on /wark/xxx during curtin installation"
- expected = "%s\n%s %s %s %s 0 0\n" % (comment,
+ expected = "%s\n%s %s %s %s 0 1\n" % (comment,
disk_info['path'],
mount_info['path'],
fs_info['fstype'], options)
@@ -415,10 +478,10 @@ class TestBlockMeta(CiTestCase):
}
self.mock_get_volume_type.return_value = 'part'
- block_meta.mount_handler(mount_info, self.storage_config)
+ block_meta.mount_handler(mount_info, self.storage_config, {})
options = 'defaults'
comment = "# /readonly was on /wark/xxx during curtin installation"
- expected = "%s\n%s %s %s %s 0 0\n" % (comment,
+ expected = "%s\n%s %s %s %s 0 1\n" % (comment,
disk_info['path'],
mount_info['path'],
fs_info['fstype'], options)
@@ -446,10 +509,10 @@ class TestBlockMeta(CiTestCase):
}
self.mock_get_volume_type.return_value = 'part'
- block_meta.mount_handler(mount_info, self.storage_config)
+ block_meta.mount_handler(mount_info, self.storage_config, {})
options = 'defaults'
comment = "# /readonly was on /wark/xxx during curtin installation"
- expected = "#curtin-test\n%s\n%s %s %s %s 0 0\n" % (comment,
+ expected = "#curtin-test\n%s\n%s %s %s %s 0 1\n" % (comment,
disk_info['path'],
mount_info['path'],
fs_info['fstype'],
@@ -479,7 +542,7 @@ class TestZpoolHandler(CiTestCase):
m_getpath.return_value = disk_path
m_block.disk_to_byid_path.return_value = None
m_util.load_command_environment.return_value = {'target': 'mytarget'}
- block_meta.zpool_handler(info, storage_config)
+ block_meta.zpool_handler(info, storage_config, {})
m_zfs.zpool_create.assert_called_with(
info['pool'], [disk_path],
mountpoint="/",
@@ -610,7 +673,7 @@ class TestFstabData(CiTestCase):
self.assertEqual(
block_meta.FstabData(
spec="none", fstype="tmpfs", path="/tmpfs",
- options="defaults", freq="0", passno="0", device=None),
+ options="defaults", freq="0", device=None),
block_meta.mount_data(info, {'xm1': info}))
@patch('curtin.block.iscsi.volpath_is_iscsi')
@@ -625,7 +688,7 @@ class TestFstabData(CiTestCase):
self.assertEqual(
block_meta.FstabData(
spec=None, fstype="ext4", path="/",
- options="noatime", freq="0", passno="0", device="/dev/xda1"),
+ options="noatime", freq="0", device="/dev/xda1"),
block_meta.mount_data(scfg['m1'], scfg))
@patch('curtin.block.iscsi.volpath_is_iscsi', return_value=False)
@@ -643,7 +706,7 @@ class TestFstabData(CiTestCase):
self.assertEqual(
block_meta.FstabData(
spec=None, fstype="vfat", path="/boot/efi",
- options="defaults", freq="0", passno="0", device="/dev/xda1"),
+ options="defaults", freq="0", device="/dev/xda1"),
block_meta.mount_data(scfg['m1'], scfg))
@patch('curtin.block.iscsi.volpath_is_iscsi')
@@ -657,7 +720,7 @@ class TestFstabData(CiTestCase):
self.assertEqual(
block_meta.FstabData(
spec=None, fstype="ext4", path="/",
- options="noatime,_netdev", freq="0", passno="0",
+ options="noatime,_netdev", freq="0",
device="/dev/xda1"),
block_meta.mount_data(scfg['m1'], scfg))
@@ -683,7 +746,7 @@ class TestFstabData(CiTestCase):
self.assertEqual(
block_meta.FstabData(
spec=myspec, fstype="ext3", path="/",
- options="noatime", freq="0", passno="0",
+ options="noatime", freq="0",
device=None),
block_meta.mount_data(mnt, scfg))
@@ -761,15 +824,54 @@ class TestFstabData(CiTestCase):
spec="/dev/disk2", path="/mnt", fstype='btrfs', options='noatime')
lines = block_meta.fstab_line_for_data(fdata).splitlines()
self.assertEqual(
- ["/dev/disk2", "/mnt", "btrfs", "noatime", "0", "0"],
+ ["/dev/disk2", "/mnt", "btrfs", "noatime", "0", "1"],
+ lines[1].split())
+
+ def test_fstab_line_root_and_no_passno(self):
+ """fstab_line_for_data passno autoselect for /."""
+ fdata = block_meta.FstabData(
+ spec="/dev/disk2", path="/", fstype='btrfs', passno='0',
+ options='noatime')
+ lines = block_meta.fstab_line_for_data(fdata).splitlines()
+ self.assertEqual(
+ ["/dev/disk2", "/", "btrfs", "noatime", "0", "0"],
+ lines[1].split())
+
+ def test_fstab_line_boot_and_no_passno(self):
+ """fstab_line_for_data passno autoselect for /boot."""
+ fdata = block_meta.FstabData(
+ spec="/dev/disk2", path="/boot", fstype='btrfs', options='noatime')
+ lines = block_meta.fstab_line_for_data(fdata).splitlines()
+ self.assertEqual(
+ ["/dev/disk2", "/boot", "btrfs", "noatime", "0", "1"],
+ lines[1].split())
+
+ def test_fstab_line_boot_efi_and_no_passno(self):
+ """fstab_line_for_data passno autoselect for /boot/efi."""
+ fdata = block_meta.FstabData(
+ spec="/dev/disk2", path="/boot/efi", fstype='btrfs',
+ options='noatime')
+ lines = block_meta.fstab_line_for_data(fdata).splitlines()
+ self.assertEqual(
+ ["/dev/disk2", "/boot/efi", "btrfs", "noatime", "0", "1"],
+ lines[1].split())
+
+ def test_fstab_line_almost_boot(self):
+ """fstab_line_for_data passno that pretends to be /boot."""
+ fdata = block_meta.FstabData(
+ spec="/dev/disk2", path="/boots", fstype='btrfs',
+ options='noatime')
+ lines = block_meta.fstab_line_for_data(fdata).splitlines()
+ self.assertEqual(
+ ["/dev/disk2", "/boots", "btrfs", "noatime", "0", "1"],
lines[1].split())
def test_fstab_line_for_data_with_passno_and_freq(self):
"""fstab_line_for_data should respect passno and freq."""
fdata = block_meta.FstabData(
- spec="/dev/d1", path="/mnt", fstype='ext4', freq="1", passno="2")
+ spec="/dev/d1", path="/mnt", fstype='ext4', freq="1", passno="1")
lines = block_meta.fstab_line_for_data(fdata).splitlines()
- self.assertEqual(["1", "2"], lines[1].split()[4:6])
+ self.assertEqual(["1", "1"], lines[1].split()[4:6])
def test_fstab_line_for_data_raises_error_without_spec_or_device(self):
"""fstab_line_for_data should raise ValueError if no spec or device."""
@@ -797,7 +899,7 @@ class TestFstabData(CiTestCase):
"# /mnt was on /dev/disk2 during curtin installation",
lines[0])
self.assertEqual(
- [by_uuid, "/mnt", "ext4", "defaults", "0", "0"],
+ [by_uuid, "/mnt", "ext4", "defaults", "0", "1"],
lines[1].split())
self.assertEqual(1, m_uinfo.call_count)
self.assertEqual(1, m_vol_type.call_count)
@@ -819,7 +921,7 @@ class TestFstabData(CiTestCase):
"# /mnt was on /dev/disk2 during curtin installation",
lines[0])
self.assertEqual(
- ["/dev/disk2", "/mnt", "ext4", "defaults", "0", "0"],
+ ["/dev/disk2", "/mnt", "ext4", "defaults", "0", "1"],
lines[1].split())
self.assertEqual(1, m_uinfo.call_count)
self.assertEqual(1, m_vol_type.call_count)
@@ -837,7 +939,7 @@ class TestFstabData(CiTestCase):
'# /mnt was on /dev/xvda1 during curtin installation',
lines[0])
self.assertEqual(
- ["/dev/xvda1", "/mnt", "ext4", "defaults", "0", "0"],
+ ["/dev/xvda1", "/mnt", "ext4", "defaults", "0", "1"],
lines[1].split())
self.assertEqual(0, m_get_uuid.call_count)
@@ -1154,7 +1256,7 @@ class TestDasdHandler(CiTestCase):
m_dasd_devname.return_value = disk_path
m_getpath.return_value = disk_path
m_dasd_needf.side_effect = [True, False]
- block_meta.dasd_handler(info, storage_config)
+ block_meta.dasd_handler(info, storage_config, {})
m_dasd_format.assert_called_with(blksize=4096, layout='cdl',
set_label='cloudimg-rootfs',
mode='quick')
@@ -1176,7 +1278,7 @@ class TestDasdHandler(CiTestCase):
disk_path = "/wark/dasda"
m_getpath.return_value = disk_path
m_dasd_needf.side_effect = [False, False]
- block_meta.dasd_handler(info, storage_config)
+ block_meta.dasd_handler(info, storage_config, {})
self.assertEqual(0, m_dasd_format.call_count)
@patch('curtin.commands.block_meta.dasd.DasdDevice.format')
@@ -1196,7 +1298,7 @@ class TestDasdHandler(CiTestCase):
disk_path = "/wark/dasda"
m_getpath.return_value = disk_path
m_dasd_needf.side_effect = [False, False]
- block_meta.dasd_handler(info, storage_config)
+ block_meta.dasd_handler(info, storage_config, {})
self.assertEqual(1, m_dasd_needf.call_count)
self.assertEqual(0, m_dasd_format.call_count)
@@ -1219,7 +1321,7 @@ class TestDasdHandler(CiTestCase):
m_getpath.return_value = disk_path
m_dasd_needf.side_effect = [True, False]
with self.assertRaises(ValueError):
- block_meta.dasd_handler(info, storage_config)
+ block_meta.dasd_handler(info, storage_config, {})
self.assertEqual(1, m_dasd_needf.call_count)
self.assertEqual(0, m_dasd_format.call_count)
@@ -1242,7 +1344,7 @@ class TestDiskHandler(CiTestCase):
m_getpath.return_value = disk_path
m_block.get_part_table_type.return_value = 'vtoc'
m_getpath.return_value = disk_path
- block_meta.disk_handler(info, storage_config)
+ block_meta.disk_handler(info, storage_config, {})
m_getpath.assert_called_with(info['id'], storage_config)
m_block.get_part_table_type.assert_called_with(disk_path)
@@ -1258,7 +1360,7 @@ class TestDiskHandler(CiTestCase):
m_getpath.return_value = disk_path
m_block.get_part_table_type.return_value = self.random_string()
m_getpath.return_value = disk_path
- block_meta.disk_handler(info, storage_config)
+ block_meta.disk_handler(info, storage_config, {})
m_getpath.assert_called_with(info['id'], storage_config)
self.assertEqual(0, m_block.get_part_table_type.call_count)
@@ -1274,7 +1376,7 @@ class TestDiskHandler(CiTestCase):
m_getpath.return_value = disk_path
m_block.get_part_table_type.return_value = 'gpt'
m_getpath.return_value = disk_path
- block_meta.disk_handler(info, storage_config)
+ block_meta.disk_handler(info, storage_config, {})
m_getpath.assert_called_with(info['id'], storage_config)
self.assertEqual(0, m_block.get_part_table_type.call_count)
@@ -1292,7 +1394,7 @@ class TestDiskHandler(CiTestCase):
m_block.get_part_table_type.return_value = None
m_getpath.return_value = disk_path
with self.assertRaises(ValueError):
- block_meta.disk_handler(info, storage_config)
+ block_meta.disk_handler(info, storage_config, {})
m_getpath.assert_called_with(info['id'], storage_config)
m_block.get_part_table_type.assert_called_with(disk_path)
@@ -1304,7 +1406,7 @@ class TestDiskHandler(CiTestCase):
info = {'ptable': 'vtoc', 'type': 'disk', 'id': 'disk-foobar'}
path = m_getpath.return_value = self.random_string()
m_get_holders.return_value = []
- block_meta.disk_handler(info, OrderedDict())
+ block_meta.disk_handler(info, OrderedDict(), {})
m_subp.assert_called_once_with(['fdasd', '-c', '/dev/null', path])
@@ -1351,7 +1453,7 @@ class TestLvmVolgroupHandler(CiTestCase):
self.m_getpath.side_effect = iter(devices)
block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual([call(['vgcreate', '--force', '--zero=y', '--yes',
'vg1'] + devices, capture=True)],
@@ -1367,7 +1469,7 @@ class TestLvmVolgroupHandler(CiTestCase):
self.storage_config['lvm-volgroup1']['preserve'] = True
block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual(0, self.m_subp.call_count)
self.assertEqual(1, self.m_lvm.lvm_scan.call_count)
@@ -1380,7 +1482,7 @@ class TestLvmVolgroupHandler(CiTestCase):
self.storage_config['lvm-volgroup1']['preserve'] = True
block_meta.lvm_volgroup_handler(self.storage_config['lvm-volgroup1'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual(1, self.m_lvm.activate_volgroups.call_count)
self.assertEqual([call('vg1')],
@@ -1397,7 +1499,7 @@ class TestLvmVolgroupHandler(CiTestCase):
with self.assertRaises(RuntimeError):
block_meta.lvm_volgroup_handler(
- self.storage_config['lvm-volgroup1'], self.storage_config)
+ self.storage_config['lvm-volgroup1'], self.storage_config, {})
self.assertEqual(1, self.m_lvm.activate_volgroups.call_count)
self.assertEqual([call('vg1')],
@@ -1448,7 +1550,7 @@ class TestLvmPartitionHandler(CiTestCase):
expected_size_str = "%sB" % util.human2bytes(lv_size)
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config)
+ self.storage_config, {})
call_name, call_args, call_kwargs = self.m_subp.mock_calls[0]
# call_args is an n-tuple of arg list
@@ -1462,7 +1564,7 @@ class TestLvmPartitionHandler(CiTestCase):
self.m_getpath.return_value = devpath
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config)
+ self.storage_config, {})
self.m_wipe.assert_called_with(devpath, mode='superblock',
exclusive=False)
@@ -1476,7 +1578,7 @@ class TestLvmPartitionHandler(CiTestCase):
wipe_mode = 'zero'
self.storage_config['lvm-part1']['wipe'] = wipe_mode
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config)
+ self.storage_config, {})
self.m_wipe.assert_called_with(devpath, mode=wipe_mode,
exclusive=False)
@@ -1485,7 +1587,7 @@ class TestLvmPartitionHandler(CiTestCase):
m_verify.return_value = True
self.storage_config['lvm-part1']['preserve'] = True
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual(0, self.m_distro.lsb_release.call_count)
self.assertEqual(0, self.m_subp.call_count)
@@ -1495,7 +1597,7 @@ class TestLvmPartitionHandler(CiTestCase):
self.m_lvm.get_lv_size_bytes.return_value = 1073741824.0
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual([call('vg1')],
self.m_lvm.get_lvols_in_volgroup.call_args_list)
self.assertEqual([call('lv1')],
@@ -1509,7 +1611,7 @@ class TestLvmPartitionHandler(CiTestCase):
with self.assertRaises(RuntimeError):
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual([call('vg1')],
self.m_lvm.get_lvols_in_volgroup.call_args_list)
@@ -1524,7 +1626,7 @@ class TestLvmPartitionHandler(CiTestCase):
with self.assertRaises(RuntimeError):
block_meta.lvm_partition_handler(self.storage_config['lvm-part1'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual([call('vg1')],
self.m_lvm.get_lvols_in_volgroup.call_args_list)
self.assertEqual([call('lv1')],
@@ -1592,7 +1694,7 @@ class TestDmCryptHandler(CiTestCase):
self.m_getpath.return_value = volume_path
info = self.storage_config['dmcrypt0']
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
expected_calls = [
call(['cryptsetup', '--cipher', self.cipher,
'--key-size', self.keysize,
@@ -1610,7 +1712,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
del info['dm_name']
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
expected_calls = [
call(['cryptsetup', '--cipher', self.cipher,
'--key-size', self.keysize,
@@ -1634,7 +1736,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
volume_name = "%s:%s" % (volume_byid, info['dm_name'])
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
expected_calls = [
call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
'--sector-size', '4096', '--name', info['dm_name'],
@@ -1669,7 +1771,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
volume_name = "%s:%s" % (volume_byid, info['dm_name'])
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
expected_calls = [
call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
'--sector-size', '4096', '--name', info['dm_name'],
@@ -1706,7 +1808,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
volume_name = "%s:%s" % (volume_byid, info['dm_name'])
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
expected_calls = [
call(['zkey', 'generate', '--xts', '--volume-type', 'luks2',
'--sector-size', '4096', '--name', info['dm_name'],
@@ -1733,7 +1835,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
self.assertEqual(0, self.m_subp.call_count)
self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
@@ -1754,7 +1856,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
self.assertEqual(len(util.load_file(self.crypttab).splitlines()), 1)
@patch('curtin.commands.block_meta.os.path.exists')
@@ -1766,7 +1868,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
with self.assertRaises(RuntimeError):
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
@patch('curtin.commands.block_meta.os.path.exists')
def test_dm_crypt_preserve_raises_exception_if_wrong_dev_used(self, m_ex):
@@ -1784,7 +1886,7 @@ class TestDmCryptHandler(CiTestCase):
info = self.storage_config['dmcrypt0']
info['preserve'] = True
with self.assertRaises(RuntimeError):
- block_meta.dm_crypt_handler(info, self.storage_config)
+ block_meta.dm_crypt_handler(info, self.storage_config, {})
class TestRaidHandler(CiTestCase):
@@ -1882,7 +1984,7 @@ class TestRaidHandler(CiTestCase):
self.storage_config['mddevice']['name'] = param
try:
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config)
+ self.storage_config, {})
except ValueError:
if param in ['bad/path']:
continue
@@ -1904,7 +2006,7 @@ class TestRaidHandler(CiTestCase):
md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual([call(md_devname, 5, devices, [], None, '', None)],
self.m_mdadm.mdadm_create.call_args_list)
@@ -1914,12 +2016,32 @@ class TestRaidHandler(CiTestCase):
devices = [self.random_string(), self.random_string(),
self.random_string()]
+ md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
- m_verify.return_value = True
self.storage_config['mddevice']['preserve'] = True
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
+ self.assertEqual(
+ [call(md_devname, 5, devices, [], None)],
+ m_verify.call_args_list)
+
+ @patch('curtin.commands.block_meta.raid_verify')
+ def test_raid_handler_preserves_existing_device_container(self, m_verify):
+ """ raid_handler preserves existing device. """
+
+ devices = [self.random_string()]
+ md_devname = '/dev/' + self.storage_config['mddevice']['name']
+ self.m_getpath.side_effect = iter(devices)
+ self.storage_config['mddevice']['preserve'] = True
+ del self.storage_config['mddevice']['devices']
+ self.storage_config['mddevice']['container'] = self.random_string()
+ block_meta.raid_handler(self.storage_config['mddevice'],
+ self.storage_config, {})
+ self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
+ self.assertEqual(
+ [call(md_devname, 5, [], [], devices[0])],
+ m_verify.call_args_list)
def test_raid_handler_preserve_verifies_md_device(self):
""" raid_handler preserve verifies existing raid device. """
@@ -1931,9 +2053,9 @@ class TestRaidHandler(CiTestCase):
self.m_mdadm.md_check.return_value = True
self.storage_config['mddevice']['preserve'] = True
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
- self.assertEqual([call(md_devname, 5, devices, [])],
+ self.assertEqual([call(md_devname, 5, devices, [], None)],
self.m_mdadm.md_check.call_args_list)
def test_raid_handler_preserve_verifies_md_device_after_assemble(self):
@@ -1943,12 +2065,12 @@ class TestRaidHandler(CiTestCase):
self.random_string()]
md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
- self.m_mdadm.md_check.side_effect = iter([False, True])
+ self.m_mdadm.md_check.side_effect = iter([ValueError(), None])
self.storage_config['mddevice']['preserve'] = True
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
- self.assertEqual([call(md_devname, 5, devices, [])] * 2,
+ self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
self.m_mdadm.md_check.call_args_list)
self.assertEqual([call(md_devname, devices, [])],
self.m_mdadm.mdadm_assemble.call_args_list)
@@ -1960,13 +2082,13 @@ class TestRaidHandler(CiTestCase):
self.random_string()]
md_devname = '/dev/' + self.storage_config['mddevice']['name']
self.m_getpath.side_effect = iter(devices)
- self.m_mdadm.md_check.side_effect = iter([False, False])
+ self.m_mdadm.md_check.side_effect = iter([ValueError(), ValueError()])
self.storage_config['mddevice']['preserve'] = True
- with self.assertRaises(RuntimeError):
+ with self.assertRaises(ValueError):
block_meta.raid_handler(self.storage_config['mddevice'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual(0, self.m_mdadm.mdadm_create.call_count)
- self.assertEqual([call(md_devname, 5, devices, [])] * 2,
+ self.assertEqual([call(md_devname, 5, devices, [], None)] * 2,
self.m_mdadm.md_check.call_args_list)
self.assertEqual([call(md_devname, devices, [])],
self.m_mdadm.mdadm_assemble.call_args_list)
@@ -2057,7 +2179,7 @@ class TestBcacheHandler(CiTestCase):
self.m_bcache.create_cache_device.return_value = cset_uuid
block_meta.bcache_handler(self.storage_config['id_bcache0'],
- self.storage_config)
+ self.storage_config, {})
self.assertEqual([call(caching_device)],
self.m_bcache.create_cache_device.call_args_list)
self.assertEqual([
@@ -2180,7 +2302,7 @@ class TestPartitionHandler(CiTestCase):
self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
self.m_block.get_blockdev_sector_size.return_value = (512, 512)
m_ex_part.return_value = 'disk-sda-part-2'
- block_meta.partition_handler(logical_part, self.storage_config)
+ block_meta.partition_handler(logical_part, self.storage_config, {})
m_ex_part.assert_called_with('sda', self.storage_config)
def test_part_handler_raise_exception_missing_extended_part(self):
@@ -2200,7 +2322,7 @@ class TestPartitionHandler(CiTestCase):
self.m_block.sys_block_path.return_value = 'sys/class/block/sda'
self.m_block.get_blockdev_sector_size.return_value = (512, 512)
with self.assertRaises(RuntimeError):
- block_meta.partition_handler(logical_part, self.storage_config)
+ block_meta.partition_handler(logical_part, self.storage_config, {})
@patch('curtin.commands.block_meta.partition_verify_fdasd')
def test_part_hander_reuse_vtoc(self, m_verify_fdasd):
@@ -2227,7 +2349,7 @@ class TestPartitionHandler(CiTestCase):
m_verify_fdasd.return_value = True
devpath = self.m_getpath.return_value = self.random_string()
- block_meta.partition_handler(sconfig[1], oconfig)
+ block_meta.partition_handler(sconfig[1], oconfig, {})
m_verify_fdasd.assert_has_calls([call(devpath, 1, sconfig[1])])
@@ -2290,7 +2412,7 @@ class TestMultipathPartitionHandler(CiTestCase):
m_part_info.return_value = (2048, 2048)
part2 = self.storage_config['disk-sda-part-2']
- block_meta.partition_handler(part2, self.storage_config)
+ block_meta.partition_handler(part2, self.storage_config, {})
expected_calls = [
call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',
@@ -2319,7 +2441,7 @@ class TestMultipathPartitionHandler(CiTestCase):
m_part_info.return_value = (2048, 2048)
part2 = self.storage_config['disk-sda-part-2']
- block_meta.partition_handler(part2, self.storage_config)
+ block_meta.partition_handler(part2, self.storage_config, {})
expected_calls = [
call(['sgdisk', '--new', '2:4096:4096', '--typecode=2:8300',
@@ -2448,8 +2570,6 @@ class TestPartitionVerifySfdisk(CiTestCase):
def setUp(self):
super(TestPartitionVerifySfdisk, self).setUp()
base = 'curtin.commands.block_meta.'
- self.add_patch(base + 'verify_exists', 'm_verify_exists')
- self.add_patch(base + 'block.sfdisk_info', 'm_block_sfdisk_info')
self.add_patch(base + 'verify_size', 'm_verify_size')
self.add_patch(base + 'verify_ptable_flag', 'm_verify_ptable_flag')
self.info = {
@@ -2464,34 +2584,29 @@ class TestPartitionVerifySfdisk(CiTestCase):
self.devpath = self.random_string()
def test_partition_verify_sfdisk(self):
- block_meta.partition_verify_sfdisk(self.devpath, self.info)
- self.assertEqual(
- [call(self.devpath)],
- self.m_verify_exists.call_args_list)
- self.assertEqual(
- [call(self.devpath)],
- self.m_block_sfdisk_info.call_args_list)
+ devpath = self.random_string()
+ sfdisk_part_info = {
+ 'node': devpath,
+ }
+ label = self.random_string()
+ block_meta.partition_verify_sfdisk(self.info, label, sfdisk_part_info)
self.assertEqual(
- [call(self.devpath, self.part_size,
- sfdisk_info=self.m_block_sfdisk_info.return_value)],
+ [call(devpath, self.part_size, sfdisk_part_info)],
self.m_verify_size.call_args_list)
self.assertEqual(
- [call(self.devpath, self.info['flag'],
- sfdisk_info=self.m_block_sfdisk_info.return_value)],
+ [call(devpath, self.info['flag'], label, sfdisk_part_info)],
self.m_verify_ptable_flag.call_args_list)
def test_partition_verify_skips_ptable_no_flag(self):
del self.info['flag']
- block_meta.partition_verify_sfdisk(self.devpath, self.info)
- self.assertEqual(
- [call(self.devpath)],
- self.m_verify_exists.call_args_list)
- self.assertEqual(
- [call(self.devpath)],
- self.m_block_sfdisk_info.call_args_list)
+ devpath = self.random_string()
+ sfdisk_part_info = {
+ 'node': devpath,
+ }
+ label = self.random_string()
+ block_meta.partition_verify_sfdisk(self.info, label, sfdisk_part_info)
self.assertEqual(
- [call(self.devpath, self.part_size,
- sfdisk_info=self.m_block_sfdisk_info.return_value)],
+ [call(devpath, self.part_size, sfdisk_part_info)],
self.m_verify_size.call_args_list)
self.assertEqual([], self.m_verify_ptable_flag.call_args_list)
@@ -2619,15 +2734,17 @@ class TestVerifyPtableFlag(CiTestCase):
def test_verify_ptable_flag_finds_boot_on_gpt(self):
devpath = '/dev/vda15'
expected_flag = 'boot'
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_gpt)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'gpt',
+ self.sfdisk_info_gpt['partitions'][2])
def test_verify_ptable_flag_raises_exception_missing_flag(self):
devpath = '/dev/vda1'
expected_flag = 'boot'
with self.assertRaises(RuntimeError):
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_gpt)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'gpt',
+ self.sfdisk_info_gpt['partitions'][0])
def test_verify_ptable_flag_raises_exception_invalid_flag(self):
devpath = '/dev/vda1'
@@ -2635,52 +2752,49 @@ class TestVerifyPtableFlag(CiTestCase):
self.assertNotIn(expected_flag, block_meta.SGDISK_FLAGS.keys())
self.assertNotIn(expected_flag, block_meta.MSDOS_FLAGS.keys())
with self.assertRaises(RuntimeError):
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_gpt)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'gpt',
+ self.sfdisk_info_gpt['partitions'][0])
def test_verify_ptable_flag_checks_bootable_not_table_type(self):
devpath = '/dev/vdb1'
expected_flag = 'boot'
- del self.sfdisk_info_dos['partitions'][0]['bootable']
+ partinfo = self.sfdisk_info_dos['partitions'][0]
+ del partinfo['bootable']
self.sfdisk_info_dos['partitions'][0]['type'] = '0x80'
with self.assertRaises(RuntimeError):
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_dos)
-
- def test_verify_ptable_flag_calls_block_sfdisk_if_info_none(self):
- devpath = '/dev/vda15'
- expected_flag = 'boot'
- self.m_block_sfdisk_info.return_value = self.sfdisk_info_gpt
- block_meta.verify_ptable_flag(devpath, expected_flag, sfdisk_info=None)
- self.assertEqual(
- [call(devpath)],
- self.m_block_sfdisk_info.call_args_list)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'dos', partinfo)
def test_verify_ptable_flag_finds_boot_on_msdos(self):
devpath = '/dev/vdb1'
expected_flag = 'boot'
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_dos)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'dos',
+ self.sfdisk_info_dos['partitions'][0])
def test_verify_ptable_flag_finds_linux_on_dos_primary_partition(self):
devpath = '/dev/vdb2'
expected_flag = 'linux'
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_dos)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'dos',
+ self.sfdisk_info_dos['partitions'][1])
def test_verify_ptable_flag_finds_dos_extended_partition(self):
devpath = '/dev/vdb3'
expected_flag = 'extended'
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_dos)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'dos',
+ self.sfdisk_info_dos['partitions'][2])
def test_verify_ptable_flag_finds_dos_logical_partition(self):
devpath = '/dev/vdb5'
expected_flag = 'logical'
self.m_block_get_blockdev_for_partition.return_value = (
('/dev/vdb', '5'))
- block_meta.verify_ptable_flag(devpath, expected_flag,
- sfdisk_info=self.sfdisk_info_dos)
+ block_meta.verify_ptable_flag(
+ devpath, expected_flag, 'dos',
+ self.sfdisk_info_dos['partitions'][4])
class TestGetDevicePathsFromStorageConfig(CiTestCase):
diff --git a/tests/unittests/test_commands_extract.py b/tests/unittests/test_commands_extract.py
index c318699..19f2ee9 100644
--- a/tests/unittests/test_commands_extract.py
+++ b/tests/unittests/test_commands_extract.py
@@ -4,211 +4,388 @@ import os
from .helpers import CiTestCase
from curtin import util
-from curtin.commands.extract import (extract_root_fsimage_url,
- extract_root_layered_fsimage_url,
- _get_image_stack)
+from curtin.commands.extract import (
+ extract_source,
+ _get_image_stack,
+ )
from curtin.url_helper import UrlError
-class TestExtractRootFsImageUrl(CiTestCase):
- """Test extract_root_fsimage_url."""
+class Mount:
+ def __init__(self, device, mountpoint, options, type):
+ self.device = device
+ self.mountpoint = mountpoint
+ self.options = options
+ self.type = type
+ self.unmounted = False
+
+ def __repr__(self):
+ return "Mount({!r}, {!r}, {!r}, {!r})".format(
+ self.device, self.mountpoint, self.options, self.type)
+
+
+class MountTracker:
+
+ def __init__(self):
+ self.mounts = []
+
+ def mount(self, device, mountpoint, options=None, type=None):
+ if not os.path.isdir(mountpoint):
+ raise AssertionError("%s is not a directory" % (mountpoint,))
+ self.mounts.append(Mount(device, mountpoint, options, type))
+
+ def unmount(self, mountpoint):
+ for m in reversed(self.mounts):
+ if m.mountpoint == mountpoint and not m.unmounted:
+ m.unmounted = True
+ return
+ else:
+ raise Exception("%s not mounted" % (mountpoint,))
+
+ def check_unmounted(self):
+ for mount in self.mounts:
+ if not mount.unmounted:
+ raise AssertionError("Mount %s was not unmounted" % (mount,))
+
+
+class ExtractTestCase(CiTestCase):
+
def _fake_download(self, url, path, retries=0):
self.downloads.append(os.path.abspath(path))
with open(path, "w") as fp:
fp.write("fake content from " + url + "\n")
def setUp(self):
- super(TestExtractRootFsImageUrl, self).setUp()
+ super(ExtractTestCase, self).setUp()
self.downloads = []
self.add_patch("curtin.commands.extract.url_helper.download",
"m_download", side_effect=self._fake_download)
- self.add_patch("curtin.commands.extract._extract_root_fsimage",
- "m__extract_root_fsimage")
+ self.add_patch("curtin.commands.extract.copy_to_target",
+ "m_copy_to_target")
+
+ def tearDown(self):
+ super(ExtractTestCase, self).tearDown()
+ # ensure the files got cleaned up.
+ self.assertEqual([], [f for f in self.downloads if os.path.exists(f)])
+
+ def track_mounts(self):
+ tracker = MountTracker()
+ self.add_patch('curtin.commands.extract.mount', new=tracker.mount)
+ self.add_patch('curtin.commands.extract.unmount', new=tracker.unmount)
+
+ self.addCleanup(tracker.check_unmounted)
+
+ return tracker
+
+ def assert_mounted_and_extracted(self, mount_tracker, fnames, target):
+ # Assert that `fnames` (which should be ordered base to top
+ # layer) were mounted in the correct order and extracted to
+ # `target`.
+ fname_to_mountpoint = {}
+ other_mounts = []
+ for mount in mount_tracker.mounts:
+ if mount.device in fnames:
+ fname_to_mountpoint[mount.device] = mount.mountpoint
+ else:
+ other_mounts.append(mount)
+
+ if len(fnames) == 1:
+ self.assertEqual(len(other_mounts), 0)
+ self.m_copy_to_target.assert_called_once_with(
+ mount_tracker.mounts[0].mountpoint, target)
+ return
+
+ expected_lowers = []
+ for fname in fnames:
+ if fname not in fname_to_mountpoint:
+ self.fail("%s was not mounted" % (fname,))
+ else:
+ expected_lowers.append(fname_to_mountpoint[fname])
+ expected_lowers.reverse()
+
+ self.assertEqual(len(other_mounts), 1)
+ final_mount = other_mounts[0]
+ opts = final_mount.options.split(',')
+ for opt in opts:
+ if opt.startswith('lowerdir='):
+ seen_lowers = opt[len('lowerdir='):].split(":")
+ break
+ else:
+ self.fail("did not find expected lowerdir option")
+ self.assertEqual(expected_lowers, seen_lowers)
+ self.m_copy_to_target.assert_called_once_with(
+ final_mount.mountpoint, target)
+
+ def assert_downloaded_and_mounted_and_extracted(self, mount_tracker, urls,
+ target):
+ # Assert that `urls` (which should be ordered base to top
+        # layer) were downloaded and mounted in the correct order and
+ # extracted to `target`.
+ self.assertEqual(len(self.m_download.call_args_list), len(urls))
+ url_to_fname = {}
+ for call in self.m_download.call_args_list:
+ url, path = call[0][:2]
+ self.assertIn(url, urls)
+ url_to_fname[url] = path
+ fnames = []
+ for url in urls:
+ fnames.append(url_to_fname[url])
+ self.assert_mounted_and_extracted(mount_tracker, fnames, target)
+
+
+class TestExtractSourceCp(ExtractTestCase):
+ """Test extract_source with cp sources."""
+
+ def test_cp_uri(self):
+ mount_tracker = self.track_mounts()
+ path = self.random_string()
+ target = self.random_string()
+
+ extract_source({'uri': 'cp://' + path}, target)
+
+ self.assertEqual(0, self.m_download.call_count)
+ self.assertEqual(0, len(mount_tracker.mounts))
+ self.m_copy_to_target.assert_called_once_with(path, target)
+
+
+class TestExtractSourceFsImageUrl(ExtractTestCase):
+ """Test extract_source with fsimage sources."""
+
+ def tmp_path_with_random_content(self, name=None):
+ if name is None:
+ name = self.random_string()
+ tdir = self.tmp_dir()
+        path = os.path.join(tdir, name)
+ util.write_file(path, self.random_string())
+ return path
+
+ def test_abspath(self):
+ mount_tracker = self.track_mounts()
+ path = self.tmp_path_with_random_content()
+ target = self.random_string()
+
+ extract_source({'type': 'fsimage', 'uri': path}, target)
+
+ self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(mount_tracker, [path], target)
+
+ def test_abspath_dots(self):
+ mount_tracker = self.track_mounts()
+ path = self.tmp_path_with_random_content(name='a.b.c')
+ target = self.random_string()
+
+ extract_source({'type': 'fsimage', 'uri': path}, target)
+
+ self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(mount_tracker, [path], target)
+
+ def test_relpath(self):
+ mount_tracker = self.track_mounts()
+ path = self.tmp_path_with_random_content()
+ target = self.random_string()
- def test_relative_file_url(self):
- """extract_root_fsimage_url supports relative file:// urls."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
startdir = os.getcwd()
- fname = "my.img"
try:
- os.chdir(tmpd)
- util.write_file(fname, fname + " data\n")
- extract_root_fsimage_url("file://" + fname, target)
+ os.chdir(os.path.dirname(path))
+ extract_source(
+ {'type': 'fsimage', 'uri': os.path.basename(path)},
+ target)
finally:
os.chdir(startdir)
- self.assertEqual(1, self.m__extract_root_fsimage.call_count)
+
self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(
+ mount_tracker, [os.path.basename(path)], target)
+
+ def test_abs_fileurl(self):
+ mount_tracker = self.track_mounts()
+ path = self.tmp_path_with_random_content()
+ target = self.random_string()
+
+ extract_source(
+ {'type': 'fsimage', 'uri': 'file://' + path},
+ target)
- def test_absolute_file_url(self):
- """extract_root_fsimage_url supports absolute file:/// urls."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- fpath = self.tmp_path("my.img", tmpd)
- util.write_file(fpath, fpath + " data\n")
- extract_root_fsimage_url("file:///" + fpath, target)
- self.assertEqual(1, self.m__extract_root_fsimage.call_count)
self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(mount_tracker, [path], target)
+
+ def test_rel_fileurl(self):
+ mount_tracker = self.track_mounts()
+ path = self.tmp_path_with_random_content()
+ target = self.random_string()
+
+ startdir = os.getcwd()
+ try:
+ os.chdir(os.path.dirname(path))
+ extract_source(
+ {'type': 'fsimage', 'uri': 'file://' + os.path.basename(path)},
+ target)
+ finally:
+ os.chdir(startdir)
+
+ self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(
+ mount_tracker, [os.path.basename(path)], target)
def test_http_url(self):
"""extract_root_fsimage_url supports http:// urls."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- myurl = "http://bogus.example.com/my.img"
- extract_root_fsimage_url(myurl, target)
- self.assertEqual(1, self.m__extract_root_fsimage.call_count)
- self.assertEqual(1, self.m_download.call_count)
- # ensure the file got cleaned up.
- self.assertEqual(1, len(self.downloads))
- self.assertEqual([], [f for f in self.downloads if os.path.exists(f)])
+ mount_tracker = self.track_mounts()
+ uri = 'http://' + self.random_string()
+ target = self.random_string()
- def test_file_path_not_url(self):
- """extract_root_fsimage_url supports normal file path without file:."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- fpath = self.tmp_path("my.img", tmpd)
- util.write_file(fpath, fpath + " data\n")
- extract_root_fsimage_url(os.path.abspath(fpath), target)
- self.assertEqual(1, self.m__extract_root_fsimage.call_count)
- self.assertEqual(0, self.m_download.call_count)
+ extract_source({'type': 'fsimage', 'uri': uri}, target)
+ self.assert_downloaded_and_mounted_and_extracted(
+ mount_tracker, [uri], target)
-class TestExtractRootLayeredFsImageUrl(CiTestCase):
- """Test extract_root_layared_fsimage_url."""
- def _fake_download(self, url, path, retries=0):
- self.downloads.append(os.path.abspath(path))
- with open(path, "w") as fp:
- fp.write("fake content from " + url + "\n")
- def setUp(self):
- super(TestExtractRootLayeredFsImageUrl, self).setUp()
- self.downloads = []
- self.add_patch("curtin.commands.extract.url_helper.download",
- "m_download", side_effect=self._fake_download)
- self.add_patch("curtin.commands.extract._extract_root_layered_fsimage",
- "m__extract_root_layered_fsimage")
+class TestExtractSourceLayeredFsImageUrl(ExtractTestCase):
+ """Test extract_source with fsimage-layered sources."""
+
+ def tmp_paths_with_random_content(self, names):
+ tdir = self.tmp_dir()
+ paths = []
+ longest = ''
+ for name in names:
+ path = os.path.join(tdir, name)
+ util.write_file(path, self.random_string())
+ if len(path) > len(longest):
+ longest = path
+ paths.append(path)
+ return paths, longest
+
+ def test_absolute_file_path_single(self):
+ mount_tracker = self.track_mounts()
+ paths, longest = self.tmp_paths_with_random_content(['base.ext'])
+ target = self.random_string()
+
+ extract_source(
+ {'type': 'fsimage-layered', 'uri': longest},
+ target)
+
+ self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(mount_tracker, paths, target)
+
+ def test_relative_file_path_single(self):
+ mount_tracker = self.track_mounts()
+ paths, longest = self.tmp_paths_with_random_content(['base.ext'])
+ target = self.random_string()
- def test_relative_local_file_single(self):
- """extract_root_layered_fsimage_url supports relative file:// uris."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
startdir = os.getcwd()
- fname = "my.img"
try:
- os.chdir(tmpd)
- util.write_file(fname, fname + " data\n")
- extract_root_layered_fsimage_url("file://" + fname, target)
+ os.chdir(os.path.dirname(longest))
+ extract_source(
+ {'type': 'fsimage-layered', 'uri': os.path.basename(longest)},
+ target)
finally:
os.chdir(startdir)
- self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count)
+
self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(
+ mount_tracker, [os.path.basename(path) for path in paths], target)
+
+ def test_absolute_file_url_single(self):
+ mount_tracker = self.track_mounts()
+ paths, longest = self.tmp_paths_with_random_content(['base.ext'])
+ target = self.random_string()
+
+ extract_source(
+ {'type': 'fsimage-layered', 'uri': 'file://' + longest},
+ target)
- def test_absolute_local_file_single(self):
- """extract_root_layered_fsimage_url supports absolute file:/// uris."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- fpath = self.tmp_path("my.img", tmpd)
- util.write_file(fpath, fpath + " data\n")
- extract_root_layered_fsimage_url("file:///" + fpath, target)
- self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count)
self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(mount_tracker, paths, target)
+
+ def test_relative_file_url_single(self):
+ mount_tracker = self.track_mounts()
+ paths, longest = self.tmp_paths_with_random_content(['base.ext'])
+ target = self.random_string()
+
+ startdir = os.getcwd()
+ try:
+ os.chdir(os.path.dirname(longest))
+ extract_source(
+ {
+ 'type': 'fsimage-layered',
+ 'uri': 'file://' + os.path.basename(longest),
+ },
+ target)
+ finally:
+ os.chdir(startdir)
- def test_local_file_path_single(self):
- """extract_root_layered_fsimage_url supports normal file path without
- file:"""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- fpath = self.tmp_path("my.img", tmpd)
- util.write_file(fpath, fpath + " data\n")
- extract_root_layered_fsimage_url(os.path.abspath(fpath), target)
- self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count)
self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(
+ mount_tracker, [os.path.basename(path) for path in paths], target)
def test_local_file_path_multiple(self):
- """extract_root_layered_fsimage_url supports normal hierarchy file
- path"""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- arg = os.path.abspath(self.tmp_path("minimal.standard.debug.squashfs",
- tmpd))
- for f in ["minimal.squashfs",
- "minimal.standard.squashfs",
- "minimal.standard.debug.squashfs"]:
- fpath = self.tmp_path(f, tmpd)
- util.write_file(fpath, fpath + " data\n")
- extract_root_layered_fsimage_url(arg, target)
- self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count)
+ mount_tracker = self.track_mounts()
+ paths, longest = self.tmp_paths_with_random_content(
+ ['base.ext', 'base.overlay.ext', 'base.overlay.other.ext'])
+ target = self.random_string()
+
+ extract_source(
+ {'type': 'fsimage-layered', 'uri': longest},
+ target)
+
self.assertEqual(0, self.m_download.call_count)
+ self.assert_mounted_and_extracted(mount_tracker, paths, target)
def test_local_file_path_multiple_one_missing(self):
- """extract_root_layered_fsimage_url supports normal hierarchy file
- path but intermediate layer missing"""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- arg = os.path.abspath(self.tmp_path("minimal.standard.debug.squashfs",
- tmpd))
- for f in ["minimal.squashfs",
- "minimal.standard.debug.squashfs"]:
- fpath = self.tmp_path(f, tmpd)
- util.write_file(fpath, fpath + " data\n")
- self.assertRaises(ValueError, extract_root_layered_fsimage_url, arg,
- target)
- self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count)
- self.assertEqual(0, self.m_download.call_count)
+ self.track_mounts()
+ paths, longest = self.tmp_paths_with_random_content(
+ ['base.ext', 'base.overlay.other.ext'])
+ target = self.random_string()
+
+ self.assertRaises(
+ ValueError, extract_source,
+ {'type': 'fsimage-layered', 'uri': longest},
+ target)
def test_local_file_path_multiple_one_empty(self):
- """extract_root_layered_fsimage_url supports normal hierarchy file
- path but intermediate layer empty"""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- arg = os.path.abspath(self.tmp_path("minimal.standard.debug.squashfs",
- tmpd))
- for f in ["minimal.squashfs",
- "minimal.standard.squashfs"
- "minimal.standard.debug.squashfs"]:
- fpath = self.tmp_path(f, tmpd)
- if f == "minimal.standard.squashfs":
- util.write_file(fpath, "")
- else:
- util.write_file(fpath, fpath + " data\n")
- self.assertRaises(ValueError, extract_root_layered_fsimage_url, arg,
- target)
- self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count)
- self.assertEqual(0, self.m_download.call_count)
+ self.track_mounts()
+ paths, longest = self.tmp_paths_with_random_content(
+ ['base.ext', 'base.overlay.ext', 'base.overlay.other.ext'])
+ target = self.random_string()
+ util.write_file(paths[1], '')
+
+ self.assertRaises(
+ ValueError, extract_source,
+ {'type': 'fsimage-layered', 'uri': longest},
+ target)
def test_remote_file_single(self):
- """extract_root_layered_fsimage_url supports http:// urls."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
+ mount_tracker = self.track_mounts()
+ target = self.random_string()
myurl = "http://example.io/minimal.squashfs"
- extract_root_layered_fsimage_url(myurl, target)
- self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count)
- self.assertEqual(1, self.m_download.call_count)
- self.assertEqual("http://example.io/minimal.squashfs",
- self.m_download.call_args_list[0][0][0])
- # ensure the file got cleaned up.
- self.assertEqual([], [f for f in self.downloads if os.path.exists(f)])
+
+ extract_source(
+ {'type': 'fsimage-layered', 'uri': myurl},
+ target)
+
+ self.assert_downloaded_and_mounted_and_extracted(
+ mount_tracker, ["http://example.io/minimal.squashfs"], target)
def test_remote_file_multiple(self):
- """extract_root_layered_fsimage_url supports normal hierarchy from
- http:// urls."""
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
+ mount_tracker = self.track_mounts()
+ target = self.random_string()
myurl = "http://example.io/minimal.standard.debug.squashfs"
- extract_root_layered_fsimage_url(myurl, target)
- self.assertEqual(1, self.m__extract_root_layered_fsimage.call_count)
- self.assertEqual(3, self.m_download.call_count)
- for i, image_url in enumerate(["minimal.squashfs",
- "minimal.standard.squashfs",
- "minimal.standard.debug.squashfs"]):
- self.assertEqual("http://example.io/" + image_url,
- self.m_download.call_args_list[i][0][0])
- # ensure the file got cleaned up.
- self.assertEqual([], [f for f in self.downloads if os.path.exists(f)])
+
+ extract_source(
+ {'type': 'fsimage-layered', 'uri': myurl},
+ target)
+
+ urls = [
+ "http://example.io/minimal.squashfs",
+ "http://example.io/minimal.standard.squashfs",
+ "http://example.io/minimal.standard.debug.squashfs",
+ ]
+ self.assert_downloaded_and_mounted_and_extracted(
+ mount_tracker, urls, target)
def test_remote_file_multiple_one_missing(self):
- """extract_root_layered_fsimage_url supports normal hierarchy from
- http:// urls with one layer missing."""
+ self.track_mounts()
+ target = self.random_string()
+ myurl = "http://example.io/minimal.standard.debug.squashfs"
def fail_download_minimal_standard(url, path, retries=0):
if url == "http://example.io/minimal.standard.squashfs":
@@ -217,23 +394,16 @@ class TestExtractRootLayeredFsImageUrl(CiTestCase):
return self._fake_download(url, path, retries)
self.m_download.side_effect = fail_download_minimal_standard
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- myurl = "http://example.io/minimal.standard.debug.squashfs"
- self.assertRaises(UrlError, extract_root_layered_fsimage_url,
- myurl, target)
- self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count)
- self.assertEqual(2, self.m_download.call_count)
- for i, image_url in enumerate(["minimal.squashfs",
- "minimal.standard.squashfs"]):
- self.assertEqual("http://example.io/" + image_url,
- self.m_download.call_args_list[i][0][0])
- # ensure the file got cleaned up.
- self.assertEqual([], [f for f in self.downloads if os.path.exists(f)])
+ self.assertRaises(
+ UrlError, extract_source,
+ {'type': 'fsimage-layered', 'uri': myurl},
+ target)
+ self.assertEqual(0, self.m_copy_to_target.call_count)
def test_remote_file_multiple_one_empty(self):
- """extract_root_layered_fsimage_url supports normal hierarchy from
- http:// urls with one layer empty."""
+ self.track_mounts()
+ target = self.random_string()
+ myurl = "http://example.io/minimal.standard.debug.squashfs"
def empty_download_minimal_standard(url, path, retries=0):
if url == "http://example.io/minimal.standard.squashfs":
@@ -244,20 +414,17 @@ class TestExtractRootLayeredFsImageUrl(CiTestCase):
return self._fake_download(url, path, retries)
self.m_download.side_effect = empty_download_minimal_standard
- tmpd = self.tmp_dir()
- target = self.tmp_path("target_d", tmpd)
- myurl = "http://example.io/minimal.standard.debug.squashfs"
- self.assertRaises(ValueError, extract_root_layered_fsimage_url,
- myurl, target)
- self.assertEqual(0, self.m__extract_root_layered_fsimage.call_count)
+ self.assertRaises(
+ ValueError, extract_source,
+ {'type': 'fsimage-layered', 'uri': myurl},
+ target)
+ self.assertEqual(0, self.m_copy_to_target.call_count)
self.assertEqual(3, self.m_download.call_count)
for i, image_url in enumerate(["minimal.squashfs",
"minimal.standard.squashfs",
"minimal.standard.debug.squashfs"]):
self.assertEqual("http://example.io/" + image_url,
self.m_download.call_args_list[i][0][0])
- # ensure the file got cleaned up.
- self.assertEqual([], [f for f in self.downloads if os.path.exists(f)])
class TestGetImageStack(CiTestCase):
@@ -294,4 +461,11 @@ class TestGetImageStack(CiTestCase):
'https://path.com/to/aa.bbb.cccc.fs'],
_get_image_stack("https://path.com/to/aa.bbb.cccc.fs"))
+ def test_get_image_stack_relative_file_urls(self):
+ self.assertEqual(
+ ['file://aa.fs',
+ 'file://aa.bbb.fs',
+ 'file://aa.bbb.cccc.fs'],
+ _get_image_stack("file://aa.bbb.cccc.fs"))
+
# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_curthooks.py b/tests/unittests/test_curthooks.py
index 41670f7..f878d9f 100644
--- a/tests/unittests/test_curthooks.py
+++ b/tests/unittests/test_curthooks.py
@@ -536,9 +536,13 @@ class TestSetupZipl(CiTestCase):
@patch('curtin.block.get_devices_for_mp')
@patch('platform.machine')
- def test_setup_zipl_writes_etc_zipl_conf(self, m_machine, m_get_devices):
+ @patch('curtin.commands.block_meta.get_volume_spec')
+ def test_setup_zipl_writes_etc_zipl_conf(
+ self, m_get_volume_spec, m_machine, m_get_devices):
m_machine.return_value = 's390x'
m_get_devices.return_value = ['/dev/mapper/ubuntu--vg-root']
+ root_dev = self.random_string()
+ m_get_volume_spec.return_value = root_dev
curthooks.setup_zipl(None, self.target)
m_get_devices.assert_called_with(self.target)
with open(os.path.join(self.target, 'etc', 'zipl.conf')) as stream:
@@ -546,6 +550,8 @@ class TestSetupZipl(CiTestCase):
self.assertIn(
'# This has been modified by the MAAS curtin installer',
content)
+ # validate the root= parameter was properly set in the cmdline
+ self.assertIn('root={}'.format(root_dev), content)
class EfiOutput(object):
diff --git a/tests/unittests/test_distro.py b/tests/unittests/test_distro.py
index 380680c..7532126 100644
--- a/tests/unittests/test_distro.py
+++ b/tests/unittests/test_distro.py
@@ -210,6 +210,14 @@ class TestDistroInfo(CiTestCase):
distro_obj = distro.get_osfamily()
self.assertEqual(family, distro_obj)
+ def test_get_from_idlike(self):
+ name = 'NotADistro'
+ self.mock_os_release.return_value = {
+ 'ID': name,
+ 'ID_LIKE': "stuff things rhel"
+ }
+ self.assertEqual('rhel', distro.get_distro(name))
+
class TestDistroIdentity(CiTestCase):
diff --git a/tests/unittests/test_feature.py b/tests/unittests/test_feature.py
index 8690ad8..0a7f890 100644
--- a/tests/unittests/test_feature.py
+++ b/tests/unittests/test_feature.py
@@ -30,4 +30,7 @@ class TestExportsFeatures(CiTestCase):
def test_has_uefi_reorder_fallback_support(self):
self.assertIn('UEFI_REORDER_FALLBACK_SUPPORT', curtin.FEATURES)
+ def test_has_fstab_default_fsck_on(self):
+ self.assertIn('FSTAB_DEFAULT_FSCK_ON_BLK', curtin.FEATURES)
+
# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_pack.py b/tests/unittests/test_pack.py
index cb0b135..803d7de 100644
--- a/tests/unittests/test_pack.py
+++ b/tests/unittests/test_pack.py
@@ -87,7 +87,7 @@ class TestPack(TestCase):
return out, err, rc, log_contents
- def test_psuedo_install(self):
+ def test_pseudo_install(self):
# do a install that has only a early stage and only one command.
mystr = "HI MOM"
cfg = {
diff --git a/tests/unittests/test_storage_config.py b/tests/unittests/test_storage_config.py
index 2d27d4e..a2308c4 100644
--- a/tests/unittests/test_storage_config.py
+++ b/tests/unittests/test_storage_config.py
@@ -32,6 +32,18 @@ class TestStorageConfigSchema(CiTestCase):
storage_config.validate_config(config)
@skipUnlessJsonSchema()
+ def test_disk_schema_accepts_nvme_uuid(self):
+ disk = {
+ "id": "disk-nvme0n1",
+ "path": "/dev/nvme0n1",
+ "serial": "Samsung SSD 960 EVO 250GB_S3ESNX0JB35041V",
+ "type": "disk",
+ "wwn": "uuid.344343304d3000150025384500000004"
+ }
+ config = {'config': [disk], 'version': 1}
+ storage_config.validate_config(config)
+
+ @skipUnlessJsonSchema()
def test_disk_schema_accepts_nvme_wwid(self):
disk = {
"id": "disk-nvme0n1",
@@ -54,6 +66,43 @@ class TestStorageConfigSchema(CiTestCase):
config = {'config': [disk], 'version': 1}
storage_config.validate_config(config)
+ @skipUnlessJsonSchema()
+ def test_format_schema_arbitrary_fstype_if_preserve(self):
+ format = {
+ "fstype": "BitLocker",
+ "id": "format-partition-sda3",
+ "preserve": True,
+ "type": "format",
+ "volume": "partition-sda3"
+ }
+ config = {'config': [format], 'version': 1}
+ storage_config.validate_config(config)
+
+ @skipUnlessJsonSchema()
+ def test_format_schema_arbitrary_fstype_fail_when_preserve_false(self):
+ format = {
+ "fstype": "BitLocker",
+ "id": "format-partition-sda3",
+ "preserve": False,
+ "type": "format",
+ "volume": "partition-sda3"
+ }
+ config = {'config': [format], 'version': 1}
+ with self.assertRaises(ValueError):
+ storage_config.validate_config(config)
+
+ @skipUnlessJsonSchema()
+ def test_format_schema_arbitrary_fstype_fail_when_no_preserve(self):
+ format = {
+ "fstype": "BitLocker",
+ "id": "format-partition-sda3",
+ "type": "format",
+ "volume": "partition-sda3"
+ }
+ config = {'config': [format], 'version': 1}
+ with self.assertRaises(ValueError):
+ storage_config.validate_config(config)
+
class TestProbertParser(CiTestCase):
@@ -467,149 +516,51 @@ class TestBlockdevParser(CiTestCase):
self.assertDictEqual(expected_dict,
self.bdevp.asdict(blockdev))
- def test_blockdev_detects_multipath(self):
+ def test_blockdev_multipath_disk(self):
self.probe_data = _get_data('probert_storage_multipath.json')
self.bdevp = BlockdevParser(self.probe_data)
- blockdev = self.bdevp.blockdev_data['/dev/sda2']
+ blockdev = self.bdevp.blockdev_data['/dev/dm-0']
expected_dict = {
- 'flag': 'linux',
- 'id': 'partition-sda2',
- 'offset': 2097152,
+ 'id': 'mpath-disk-mpatha',
'multipath': 'mpatha',
- 'size': 10734272512,
- 'type': 'partition',
- 'device': 'disk-sda',
- 'number': 2}
+ 'path': '/dev/dm-0',
+ 'ptable': 'gpt',
+ 'type': 'disk',
+ 'wwn': '0x0000000000000064',
+ }
self.assertDictEqual(expected_dict, self.bdevp.asdict(blockdev))
- def test_blockdev_skips_multipath_entry_if_no_multipath_data(self):
+ def test_blockdev_multipath_partition(self):
self.probe_data = _get_data('probert_storage_multipath.json')
- del self.probe_data['multipath']
self.bdevp = BlockdevParser(self.probe_data)
- blockdev = self.bdevp.blockdev_data['/dev/sda2']
+ blockdev = self.bdevp.blockdev_data['/dev/dm-2']
expected_dict = {
+ 'device': 'mpath-disk-mpatha',
'flag': 'linux',
- 'id': 'partition-sda2',
+ 'id': 'mpath-partition-mpatha-part2',
+ 'multipath': 'mpatha',
+ 'number': 2,
'offset': 2097152,
'size': 10734272512,
'type': 'partition',
- 'device': 'disk-sda',
- 'number': 2}
+ }
self.assertDictEqual(expected_dict, self.bdevp.asdict(blockdev))
- def test_blockdev_skips_multipath_entry_if_bad_multipath_data(self):
+ @skipUnlessJsonSchema()
+ def test_blockdev_skips_underlying_disks_and_partitions(self):
self.probe_data = _get_data('probert_storage_multipath.json')
- for path in self.probe_data['multipath']['paths']:
- path['multipath'] = ''
self.bdevp = BlockdevParser(self.probe_data)
- blockdev = self.bdevp.blockdev_data['/dev/sda2']
- expected_dict = {
- 'flag': 'linux',
- 'id': 'partition-sda2',
- 'offset': 2097152,
- 'size': 10734272512,
- 'type': 'partition',
- 'device': 'disk-sda',
- 'number': 2}
- self.assertDictEqual(expected_dict, self.bdevp.asdict(blockdev))
-
- def test_blockdev_skips_multipath_entry_if_no_mp_paths(self):
- self.probe_data = _get_data('probert_storage_multipath.json')
- del self.probe_data['multipath']['paths']
- self.bdevp = BlockdevParser(self.probe_data)
- blockdev = self.bdevp.blockdev_data['/dev/sda2']
- expected_dict = {
- 'flag': 'linux',
- 'id': 'partition-sda2',
- 'offset': 2097152,
- 'size': 10734272512,
- 'type': 'partition',
- 'device': 'disk-sda',
- 'number': 2}
- self.assertDictEqual(expected_dict, self.bdevp.asdict(blockdev))
+ configs = self.bdevp.parse()[0]
+ config_paths = {c.get('path') for c in configs}
+ self.assertNotIn('/dev/sda', config_paths)
def test_blockdev_finds_multipath_id_from_dm_uuid(self):
self.probe_data = _get_data('probert_storage_zlp6.json')
self.bdevp = BlockdevParser(self.probe_data)
blockdev = self.bdevp.blockdev_data['/dev/dm-2']
result = self.bdevp.blockdev_to_id(blockdev)
- self.assertEqual('disk-sda', result)
-
- def test_blockdev_find_mpath_members_checks_dm_name(self):
- """ BlockdevParser find_mpath_members uses dm_name if present."""
- dm14 = {
- "DEVTYPE": "disk",
- "DEVLINKS": "/dev/disk/by-id/dm-name-mpathb",
- "DEVNAME": "/dev/dm-14",
- "DEVTYPE": "disk",
- "DM_NAME": "mpathb",
- "DM_UUID": "mpath-360050768028211d8b000000000000062",
- "DM_WWN": "0x60050768028211d8b000000000000062",
- "MPATH_DEVICE_READY": "1",
- "MPATH_SBIN_PATH": "/sbin",
- }
- multipath = {
- "maps": [
- {
- "multipath": "360050768028211d8b000000000000061",
- "sysfs": "dm-11",
- "paths": "4"
- },
- {
- "multipath": "360050768028211d8b000000000000062",
- "sysfs": "dm-14",
- "paths": "4"
- },
- {
- "multipath": "360050768028211d8b000000000000063",
- "sysfs": "dm-15",
- "paths": "4"
- }],
- "paths": [
- {
- "device": "sdej",
- "serial": "0200a084762cXX00",
- "multipath": "mpatha",
- "host_wwnn": "0x20000024ff9127de",
- "target_wwnn": "0x5005076802065e38",
- "host_wwpn": "0x21000024ff9127de",
- "target_wwpn": "0x5005076802165e38",
- "host_adapter": "[undef]"
- },
- {
- "device": "sdel",
- "serial": "0200a084762cXX00",
- "multipath": "mpathb",
- "host_wwnn": "0x20000024ff9127de",
- "target_wwnn": "0x5005076802065e38",
- "host_wwpn": "0x21000024ff9127de",
- "target_wwpn": "0x5005076802165e38",
- "host_adapter": "[undef]"
- },
- {
- "device": "sdet",
- "serial": "0200a084762cXX00",
- "multipath": "mpatha",
- "host_wwnn": "0x20000024ff9127de",
- "target_wwnn": "0x5005076802065e37",
- "host_wwpn": "0x21000024ff9127de",
- "target_wwpn": "0x5005076802165e37",
- "host_adapter": "[undef]"
- },
- {
- "device": "sdev",
- "serial": "0200a084762cXX00",
- "multipath": "mpathb",
- "host_wwnn": "0x20000024ff9127de",
- "target_wwnn": "0x5005076802065e37",
- "host_wwpn": "0x21000024ff9127de",
- "target_wwpn": "0x5005076802165e37",
- "host_adapter": "[undef]"
- }],
- }
- self.bdevp.blockdev_data['/dev/dm-14'] = dm14
- self.probe_data['multipath'] = multipath
- self.assertEqual('disk-sdel', self.bdevp.blockdev_to_id(dm14))
+ self.assertEqual(
+ 'mpath-disk-36005076306ffd6b60000000000002406', result)
def test_blockdev_detects_dasd_device_id_and_vtoc_ptable(self):
self.probe_data = _get_data('probert_storage_dasd.json')
@@ -737,15 +688,14 @@ class TestLvmParser(CiTestCase):
class TestRaidParser(CiTestCase):
- def setUp(self):
- super(TestRaidParser, self).setUp()
- self.probe_data = _get_data('probert_storage_mdadm_bcache.json')
- self.raidp = RaidParser(self.probe_data)
+ def _load(self, fname):
+ probe_data = _get_data(fname)
+ return RaidParser(probe_data), probe_data
def test_raid_parser(self):
""" RaidParser 'class_data' on instance matches input. """
- self.assertDictEqual(self.probe_data['raid'],
- self.raidp.class_data)
+ raidp, probe_data = self._load('probert_storage_mdadm_bcache.json')
+ self.assertDictEqual(probe_data['raid'], raidp.class_data)
def test_raid_asdict(self):
""" RaidParser converts known raid_data to expected dict. """
@@ -754,20 +704,49 @@ class TestRaidParser(CiTestCase):
'type': 'raid',
'id': 'raid-md0',
'name': 'md0',
+ 'metadata': '1.2',
'raidlevel': 'raid5',
'devices': ['disk-vde', 'disk-vdf', 'disk-vdg'],
'spare_devices': [],
}
- raid_data = self.raidp.class_data[devname]
- self.assertDictEqual(expected_dict, self.raidp.asdict(raid_data))
+ raidp, _ = self._load('probert_storage_mdadm_bcache.json')
+ raid_data = raidp.class_data[devname]
+ self.assertDictEqual(expected_dict, raidp.asdict(raid_data))
@skipUnlessJsonSchema()
def test_raid_parser_parses_all_lvs_vgs(self):
""" RaidParser returns expected dicts for known raid probe data."""
- configs, errors = self.raidp.parse()
+ raidp, _ = self._load('probert_storage_mdadm_bcache.json')
+ configs, errors = raidp.parse()
self.assertEqual(1, len(configs))
self.assertEqual(0, len(errors))
+ def test_imsm_container(self):
+ raidp, probe_data = self._load('probert_storage_imsm.json')
+ container_raid_data = probe_data['raid']['/dev/md127']
+ container_expected = {
+ 'type': 'raid',
+ 'id': 'raid-md127',
+ 'name': 'md127',
+ 'metadata': 'imsm',
+ 'raidlevel': 'container',
+ 'devices': ['disk-nvme0n1', 'disk-nvme1n1'],
+ 'spare_devices': [],
+ }
+ self.assertEqual(container_expected, raidp.asdict(container_raid_data))
+
+ def test_imsm_volume(self):
+ raidp, probe_data = self._load('probert_storage_imsm.json')
+ container_raid_data = probe_data['raid']['/dev/md126']
+ container_expected = {
+ 'type': 'raid',
+ 'id': 'raid-md126',
+ 'name': 'md126',
+ 'raidlevel': 'raid0',
+ 'container': 'raid-md127',
+ }
+ self.assertEqual(container_expected, raidp.asdict(container_raid_data))
+
class TestDasdParser(CiTestCase):
@@ -928,7 +907,6 @@ class TestZfsParser(CiTestCase):
}
zpool_data = self.zfsp.class_data['zpools'][zpool]['datasets'][dataset]
- print(zpool_data)
self.assertDictEqual(expected_properties,
self.zfsp.get_local_ds_properties(zpool_data))
@@ -1017,22 +995,6 @@ class TestExtractStorageConfig(CiTestCase):
extracted)
@skipUnlessJsonSchema()
- def test_find_all_multipath(self):
- """ verify probed multipath paths are included in config. """
- self.probe_data = _get_data('probert_storage_multipath.json')
- extracted = storage_config.extract_storage_config(self.probe_data)
- config = extracted['storage']['config']
- blockdev = self.probe_data['blockdev']
-
- for mpmap in self.probe_data['multipath']['maps']:
- nr_disks = int(mpmap['paths'])
- mp_name = blockdev['/dev/%s' % mpmap['sysfs']]['DM_NAME']
- matched_disks = [cfg for cfg in config
- if cfg['type'] == 'disk' and
- cfg.get('multipath', '') == mp_name]
- self.assertEqual(nr_disks, len(matched_disks))
-
- @skipUnlessJsonSchema()
def test_find_raid_partition(self):
""" verify probed raid partitions are found. """
self.probe_data = _get_data('probert_storage_raid1_partitions.json')
@@ -1044,7 +1006,7 @@ class TestExtractStorageConfig(CiTestCase):
cfg['id'].startswith('raid')]
self.assertEqual(1, len(raids))
self.assertEqual(1, len(raid_partitions))
- self.assertEqual({'id': 'raid-md1', 'type': 'raid',
+ self.assertEqual({'id': 'raid-md1', 'type': 'raid', 'metadata': '1.2',
'raidlevel': 'raid1', 'name': 'md1',
'devices': ['partition-vdb1', 'partition-vdc1'],
'spare_devices': []}, raids[0])
@@ -1081,6 +1043,36 @@ class TestExtractStorageConfig(CiTestCase):
self.assertEqual(expected_dict, disks[0])
@skipUnlessJsonSchema()
+ def test_blockdev_detects_nvme_uuid(self):
+ self.probe_data = _get_data('probert_storage_nvme_uuid.json')
+ extracted = storage_config.extract_storage_config(self.probe_data)
+ config = extracted['storage']['config']
+ disks = [cfg for cfg in config if cfg['type'] == 'disk']
+ expected_dict = {
+ 'id': 'disk-nvme0n1',
+ 'path': '/dev/nvme0n1',
+ 'ptable': 'gpt',
+ 'serial': 'SAMSUNG MZPLL3T2HAJQ-00005_S4CCNE0M300015',
+ 'type': 'disk',
+ 'wwn': 'uuid.344343304d3000150025384500000004',
+ }
+ self.assertEqual(1, len(disks))
+ self.assertEqual(expected_dict, disks[0])
+
+ @skipUnlessJsonSchema()
+ def test_blockdev_multipath(self):
+ self.probe_data = _get_data('probert_storage_zlp6.json')
+ extracted = storage_config.extract_storage_config(self.probe_data)
+ config = extracted['storage']['config']
+ disks = [cfg for cfg in config if cfg['type'] == 'disk']
+ expected_count = len([
+ 1 for bd_name, bd_data in self.probe_data['blockdev'].items()
+ if bd_data.get('DM_UUID', '').startswith('mpath-')
+ or bd_name.startswith('/dev/dasd') and bd_data['DEVTYPE'] == 'disk'
+ ])
+ self.assertEqual(expected_count, len(disks))
+
+ @skipUnlessJsonSchema()
def test_blockdev_skips_invalid_wwn(self):
self.probe_data = _get_data('probert_storage_bogus_wwn.json')
extracted = storage_config.extract_storage_config(self.probe_data)
@@ -1097,5 +1089,23 @@ class TestExtractStorageConfig(CiTestCase):
self.assertEqual(1, len(disks))
self.assertEqual(expected_dict, disks[0])
+ @skipUnlessJsonSchema()
+ def test_arbitrary_fstype_if_preserve_true(self):
+ self.probe_data = _get_data('probert_storage_win10_bitlocker.json')
+ extracted = storage_config.extract_storage_config(self.probe_data)
+ configs = extracted['storage']['config']
+ format = [cfg for cfg in configs if cfg.get('type') == 'format']
+ bitlocker = [entry for entry in format
+ if entry.get('id') == 'format-partition-sda3']
+ expected_dict = {
+ 'id': 'format-partition-sda3',
+ 'type': 'format',
+ 'volume': 'partition-sda3',
+ 'fstype': 'BitLocker',
+ 'preserve': True,
+ }
+ self.assertEqual(1, len(bitlocker))
+ self.assertEqual(expected_dict, bitlocker[0])
+
# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_tox_environ.py b/tests/unittests/test_tox_environ.py
new file mode 100644
index 0000000..ecc7597
--- /dev/null
+++ b/tests/unittests/test_tox_environ.py
@@ -0,0 +1,17 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+""" test_tox_environ
+verify that systems running tests contain the environmental packages expected
+"""
+
+from aptsources.sourceslist import SourceEntry
+from .helpers import CiTestCase
+
+
+class TestPythonPackages(CiTestCase):
+ def test_python_apt(self):
+ """test_python_apt - Ensure the python-apt package is available"""
+
+ line = 'deb http://us.archive.ubuntu.com/ubuntu/ hirsute main'
+
+ self.assertEqual(line, str(SourceEntry(line)))
diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py
index 0b19d8f..fd6c246 100644
--- a/tests/vmtests/__init__.py
+++ b/tests/vmtests/__init__.py
@@ -1631,6 +1631,9 @@ class VMBaseClass(TestCase):
def check_file_regex(self, filename, regex):
self.assertRegex(self.load_collect_file(filename), regex)
+ def not_file_regex(self, filename, regex):
+ self.assertNotRegex(self.load_collect_file(filename), regex)
+
# To get rid of deprecation warning in python 3.
def assertRegex(self, s, r):
try:
@@ -1640,6 +1643,14 @@ class VMBaseClass(TestCase):
# Python 2.
self.assertRegexpMatches(s, r)
+ def assertNotRegex(self, s, r):
+ try:
+ # Python 3.
+ super(VMBaseClass, self).assertNotRegex(s, r)
+ except AttributeError:
+ # Python 2.
+ self.assertNotRegexpMatches(s, r)
+
def get_blkid_data(self, blkid_file):
data = self.load_collect_file(blkid_file)
ret = {}
diff --git a/tests/vmtests/releases.py b/tests/vmtests/releases.py
index 35b069b..fa755b1 100644
--- a/tests/vmtests/releases.py
+++ b/tests/vmtests/releases.py
@@ -185,20 +185,20 @@ class _FocalBase(_UbuntuBase):
subarch = "ga-20.04"
-class _GroovyBase(_UbuntuBase):
- release = "groovy"
- target_release = "groovy"
+class _HirsuteBase(_UbuntuBase):
+ release = "hirsute"
+ target_release = "hirsute"
mem = "2048"
if _UbuntuBase.arch == "arm64":
- subarch = "ga-20.10"
+ subarch = "ga-21.04"
-class _HirsuteBase(_UbuntuBase):
- release = "hirsute"
- target_release = "hirsute"
+class _ImpishBase(_UbuntuBase):
+ release = "impish"
+ target_release = "impish"
mem = "2048"
if _UbuntuBase.arch == "arm64":
- subarch = "ga-20.10"
+ subarch = "ga-21.10"
class _Releases(object):
@@ -219,8 +219,8 @@ class _Releases(object):
disco = _DiscoBase
eoan = _EoanBase
focal = _FocalBase
- groovy = _GroovyBase
hirsute = _HirsuteBase
+ impish = _ImpishBase
class _CentosReleases(object):
diff --git a/tests/vmtests/test_apt_config_cmd.py b/tests/vmtests/test_apt_config_cmd.py
index a95612c..2fdd971 100644
--- a/tests/vmtests/test_apt_config_cmd.py
+++ b/tests/vmtests/test_apt_config_cmd.py
@@ -72,7 +72,7 @@ class HirsuteTestAptConfigCMDCMD(relbase.hirsute, TestAptConfigCMD):
__test__ = True
-class GroovyTestAptConfigCMDCMD(relbase.groovy, TestAptConfigCMD):
+class ImpishTestAptConfigCMDCMD(relbase.impish, TestAptConfigCMD):
__test__ = True
diff --git a/tests/vmtests/test_apt_source.py b/tests/vmtests/test_apt_source.py
index a090ffa..9d00dc8 100644
--- a/tests/vmtests/test_apt_source.py
+++ b/tests/vmtests/test_apt_source.py
@@ -164,9 +164,8 @@ class TestAptSrcDisablePockets(TestAptSrcAbs):
r"deb.*us.archive.ubuntu.com")
self.check_file_regex("sources.list",
r"deb.*security.ubuntu.com")
- # updates disabled
- self.check_file_regex("sources.list",
- r"# suite disabled by curtin:.*-updates")
+ # updates disabled and not present
+ self.not_file_regex("sources.list", r"# .*-updates")
class TestAptSrcModifyArches(TestAptSrcModify):
diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py
index 06f1f59..6059bd9 100644
--- a/tests/vmtests/test_basic.py
+++ b/tests/vmtests/test_basic.py
@@ -263,7 +263,7 @@ class HirsuteTestBasic(relbase.hirsute, TestBasicAbs):
__test__ = True
-class GroovyTestBasic(relbase.groovy, TestBasicAbs):
+class ImpishTestBasic(relbase.impish, TestBasicAbs):
__test__ = True
@@ -387,7 +387,7 @@ class HirsuteTestScsiBasic(relbase.hirsute, TestBasicScsiAbs):
__test__ = True
-class GroovyTestScsiBasic(relbase.groovy, TestBasicScsiAbs):
+class ImpishTestScsiBasic(relbase.impish, TestBasicScsiAbs):
__test__ = True
diff --git a/tests/vmtests/test_basic_dasd.py b/tests/vmtests/test_basic_dasd.py
index 49957b8..0aa35fd 100644
--- a/tests/vmtests/test_basic_dasd.py
+++ b/tests/vmtests/test_basic_dasd.py
@@ -60,7 +60,7 @@ class HirsuteTestBasicDasd(relbase.hirsute, TestBasicDasd):
__test__ = True
-class GroovyTestBasicDasd(relbase.groovy, TestBasicDasd):
+class ImpishTestBasicDasd(relbase.impish, TestBasicDasd):
__test__ = True
diff --git a/tests/vmtests/test_bcache_basic.py b/tests/vmtests/test_bcache_basic.py
index e9ec1ea..490ae4c 100644
--- a/tests/vmtests/test_bcache_basic.py
+++ b/tests/vmtests/test_bcache_basic.py
@@ -72,7 +72,7 @@ class HirsuteBcacheBasic(relbase.hirsute, TestBcacheBasic):
__test__ = True
-class GroovyBcacheBasic(relbase.groovy, TestBcacheBasic):
+class ImpishBcacheBasic(relbase.impish, TestBcacheBasic):
__test__ = True
diff --git a/tests/vmtests/test_bcache_bug1718699.py b/tests/vmtests/test_bcache_bug1718699.py
index a43ec0f..3b19c41 100644
--- a/tests/vmtests/test_bcache_bug1718699.py
+++ b/tests/vmtests/test_bcache_bug1718699.py
@@ -27,7 +27,7 @@ class HirsuteTestBcacheBug1718699(relbase.hirsute, TestBcacheBug1718699):
__test__ = True
-class GroovyTestBcacheBug1718699(relbase.groovy, TestBcacheBug1718699):
+class ImpishTestBcacheBug1718699(relbase.impish, TestBcacheBug1718699):
__test__ = True
diff --git a/tests/vmtests/test_bcache_ceph.py b/tests/vmtests/test_bcache_ceph.py
index 53e050c..713ade9 100644
--- a/tests/vmtests/test_bcache_ceph.py
+++ b/tests/vmtests/test_bcache_ceph.py
@@ -83,7 +83,7 @@ class HirsuteTestBcacheCeph(relbase.hirsute, TestBcacheCeph):
__test__ = True
-class GroovyTestBcacheCeph(relbase.groovy, TestBcacheCeph):
+class ImpishTestBcacheCeph(relbase.impish, TestBcacheCeph):
__test__ = True
@@ -117,7 +117,7 @@ class HirsuteTestBcacheCephLvm(relbase.hirsute, TestBcacheCephLvm):
__test__ = True
-class GroovyTestBcacheCephLvm(relbase.groovy, TestBcacheCephLvm):
+class ImpishTestBcacheCephLvm(relbase.impish, TestBcacheCephLvm):
__test__ = True
diff --git a/tests/vmtests/test_bcache_partitions.py b/tests/vmtests/test_bcache_partitions.py
index a69d2ef..6268fd5 100644
--- a/tests/vmtests/test_bcache_partitions.py
+++ b/tests/vmtests/test_bcache_partitions.py
@@ -33,7 +33,7 @@ class HirsuteTestBcachePartitions(relbase.hirsute, TestBcachePartitions):
__test__ = True
-class GroovyTestBcachePartitions(relbase.groovy, TestBcachePartitions):
+class ImpishTestBcachePartitions(relbase.impish, TestBcachePartitions):
__test__ = True
diff --git a/tests/vmtests/test_fs_battery.py b/tests/vmtests/test_fs_battery.py
index 7d7b494..7d70f9c 100644
--- a/tests/vmtests/test_fs_battery.py
+++ b/tests/vmtests/test_fs_battery.py
@@ -159,10 +159,10 @@ class TestFsBattery(VMBaseClass):
def test_fstab_has_mounts(self):
"""Verify each of the expected "my" mounts got into fstab."""
expected = [
- "none /my/tmpfs tmpfs size=4194304 0 0".split(),
+ "none /my/tmpfs tmpfs size=4194304 0 1".split(),
"none /my/ramfs ramfs defaults 0 0".split(),
- "/my/bind-over-var-cache /var/cache none bind 0 0".split(),
- "/etc /my/bind-ro-etc none bind,ro 0 0".split(),
+ "/my/bind-over-var-cache /var/cache none bind 3 0".split(),
+ "/etc /my/bind-ro-etc none bind,ro 1 0".split(),
]
fstab_found = [
line.split() for line in self.load_collect_file(
@@ -247,7 +247,7 @@ class HirsuteTestFsBattery(relbase.hirsute, TestFsBattery):
__test__ = True
-class GroovyTestFsBattery(relbase.groovy, TestFsBattery):
+class ImpishTestFsBattery(relbase.impish, TestFsBattery):
__test__ = True
diff --git a/tests/vmtests/test_iscsi.py b/tests/vmtests/test_iscsi.py
index c46ac30..2fd9866 100644
--- a/tests/vmtests/test_iscsi.py
+++ b/tests/vmtests/test_iscsi.py
@@ -80,7 +80,7 @@ class HirsuteTestIscsiBasic(relbase.hirsute, TestBasicIscsiAbs):
__test__ = True
-class GroovyTestIscsiBasic(relbase.groovy, TestBasicIscsiAbs):
+class ImpishTestIscsiBasic(relbase.impish, TestBasicIscsiAbs):
__test__ = True
diff --git a/tests/vmtests/test_journald_reporter.py b/tests/vmtests/test_journald_reporter.py
index 3979aa7..064d71a 100644
--- a/tests/vmtests/test_journald_reporter.py
+++ b/tests/vmtests/test_journald_reporter.py
@@ -40,7 +40,7 @@ class HirsuteTestJournaldReporter(relbase.hirsute, TestJournaldReporter):
__test__ = True
-class GroovyTestJournaldReporter(relbase.groovy, TestJournaldReporter):
+class ImpishTestJournaldReporter(relbase.impish, TestJournaldReporter):
__test__ = True
diff --git a/tests/vmtests/test_lvm.py b/tests/vmtests/test_lvm.py
index d169eeb..a0ce90d 100644
--- a/tests/vmtests/test_lvm.py
+++ b/tests/vmtests/test_lvm.py
@@ -85,7 +85,7 @@ class HirsuteTestLvm(relbase.hirsute, TestLvmAbs):
__test__ = True
-class GroovyTestLvm(relbase.groovy, TestLvmAbs):
+class ImpishTestLvm(relbase.impish, TestLvmAbs):
__test__ = True
diff --git a/tests/vmtests/test_lvm_iscsi.py b/tests/vmtests/test_lvm_iscsi.py
index 0cf020e..463d863 100644
--- a/tests/vmtests/test_lvm_iscsi.py
+++ b/tests/vmtests/test_lvm_iscsi.py
@@ -103,7 +103,7 @@ class HirsuteTestIscsiLvm(relbase.hirsute, TestLvmIscsiAbs):
__test__ = True
-class GroovyTestIscsiLvm(relbase.groovy, TestLvmIscsiAbs):
+class ImpishTestIscsiLvm(relbase.impish, TestLvmIscsiAbs):
__test__ = True
diff --git a/tests/vmtests/test_lvm_raid.py b/tests/vmtests/test_lvm_raid.py
index 3fe71a9..650fc25 100644
--- a/tests/vmtests/test_lvm_raid.py
+++ b/tests/vmtests/test_lvm_raid.py
@@ -63,5 +63,5 @@ class HirsuteTestLvmOverRaid(relbase.hirsute, TestLvmOverRaidAbs):
__test__ = True
-class GroovyTestLvmOverRaid(relbase.groovy, TestLvmOverRaidAbs):
+class ImpishTestLvmOverRaid(relbase.impish, TestLvmOverRaidAbs):
__test__ = True
diff --git a/tests/vmtests/test_lvm_root.py b/tests/vmtests/test_lvm_root.py
index c910160..bc09e8d 100644
--- a/tests/vmtests/test_lvm_root.py
+++ b/tests/vmtests/test_lvm_root.py
@@ -101,7 +101,7 @@ class HirsuteTestLvmRootExt4(relbase.hirsute, TestLvmRootAbs):
}
-class GroovyTestLvmRootExt4(relbase.groovy, TestLvmRootAbs):
+class ImpishTestLvmRootExt4(relbase.impish, TestLvmRootAbs):
__test__ = True
conf_replace = {
'__ROOTFS_FORMAT__': 'ext4',
@@ -162,7 +162,7 @@ class HirsuteTestUefiLvmRootExt4(relbase.hirsute, TestUefiLvmRootAbs):
}
-class GroovyTestUefiLvmRootExt4(relbase.groovy, TestUefiLvmRootAbs):
+class ImpishTestUefiLvmRootExt4(relbase.impish, TestUefiLvmRootAbs):
__test__ = True
conf_replace = {
'__BOOTFS_FORMAT__': 'ext4',
diff --git a/tests/vmtests/test_mdadm_bcache.py b/tests/vmtests/test_mdadm_bcache.py
index 62f25e7..d90f478 100644
--- a/tests/vmtests/test_mdadm_bcache.py
+++ b/tests/vmtests/test_mdadm_bcache.py
@@ -162,7 +162,7 @@ class HirsuteTestMdadmBcache(relbase.hirsute, TestMdadmBcacheAbs):
__test__ = True
-class GroovyTestMdadmBcache(relbase.groovy, TestMdadmBcacheAbs):
+class ImpishTestMdadmBcache(relbase.impish, TestMdadmBcacheAbs):
__test__ = True
@@ -211,7 +211,7 @@ class HirsuteTestMirrorboot(relbase.hirsute, TestMirrorbootAbs):
__test__ = True
-class GroovyTestMirrorboot(relbase.groovy, TestMirrorbootAbs):
+class ImpishTestMirrorboot(relbase.impish, TestMirrorbootAbs):
__test__ = True
@@ -265,7 +265,7 @@ class HirsuteTestMirrorbootPartitions(relbase.hirsute,
__test__ = True
-class GroovyTestMirrorbootPartitions(relbase.groovy,
+class ImpishTestMirrorbootPartitions(relbase.impish,
TestMirrorbootPartitionsAbs):
__test__ = True
@@ -365,7 +365,7 @@ class HirsuteTestMirrorbootPartitionsUEFI(relbase.hirsute,
__test__ = True
-class GroovyTestMirrorbootPartitionsUEFI(relbase.groovy,
+class ImpishTestMirrorbootPartitionsUEFI(relbase.impish,
TestMirrorbootPartitionsUEFIAbs):
__test__ = True
@@ -418,7 +418,7 @@ class HirsuteTestRaid5boot(relbase.hirsute, TestRaid5bootAbs):
__test__ = True
-class GroovyTestRaid5boot(relbase.groovy, TestRaid5bootAbs):
+class ImpishTestRaid5boot(relbase.impish, TestRaid5bootAbs):
__test__ = True
@@ -483,7 +483,7 @@ class HirsuteTestRaid6boot(relbase.hirsute, TestRaid6bootAbs):
__test__ = True
-class GroovyTestRaid6boot(relbase.groovy, TestRaid6bootAbs):
+class ImpishTestRaid6boot(relbase.impish, TestRaid6bootAbs):
__test__ = True
@@ -534,7 +534,7 @@ class HirsuteTestRaid10boot(relbase.hirsute, TestRaid10bootAbs):
__test__ = True
-class GroovyTestRaid10boot(relbase.groovy, TestRaid10bootAbs):
+class ImpishTestRaid10boot(relbase.impish, TestRaid10bootAbs):
__test__ = True
@@ -642,7 +642,7 @@ class HirsuteTestAllindata(relbase.hirsute, TestAllindataAbs):
__test__ = True
-class GroovyTestAllindata(relbase.groovy, TestAllindataAbs):
+class ImpishTestAllindata(relbase.impish, TestAllindataAbs):
__test__ = True
diff --git a/tests/vmtests/test_mdadm_iscsi.py b/tests/vmtests/test_mdadm_iscsi.py
index 6ad6b72..f6f0b22 100644
--- a/tests/vmtests/test_mdadm_iscsi.py
+++ b/tests/vmtests/test_mdadm_iscsi.py
@@ -58,7 +58,7 @@ class HirsuteTestIscsiMdadm(relbase.hirsute, TestMdadmIscsiAbs):
__test__ = True
-class GroovyTestIscsiMdadm(relbase.groovy, TestMdadmIscsiAbs):
+class ImpishTestIscsiMdadm(relbase.impish, TestMdadmIscsiAbs):
__test__ = True
diff --git a/tests/vmtests/test_multipath.py b/tests/vmtests/test_multipath.py
index f924d25..ac85a58 100644
--- a/tests/vmtests/test_multipath.py
+++ b/tests/vmtests/test_multipath.py
@@ -166,7 +166,23 @@ class HirsuteTestMultipathBasic(relbase.hirsute, TestMultipathBasicAbs):
__test__ = True
-class GroovyTestMultipathBasic(relbase.groovy, TestMultipathBasicAbs):
+class ImpishTestMultipathBasic(relbase.impish, TestMultipathBasicAbs):
+ __test__ = True
+
+
+class TestMultipathReuseAbs(TestMultipathBasicAbs):
+ conf_file = "examples/tests/multipath-reuse.yaml"
+
+
+class FocalTestMultipathReuse(relbase.focal, TestMultipathReuseAbs):
+ __test__ = True
+
+
+class HirsuteTestMultipathReuse(relbase.hirsute, TestMultipathReuseAbs):
+ __test__ = True
+
+
+class ImpishTestMultipathReuse(relbase.impish, TestMultipathReuseAbs):
__test__ = True
diff --git a/tests/vmtests/test_multipath_lvm.py b/tests/vmtests/test_multipath_lvm.py
index f5f5537..97d797e 100644
--- a/tests/vmtests/test_multipath_lvm.py
+++ b/tests/vmtests/test_multipath_lvm.py
@@ -64,7 +64,7 @@ class HirsuteTestMultipathLvm(relbase.hirsute, TestMultipathLvmAbs):
__test__ = True
-class GroovyTestMultipathLvm(relbase.groovy, TestMultipathLvmAbs):
+class ImpishTestMultipathLvm(relbase.impish, TestMultipathLvmAbs):
__test__ = True
@@ -82,7 +82,7 @@ class HirsuteTestMultipathLvmPartWipe(relbase.hirsute,
__test__ = True
-class GroovyTestMultipathLvmPartWipe(relbase.groovy,
+class ImpishTestMultipathLvmPartWipe(relbase.impish,
TestMultipathLvmPartWipeAbs):
__test__ = True
diff --git a/tests/vmtests/test_network.py b/tests/vmtests/test_network.py
index db16bd4..1b42493 100644
--- a/tests/vmtests/test_network.py
+++ b/tests/vmtests/test_network.py
@@ -482,7 +482,7 @@ class HirsuteTestNetworkBasic(relbase.hirsute, TestNetworkBasicAbs):
__test__ = True
-class GroovyTestNetworkBasic(relbase.groovy, TestNetworkBasicAbs):
+class ImpishTestNetworkBasic(relbase.impish, TestNetworkBasicAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_alias.py b/tests/vmtests/test_network_alias.py
index 9f460fa..8b58edd 100644
--- a/tests/vmtests/test_network_alias.py
+++ b/tests/vmtests/test_network_alias.py
@@ -60,7 +60,7 @@ class HirsuteTestNetworkAlias(relbase.hirsute, TestNetworkAliasAbs):
__test__ = True
-class GroovyTestNetworkAlias(relbase.groovy, TestNetworkAliasAbs):
+class ImpishTestNetworkAlias(relbase.impish, TestNetworkAliasAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_bonding.py b/tests/vmtests/test_network_bonding.py
index f58065d..73bcf60 100644
--- a/tests/vmtests/test_network_bonding.py
+++ b/tests/vmtests/test_network_bonding.py
@@ -65,7 +65,7 @@ class HirsuteTestBonding(relbase.hirsute, TestNetworkBondingAbs):
__test__ = True
-class GroovyTestBonding(relbase.groovy, TestNetworkBondingAbs):
+class ImpishTestBonding(relbase.impish, TestNetworkBondingAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_bridging.py b/tests/vmtests/test_network_bridging.py
index b0d5962..93ecc4b 100644
--- a/tests/vmtests/test_network_bridging.py
+++ b/tests/vmtests/test_network_bridging.py
@@ -244,7 +244,7 @@ class HirsuteTestBridging(relbase.hirsute, TestBridgeNetworkAbs):
__test__ = True
-class GroovyTestBridging(relbase.groovy, TestBridgeNetworkAbs):
+class ImpishTestBridging(relbase.impish, TestBridgeNetworkAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_disabled.py b/tests/vmtests/test_network_disabled.py
index 9079f8e..d56ebde 100644
--- a/tests/vmtests/test_network_disabled.py
+++ b/tests/vmtests/test_network_disabled.py
@@ -67,7 +67,7 @@ class HirsuteCurtinDisableNetworkRendering(relbase.hirsute, TestKlass1):
__test__ = True
-class GroovyCurtinDisableNetworkRendering(relbase.groovy, TestKlass1):
+class ImpishCurtinDisableNetworkRendering(relbase.impish, TestKlass1):
__test__ = True
@@ -79,7 +79,7 @@ class HirsuteCurtinDisableCloudInitNetworking(relbase.hirsute, TestKlass2):
__test__ = True
-class GroovyCurtinDisableCloudInitNetworking(relbase.groovy, TestKlass2):
+class ImpishCurtinDisableCloudInitNetworking(relbase.impish, TestKlass2):
__test__ = True
@@ -92,7 +92,7 @@ class HirsuteCurtinDisableCloudInitNetworkingVersion1(relbase.hirsute,
__test__ = True
-class GroovyCurtinDisableCloudInitNetworkingVersion1(relbase.groovy,
+class ImpishCurtinDisableCloudInitNetworkingVersion1(relbase.impish,
TestKlass3):
__test__ = True
diff --git a/tests/vmtests/test_network_ipv6.py b/tests/vmtests/test_network_ipv6.py
index d76d295..80b8ccf 100644
--- a/tests/vmtests/test_network_ipv6.py
+++ b/tests/vmtests/test_network_ipv6.py
@@ -53,7 +53,15 @@ class BionicTestNetworkIPV6(relbase.bionic, TestNetworkIPV6Abs):
__test__ = True
-class GroovyTestNetworkIPV6(relbase.groovy, TestNetworkIPV6Abs):
+class FocalTestNetworkIPV6(relbase.focal, TestNetworkIPV6Abs):
+ __test__ = True
+
+
+class HirsuteTestNetworkIPV6(relbase.hirsute, TestNetworkIPV6Abs):
+ __test__ = True
+
+
+class ImpishTestNetworkIPV6(relbase.impish, TestNetworkIPV6Abs):
__test__ = True
diff --git a/tests/vmtests/test_network_ipv6_static.py b/tests/vmtests/test_network_ipv6_static.py
index c3e5b5a..f24aab5 100644
--- a/tests/vmtests/test_network_ipv6_static.py
+++ b/tests/vmtests/test_network_ipv6_static.py
@@ -31,7 +31,7 @@ class HirsuteTestNetworkIPV6Static(relbase.hirsute, TestNetworkIPV6StaticAbs):
__test__ = True
-class GroovyTestNetworkIPV6Static(relbase.groovy, TestNetworkIPV6StaticAbs):
+class ImpishTestNetworkIPV6Static(relbase.impish, TestNetworkIPV6StaticAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_ipv6_vlan.py b/tests/vmtests/test_network_ipv6_vlan.py
index 96c056b..a6eae41 100644
--- a/tests/vmtests/test_network_ipv6_vlan.py
+++ b/tests/vmtests/test_network_ipv6_vlan.py
@@ -30,7 +30,7 @@ class HirsuteTestNetworkIPV6Vlan(relbase.hirsute, TestNetworkIPV6VlanAbs):
__test__ = True
-class GroovyTestNetworkIPV6Vlan(relbase.groovy, TestNetworkIPV6VlanAbs):
+class ImpishTestNetworkIPV6Vlan(relbase.impish, TestNetworkIPV6VlanAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_mtu.py b/tests/vmtests/test_network_mtu.py
index 8e6e57c..a36a752 100644
--- a/tests/vmtests/test_network_mtu.py
+++ b/tests/vmtests/test_network_mtu.py
@@ -197,7 +197,7 @@ class HirsuteTestNetworkMtu(relbase.hirsute, TestNetworkMtuNetworkdAbs):
__test__ = True
-class GroovyTestNetworkMtu(relbase.groovy, TestNetworkMtuNetworkdAbs):
+class ImpishTestNetworkMtu(relbase.impish, TestNetworkMtuNetworkdAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_ovs.py b/tests/vmtests/test_network_ovs.py
index a32d637..3e8699d 100644
--- a/tests/vmtests/test_network_ovs.py
+++ b/tests/vmtests/test_network_ovs.py
@@ -42,7 +42,8 @@ class HirsuteTestNetworkOvs(relbase.hirsute, TestNetworkOvsAbs):
__test__ = True
-class GroovyTestNetworkOvs(relbase.groovy, TestNetworkOvsAbs):
+class ImpishTestNetworkOvs(relbase.impish, TestNetworkOvsAbs):
__test__ = True
+
# vi: ts=4 expandtab syntax=python
diff --git a/tests/vmtests/test_network_static.py b/tests/vmtests/test_network_static.py
index abcbb75..95960af 100644
--- a/tests/vmtests/test_network_static.py
+++ b/tests/vmtests/test_network_static.py
@@ -36,7 +36,7 @@ class HirsuteTestNetworkStatic(relbase.hirsute, TestNetworkStaticAbs):
__test__ = True
-class GroovyTestNetworkStatic(relbase.groovy, TestNetworkStaticAbs):
+class ImpishTestNetworkStatic(relbase.impish, TestNetworkStaticAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_static_routes.py b/tests/vmtests/test_network_static_routes.py
index e158f9a..eb096ee 100644
--- a/tests/vmtests/test_network_static_routes.py
+++ b/tests/vmtests/test_network_static_routes.py
@@ -38,7 +38,7 @@ class HirsuteTestNetworkStaticRoutes(relbase.hirsute,
__test__ = True
-class GroovyTestNetworkStaticRoutes(relbase.groovy,
+class ImpishTestNetworkStaticRoutes(relbase.impish,
TestNetworkStaticRoutesAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_vlan.py b/tests/vmtests/test_network_vlan.py
index af52af0..38bc87c 100644
--- a/tests/vmtests/test_network_vlan.py
+++ b/tests/vmtests/test_network_vlan.py
@@ -79,18 +79,12 @@ class BionicTestNetworkVlan(relbase.bionic, TestNetworkVlanAbs):
class FocalTestNetworkVlan(relbase.focal, TestNetworkVlanAbs):
__test__ = True
- def test_ip_output(self):
- return super().test_ip_output()
-
class HirsuteTestNetworkVlan(relbase.hirsute, TestNetworkVlanAbs):
__test__ = True
- def test_ip_output(self):
- return super().test_ip_output()
-
-class GroovyTestNetworkVlan(relbase.groovy, TestNetworkVlanAbs):
+class ImpishTestNetworkVlan(relbase.impish, TestNetworkVlanAbs):
__test__ = True
diff --git a/tests/vmtests/test_nvme.py b/tests/vmtests/test_nvme.py
index 9531a80..6352f6d 100644
--- a/tests/vmtests/test_nvme.py
+++ b/tests/vmtests/test_nvme.py
@@ -73,8 +73,18 @@ class BionicTestNvme(relbase.bionic, TestNvmeAbs):
__test__ = True
-class GroovyTestNvme(relbase.groovy, TestNvmeAbs):
- __test__ = True
+class FocalTestNvme(relbase.focal, TestNvmeAbs):
+ __test__ = False
+ # An error occured handling 'nvme_disk2':
+ # OSError - [Errno 16] Device or resource busy: '/dev/mapper/mpatha'
+
+
+class HirsuteTestNvme(relbase.hirsute, TestNvmeAbs):
+ __test__ = False
+
+
+class ImpishTestNvme(relbase.impish, TestNvmeAbs):
+ __test__ = False
class TestNvmeBcacheAbs(TestNvmeAbs):
@@ -147,7 +157,7 @@ class HirsuteTestNvmeBcache(relbase.hirsute, TestNvmeBcacheAbs):
__test__ = True
-class GroovyTestNvmeBcache(relbase.groovy, TestNvmeBcacheAbs):
+class ImpishTestNvmeBcache(relbase.impish, TestNvmeBcacheAbs):
__test__ = True
diff --git a/tests/vmtests/test_panic.py b/tests/vmtests/test_panic.py
index e841f2a..a5533f9 100644
--- a/tests/vmtests/test_panic.py
+++ b/tests/vmtests/test_panic.py
@@ -33,7 +33,7 @@ class HirsuteTestInstallPanic(relbase.hirsute, TestInstallPanic):
__test__ = True
-class GroovyTestInstallPanic(relbase.groovy, TestInstallPanic):
+class ImpishTestInstallPanic(relbase.impish, TestInstallPanic):
__test__ = True
diff --git a/tests/vmtests/test_pollinate_useragent.py b/tests/vmtests/test_pollinate_useragent.py
index 4aeefd8..fa33ec5 100644
--- a/tests/vmtests/test_pollinate_useragent.py
+++ b/tests/vmtests/test_pollinate_useragent.py
@@ -69,7 +69,7 @@ class HirsuteTestPollinateUserAgent(relbase.hirsute, TestPollinateUserAgent):
__test__ = True
-class GroovyTestPollinateUserAgent(relbase.groovy, TestPollinateUserAgent):
+class ImpishTestPollinateUserAgent(relbase.impish, TestPollinateUserAgent):
__test__ = True
diff --git a/tests/vmtests/test_preserve.py b/tests/vmtests/test_preserve.py
index 28dd34f..74fe29e 100644
--- a/tests/vmtests/test_preserve.py
+++ b/tests/vmtests/test_preserve.py
@@ -33,7 +33,7 @@ class HirsuteTestPreserve(relbase.hirsute, TestPreserve):
__test__ = True
-class GroovyTestPreserve(relbase.groovy, TestPreserve):
+class ImpishTestPreserve(relbase.impish, TestPreserve):
__test__ = True
diff --git a/tests/vmtests/test_preserve_bcache.py b/tests/vmtests/test_preserve_bcache.py
index 2e6b412..46edd3f 100644
--- a/tests/vmtests/test_preserve_bcache.py
+++ b/tests/vmtests/test_preserve_bcache.py
@@ -64,7 +64,7 @@ class HirsuteTestPreserveBcache(relbase.hirsute, TestPreserveBcache):
__test__ = True
-class GroovyTestPreserveBcache(relbase.groovy, TestPreserveBcache):
+class ImpishTestPreserveBcache(relbase.impish, TestPreserveBcache):
__test__ = True
diff --git a/tests/vmtests/test_preserve_lvm.py b/tests/vmtests/test_preserve_lvm.py
index 274e9c6..0c09c11 100644
--- a/tests/vmtests/test_preserve_lvm.py
+++ b/tests/vmtests/test_preserve_lvm.py
@@ -77,7 +77,7 @@ class HirsuteTestLvmPreserve(relbase.hirsute, TestLvmPreserveAbs):
__test__ = True
-class GroovyTestLvmPreserve(relbase.groovy, TestLvmPreserveAbs):
+class ImpishTestLvmPreserve(relbase.impish, TestLvmPreserveAbs):
__test__ = True
diff --git a/tests/vmtests/test_preserve_partition_wipe_vg.py b/tests/vmtests/test_preserve_partition_wipe_vg.py
index 2469615..4db6c2b 100644
--- a/tests/vmtests/test_preserve_partition_wipe_vg.py
+++ b/tests/vmtests/test_preserve_partition_wipe_vg.py
@@ -33,7 +33,7 @@ class HirsuteTestPreserveWipeLvm(relbase.hirsute, TestPreserveWipeLvm):
__test__ = True
-class GroovyTestPreserveWipeLvm(relbase.groovy, TestPreserveWipeLvm):
+class ImpishTestPreserveWipeLvm(relbase.impish, TestPreserveWipeLvm):
__test__ = True
@@ -61,7 +61,7 @@ class HirsuteTestPreserveWipeLvmSimple(relbase.hirsute,
__test__ = True
-class GroovyTestPreserveWipeLvmSimple(relbase.groovy,
+class ImpishTestPreserveWipeLvmSimple(relbase.impish,
TestPreserveWipeLvmSimple):
__test__ = True
diff --git a/tests/vmtests/test_preserve_raid.py b/tests/vmtests/test_preserve_raid.py
index 7fc6daa..4bb977e 100644
--- a/tests/vmtests/test_preserve_raid.py
+++ b/tests/vmtests/test_preserve_raid.py
@@ -33,7 +33,42 @@ class HirsuteTestPreserveRAID(relbase.hirsute, TestPreserveRAID):
__test__ = True
-class GroovyTestPreserveRAID(relbase.groovy, TestPreserveRAID):
+class ImpishTestPreserveRAID(relbase.impish, TestPreserveRAID):
+ __test__ = True
+
+
+class TestPartitionExistingRAID(VMBaseClass):
+ """ Test that curtin can repartition an existing RAID. """
+ conf_file = "examples/tests/partition-existing-raid.yaml"
+ extra_disks = ['10G', '10G', '10G']
+ uefi = True
+ extra_collect_scripts = [textwrap.dedent("""
+ cd OUTPUT_COLLECT_D
+ lsblk --nodeps --noheading --raw --output PTTYPE /dev/md1 > md1-pttype
+ exit 0
+ """)]
+
+ def test_correct_ptype(self):
+ self.assertEqual('gpt', self.load_collect_file('md1-pttype').strip())
+
+
+class BionicTestPartitionExistingRAID(
+ relbase.bionic, TestPartitionExistingRAID):
+ __test__ = True
+
+
+class FocalTestPartitionExistingRAID(
+ relbase.focal, TestPartitionExistingRAID):
+ __test__ = True
+
+
+class HirsuteTestPartitionExistingRAID(
+ relbase.hirsute, TestPartitionExistingRAID):
+ __test__ = True
+
+
+class ImpishTestPartitionExistingRAID(
+ relbase.impish, TestPartitionExistingRAID):
__test__ = True
diff --git a/tests/vmtests/test_python_apt.py b/tests/vmtests/test_python_apt.py
new file mode 100644
index 0000000..4162608
--- /dev/null
+++ b/tests/vmtests/test_python_apt.py
@@ -0,0 +1,42 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+from aptsources.sourceslist import SourceEntry
+
+from . import VMBaseClass
+from .releases import base_vm_classes as relbase
+
+
+class TestPythonApt(VMBaseClass):
+ """TestPythonApt - apt sources manipulation with python{,3}-apt"""
+ test_type = 'config'
+ conf_file = "examples/tests/apt_source_custom.yaml"
+
+ def test_python_apt(self):
+ """test_python_apt - Ensure the python-apt package is available"""
+
+ line = 'deb http://us.archive.ubuntu.com/ubuntu/ hirsute main'
+
+ self.assertEqual(line, str(SourceEntry(line)))
+
+
+class XenialTestPythonApt(relbase.xenial, TestPythonApt):
+ __test__ = True
+
+
+class BionicTestPythonApt(relbase.bionic, TestPythonApt):
+ __test__ = True
+
+
+class FocalTestPythonApt(relbase.focal, TestPythonApt):
+ __test__ = True
+
+
+class HirsuteTestPythonApt(relbase.hirsute, TestPythonApt):
+ __test__ = True
+
+
+class ImpishTestPythonApt(relbase.impish, TestPythonApt):
+ __test__ = True
+
+
+# vi: ts=4 expandtab syntax=python
diff --git a/tests/vmtests/test_raid5_bcache.py b/tests/vmtests/test_raid5_bcache.py
index 493c0fd..e682f34 100644
--- a/tests/vmtests/test_raid5_bcache.py
+++ b/tests/vmtests/test_raid5_bcache.py
@@ -96,7 +96,7 @@ class HirsuteTestRaid5Bcache(relbase.hirsute, TestMdadmBcacheAbs):
__test__ = True
-class GroovyTestRaid5Bcache(relbase.groovy, TestMdadmBcacheAbs):
+class ImpishTestRaid5Bcache(relbase.impish, TestMdadmBcacheAbs):
__test__ = True
diff --git a/tests/vmtests/test_raid_partition_to_disk.py b/tests/vmtests/test_raid_partition_to_disk.py
index aaa73f2..a2bd1be 100644
--- a/tests/vmtests/test_raid_partition_to_disk.py
+++ b/tests/vmtests/test_raid_partition_to_disk.py
@@ -26,7 +26,7 @@ class HirsuteTestRAIDPartitionToDisk(relbase.hirsute, TestRAIDPartitionToDisk):
__test__ = True
-class GroovyTestRAIDPartitionToDisk(relbase.groovy, TestRAIDPartitionToDisk):
+class ImpishTestRAIDPartitionToDisk(relbase.impish, TestRAIDPartitionToDisk):
__test__ = True
diff --git a/tests/vmtests/test_reuse_lvm_member.py b/tests/vmtests/test_reuse_lvm_member.py
index eba3d1b..81c0cb0 100644
--- a/tests/vmtests/test_reuse_lvm_member.py
+++ b/tests/vmtests/test_reuse_lvm_member.py
@@ -31,7 +31,7 @@ class HirsuteTestReuseLVMMemberPartition(relbase.hirsute,
__test__ = True
-class GroovyTestReuseLVMMemberPartition(relbase.groovy,
+class ImpishTestReuseLVMMemberPartition(relbase.impish,
TestReuseLVMMemberPartition):
__test__ = True
diff --git a/tests/vmtests/test_reuse_msdos_partitions.py b/tests/vmtests/test_reuse_msdos_partitions.py
index 77431bf..ed52f5b 100644
--- a/tests/vmtests/test_reuse_msdos_partitions.py
+++ b/tests/vmtests/test_reuse_msdos_partitions.py
@@ -28,7 +28,7 @@ class HirsuteTestReuseMSDOSPartitions(relbase.hirsute,
__test__ = True
-class GroovyTestReuseMSDOSPartitions(relbase.groovy,
+class ImpishTestReuseMSDOSPartitions(relbase.impish,
TestReuseMSDOSPartitions):
__test__ = True
diff --git a/tests/vmtests/test_reuse_raid_member.py b/tests/vmtests/test_reuse_raid_member.py
index e3723e8..7611140 100644
--- a/tests/vmtests/test_reuse_raid_member.py
+++ b/tests/vmtests/test_reuse_raid_member.py
@@ -36,7 +36,7 @@ class HirsuteTestReuseRAIDMember(relbase.hirsute, TestReuseRAIDMember):
__test__ = True
-class GroovyTestReuseRAIDMember(relbase.groovy, TestReuseRAIDMember):
+class ImpishTestReuseRAIDMember(relbase.impish, TestReuseRAIDMember):
__test__ = True
@@ -55,7 +55,7 @@ class HirsuteTestReuseRAIDMemberPartition(relbase.hirsute,
__test__ = True
-class GroovyTestReuseRAIDMemberPartition(relbase.groovy,
+class ImpishTestReuseRAIDMemberPartition(relbase.impish,
TestReuseRAIDMemberPartition):
__test__ = True
diff --git a/tests/vmtests/test_reuse_uefi_esp.py b/tests/vmtests/test_reuse_uefi_esp.py
index 46f3a57..958e43f 100644
--- a/tests/vmtests/test_reuse_uefi_esp.py
+++ b/tests/vmtests/test_reuse_uefi_esp.py
@@ -49,7 +49,7 @@ class HirsuteTestUefiReuseEsp(relbase.hirsute, TestUefiReuseEspAbs):
return super().test_efiboot_menu_has_one_distro_entry()
-class GroovyTestUefiReuseEsp(relbase.groovy, TestUefiReuseEspAbs):
+class ImpishTestUefiReuseEsp(relbase.impish, TestUefiReuseEspAbs):
__test__ = True
def test_efiboot_menu_has_one_distro_entry(self):
diff --git a/tests/vmtests/test_simple.py b/tests/vmtests/test_simple.py
index 83dca96..0ee87fc 100644
--- a/tests/vmtests/test_simple.py
+++ b/tests/vmtests/test_simple.py
@@ -63,7 +63,7 @@ class HirsuteTestSimple(relbase.hirsute, TestSimple):
self.output_files_exist(["netplan.yaml"])
-class GroovyTestSimple(relbase.groovy, TestSimple):
+class ImpishTestSimple(relbase.impish, TestSimple):
__test__ = True
def test_output_files_exist(self):
@@ -126,7 +126,7 @@ class HirsuteTestSimpleStorage(relbase.hirsute, TestSimpleStorage):
self.output_files_exist(["netplan.yaml"])
-class GroovyTestSimpleStorage(relbase.groovy, TestSimpleStorage):
+class ImpishTestSimpleStorage(relbase.impish, TestSimpleStorage):
__test__ = True
def test_output_files_exist(self):
@@ -166,7 +166,7 @@ class HirsuteTestGrubNoDefaults(relbase.hirsute, TestGrubNoDefaults):
self.output_files_exist(["netplan.yaml"])
-class GroovyTestGrubNoDefaults(relbase.groovy, TestGrubNoDefaults):
+class ImpishTestGrubNoDefaults(relbase.impish, TestGrubNoDefaults):
__test__ = True
def test_output_files_exist(self):
diff --git a/tests/vmtests/test_uefi_basic.py b/tests/vmtests/test_uefi_basic.py
index 0ed4fab..aa4c650 100644
--- a/tests/vmtests/test_uefi_basic.py
+++ b/tests/vmtests/test_uefi_basic.py
@@ -118,7 +118,7 @@ class HirsuteUefiTestBasic(relbase.hirsute, TestBasicAbs):
__test__ = True
-class GroovyUefiTestBasic(relbase.groovy, TestBasicAbs):
+class ImpishUefiTestBasic(relbase.impish, TestBasicAbs):
__test__ = True
@@ -147,7 +147,7 @@ class HirsuteUefiTestBasic4k(relbase.hirsute, TestBasicAbs):
disk_block_size = 4096
-class GroovyUefiTestBasic4k(relbase.groovy, TestBasicAbs):
+class ImpishUefiTestBasic4k(relbase.impish, TestBasicAbs):
__test__ = True
disk_block_size = 4096
diff --git a/tests/vmtests/test_zfsroot.py b/tests/vmtests/test_zfsroot.py
index 21e33b6..0e310c6 100644
--- a/tests/vmtests/test_zfsroot.py
+++ b/tests/vmtests/test_zfsroot.py
@@ -106,7 +106,7 @@ class HirsuteTestZfsRoot(relbase.hirsute, TestZfsRootAbs):
mem = 4096
-class GroovyTestZfsRoot(relbase.groovy, TestZfsRootAbs):
+class ImpishTestZfsRoot(relbase.impish, TestZfsRootAbs):
__test__ = True
mem = 4096
@@ -140,7 +140,9 @@ class HirsuteTestZfsRootFsType(relbase.hirsute, TestZfsRootFsTypeAbs):
mem = 4096
-class GroovyTestZfsRootFsType(relbase.groovy, TestZfsRootFsTypeAbs):
+class ImpishTestZfsRootFsType(relbase.impish, TestZfsRootFsTypeAbs):
__test__ = True
+ mem = 4096
+
# vi: ts=4 expandtab syntax=python
diff --git a/tools/vmtest-add-release b/tools/vmtest-add-release
index 7a74296..5acd2c3 100755
--- a/tools/vmtest-add-release
+++ b/tools/vmtest-add-release
@@ -49,7 +49,8 @@ if __name__ == "__main__":
description="Tool to add vmtest classes by distro release")
parser.add_argument('--distro-release', '-d',
action='store', required=True)
- parser.add_argument('--path', '-p', action='store', required=True)
+ parser.add_argument('--path', '-p', action='store',
+ default='./tests/vmtests')
parser.add_argument('--previous-release', '-r',
action='store', required=True)
diff --git a/tools/vmtest-create-static-images b/tools/vmtest-create-static-images
new file mode 100755
index 0000000..d236608
--- /dev/null
+++ b/tools/vmtest-create-static-images
@@ -0,0 +1,64 @@
+#!/bin/bash
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+IMAGE_DIR=${CURTIN_VMTEST_IMAGE_DIR:-/srv/images}
+LVM_IMAGE=${IMAGE_DIR}/static/lvm-disk.dd
+WORKDIR=$(dirname "${LVM_IMAGE}")
+
+shopt -s extglob
+set -eux
+mkdir -p "${WORKDIR}"
+
+cleanup() {
+ if [ -e /dev/mapper/vmtests-root ]; then
+ sudo umount /dev/mapper/vmtests-root
+ fi
+ rm -rf "${WORKDIR:?}"/mnt
+}
+trap cleanup EXIT
+
+(
+ flock -n 100 || exit 1
+
+ rm -f "${LVM_IMAGE}"
+
+ # create a raw disk
+ truncate -s 2G "${LVM_IMAGE}"
+
+ # find a free loop device
+ loopdev=$(losetup --show -f "${LVM_IMAGE}")
+
+ # partition the disk
+ sudo /sbin/parted -a optimal "${loopdev}" --script \
+ mklabel gpt \
+ mkpart primary 0% 25% \
+ mkpart primary 25% 100% \
+ set 2 lvm on
+ sudo udevadm trigger --settle "${loopdev}"
+
+ # create LVM volumes
+ sudo pvcreate "${loopdev}"p2
+ sudo vgcreate vmtests "${loopdev}"p2
+ sudo lvcreate -L 1G -n root vmtests
+ sudo udevadm settle
+
+ # format and add curtin dir to image
+ sudo mkfs.ext2 "${loopdev}"p1
+ sudo mkfs.ext4 /dev/mapper/vmtests-root
+ mkdir -p "${WORKDIR}"/mnt
+ sudo mount /dev/mapper/vmtests-root "${WORKDIR}"/mnt
+
+ # curtin looks for this directory to identify the rootfs
+ sudo mkdir "${WORKDIR}"/mnt/curtin
+ sudo tee "${WORKDIR}"/mnt/curtin/curtin-hooks <<EOF
+#!/bin/sh
+exit 0
+EOF
+ sudo chmod 750 "${WORKDIR}"/mnt/curtin/curtin-hooks
+
+ sudo umount /dev/mapper/vmtests-root
+ sudo losetup -d "$loopdev"
+ sudo dmsetup remove /dev/mapper/vmtests-root
+ sudo udevadm settle
+
+) 100>"${IMAGE_DIR}"/.static.lock
diff --git a/tools/vmtest-remove-release b/tools/vmtest-remove-release
index d2c5f83..1315c46 100755
--- a/tools/vmtest-remove-release
+++ b/tools/vmtest-remove-release
@@ -40,7 +40,8 @@ if __name__ == "__main__":
description="Tool to remove vmtest classes by distro release")
parser.add_argument('--distro-release', '-d',
action='store', required=True)
- parser.add_argument('--path', '-p', action='store', required=True)
+ parser.add_argument('--path', '-p', action='store',
+ default='./tests/vmtests')
args = parser.parse_args()
distro = args.distro_release.title()
diff --git a/tools/vmtest-system-setup b/tools/vmtest-system-setup
index b9185db..05f4c8e 100755
--- a/tools/vmtest-system-setup
+++ b/tools/vmtest-system-setup
@@ -5,12 +5,17 @@ _APT_UPDATED=false
error() { echo "$@" 1>&2; }
fail() { [ $# -eq 0 ] || error "$@"; exit 2; }
-rel="$(lsb_release -sc)"
case "$(uname -m)" in
i?86|x86_64) qemu="qemu-system-x86";;
ppc*) qemu="qemu-system-ppc";;
s390x) qemu="qemu-system-s390x";;
esac
+
+get_python_apt() {
+ [[ "$1" < "21.04" ]] && echo python-apt
+ [[ "$1" > "16.04" ]] && echo python3-apt
+}
+
DEPS=(
cloud-image-utils
make
@@ -20,6 +25,7 @@ DEPS=(
python3-nose
python3-simplestreams
python3-yaml
+ $(get_python_apt "$(lsb_release -sr)")
ovmf
simplestreams
$qemu
diff --git a/tox.ini b/tox.ini
index 04b43b6..d9437c5 100644
--- a/tox.ini
+++ b/tox.ini
@@ -22,15 +22,18 @@ setenv = VIRTUAL_ENV={envdir}
LC_ALL = en_US.utf-8
deps = -r{toxinidir}/test-requirements.txt
-r{toxinidir}/requirements.txt
-commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+# noproxy needed for several of the curtin jenkins jobs
+commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
{posargs:--with-coverage --cover-erase --cover-branches \
--cover-package=curtin --cover-inclusive tests/unittests}
[testenv:py3]
basepython = python3
+sitepackages = true
[testenv:py27]
basepython = python2.7
+sitepackages = true
# https://github.com/pypa/setuptools/issues/1963
deps = {[testenv]deps}
setuptools<45
@@ -59,6 +62,7 @@ commands = {envpython} -m pyflakes {posargs:curtin/ tests/ tools/}
[testenv:py3-pylint]
# set basepython because tox 1.6 (trusty) does not support generated environments
basepython = python3
+sitepackages = true
deps = {[testenv]deps}
pylint==2.6.0
git+https://git.launchpad.net/simplestreams
@@ -67,6 +71,7 @@ commands = {envpython} -m pylint --errors-only {posargs:curtin tests/vmtests}
[testenv:py27-pylint]
# set basepython because tox 1.6 (trusty) does not support generated environments
basepython = python2.7
+sitepackages = true
deps = {[testenv]deps}
{[testenv:py27]deps}
pylint==1.8.1
@@ -107,13 +112,14 @@ deps = {[testenv:trusty]deps}
setuptools<45
basepython = python2.7
-commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+sitepackages = true
+commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
{posargs:tests/unittests}
[testenv:trusty-py3]
deps = {[testenv:trusty]deps}
basepython = python3
-commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
{posargs:tests/unittests}
[testenv:xenial]
@@ -127,13 +133,14 @@ deps =
basepython = python27
deps = {[testenv:xenial]deps}
{[testenv:py27]deps}
-commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
{posargs:tests/unittests}
[testenv:xenial-py3]
basepython = python3
+sitepackages = true
deps = {[testenv:xenial]deps}
-commands = {envpython} {toxinidir}/tools/noproxy nosetests \
+commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
{posargs:tests/unittests}
[testenv:tip-pycodestyle]
Follow ups