[Merge] ~dbungert/curtin:ubuntu/devel into curtin:ubuntu/devel
Dan Bungert has proposed merging ~dbungert/curtin:ubuntu/devel into curtin:ubuntu/devel.
Commit message:
Version 22.1
Requested reviews:
curtin developers (curtin-dev)
For more details, see:
https://code.launchpad.net/~dbungert/curtin/+git/curtin/+merge/425672
--
Your team curtin developers is requested to review the proposed merge of ~dbungert/curtin:ubuntu/devel into curtin:ubuntu/devel.
diff --git a/curtin/__init__.py b/curtin/__init__.py
index 8a3e850..036cd5d 100644
--- a/curtin/__init__.py
+++ b/curtin/__init__.py
@@ -40,6 +40,6 @@ FEATURES = [
'FSTAB_DEFAULT_FSCK_ON_BLK'
]
-__version__ = "21.3"
+__version__ = "22.1"
# vi: ts=4 expandtab syntax=python
diff --git a/curtin/block/__init__.py b/curtin/block/__init__.py
index ca0bc10..49b062f 100644
--- a/curtin/block/__init__.py
+++ b/curtin/block/__init__.py
@@ -993,19 +993,12 @@ def sysfs_partition_data(blockdev=None, sysfs_path=None):
else:
raise ValueError("Blockdev and sysfs_path cannot both be None")
- # queue property is only on parent devices, ie, we can't read
- # /sys/class/block/vda/vda1/queue/* as queue is only on the
- # parent device
sysfs_prefix = sysfs_path
(parent, partnum) = get_blockdev_for_partition(blockdev)
if partnum:
sysfs_prefix = sys_block_path(parent)
partnum = int(partnum)
- block_size = int(util.load_file(os.path.join(
- sysfs_prefix, 'queue/logical_block_size')))
- unit = block_size
-
ptdata = []
for part_sysfs in get_sysfs_partitions(sysfs_prefix):
data = {}
@@ -1015,8 +1008,12 @@ def sysfs_partition_data(blockdev=None, sysfs_path=None):
continue
data[sfile] = int(util.load_file(dfile))
if partnum is None or data['partition'] == partnum:
- ptdata.append((path_to_kname(part_sysfs), data['partition'],
- data['start'] * unit, data['size'] * unit,))
+ ptdata.append((
+ path_to_kname(part_sysfs),
+ data['partition'],
+ data['start'] * SECTOR_SIZE_BYTES,
+ data['size'] * SECTOR_SIZE_BYTES,
+ ))
return ptdata
@@ -1371,4 +1368,9 @@ def discover():
return {}
+def get_resize_fstypes():
+ from curtin.commands.block_meta_v2 import resizers
+ return {fstype for fstype in resizers.keys()}
+
+
# vi: ts=4 expandtab syntax=python
diff --git a/curtin/block/deps.py b/curtin/block/deps.py
index 38581a8..db449d8 100644
--- a/curtin/block/deps.py
+++ b/curtin/block/deps.py
@@ -96,8 +96,12 @@ def detect_required_packages_mapping(osfamily=DISTROS.debian):
if osfamily not in distro_mapping:
raise ValueError('No block package mapping for distro: %s' % osfamily)
- return {1: {'handler': storage_config_required_packages,
- 'mapping': distro_mapping.get(osfamily)}}
+ cfg_map = {
+ 'handler': storage_config_required_packages,
+ 'mapping': distro_mapping.get(osfamily),
+ }
+
+ return {1: cfg_map, 2: cfg_map}
# vi: ts=4 expandtab syntax=python
diff --git a/curtin/block/schemas.py b/curtin/block/schemas.py
index 84a5279..92f88d0 100644
--- a/curtin/block/schemas.py
+++ b/curtin/block/schemas.py
@@ -284,8 +284,13 @@ PARTITION = {
'properties': {
'id': {'$ref': '#/definitions/id'},
'multipath': {'type': 'string'},
+ # Permit path to device as output.
+ # This value is ignored for input.
+ 'path': {'type': 'string',
+ 'pattern': _path_dev},
'name': {'$ref': '#/definitions/name'},
'offset': {'$ref': '#/definitions/size'}, # XXX: This is not used
+ 'resize': {'type': 'boolean'},
'preserve': {'$ref': '#/definitions/preserve'},
'size': {'$ref': '#/definitions/size'},
'uuid': {'$ref': '#/definitions/uuid'}, # XXX: This is not used
@@ -299,6 +304,11 @@ PARTITION = {
'enum': ['bios_grub', 'boot', 'extended', 'home', 'linux',
'logical', 'lvm', 'mbr', 'prep', 'raid', 'swap',
'']},
+ 'partition_type': {'type': 'string',
+ 'oneOf': [
+ {'pattern': r'^0x[0-9a-fA-F]{1,2}$'},
+ {'$ref': '#/definitions/uuid'},
+ ]},
'grub_device': {
'type': ['boolean', 'integer'],
'minimum': 0,
diff --git a/curtin/commands/apt_config.py b/curtin/commands/apt_config.py
index 9ea2d30..4f62a86 100644
--- a/curtin/commands/apt_config.py
+++ b/curtin/commands/apt_config.py
@@ -28,6 +28,9 @@ APT_LISTS = "/var/lib/apt/lists"
APT_CONFIG_FN = "/etc/apt/apt.conf.d/94curtin-config"
APT_PROXY_FN = "/etc/apt/apt.conf.d/90curtin-aptproxy"
+# Files to store pinning information
+APT_PREFERENCES_FN = "/etc/apt/preferences.d/90curtin.pref"
+
# Default keyserver to use
DEFAULT_KEYSERVER = "keyserver.ubuntu.com"
@@ -37,7 +40,7 @@ PRIMARY_ARCH_MIRRORS = {"PRIMARY": "http://archive.ubuntu.com/ubuntu/",
PORTS_MIRRORS = {"PRIMARY": "http://ports.ubuntu.com/ubuntu-ports",
"SECURITY": "http://ports.ubuntu.com/ubuntu-ports"}
PRIMARY_ARCHES = ['amd64', 'i386']
-PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el']
+PORTS_ARCHES = ['s390x', 'arm64', 'armhf', 'powerpc', 'ppc64el', 'riscv64']
APT_SOURCES_PROPOSED = (
"deb $MIRROR $RELEASE-proposed main restricted universe multiverse")
@@ -81,6 +84,11 @@ def handle_apt(cfg, target=None):
except (IOError, OSError):
LOG.exception("Failed to apply proxy or apt config info:")
+ try:
+ apply_apt_preferences(cfg, target + APT_PREFERENCES_FN)
+ except (IOError, OSError):
+ LOG.exception("Failed to apply apt preferences.")
+
# Process 'apt_source -> sources {dict}'
if 'sources' in cfg:
params = mirrors
@@ -571,7 +579,7 @@ def find_apt_mirror_info(cfg, arch=None):
def apply_apt_proxy_config(cfg, proxy_fname, config_fname):
"""apply_apt_proxy_config
- Applies any apt*proxy config from if specified
+ Applies any apt*proxy from config if specified
"""
# Set up any apt proxy
cfgs = (('proxy', 'Acquire::http::Proxy "%s";'),
@@ -584,8 +592,14 @@ def apply_apt_proxy_config(cfg, proxy_fname, config_fname):
LOG.debug("write apt proxy info to %s", proxy_fname)
util.write_file(proxy_fname, '\n'.join(proxies) + '\n')
elif os.path.isfile(proxy_fname):
- util.del_file(proxy_fname)
- LOG.debug("no apt proxy configured, removed %s", proxy_fname)
+ # When $ curtin apt-config is called with no proxy set, it makes
+ # sense to remove the proxy file (if present). Having said that,
+ # this code is also called automatically at the curthooks stage with an
+ # empty configuration. Since the installation of external packages and
+ # execution of unattended-upgrades (which happen after executing the
+ # curthooks) need to use the proxy if specified, we must not let the
+ # curthooks remove the proxy file.
+ pass
if cfg.get('conf', None):
LOG.debug("write apt config info to %s", config_fname)
@@ -595,6 +609,38 @@ def apply_apt_proxy_config(cfg, proxy_fname, config_fname):
LOG.debug("no apt config configured, removed %s", config_fname)
+def preference_to_str(preference):
+ """ Return a textual representation of a given preference as specified in
+ apt_preferences(5).
+ """
+
+ return """\
+Package: {package}
+Pin: {pin}
+Pin-Priority: {pin_priority}
+""".format(package=preference["package"],
+ pin=preference["pin"],
+ pin_priority=preference["pin-priority"])
+
+
+def apply_apt_preferences(cfg, pref_fname):
+ """ Apply apt preferences if any is provided.
+ """
+
+ prefs = cfg.get("preferences")
+ if not prefs:
+ # When $ curtin apt-config is called with no preferences set, it makes
+ # sense to remove the preferences file (if present). Having said that,
+ # this code is also called automatically at the curthooks stage with an
+ # empty configuration. Since the installation of packages (which
+ # happens after executing the curthooks) needs to honor the preferences
+ # set, we must not let the curthooks remove the preferences file.
+ return
+ prefs_as_strings = [preference_to_str(pref) for pref in prefs]
+ LOG.debug("write apt preferences info to %s.", pref_fname)
+ util.write_file(pref_fname, "\n".join(prefs_as_strings))
+
+
def apt_command(args):
""" Main entry point for curtin apt-config standalone command
This does not read the global config as handled by curthooks, but
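
For illustration, a minimal sketch of a curtin config that exercises the new preferences handling (the pin values mirror the example added to examples/apt-source.yaml later in this diff), together with the stanza that preference_to_str would render into /etc/apt/preferences.d/90curtin.pref:

  apt:
    preferences:
      - package: python3-*
        pin: origin *ubuntu.com*
        pin-priority: 200

  # apply_apt_preferences() writes one rendered stanza per entry, roughly:
  #
  #   Package: python3-*
  #   Pin: origin *ubuntu.com*
  #   Pin-Priority: 200
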
diff --git a/curtin/commands/block_meta.py b/curtin/commands/block_meta.py
index 1913cb4..5614883 100644
--- a/curtin/commands/block_meta.py
+++ b/curtin/commands/block_meta.py
@@ -552,16 +552,30 @@ DEVS = set()
def image_handler(info, storage_config, handlers):
path = info['path']
- if os.path.exists(path):
- os.unlink(path)
+ size = int(util.human2bytes(info['size']))
+ sector_size = str(int(util.human2bytes(info.get('sector_size', 512))))
+ if info.get('preserve', False):
+ actual_size = os.stat(path).st_size
+ if size != actual_size:
+ raise RuntimeError(
+ 'image at {} was size {} not {} as expected.'.format(
+ path, actual_size, size))
+ else:
+ if os.path.exists(path):
+ os.unlink(path)
+ try:
+ with open(path, 'wb') as fp:
+ fp.truncate(size)
+ except BaseException:
+ if os.path.exists(path):
+ os.unlink(path)
+ raise
try:
- with open(path, 'wb') as fp:
- fp.truncate(int(util.human2bytes(info['size'])))
dev = util.subp([
- 'losetup', '--show', '--find', path],
+ 'losetup', '--show', '--sector-size', sector_size, '--find', path],
capture=True)[0].strip()
except BaseException:
- if os.path.exists(path):
+ if os.path.exists(path) and not info.get('preserve'):
os.unlink(path)
raise
info['dev'] = dev
@@ -765,12 +779,17 @@ def verify_exists(devpath):
raise RuntimeError("Device %s does not exist" % devpath)
-def verify_size(devpath, expected_size_bytes, part_info):
+def get_part_size_bytes(devpath, part_info):
(found_type, _code) = ptable_uuid_to_flag_entry(part_info.get('type'))
if found_type == 'extended':
found_size_bytes = int(part_info['size']) * 512
else:
found_size_bytes = block.read_sys_block_size_bytes(devpath)
+ return found_size_bytes
+
+
+def verify_size(devpath, expected_size_bytes, part_info):
+ found_size_bytes = get_part_size_bytes(devpath, part_info)
msg = (
'Verifying %s size, expecting %s bytes, found %s bytes' % (
devpath, expected_size_bytes, found_size_bytes))
@@ -807,7 +826,7 @@ def verify_ptable_flag(devpath, expected_flag, label, part_info):
def partition_verify_sfdisk(part_action, label, sfdisk_part_info):
- devpath = sfdisk_part_info['node']
+ devpath = os.path.realpath(sfdisk_part_info['node'])
verify_size(
devpath, int(util.human2bytes(part_action['size'])), sfdisk_part_info)
expected_flag = part_action.get('flag')
@@ -1110,6 +1129,12 @@ def _get_volume_type(device_path):
return lsblock[kname]['TYPE']
+def _get_volume_fstype(device_path):
+ lsblock = block._lsblock([device_path])
+ kname = block.path_to_kname(device_path)
+ return lsblock[kname]['FSTYPE']
+
+
def get_volume_spec(device_path):
"""
Return the most reliable spec for a device per Ubuntu FSTAB wiki
@@ -2004,6 +2029,17 @@ def meta_custom(args):
storage_config_dict = extract_storage_ordered_dict(cfg)
+ version = cfg['storage']['version']
+ if version > 1:
+ from curtin.commands.block_meta_v2 import (
+ disk_handler_v2,
+ partition_handler_v2,
+ )
+ command_handlers.update({
+ 'disk': disk_handler_v2,
+ 'partition': partition_handler_v2,
+ })
+
storage_config_dict = zfsroot_update_storage_config(storage_config_dict)
# set up reportstack
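
To illustrate the new preserve/sector_size behaviour of the image action handled above, a minimal sketch (the path and sizes are hypothetical):

  storage:
    version: 2
    config:
      - type: image
        id: img0
        path: /tmp/disk.img   # with preserve: true the file must already exist at exactly this size
        size: 4GB
        sector_size: 4096     # forwarded to losetup --sector-size; defaults to 512
        preserve: true
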
diff --git a/curtin/commands/block_meta_v2.py b/curtin/commands/block_meta_v2.py
new file mode 100644
index 0000000..b4838f9
--- /dev/null
+++ b/curtin/commands/block_meta_v2.py
@@ -0,0 +1,422 @@
+# This file is part of curtin. See LICENSE file for copyright and license info.
+
+import os
+from typing import Optional
+
+import attr
+
+from curtin import (block, util)
+from curtin.commands.block_meta import (
+ _get_volume_fstype,
+ disk_handler as disk_handler_v1,
+ get_path_to_storage_volume,
+ make_dname,
+ partition_handler as partition_handler_v1,
+ verify_ptable_flag,
+ verify_size,
+ )
+from curtin.log import LOG
+from curtin.storage_config import (
+ GPT_GUID_TO_CURTIN_MAP,
+ select_configs,
+ )
+from curtin.udev import udevadm_settle
+
+
+@attr.s(auto_attribs=True)
+class PartTableEntry:
+ number: int
+ start: int
+ size: int
+ type: str
+ uuid: Optional[str]
+ bootable: bool = False
+
+ def render(self):
+ r = f'{self.number}: '
+ for a in 'start', 'size', 'type', 'uuid':
+ v = getattr(self, a)
+ if v is not None:
+ r += f' {a}={v}'
+ if self.bootable:
+ r += ' bootable'
+ return r
+
+
+ONE_MIB_BYTES = 1 << 20
+
+
+def align_up(size, block_size):
+ return (size + block_size - 1) & ~(block_size - 1)
+
+
+def align_down(size, block_size):
+ return size & ~(block_size - 1)
+
+
+def resize_ext(path, size):
+ util.subp(['e2fsck', '-p', '-f', path])
+ size_k = size // 1024
+ util.subp(['resize2fs', path, f'{size_k}k'])
+
+
+def resize_ntfs(path, size):
+ util.subp(['ntfsresize', '-s', str(size), path])
+
+
+def perform_resize(kname, resize):
+ path = block.kname_to_path(kname)
+ fstype = resize['fstype']
+ size = resize['size']
+ direction = resize['direction']
+ LOG.debug('Resizing %s of type %s %s to %s',
+ path, fstype, direction, size)
+ resizers[fstype](path, size)
+
+
+resizers = {
+ 'ext2': resize_ext,
+ 'ext3': resize_ext,
+ 'ext4': resize_ext,
+ 'ntfs': resize_ntfs,
+}
+
+
+FLAG_TO_GUID = {
+ flag: guid for (guid, (flag, typecode)) in GPT_GUID_TO_CURTIN_MAP.items()
+ }
+FLAG_TO_MBR_TYPE = {
+ flag: typecode[:2].upper() for (guid, (flag, typecode))
+ in GPT_GUID_TO_CURTIN_MAP.items()
+ }
+FLAG_TO_MBR_TYPE['extended'] = '05'
+
+
+class SFDiskPartTable:
+
+ label = None
+
+ def __init__(self, sector_bytes):
+ self.entries = []
+ self.label_id = None
+ self._sector_bytes = sector_bytes
+ if ONE_MIB_BYTES % sector_bytes != 0:
+ raise Exception(
+ f"sector_bytes {sector_bytes} does not divide 1MiB, cannot "
+ "continue!")
+ self.one_mib_sectors = ONE_MIB_BYTES // sector_bytes
+
+ def bytes2sectors(self, amount):
+ return int(util.human2bytes(amount)) // self._sector_bytes
+
+ def sectors2bytes(self, amount):
+ return amount * self._sector_bytes
+
+ def render(self):
+ r = ['label: ' + self.label]
+ if self.label_id:
+ r.extend(['label-id: ' + self.label_id])
+ r.extend([''])
+ r.extend([e.render() for e in self.entries])
+ return '\n'.join(r)
+
+ def apply(self, device):
+ sfdisk_script = self.render()
+ LOG.debug("sfdisk input:\n---\n%s\n---\n", sfdisk_script)
+ util.subp(
+ ['sfdisk', '--no-tell-kernel', '--no-reread', device],
+ data=sfdisk_script.encode('ascii'))
+ util.subp(['partprobe', device])
+ # sfdisk and partprobe (as invoked here) use ioctls to inform the
+ # kernel that the partition table has changed so it can add and remove
+ # device nodes for the partitions as needed. Unfortunately this is
+ # asynchronous: we can return before the nodes are present in /dev (or
+ # /sys for that matter). Calling "udevadm settle" is slightly
+ # incoherent as udev has nothing to do with creating these nodes, but
+ # at the same time, udev won't finish processing the events triggered
+ # by the sfdisk until after the nodes for the partitions have been
+ # updated by the kernel.
+ udevadm_settle()
+
+
+class GPTPartTable(SFDiskPartTable):
+
+ label = 'gpt'
+
+ def add(self, action):
+ number = action.get('number', len(self.entries) + 1)
+ if 'offset' in action:
+ start = self.bytes2sectors(action['offset'])
+ else:
+ if self.entries:
+ prev = self.entries[-1]
+ start = align_up(prev.start + prev.size, self.one_mib_sectors)
+ else:
+ start = self.one_mib_sectors
+ size = self.bytes2sectors(action['size'])
+ uuid = action.get('uuid')
+ type = action.get('partition_type',
+ FLAG_TO_GUID.get(action.get('flag')))
+ entry = PartTableEntry(number, start, size, type, uuid)
+ self.entries.append(entry)
+ return entry
+
+
+class DOSPartTable(SFDiskPartTable):
+
+ label = 'dos'
+ _extended = None
+
+ def add(self, action):
+ flag = action.get('flag', None)
+ start = action.get('offset', None)
+ if start is not None:
+ start = self.bytes2sectors(start)
+ if flag == 'logical':
+ if self._extended is None:
+ raise Exception("logical partition without extended partition")
+ prev = None
+ for entry in reversed(self.entries):
+ if entry.number > 4:
+ prev = entry
+ break
+ # The number of a logical partition cannot be specified (so the
+ # 'number' from the action is completely ignored here) as the
+ # partitions are numbered by the order they are found in the linked
+ # list of logical partitions. sfdisk just cares that we put a
+ # number > 4 here, in fact we could "number" every logical
+ # partition as "5" but it's not hard to put the number that the
+ # partition will end up getting into the sfdisk input.
+ if prev is None:
+ number = 5
+ if start is None:
+ start = align_up(
+ self._extended.start + self.one_mib_sectors,
+ self.one_mib_sectors)
+ else:
+ number = prev.number + 1
+ if start is None:
+ start = align_up(
+ prev.start + prev.size + self.one_mib_sectors,
+ self.one_mib_sectors)
+ else:
+ number = action.get('number', len(self.entries) + 1)
+ if number > 4:
+ raise Exception(
+ "primary partition cannot have number %s" % (number,))
+ if start is None:
+ prev = None
+ for entry in self.entries:
+ if entry.number <= 4:
+ prev = entry
+ if prev is None:
+ start = self.one_mib_sectors
+ else:
+ start = align_up(
+ prev.start + prev.size,
+ self.one_mib_sectors)
+ size = self.bytes2sectors(action['size'])
+ type = action.get('partition_type', FLAG_TO_MBR_TYPE.get(flag))
+ if flag == 'boot':
+ bootable = True
+ else:
+ bootable = None
+ entry = PartTableEntry(
+ number, start, size, type, uuid=None, bootable=bootable)
+ if flag == 'extended':
+ self._extended = entry
+ self.entries.append(entry)
+ return entry
+
+
+def _find_part_info(sfdisk_info, offset):
+ for part in sfdisk_info['partitions']:
+ if part['start'] == offset:
+ return part
+ else:
+ raise Exception(
+ "could not find existing partition by offset")
+
+
+def _wipe_for_action(action):
+ # If a wipe action is specified, do that.
+ if 'wipe' in action:
+ return action['wipe']
+ # Existing partitions are left alone by default.
+ if action.get('preserve', False):
+ return None
+ # New partitions are wiped by default apart from extended partitions, where
+ # it would destroy the EBR.
+ if action.get('flag') == 'extended':
+ return None
+ return 'superblock'
+
+
+def _prepare_resize(storage_config, part_action, table, part_info):
+ if not part_action.get('preserve') or not part_action.get('resize'):
+ return None
+
+ devpath = os.path.realpath(part_info['node'])
+ fstype = _get_volume_fstype(devpath)
+ if fstype == '':
+ return None
+
+ volume = part_action['id']
+ format_actions = select_configs(storage_config, type='format',
+ volume=volume)
+ if len(format_actions) > 1:
+ raise Exception(f'too many format actions for volume {volume}')
+
+ if len(format_actions) == 1:
+ if not format_actions[0].get('preserve'):
+ return None
+
+ target_fstype = format_actions[0]['fstype']
+ msg = (
+ 'Verifying %s format, expecting %s, found %s' % (
+ devpath, fstype, target_fstype))
+ LOG.debug(msg)
+ if fstype != target_fstype:
+ raise RuntimeError(msg)
+
+ msg = 'Resize requested for format %s' % (fstype, )
+ LOG.debug(msg)
+ if fstype not in resizers:
+ raise RuntimeError(msg + ' is unsupported')
+
+ start = table.sectors2bytes(part_info['size'])
+ end = int(util.human2bytes(part_action['size']))
+ if start > end:
+ direction = 'down'
+ elif start < end:
+ direction = 'up'
+ else:
+ return None
+
+ return {
+ 'fstype': fstype,
+ 'size': end,
+ 'direction': direction,
+ }
+
+
+def verify_offset(devpath, part_action, current_info, table):
+ if 'offset' not in part_action:
+ return
+ current_offset = table.sectors2bytes(current_info['start'])
+ action_offset = int(util.human2bytes(part_action['offset']))
+ msg = (
+ 'Verifying %s offset, expecting %s, found %s' % (
+ devpath, current_offset, action_offset))
+ LOG.debug(msg)
+ if current_offset != action_offset:
+ raise RuntimeError(msg)
+
+
+def partition_verify_sfdisk_v2(part_action, label, sfdisk_part_info,
+ storage_config, table):
+ devpath = os.path.realpath(sfdisk_part_info['node'])
+ if not part_action.get('resize'):
+ verify_size(devpath, int(util.human2bytes(part_action['size'])),
+ sfdisk_part_info)
+ verify_offset(devpath, part_action, sfdisk_part_info, table)
+ expected_flag = part_action.get('flag')
+ if expected_flag:
+ verify_ptable_flag(devpath, expected_flag, label, sfdisk_part_info)
+
+
+def disk_handler_v2(info, storage_config, handlers):
+ disk_handler_v1(info, storage_config, handlers)
+
+ part_actions = []
+
+ for action in storage_config.values():
+ if action['type'] == 'partition' and action['device'] == info['id']:
+ part_actions.append(action)
+
+ table_cls = {
+ 'msdos': DOSPartTable,
+ 'gpt': GPTPartTable,
+ }.get(info.get('ptable'))
+
+ if table_cls is None:
+ for action in part_actions:
+ partition_handler_v1(action, storage_config, handlers)
+ return
+
+ disk = get_path_to_storage_volume(info.get('id'), storage_config)
+ (sector_size, _) = block.get_blockdev_sector_size(disk)
+
+ table = table_cls(sector_size)
+ preserved_offsets = set()
+ wipes = {}
+ resizes = {}
+
+ sfdisk_info = None
+ for action in part_actions:
+ entry = table.add(action)
+ if action.get('preserve', False):
+ if sfdisk_info is None:
+ # Lazily computing sfdisk_info is slightly more efficient but
+ # the real reason for doing this is that calling sfdisk_info on
+ # a disk with no partition table logs messages that make the
+ # vmtest infrastructure unhappy.
+ sfdisk_info = block.sfdisk_info(disk)
+ part_info = _find_part_info(sfdisk_info, entry.start)
+ partition_verify_sfdisk_v2(action, sfdisk_info['label'], part_info,
+ storage_config, table)
+ resizes[entry.start] = _prepare_resize(storage_config, action,
+ table, part_info)
+ preserved_offsets.add(entry.start)
+ wipes[entry.start] = _wipe_for_action(action)
+
+ # preserve disk label ids
+ if info.get('preserve') and sfdisk_info is not None:
+ table.label_id = sfdisk_info['id']
+
+ for kname, nr, offset, size in block.sysfs_partition_data(disk):
+ offset_sectors = table.bytes2sectors(offset)
+ resize = resizes.get(offset_sectors)
+ if resize and resize['direction'] == 'down':
+ perform_resize(kname, resize)
+
+ for kname, nr, offset, size in block.sysfs_partition_data(disk):
+ offset_sectors = table.bytes2sectors(offset)
+ if offset_sectors not in preserved_offsets:
+ # Do a superblock wipe of any partitions that are being deleted.
+ block.wipe_volume(block.kname_to_path(kname), 'superblock')
+ elif wipes.get(offset_sectors) is not None:
+ # We do a quick wipe of where any new partitions will be,
+ # because if there is bcache or other metadata there, this
+ # can cause the partition to be used by a storage
+ # subsystem and preventing the exclusive open done by the
+ # wipe_volume call below. See
+ # https://bugs.launchpad.net/curtin/+bug/1718699 for all
+ # the gory details.
+ LOG.debug('Wiping 1M on %s at offset %s', disk, offset)
+ block.zero_file_at_offsets(disk, [offset], exclusive=False)
+
+ table.apply(disk)
+
+ for kname, number, offset, size in block.sysfs_partition_data(disk):
+ offset_sectors = table.bytes2sectors(offset)
+ wipe = wipes[offset_sectors]
+ if wipe is not None:
+ # Wipe the new partitions as needed.
+ block.wipe_volume(block.kname_to_path(kname), wipe)
+ resize = resizes.get(offset_sectors)
+ if resize and resize['direction'] == 'up':
+ perform_resize(kname, resize)
+
+ # Make the names if needed
+ if 'name' in info:
+ for action in part_actions:
+ if action.get('flag') != 'extended':
+ make_dname(action['id'], storage_config)
+
+
+def partition_handler_v2(info, storage_config, handlers):
+ pass
+
+
+# vi: ts=4 expandtab syntax=python
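
As a rough sketch of the flow this new module implements, a version 2 config that keeps an existing ext4 partition and grows it (resize "up" runs after the partition table is rewritten and assumes free space already exists beyond the partition; the serial and sizes are hypothetical):

  storage:
    version: 2
    config:
      - type: disk
        id: disk0
        serial: disk-a
        ptable: gpt
        preserve: true
      - type: partition
        id: disk0-p1
        device: disk0
        number: 1
        offset: 1M          # must match where the partition currently starts
        size: 6G            # larger than the current size, so direction is 'up'
        preserve: true
        resize: true
      - type: format
        id: disk0-p1-fs
        volume: disk0-p1
        fstype: ext4        # must match the filesystem found on the partition
        preserve: true
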
diff --git a/curtin/commands/install_grub.py b/curtin/commands/install_grub.py
index ba46bd2..74ffdf1 100644
--- a/curtin/commands/install_grub.py
+++ b/curtin/commands/install_grub.py
@@ -62,6 +62,9 @@ def get_grub_package_name(target_arch, uefi, rhel_ver=None):
elif target_arch == 'i386':
grub_name = 'grub-efi-ia32'
grub_target = 'i386-efi'
+ elif target_arch == 'riscv64':
+ grub_name = 'grub-efi-riscv64'
+ grub_target = 'riscv64-efi'
else:
raise ValueError('Unsupported UEFI arch: %s' % target_arch)
else:
diff --git a/curtin/distro.py b/curtin/distro.py
index 8b5fbf8..16ce2c5 100644
--- a/curtin/distro.py
+++ b/curtin/distro.py
@@ -23,7 +23,8 @@ from .log import LOG
DistroInfo = namedtuple('DistroInfo', ('variant', 'family'))
DISTRO_NAMES = ['arch', 'centos', 'debian', 'fedora', 'freebsd', 'gentoo',
- 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu']
+ 'opensuse', 'redhat', 'rhel', 'sles', 'suse', 'ubuntu',
+ 'rocky']
# python2.7 lacks PEP 435, so we must make use an alternative for py2.7/3.x
@@ -37,7 +38,7 @@ DISTROS = distro_enum(*DISTRO_NAMES)
OS_FAMILIES = {
DISTROS.debian: [DISTROS.debian, DISTROS.ubuntu],
DISTROS.redhat: [DISTROS.centos, DISTROS.fedora, DISTROS.redhat,
- DISTROS.rhel],
+ DISTROS.rhel, DISTROS.rocky],
DISTROS.gentoo: [DISTROS.gentoo],
DISTROS.freebsd: [DISTROS.freebsd],
DISTROS.suse: [DISTROS.opensuse, DISTROS.sles, DISTROS.suse],
@@ -382,6 +383,9 @@ def system_upgrade(opts=None, target=None, env=None, allow_daemons=False,
osfamily=None):
LOG.debug("Upgrading system in %s", target)
+ if not osfamily:
+ osfamily = get_osfamily(target=target)
+
distro_cfg = {
DISTROS.debian: {'function': run_apt_command,
'subcommands': ('dist-upgrade', 'autoremove')},
diff --git a/curtin/storage_config.py b/curtin/storage_config.py
index 405a1e2..e9e8991 100644
--- a/curtin/storage_config.py
+++ b/curtin/storage_config.py
@@ -79,7 +79,7 @@ STORAGE_CONFIG_SCHEMA = {
'required': ['version', 'config'],
'definitions': schemas.definitions,
'properties': {
- 'version': {'type': 'integer', 'enum': [1]},
+ 'version': {'type': 'integer', 'enum': [1, 2]},
'config': {
'type': 'array',
'items': {
@@ -753,6 +753,8 @@ class BlockdevParser(ProbertParser):
return entry
if entry['type'] == 'partition':
+ if devname:
+ entry['path'] = devname
attrs = blockdev_data['attrs']
if self.is_mpath_partition(blockdev_data):
entry['number'] = int(blockdev_data['DM_PART'])
@@ -798,6 +800,8 @@ class BlockdevParser(ProbertParser):
entry['size'] *= 512
ptype = blockdev_data.get('ID_PART_ENTRY_TYPE')
+ if ptype is not None:
+ entry['partition_type'] = ptype
flag_name, _flag_code = ptable_uuid_to_flag_entry(ptype)
if ptable and ptable.get('label') == 'dos':
@@ -1315,7 +1319,7 @@ def extract_storage_config(probe_data, strict=False):
ordered = (dasd + disk + part + format + lvols + lparts + raids +
dmcrypts + mounts + bcache + zpool + zfs)
- final_config = {'storage': {'version': 1, 'config': ordered}}
+ final_config = {'storage': {'version': 2, 'config': ordered}}
try:
LOG.info('Validating extracted storage config components')
validate_config(final_config['storage'])
@@ -1346,7 +1350,7 @@ def extract_storage_config(probe_data, strict=False):
LOG.debug("Merging storage config dependencies")
merged_config = {
- 'version': 1,
+ 'version': 2,
'config': merge_config_trees_to_list(ctrees)
}
LOG.debug("Merged storage config:\n%s",
@@ -1355,4 +1359,12 @@ def extract_storage_config(probe_data, strict=False):
return {'storage': merged_config}
+def select_configs(storage_config, **kwargs):
+ """ Given a set of key=value arguments, return a list of the configs that
+ match all specified key-value pairs.
+ """
+ return [cfg for cfg in storage_config.values()
+ if all(cfg.get(k) == v for k, v in kwargs.items())]
+
+
# vi: ts=4 expandtab syntax=python
diff --git a/curtin/util.py b/curtin/util.py
index 5b66b55..d3c3b66 100644
--- a/curtin/util.py
+++ b/curtin/util.py
@@ -501,6 +501,13 @@ def chdir(dirname):
os.chdir(curdir)
+@contextmanager
+def mount(src, target):
+ do_mount(src, target)
+ yield
+ do_umount(target)
+
+
def do_mount(src, target, opts=None):
# mount src at target with opts and return True
# if already mounted, return False
diff --git a/debian/changelog b/debian/changelog
index b8e9bb5..da79e7c 100644
--- a/debian/changelog
+++ b/debian/changelog
@@ -1,3 +1,72 @@
+curtin (22.1-0ubuntu1) kinetic; urgency=medium
+
+ * New upstream release. (LP: #1979687)
+ - deb: fix dependencies [Dan Bungert]
+ - tox: drop xenial-py3 from default env list [Dan Bungert]
+ - block/v2: preserve disk label id [Dan Bungert]
+ - block/v2: docs for partition_type [Dan Bungert]
+ - block/v2: unit tests for partition_type [Dan Bungert]
+ - block/v2: raw partition table codes for gpt [Dan Bungert]
+ - block/v2: allow setting raw partition_type value [Dan Bungert]
+ - Make sure curthooks do not discard supplied proxy settings
+ [Olivier Gayot]
+ - block/v2: resize-friendly ordering of wipe [Dan Bungert]
+ - block/v2: handle resize when no format action [Dan Bungert]
+ - block/v2: resize of ntfs [Dan Bungert]
+ - vmtests: remove out of date skip [Dan Bungert]
+ - block: provide get_resize_fstypes [Dan Bungert]
+ - Add support for resize of ext{2,3,4} [Dan Bungert]
+ - Add riscv64 to supported UEFI architectures [William Wilson]
+ - block_meta_v2: call make_dname when required [Michael Hudson-Doyle]
+ - examples: even more tweaks for v2 [Michael Hudson-Doyle]
+ - Add riscv64 support [Heinrich Schuchardt]
+ - examples: stop assuming curtin accounts for overhead of logical
+ partitions [Michael Hudson-Doyle]
+ - block_meta_v2: zero start of partitions before they are created
+ [Michael Hudson-Doyle]
+ - examples: enlarge / for some more vmtests [Michael Hudson-Doyle]
+ - skip BionicTestPartitionExistingRAID.test_correct_ptype
+ [Michael Hudson-Doyle]
+ - examples: boost size of / in multipath-reuse.yaml [Michael Hudson-Doyle]
+ - examples: add offsets to preserved partitions [Michael Hudson-Doyle]
+ - block_meta_v2: change how we invoke sfdisk again, restore partprobe call
+ [Michael Hudson-Doyle]
+ - block_meta_v2: fix partitioning a device with sector size != 512
+ [Michael Hudson-Doyle]
+ - block_meta_v2: fix implicit offset calculation for dos partitions
+ [Michael Hudson-Doyle]
+ - block_meta_v2: do not use aliases for partition types
+ [Michael Hudson-Doyle]
+ - Remove CentOS 6 tests. [Michael Hudson-Doyle]
+ - vmtests: boost the size of / in a few tests [Michael Hudson-Doyle]
+ - examples: sleep after creating bcache device in preserve-bcache.yaml
+ [Michael Hudson-Doyle]
+ - vmtests: fix parted invocation in partition-existing-raid.yaml
+ [Michael Hudson-Doyle]
+ - vmtests: bump VM memory size to 2048 MiB for all tests
+ [Michael Hudson-Doyle]
+ - vmtests: drop assertion that clear-holders ran [Michael Hudson-Doyle]
+ - block_meta_v2: a few more fixes for v2 partitioning
+ [Michael Hudson-Doyle]
+ - block_meta: call realpath on partition node returned by sfdisk
+ [Michael Hudson-Doyle]
+ - Add rocky linux as a RHEL-like variant [Dimitri John Ledkov]
+ - Update pylint version in tox.ini [Michael Hudson-Doyle]
+ - block_meta: implement v2 partitioning [Michael Hudson-Doyle]
+ - Stop running CI against Python 2 [Michael Hudson-Doyle]
+ - Make sure curthooks do not discard APT preferences [Olivier Gayot]
+ - Remove leftover debug print statement [Olivier Gayot]
+ - Fix format of examples/apt-source.yaml [Olivier Gayot]
+ - Implement support for APT preferences in apt-config [Olivier Gayot]
+ - build-deb: changelog gen with dch [Dan Bungert]
+ - vmtests uefi: relax the uefi check [Dan Bungert]
+ - block: output partition device path [Dan Bungert]
+ - support version 2 curtin storage configs [Michael Hudson-Doyle]
+ - system-upgrade: lookup os family [Dan Bungert]
+ - add preserve: true support to the image action [Michael Hudson-Doyle]
+
+ -- Dan Bungert <daniel.bungert@xxxxxxxxxxxxx> Mon, 27 Jun 2022 16:24:54 -0600
+
curtin (21.3-0ubuntu1) jammy; urgency=medium
* New upstream release.
diff --git a/debian/changelog.trunk b/debian/changelog.trunk
deleted file mode 100644
index 4d943c0..0000000
--- a/debian/changelog.trunk
+++ /dev/null
@@ -1,5 +0,0 @@
-curtin (UPSTREAM_VER-0ubuntu1) UNRELEASED; urgency=low
-
- * Initial release
-
- -- Scott Moser <smoser@xxxxxxxxxx> Mon, 29 Jul 2013 16:12:09 -0400
diff --git a/debian/control b/debian/control
index 9f0b71d..a35cbf6 100644
--- a/debian/control
+++ b/debian/control
@@ -7,6 +7,7 @@ Build-Depends: debhelper (>= 7),
dh-python,
python3,
python3-apt,
+ python3-attr,
python3-coverage,
python3-mock,
python3-nose,
@@ -14,6 +15,8 @@ Build-Depends: debhelper (>= 7),
python3-setuptools,
python3-yaml
Homepage: http://launchpad.net/curtin
+Vcs-Git: https://git.launchpad.net/curtin
+Vcs-Browser: https://git.launchpad.net/curtin
X-Python3-Version: >= 3.2
Package: curtin
@@ -51,6 +54,7 @@ Architecture: all
Priority: extra
Depends: curtin-common (= ${binary:Version}),
python3-apt,
+ python3-attr,
python3-oauthlib,
python3-yaml,
wget,
diff --git a/doc/topics/apt_source.rst b/doc/topics/apt_source.rst
index cf0f8bd..924ee80 100644
--- a/doc/topics/apt_source.rst
+++ b/doc/topics/apt_source.rst
@@ -31,6 +31,8 @@ Features
- add arbitrary apt.conf settings
+ - add arbitrary apt preferences
+
- provide debconf configurations
- disabling suites (=pockets)
diff --git a/doc/topics/storage.rst b/doc/topics/storage.rst
index 0f33ec0..bbff909 100644
--- a/doc/topics/storage.rst
+++ b/doc/topics/storage.rst
@@ -13,8 +13,7 @@ Custom storage configuration is handled by the ``block-meta custom`` command
in curtin. Partitioning layout is read as a list of in-order modifications to
make to achieve the desired configuration. The top level configuration key
containing this configuration is ``storage``. This key should contain a
-dictionary with at least a version number and the configuration list. The
-current config specification is ``version: 1``.
+dictionary with at least a version number and the configuration list.
**Config Example**::
@@ -27,6 +26,20 @@ current config specification is ``version: 1``.
serial: QM00002
model: QEMU_HARDDISK
+Config versions
+---------------
+
+The current version of curtin supports versions ``1`` and ``2``. These
+only differ in the interpretation of ``partition`` actions at this
+time. ``lvm_partition`` actions will be interpreted differently at
+some point in the future.
+
+.. note::
+
+ Config version ``2`` is under active development and subject to change.
+ Users are advised to use version ``1`` unless features enabled by version
+ ``2`` are required.
+
Configuration Types
-------------------
Each entry in the config list is a dictionary with several keys which vary
@@ -322,13 +335,41 @@ The partition command creates a single partition on a disk. Curtin only needs
to be told which disk to use and the size of the partition. Additional options
are available.
+Partition actions are interpreted differently according to the version of the
+storage config.
+
+ * For version 1 configs, the actions are handled one by one and each
+ partition is created (or assumed to exist, in the ``preserve: true`` case)
+ just after that described by the previous action.
+
+ * For version 2 configs, the actions are bundled together to create a
+ complete description of the partition table, and the ``offset`` of each
+ action is respected if present. Any partitions that already exist but are
+ not referenced in the new config are (superblock-) wiped and deleted.
+
+ * Because the numbering of logical partitions is not stable (i.e. if there
+ are two logical partitions numbered 5 and 6, and partition 5 is deleted,
+ what was partition 6 will become partition 5), curtin checks if a
+ partition is deleted or not by checking for the presence of a partition
+ action with a matching offset.
+
+If the disk is being completely repartitioned, the two schemes are effectively
+the same.
+
**number**: *<number>*
-The partition number can be specified using ``number``. However, numbers must
-be in order and some situations, such as extended/logical partitions on msdos
-partition tables will require special numbering, so it maybe better to omit
-the partition number. If the ``number`` key is not present, curtin will attempt
-determine the right number to use.
+The partition number can be specified using ``number``.
+
+For GPT partition tables, this will just be the slot in the partition table
+that is used to describe this partition.
+
+For DOS partition tables, a primary or extended partition must have a number
+less than or equal to 4. Logical partitions have numbers 5 or greater but are
+numbered by the order they are found when parsing the partitions, so the
+``number`` field is ignored for them.
+
+If the ``number`` key is not present, curtin will attempt to determine the right
+number to use.
**size**: *<size>*
@@ -338,8 +379,15 @@ the appropriate SI prefix, i.e. *B, k, M, G, T...*
.. note::
- Curtin does not adjust size values. If you specific a size that exceeds the
- capacity of a device then installation will fail.
+ Curtin does not adjust or inspect size values. If you specify a size that
+ exceeds the capacity of a device then installation will fail.
+
+**offset**: *<offset>*
+
+The offset at which to create the partition. Only respected in a version 2
+config. If the offset field is not present, the partition will be placed after
+that described by the preceding (logical or primary, if appropriate) partition
+action, or at the start of the disk (or extended partition, as appropriate).
**device**: *<device id>*
@@ -368,9 +416,7 @@ only apply to gpt partition tables.
The *logical/extended* partition flags can be used to create logical partitions
on a msdos table. An extended partition should be created containing all of the
empty space on the drive, and logical partitions can be created within it. A
-extended partition must already be present to create logical partitions. If the
-``number`` flag is set for an extended partition it must be set to 4, and
-each logical partition should be numbered starting from 5.
+extended partition must already be present to create logical partitions.
On msdos partition tables, the *boot* flag sets the boot parameter to that
partition. On gpt partition tables, the boot flag sets the esp flag on the
@@ -385,10 +431,33 @@ partition with the *bios_grub* flag is needed. This partition should be placed
at the beginning of the disk and should be 1MB in size. It should not contain a
filesystem or be mounted anywhere on the system.
+**partition_type**: *msdos: byte value in 0xnn style; gpt: GUID*
+
+Only applicable to v2 storage configuration. If both ``partition_type`` and
+``flag`` are set, ``partition_type`` dictates the actual type.
+
+The ``partition_type`` field allows for setting arbitrary partition type values
+that do not have a matching ``flag``, or cases that are not handled by the
+``flag`` system. For example, since the *boot* flag results in both setting
+the bootable state for a MSDOS partition table and setting it to type *0xEF*,
+one can override this behavior and achieve a bootable partition of a different
+type by using ``flag``: *boot* and using ``partition_type``.
+
**preserve**: *true, false*
If the preserve flag is set to true, curtin will verify that the partition
exists and that the ``size`` and ``flag`` match the configuration provided.
+See also the ``resize`` flag, which adjusts this behavior.
+
+**resize**: *true, false*
+
+Only applicable to v2 storage configuration.
+If the ``preserve`` flag is set to false, this value is not applicable.
+If the ``preserve`` flag is set to true, curtin will adjust the size of the
+partition to the new size. When adjusting smaller, the size of the contents
+must permit that. When adjusting larger, there must already be a gap beyond
+the partition in question.
+Resize is supported on filesystems of types ext2, ext3, ext4, ntfs.
**name**: *<name>*
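
A short sketch tying together the version 2 ``offset`` and ``partition_type`` fields described above, on a DOS partition table (0x83 and 0x07 are the standard MBR type bytes for Linux and NTFS; the serial is hypothetical):

  storage:
    version: 2
    config:
      - type: disk
        id: disk0
        serial: disk-a
        ptable: msdos
        wipe: superblock
      - type: partition
        id: disk0-p1
        device: disk0
        number: 1
        offset: 1M               # explicit start; omit to place it after the previous partition
        size: 2G
        flag: boot
        partition_type: '0x83'   # overrides the type implied by the flag; the bootable state is kept
      - type: partition
        id: disk0-p2
        device: disk0
        number: 2
        size: 4G
        partition_type: '0x07'
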
diff --git a/examples/apt-source.yaml b/examples/apt-source.yaml
index f0f7108..30e30a2 100644
--- a/examples/apt-source.yaml
+++ b/examples/apt-source.yaml
@@ -77,7 +77,7 @@ apt:
# arches is list of architectures the following config applies to
# the special keyword "default" applies to any architecture not explicitly
# listed.
- - arches: [amd64, i386, default]
+ - arches: [amd64, i386, default]
# uri is just defining the target as-is
uri: http://us.archive.ubuntu.com/ubuntu
#
@@ -100,7 +100,7 @@ apt:
# security is optional, if not defined it is set to the same value as primary
security:
uri: http://security.ubuntu.com/ubuntu
- [...]
+ # [...]
# if no mirrors are specified at all, or all lookups fail it will use:
# primary: http://archive.ubuntu.com/ubuntu
@@ -152,6 +152,18 @@ apt:
# The following example is also the builtin default if nothing is specified
add_apt_repo_match: '^[\w-]+:\w'
+ # 1.9 preferences
+ #
+ # Any apt preferences that will be made available to apt
+ # see the APT_PREFERENCES(5) man page for details about what can be specified
+ preferences:
+ - package: python3-*
+ pin: origin *ubuntu.com*
+ pin-priority: 200
+ - package: python-*
+ pin: origin *ubuntu.com*
+ pin-priority: -1
+
##############################################################################
# Section 2: source list entries
diff --git a/examples/tests/basic.yaml b/examples/tests/basic.yaml
index 82f5ad1..9b5f7ea 100644
--- a/examples/tests/basic.yaml
+++ b/examples/tests/basic.yaml
@@ -17,7 +17,7 @@ storage:
- id: sda1
type: partition
number: 1
- size: 3GB
+ size: 4GB
device: sda
flag: boot
- id: sda2
diff --git a/examples/tests/basic_iscsi.yaml b/examples/tests/basic_iscsi.yaml
index 88516ca..4e9f89a 100644
--- a/examples/tests/basic_iscsi.yaml
+++ b/examples/tests/basic_iscsi.yaml
@@ -12,7 +12,7 @@ storage:
- id: vdb1
type: partition
number: 1
- size: 3GB
+ size: 4GB
device: vdb
flag: boot
- id: vdb2
diff --git a/examples/tests/bcache-partitions.yaml b/examples/tests/bcache-partitions.yaml
index 20ccddc..90861bc 100644
--- a/examples/tests/bcache-partitions.yaml
+++ b/examples/tests/bcache-partitions.yaml
@@ -18,7 +18,6 @@ storage:
type: disk
name: rotary1
serial: disk-c
- ptable: gpt
wipe: superblock
- id: id_rotary0_part1
type: partition
diff --git a/examples/tests/centos6_basic.yaml b/examples/tests/centos6_basic.yaml
deleted file mode 100644
index 90fc584..0000000
--- a/examples/tests/centos6_basic.yaml
+++ /dev/null
@@ -1,101 +0,0 @@
-showtrace: true
-storage:
- version: 1
- config:
- - id: sda
- type: disk
- ptable: msdos
- model: QEMU HARDDISK
- serial: disk-a
- name: main_disk_with_in/\&valid@#dname
- wipe: superblock
- grub_device: true
- - id: sda1
- type: partition
- number: 1
- size: 3GB
- device: sda
- flag: boot
- - id: sda2
- type: partition
- number: 2
- size: 1GB
- device: sda
- - id: sda3
- type: partition
- number: 3
- size: 1GB
- device: sda
- name: swap
- - id: sda1_root
- type: format
- fstype: ext3
- volume: sda1
- label: 'cloudimg-rootfs'
- - id: sda2_home
- type: format
- fstype: ext4
- volume: sda2
- - id: sda3_swap
- type: format
- fstype: swap
- volume: sda3
- - id: sda1_mount
- type: mount
- path: /
- device: sda1_root
- - id: sda2_mount
- type: mount
- path: /home
- device: sda2_home
- - id: sparedisk_id
- type: disk
- serial: disk-b
- name: sparedisk
- wipe: superblock
- - id: sparedisk_fat_fmt_id
- type: format
- fstype: fat32
- volume: sparedisk_id
- - id: btrfs_disk_id
- type: disk
- serial: disk-c
- name: btrfs_volume
- wipe: superblock
- - id: btrfs_disk_fmt_id
- type: format
- fstype: btrfs
- volume: btrfs_disk_id
- - id: btrfs_disk_mnt_id
- type: mount
- path: /btrfs
- options: 'defaults,noatime'
- device: btrfs_disk_fmt_id
- - id: pnum_disk
- type: disk
- serial: disk-d
- name: pnum_disk
- wipe: superblock
- ptable: gpt
- - id: pnum_disk_p1
- type: partition
- number: 1
- size: 1GB
- device: pnum_disk
- - id: pnum_disk_p2
- type: partition
- number: 2
- size: 8MB
- device: pnum_disk
- flag: prep
- wipe: zero
- name: prep
- - id: pnum_disk_p3
- type: partition
- number: 10
- size: 1GB
- device: pnum_disk
- - id: swap_mnt
- type: mount
- path: "none"
- device: sda3_swap
diff --git a/examples/tests/lvm.yaml b/examples/tests/lvm.yaml
index 8eab6b0..1018d1b 100644
--- a/examples/tests/lvm.yaml
+++ b/examples/tests/lvm.yaml
@@ -23,7 +23,7 @@ storage:
flag: boot
- id: sda_extended
type: partition
- size: 5G
+ size: 5.5G
flag: extended
device: sda
- id: sda2
diff --git a/examples/tests/lvm_iscsi.yaml b/examples/tests/lvm_iscsi.yaml
index dd7c2b6..1f2ad01 100644
--- a/examples/tests/lvm_iscsi.yaml
+++ b/examples/tests/lvm_iscsi.yaml
@@ -12,7 +12,7 @@ storage:
- id: vdb1
type: partition
number: 1
- size: 3GB
+ size: 4GB
device: vdb
flag: boot
- id: vdb2
@@ -44,7 +44,7 @@ storage:
wipe: superblock
- id: sda_extended
type: partition
- size: 5G
+ size: 5.5G
flag: extended
device: sda
- id: sda1
@@ -100,7 +100,7 @@ storage:
wipe: superblock
- id: sdb_extended
type: partition
- size: 4G
+ size: 4.5G
flag: extended
device: sdb
- id: sdb1
diff --git a/examples/tests/mirrorboot.yaml b/examples/tests/mirrorboot.yaml
index 42fdc93..83217d8 100644
--- a/examples/tests/mirrorboot.yaml
+++ b/examples/tests/mirrorboot.yaml
@@ -17,7 +17,7 @@ storage:
flag: bios_grub
- id: sda1
type: partition
- size: 3GB
+ size: 3.5GB
device: sda
- id: sdb
type: disk
@@ -28,7 +28,7 @@ storage:
name: second_disk
- id: sdb1
type: partition
- size: 3GB
+ size: 3.5GB
device: sdb
- id: mddevice
name: md0
diff --git a/examples/tests/multipath-lvm-part-wipe.yaml b/examples/tests/multipath-lvm-part-wipe.yaml
index cb18a08..8400d78 100644
--- a/examples/tests/multipath-lvm-part-wipe.yaml
+++ b/examples/tests/multipath-lvm-part-wipe.yaml
@@ -113,7 +113,7 @@ storage:
- id: root_vg_lv1
type: lvm_partition
name: lv1_root
- size: 2.5G
+ size: 3.5G
volgroup: root_vg
- id: lv1_root_fs
type: format
diff --git a/examples/tests/multipath-reuse.yaml b/examples/tests/multipath-reuse.yaml
index 24e193e..f008848 100644
--- a/examples/tests/multipath-reuse.yaml
+++ b/examples/tests/multipath-reuse.yaml
@@ -6,8 +6,8 @@ bucket:
- &setup |
parted /dev/disk/by-id/dm-name-mpatha --script -- \
mklabel msdos \
- mkpart primary ext4 1GiB 4GiB \
- mkpart primary ext4 4GiB 5GiB \
+ mkpart primary ext4 1GiB 5GiB \
+ mkpart primary ext4 5GiB 6GiB \
set 1 boot on
udevadm settle
@@ -32,16 +32,18 @@ storage:
- id: sda1
type: partition
number: 1
- size: 3GB
+ size: 4GB
device: sda
flag: boot
preserve: true
+ offset: 1G
- id: sda2
type: partition
number: 2
size: 1GB
device: sda
preserve: true
+ offset: 5G
- id: sda1_root
type: format
fstype: ext4
diff --git a/examples/tests/multipath.yaml b/examples/tests/multipath.yaml
index 11838d1..a3b536f 100644
--- a/examples/tests/multipath.yaml
+++ b/examples/tests/multipath.yaml
@@ -16,7 +16,7 @@ storage:
- id: sda1
type: partition
number: 1
- size: 3GB
+ size: 4GB
device: sda
flag: boot
wipe: superblock
diff --git a/examples/tests/partition-existing-raid.yaml b/examples/tests/partition-existing-raid.yaml
index 07cf8d2..423ab85 100644
--- a/examples/tests/partition-existing-raid.yaml
+++ b/examples/tests/partition-existing-raid.yaml
@@ -15,7 +15,7 @@ bucket:
/dev/disk/by-id/virtio-disk-b-part1 /dev/disk/by-id/virtio-disk-c-part1
udevadm settle
parted /dev/md1 --script -- \
- mklabel dos
+ mklabel msdos
udevadm settle
mdadm --stop /dev/md1
udevadm settle
@@ -51,7 +51,7 @@ storage:
id: id_disk0_part2
device: id_disk0
number: 2
- size: 3G
+ size: 4G
- type: partition
id: id_disk0_part3
device: id_disk0
@@ -63,6 +63,7 @@ storage:
flag: boot
number: 1
size: 8G
+ offset: 1G
preserve: true
- type: partition
id: id_disk2_part1
@@ -70,6 +71,7 @@ storage:
flag: boot
number: 1
size: 8G
+ offset: 1G
preserve: true
- type: raid
id: raid-md1
diff --git a/examples/tests/preserve-bcache.yaml b/examples/tests/preserve-bcache.yaml
index f614f37..13f8d54 100644
--- a/examples/tests/preserve-bcache.yaml
+++ b/examples/tests/preserve-bcache.yaml
@@ -10,6 +10,7 @@ bucket:
udevadm settle
make-bcache -C /dev/disk/by-id/virtio-disk-b \
-B /dev/disk/by-id/virtio-disk-a-part2 --writeback
+ sleep 1
udevadm settle
mkfs.ext4 /dev/bcache0
mount /dev/bcache0 /mnt
@@ -46,6 +47,7 @@ storage:
size: 1024M
preserve: true
wipe: superblock
+ offset: 1M
- id: id_rotary0_part2
type: partition
name: rotary0-part2
@@ -53,6 +55,7 @@ storage:
number: 2
size: 8G
preserve: true
+ offset: 1026M
- id: id_bcache0
type: bcache
name: bcache0
diff --git a/examples/tests/preserve-lvm.yaml b/examples/tests/preserve-lvm.yaml
index a939759..58bfa1f 100644
--- a/examples/tests/preserve-lvm.yaml
+++ b/examples/tests/preserve-lvm.yaml
@@ -47,6 +47,7 @@ storage:
device: main_disk
flag: bios_grub
preserve: true
+ offset: 1MB
- id: main_disk_p2
type: partition
number: 2
@@ -54,6 +55,7 @@ storage:
device: main_disk
flag: boot
preserve: true
+ offset: 3MB
- id: root_vg
type: lvm_volgroup
name: root_vg
diff --git a/examples/tests/preserve-partition-wipe-vg-simple.yaml b/examples/tests/preserve-partition-wipe-vg-simple.yaml
index e1f0b9e..9876b42 100644
--- a/examples/tests/preserve-partition-wipe-vg-simple.yaml
+++ b/examples/tests/preserve-partition-wipe-vg-simple.yaml
@@ -39,6 +39,7 @@ storage:
number: 1
type: partition
id: disk-sda-part-1
+ offset: 2M
- device: disk-sda
size: 3G
flag: linux
@@ -47,6 +48,7 @@ storage:
wipe: zero
type: partition
id: disk-sda-part-2
+ offset: 4G
- fstype: ext4
volume: disk-sda-part-2
preserve: false
diff --git a/examples/tests/preserve-partition-wipe-vg.yaml b/examples/tests/preserve-partition-wipe-vg.yaml
index 27a4235..5e35a54 100644
--- a/examples/tests/preserve-partition-wipe-vg.yaml
+++ b/examples/tests/preserve-partition-wipe-vg.yaml
@@ -51,6 +51,7 @@ storage:
wipe: zero
type: partition
id: disk-sda-part-1
+ offset: 2M
- device: disk-sda
size: 3G
flag: linux
@@ -58,6 +59,7 @@ storage:
wipe: zero
type: partition
id: disk-sda-part-2
+ offset: 1G
- device: disk-sdb
flag: linux
size: 3G
@@ -65,12 +67,14 @@ storage:
wipe: zero
type: partition
id: disk-sdb-part-1
+ offset: 1G
- device: disk-sdb
flag: linux
size: 3G
preserve: true
type: partition
id: disk-sdb-part-2
+ offset: 4G
- fstype: ext4
volume: disk-sda-part-2
preserve: false
diff --git a/examples/tests/preserve-raid.yaml b/examples/tests/preserve-raid.yaml
index 9e0489f..3d39c80 100644
--- a/examples/tests/preserve-raid.yaml
+++ b/examples/tests/preserve-raid.yaml
@@ -52,7 +52,7 @@ storage:
id: id_disk0_part2
device: id_disk0
number: 2
- size: 3G
+ size: 4G
- type: partition
id: id_disk0_part3
device: id_disk0
@@ -65,6 +65,7 @@ storage:
number: 1
size: 8G
preserve: true
+ offset: 1G
- type: partition
id: id_disk2_part1
device: id_disk2
@@ -72,6 +73,7 @@ storage:
number: 1
size: 8G
preserve: true
+ offset: 1G
- type: raid
id: raid-md1
name: md1
diff --git a/examples/tests/preserve.yaml b/examples/tests/preserve.yaml
index de8a975..2cf692e 100644
--- a/examples/tests/preserve.yaml
+++ b/examples/tests/preserve.yaml
@@ -6,8 +6,8 @@ bucket:
mklabel gpt \
mkpart primary ext4 2MiB 514MiB \
set 1 esp on \
- mkpart primary ext4 1GiB 4GiB \
- mkpart primary ext4 4GiB 7GiB
+ mkpart primary ext4 1GiB 5GiB \
+ mkpart primary ext4 6GiB 9GiB
udevadm settle
mkfs.ext4 /dev/disk/by-id/virtio-disk-a-part3
mount /dev/disk/by-id/virtio-disk-a-part3 /mnt
@@ -32,18 +32,21 @@ storage:
number: 1
size: 512M
preserve: true
+ offset: 2M
- type: partition
id: id_disk0_part2
device: id_disk0
number: 2
- size: 3G
+ size: 4G
preserve: true
+ offset: 1G
- type: partition
id: id_disk0_part3
device: id_disk0
number: 3
size: 3G
preserve: true
+ offset: 6G
- type: format
id: id_efi_format
volume: id_disk0_part1
diff --git a/examples/tests/reuse-lvm-member-partition.yaml b/examples/tests/reuse-lvm-member-partition.yaml
index fd8f602..cad1474 100644
--- a/examples/tests/reuse-lvm-member-partition.yaml
+++ b/examples/tests/reuse-lvm-member-partition.yaml
@@ -69,12 +69,14 @@ storage:
flag: boot
number: 1
size: 1G
+ offset: 1G
- type: partition
id: id_disk0_part2
preserve: true
device: id_disk0
number: 2
size: 7G
+ offset: 2G
- type: format
id: id_efi_format
volume: id_disk0_part1
diff --git a/examples/tests/reuse-msdos-partitions.yaml b/examples/tests/reuse-msdos-partitions.yaml
index d444517..f3c6974 100644
--- a/examples/tests/reuse-msdos-partitions.yaml
+++ b/examples/tests/reuse-msdos-partitions.yaml
@@ -43,6 +43,7 @@ storage:
flag: boot
preserve: true
wipe: superblock
+ offset: 1M
- id: sda2
type: partition
number: 2
@@ -50,6 +51,7 @@ storage:
flag: extended
device: sda
preserve: true
+ offset: 3074M
- id: sda5
type: partition
number: 5
@@ -58,6 +60,7 @@ storage:
device: sda
preserve: true
wipe: superblock
+ offset: 3075M
- id: sda6
type: partition
number: 6
@@ -66,6 +69,7 @@ storage:
device: sda
preserve: true
wipe: superblock
+ offset: 5123M
- id: sda1_root
type: format
fstype: ext4
diff --git a/examples/tests/reuse-raid-member-wipe-partition.yaml b/examples/tests/reuse-raid-member-wipe-partition.yaml
index d20b79c..136f96e 100644
--- a/examples/tests/reuse-raid-member-wipe-partition.yaml
+++ b/examples/tests/reuse-raid-member-wipe-partition.yaml
@@ -49,6 +49,7 @@ storage:
flag: boot
number: 1
size: 1G
+ offset: 1G
- type: partition
id: id_disk0_part2
preserve: true
@@ -56,6 +57,7 @@ storage:
number: 2
size: 7G
wipe: superblock
+ offset: 2G
- type: format
id: id_efi_format
volume: id_disk0_part1
diff --git a/examples/tests/uefi_basic.yaml b/examples/tests/uefi_basic.yaml
index 91a72ae..e6ad351 100644
--- a/examples/tests/uefi_basic.yaml
+++ b/examples/tests/uefi_basic.yaml
@@ -31,7 +31,7 @@ storage:
- device: id_disk0
id: id_disk0_part2
number: 2
- size: 3G
+ size: 4G
type: partition
wipe: superblock
- fstype: fat32
diff --git a/examples/tests/uefi_reuse_esp.yaml b/examples/tests/uefi_reuse_esp.yaml
index 7ad7fdf..0232019 100644
--- a/examples/tests/uefi_reuse_esp.yaml
+++ b/examples/tests/uefi_reuse_esp.yaml
@@ -8,7 +8,7 @@ bucket:
mklabel gpt \
mkpart primary fat32 1MiB 513MiB \
set 1 esp on \
- mkpart primary ext4 513MiB 3585MiB
+ mkpart primary ext4 513MiB 4609MiB
udevadm settle
mkfs.vfat -I -n EFI -F 32 /dev/disk/by-id/virtio-disk-a-part1
@@ -67,9 +67,10 @@ storage:
- device: id_disk0
id: id_disk0_part2
number: 2
- size: 3G
+ size: 4G
type: partition
preserve: true
+ offset: 513M
- fstype: fat32
id: id_efi_format
label: efi
diff --git a/pylintrc b/pylintrc
index 1b5fa1a..7a50917 100644
--- a/pylintrc
+++ b/pylintrc
@@ -7,7 +7,7 @@ jobs=0
# List of members which are set dynamically and missed by pylint inference
# system, and so shouldn't trigger E1101 when accessed. Python regular
# expressions are accepted.
-generated-members=redhat,centos,fedora,debian,suse,opensuse,sles,arch,ubuntu,rhel,freebsd,gentoo
+generated-members=redhat,centos,fedora,debian,suse,opensuse,sles,arch,ubuntu,rhel,freebsd,gentoo,rocky
# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
diff --git a/test-requirements.txt b/test-requirements.txt
index f6404c1..1970d03 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -3,3 +3,4 @@ mock
nose
pyflakes
coverage
+parameterized
diff --git a/tests/integration/test_block_meta.py b/tests/integration/test_block_meta.py
index bd602b2..e542017 100644
--- a/tests/integration/test_block_meta.py
+++ b/tests/integration/test_block_meta.py
@@ -1,13 +1,21 @@
# This file is part of curtin. See LICENSE file for copyright and license info.
-from collections import namedtuple
+import dataclasses
+from dataclasses import dataclass
import contextlib
+import json
+import os
+from parameterized import parameterized
+import re
import sys
+from typing import Optional
import yaml
-import os
from curtin import block, udev, util
+from curtin.commands.block_meta import _get_volume_fstype
+from curtin.commands.block_meta_v2 import ONE_MIB_BYTES
+
from tests.unittests.helpers import CiTestCase
from tests.integration.webserv import ImageServer
@@ -17,10 +25,11 @@ class IntegrationTestCase(CiTestCase):
@contextlib.contextmanager
-def loop_dev(image):
- dev = util.subp(
- ['losetup', '--show', '--find', '--partscan', image],
- capture=True, decode='ignore')[0].strip()
+def loop_dev(image, sector_size=512):
+ dev = util.subp([
+ 'losetup', '--show', '--find', '--partscan',
+ '--sector-size', str(sector_size), image,
+ ], capture=True, decode='ignore')[0].strip()
try:
udev.udevadm_trigger([dev])
yield dev
@@ -28,18 +37,114 @@ def loop_dev(image):
util.subp(['losetup', '--detach', dev])
-PartData = namedtuple("PartData", ('number', 'offset', 'size'))
+@dataclass(order=True)
+class PartData:
+ number: Optional[int] = None
+ offset: Optional[int] = None
+ size: Optional[int] = None
+ boot: Optional[bool] = None
+ partition_type: Optional[str] = None
+
+ # test cases may initialize the values they care about
+ # test utilities shall initialize all fields
+ def assertFieldsAreNotNone(self):
+ for field in dataclasses.fields(self):
+ assert getattr(self, field.name) is not None
+
+ def __eq__(self, other):
+ for field in dataclasses.fields(self):
+ myval = getattr(self, field.name)
+ otherval = getattr(other, field.name)
+ if myval is not None and otherval is not None \
+ and myval != otherval:
+ return False
+ return True
+
+
+def _get_ext_size(dev, part_action):
+ num = part_action['number']
+ cmd = ['dumpe2fs', '-h', f'{dev}p{num}']
+ out = util.subp(cmd, capture=True)[0]
+ for line in out.splitlines():
+ if line.startswith('Block count'):
+ block_count = line.split(':')[1].strip()
+ if line.startswith('Block size'):
+ block_size = line.split(':')[1].strip()
+ return int(block_count) * int(block_size)
+
+
+def _get_ntfs_size(dev, part_action):
+ num = part_action['number']
+ cmd = ['ntfsresize',
+ '--no-action',
+ '--force', # needed post-resize, which otherwise demands a CHKDSK
+ '--info', f'{dev}p{num}']
+ out = util.subp(cmd, capture=True)[0]
+ # Sample input:
+ # Current volume size: 41939456 bytes (42 MB)
+ volsize_matcher = re.compile(r'^Current volume size: ([0-9]+) bytes')
+ for line in out.splitlines():
+ m = volsize_matcher.match(line)
+ if m:
+ return int(m.group(1))
+ raise Exception('ntfs volume size not found')
+
+
+_get_fs_sizers = {
+ 'ext2': _get_ext_size,
+ 'ext3': _get_ext_size,
+ 'ext4': _get_ext_size,
+ 'ntfs': _get_ntfs_size,
+}
+
+
+def _get_filesystem_size(dev, part_action, fstype='ext4'):
+ if fstype not in _get_fs_sizers.keys():
+ raise Exception(f'_get_filesystem_size: no support for {fstype}')
+ return _get_fs_sizers[fstype](dev, part_action)
+
+
+def _get_extended_partition_size(dev, num):
+ # sysfs reports extended partitions as having 1K size
+ # sfdisk seems to have a better idea
+ ptable_json = util.subp(['sfdisk', '-J', dev], capture=True)[0]
+ ptable = json.loads(ptable_json)
+
+ nodename = f'{dev}p{num}'
+ partitions = ptable['partitiontable']['partitions']
+ partition = [part for part in partitions if part['node'] == nodename][0]
+ return partition['size'] * 512
+
+
+def _get_disk_label_id(dev):
+ ptable_json = util.subp(['sfdisk', '-J', dev], capture=True)[0]
+ ptable = json.loads(ptable_json)
+ # string in lowercase hex
+ return ptable['partitiontable']['id']
def summarize_partitions(dev):
- # We don't care about the kname
- return sorted(
- [PartData(*d[1:]) for d in block.sysfs_partition_data(dev)])
+ parts = []
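+    # merge sysfs partition data (number/offset/size) with sfdisk --json
+    # output, which supplies the partition type and bootable flag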
+ ptable_json = util.subp(['sfdisk', '-J', dev], capture=True)[0]
+ ptable = json.loads(ptable_json)
+ partitions = ptable['partitiontable']['partitions']
+ for d in block.sysfs_partition_data(dev):
+ nodename = f'/dev/{d[0]}'
+ partition = [part for part in partitions
+ if part['node'] == nodename][0]
+ ptype = partition['type']
+ boot = partition.get('bootable', False)
+ # We don't care about the kname
+ pd = PartData(*d[1:], partition_type=ptype, boot=boot)
+ pd.assertFieldsAreNotNone()
+ parts.append(pd)
+ return sorted(parts)
class StorageConfigBuilder:
- def __init__(self):
+ def __init__(self, *, version):
+ self.version = version
self.config = []
self.cur_image = None
@@ -47,37 +152,81 @@ class StorageConfigBuilder:
return {
'storage': {
'config': self.config,
+ 'version': self.version,
},
}
- def add_image(self, *, path, size, create=False, **kw):
- action = {
- 'type': 'image',
- 'id': 'id' + str(len(self.config)),
- 'path': path,
- 'size': size,
- }
- action.update(**kw)
- self.cur_image = action['id']
+ def _add(self, *, type, **kw):
+ if type != 'image' and self.cur_image is None:
+ raise Exception("no current image")
+ action = {'id': 'id' + str(len(self.config))}
+ action.update(type=type, **kw)
self.config.append(action)
+ return action
+
+ def add_image(self, *, path, size, create=False, **kw):
if create:
with open(path, "wb") as f:
f.write(b"\0" * int(util.human2bytes(size)))
+ action = self._add(type='image', path=path, size=size, **kw)
+ self.cur_image = action['id']
+ return action
def add_part(self, *, size, **kw):
- if self.cur_image is None:
- raise Exception("no current image")
- action = {
- 'type': 'partition',
- 'id': 'id' + str(len(self.config)),
- 'device': self.cur_image,
- 'size': size,
- }
- action.update(**kw)
- self.config.append(action)
+ fstype = kw.pop('fstype', None)
+ part = self._add(type='partition', device=self.cur_image, size=size,
+ **kw)
+ if fstype:
+ self.add_format(part=part, fstype=fstype)
+ return part
+
+ def add_format(self, *, part, fstype='ext4', **kw):
+ return self._add(type='format', volume=part['id'], fstype=fstype, **kw)
+
+ def set_preserve(self):
+ for action in self.config:
+ action['preserve'] = True
class TestBlockMeta(IntegrationTestCase):
+ def setUp(self):
+ self.data = self.random_string()
+
+ def assertPartitions(self, *args):
+ with loop_dev(self.img) as dev:
+ self.assertEqual([*args], summarize_partitions(dev))
+
+ @contextlib.contextmanager
+ def mount(self, dev, partition_cfg):
+ mnt_point = self.tmp_dir()
+ num = partition_cfg['number']
+ with util.mount(f'{dev}p{num}', mnt_point):
+ yield mnt_point
+
+ @contextlib.contextmanager
+ def open_file_on_part(self, dev, part_action, mode):
+ with self.mount(dev, part_action) as mnt_point:
+ with open(f'{mnt_point}/data.txt', mode) as fp:
+ yield fp
+
+ def create_data(self, dev, part_action):
+ with self.open_file_on_part(dev, part_action, 'w') as fp:
+ fp.write(self.data)
+
+ def check_data(self, dev, part_action):
+ with self.open_file_on_part(dev, part_action, 'r') as fp:
+ self.assertEqual(self.data, fp.read())
+
+ def check_fssize(self, dev, part_action, fstype, expected):
+ tolerance = 0
+ if fstype == 'ntfs':
+ # Per ntfsresize manpage, the actual fs size is at least one sector
+ # less than requested.
+ # In these tests it has been consistently 7 sectors fewer.
+ tolerance = 512 * 10
+ actual_fssize = _get_filesystem_size(dev, part_action, fstype)
+ diff = expected - actual_fssize
+ self.assertTrue(0 <= diff <= tolerance, f'difference of {diff}')
def run_bm(self, config, *args, **kwargs):
config_path = self.tmp_path('config.yaml')
@@ -102,34 +251,88 @@ class TestBlockMeta(IntegrationTestCase):
]
util.subp(cmd, env=cmd_env, **kwargs)
- def _test_default_offsets(self, ptable):
+ def _test_default_offsets(self, ptable, version, sector_size=512):
psize = 40 << 20
img = self.tmp_path('image.img')
- config = StorageConfigBuilder()
+ config = StorageConfigBuilder(version=version)
+ config.add_image(
+ path=img, size='200M', ptable=ptable, sector_size=sector_size)
+ p1 = config.add_part(size=psize, number=1)
+ p2 = config.add_part(size=psize, number=2)
+ p3 = config.add_part(size=psize, number=3)
+ self.run_bm(config.render())
+
+ with loop_dev(img, sector_size) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=psize),
+ PartData(number=2, offset=(1 << 20) + psize, size=psize),
+ PartData(number=3, offset=(1 << 20) + 2*psize, size=psize),
+ ])
+ p1['offset'] = 1 << 20
+ p2['offset'] = (1 << 20) + psize
+ p3['offset'] = (1 << 20) + 2*psize
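+        # re-run block-meta with preserve set and the offsets just observed;
+        # the existing layout should be accepted without error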
+ config.set_preserve()
+ self.run_bm(config.render())
+
+ def test_default_offsets_gpt_v1(self):
+ self._test_default_offsets('gpt', 1)
+
+ def test_default_offsets_msdos_v1(self):
+ self._test_default_offsets('msdos', 1)
+
+ def test_default_offsets_gpt_v2(self):
+ self._test_default_offsets('gpt', 2)
+
+ def test_default_offsets_msdos_v2(self):
+ self._test_default_offsets('msdos', 2)
+
+ def test_default_offsets_gpt_v1_4k(self):
+ self._test_default_offsets('gpt', 1, 4096)
+
+ def test_default_offsets_msdos_v1_4k(self):
+ self._test_default_offsets('msdos', 1, 4096)
+
+ def test_default_offsets_gpt_v2_4k(self):
+ self._test_default_offsets('gpt', 2, 4096)
+
+ def test_default_offsets_msdos_v2_4k(self):
+ self._test_default_offsets('msdos', 2, 4096)
+
+ def _test_specified_offsets(self, ptable, version):
+ psize = 20 << 20
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=version)
config.add_image(path=img, size='100M', ptable=ptable)
- config.add_part(size=psize, number=1)
- config.add_part(size=psize, number=2)
+ config.add_part(size=psize, number=1, offset=psize)
+ config.add_part(size=psize, number=2, offset=psize * 3)
self.run_bm(config.render())
with loop_dev(img) as dev:
self.assertEqual(
summarize_partitions(dev), [
- PartData(
- number=1, offset=1 << 20, size=psize),
- PartData(
- number=2, offset=(1 << 20) + psize, size=psize),
+ PartData(number=1, offset=psize, size=psize),
+ PartData(number=2, offset=psize*3, size=psize),
])
+ config.set_preserve()
+ self.run_bm(config.render())
+
+ def DONT_test_specified_offsets_gpt_v1(self):
+ self._test_specified_offsets('gpt', 1)
+
+ def DONT_test_specified_offsets_msdos_v1(self):
+ self._test_specified_offsets('msdos', 1)
- def test_default_offsets_gpt(self):
- self._test_default_offsets('gpt')
+ def test_specified_offsets_gpt_v2(self):
+ self._test_specified_offsets('gpt', 2)
- def test_default_offsets_msdos(self):
- self._test_default_offsets('msdos')
+ def test_specified_offsets_msdos_v2(self):
+ self._test_specified_offsets('msdos', 2)
- def _test_non_default_numbering(self, ptable):
+ def _test_non_default_numbering(self, ptable, version):
psize = 40 << 20
img = self.tmp_path('image.img')
- config = StorageConfigBuilder()
+ config = StorageConfigBuilder(version=version)
config.add_image(path=img, size='100M', ptable=ptable)
config.add_part(size=psize, number=1)
config.add_part(size=psize, number=4)
@@ -138,23 +341,30 @@ class TestBlockMeta(IntegrationTestCase):
with loop_dev(img) as dev:
self.assertEqual(
summarize_partitions(dev), [
- PartData(
- number=1, offset=1 << 20, size=psize),
- PartData(
- number=4, offset=(1 << 20) + psize, size=psize),
+ PartData(number=1, offset=1 << 20, size=psize),
+ PartData(number=4, offset=(1 << 20) + psize, size=psize),
])
- def test_non_default_numbering_gpt(self):
- self._test_non_default_numbering('gpt')
+ def test_non_default_numbering_gpt_v1(self):
+ self._test_non_default_numbering('gpt', 1)
- def BROKEN_test_non_default_numbering_msdos(self):
- self._test_non_default_numbering('msdos')
+ def BROKEN_test_non_default_numbering_msdos_v1(self):
+        self._test_non_default_numbering('msdos', 1)
- def test_logical(self):
+ def test_non_default_numbering_gpt_v2(self):
+ self._test_non_default_numbering('gpt', 2)
+
+ def test_non_default_numbering_msdos_v2(self):
+ self._test_non_default_numbering('msdos', 2)
+
+ def _test_logical(self, version):
img = self.tmp_path('image.img')
- config = StorageConfigBuilder()
+ config = StorageConfigBuilder(version=version)
config.add_image(path=img, size='100M', ptable='msdos')
- config.add_part(size='50M', number=1, flag='extended')
+        # curtin adds 1MiB to the size of the extended partition per
+        # contained logical partition, but only in v1 mode
+ size = '97M' if version == 1 else '99M'
+ config.add_part(size=size, number=1, flag='extended')
config.add_part(size='10M', number=5, flag='logical')
config.add_part(size='10M', number=6, flag='logical')
self.run_bm(config.render())
@@ -163,19 +373,162 @@ class TestBlockMeta(IntegrationTestCase):
self.assertEqual(
summarize_partitions(dev), [
# extended partitions get a strange size in sysfs
- PartData(number=1, offset=1 << 20, size=1 << 10),
- PartData(number=5, offset=2 << 20, size=10 << 20),
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=2 << 20, size=10 << 20),
# part 5 takes us to 12 MiB offset, curtin leaves a 1 MiB
# gap.
PartData(number=6, offset=13 << 20, size=10 << 20),
])
+ self.assertEqual(99 << 20, _get_extended_partition_size(dev, 1))
p1kname = block.partition_kname(block.path_to_kname(dev), 1)
self.assertTrue(block.is_extended_partition('/dev/' + p1kname))
+ def test_logical_v1(self):
+ self._test_logical(1)
+
+ def test_logical_v2(self):
+ self._test_logical(2)
+
+ def _test_replace_partition(self, ptable):
+ psize = 20 << 20
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable=ptable)
+ config.add_part(size=psize, number=1)
+ config.add_part(size=psize, number=2)
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=psize),
+ PartData(number=2, offset=(1 << 20) + psize, size=psize),
+ ])
+
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable=ptable, preserve=True)
+ config.add_part(size=psize, number=1, offset=1 << 20, preserve=True)
+ config.add_part(size=psize*2, number=2)
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=psize),
+ PartData(number=2, offset=(1 << 20) + psize, size=2*psize),
+ ])
+
+ def test_replace_partition_gpt_v2(self):
+ self._test_replace_partition('gpt')
+
+ def test_replace_partition_msdos_v2(self):
+ self._test_replace_partition('msdos')
+
+ def test_delete_logical_partition(self):
+ # The test case that resulted in a lot of hair-pulling:
+ # deleting a logical partition renumbers any later partitions
+ # (so you cannot stably refer to partitions by number!)
+ psize = 20 << 20
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable='msdos')
+ config.add_part(size='90M', number=1, flag='extended')
+ config.add_part(size=psize, number=5, flag='logical')
+ config.add_part(size=psize, number=6, flag='logical')
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=(2 << 20), size=psize),
+ PartData(number=6, offset=(3 << 20) + psize, size=psize),
+ ])
+ self.assertEqual(90 << 20, _get_extended_partition_size(dev, 1))
+
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable='msdos', preserve=True)
+ config.add_part(size='90M', number=1, flag='extended', preserve=True)
+ config.add_part(
+ size=psize, number=5, flag='logical', offset=(3 << 20) + psize,
+ preserve=True)
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=(3 << 20) + psize, size=psize),
+ ])
+ self.assertEqual(90 << 20, _get_extended_partition_size(dev, 1))
+
+ def _test_wiping(self, ptable):
+ # Test wiping behaviour.
+ #
+        # Partitions that should be wiped (superblock wipe, i.e. first and
+        # last megabyte):
+ #
+ # 1) New partitions
+ # 2) Partitions that are being removed, i.e. no longer present
+ # 3) Preserved partitions with an explicit wipe
+ #
+ # Partitions that should not be wiped:
+ #
+ # 4) Preserved partitions with no wipe field.
+ #
+ # We test this by creating some partitions with block-meta,
+ # writing content to them, then running block-meta again, with
+ # each partition matching one of the conditions above.
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='30M', ptable=ptable)
+ config.add_part(size='5M', number=1, offset='5M')
+ config.add_part(size='5M', number=2, offset='10M')
+ config.add_part(size='5M', number=3, offset='15M')
+ config.add_part(size='5M', number=4, offset='20M')
+ self.run_bm(config.render())
+
+ part_offset_sizes = {}
+ with loop_dev(img) as dev:
+ for kname, number, offset, size in block.sysfs_partition_data(dev):
+ content = bytes([number])
+ with open(block.kname_to_path(kname), 'wb') as fp:
+ fp.write(content*size)
+ part_offset_sizes[number] = (offset, size)
+
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='30M', ptable=ptable, preserve=True)
+ config.add_part(size='5M', number=1, offset='5M')
+ # Partition 2 is being deleted.
+ config.add_part(
+ size='5M', number=3, offset='15M', preserve=True,
+ wipe='superblock')
+ config.add_part(size='5M', number=4, offset='20M', preserve=True)
+ self.run_bm(config.render())
+
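+        # partitions 1-3 should read back as zeroes (wiped); partition 4 is
+        # preserved without a wipe, so it keeps the fill byte written above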
+ expected_content = {1: {0}, 2: {0}, 3: {0}, 4: {4}}
+
+ with loop_dev(img) as dev:
+ with open(dev, 'rb') as fp:
+ for nr, (offset, size) in part_offset_sizes.items():
+ expected = expected_content[nr]
+ fp.seek(offset)
+ first = set(fp.read(ONE_MIB_BYTES))
+ fp.seek(offset + size - ONE_MIB_BYTES)
+ last = set(fp.read(ONE_MIB_BYTES))
+ self.assertEqual(first, expected)
+ self.assertEqual(last, expected)
+
+ def test_wiping_gpt(self):
+ self._test_wiping('gpt')
+
+ def test_wiping_msdos(self):
+ self._test_wiping('msdos')
+
def test_raw_image(self):
img = self.tmp_path('image.img')
- config = StorageConfigBuilder()
+ config = StorageConfigBuilder(version=1)
config.add_image(path=img, size='2G', ptable='gpt', create=True)
curtin_cfg = config.render()
@@ -206,3 +559,432 @@ class TestBlockMeta(IntegrationTestCase):
)
finally:
server.stop()
+
+ def _do_test_resize(self, start, end, fstype):
+ start <<= 20
+ end <<= 20
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='gpt')
+ p1 = config.add_part(size=start, offset=1 << 20, number=1,
+ fstype=fstype)
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.assertEqual(fstype, _get_volume_fstype(f'{dev}p1'))
+ self.create_data(dev, p1)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=start),
+ ])
+ self.check_fssize(dev, p1, fstype, start)
+
+ config.set_preserve()
+ p1['resize'] = True
+ p1['size'] = end
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.check_data(dev, p1)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=end),
+ ])
+ self.check_fssize(dev, p1, fstype, end)
+
+ def test_resize_up_ext2(self):
+ self._do_test_resize(40, 80, 'ext2')
+
+ def test_resize_down_ext2(self):
+ self._do_test_resize(80, 40, 'ext2')
+
+ def test_resize_up_ext3(self):
+ self._do_test_resize(40, 80, 'ext3')
+
+ def test_resize_down_ext3(self):
+ self._do_test_resize(80, 40, 'ext3')
+
+ def test_resize_up_ext4(self):
+ self._do_test_resize(40, 80, 'ext4')
+
+ def test_resize_down_ext4(self):
+ self._do_test_resize(80, 40, 'ext4')
+
+ def test_resize_up_ntfs(self):
+ self._do_test_resize(40, 80, 'ntfs')
+
+ def test_resize_down_ntfs(self):
+ self._do_test_resize(80, 40, 'ntfs')
+
+ def test_resize_logical(self):
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable='msdos')
+ config.add_part(size='50M', number=1, flag='extended', offset=1 << 20)
+ config.add_part(size='10M', number=5, flag='logical', offset=2 << 20)
+ p6 = config.add_part(size='10M', number=6, flag='logical',
+ offset=13 << 20, fstype='ext4')
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.create_data(dev, p6)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ # extended partitions get a strange size in sysfs
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=2 << 20, size=10 << 20),
+ # part 5 takes us to 12 MiB offset, curtin leaves a 1 MiB
+ # gap.
+ PartData(number=6, offset=13 << 20, size=10 << 20),
+ ])
+ self.assertEqual(50 << 20, _get_extended_partition_size(dev, 1))
+
+ config.set_preserve()
+ p6['resize'] = True
+ p6['size'] = '20M'
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.check_data(dev, p6)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=2 << 20, size=10 << 20),
+ PartData(number=6, offset=13 << 20, size=20 << 20),
+ ])
+ self.assertEqual(50 << 20, _get_extended_partition_size(dev, 1))
+
+ def test_resize_extended(self):
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable='msdos')
+ p1 = config.add_part(size='50M', number=1, flag='extended',
+ offset=1 << 20)
+ p5 = config.add_part(size='49M', number=5, flag='logical',
+ offset=2 << 20)
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ # extended partitions get a strange size in sysfs
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=2 << 20, size=49 << 20),
+ ])
+ self.assertEqual(50 << 20, _get_extended_partition_size(dev, 1))
+
+ config.set_preserve()
+ p1['resize'] = True
+ p1['size'] = '99M'
+ p5['resize'] = True
+ p5['size'] = '98M'
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=1 << 10),
+ PartData(number=5, offset=2 << 20, size=98 << 20),
+ ])
+ self.assertEqual(99 << 20, _get_extended_partition_size(dev, 1))
+
+ def test_split(self):
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='gpt')
+ config.add_part(size=9 << 20, offset=1 << 20, number=1)
+ p2 = config.add_part(size='180M', offset=10 << 20, number=2,
+ fstype='ext4')
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.create_data(dev, p2)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=180 << 20),
+ ])
+ self.assertEqual(180 << 20, _get_filesystem_size(dev, p2))
+
+ config.set_preserve()
+ p2['resize'] = True
+ p2['size'] = '80M'
+ p3 = config.add_part(size='100M', offset=90 << 20, number=3,
+ fstype='ext4')
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.check_data(dev, p2)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=80 << 20),
+ PartData(number=3, offset=90 << 20, size=100 << 20),
+ ])
+ self.assertEqual(80 << 20, _get_filesystem_size(dev, p2))
+ self.assertEqual(100 << 20, _get_filesystem_size(dev, p3))
+
+ def test_partition_unify(self):
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='gpt')
+ config.add_part(size=9 << 20, offset=1 << 20, number=1)
+ p2 = config.add_part(size='40M', offset=10 << 20, number=2,
+ fstype='ext4')
+ p3 = config.add_part(size='60M', offset=50 << 20, number=3,
+ fstype='ext4')
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.create_data(dev, p2)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=40 << 20),
+ PartData(number=3, offset=50 << 20, size=60 << 20),
+ ])
+ self.assertEqual(40 << 20, _get_filesystem_size(dev, p2))
+ self.assertEqual(60 << 20, _get_filesystem_size(dev, p3))
+
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='gpt')
+ config.add_part(size=9 << 20, offset=1 << 20, number=1)
+ p2 = config.add_part(size='100M', offset=10 << 20, number=2,
+ fstype='ext4', resize=True)
+ config.set_preserve()
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.check_data(dev, p2)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=100 << 20),
+ ])
+ self.assertEqual(100 << 20, _get_filesystem_size(dev, p2))
+
+ def test_mix_of_operations_gpt(self):
+ # a test that keeps, creates, resizes, and deletes a partition
+ # 200 MiB disk, using full disk
+ # init size preserve final size
+ # p1 - 9 MiB yes 9MiB
+ # p2 - 90 MiB yes, resize 139MiB
+ # p3 - 99 MiB no 50MiB
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='gpt')
+ config.add_part(size=9 << 20, offset=1 << 20, number=1)
+ p2 = config.add_part(size='90M', offset=10 << 20, number=2,
+ fstype='ext4')
+ p3 = config.add_part(size='99M', offset=100 << 20, number=3,
+ fstype='ext4')
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.create_data(dev, p2)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=90 << 20),
+ PartData(number=3, offset=100 << 20, size=99 << 20),
+ ])
+ self.assertEqual(90 << 20, _get_filesystem_size(dev, p2))
+ self.assertEqual(99 << 20, _get_filesystem_size(dev, p3))
+
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='gpt')
+ config.add_part(size=9 << 20, offset=1 << 20, number=1)
+ p2 = config.add_part(size='139M', offset=10 << 20, number=2,
+ fstype='ext4', resize=True)
+ config.set_preserve()
+ p3 = config.add_part(size='50M', offset=149 << 20, number=3,
+ fstype='ext4')
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.check_data(dev, p2)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=139 << 20),
+ PartData(number=3, offset=149 << 20, size=50 << 20),
+ ])
+ self.assertEqual(139 << 20, _get_filesystem_size(dev, p2))
+ self.assertEqual(50 << 20, _get_filesystem_size(dev, p3))
+
+ def test_mix_of_operations_msdos(self):
+ # a test that keeps, creates, resizes, and deletes a partition
+ # including handling of extended/logical
+ # 200 MiB disk, initially only using front 100MiB
+ # flag init size preserve final size
+ # p1 - primary 9MiB yes 9MiB
+ # p2 - extended 89MiB yes, resize 189MiB
+        #  p5 - logical  36MiB     yes, resize 136MiB
+        #  p6 - logical  50MiB     no           50MiB
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='msdos')
+ p1 = config.add_part(size='9M', offset=1 << 20, number=1,
+ fstype='ext4')
+ config.add_part(size='89M', offset=10 << 20, number=2, flag='extended')
+ p5 = config.add_part(size='36M', offset=11 << 20, number=5,
+ flag='logical', fstype='ext4')
+ p6 = config.add_part(size='50M', offset=49 << 20, number=6,
+ flag='logical', fstype='ext4')
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.create_data(dev, p1)
+ self.create_data(dev, p5)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=1 << 10),
+ PartData(number=5, offset=11 << 20, size=36 << 20),
+ PartData(number=6, offset=49 << 20, size=50 << 20),
+ ])
+ self.assertEqual(89 << 20, _get_extended_partition_size(dev, 2))
+ self.assertEqual(9 << 20, _get_filesystem_size(dev, p1))
+ self.assertEqual(36 << 20, _get_filesystem_size(dev, p5))
+ self.assertEqual(50 << 20, _get_filesystem_size(dev, p6))
+
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='200M', ptable='msdos')
+ p1 = config.add_part(size='9M', offset=1 << 20, number=1,
+ fstype='ext4')
+ config.add_part(size='189M', offset=10 << 20, number=2,
+ flag='extended', resize=True)
+ p5 = config.add_part(size='136M', offset=11 << 20, number=5,
+ flag='logical', fstype='ext4', resize=True)
+ config.set_preserve()
+ p6 = config.add_part(size='50M', offset=149 << 20, number=6,
+ flag='logical', fstype='ext4')
+ self.run_bm(config.render())
+
+ with loop_dev(img) as dev:
+ self.check_data(dev, p1)
+ self.check_data(dev, p5)
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=9 << 20),
+ PartData(number=2, offset=10 << 20, size=1 << 10),
+ PartData(number=5, offset=11 << 20, size=136 << 20),
+ PartData(number=6, offset=149 << 20, size=50 << 20),
+ ])
+ self.assertEqual(189 << 20, _get_extended_partition_size(dev, 2))
+ self.assertEqual(9 << 20, _get_filesystem_size(dev, p1))
+ self.assertEqual(136 << 20, _get_filesystem_size(dev, p5))
+ self.assertEqual(50 << 20, _get_filesystem_size(dev, p6))
+
+ def test_split_and_wiping(self):
+ # regression test for a bug where a partition wipe would happen before
+ # a resize was performed, resulting in data loss.
+ img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable='gpt')
+ p1 = config.add_part(size=98 << 20, offset=1 << 20, number=1,
+ fstype='ext4')
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=98 << 20),
+ ])
+ with self.mount(dev, p1) as mnt_point:
+ # Attempt to create files across the partition with gaps
+ for i in range(1, 41):
+ with open(f'{mnt_point}/{str(i)}', 'wb') as fp:
+ fp.write(bytes([i]) * (2 << 20))
+ for i in range(1, 41):
+ if i % 5 != 0:
+ os.remove(f'{mnt_point}/{str(i)}')
+
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=img, size='100M', ptable='gpt')
+ p1 = config.add_part(size=49 << 20, offset=1 << 20, number=1,
+ fstype='ext4', resize=True)
+ config.set_preserve()
+ config.add_part(size=49 << 20, offset=50 << 20, number=2,
+ fstype='ext4')
+ self.run_bm(config.render())
+ with loop_dev(img) as dev:
+ self.assertEqual(
+ summarize_partitions(dev), [
+ PartData(number=1, offset=1 << 20, size=49 << 20),
+ PartData(number=2, offset=50 << 20, size=49 << 20),
+ ])
+ with self.mount(dev, p1) as mnt_point:
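+                # only every fifth file was kept before the resize; verify
+                # that their contents survived the shrink intact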
+ for i in range(5, 41, 5):
+ with open(f'{mnt_point}/{i}', 'rb') as fp:
+ self.assertEqual(bytes([i]) * (2 << 20), fp.read())
+
+ def test_parttype_dos(self):
+ # msdos partition table partitions shall retain their type
+ # create initial situation similar to this
+ # Device Boot Start End Sectors Size Id Type
+ # /dev/sda1 * 2048 104447 102400 50M 7 HPFS/NTFS/exFA
+ # /dev/sda2 104448 208668781 208564334 99.5G 7 HPFS/NTFS/exFA
+ # /dev/sda3 208670720 209711103 1040384 508M 27 Hidden NTFS Wi
+ self.img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=self.img, size='200M', ptable='msdos')
+ config.add_part(size=50 << 20, offset=1 << 20, number=1,
+ fstype='ntfs', flag='boot', partition_type='0x7')
+ config.add_part(size=100 << 20, offset=51 << 20, number=2,
+ fstype='ntfs', partition_type='0x7')
+ config.add_part(size=48 << 20, offset=151 << 20, number=3,
+ fstype='ntfs', partition_type='0x27')
+ self.run_bm(config.render())
+ self.assertPartitions(
+ PartData(number=1, offset=1 << 20, size=50 << 20,
+ partition_type='7', boot=True),
+ PartData(number=2, offset=51 << 20, size=100 << 20,
+ partition_type='7', boot=False),
+ PartData(number=3, offset=151 << 20, size=48 << 20,
+ partition_type='27', boot=False))
+
+ def test_parttype_gpt(self):
+ # gpt partition table partitions shall retain their type
+ # create initial situation similar to this
+ # # Start (sector) End (sector) Size Code Name
+ # 1 2048 206847 100.0 MiB EF00 EFI system part
+ # 2 206848 239615 16.0 MiB 0C01 Microsoft reser
+ # 3 239616 103811181 49.4 GiB 0700 Basic data part
+ # 4 103813120 104853503 508.0 MiB 2700
+ esp = 'C12A7328-F81F-11D2-BA4B-00A0C93EC93B'
+ msreserved = 'E3C9E316-0B5C-4DB8-817D-F92DF00215AE'
+ msdata = 'EBD0A0A2-B9E5-4433-87C0-68B6B72699C7'
+ winre = 'DE94BBA4-06D1-4D40-A16A-BFD50179D6AC'
+ self.img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=self.img, size='100M', ptable='gpt')
+ config.add_part(number=1, offset=1 << 20, size=9 << 20,
+ flag='boot', fstype='ntfs')
+ config.add_part(number=2, offset=10 << 20, size=20 << 20,
+ partition_type=msreserved)
+ config.add_part(number=3, offset=30 << 20, size=50 << 20,
+ partition_type=msdata, fstype='ntfs')
+ config.add_part(number=4, offset=80 << 20, size=19 << 20,
+ partition_type=winre, fstype='ntfs')
+ self.run_bm(config.render())
+ self.assertPartitions(
+ PartData(number=1, offset=1 << 20, size=9 << 20,
+ partition_type=esp),
+ PartData(number=2, offset=10 << 20, size=20 << 20,
+ partition_type=msreserved),
+ PartData(number=3, offset=30 << 20, size=50 << 20,
+ partition_type=msdata),
+ PartData(number=4, offset=80 << 20, size=19 << 20,
+ partition_type=winre))
+
+ @parameterized.expand([('msdos',), ('gpt',)])
+ def test_disk_label_id_persistent(self, ptable):
+ # when the disk is preserved, the disk label id shall also be preserved
+ self.img = self.tmp_path('image.img')
+ config = StorageConfigBuilder(version=2)
+ config.add_image(path=self.img, size='20M', ptable=ptable)
+ config.add_part(number=1, offset=1 << 20, size=18 << 20)
+ self.run_bm(config.render())
+ self.assertPartitions(
+ PartData(number=1, offset=1 << 20, size=18 << 20))
+ with loop_dev(self.img) as dev:
+ orig_label_id = _get_disk_label_id(dev)
+
+ config.set_preserve()
+ self.run_bm(config.render())
+ self.assertPartitions(
+ PartData(number=1, offset=1 << 20, size=18 << 20))
+ with loop_dev(self.img) as dev:
+ self.assertEqual(orig_label_id, _get_disk_label_id(dev))
diff --git a/tests/unittests/test_apt_source.py b/tests/unittests/test_apt_source.py
index 48fb820..267711f 100644
--- a/tests/unittests/test_apt_source.py
+++ b/tests/unittests/test_apt_source.py
@@ -572,6 +572,55 @@ class TestAptSourceConfig(CiTestCase):
'Acquire::ftp::Proxy "foobar3";\n'
'Acquire::https::Proxy "foobar4";\n'))
+ def test_preference_to_str(self):
+ """ test_preference_to_str - Test converting a preference dict to
+ textual representation.
+ """
+ preference = {
+ "package": "*",
+ "pin": "release a=unstable",
+ "pin-priority": 50,
+ }
+
+ expected = """\
+Package: *
+Pin: release a=unstable
+Pin-Priority: 50
+"""
+ self.assertEqual(expected, apt_config.preference_to_str(preference))
+
+ @staticmethod
+ def test_apply_apt_preferences():
+ """ test_apply_apt_preferences - Test apt preferences configuration
+ """
+ cfg = {
+ "preferences": [
+ {
+ "package": "*",
+ "pin": "release a=unstable",
+ "pin-priority": 50,
+ }, {
+ "package": "dummy-unwanted-package",
+ "pin": "origin *ubuntu.com*",
+ "pin-priority": -1,
+ }
+ ]
+ }
+
+ expected_content = """\
+Package: *
+Pin: release a=unstable
+Pin-Priority: 50
+
+Package: dummy-unwanted-package
+Pin: origin *ubuntu.com*
+Pin-Priority: -1
+"""
+ with mock.patch.object(util, "write_file") as mockobj:
+ apt_config.apply_apt_preferences(cfg, "preferencesfn")
+
+ mockobj.assert_called_with("preferencesfn", expected_content)
+
def test_mirror(self):
"""test_mirror - Test defining a mirror"""
pmir = "http://us.archive.ubuntu.com/ubuntu/"
diff --git a/tests/unittests/test_block.py b/tests/unittests/test_block.py
index 6d9b776..7a73b69 100644
--- a/tests/unittests/test_block.py
+++ b/tests/unittests/test_block.py
@@ -927,4 +927,12 @@ class TestSfdiskInfo(CiTestCase):
self.assertEqual([], self.m_load_json.call_args_list)
+class TestResize(CiTestCase):
+ def test_basic(self):
+ resizers = 'curtin.commands.block_meta_v2.resizers'
+ values = {'a': 1, 'b': 2}
+ with mock.patch.dict(resizers, values, clear=True):
+ self.assertEqual({'a', 'b'}, block.get_resize_fstypes())
+
+
# vi: ts=4 expandtab syntax=python
diff --git a/tests/unittests/test_commands_block_meta.py b/tests/unittests/test_commands_block_meta.py
index 3e22792..9185d4e 100644
--- a/tests/unittests/test_commands_block_meta.py
+++ b/tests/unittests/test_commands_block_meta.py
@@ -3,16 +3,25 @@
from argparse import Namespace
from collections import OrderedDict
import copy
-from mock import patch, call
+from mock import (
+ call,
+ Mock,
+ patch,
+)
import os
import random
+import uuid
from curtin.block import dasd
-from curtin.commands import block_meta
+from curtin.commands import block_meta, block_meta_v2
from curtin import paths, util
from .helpers import CiTestCase
+def random_uuid():
+ return uuid.uuid4()
+
+
class TestGetPathToStorageVolume(CiTestCase):
def setUp(self):
@@ -2572,6 +2581,8 @@ class TestPartitionVerifySfdisk(CiTestCase):
base = 'curtin.commands.block_meta.'
self.add_patch(base + 'verify_size', 'm_verify_size')
self.add_patch(base + 'verify_ptable_flag', 'm_verify_ptable_flag')
+ self.add_patch(base + 'os.path.realpath', 'm_realpath')
+ self.m_realpath.side_effect = lambda x: x
self.info = {
'id': 'disk-sda-part-2',
'type': 'partition',
@@ -2611,6 +2622,257 @@ class TestPartitionVerifySfdisk(CiTestCase):
self.assertEqual([], self.m_verify_ptable_flag.call_args_list)
+class TestPartitionVerifySfdiskV2(CiTestCase):
+
+ def setUp(self):
+ super(TestPartitionVerifySfdiskV2, self).setUp()
+ base = 'curtin.commands.block_meta_v2.'
+ self.add_patch(base + 'verify_size', 'm_verify_size')
+ self.add_patch(base + 'verify_ptable_flag', 'm_verify_ptable_flag')
+ self.add_patch(base + 'os.path.realpath', 'm_realpath')
+ self.m_realpath.side_effect = lambda x: x
+ self.info = {
+ 'id': 'disk-sda-part-2',
+ 'type': 'partition',
+ 'offset': '1GB',
+ 'device': 'sda',
+ 'number': 2,
+ 'size': '5GB',
+ 'flag': 'boot',
+ }
+ self.part_size = int(util.human2bytes(self.info['size']))
+ self.devpath = self.random_string()
+ self.sfdisk_part_info = {
+ 'node': self.devpath,
+ 'start': (1 << 30) // 512,
+ }
+ self.storage_config = {self.info['id']: self.info}
+ self.label = self.random_string()
+ self.table = Mock()
+ self.table.sectors2bytes = lambda x: x * 512
+
+ def test_partition_verify_sfdisk(self):
+ block_meta_v2.partition_verify_sfdisk_v2(self.info, self.label,
+ self.sfdisk_part_info,
+ self.storage_config,
+ self.table)
+ self.assertEqual(
+ [call(self.devpath, self.part_size, self.sfdisk_part_info)],
+ self.m_verify_size.call_args_list)
+ self.assertEqual(
+ [call(self.devpath, self.info['flag'], self.label,
+ self.sfdisk_part_info)],
+ self.m_verify_ptable_flag.call_args_list)
+
+ def test_partition_verify_no_moves(self):
+ self.info['preserve'] = True
+ self.info['resize'] = True
+ self.info['offset'] = '2GB'
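+        # a preserved, resized partition may not change its offset: moving
+        # partitions is unsupported, so verification must raise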
+ with self.assertRaises(RuntimeError):
+ block_meta_v2.partition_verify_sfdisk_v2(
+ self.info, self.label, self.sfdisk_part_info,
+ self.storage_config, self.table)
+
+
+class TestSfdiskV2(CiTestCase):
+ def test_gpt_basic(self):
+ table = block_meta_v2.GPTPartTable(512)
+ expected = '''\
+label: gpt
+'''
+ self.assertEqual(expected, table.render())
+
+ def test_gpt_boot(self):
+ table = block_meta_v2.GPTPartTable(512)
+ table.add(dict(number=1, offset=1 << 20, size=9 << 20, flag='boot'))
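+        # sfdisk scripts use 512-byte sectors: 1 MiB offset -> start=2048,
+        # 9 MiB size -> size=18432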
+ expected = '''\
+label: gpt
+
+1: start=2048 size=18432 type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B'''
+ self.assertEqual(expected, table.render())
+
+ def test_gpt_boot_raw_type(self):
+ esp = 'C12A7328-F81F-11D2-BA4B-00A0C93EC93B'
+ table = block_meta_v2.GPTPartTable(512)
+ table.add(dict(number=1, offset=1 << 20, size=9 << 20,
+ partition_type=esp))
+ expected = '''\
+label: gpt
+
+1: start=2048 size=18432 type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B'''
+ self.assertEqual(expected, table.render())
+
+ def test_gpt_random_uuid(self):
+ ptype = str(random_uuid()).lower()
+ table = block_meta_v2.GPTPartTable(512)
+ table.add(dict(number=1, offset=1 << 20, size=9 << 20,
+ flag='boot', partition_type=ptype))
+ expected = f'''\
+label: gpt
+
+1: start=2048 size=18432 type={ptype}'''
+ self.assertEqual(expected, table.render())
+
+ def test_dos_basic(self):
+ table = block_meta_v2.DOSPartTable(512)
+ expected = '''\
+label: dos
+'''
+ self.assertEqual(expected, table.render())
+
+ def test_dos_boot(self):
+ table = block_meta_v2.DOSPartTable(512)
+ table.add(dict(number=1, offset=1 << 20, size=9 << 20, flag='boot'))
+ expected = '''\
+label: dos
+
+1: start=2048 size=18432 type=EF bootable'''
+ self.assertEqual(expected, table.render())
+
+ def test_dos_random_code(self):
+ ptype = hex(random.randint(0, 0xff))[2:]
+ table = block_meta_v2.DOSPartTable(512)
+ table.add(dict(number=1, offset=1 << 20, size=9 << 20,
+ flag='boot', partition_type=ptype))
+ expected = f'''\
+label: dos
+
+1: start=2048 size=18432 type={ptype} bootable'''
+ self.assertEqual(expected, table.render())
+
+
+class TestPartitionNeedsResize(CiTestCase):
+
+ def setUp(self):
+ super(TestPartitionNeedsResize, self).setUp()
+ base = 'curtin.commands.block_meta_v2.'
+ self.add_patch(base + 'os.path.realpath', 'm_realpath')
+ self.add_patch(base + '_get_volume_fstype', 'm_get_volume_fstype')
+ self.m_realpath.side_effect = lambda x: x
+ self.partition = {
+ 'id': 'disk-sda-part-2',
+ 'type': 'partition',
+ 'offset': '1GB',
+ 'device': 'sda',
+ 'number': 2,
+ 'size': '5GB',
+ 'flag': 'boot',
+ }
+ self.devpath = self.random_string()
+ self.sfdisk_part_info = {
+ 'node': self.devpath,
+ 'start': (1 << 30) // 512,
+ 'size': (1 << 30) // 512,
+ }
+ self.format = {
+ 'id': 'id-format',
+ 'type': 'format',
+ 'fstype': 'ext4',
+ 'volume': self.partition['id'],
+ }
+ self.storage_config = {
+ self.partition['id']: self.partition,
+ self.format['id']: self.format,
+ }
+ self.table = Mock()
+ self.table.sectors2bytes = lambda x: x * 512
+
+ def test_partition_resize_happy_path(self):
+ self.partition['preserve'] = True
+ self.partition['resize'] = True
+ self.format['preserve'] = True
+ self.format['fstype'] = 'ext4'
+ self.m_get_volume_fstype.return_value = 'ext4'
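+        # sfdisk reports the partition as 1 GiB on disk; the config asks for
+        # a larger size, so the expected resize direction is 'up'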
+ expected = {
+ 'fstype': 'ext4',
+ 'size': 5 << 30,
+ 'direction': 'up',
+ }
+ actual = block_meta_v2._prepare_resize(
+ self.storage_config, self.partition, self.table,
+ self.sfdisk_part_info)
+ self.assertEqual(expected, actual)
+
+ def test_partition_resize_no_format_action(self):
+ self.partition['preserve'] = True
+ self.partition['resize'] = True
+ self.storage_config = {self.partition['id']: self.partition}
+ self.m_get_volume_fstype.return_value = 'ext4'
+ expected = {
+ 'fstype': 'ext4',
+ 'size': 5 << 30,
+ 'direction': 'up',
+ }
+ actual = block_meta_v2._prepare_resize(
+ self.storage_config, self.partition, self.table,
+ self.sfdisk_part_info)
+ self.assertEqual(expected, actual)
+
+ def test_partition_resize_change_fs(self):
+ self.partition['preserve'] = True
+ self.partition['resize'] = True
+ self.format['preserve'] = True
+ self.format['fstype'] = 'ext3'
+ self.m_get_volume_fstype.return_value = 'ext4'
+ with self.assertRaises(RuntimeError):
+ block_meta_v2._prepare_resize(self.storage_config, self.partition,
+ self.table, self.sfdisk_part_info)
+
+ def test_partition_resize_unsupported_fs(self):
+ self.partition['preserve'] = True
+ self.partition['resize'] = True
+ self.format['preserve'] = True
+ self.format['fstype'] = 'reiserfs'
+        self.m_get_volume_fstype.return_value = 'reiserfs'
+ with self.assertRaises(RuntimeError):
+ block_meta_v2._prepare_resize(self.storage_config, self.partition,
+ self.table, self.sfdisk_part_info)
+
+ def test_partition_resize_format_preserve_false(self):
+ # though the filesystem type is not supported for resize, it's ok
+ # because with format preserve=False, we're recreating anyhow
+ self.partition['preserve'] = True
+ self.partition['resize'] = True
+ self.format['preserve'] = False
+ self.format['fstype'] = 'reiserfs'
+ self.m_get_volume_fstype.return_value = 'reiserfs'
+ self.assertIsNone(
+ block_meta_v2._prepare_resize(self.storage_config, self.partition,
+ self.table, self.sfdisk_part_info))
+
+ def test_partition_resize_partition_preserve_false(self):
+ # not a resize - partition is recreated
+ self.partition['preserve'] = False
+ self.partition['resize'] = True
+ self.format['preserve'] = False
+ self.format['fstype'] = 'reiserfs'
+ self.m_get_volume_fstype.return_value = 'reiserfs'
+ self.assertIsNone(
+ block_meta_v2._prepare_resize(self.storage_config, self.partition,
+ self.table, self.sfdisk_part_info))
+
+ def test_partition_resize_equal_size(self):
+ # not a resize - the size is the same so leave it alone
+ self.partition['preserve'] = True
+ self.partition['resize'] = True
+ self.partition['size'] = '1GB'
+ self.format['preserve'] = True
+ self.m_get_volume_fstype.return_value = 'ext4'
+ self.assertIsNone(
+ block_meta_v2._prepare_resize(self.storage_config, self.partition,
+ self.table, self.sfdisk_part_info))
+
+ def test_partition_resize_unformatted(self):
+ # not a resize - an unformatted partition has nothing to preserve
+ self.partition['preserve'] = True
+ self.partition['resize'] = True
+ self.storage_config = {self.partition['id']: self.partition}
+ self.m_get_volume_fstype.return_value = ''
+ self.assertIsNone(
+ block_meta_v2._prepare_resize(self.storage_config, self.partition,
+ self.table, self.sfdisk_part_info))
+
+
class TestPartitionVerifyFdasd(CiTestCase):
def setUp(self):
diff --git a/tests/unittests/test_storage_config.py b/tests/unittests/test_storage_config.py
index a2308c4..df48a4d 100644
--- a/tests/unittests/test_storage_config.py
+++ b/tests/unittests/test_storage_config.py
@@ -7,7 +7,7 @@ from curtin.storage_config import ProbertParser as baseparser
from curtin.storage_config import (BcacheParser, BlockdevParser, DasdParser,
DmcryptParser, FilesystemParser, LvmParser,
RaidParser, MountParser, ZfsParser)
-from curtin.storage_config import ptable_uuid_to_flag_entry
+from curtin.storage_config import ptable_uuid_to_flag_entry, select_configs
from curtin import util
@@ -285,7 +285,7 @@ class TestBlockdevParser(CiTestCase):
""" BlockdevParser skips invalid ID_WWN_* values. """
self.bdevp.blockdev_data['/dev/sda'] = {
'DEVTYPE': 'disk',
- 'DEVNAME': 'sda',
+ 'DEVNAME': '/dev/sda',
'ID_SERIAL': 'Corsair_Force_GS_1785234921906',
'ID_SERIAL_SHORT': '1785234921906',
'ID_WWN': '0x0000000000000000',
@@ -300,7 +300,7 @@ class TestBlockdevParser(CiTestCase):
""" BlockdevParser skips invalid ID_SERIAL_* values. """
self.bdevp.blockdev_data['/dev/sda'] = {
'DEVTYPE': 'disk',
- 'DEVNAME': 'sda',
+ 'DEVNAME': '/dev/sda',
'ID_SERIAL': ' ',
'ID_SERIAL_SHORT': 'My Serial is My PassPort',
}
@@ -345,10 +345,12 @@ class TestBlockdevParser(CiTestCase):
'id': 'partition-sda1',
'type': 'partition',
'device': 'disk-sda',
+ 'path': '/dev/sda1',
'number': 1,
'offset': 1048576,
'size': 499122176,
'flag': 'linux',
+ 'partition_type': '0fc63daf-8483-4772-8e79-3d69d8477de4',
}
self.assertDictEqual(expected_dict,
self.bdevp.asdict(blockdev))
@@ -375,12 +377,12 @@ class TestBlockdevParser(CiTestCase):
""" BlockdevParser ignores partition with zero start value."""
self.bdevp.blockdev_data['/dev/vda'] = {
'DEVTYPE': 'disk',
- 'DEVNAME': 'vda',
+ 'DEVNAME': '/dev/vda',
}
test_value = {
'DEVTYPE': 'partition',
'MAJOR': "252",
- 'DEVNAME': 'vda1',
+ 'DEVNAME': '/dev/vda1',
"DEVPATH":
"/devices/pci0000:00/0000:00:04.0/virtio0/block/vda/vda1",
"ID_PART_ENTRY_TYPE": "0x0",
@@ -390,8 +392,10 @@ class TestBlockdevParser(CiTestCase):
'id': 'partition-vda1',
'type': 'partition',
'device': 'disk-vda',
+ 'path': '/dev/vda1',
'number': 1,
'size': 784334848,
+ 'partition_type': '0x0',
}
self.assertDictEqual(expected_dict, self.bdevp.asdict(test_value))
@@ -403,7 +407,8 @@ class TestBlockdevParser(CiTestCase):
# XXX: Parameterize me
def test_blockdev_to_id_raises_valueerror_on_empty_devtype(self):
- test_value = {'DEVTYPE': '', 'DEVNAME': 'bar', 'DEVPATH': 'foobar'}
+ test_value = {'DEVTYPE': '', 'DEVNAME': '/dev/bar',
+ 'DEVPATH': 'foobar'}
with self.assertRaises(ValueError):
self.bdevp.blockdev_to_id(test_value)
@@ -415,7 +420,7 @@ class TestBlockdevParser(CiTestCase):
# XXX: Parameterize me
def test_blockdev_to_id_raises_valueerror_on_missing_devtype(self):
- test_value = {'DEVNAME': 'bar', 'DEVPATH': 'foobar'}
+ test_value = {'DEVNAME': '/dev/bar', 'DEVPATH': 'foobar'}
with self.assertRaises(ValueError):
self.bdevp.blockdev_to_id(test_value)
@@ -424,9 +429,10 @@ class TestBlockdevParser(CiTestCase):
self.probe_data = _get_data('probert_storage_lvm.json')
self.bdevp = BlockdevParser(self.probe_data)
blockdev = self.bdevp.blockdev_data['/dev/vda2']
- expected_dict = {
+ base_expected_dict = {
'id': 'partition-vda2',
'type': 'partition',
+ 'path': '/dev/vda2',
'device': 'disk-vda',
'number': 2,
'offset': 3222274048,
@@ -435,6 +441,8 @@ class TestBlockdevParser(CiTestCase):
}
for ext_part_entry in ['0xf', '0x5', '0x85', '0xc5']:
blockdev['ID_PART_ENTRY_TYPE'] = ext_part_entry
+ expected_dict = base_expected_dict.copy()
+ expected_dict['partition_type'] = ext_part_entry
self.assertDictEqual(expected_dict,
self.bdevp.asdict(blockdev))
@@ -446,10 +454,12 @@ class TestBlockdevParser(CiTestCase):
'id': 'partition-vda5',
'type': 'partition',
'device': 'disk-vda',
+ 'path': '/dev/vda5',
'number': 5,
'offset': 3223322624,
'size': 2147483648,
'flag': 'logical',
+ 'partition_type': '0x83',
}
self.assertDictEqual(expected_dict,
self.bdevp.asdict(blockdev))
@@ -463,10 +473,12 @@ class TestBlockdevParser(CiTestCase):
'id': 'partition-vdb1',
'type': 'partition',
'device': 'disk-vdb',
+ 'path': '/dev/vdb1',
'number': 1,
'offset': 1048576,
'size': 536870912,
'flag': 'boot',
+ 'partition_type': '0xb',
}
self.assertDictEqual(expected_dict,
self.bdevp.asdict(blockdev))
@@ -480,10 +492,12 @@ class TestBlockdevParser(CiTestCase):
'id': 'partition-vda5',
'type': 'partition',
'device': 'disk-vda',
+ 'path': '/dev/vda5',
'number': 5,
'offset': 3223322624,
'size': 2147483648,
'flag': 'boot',
+ 'partition_type': '0x83',
}
self.assertDictEqual(expected_dict,
self.bdevp.asdict(blockdev))
@@ -536,6 +550,7 @@ class TestBlockdevParser(CiTestCase):
blockdev = self.bdevp.blockdev_data['/dev/dm-2']
expected_dict = {
'device': 'mpath-disk-mpatha',
+ 'path': '/dev/dm-2',
'flag': 'linux',
'id': 'mpath-partition-mpatha-part2',
'multipath': 'mpatha',
@@ -543,6 +558,7 @@ class TestBlockdevParser(CiTestCase):
'offset': 2097152,
'size': 10734272512,
'type': 'partition',
+ 'partition_type': '0fc63daf-8483-4772-8e79-3d69d8477de4',
}
self.assertDictEqual(expected_dict, self.bdevp.asdict(blockdev))
@@ -970,7 +986,7 @@ class TestExtractStorageConfig(CiTestCase):
""" verify live-iso extracted storage-config finds target disk. """
extracted = storage_config.extract_storage_config(self.probe_data)
self.assertEqual(
- {'storage': {'version': 1,
+ {'storage': {'version': 2,
'config': [{'id': 'disk-sda', 'path': '/dev/sda',
'serial': 'QEMU_HARDDISK_QM00001',
'type': 'disk'}]}}, extracted)
@@ -985,13 +1001,13 @@ class TestExtractStorageConfig(CiTestCase):
if missing_key != 'blockdev':
self.assertEqual(
{'storage':
- {'version': 1,
+ {'version': 2,
'config': [{'id': 'disk-sda', 'path': '/dev/sda',
'serial': 'QEMU_HARDDISK_QM00001',
'type': 'disk'}]}}, extracted)
else:
# empty config without blockdev data
- self.assertEqual({'storage': {'config': [], 'version': 1}},
+ self.assertEqual({'storage': {'config': [], 'version': 2}},
extracted)
@skipUnlessJsonSchema()
@@ -1010,10 +1026,17 @@ class TestExtractStorageConfig(CiTestCase):
'raidlevel': 'raid1', 'name': 'md1',
'devices': ['partition-vdb1', 'partition-vdc1'],
'spare_devices': []}, raids[0])
- self.assertEqual({'id': 'raid-md1p1', 'type': 'partition',
- 'size': 4285530112, 'flag': 'linux', 'number': 1,
- 'device': 'raid-md1', 'offset': 1048576},
- raid_partitions[0])
+ self.assertEqual({
+ 'id': 'raid-md1p1',
+ 'type': 'partition',
+ 'path': '/dev/md1p1',
+ 'size': 4285530112,
+ 'flag': 'linux',
+ 'number': 1,
+ 'partition_type': '0fc63daf-8483-4772-8e79-3d69d8477de4',
+ 'device': 'raid-md1',
+ 'offset': 1048576},
+ raid_partitions[0])
@skipUnlessJsonSchema()
def test_find_extended_partition(self):
@@ -1108,4 +1131,26 @@ class TestExtractStorageConfig(CiTestCase):
self.assertEqual(expected_dict, bitlocker[0])
+class TestSelectConfigs(CiTestCase):
+ def test_basic(self):
+ id0 = {'a': 1, 'b': 2}
+ id1 = {'a': 1, 'c': 3}
+ sc = {'id0': id0, 'id1': id1}
+
+ self.assertEqual([id0, id1], select_configs(sc, a=1))
+
+ def test_not_found(self):
+ id0 = {'a': 1, 'b': 2}
+ id1 = {'a': 1, 'c': 3}
+ sc = {'id0': id0, 'id1': id1}
+
+ self.assertEqual([], select_configs(sc, a=4))
+
+ def test_multi_criteria(self):
+ id0 = {'a': 1, 'b': 2}
+ id1 = {'a': 1, 'c': 3}
+ sc = {'id0': id0, 'id1': id1}
+
+ self.assertEqual([id0], select_configs(sc, a=1, b=2))
+
# vi: ts=4 expandtab syntax=python
diff --git a/tests/vmtests/__init__.py b/tests/vmtests/__init__.py
index fd6c246..c52c442 100644
--- a/tests/vmtests/__init__.py
+++ b/tests/vmtests/__init__.py
@@ -1930,29 +1930,6 @@ class VMBaseClass(TestCase):
self.assertIn(kpackage, self.debian_packages)
@skip_if_flag('expected_failure')
- def test_clear_holders_ran(self):
- """ Test curtin install runs block-meta/clear-holders. """
- if not self.has_storage_config():
- raise SkipTest("This test does not use storage config.")
-
- install_logfile = 'root/curtin-install.log'
- self.output_files_exist([install_logfile])
- install_log = self.load_collect_file(install_logfile)
-
- # validate block-meta called clear-holders at least once
- # We match both 'start' and 'finish' strings, so for each
- # call we'll have 2 matches.
- clear_holders_re = 'cmd-install/.*cmd-block-meta/clear-holders'
- events = re.findall(clear_holders_re, install_log)
- print('Matched clear-holder events:\n%s' % events)
- self.assertGreaterEqual(len(events), 2)
-
- # dirty_disks mode runs an early block-meta command which
- # also runs clear-holders
- if self.dirty_disks is True:
- self.assertGreaterEqual(len(events), 4)
-
- @skip_if_flag('expected_failure')
def test_kernel_img_conf(self):
""" Test curtin install kernel-img.conf correctly. """
if self.target_distro != 'ubuntu':
@@ -2058,17 +2035,6 @@ class VMBaseClass(TestCase):
return swaps
- # we don't yet have a skip_by_date on specific releases
- if is_devel_release(self.target_release):
- name = "test_swaps_used"
- bug = "1894910"
- fixby = "2020-10-15"
- removeby = "2020-11-01"
- raise SkipTest(
- "skip_by_date({name}) LP: #{bug} "
- "fixby={fixby} removeby={removeby}: ".format(
- name=name, bug=bug, fixby=fixby, removeby=removeby))
-
expected_swaps = find_fstab_swaps()
proc_swaps = self.load_collect_file("proc-swaps")
for swap in expected_swaps:
@@ -2573,7 +2539,6 @@ def prep_partition_for_device(device):
'size': '8M',
'flag': 'prep',
'guid': '9e1a2d38-c612-4316-aa26-8b49521e5a8b',
- 'offset': '1M',
'wipe': 'zero',
'grub_device': True,
'device': device}
diff --git a/tests/vmtests/releases.py b/tests/vmtests/releases.py
index fa755b1..67248bf 100644
--- a/tests/vmtests/releases.py
+++ b/tests/vmtests/releases.py
@@ -7,7 +7,7 @@ class _ReleaseBase(object):
repo = "maas-daily"
arch = get_platform_arch()
target_arch = arch
- mem = "1024"
+ mem = "2048"
class _UbuntuBase(_ReleaseBase):
@@ -72,22 +72,6 @@ class _UbuntuCore20FromFocalBase(_UbuntuCoreUbuntuBase):
release = "focal"
# release for target
target_release = "ubuntu-core-20"
- mem = "2048"
-
-
-class _Centos66FromXenialBase(_CentosFromUbuntuBase):
- release = "xenial"
- target_release = "centos66"
-
-
-class _Centos66FromBionicBase(_CentosFromUbuntuBase):
- release = "bionic"
- target_release = "centos66"
-
-
-class _Centos66FromFocalBase(_CentosFromUbuntuBase):
- release = "focal"
- target_release = "centos66"
class _PreciseBase(_UbuntuBase):
@@ -148,7 +132,6 @@ class _XenialEdge(_XenialBase):
class _BionicBase(_UbuntuBase):
release = "bionic"
target_release = "bionic"
- mem = "2048"
if _UbuntuBase.arch == "arm64":
subarch = "ga-18.04"
@@ -164,7 +147,6 @@ class _DiscoBase(_UbuntuBase):
release = "disco"
target_release = "disco"
# squashfs is over 300MB, need more ram
- mem = "2048"
if _UbuntuBase.arch == "arm64":
subarch = "ga-19.04"
@@ -172,7 +154,6 @@ class _DiscoBase(_UbuntuBase):
class _EoanBase(_UbuntuBase):
release = "eoan"
target_release = "eoan"
- mem = "2048"
if _UbuntuBase.arch == "arm64":
subarch = "ga-19.10"
@@ -180,7 +161,6 @@ class _EoanBase(_UbuntuBase):
class _FocalBase(_UbuntuBase):
release = "focal"
target_release = "focal"
- mem = "2048"
if _UbuntuBase.arch == "arm64":
subarch = "ga-20.04"
@@ -188,7 +168,6 @@ class _FocalBase(_UbuntuBase):
class _HirsuteBase(_UbuntuBase):
release = "hirsute"
target_release = "hirsute"
- mem = "2048"
if _UbuntuBase.arch == "arm64":
subarch = "ga-21.04"
@@ -196,7 +175,6 @@ class _HirsuteBase(_UbuntuBase):
class _ImpishBase(_UbuntuBase):
release = "impish"
target_release = "impish"
- mem = "2048"
if _UbuntuBase.arch == "arm64":
subarch = "ga-21.10"
@@ -225,11 +203,8 @@ class _Releases(object):
class _CentosReleases(object):
centos70_xenial = _Centos70FromXenialBase
- centos66_xenial = _Centos66FromXenialBase
centos70_bionic = _Centos70FromBionicBase
- centos66_bionic = _Centos66FromBionicBase
centos70_focal = _Centos70FromFocalBase
- centos66_focal = _Centos66FromFocalBase
class _UbuntuCoreReleases(object):
diff --git a/tests/vmtests/test_basic.py b/tests/vmtests/test_basic.py
index 6059bd9..616d635 100644
--- a/tests/vmtests/test_basic.py
+++ b/tests/vmtests/test_basic.py
@@ -41,10 +41,6 @@ class TestBasicAbs(VMBaseClass):
f="btrfs_uuid_diskc"
if command -v btrfs-debug-tree >/dev/null; then
btrfs-debug-tree -r $dev | awk '/^uuid/ {print $2}' | grep "-"
- # btrfs-debug-tree fails in centos66, use btrfs-show instead
- if [ "$?" != "0" ]; then
- btrfs-show $dev | awk '/uuid/ {print $4}'
- fi
else
btrfs inspect-internal dump-super $dev |
awk '/^dev_item.fsid/ {print $2}'
@@ -61,9 +57,6 @@ class TestBasicAbs(VMBaseClass):
""")]
def _test_ptable(self, blkid_output, expected):
- if self.target_release == "centos66":
- raise SkipTest("No PTTYPE blkid output on Centos66")
-
if not blkid_output:
raise RuntimeError('_test_ptable requires blkid output file')
@@ -100,8 +93,6 @@ class TestBasicAbs(VMBaseClass):
self.assertEqual(kname_uuid, btrfs_uuid)
def _test_partition_is_prep(self, info_file):
- if self.target_release == "centos66":
- raise SkipTest("Cannot detect PReP partitions in Centos66")
udev_info = self.load_collect_file(info_file).rstrip()
if not udev_info:
raise ValueError('Empty udev_info collect file')
@@ -132,10 +123,7 @@ class TestBasicAbs(VMBaseClass):
def test_partition_numbers(self):
# pnum_disk should have partitions 1 2, and 10
- if self.target_release != 'centos66':
- disk = self._dname_to_kname('pnum_disk')
- else:
- disk = self._serial_to_kname('disk-d')
+ disk = self._dname_to_kname('pnum_disk')
expected = [disk + s for s in ["", "1", "2", "10"]]
self._test_partition_numbers(disk, expected)
@@ -220,19 +208,6 @@ class Centos70FocalTestBasic(centos_relbase.centos70_focal,
__test__ = True
-class Centos66XenialTestBasic(centos_relbase.centos66_xenial,
- CentosTestBasicAbs):
- __test__ = True
-
-
-class Centos66BionicTestBasic(centos_relbase.centos66_bionic,
- CentosTestBasicAbs):
- # Centos66 cannot handle ext4 defaults in Bionic (64bit,meta_csum)
- # this conf defaults to ext3
- conf_file = "examples/tests/centos6_basic.yaml"
- __test__ = True
-
-
class XenialGAi386TestBasic(relbase.xenial_ga, TestBasicAbs):
__test__ = True
arch_skip = ["arm64", "ppc64el", "s390x"]
diff --git a/tests/vmtests/test_network.py b/tests/vmtests/test_network.py
index 1b42493..6ff3a16 100644
--- a/tests/vmtests/test_network.py
+++ b/tests/vmtests/test_network.py
@@ -486,11 +486,6 @@ class ImpishTestNetworkBasic(relbase.impish, TestNetworkBasicAbs):
__test__ = True
-class Centos66TestNetworkBasic(centos_relbase.centos66_xenial,
- CentosTestNetworkBasicAbs):
- __test__ = True
-
-
class Centos70TestNetworkBasic(centos_relbase.centos70_xenial,
CentosTestNetworkBasicAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_alias.py b/tests/vmtests/test_network_alias.py
index 8b58edd..dd6ba02 100644
--- a/tests/vmtests/test_network_alias.py
+++ b/tests/vmtests/test_network_alias.py
@@ -34,11 +34,6 @@ class CentosTestNetworkAliasAbs(TestNetworkAliasAbs):
pass
-class Centos66TestNetworkAlias(centos_relbase.centos66_xenial,
- CentosTestNetworkAliasAbs):
- __test__ = True
-
-
class Centos70TestNetworkAlias(centos_relbase.centos70_xenial,
CentosTestNetworkAliasAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_bonding.py b/tests/vmtests/test_network_bonding.py
index 73bcf60..ad0c1d4 100644
--- a/tests/vmtests/test_network_bonding.py
+++ b/tests/vmtests/test_network_bonding.py
@@ -69,11 +69,6 @@ class ImpishTestBonding(relbase.impish, TestNetworkBondingAbs):
__test__ = True
-class Centos66TestNetworkBonding(centos_relbase.centos66_xenial,
- CentosTestNetworkBondingAbs):
- __test__ = True
-
-
class Centos70TestNetworkBonding(centos_relbase.centos70_xenial,
CentosTestNetworkBondingAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_bridging.py b/tests/vmtests/test_network_bridging.py
index 93ecc4b..9c90702 100644
--- a/tests/vmtests/test_network_bridging.py
+++ b/tests/vmtests/test_network_bridging.py
@@ -41,8 +41,6 @@ default_bridge_params_uncheckable = [
# attrs we cannot validate
release_to_bridge_params_uncheckable = {
- 'centos66': ['bridge_fd', 'bridge_hello', 'bridge_hw', 'bridge_maxage',
- 'bridge_pathcost', 'bridge_portprio'],
'centos70': ['bridge_fd', 'bridge_hello', 'bridge_hw', 'bridge_maxage',
'bridge_pathcost', 'bridge_portprio'],
'xenial': ['bridge_ageing'],
@@ -220,11 +218,6 @@ class CentosTestBridgeNetworkAbs(TestBridgeNetworkAbs):
self.assertTrue('bridge' in status)
-class Centos66TestBridgeNetwork(centos_relbase.centos66_xenial,
- CentosTestBridgeNetworkAbs):
- __test__ = True
-
-
class Centos70TestBridgeNetwork(centos_relbase.centos70_xenial,
CentosTestBridgeNetworkAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_ipv6.py b/tests/vmtests/test_network_ipv6.py
index 80b8ccf..f524e82 100644
--- a/tests/vmtests/test_network_ipv6.py
+++ b/tests/vmtests/test_network_ipv6.py
@@ -65,11 +65,6 @@ class ImpishTestNetworkIPV6(relbase.impish, TestNetworkIPV6Abs):
__test__ = True
-class Centos66TestNetworkIPV6(centos_relbase.centos66_xenial,
- CentosTestNetworkIPV6Abs):
- __test__ = True
-
-
class Centos70TestNetworkIPV6(centos_relbase.centos70_xenial,
CentosTestNetworkIPV6Abs):
__test__ = True
diff --git a/tests/vmtests/test_network_ipv6_static.py b/tests/vmtests/test_network_ipv6_static.py
index f24aab5..cb9caad 100644
--- a/tests/vmtests/test_network_ipv6_static.py
+++ b/tests/vmtests/test_network_ipv6_static.py
@@ -35,11 +35,6 @@ class ImpishTestNetworkIPV6Static(relbase.impish, TestNetworkIPV6StaticAbs):
__test__ = True
-class Centos66TestNetworkIPV6Static(centos_relbase.centos66_xenial,
- CentosTestNetworkIPV6StaticAbs):
- __test__ = True
-
-
class Centos70TestNetworkIPV6Static(centos_relbase.centos70_xenial,
CentosTestNetworkIPV6StaticAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_ipv6_vlan.py b/tests/vmtests/test_network_ipv6_vlan.py
index a6eae41..7955101 100644
--- a/tests/vmtests/test_network_ipv6_vlan.py
+++ b/tests/vmtests/test_network_ipv6_vlan.py
@@ -34,11 +34,6 @@ class ImpishTestNetworkIPV6Vlan(relbase.impish, TestNetworkIPV6VlanAbs):
__test__ = True
-class Centos66TestNetworkIPV6Vlan(centos_relbase.centos66_xenial,
- CentosTestNetworkIPV6VlanAbs):
- __test__ = True
-
-
class Centos70TestNetworkIPV6Vlan(centos_relbase.centos70_xenial,
CentosTestNetworkIPV6VlanAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_mtu.py b/tests/vmtests/test_network_mtu.py
index a36a752..f112b1c 100644
--- a/tests/vmtests/test_network_mtu.py
+++ b/tests/vmtests/test_network_mtu.py
@@ -201,11 +201,6 @@ class ImpishTestNetworkMtu(relbase.impish, TestNetworkMtuNetworkdAbs):
__test__ = True
-class Centos66TestNetworkMtu(centos_relbase.centos66_xenial,
- CentosTestNetworkMtuAbs):
- __test__ = True
-
-
class Centos70TestNetworkMtu(centos_relbase.centos70_xenial,
CentosTestNetworkMtuAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_static.py b/tests/vmtests/test_network_static.py
index 95960af..867cf11 100644
--- a/tests/vmtests/test_network_static.py
+++ b/tests/vmtests/test_network_static.py
@@ -40,11 +40,6 @@ class ImpishTestNetworkStatic(relbase.impish, TestNetworkStaticAbs):
__test__ = True
-class Centos66TestNetworkStatic(centos_relbase.centos66_xenial,
- CentosTestNetworkStaticAbs):
- __test__ = True
-
-
class Centos70TestNetworkStatic(centos_relbase.centos70_xenial,
CentosTestNetworkStaticAbs):
__test__ = True
diff --git a/tests/vmtests/test_network_static_routes.py b/tests/vmtests/test_network_static_routes.py
index eb096ee..664c035 100644
--- a/tests/vmtests/test_network_static_routes.py
+++ b/tests/vmtests/test_network_static_routes.py
@@ -43,11 +43,6 @@ class ImpishTestNetworkStaticRoutes(relbase.impish,
__test__ = True
-class Centos66TestNetworkStaticRoutes(centos_relbase.centos66_xenial,
- CentosTestNetworkStaticRoutesAbs):
- __test__ = False
-
-
class Centos70TestNetworkStaticRoutes(centos_relbase.centos70_xenial,
CentosTestNetworkStaticRoutesAbs):
__test__ = False
diff --git a/tests/vmtests/test_network_vlan.py b/tests/vmtests/test_network_vlan.py
index 38bc87c..99bad66 100644
--- a/tests/vmtests/test_network_vlan.py
+++ b/tests/vmtests/test_network_vlan.py
@@ -88,11 +88,6 @@ class ImpishTestNetworkVlan(relbase.impish, TestNetworkVlanAbs):
__test__ = True
-class Centos66TestNetworkVlan(centos_relbase.centos66_xenial,
- CentosTestNetworkVlanAbs):
- __test__ = True
-
-
class Centos70TestNetworkVlan(centos_relbase.centos70_xenial,
CentosTestNetworkVlanAbs):
__test__ = True
diff --git a/tests/vmtests/test_preserve_raid.py b/tests/vmtests/test_preserve_raid.py
index 4bb977e..04c16b7 100644
--- a/tests/vmtests/test_preserve_raid.py
+++ b/tests/vmtests/test_preserve_raid.py
@@ -56,6 +56,9 @@ class BionicTestPartitionExistingRAID(
relbase.bionic, TestPartitionExistingRAID):
__test__ = True
+ def test_correct_ptype(self):
+ self.skipTest("lsblk on bionic does not support PTTYPE")
+
class FocalTestPartitionExistingRAID(
relbase.focal, TestPartitionExistingRAID):
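Context for the new skip above: the base test reads the partition-table type through lsblk's PTTYPE column, which the util-linux shipped on bionic does not expose. A rough illustration of the check on a release whose lsblk does support it (device name is only an example, not taken from the patch):
    # Print the partition table type of a disk, e.g. "gpt" or "dos";
    # bionic's lsblk has no PTTYPE column, hence the skipTest above.
    lsblk --noheadings --output PTTYPE /dev/vda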
diff --git a/tests/vmtests/test_simple.py b/tests/vmtests/test_simple.py
index 0ee87fc..2b91f0b 100644
--- a/tests/vmtests/test_simple.py
+++ b/tests/vmtests/test_simple.py
@@ -29,15 +29,6 @@ class Centos70BionicTestSimple(centos_relbase.centos70_bionic, TestSimple):
__test__ = True
-class Centos66XenialTestSimple(centos_relbase.centos66_xenial, TestSimple):
- __test__ = True
-
-
-class Centos66BionicTestSimple(centos_relbase.centos66_bionic, TestSimple):
- __test__ = False
- # LP: #1775424 Centos66 fails with Bionic Ephemeral ext4 features
-
-
class XenialTestSimple(relbase.xenial, TestSimple):
__test__ = True
diff --git a/tests/vmtests/test_uefi_basic.py b/tests/vmtests/test_uefi_basic.py
index aa4c650..1a90a7d 100644
--- a/tests/vmtests/test_uefi_basic.py
+++ b/tests/vmtests/test_uefi_basic.py
@@ -17,7 +17,7 @@ class TestBasicAbs(VMBaseClass):
disk_to_check = [('main_disk', 1), ('main_disk', 2)]
extra_collect_scripts = [textwrap.dedent("""
cd OUTPUT_COLLECT_D
- ls /sys/firmware/efi/ | cat >ls_sys_firmware_efi
+ test -d /sys/firmware/efi ; echo $? >is_efi
cp /sys/class/block/vda/queue/logical_block_size vda_lbs
cp /sys/class/block/vda/queue/physical_block_size vda_pbs
blockdev --getsz /dev/vda | cat >vda_blockdev_getsz
@@ -28,24 +28,10 @@ class TestBasicAbs(VMBaseClass):
exit 0
""")]
- def test_sys_firmware_efi(self):
- self.output_files_exist(["ls_sys_firmware_efi"])
- sys_efi_possible = [
- 'config_table',
- 'efivars',
- 'fw_platform_size',
- 'fw_vendor',
- 'runtime',
- 'runtime-map',
- 'systab',
- 'vars',
- ]
- efi_lines = self.load_collect_file(
- "ls_sys_firmware_efi").strip().split('\n')
-
- # sys/firmware/efi contents differ based on kernel and configuration
- for efi_line in efi_lines:
- self.assertIn(efi_line, sys_efi_possible)
+ def test_is_efi(self):
+ self.output_files_exist(["is_efi"])
+ efi_lines = self.load_collect_file("is_efi").strip().split('\n')
+ self.assertEqual(['0'], efi_lines)
def test_disk_block_sizes(self):
""" Test disk logical and physical block size are match
diff --git a/tools/build-deb b/tools/build-deb
index dbe364f..85868d7 100755
--- a/tools/build-deb
+++ b/tools/build-deb
@@ -1,7 +1,7 @@
#!/bin/sh
# This file is part of curtin. See LICENSE file for copyright and license info.
-set -e
+set -eu
sourcename="curtin"
TEMP_D=""
@@ -13,7 +13,7 @@ cleanup() {
[ -z "$TEMP_D" ] || rm -Rf "$TEMP_D"
}
-if [ "$1" = "-h" -o "$1" = "--help" ]; then
+if [ "${1:-}" = "-h" -o "${1:-}" = "--help" ]; then
cat <<EOF
Usage: ${0##*/}
build a deb of from trunk directory
@@ -36,19 +36,13 @@ top_d=$(cd "$(dirname "${0}")"/.. && pwd)
ref=HEAD
if [ $# -eq 0 ]; then
- # if no opts given, build source, without depends, and not signed.
- set -- -S -d -us -uc
+ # if no opts given, build source, without depends.
+ set -- -S -d
fi
-# grab the first line in the changelog
-# hopefully this pulls the version info there
-# resulting in something like: UPSTREAM_VER-0ubuntu1
-clogver_o=$(sed -n '1s,.*(\([^)]*\)).*,\1,p' debian/changelog.trunk)
-
# uver gets 17.1-3-gc85e2562 '17.1' if this is a tag.
uver=$(git describe --long --abbrev=8 "--match=[0-9][0-9]*" "$ref")
-clogver_debian=${clogver_o##*-}
-clogver_new="${uver}-${clogver_debian}"
+clogver_new="${uver}-0ubuntu1"
# uver_base_rel rel gets '17.1'
uver_base_rel=${uver%%-*}
@@ -60,7 +54,7 @@ TEMP_D=$(mktemp -d "${TMPDIR:-/tmp}/${bname}.XXXXXX")
trap cleanup EXIT
-echo "building version ${uver}, debian_ver=${clogver_debian}"
+echo "building version ${uver}"
dir="${sourcename}-$uver"
tarball="${sourcename}_$uver.orig.tar.gz"
@@ -73,38 +67,13 @@ cd "${TEMP_D}"
tar xzf "$tarball" || fail "failed extract tarball"
if [ ! -d "$dir" ]; then
- # make-tarball will create the directory name based on the
- # contents of debian/changelog.trunk in the version provided.
- # if that differs from what is here, then user has changes.
- for d in ${sourcename}*; do
- [ -d "$d" ] && break
- done
- if [ -d "$d" ]; then
- {
- echo "WARNING: git at '${uver}' had different version"
- echo " in debian/changelog.trunk than your tree. version there"
- echo " is '$d' working directory had $uver"
- } 1>&2
- dir=$d
- else
- echo "did not find a directory created by make-tarball. sorry." 1>&2
- exit
- fi
+ echo "did not find a directory created by make-tarball. sorry." 1>&2
+ exit
fi
cd "$dir" || fail "failed cd $dir"
-# move files ending in .trunk to name without .trunk
-# ie, this copies debian/changelog.trunk to debian/changelog
-for f in debian/*.trunk; do
- mv "$f" "${f%.trunk}"
-done
-
-# first line of debian/changelog looks like
-# curtin (<version>) UNRELEASED; urgency=low
-# fix the version and UNRELEASED
-sed -i -e "1s,([^)]*),(${clogver_new})," \
- -e "1s,UNRELEASED,${RELEASE}," debian/changelog ||
- fail "failed to write debian/changelog"
+dch --create --package curtin --newversion "$clogver_new" \
+ --distribution "$RELEASE" "Development release"
debuild "$@" || fail "debuild failed"
cd "$TEMP_D"
diff --git a/tox.ini b/tox.ini
index d9437c5..2fc4027 100644
--- a/tox.ini
+++ b/tox.ini
@@ -3,14 +3,10 @@ minversion = 1.6
skipsdist = True
envlist =
py3-flake8,
- py27,
py3,
py3-pyflakes,
py3-pylint,
- py27-pylint,
- trusty-py27,
- block-schema,
- xenial-py3
+ block-schema
[tox:jenkins]
downloadcache = ~/cache/pip
@@ -31,23 +27,6 @@ commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
basepython = python3
sitepackages = true
-[testenv:py27]
-basepython = python2.7
-sitepackages = true
-# https://github.com/pypa/setuptools/issues/1963
-deps = {[testenv]deps}
- setuptools<45
-
-# tox uses '--pre' by default to pip install. We don't want that, and
-# 'pip_pre=False' isn't available until tox version 1.9.
-install_command = pip install {opts} {packages}
-
-[testenv:py2-flake8]
-basepython = python2
-deps = {[testenv]deps}
- flake8
-commands = {envpython} -m flake8 {posargs:curtin}
-
[testenv:py3-flake8]
basepython = python3
deps = {[testenv]deps}
@@ -64,19 +43,10 @@ commands = {envpython} -m pyflakes {posargs:curtin/ tests/ tools/}
basepython = python3
sitepackages = true
deps = {[testenv]deps}
- pylint==2.6.0
+ pylint==2.12.2
git+https://git.launchpad.net/simplestreams
commands = {envpython} -m pylint --errors-only {posargs:curtin tests/vmtests}
-[testenv:py27-pylint]
-# set basepython because tox 1.6 (trusty) does not support generated environments
-basepython = python2.7
-sitepackages = true
-deps = {[testenv]deps}
- {[testenv:py27]deps}
- pylint==1.8.1
-commands = {envpython} -m pylint --errors-only {posargs:curtin}
-
[testenv:docs]
deps = {[testenv]deps}
sphinx
@@ -107,15 +77,6 @@ basepython = python3
commands =
{toxinidir}/tools/run-pyflakes3 {posargs}
-[testenv:trusty-py27]
-deps = {[testenv:trusty]deps}
- setuptools<45
-
-basepython = python2.7
-sitepackages = true
-commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
- {posargs:tests/unittests}
-
[testenv:trusty-py3]
deps = {[testenv:trusty]deps}
basepython = python3
@@ -129,13 +90,6 @@ deps =
pyyaml==3.11
oauthlib==1.0.3
-[testenv:xenial-py27]
-basepython = python27
-deps = {[testenv:xenial]deps}
- {[testenv:py27]deps}
-commands = {envpython} {toxinidir}/tools/noproxy {envpython} -m nose \
- {posargs:tests/unittests}
-
[testenv:xenial-py3]
basepython = python3
sitepackages = true
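With the Python 2.7 environments removed, a default tox run now covers only the Python 3 targets left in envlist. For illustration, the remaining checks could be invoked as follows (environment names taken from the updated envlist):
    # Run everything left in envlist:
    tox
    # Or a single retained environment, e.g. the errors-only pylint pass:
    tox -e py3-pylint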