From 9222b51efaca13120f02e6aa09b25fdd4b9273df Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?J=C3=B6rg=20Behrmann?=
Date: Wed, 18 Sep 2024 15:31:43 +0200
Subject: [PATCH 1/6] Format with ruff

This change reformats mkosi using the ruff formatter with the default settings, except for a line width of 119 columns.

Deviating from the default ruff formatting, "fmt: skip" comments were added where it semantically makes sense, mainly:

- lists representing cmdlines, where options and their arguments should not be split
- when alignment improves readability (by easing comparisons with lines above and below)

Deviations from the above two guidelines are:

- alignment was discarded for semantically empty statements (enum.auto())
- when all positional arguments were on the same line and options were on different lines, the positional arguments were put on separate lines as well, to minimize the difference from vanilla ruff.

In collections that fit on a single line, trailing commas were removed, since they force ruff to use multi-line formatting.
---
 kernel-install/50-mkosi.install | 8 +-
 mkosi/__init__.py | 474 ++++++++++----------
 mkosi/archive.py | 10 +-
 mkosi/backport.py | 8 +-
 mkosi/bootloader.py | 100 ++---
 mkosi/completion.py | 30 +-
 mkosi/config.py | 651 ++++++++++++++--------------
 mkosi/curl.py | 2 +-
 mkosi/distributions/__init__.py | 38 +-
 mkosi/distributions/arch.py | 15 +-
 mkosi/distributions/azure.py | 6 +-
 mkosi/distributions/centos.py | 50 +--
 mkosi/distributions/debian.py | 64 +--
 mkosi/distributions/fedora.py | 38 +-
 mkosi/distributions/mageia.py | 6 +-
 mkosi/distributions/openmandriva.py | 8 +-
 mkosi/distributions/opensuse.py | 29 +-
 mkosi/distributions/ubuntu.py | 1 -
 mkosi/initrd.py | 21 +-
 mkosi/installer/__init__.py | 32 +-
 mkosi/installer/apt.py | 26 +-
 mkosi/installer/dnf.py | 28 +-
 mkosi/installer/pacman.py | 19 +-
 mkosi/installer/rpm.py | 25 +-
 mkosi/installer/zypper.py | 18 +-
 mkosi/kmod.py | 7 +-
 mkosi/log.py | 20 +-
 mkosi/manifest.py | 15 +-
 mkosi/mounts.py | 5 +-
 mkosi/qemu.py | 184 ++++----
 mkosi/run.py | 30 +-
 mkosi/sandbox.py | 45 +-
 mkosi/sysupdate.py | 10 +-
 mkosi/tree.py | 42 +-
 mkosi/types.py | 1 +
 mkosi/user.py | 8 +-
 mkosi/util.py | 19 +-
 mkosi/versioncomp.py | 5 +-
 mkosi/vmspawn.py | 4 +-
 tests/__init__.py | 4 +-
 tests/test_boot.py | 3 +-
 tests/test_config.py | 39 +-
 tests/test_initrd.py | 32 +-
 tests/test_json.py | 6 +-
 tests/test_sysext.py | 21 +-
 tests/test_versioncomp.py | 18 +-
 46 files changed, 1133 insertions(+), 1092 deletions(-)

diff --git a/kernel-install/50-mkosi.install b/kernel-install/50-mkosi.install index d59f64f6f..f8b77f601 100644 --- a/kernel-install/50-mkosi.install +++ b/kernel-install/50-mkosi.install @@ -76,9 +76,9 @@ def main() -> None: log_setup() parser = argparse.ArgumentParser( - description='kernel-install plugin to build initrds or Unified Kernel Images using mkosi', + description="kernel-install plugin to build initrds or Unified Kernel Images using mkosi", allow_abbrev=False, - usage='50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…', + usage="50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…", ) parser.add_argument( @@ -148,7 +148,7 @@ def main() -> None: "--format", str(format), "--output", output, "--output-dir", context.staging_area, - ] + ] # fmt: skip if context.verbose: cmdline += ["--debug"] @@ -167,5 +167,5 @@ def main() -> None: (context.staging_area / f"{output}.initrd").unlink() -if __name__ == '__main__': +if __name__ == "__main__": main() diff --git a/mkosi/__init__.py
b/mkosi/__init__.py index 43270ace8..9e0cbae03 100644 --- a/mkosi/__init__.py +++ b/mkosi/__init__.py @@ -155,7 +155,8 @@ def mount_base_trees(context: Context) -> Iterator[None]: elif path.suffix == ".raw": run( ["systemd-dissect", "--mount", "--mkdir", path, d], - env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no")) + env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"), + ) stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", d])) bases += [d] else: @@ -187,8 +188,10 @@ def install_distribution(context: Context) -> None: else: if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): if context.config.packages: - die("Cannot install packages in extension images without a base tree", - hint="Configure a base tree with the BaseTrees= setting") + die( + "Cannot install packages in extension images without a base tree", + hint="Configure a base tree with the BaseTrees= setting", + ) return with complete_step(f"Installing {str(context.config.distribution).capitalize()}"): @@ -222,10 +225,12 @@ def install_distribution(context: Context) -> None: if context.config.packages: context.config.distribution.install_packages(context, context.config.packages) - for f in ("var/lib/systemd/random-seed", - "var/lib/systemd/credential.secret", - "etc/machine-info", - "var/lib/dbus/machine-id"): + for f in ( + "var/lib/systemd/random-seed", + "var/lib/systemd/credential.secret", + "etc/machine-info", + "var/lib/dbus/machine-id", + ): # Using missing_ok=True still causes an OSError if the mount is read-only even if the # file doesn't exist so do an explicit exists() check first. if (context.root / f).exists(): @@ -276,7 +281,7 @@ def check_root_populated(context: Context) -> None: hint=( "The root must be populated by the distribution, or from base trees, " "skeleton trees, and prepare scripts." 
- ) + ), ) @@ -385,13 +390,11 @@ def configure_autologin(context: Context) -> None: return with complete_step("Setting up autologin…"): - configure_autologin_service(context, "console-getty.service", - "--noclear --keep-baud console 115200,38400,9600") - configure_autologin_service(context, "getty@tty1.service", - "--noclear -") - configure_autologin_service(context, - "serial-getty@hvc0.service", - "--keep-baud 115200,57600,38400,9600 -") + configure_autologin_service( + context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600" + ) + configure_autologin_service(context, "getty@tty1.service", "--noclear -") + configure_autologin_service(context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -") @contextlib.contextmanager @@ -518,7 +521,7 @@ def run_configure_scripts(config: Config) -> Config: ), input=config.to_json(indent=None), stdout=subprocess.PIPE, - ) + ) # fmt: skip config = Config.from_json(result.stdout) @@ -564,7 +567,7 @@ def run_sync_scripts(config: Config) -> None: "--dir", "/work/src", "--chdir", "/work/src", *sources, - ] + ] # fmt: skip if (p := INVOKING_USER.home()).exists() and p != Path("/"): # We use a writable mount here to keep git worktrees working which encode absolute @@ -615,7 +618,7 @@ def script_maybe_chroot_sandbox( ], "mkosi-as-caller": mkosi_as_caller(), **context.config.distribution.package_manager(context.config).scripts(context), - } + } # fmt: skip with finalize_host_scripts(context, helpers) as hd: if script.suffix != ".chroot": @@ -628,7 +631,7 @@ def script_maybe_chroot_sandbox( *context.config.distribution.package_manager(context.config).mounts(context), ], scripts=hd, - ) as sandbox: + ) as sandbox: # fmt: skip yield sandbox else: if suppress_chown: @@ -702,7 +705,7 @@ def run_prepare_scripts(context: Context, build: bool) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/prepare", arg], @@ -779,7 +782,7 @@ def run_build_scripts(context: Context) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/build-script", *cmdline], @@ -845,7 +848,7 @@ def run_postinst_scripts(context: Context) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/postinst", "final"], @@ -911,7 +914,7 @@ def run_finalize_scripts(context: Context) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/finalize"], @@ -922,7 +925,7 @@ def run_finalize_scripts(context: Context) -> None: script=script, options=options, network=context.config.with_network, - ) + ), ) @@ -967,10 +970,10 @@ def run_postoutput_scripts(context: Context) -> None: "--dir", "/work/out", "--become-root", *sources, - ] + ], ), stdin=sys.stdin, - ) + ) # fmt: skip def install_tree( @@ -992,7 +995,8 @@ def install_tree( def copy() -> None: copy_tree( - src, t, + src, + t, preserve=preserve, use_subvolumes=config.use_subvolumes, sandbox=config.sandbox, @@ -1015,7 +1019,7 @@ def copy() -> None: "--bind", t.parent, workdir(t.parent), ], ), - ) + ) # fmt: skip else: # If we get an unknown file without a target, we just copy it into /. 
copy() @@ -1050,11 +1054,12 @@ def install_sandbox_trees(config: Config, dst: Path) -> None: if (p := config.tools() / "etc/crypto-policies").exists(): copy_tree( - p, dst / "etc/crypto-policies", + p, + dst / "etc/crypto-policies", preserve=False, dereference=True, sandbox=config.sandbox, - ) + ) # fmt: skip if not config.sandbox_trees: return @@ -1091,7 +1096,8 @@ def install_build_dest(context: Context) -> None: with complete_step("Copying in build tree…"): copy_tree( - context.install_dir, context.root, + context.install_dir, + context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -1203,7 +1209,7 @@ def finalize_default_initrd( "--selinux-relabel", str(relabel), *(["-f"] * args.force), "--include=mkosi-initrd", - ] + ] # fmt: skip _, [config] = parse_config(cmdline + ["build"], resources=resources) @@ -1353,9 +1359,11 @@ def build_kernel_modules_initrd(context: Context, kver: str) -> Path: return kmods make_cpio( - context.root, kmods, + context.root, + kmods, files=gen_required_kernel_modules( - context.root, kver, + context.root, + kver, include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_initrd_include, @@ -1366,7 +1374,6 @@ def build_kernel_modules_initrd(context: Context, kver: str) -> Path: sandbox=context.sandbox, ) - if context.config.distribution.is_apt_distribution(): # Ubuntu Focal's kernel does not support zstd-compressed initrds so use xz instead. if context.config.distribution == Distribution.ubuntu and context.config.release == "focal": @@ -1395,7 +1402,7 @@ def join_initrds(initrds: Sequence[Path], output: Path) -> Path: for p in initrds: initrd = p.read_bytes() n = len(initrd) - padding = b'\0' * (round_up(n, 4) - n) # pad to 32 bit alignment + padding = b"\0" * (round_up(n, 4) - n) # pad to 32 bit alignment seq.write(initrd) seq.write(padding) @@ -1404,12 +1411,9 @@ def join_initrds(initrds: Sequence[Path], output: Path) -> Path: def want_signed_pcrs(config: Config) -> bool: - return ( - config.sign_expected_pcr == ConfigFeature.enabled or - ( - config.sign_expected_pcr == ConfigFeature.auto and - config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None - ) + return config.sign_expected_pcr == ConfigFeature.enabled or ( + config.sign_expected_pcr == ConfigFeature.auto + and config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None ) @@ -1442,14 +1446,14 @@ def build_uki( "--output", workdir(output), "--efi-arch", arch, "--uname", kver, - ] + ] # fmt: skip options: list[PathString] = [ "--bind", output.parent, workdir(output.parent), "--ro-bind", context.workspace / "cmdline", context.workspace / "cmdline", "--ro-bind", context.root / "usr/lib/os-release", context.root / "usr/lib/os-release", "--ro-bind", stub, stub, - ] + ] # fmt: skip if context.config.secure_boot: assert context.config.secure_boot_key @@ -1460,14 +1464,12 @@ def build_uki( if context.config.secure_boot_sign_tool != SecureBootSignTool.pesign: cmd += [ "--signtool", "sbsign", - "--secureboot-private-key", - context.config.secure_boot_key, - "--secureboot-certificate", - context.config.secure_boot_certificate, - ] + "--secureboot-private-key", context.config.secure_boot_key, + "--secureboot-certificate", context.config.secure_boot_certificate, + ] # fmt: skip options += [ "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, - ] + ] # fmt: skip if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += 
["--signing-engine", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): @@ -1480,7 +1482,7 @@ def build_uki( context.workspace / "pesign", "--secureboot-certificate-name", certificate_common_name(context, context.config.secure_boot_certificate), - ] + ] # fmt: skip options += ["--ro-bind", context.workspace / "pesign", context.workspace / "pesign"] if want_signed_pcrs(context.config): @@ -1489,17 +1491,17 @@ def build_uki( # SHA1 might be disabled in OpenSSL depending on the distro so we opt to not sign # for SHA1 to avoid having to manage a bunch of configuration to re-enable SHA1. "--pcr-banks", "sha256", - ] + ] # fmt: skip if context.config.secure_boot_key.exists(): options += ["--bind", context.config.secure_boot_key, context.config.secure_boot_key] if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += [ "--signing-engine", context.config.secure_boot_key_source.source, "--pcr-public-key", context.config.secure_boot_certificate, - ] + ] # fmt: skip options += [ "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, - ] + ] # fmt: skip cmd += ["build", "--linux", kimg] options += ["--ro-bind", kimg, kimg] @@ -1511,9 +1513,10 @@ def build_uki( python_binary(context.config, binary=ukify), ukify, sandbox=context.sandbox, - ) >= "256" and - (version := systemd_stub_version(context, stub)) and - version >= "256" + ) + >= "256" + and (version := systemd_stub_version(context, stub)) + and version >= "256" ): for microcode in microcodes: cmd += ["--microcode", microcode] @@ -1565,21 +1568,26 @@ def systemd_stub_version(context: Context, stub: Path) -> Optional[GenericVersio def want_uki(context: Context) -> bool: return want_efi(context.config) and ( - context.config.bootloader == Bootloader.uki or - context.config.unified_kernel_images == ConfigFeature.enabled or ( - context.config.unified_kernel_images == ConfigFeature.auto and - systemd_stub_binary(context).exists() and - context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None - ) + context.config.bootloader == Bootloader.uki + or context.config.unified_kernel_images == ConfigFeature.enabled + or ( + context.config.unified_kernel_images == ConfigFeature.auto + and systemd_stub_binary(context).exists() + and context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None + ) ) def find_entry_token(context: Context) -> str: if ( - not context.config.find_binary("kernel-install") or - "--version" not in run(["kernel-install", "--help"], - stdout=subprocess.PIPE, sandbox=context.sandbox(binary="kernel-install")).stdout or - systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1" + not context.config.find_binary("kernel-install") + or ( + "--version" + not in run( + ["kernel-install", "--help"], stdout=subprocess.PIPE, sandbox=context.sandbox(binary="kernel-install") + ).stdout + ) + or systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1" ): return context.config.image_id or context.config.distribution.name @@ -1654,10 +1662,10 @@ def install_type1( with umask(~0o600): if ( - want_efi(context.config) and - context.config.secure_boot and - context.config.shim_bootloader != ShimBootloader.signed and - KernelType.identify(context.config, kimg) == KernelType.pe + want_efi(context.config) + and context.config.secure_boot + and context.config.shim_bootloader != ShimBootloader.signed + and KernelType.identify(context.config, kimg) == KernelType.pe ): kimg = sign_efi_binary(context, kimg, 
dst / "vmlinuz") else: @@ -1689,9 +1697,9 @@ def install_type1( assert config if ( - not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line) and - not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line) and - (root := finalize_root(partitions)) + not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line) + and not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line) + and (root := finalize_root(partitions)) ): cmdline = [root] + cmdline @@ -1727,7 +1735,7 @@ def expand_kernel_specifiers(text: str, kver: str, token: str, roothash: str, bo "e": token, "k": kver, "h": roothash, - "c": boot_count + "c": boot_count, } def replacer(match: re.Match[str]) -> str: @@ -1842,9 +1850,9 @@ def install_kernel(context: Context, partitions: Sequence[Partition]) -> None: return if context.config.bootable == ConfigFeature.auto and ( - context.config.output_format == OutputFormat.cpio or - context.config.output_format.is_extension_image() or - context.config.overlay + context.config.output_format == OutputFormat.cpio + or context.config.output_format.is_extension_image() + or context.config.overlay ): return @@ -1895,7 +1903,8 @@ def maybe_compress(context: Context, compression: Compression, src: Path, dst: O if not compression or src.is_dir(): if dst: move_tree( - src, dst, + src, + dst, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -1908,7 +1917,7 @@ def maybe_compress(context: Context, compression: Compression, src: Path, dst: O with complete_step(f"Compressing {src} with {compression}"): with src.open("rb") as i: - src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. + src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. 
with dst.open("wb") as o: run(cmd, stdin=i, stdout=o, sandbox=context.sandbox(binary=cmd[0])) @@ -1927,13 +1936,11 @@ def copy_uki(context: Context) -> None: reverse=True, ) - if ( - (uki := context.root / efi_boot_binary(context)).exists() and + if (uki := context.root / efi_boot_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass - elif ( - (uki := context.root / shim_second_stage_binary(context)).exists() and + elif (uki := context.root / shim_second_stage_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass @@ -2013,7 +2020,8 @@ def calculate_signature(context: Context) -> None: cmdline += ["--default-key", context.config.key] cmdline += [ - "--output", workdir(context.staging / context.config.output_signature), + "--output", + workdir(context.staging / context.config.output_signature), workdir(context.staging / context.config.output_checksum), ] @@ -2029,16 +2037,16 @@ def calculate_signature(context: Context) -> None: "--bind", home, home, "--bind", context.staging, workdir(context.staging), "--bind", "/run", "/run", - ] + ] # fmt: skip - with (complete_step("Signing SHA256SUMS…")): + with complete_step("Signing SHA256SUMS…"): run( cmdline, env=env, sandbox=context.sandbox( binary="gpg", options=options, - ) + ), ) @@ -2064,12 +2072,12 @@ def save_manifest(context: Context, manifest: Optional[Manifest]) -> None: if manifest.has_data(): if ManifestFormat.json in context.config.manifest_format: with complete_step(f"Saving manifest {context.config.output_manifest}"): - with open(context.staging / context.config.output_manifest, 'w') as f: + with open(context.staging / context.config.output_manifest, "w") as f: manifest.write_json(f) if ManifestFormat.changelog in context.config.manifest_format: with complete_step(f"Saving report {context.config.output_changelog}"): - with open(context.staging / context.config.output_changelog, 'w') as f: + with open(context.staging / context.config.output_changelog, "w") as f: manifest.write_package_report(f) @@ -2088,7 +2096,7 @@ def cache_tree_paths(config: Config) -> tuple[Path, Path, Path]: if config.image: fragments += [config.image] - key = '~'.join(str(s) for s in fragments) + key = "~".join(str(s) for s in fragments) assert config.cache_dir return ( @@ -2148,12 +2156,16 @@ def check_inputs(config: Config) -> None: die(f"{script} is not executable") if config.secure_boot and not config.secure_boot_key: - die("SecureBoot= is enabled but no secure boot key is configured", - hint="Run mkosi genkey to generate a secure boot key/certificate pair") + die( + "SecureBoot= is enabled but no secure boot key is configured", + hint="Run mkosi genkey to generate a secure boot key/certificate pair", + ) if config.secure_boot and not config.secure_boot_certificate: - die("SecureBoot= is enabled but no secure boot key is configured", - hint="Run mkosi genkey to generate a secure boot key/certificate pair") + die( + "SecureBoot= is enabled but no secure boot key is configured", + hint="Run mkosi genkey to generate a secure boot key/certificate pair", + ) def check_tool(config: Config, *tools: PathString, reason: str, hint: Optional[str] = None) -> Path: @@ -2175,8 +2187,10 @@ def check_systemd_tool( v = systemd_tool_version(tool, sandbox=config.sandbox) if v < version: - die(f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.", - hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.") + die( + f"Found '{tool}' with 
version {v} but version {version} or newer is required to {reason}.", + hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.", + ) def check_ukify( @@ -2189,8 +2203,10 @@ def check_ukify( v = systemd_tool_version(python_binary(config, binary=ukify), ukify, sandbox=config.sandbox) if v < version: - die(f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.", - hint="Use ToolsTree=default to get a newer version of 'ukify'.") + die( + f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.", + hint="Use ToolsTree=default to get a newer version of 'ukify'.", + ) def check_tools(config: Config, verb: Verb) -> None: @@ -2204,7 +2220,7 @@ def check_tools(config: Config, verb: Verb) -> None: version="254", reason="build bootable images", hint="Use ToolsTree=default to download most required tools including ukify automatically or use " - "Bootable=no to create a non-bootable image which doesn't require ukify", + "Bootable=no to create a non-bootable image which doesn't require ukify", ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): @@ -2316,9 +2332,9 @@ def configure_initrd(context: Context) -> None: return if ( - not (context.root / "init").exists() and - not (context.root / "init").is_symlink() and - (context.root / "usr/lib/systemd/systemd").exists() + not (context.root / "init").exists() + and not (context.root / "init").is_symlink() + and (context.root / "usr/lib/systemd/systemd").exists() ): (context.root / "init").symlink_to("/usr/lib/systemd/systemd") @@ -2351,18 +2367,15 @@ def run_depmod(context: Context, *, cache: bool = False) -> None: for kver, _ in gen_kernel_images(context): modulesd = context.root / "usr/lib/modules" / kver - if ( - not cache and - not context.config.kernel_modules_exclude and - all((modulesd / o).exists() for o in outputs) - ): + if not cache and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs): mtime = (modulesd / "modules.dep").stat().st_mtime if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")): continue if not cache: process_kernel_modules( - context.root, kver, + context.root, + kver, include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_include, @@ -2384,8 +2397,10 @@ def run_sysusers(context: Context) -> None: return with complete_step("Generating system users"): - run(["systemd-sysusers", "--root=/buildroot"], - sandbox=context.sandbox(binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"])) + run( + ["systemd-sysusers", "--root=/buildroot"], + sandbox=context.sandbox(binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"]), + ) def run_tmpfiles(context: Context) -> None: @@ -2428,7 +2443,7 @@ def run_tmpfiles(context: Context) -> None: "--become-root", ], ), - ) + ) # fmt: skip def run_preset(context: Context) -> None: @@ -2440,10 +2455,14 @@ def run_preset(context: Context) -> None: return with complete_step("Applying presets…"): - run(["systemctl", "--root=/buildroot", "preset-all"], - sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"])) - run(["systemctl", "--root=/buildroot", "--global", "preset-all"], - sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"])) + run( + ["systemctl", "--root=/buildroot", "preset-all"], + sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]), + ) + run( + ["systemctl", 
"--root=/buildroot", "--global", "preset-all"], + sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]), + ) def run_hwdb(context: Context) -> None: @@ -2455,8 +2474,10 @@ def run_hwdb(context: Context) -> None: return with complete_step("Generating hardware database"): - run(["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"], - sandbox=context.sandbox(binary="systemd-hwdb", options=["--bind", context.root, "/buildroot"])) + run( + ["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"], + sandbox=context.sandbox(binary="systemd-hwdb", options=["--bind", context.root, "/buildroot"]), + ) # Remove any existing hwdb in /etc in favor of the one we just put in /usr. (context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True) @@ -2487,7 +2508,7 @@ def run_firstboot(context: Context) -> None: ("--hostname", None, context.config.hostname), ("--root-password-hashed", "passwd.hashed-password.root", password), ("--root-shell", "passwd.shell.root", context.config.root_shell), - ) + ) # fmt: skip options = [] creds = [] @@ -2506,8 +2527,10 @@ def run_firstboot(context: Context) -> None: return with complete_step("Applying first boot settings"): - run(["systemd-firstboot", "--root=/buildroot", "--force", *options], - sandbox=context.sandbox(binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"])) + run( + ["systemd-firstboot", "--root=/buildroot", "--force", *options], + sandbox=context.sandbox(binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"]), + ) # Initrds generally don't ship with only /usr so there's not much point in putting the # credentials in /usr/lib/credstore. @@ -2529,9 +2552,11 @@ def run_selinux_relabel(context: Context) -> None: binpolicy = Path("/buildroot") / binpolicy.relative_to(context.root) with complete_step(f"Relabeling files using {policy} policy"): - run([setfiles, "-mFr", "/buildroot", "-c", binpolicy, fc, "/buildroot"], + run( + [setfiles, "-mFr", "/buildroot", "-c", binpolicy, fc, "/buildroot"], sandbox=context.sandbox(binary=setfiles, options=["--bind", context.root, "/buildroot"]), - check=context.config.selinux_relabel == ConfigFeature.enabled) + check=context.config.selinux_relabel == ConfigFeature.enabled, + ) def need_build_overlay(config: Config) -> bool: @@ -2548,7 +2573,8 @@ def save_cache(context: Context) -> None: rmtree(final, sandbox=context.sandbox) move_tree( - context.root, final, + context.root, + final, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -2556,7 +2582,8 @@ def save_cache(context: Context) -> None: if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists(): rmtree(build, sandbox=context.sandbox) move_tree( - context.workspace / "build-overlay", build, + context.workspace / "build-overlay", + build, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -2590,8 +2617,12 @@ def have_cache(config: Config) -> bool: if prev != json.loads(new): logging.info("Cache manifest mismatch, not reusing cached images") if ARG_DEBUG.get(): - run(["diff", manifest, "-"], input=new, check=False, - sandbox=config.sandbox(binary="diff", options=["--bind", manifest, manifest])) + run( + ["diff", manifest, "-"], + input=new, + check=False, + sandbox=config.sandbox(binary="diff", options=["--bind", manifest, manifest]), + ) return False else: @@ -2612,7 +2643,8 @@ def reuse_cache(context: Context) -> bool: with complete_step("Copying cached trees"): copy_tree( - final, 
context.root, + final, + context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -2666,13 +2698,13 @@ def make_image( f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed), workdir(context.staging / context.config.output_with_format), - ] + ] # fmt: skip options: list[PathString] = [ # Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. "--become-root", "--bind", context.staging, workdir(context.staging), - ] + ] # fmt: skip if root: cmdline += ["--root=/buildroot"] @@ -2720,8 +2752,8 @@ def make_image( sandbox=context.sandbox( binary="systemd-repart", devices=( - not context.config.repart_offline or - context.config.verity_key_source.type != KeySourceType.file + not context.config.repart_offline + or context.config.verity_key_source.type != KeySourceType.file ), options=options, ), @@ -2761,11 +2793,10 @@ def make_disk( else: bootloader = None - esp = ( - context.config.bootable == ConfigFeature.enabled or - (context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists()) + esp = context.config.bootable == ConfigFeature.enabled or ( + context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists() ) - bios = (context.config.bootable != ConfigFeature.disabled and want_grub_bios(context)) + bios = context.config.bootable != ConfigFeature.disabled and want_grub_bios(context) if esp or bios: # Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds @@ -2884,10 +2915,14 @@ def make_oci(context: Context, root_layer: Path, dst: Path) -> None: ], "annotations": { "io.systemd.mkosi.version": __version__, - **({ - "org.opencontainers.image.version": context.config.image_version, - } if context.config.image_version else {}), - } + **( + { + "org.opencontainers.image.version": context.config.image_version, + } + if context.config.image_version + else {} + ), + }, } oci_manifest_blob = json.dumps(oci_manifest) oci_manifest_digest = hashlib.sha256(oci_manifest_blob.encode()).hexdigest() @@ -2966,7 +3001,7 @@ def make_extension_image(context: Context, output: Path) -> None: "--size=auto", "--definitions", r, workdir(output), - ] + ] # fmt: skip options: list[PathString] = [ # Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. 
@@ -2974,7 +3009,7 @@ def make_extension_image(context: Context, output: Path) -> None: "--bind", output.parent, workdir(output.parent), "--ro-bind", context.root, "/buildroot", "--ro-bind", r, r, - ] + ] # fmt: skip if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] @@ -3004,8 +3039,8 @@ def make_extension_image(context: Context, output: Path) -> None: sandbox=context.sandbox( binary="systemd-repart", devices=( - not context.config.repart_offline or - context.config.verity_key_source.type != KeySourceType.file + not context.config.repart_offline + or context.config.verity_key_source.type != KeySourceType.file ), options=options, ), @@ -3032,7 +3067,8 @@ def finalize_staging(context: Context) -> None: os.chmod(f, context.config.output_mode) move_tree( - f, context.config.output_dir_or_cwd(), + f, + context.config.output_dir_or_cwd(), use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -3042,7 +3078,7 @@ def clamp_mtime(path: Path, mtime: int) -> None: st = os.stat(path, follow_symlinks=False) orig = (st.st_atime_ns, st.st_mtime_ns) updated = (min(orig[0], mtime * 1_000_000_000), - min(orig[1], mtime * 1_000_000_000)) + min(orig[1], mtime * 1_000_000_000)) # fmt: skip if orig != updated: os.utime(path, ns=updated, follow_symlinks=False) @@ -3065,11 +3101,11 @@ def setup_workspace(args: Args, config: Config) -> Iterator[Path]: with contextlib.ExitStack() as stack: workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace-")) # Discard setuid/setgid bits as these are inherited and can leak into the image. - workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID|stat.S_ISUID)) + workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID | stat.S_ISUID)) stack.callback(lambda: rmtree(workspace, sandbox=config.sandbox)) (workspace / "tmp").mkdir(mode=0o1777) - with scopedenv({"TMPDIR" : os.fspath(workspace / "tmp")}): + with scopedenv({"TMPDIR": os.fspath(workspace / "tmp")}): try: yield Path(workspace) except BaseException: @@ -3110,13 +3146,11 @@ def copy_repository_metadata(config: Config, dst: Path) -> None: exclude: list[PathString] if d == "cache": exclude = flatten( - ("--ro-bind", tmp, p) - for p in config.distribution.package_manager(config).cache_subdirs(src) + ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).cache_subdirs(src) ) else: exclude = flatten( - ("--ro-bind", tmp, p) - for p in config.distribution.package_manager(config).state_subdirs(src) + ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).state_subdirs(src) ) subdst = dst / d / subdir @@ -3170,11 +3204,7 @@ def build_image(context: Context) -> None: wantrepo = ( ( not cached - and ( - context.config.packages - or context.config.build_packages - or context.config.prepare_scripts - ) + and (context.config.packages or context.config.build_packages or context.config.prepare_scripts) ) or context.config.volatile_packages or context.config.postinst_scripts @@ -3288,9 +3318,12 @@ def build_image(context: Context) -> None: context.root.rename(context.staging / context.config.output_with_format) if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): - maybe_compress(context, context.config.compress_output, - context.staging / context.config.output_with_format, - context.staging / context.config.output_with_compression) + maybe_compress( + context, + context.config.compress_output, + context.staging / 
context.config.output_with_format, + context.staging / context.config.output_with_compression, + ) calculate_sha256sum(context) calculate_signature(context) @@ -3357,9 +3390,9 @@ def run_shell(args: Args, config: Config) -> None: # copy to avoid ending up with files not owned by the directory image owner in the # directory image. if config.ephemeral or ( - config.output_format == OutputFormat.directory and - args.verb == Verb.boot and - (config.output_dir_or_cwd() / config.output).stat().st_uid != 0 + config.output_format == OutputFormat.directory + and args.verb == Verb.boot + and (config.output_dir_or_cwd() / config.output).stat().st_uid != 0 ): fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) else: @@ -3385,7 +3418,7 @@ def run_shell(args: Args, config: Config) -> None: devices=True, options=["--bind", fname, fname], ), - ) + ) # fmt: skip if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] @@ -3429,8 +3462,7 @@ def run_shell(args: Args, config: Config) -> None: cmdline += ["--bind", f"{path}:/root:norbind,{uidmap}"] if config.runtime_scratch == ConfigFeature.enabled or ( - config.runtime_scratch == ConfigFeature.auto and - config.output_format == OutputFormat.disk + config.runtime_scratch == ConfigFeature.auto and config.output_format == OutputFormat.disk ): scratch = stack.enter_context(tempfile.TemporaryDirectory(dir="/var/tmp")) os.chmod(scratch, 0o1777) @@ -3447,7 +3479,7 @@ def run_shell(args: Args, config: Config) -> None: cmdline += [ "--bind", f"{addr}:/run/host/journal/socket", "--set-credential=journal.forward_to_socket:/run/host/journal/socket", - ] + ] # fmt: skip for p in config.unit_properties: cmdline += ["--property", p] @@ -3545,7 +3577,8 @@ def run_serve(args: Args, config: Config) -> None: run( [python_binary(config, binary=None), "-m", "http.server", "8081"], - stdin=sys.stdin, stdout=sys.stdout, + stdin=sys.stdin, + stdout=sys.stdout, sandbox=config.sandbox( binary=python_binary(config, binary=None), network=True, @@ -3564,8 +3597,7 @@ def generate_key_cert_pair(args: Args) -> None: for f in ("mkosi.key", "mkosi.crt"): if Path(f).exists() and not args.force: - die(f"{f} already exists", - hint=("To generate new keys, first remove mkosi.key and mkosi.crt")) + die(f"{f} already exists", hint=("To generate new keys, first remove mkosi.key and mkosi.crt")) log_step(f"Generating keys rsa:{keylength} for CN {cn!r}.") logging.info( @@ -3591,7 +3623,7 @@ def generate_key_cert_pair(args: Args) -> None: "-nodes" ], env=dict(OPENSSL_CONF="/dev/null"), - ) + ) # fmt: skip def bump_image_version() -> None: @@ -3630,7 +3662,6 @@ def prepend_to_environ_path(config: Config) -> Iterator[None]: return with tempfile.TemporaryDirectory(prefix="mkosi.path-") as d: - for path in config.extra_search_paths: if not path.is_dir(): (Path(d) / path.name).symlink_to(path.absolute()) @@ -3647,8 +3678,10 @@ def prepend_to_environ_path(config: Config) -> Iterator[None]: def finalize_default_tools(args: Args, config: Config, *, resources: Path) -> Config: if not config.tools_tree_distribution: - die(f"{config.distribution} does not have a default tools tree distribution", - hint="use ToolsTreeDistribution= to set one explicitly") + die( + f"{config.distribution} does not have a default tools tree distribution", + hint="use ToolsTreeDistribution= to set one explicitly", + ) cmdline = [ "--directory", "", @@ -3676,7 +3709,7 @@ def finalize_default_tools(args: Args, config: Config, *, resources: Path) -> Co 
*(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []), *(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []), *(["-f"] * args.force), - ] + ] # fmt: skip _, [tools] = parse_config( cmdline + ["--include=mkosi-tools", "build"], @@ -3693,9 +3726,11 @@ def check_workspace_directory(config: Config) -> None: for tree in config.build_sources: if wd.is_relative_to(tree.source): - die(f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})", + die( + f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})", hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure a different " - "workspace directory") + "workspace directory", + ) def run_clean_scripts(config: Config) -> None: @@ -3741,16 +3776,17 @@ def run_clean_scripts(config: Config) -> None: "--ro-bind", json, "/work/config.json", *(["--bind", str(o), "/work/out"] if (o := config.output_dir_or_cwd()).exists() else []), *sources, - ] + ], ), stdin=sys.stdin, - ) + ) # fmt: skip def needs_build(args: Args, config: Config, force: int = 1) -> bool: return ( - args.force >= force or - not (config.output_dir_or_cwd() / config.output_with_compression).exists() or + args.force >= force + or not (config.output_dir_or_cwd() / config.output_with_compression).exists() + or # When the output is a directory, its name is the same as the symlink we create that points # to the actual output when not building a directory. So if the full output path exists, we # have to check that it's not a symlink as well. @@ -3796,7 +3832,7 @@ def run_clean(args: Args, config: Config, *, resources: Path) -> None: complete_step(f"Removing output files of {config.name()} image…"), flock_or_die(config.output_dir_or_cwd() / config.output) if (config.output_dir_or_cwd() / config.output).exists() - else contextlib.nullcontext() + else contextlib.nullcontext(), ): rmtree(*outputs, sandbox=sandbox) @@ -3825,10 +3861,7 @@ def run_clean(args: Args, config: Config, *, resources: Path) -> None: lock_repository_metadata(config), ): rmtree( - *( - config.package_cache_dir_or_default() / d / subdir - for d in ("cache", "lib") - ), + *(config.package_cache_dir_or_default() / d / subdir for d in ("cache", "lib")), sandbox=sandbox, ) @@ -3852,8 +3885,8 @@ def ensure_directories_exist(config: Config) -> None: st = config.build_dir.stat() # Discard setuid/setgid bits if set as these are inherited and can leak into the image. - if stat.S_IMODE(st.st_mode) & (stat.S_ISGID|stat.S_ISUID): - config.build_dir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID|stat.S_ISUID)) + if stat.S_IMODE(st.st_mode) & (stat.S_ISGID | stat.S_ISUID): + config.build_dir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID | stat.S_ISUID)) def metadata_cache(config: Config) -> Path: @@ -3869,10 +3902,10 @@ def sync_repository_metadata(args: Args, images: Sequence[Config], *, resources: # If we have a metadata cache and any cached image and using cached metadata is not explicitly disabled, reuse the # metadata cache. 
if ( - last.incremental and - metadata_cache(last).exists() and - last.cacheonly != Cacheonly.never and - any(have_cache(config) for config in images) + last.incremental + and metadata_cache(last).exists() + and last.cacheonly != Cacheonly.never + and any(have_cache(config) for config in images) ): with complete_step("Copying cached package manager metadata"): copy_tree(metadata_cache(last), dst, use_subvolumes=last.use_subvolumes, sandbox=last.sandbox) @@ -3936,7 +3969,7 @@ def run_build( unshare(CLONE_NEWNS) if os.getuid() == 0: - mount("", "/", "", MS_SLAVE|MS_REC, "") + mount("", "/", "", MS_SLAVE | MS_REC, "") # For extra safety when running as root, remount a bunch of stuff read-only. # Because some build systems use output directories in /usr, we only remount @@ -3952,7 +3985,7 @@ def run_build( attrs = MOUNT_ATTR_RDONLY if d not in ("/usr", "/opt"): - attrs |= MOUNT_ATTR_NOSUID|MOUNT_ATTR_NODEV|MOUNT_ATTR_NOEXEC + attrs |= MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | MOUNT_ATTR_NOEXEC mount_rbind(d, d, attrs) @@ -4004,16 +4037,12 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: return if all(config == Config.default() for config in images): - die("No configuration found", - hint="Make sure mkosi is run from a directory with configuration files") + die("No configuration found", hint="Make sure mkosi is run from a directory with configuration files") if args.verb == Verb.summary: if args.json: text = json.dumps( - {"Images": [config.to_dict() for config in images]}, - cls=JsonEncoder, - indent=4, - sort_keys=True + {"Images": [config.to_dict() for config in images]}, cls=JsonEncoder, indent=4, sort_keys=True ) else: text = "\n".join(summary(config) for config in images) @@ -4059,13 +4088,15 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: assert args.verb.needs_build() if ( - tools and - not (tools.output_dir_or_cwd() / tools.output).exists() and - args.verb != Verb.build and - not args.force + tools + and not (tools.output_dir_or_cwd() / tools.output).exists() + and args.verb != Verb.build + and not args.force ): - die(f"Default tools tree requested for image '{last.name()}' but it has not been built yet", - hint="Make sure to build the image first with 'mkosi build' or use '--force'") + die( + f"Default tools tree requested for image '{last.name()}' but it has not been built yet", + hint="Make sure to build the image first with 'mkosi build' or use '--force'", + ) if not last.repart_offline and os.getuid() != 0: die(f"Must be root to build {last.name()} image configured with RepartOffline=no") @@ -4077,20 +4108,19 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: return if args.verb != Verb.build and not args.force and not output.exists(): - die(f"Image '{last.name()}' has not been built yet", - hint="Make sure to build the image first with 'mkosi build' or use '--force'") + die( + f"Image '{last.name()}' has not been built yet", + hint="Make sure to build the image first with 'mkosi build' or use '--force'", + ) check_workspace_directory(last) # If we're doing an incremental build and the cache is not out of date, don't clean up the # tools tree so that we can reuse the previous one. 
- if ( - tools and - ( - not tools.incremental or - ((args.verb == Verb.build or args.force > 0) and not have_cache(tools)) or - needs_build(args, tools, force=2) - ) + if tools and ( + not tools.incremental + or ((args.verb == Verb.build or args.force > 0) and not have_cache(tools)) + or needs_build(args, tools, force=2) ): run_clean(args, tools, resources=resources) @@ -4119,7 +4149,7 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: tools.output_dir_or_cwd() / tools.output if tools and config.tools_tree == Path("default") else config.tools_tree - ) + ), ) with prepend_to_environ_path(config): @@ -4173,12 +4203,14 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: return if ( - last.output_format == OutputFormat.directory and - (last.output_dir_or_cwd() / last.output).stat().st_uid == 0 and - os.getuid() != 0 + last.output_format == OutputFormat.directory + and (last.output_dir_or_cwd() / last.output).stat().st_uid == 0 + and os.getuid() != 0 ): - die("Cannot operate on directory images built as root when running unprivileged", - hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image") + die( + "Cannot operate on directory images built as root when running unprivileged", + hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image", + ) with prepend_to_environ_path(last): run_vm = { diff --git a/mkosi/archive.py b/mkosi/archive.py index 0ba828aa1..6ad8df48a 100644 --- a/mkosi/archive.py +++ b/mkosi/archive.py @@ -20,7 +20,7 @@ def tar_exclude_apivfs_tmp() -> list[str]: "--exclude", "./tmp/*", "--exclude", "./run/*", "--exclude", "./var/tmp/*", - ] + ] # fmt: skip def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> None: @@ -50,7 +50,7 @@ def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> N stdout=f, # Make sure tar uses user/group information from the root directory instead of the host. sandbox=sandbox(binary="tar", options=["--ro-bind", src, src, *finalize_passwd_mounts(src)]), - ) + ) # fmt: skip def can_extract_tar(src: Path) -> bool: @@ -92,9 +92,9 @@ def extract_tar( sandbox=sandbox( binary="tar", # Make sure tar uses user/group information from the root directory instead of the host. - options=["--ro-bind", src, src, "--bind", dst, dst, *finalize_passwd_mounts(dst)] + options=["--ro-bind", src, src, "--bind", dst, dst, *finalize_passwd_mounts(dst)], ), - ) + ) # fmt: skip def make_cpio( @@ -128,4 +128,4 @@ def make_cpio( input="\0".join(os.fspath(f) for f in files), stdout=f, sandbox=sandbox(binary="cpio", options=["--ro-bind", src, src, *finalize_passwd_mounts(src)]), - ) + ) # fmt: skip diff --git a/mkosi/backport.py b/mkosi/backport.py index 2a542c4e7..e03fcf42c 100644 --- a/mkosi/backport.py +++ b/mkosi/backport.py @@ -16,7 +16,7 @@ @contextlib.contextmanager def _tempfile( reader, - suffix='', + suffix="", # gh-93353: Keep a reference to call os.remove() in late Python # finalization. 
*, @@ -38,10 +38,12 @@ def _tempfile( except FileNotFoundError: pass + @no_type_check def _temp_file(path): return _tempfile(path.read_bytes, suffix=path.name) + @no_type_check def _is_present_dir(path) -> bool: """ @@ -55,6 +57,7 @@ def _is_present_dir(path) -> bool: return path.is_dir() return False + @no_type_check @functools.singledispatch def as_file(path): @@ -64,6 +67,7 @@ def as_file(path): """ return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) + @no_type_check @contextlib.contextmanager def _temp_path(dir: tempfile.TemporaryDirectory): @@ -73,6 +77,7 @@ def _temp_path(dir: tempfile.TemporaryDirectory): with dir as result: yield Path(result) + @no_type_check @contextlib.contextmanager def _temp_dir(path): @@ -84,6 +89,7 @@ def _temp_dir(path): with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: yield _write_contents(temp_dir, path) + @no_type_check def _write_contents(target, source): child = target.joinpath(source.name) diff --git a/mkosi/bootloader.py b/mkosi/bootloader.py index c61adc2b8..2fdd8afcb 100644 --- a/mkosi/bootloader.py +++ b/mkosi/bootloader.py @@ -48,9 +48,8 @@ def want_efi(config: Config) -> bool: return False if ( - (config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay) - and config.bootable == ConfigFeature.auto - ): + config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay + ) and config.bootable == ConfigFeature.auto: return False if config.architecture.to_efi() is None: @@ -181,7 +180,7 @@ def grub_mkimage( with ( complete_step(f"Generating grub image for {target}"), - tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig + tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig, ): earlyconfig.write( textwrap.dedent( @@ -236,16 +235,16 @@ def grub_mkimage( *(["--ro-bind", str(sbat), str(sbat)] if sbat else []), ], ), - ) + ) # fmt: skip def find_signed_grub_image(context: Context) -> Optional[Path]: arch = context.config.architecture.to_efi() patterns = [ - f"usr/lib/grub/*-signed/grub{arch}.efi.signed", # Debian/Ubuntu - f"boot/efi/EFI/*/grub{arch}.efi", # Fedora/CentOS - "usr/share/efi/*/grub.efi", # OpenSUSE + f"usr/lib/grub/*-signed/grub{arch}.efi.signed", # Debian/Ubuntu + f"boot/efi/EFI/*/grub{arch}.efi", # Fedora/CentOS + "usr/share/efi/*/grub.efi", # OpenSUSE ] for p in flatten(context.root.glob(pattern) for pattern in patterns): @@ -260,9 +259,9 @@ def find_signed_grub_image(context: Context) -> Optional[Path]: def python_binary(config: Config, *, binary: Optional[PathString]) -> PathString: tools = ( - not binary or - not (path := config.find_binary(binary)) or - not any(path.is_relative_to(d) for d in config.extra_search_paths) + not binary + or not (path := config.find_binary(binary)) + or not any(path.is_relative_to(d) for d in config.extra_search_paths) ) # If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. 
If there is a tools @@ -393,7 +392,7 @@ def grub_bios_setup(context: Context, partitions: Sequence[Partition]) -> None: "--bind", mountinfo.name, "/proc/self/mountinfo", ], ), - ) + ) # fmt: skip def efi_boot_binary(context: Context) -> Path: @@ -423,7 +422,7 @@ def certificate_common_name(context: Context, certificate: Path) -> str: ], stdout=subprocess.PIPE, sandbox=context.sandbox(binary="openssl", options=["--ro-bind", certificate, certificate]), - ).stdout + ).stdout # fmt: skip for line in output.splitlines(): if not line.strip().startswith("commonName"): @@ -438,7 +437,6 @@ def certificate_common_name(context: Context, certificate: Path) -> str: die(f"Certificate {certificate} is missing Common Name") - def pesign_prepare(context: Context) -> None: assert context.config.secure_boot_key assert context.config.secure_boot_certificate @@ -473,7 +471,7 @@ def pesign_prepare(context: Context) -> None: "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, ], ), - ) + ) # fmt: skip (context.workspace / "pesign").mkdir(exist_ok=True) @@ -492,7 +490,7 @@ def pesign_prepare(context: Context) -> None: "--ro-bind", context.workspace / "pesign", context.workspace / "pesign", ], ), - ) + ) # fmt: skip def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: @@ -500,20 +498,20 @@ def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: assert context.config.secure_boot_certificate if ( - context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or - context.config.secure_boot_sign_tool == SecureBootSignTool.auto and - context.config.find_binary("sbsign") is not None + context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign + or context.config.secure_boot_sign_tool == SecureBootSignTool.auto + and context.config.find_binary("sbsign") is not None ): cmd: list[PathString] = [ "sbsign", "--cert", workdir(context.config.secure_boot_certificate), "--output", workdir(output), - ] + ] # fmt: skip options: list[PathString] = [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), "--ro-bind", input, workdir(input), "--bind", output.parent, workdir(output.parent), - ] + ] # fmt: skip if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--engine", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): @@ -528,12 +526,12 @@ def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: binary="sbsign", options=options, devices=context.config.secure_boot_key_source.type != KeySourceType.file, - ) + ), ) elif ( - context.config.secure_boot_sign_tool == SecureBootSignTool.pesign or - context.config.secure_boot_sign_tool == SecureBootSignTool.auto and - context.config.find_binary("pesign") is not None + context.config.secure_boot_sign_tool == SecureBootSignTool.pesign + or context.config.secure_boot_sign_tool == SecureBootSignTool.auto + and context.config.find_binary("pesign") is not None ): pesign_prepare(context) run( @@ -554,7 +552,7 @@ def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: "--bind", output.parent, workdir(output), ] ), - ) + ) # fmt: skip else: die("One of sbsign or pesign is required to use SecureBoot=") @@ -616,7 +614,7 @@ def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]: for kver in sorted( (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()), key=lambda k: GenericVersion(k.name), - reverse=True + reverse=True, ): # Make 
sure we look for anything that remotely resembles vmlinuz, as # the arch specific install scripts in the kernel source tree sometimes @@ -654,13 +652,15 @@ def install_systemd_boot(context: Context) -> None: signed = context.config.shim_bootloader == ShimBootloader.signed if not directory.glob("*.efi.signed" if signed else "*.efi"): if context.config.bootable == ConfigFeature.enabled: - die(f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}" - f"systemd-boot binary was not found at {directory.relative_to(context.root)}") + die( + f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}" + f"systemd-boot binary was not found at {directory.relative_to(context.root)}" + ) return if context.config.secure_boot and not signed: with complete_step("Signing systemd-boot binaries…"): - for input in itertools.chain(directory.glob('*.efi'), directory.glob('*.EFI')): + for input in itertools.chain(directory.glob("*.efi"), directory.glob("*.EFI")): output = directory / f"{input}.signed" sign_efi_binary(context, input, output) @@ -707,7 +707,7 @@ def install_systemd_boot(context: Context) -> None: "--bind", context.workspace, workdir(context.workspace), ], ), - ) + ) # fmt: skip with umask(~0o600): run( @@ -725,7 +725,7 @@ def install_systemd_boot(context: Context) -> None: "--ro-bind", context.workspace / "mkosi.der", workdir(context.workspace / "mkosi.der"), ] ), - ) + ) # fmt: skip # We reuse the key for all secure boot databases to keep things simple. for db in ["PK", "KEK", "db"]: @@ -736,21 +736,21 @@ def install_systemd_boot(context: Context) -> None: "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS", "--cert", workdir(context.config.secure_boot_certificate), "--output", workdir(keys / f"{db}.auth"), - ] + ] # fmt: skip options: list[PathString] = [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), "--ro-bind", context.workspace / "mkosi.esl", workdir(context.workspace / "mkosi.esl"), "--bind", keys, workdir(keys), - ] + ] # fmt: skip if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--engine", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): - cmd += ["--key", workdir(context.config.secure_boot_key),] + cmd += ["--key", workdir(context.config.secure_boot_key)] options += [ "--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key), - ] + ] # fmt: skip else: cmd += ["--key", context.config.secure_boot_key] cmd += [db, workdir(context.workspace / "mkosi.esl")] @@ -781,31 +781,31 @@ def install_shim(context: Context) -> None: arch = context.config.architecture.to_efi() signed = [ - f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu - f"usr/lib/shim/shim{arch}.efi.signed", # Debian - f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS - "usr/share/efi/*/shim.efi", # OpenSUSE + f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu + f"usr/lib/shim/shim{arch}.efi.signed", # Debian + f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS + "usr/share/efi/*/shim.efi", # OpenSUSE ] unsigned = [ - f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu - f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS - f"usr/share/shim/shim{arch}.efi", # Arch + f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu + f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS + f"usr/share/shim/shim{arch}.efi", # Arch ] find_and_install_shim_binary(context, "shim", signed, 
unsigned, dst) signed = [ - f"usr/lib/shim/mm{arch}.efi.signed", # Debian - f"usr/lib/shim/mm{arch}.efi", # Ubuntu - f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS - "usr/share/efi/*/MokManager.efi", # OpenSUSE + f"usr/lib/shim/mm{arch}.efi.signed", # Debian + f"usr/lib/shim/mm{arch}.efi", # Ubuntu + f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS + "usr/share/efi/*/MokManager.efi", # OpenSUSE ] unsigned = [ - f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu - f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS - f"usr/share/shim/mm{arch}.efi", # Arch + f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu + f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS + f"usr/share/shim/mm{arch}.efi", # Arch ] find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent) diff --git a/mkosi/completion.py b/mkosi/completion.py index 0a96c1a01..72f202e30 100644 --- a/mkosi/completion.py +++ b/mkosi/completion.py @@ -17,8 +17,8 @@ class CompGen(StrEnum): default = enum.auto() - files = enum.auto() - dirs = enum.auto() + files = enum.auto() + dirs = enum.auto() @staticmethod def from_action(action: argparse.Action) -> "CompGen": @@ -81,9 +81,11 @@ def collect_completion_arguments() -> list[CompletionItem]: compgen=CompGen.from_action(action), ) for action in parser._actions - if (action.option_strings and - action.help != argparse.SUPPRESS and - action.dest not in config.SETTINGS_LOOKUP_BY_DEST) + if ( + action.option_strings + and action.help != argparse.SUPPRESS + and action.dest not in config.SETTINGS_LOOKUP_BY_DEST + ) ] options += [ @@ -107,8 +109,9 @@ def to_bash_array(name: str, entries: Iterable[str]) -> str: def to_bash_hasharray(name: str, entries: Mapping[str, Union[str, int]]) -> str: return ( - f"{name.replace('-', '_')}=(" + - " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + ")" + f"{name.replace('-', '_')}=(" + + " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + + ")" ) completion = resources / "completion.bash" @@ -151,7 +154,7 @@ def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> c.write("complete -c mkosi -n '__fish_is_first_token' -a \"") c.write(" ".join(str(v) for v in config.Verb)) - c.write("\"\n") + c.write('"\n') for option in options: if not option.short and not option.long: @@ -165,12 +168,12 @@ def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> if isinstance(option.nargs, int) and option.nargs > 0: c.write("-r ") if option.choices: - c.write("-a \"") + c.write('-a "') c.write(" ".join(option.choices)) - c.write("\" ") + c.write('" ') if option.help is not None: help = option.help.replace("'", "\\'") - c.write(f"-d \"{help}\" ") + c.write(f'-d "{help}" ') c.write(option.compgen.to_fish()) c.write("\n") @@ -225,7 +228,7 @@ def print_completion(args: config.Args, *, resources: Path) -> None: if not args.cmdline: die( "No shell to generate completion script for specified", - hint="Please specify either one of: bash, fish, zsh" + hint="Please specify either one of: bash, fish, zsh", ) shell = args.cmdline[0] @@ -237,8 +240,7 @@ def print_completion(args: config.Args, *, resources: Path) -> None: func = finalize_completion_zsh else: die( - f"{shell!r} is not supported for completion scripts.", - hint="Please specify either one of: bash, fish, zsh" + f"{shell!r} is not supported for completion scripts.", hint="Please specify either one of: bash, fish, zsh" ) completion_args = collect_completion_arguments() diff --git a/mkosi/config.py 
b/mkosi/config.py index c17889d07..3306f37a0 100644 --- a/mkosi/config.py +++ b/mkosi/config.py @@ -57,25 +57,25 @@ class Verb(StrEnum): - build = enum.auto() - clean = enum.auto() - summary = enum.auto() - cat_config = enum.auto() - shell = enum.auto() - boot = enum.auto() - qemu = enum.auto() - ssh = enum.auto() - serve = enum.auto() - bump = enum.auto() - help = enum.auto() - genkey = enum.auto() + build = enum.auto() + clean = enum.auto() + summary = enum.auto() + cat_config = enum.auto() + shell = enum.auto() + boot = enum.auto() + qemu = enum.auto() + ssh = enum.auto() + serve = enum.auto() + bump = enum.auto() + help = enum.auto() + genkey = enum.auto() documentation = enum.auto() - journalctl = enum.auto() - coredumpctl = enum.auto() - burn = enum.auto() - dependencies = enum.auto() - completion = enum.auto() - sysupdate = enum.auto() + journalctl = enum.auto() + coredumpctl = enum.auto() + burn = enum.auto() + dependencies = enum.auto() + completion = enum.auto() + sysupdate = enum.auto() def supports_cmdline(self) -> bool: return self in ( @@ -117,8 +117,8 @@ def needs_config(self) -> bool: class ConfigFeature(StrEnum): - auto = enum.auto() - enabled = enum.auto() + auto = enum.auto() + enabled = enum.auto() disabled = enum.auto() def to_tristate(self) -> str: @@ -168,23 +168,23 @@ def format(cls, cid: int) -> str: class SecureBootSignTool(StrEnum): - auto = enum.auto() + auto = enum.auto() sbsign = enum.auto() pesign = enum.auto() class OutputFormat(StrEnum): - confext = enum.auto() - cpio = enum.auto() + confext = enum.auto() + cpio = enum.auto() directory = enum.auto() - disk = enum.auto() - esp = enum.auto() - none = enum.auto() - portable = enum.auto() - sysext = enum.auto() - tar = enum.auto() - uki = enum.auto() - oci = enum.auto() + disk = enum.auto() + esp = enum.auto() + none = enum.auto() + portable = enum.auto() + sysext = enum.auto() + tar = enum.auto() + uki = enum.auto() + oci = enum.auto() def extension(self) -> str: return { @@ -196,7 +196,7 @@ def extension(self) -> str: OutputFormat.sysext: ".raw", OutputFormat.tar: ".tar", OutputFormat.uki: ".efi", - }.get(self, "") + }.get(self, "") # fmt: skip def use_outer_compression(self) -> bool: return self in (OutputFormat.tar, OutputFormat.cpio, OutputFormat.disk) or self.is_extension_image() @@ -206,11 +206,12 @@ def is_extension_image(self) -> bool: class ManifestFormat(StrEnum): - json = enum.auto() # the standard manifest in json format + json = enum.auto() # the standard manifest in json format changelog = enum.auto() # human-readable text file with package changelogs class Compression(StrEnum): + # fmt: off none = enum.auto() zstd = enum.auto() zst = zstd @@ -220,21 +221,20 @@ class Compression(StrEnum): gzip = gz lz4 = enum.auto() lzma = enum.auto() + # fmt: on def __bool__(self) -> bool: return self != Compression.none def extension(self) -> str: - return { - Compression.zstd: ".zst" - }.get(self, f".{self}") + return {Compression.zstd: ".zst"}.get(self, f".{self}") def oci_media_type_suffix(self) -> str: suffix = { Compression.none: "", Compression.gz: "+gzip", Compression.zstd: "+zstd", - }.get(self) + }.get(self) # fmt: skip if not suffix: die(f"Compression {self} not supported for OCI layers") @@ -243,11 +243,11 @@ def oci_media_type_suffix(self) -> str: class DocFormat(StrEnum): - auto = enum.auto() + auto = enum.auto() markdown = enum.auto() - man = enum.auto() - pandoc = enum.auto() - system = enum.auto() + man = enum.auto() + pandoc = enum.auto() + system = enum.auto() @classmethod def all(cls) -> 
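The `# fmt: off`/`# fmt: on` pair and the trailing `# fmt: skip` comments used throughout these hunks are formatter pragmas honored by the ruff formatter: the pair fences off a whole region, while `# fmt: skip` exempts only the single statement it trails. An illustrative sketch (hypothetical names):

    # fmt: off
    SUFFIXES = {
        "zstd": ".zst",
        "gz":   ".gz",   # alignment inside the fenced region is preserved
    }
    # fmt: on
    extension = {"zstd": ".zst"}.get("gz", ".gz")  # fmt: skip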
list["DocFormat"]: @@ -257,10 +257,10 @@ def all(cls) -> list["DocFormat"]: class Bootloader(StrEnum): - none = enum.auto() - uki = enum.auto() + none = enum.auto() + uki = enum.auto() systemd_boot = enum.auto() - grub = enum.auto() + grub = enum.auto() class BiosBootloader(StrEnum): @@ -269,25 +269,25 @@ class BiosBootloader(StrEnum): class ShimBootloader(StrEnum): - none = enum.auto() - signed = enum.auto() + none = enum.auto() + signed = enum.auto() unsigned = enum.auto() class Cacheonly(StrEnum): - always = enum.auto() - auto = enum.auto() - none = auto + always = enum.auto() + auto = enum.auto() + none = auto metadata = enum.auto() - never = enum.auto() + never = enum.auto() class QemuFirmware(StrEnum): - auto = enum.auto() - linux = enum.auto() - uefi = enum.auto() + auto = enum.auto() + linux = enum.auto() + uefi = enum.auto() uefi_secure_boot = enum.auto() - bios = enum.auto() + bios = enum.auto() def is_uefi(self) -> bool: return self in (QemuFirmware.uefi, QemuFirmware.uefi_secure_boot) @@ -295,83 +295,83 @@ def is_uefi(self) -> bool: class Network(StrEnum): interface = enum.auto() - user = enum.auto() - none = enum.auto() + user = enum.auto() + none = enum.auto() class Vmm(StrEnum): - qemu = enum.auto() + qemu = enum.auto() vmspawn = enum.auto() class Architecture(StrEnum): - alpha = enum.auto() - arc = enum.auto() - arm = enum.auto() - arm64 = enum.auto() - ia64 = enum.auto() + alpha = enum.auto() + arc = enum.auto() + arm = enum.auto() + arm64 = enum.auto() + ia64 = enum.auto() loongarch64 = enum.auto() - mips_le = enum.auto() - mips64_le = enum.auto() - parisc = enum.auto() - ppc = enum.auto() - ppc64 = enum.auto() - ppc64_le = enum.auto() - riscv32 = enum.auto() - riscv64 = enum.auto() - s390 = enum.auto() - s390x = enum.auto() - tilegx = enum.auto() - x86 = enum.auto() - x86_64 = enum.auto() + mips_le = enum.auto() + mips64_le = enum.auto() + parisc = enum.auto() + ppc = enum.auto() + ppc64 = enum.auto() + ppc64_le = enum.auto() + riscv32 = enum.auto() + riscv64 = enum.auto() + s390 = enum.auto() + s390x = enum.auto() + tilegx = enum.auto() + x86 = enum.auto() + x86_64 = enum.auto() @staticmethod def from_uname(s: str) -> "Architecture": a = { - "aarch64" : Architecture.arm64, - "aarch64_be" : Architecture.arm64, - "armv8l" : Architecture.arm, - "armv8b" : Architecture.arm, - "armv7ml" : Architecture.arm, - "armv7mb" : Architecture.arm, - "armv7l" : Architecture.arm, - "armv7b" : Architecture.arm, - "armv6l" : Architecture.arm, - "armv6b" : Architecture.arm, - "armv5tl" : Architecture.arm, - "armv5tel" : Architecture.arm, - "armv5tejl" : Architecture.arm, - "armv5tejb" : Architecture.arm, - "armv5teb" : Architecture.arm, - "armv5tb" : Architecture.arm, - "armv4tl" : Architecture.arm, - "armv4tb" : Architecture.arm, - "armv4l" : Architecture.arm, - "armv4b" : Architecture.arm, - "alpha" : Architecture.alpha, - "arc" : Architecture.arc, - "arceb" : Architecture.arc, - "x86_64" : Architecture.x86_64, - "i686" : Architecture.x86, - "i586" : Architecture.x86, - "i486" : Architecture.x86, - "i386" : Architecture.x86, - "ia64" : Architecture.ia64, - "parisc64" : Architecture.parisc, - "parisc" : Architecture.parisc, - "loongarch64" : Architecture.loongarch64, - "mips64" : Architecture.mips64_le, - "mips" : Architecture.mips_le, - "ppc64le" : Architecture.ppc64_le, - "ppc64" : Architecture.ppc64, - "ppc" : Architecture.ppc, - "riscv64" : Architecture.riscv64, - "riscv32" : Architecture.riscv32, - "riscv" : Architecture.riscv64, - "s390x" : Architecture.s390x, - "s390" : 
Architecture.s390, - "tilegx" : Architecture.tilegx, - }.get(s) + "aarch64": Architecture.arm64, + "aarch64_be": Architecture.arm64, + "armv8l": Architecture.arm, + "armv8b": Architecture.arm, + "armv7ml": Architecture.arm, + "armv7mb": Architecture.arm, + "armv7l": Architecture.arm, + "armv7b": Architecture.arm, + "armv6l": Architecture.arm, + "armv6b": Architecture.arm, + "armv5tl": Architecture.arm, + "armv5tel": Architecture.arm, + "armv5tejl": Architecture.arm, + "armv5tejb": Architecture.arm, + "armv5teb": Architecture.arm, + "armv5tb": Architecture.arm, + "armv4tl": Architecture.arm, + "armv4tb": Architecture.arm, + "armv4l": Architecture.arm, + "armv4b": Architecture.arm, + "alpha": Architecture.alpha, + "arc": Architecture.arc, + "arceb": Architecture.arc, + "x86_64": Architecture.x86_64, + "i686": Architecture.x86, + "i586": Architecture.x86, + "i486": Architecture.x86, + "i386": Architecture.x86, + "ia64": Architecture.ia64, + "parisc64": Architecture.parisc, + "parisc": Architecture.parisc, + "loongarch64": Architecture.loongarch64, + "mips64": Architecture.mips64_le, + "mips": Architecture.mips_le, + "ppc64le": Architecture.ppc64_le, + "ppc64": Architecture.ppc64, + "ppc": Architecture.ppc, + "riscv64": Architecture.riscv64, + "riscv32": Architecture.riscv32, + "riscv": Architecture.riscv64, + "s390x": Architecture.s390x, + "s390": Architecture.s390, + "tilegx": Architecture.tilegx, + }.get(s) # fmt: skip if not a: die(f"Architecture {s} is not supported") @@ -380,32 +380,32 @@ def from_uname(s: str) -> "Architecture": def to_efi(self) -> Optional[str]: return { - Architecture.x86_64 : "x64", - Architecture.x86 : "ia32", - Architecture.arm64 : "aa64", - Architecture.arm : "arm", - Architecture.riscv64 : "riscv64", - Architecture.loongarch64 : "loongarch64", - }.get(self) + Architecture.x86_64: "x64", + Architecture.x86: "ia32", + Architecture.arm64: "aa64", + Architecture.arm: "arm", + Architecture.riscv64: "riscv64", + Architecture.loongarch64: "loongarch64", + }.get(self) # fmt: skip def to_qemu(self) -> str: a = { - Architecture.alpha : "alpha", - Architecture.arm : "arm", - Architecture.arm64 : "aarch64", - Architecture.loongarch64 : "loongarch64", - Architecture.mips64_le : "mips", - Architecture.mips_le : "mips", - Architecture.parisc : "hppa", - Architecture.ppc : "ppc", - Architecture.ppc64 : "ppc64", - Architecture.ppc64_le : "ppc64", - Architecture.riscv32 : "riscv32", - Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.x86 : "i386", - Architecture.x86_64 : "x86_64", - }.get(self) + Architecture.alpha: "alpha", + Architecture.arm: "arm", + Architecture.arm64: "aarch64", + Architecture.loongarch64: "loongarch64", + Architecture.mips64_le: "mips", + Architecture.mips_le: "mips", + Architecture.parisc: "hppa", + Architecture.ppc: "ppc", + Architecture.ppc64: "ppc64", + Architecture.ppc64_le: "ppc64", + Architecture.riscv32: "riscv32", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.x86: "i386", + Architecture.x86_64: "x86_64", + }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by QEMU") @@ -414,20 +414,20 @@ def to_qemu(self) -> str: def to_oci(self) -> str: a = { - Architecture.arm : "arm", - Architecture.arm64 : "arm64", - Architecture.loongarch64 : "loong64", - Architecture.mips64_le : "mips64le", - Architecture.mips_le : "mipsle", - Architecture.ppc : "ppc", - Architecture.ppc64 : "ppc64", - Architecture.ppc64_le : "ppc64le", - Architecture.riscv32 : "riscv", - 
Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.x86 : "386", - Architecture.x86_64 : "amd64", - }.get(self) + Architecture.arm: "arm", + Architecture.arm64: "arm64", + Architecture.loongarch64: "loong64", + Architecture.mips64_le: "mips64le", + Architecture.mips_le: "mipsle", + Architecture.ppc: "ppc", + Architecture.ppc64: "ppc64", + Architecture.ppc64_le: "ppc64le", + Architecture.riscv32: "riscv", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.x86: "386", + Architecture.x86_64: "amd64", + }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by OCI") @@ -447,23 +447,22 @@ def supports_smm(self) -> bool: return self.is_x86_variant() def can_kvm(self) -> bool: - return ( - self == Architecture.native() or - (Architecture.native() == Architecture.x86_64 and self == Architecture.x86) + return self == Architecture.native() or ( + Architecture.native() == Architecture.x86_64 and self == Architecture.x86 ) def default_qemu_machine(self) -> str: m = { - Architecture.x86 : "q35", - Architecture.x86_64 : "q35", - Architecture.arm : "virt", - Architecture.arm64 : "virt", - Architecture.s390 : "s390-ccw-virtio", - Architecture.s390x : "s390-ccw-virtio", - Architecture.ppc : "pseries", - Architecture.ppc64 : "pseries", - Architecture.ppc64_le : "pseries", - } + Architecture.x86: "q35", + Architecture.x86_64: "q35", + Architecture.arm: "virt", + Architecture.arm64: "virt", + Architecture.s390: "s390-ccw-virtio", + Architecture.s390x: "s390-ccw-virtio", + Architecture.ppc: "pseries", + Architecture.ppc64: "pseries", + Architecture.ppc64_le: "pseries", + } # fmt: skip if self not in m: die(f"No qemu machine defined for architecture {self}") @@ -472,9 +471,9 @@ def default_qemu_machine(self) -> str: def default_qemu_nic_model(self) -> str: return { - Architecture.s390 : "virtio", - Architecture.s390x : "virtio", - }.get(self, "virtio-net-pci") + Architecture.s390: "virtio", + Architecture.s390x: "virtio", + }.get(self, "virtio-net-pci") # fmt: skip def is_native(self) -> bool: return self == self.native() @@ -503,15 +502,17 @@ def parse_boolean(s: str) -> bool: die(f"Invalid boolean literal: {s!r}") -def parse_path(value: str, - *, - required: bool = True, - resolve: bool = True, - expanduser: bool = True, - expandvars: bool = True, - secret: bool = False, - absolute: bool = False, - constants: Sequence[str] = ()) -> Path: +def parse_path( + value: str, + *, + required: bool = True, + resolve: bool = True, + expanduser: bool = True, + expandvars: bool = True, + secret: bool = False, + absolute: bool = False, + constants: Sequence[str] = (), +) -> Path: if value in constants: return Path(value) @@ -535,10 +536,12 @@ def parse_path(value: str, if secret and path.exists(): mode = path.stat().st_mode & 0o777 if mode & 0o007: - die(textwrap.dedent(f"""\ + die( + textwrap.dedent(f"""\ Permissions of '{path}' of '{mode:04o}' are too open. When creating secret files use an access mode that restricts access to the owner only. 
- """)) + """) + ) return path @@ -552,7 +555,7 @@ def config_parse_key(value: Optional[str], old: Optional[str]) -> Optional[Path] def make_tree_parser(absolute: bool = True, required: bool = False) -> Callable[[str], ConfigTree]: def parse_tree(value: str) -> ConfigTree: - src, sep, tgt = value.partition(':') + src, sep, tgt = value.partition(":") return ConfigTree( source=parse_path(src, required=required), @@ -562,7 +565,9 @@ def parse_tree(value: str) -> ConfigTree: resolve=False, expanduser=False, absolute=absolute, - ) if sep else None, + ) + if sep + else None, ) return parse_tree @@ -764,8 +769,8 @@ def config_default_repository_key_fetch(namespace: argparse.Namespace) -> bool: return cast( bool, - (namespace.tools_tree_distribution == Distribution.ubuntu and namespace.distribution.is_rpm_distribution()) or - namespace.tools_tree_distribution.is_rpm_distribution() + (namespace.tools_tree_distribution == Distribution.ubuntu and namespace.distribution.is_rpm_distribution()) + or namespace.tools_tree_distribution.is_rpm_distribution(), ) @@ -830,11 +835,9 @@ def config_match_enum(match: str, value: StrEnum) -> bool: return config_match_enum -def config_make_list_parser(delimiter: str, - *, - parse: Callable[[str], Any] = str, - unescape: bool = False, - reset: bool = True) -> ConfigParseCallback: +def config_make_list_parser( + delimiter: str, *, parse: Callable[[str], Any] = str, unescape: bool = False, reset: bool = True +) -> ConfigParseCallback: def config_parse_list(value: Optional[str], old: Optional[list[Any]]) -> Optional[list[Any]]: new = old.copy() if old else [] @@ -888,12 +891,14 @@ def config_match_version(match: str, value: str) -> bool: return True -def config_make_dict_parser(delimiter: str, - *, - parse: Callable[[str], tuple[str, Any]], - unescape: bool = False, - allow_paths: bool = False, - reset: bool = True) -> ConfigParseCallback: +def config_make_dict_parser( + delimiter: str, + *, + parse: Callable[[str], tuple[str, Any]], + unescape: bool = False, + allow_paths: bool = False, + reset: bool = True, +) -> ConfigParseCallback: def config_parse_dict(value: Optional[str], old: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]: new = old.copy() if old else {} @@ -953,13 +958,15 @@ def parse_credential(value: str) -> tuple[str, str]: return (key, value) -def make_path_parser(*, - required: bool = True, - resolve: bool = True, - expanduser: bool = True, - expandvars: bool = True, - secret: bool = False, - constants: Sequence[str] = ()) -> Callable[[str], Path]: +def make_path_parser( + *, + required: bool = True, + resolve: bool = True, + expanduser: bool = True, + expandvars: bool = True, + secret: bool = False, + constants: Sequence[str] = (), +) -> Callable[[str], Path]: return functools.partial( parse_path, required=required, @@ -971,13 +978,15 @@ def make_path_parser(*, ) -def config_make_path_parser(*, - required: bool = True, - resolve: bool = True, - expanduser: bool = True, - expandvars: bool = True, - secret: bool = False, - constants: Sequence[str] = ()) -> ConfigParseCallback: +def config_make_path_parser( + *, + required: bool = True, + resolve: bool = True, + expanduser: bool = True, + expandvars: bool = True, + secret: bool = False, + constants: Sequence[str] = (), +) -> ConfigParseCallback: def config_parse_path(value: Optional[str], old: Optional[Path]) -> Optional[Path]: if not value: return None @@ -1089,8 +1098,9 @@ def config_parse_profile(value: Optional[str], old: Optional[int] = None) -> Opt return None if not is_valid_filename(value): - 
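The secret-file check above inspects the classic Unix permission bits; a self-contained sketch of the same test, assuming a hypothetical helper name:

    from pathlib import Path

    def is_world_accessible(path: Path) -> bool:
        mode = path.stat().st_mode & 0o777  # keep only the permission bits
        return bool(mode & 0o007)           # any "other" (world) bit set?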
die(f"{value!r} is not a valid profile", - hint="Profile= or --profile= requires a name with no path components.") + die( + f"{value!r} is not a valid profile", hint="Profile= or --profile= requires a name with no path components." + ) return value @@ -1179,14 +1189,13 @@ def file_run_or_read(file: Path) -> str: content = file.read_text() if content.startswith("#!/"): - die(f"{file} starts with a shebang ({content.splitlines()[0]})", - hint="This file should be executable") + die(f"{file} starts with a shebang ({content.splitlines()[0]})", hint="This file should be executable") return content class KeySourceType(StrEnum): - file = enum.auto() + file = enum.auto() engine = enum.auto() @@ -1252,7 +1261,7 @@ class ConfigSetting: def __post_init__(self) -> None: if not self.name: - object.__setattr__(self, 'name', ''.join(x.capitalize() for x in self.dest.split('_') if x)) + object.__setattr__(self, "name", "".join(x.capitalize() for x in self.dest.split("_") if x)) if not self.long: object.__setattr__(self, "long", f"--{self.dest.replace('_', '-')}") @@ -1285,9 +1294,11 @@ def _split_lines(self, text: str, width: int) -> list[str]: Otherwise, the text is wrapped without indentation. """ lines = text.splitlines() - subindent = ' ' if lines[0].endswith(':') else '' - return flatten(textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, - subsequent_indent=subindent) for line in lines) + subindent = " " if lines[0].endswith(":") else "" + return flatten( + textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent) + for line in lines + ) def parse_chdir(path: str) -> Optional[Path]: @@ -1326,7 +1337,7 @@ def __call__( parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], - option_string: Optional[str] = None + option_string: Optional[str] = None, ) -> None: logging.warning(f"{option_string} is no longer supported") @@ -1337,7 +1348,7 @@ def __call__( parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None] = None, - option_string: Optional[str] = None + option_string: Optional[str] = None, ) -> None: page(parser.format_help(), namespace.pager) parser.exit() @@ -1383,10 +1394,7 @@ def default(cls) -> "Args": @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Args": - return cls(**{ - k: v for k, v in vars(ns).items() - if k in inspect.signature(cls).parameters - }) + return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters}) def to_dict(self) -> dict[str, Any]: return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) @@ -1415,16 +1423,17 @@ def key_transformer(k: str) -> str: k = key_transformer(k) if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v): - die(f"Serialized JSON has unknown field {k} with value {v}", - hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON") + die( + f"Serialized JSON has unknown field {k} with value {v}", + hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON", + ) value_transformer = json_type_transformer(cls) j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} - return dataclasses.replace(cls.default(), **{ - k: v for k, v in j.items() - if k in inspect.signature(cls).parameters - }) + return dataclasses.replace( + cls.default(), **{k: v for k, v in j.items() if k in inspect.signature(cls).parameters} + ) 
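The `from_namespace()`/`from_json()` helpers reformatted above all use the same idiom: filter a namespace or JSON mapping down to the parameters the dataclass constructor declares. A runnable sketch with a hypothetical dataclass:

    import argparse
    import dataclasses
    import inspect

    @dataclasses.dataclass
    class Opts:
        force: int = 0
        debug: bool = False

    ns = argparse.Namespace(force=2, debug=True, unrelated="dropped")
    opts = Opts(**{k: v for k, v in vars(ns).items() if k in inspect.signature(Opts).parameters})
    assert opts == Opts(force=2, debug=True)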
PACKAGE_GLOBS = ( @@ -1655,10 +1664,7 @@ def default(cls) -> "Config": @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Config": - return cls(**{ - k: v for k, v in vars(ns).items() - if k in inspect.signature(cls).parameters - }) + return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters}) @property def output_with_format(self) -> str: @@ -1738,8 +1744,7 @@ def cache_manifest(self) -> dict[str, Any]: "repositories": sorted(self.repositories), "overlay": self.overlay, "prepare_scripts": sorted( - base64.b64encode(script.read_bytes()).decode() - for script in self.prepare_scripts + base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts ), # We don't use the full path here since tests will often use temporary directories for the output directory # which would trigger a rebuild every time. @@ -1778,16 +1783,17 @@ def key_transformer(k: str) -> str: k = key_transformer(k) if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v): - die(f"Serialized JSON has unknown field {k} with value {v}", - hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON") + die( + f"Serialized JSON has unknown field {k} with value {v}", + hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON", + ) value_transformer = json_type_transformer(cls) j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} - return dataclasses.replace(cls.default(), **{ - k: v for k, v in j.items() - if k in inspect.signature(cls).parameters - }) + return dataclasses.replace( + cls.default(), **{k: v for k, v in j.items() if k in inspect.signature(cls).parameters} + ) def find_binary(self, *names: PathString, tools: bool = True) -> Optional[Path]: return find_binary(*names, root=self.tools() if tools else Path("/"), extra=self.extra_search_paths) @@ -1813,9 +1819,9 @@ def sandbox( ] if ( - binary and - (path := self.find_binary(binary, tools=tools)) and - any(path.is_relative_to(d) for d in self.extra_search_paths) + binary + and (path := self.find_binary(binary, tools=tools)) + and any(path.is_relative_to(d) for d in self.extra_search_paths) ): tools = False opt += flatten(("--ro-bind", d, d) for d in self.extra_search_paths if not relaxed) @@ -1863,8 +1869,8 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple line = line.strip() - if line[0] == '[': - if line[-1] != ']': + if line[0] == "[": + if line[-1] != "]": die(f"{line} is not a valid section") # Yield the section name with an empty key and value to indicate we've finished the current section. @@ -2028,7 +2034,6 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple help="Repositories to use", scope=SettingScope.universal, ), - ConfigSetting( dest="output_format", short="-t", @@ -2171,7 +2176,6 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple paths=("mkosi.clean",), help="Clean script to run after cleanup", ), - ConfigSetting( dest="packages", short="-p", @@ -2235,11 +2239,11 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple ), ConfigSetting( dest="base_trees", - long='--base-tree', - metavar='PATH', + long="--base-tree", + metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), - help='Use the given tree as base tree (e.g. lower sysext layer)', + help="Use the given tree as base tree (e.g. 
lower sysext layer)", ), ConfigSetting( dest="skeleton_trees", @@ -2458,7 +2462,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple # The default value is set in `__init__.py` in `install_uki`. # `None` is used to determine if the roothash and boot count format # should be appended to the filename if they are found. - #default= + # default= help="Specify the format used for the UKI filename", ), ConfigSetting( @@ -2645,7 +2649,6 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple parse=config_parse_feature, help="Specify whether to relabel all files with setfiles", ), - ConfigSetting( dest="secure_boot", metavar="BOOL", @@ -2757,7 +2760,6 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple section="Validation", help="GPG key to use for signing", ), - ConfigSetting( dest="tools_tree", metavar="PATH", @@ -2861,7 +2863,12 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple section="Build", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), help="Use a sandbox tree to configure the various tools that mkosi executes", - paths=("mkosi.sandbox", "mkosi.sandbox.tar", "mkosi.pkgmngr", "mkosi.pkgmngr.tar",), + paths=( + "mkosi.sandbox", + "mkosi.sandbox.tar", + "mkosi.pkgmngr", + "mkosi.pkgmngr.tar", + ), scope=SettingScope.universal, ), ConfigSetting( @@ -2926,7 +2933,6 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple parse=config_parse_boolean, help="Whether mkosi can store information about previous builds", ), - ConfigSetting( dest="proxy_url", section="Host", @@ -2995,8 +3001,10 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple metavar="BOOL", section="Host", parse=config_parse_boolean, - help=('If specified, the container/VM is run with a temporary snapshot of the output ' - 'image that is removed immediately when the container/VM terminates'), + help=( + "If specified, the container/VM is run with a temporary snapshot of the output " + "image that is removed immediately when the container/VM terminates" + ), nargs="?", ), ConfigSetting( @@ -3277,7 +3285,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: prog="mkosi", description="Build Bespoke OS Images", # the synopsis below is supposed to be indented by two spaces - usage="\n " + textwrap.dedent("""\ + usage="\n " + + textwrap.dedent("""\ mkosi [options…] {b}summary{e} mkosi [options…] {b}cat-config{e} mkosi [options…] {b}build{e} [command line…] @@ -3309,14 +3318,16 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: help=argparse.SUPPRESS, ) parser.add_argument( - "-f", "--force", + "-f", + "--force", action="count", dest="force", default=0, help="Remove existing image file before operation", ) parser.add_argument( - "-C", "--directory", + "-C", + "--directory", type=parse_chdir if chdir else str, default=Path.cwd(), help="Change to specified directory before doing anything", @@ -3360,7 +3371,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: default="mkosi of %u", ) parser.add_argument( - "-B", "--auto-bump", + "-B", + "--auto-bump", help="Automatically bump image version after building", action="store_true", default=False, @@ -3379,7 +3391,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: default=False, ) parser.add_argument( - "-w", "--wipe-build-dir", + "-w", + "--wipe-build-dir", help="Remove 
the build directory before building the image", action="store_true", default=False, @@ -3413,7 +3426,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: help=argparse.SUPPRESS, ) parser.add_argument( - "-h", "--help", + "-h", + "--help", action=PagerHelpAction, help=argparse.SUPPRESS, ) @@ -3428,12 +3442,12 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: for long in [s.long, *s.compat_longs]: opts = [s.short, long] if s.short and long == s.long else [long] - group.add_argument( # type: ignore + group.add_argument( # type: ignore *opts, dest=s.dest, choices=s.choices, metavar=s.metavar, - nargs=s.nargs, # type: ignore + nargs=s.nargs, # type: ignore const=s.const, help=s.help if long == s.long else argparse.SUPPRESS, action=ConfigAction, @@ -3474,7 +3488,7 @@ def __call__( parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], - option_string: Optional[str] = None + option_string: Optional[str] = None, ) -> None: assert option_string is not None @@ -3500,7 +3514,7 @@ def __init__(self, resources: Path = Path("/")) -> None: # specified in configuration files. self.cli = argparse.Namespace() self.config = argparse.Namespace( - files = [], + files=[], ) self.defaults = argparse.Namespace() # Compare inodes instead of paths so we can't get tricked by bind mounts and such. @@ -3596,10 +3610,7 @@ def finalize_value(self, setting: ConfigSetting) -> Optional[Any]: # If a value was specified on the CLI, it always takes priority. If the setting is a collection of values, we # merge the value from the CLI with the value from the configuration, making sure that the value from the CLI # always takes priority. - if ( - hasattr(self.cli, setting.dest) and - (v := getattr(self.cli, setting.dest)) is not None - ): + if hasattr(self.cli, setting.dest) and (v := getattr(self.cli, setting.dest)) is not None: if isinstance(v, list): return (getattr(self.config, setting.dest, None) or []) + v elif isinstance(v, dict): @@ -3614,15 +3625,14 @@ def finalize_value(self, setting: ConfigSetting) -> Optional[Any]: # value either if the setting is set to the empty string on the command line. 
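The `finalize_value()` logic above resolves a setting in priority order; a toy model of that ordering (CLI wins, then configuration files, then the default, with list values merged so CLI entries land last), using hypothetical names:

    from typing import Any, Optional

    def finalize(cli: Optional[Any], config: Optional[Any], default: Any) -> Any:
        if cli is not None:
            if isinstance(cli, list):        # collections are merged,
                return (config or []) + cli  # CLI values take priority
            return cli
        if config is not None:
            return config
        return default

    assert finalize(None, "zstd", "none") == "zstd"
    assert finalize(["--debug"], ["--force"], []) == ["--force", "--debug"]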
if ( - not hasattr(self.cli, setting.dest) and - hasattr(self.config, setting.dest) and - (v := getattr(self.config, setting.dest)) is not None + not hasattr(self.cli, setting.dest) + and hasattr(self.config, setting.dest) + and (v := getattr(self.config, setting.dest)) is not None ): return v - if ( - (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and - isinstance(setting.parse(None, None), (dict, list, set)) + if (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and isinstance( + setting.parse(None, None), (dict, list, set) ): default = setting.parse(None, None) elif hasattr(self.defaults, setting.dest): @@ -3719,7 +3729,7 @@ def match_config(self, path: Path) -> bool: return match_triggered is not False def parse_config_one(self, path: Path, profiles: bool = False, local: bool = False) -> bool: - s: Optional[ConfigSetting] # Make mypy happy + s: Optional[ConfigSetting] # Make mypy happy extras = path.is_dir() if path.is_dir(): @@ -3740,10 +3750,7 @@ def parse_config_one(self, path: Path, profiles: bool = False, local: bool = Fal delattr(self.config, s.dest) for s in SETTINGS: - if ( - s.scope == SettingScope.universal and - (image := getattr(self.config, "image", None)) is not None - ): + if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None: continue if self.only_sections and s.section not in self.only_sections: @@ -3764,14 +3771,14 @@ def parse_config_one(self, path: Path, profiles: bool = False, local: bool = Fal s.dest, s.parse( file_run_or_read(extra).rstrip("\n") if s.path_read_text else f, - getattr(self.config, s.dest, None) + getattr(self.config, s.dest, None), ), ) if path.exists(): abs_path = Path.cwd() / path logging.debug(f"Loading configuration file {abs_path}") - files = getattr(self.config, 'files') + files = getattr(self.config, "files") files += [abs_path] for section, k, v in parse_ini(path, only_sections=self.only_sections or {s.section for s in SETTINGS}): @@ -3784,10 +3791,7 @@ def parse_config_one(self, path: Path, profiles: bool = False, local: bool = Fal if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)): die(f"Unknown setting {name}") - if ( - s.scope == SettingScope.universal and - (image := getattr(self.config, "image", None)) is not None - ): + if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None: die(f"Setting {name} cannot be configured in subimage {image}") if name in self.immutable: die(f"Setting {name} cannot be modified anymore at this point") @@ -3875,10 +3879,10 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu return args, () if ( - args.verb.needs_build() and - args.verb != Verb.build and - not args.force and - Path(".mkosi-private/history/latest.json").exists() + args.verb.needs_build() + and args.verb != Verb.build + and not args.force + and Path(".mkosi-private/history/latest.json").exists() ): prev = Config.from_json(Path(".mkosi-private/history/latest.json").read_text()) @@ -3897,7 +3901,7 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu if hasattr(context.config, s.dest): delattr(context.config, s.dest) - context.only_sections = ("Include", "Host",) + context.only_sections = ("Include", "Host") else: prev = None @@ -3931,9 +3935,7 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu # we check here to see if dependencies were explicitly provided and if not we gather # the list of default dependencies while we 
parse the subimages. dependencies: Optional[list[str]] = ( - None - if hasattr(context.cli, "dependencies") or hasattr(context.config, "dependencies") - else [] + None if hasattr(context.cli, "dependencies") or hasattr(context.config, "dependencies") else [] ) if args.directory is not None and Path("mkosi.images").exists(): @@ -3955,7 +3957,7 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu name: getattr(config, "environment")[name] for name in getattr(config, "pass_environment", {}) if name in getattr(config, "environment", {}) - } + }, ) for p in sorted(Path("mkosi.images").iterdir()): @@ -4023,14 +4025,20 @@ def load_credentials(args: argparse.Namespace) -> dict[str, str]: if "ssh.authorized_keys.root" not in creds: if args.ssh_certificate: - pubkey = run(["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"], - stdout=subprocess.PIPE, env=dict(OPENSSL_CONF="/dev/null")).stdout.strip() - sshpubkey = run(["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], - input=pubkey, stdout=subprocess.PIPE).stdout.strip() + pubkey = run( + ["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"], + stdout=subprocess.PIPE, + env=dict(OPENSSL_CONF="/dev/null"), + ).stdout.strip() + sshpubkey = run( + ["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], input=pubkey, stdout=subprocess.PIPE + ).stdout.strip() creds["ssh.authorized_keys.root"] = sshpubkey elif args.ssh: - die("Ssh= is enabled but no SSH certificate was found", - hint="Run 'mkosi genkey' to automatically create one") + die( + "Ssh= is enabled but no SSH certificate was found", + hint="Run 'mkosi genkey' to automatically create one", + ) return creds @@ -4122,11 +4130,7 @@ def load_environment(args: argparse.Namespace) -> dict[str, str]: if gnupghome := os.getenv("GNUPGHOME"): env["GNUPGHOME"] = gnupghome - env |= dict( - parse_environment(line) - for f in args.environment_files - for line in f.read_text().strip().splitlines() - ) + env |= dict(parse_environment(line) for f in args.environment_files for line in f.read_text().strip().splitlines()) env |= args.environment return env @@ -4169,9 +4173,9 @@ def load_config(config: argparse.Namespace) -> Config: # For unprivileged builds we need the userxattr OverlayFS mount option, which is only available # in Linux v5.11 and later. if ( - (config.build_scripts or config.base_trees) and - GenericVersion(platform.release()) < GenericVersion("5.11") and - os.geteuid() != 0 + (config.build_scripts or config.base_trees) + and GenericVersion(platform.release()) < GenericVersion("5.11") + and os.geteuid() != 0 ): die("This unprivileged build configuration requires at least Linux v5.11") @@ -4241,7 +4245,7 @@ def cat_config(images: Sequence[Config]) -> str: # Display the paths as relative to ., if underneath. if path.is_relative_to(Path.cwd()): path = path.relative_to(Path.cwd()) - print(f'{Style.blue}# {path}{Style.reset}', file=c) + print(f"{Style.blue}# {path}{Style.reset}", file=c) print(path.read_text(), file=c) return c.getvalue() @@ -4601,9 +4605,11 @@ def want_selinux_relabel(config: Config, root: Path, fatal: bool = True) -> Opti die("SELinux relabel is requested but could not find selinux config at /etc/selinux/config") return None - policy = run(["sh", "-c", f". {selinux} && echo $SELINUXTYPE"], - sandbox=config.sandbox(binary="sh", options=["--ro-bind", selinux, selinux]), - stdout=subprocess.PIPE).stdout.strip() + policy = run( + ["sh", "-c", f". 
{selinux} && echo $SELINUXTYPE"], + sandbox=config.sandbox(binary="sh", options=["--ro-bind", selinux, selinux]), + stdout=subprocess.PIPE, + ).stdout.strip() if not policy: if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config") @@ -4642,5 +4648,8 @@ def systemd_tool_version(*tool: PathString, sandbox: SandboxProtocol = nosandbox [*tool, "--version"], stdout=subprocess.PIPE, sandbox=sandbox(binary=tool[-1]), - ).stdout.split()[2].strip("()").removeprefix("v") + ) + .stdout.split()[2] + .strip("()") + .removeprefix("v") ) diff --git a/mkosi/curl.py b/mkosi/curl.py index 900c392e9..5d792af13 100644 --- a/mkosi/curl.py +++ b/mkosi/curl.py @@ -28,4 +28,4 @@ def curl(config: Config, url: str, output_dir: Path) -> None: network=True, options=["--bind", output_dir, output_dir, *finalize_crypto_mounts(config)], ), - ) + ) # fmt: skip diff --git a/mkosi/distributions/__init__.py b/mkosi/distributions/__init__.py index 10df0902a..a04b7dc61 100644 --- a/mkosi/distributions/__init__.py +++ b/mkosi/distributions/__init__.py @@ -15,10 +15,10 @@ class PackageType(StrEnum): - none = enum.auto() - rpm = enum.auto() - deb = enum.auto() - pkg = enum.auto() + none = enum.auto() + rpm = enum.auto() + deb = enum.auto() + pkg = enum.auto() class DistributionInstaller: @@ -74,21 +74,21 @@ def grub_prefix(cls) -> str: class Distribution(StrEnum): # Please consult docs/distribution-policy.md and contact one # of the mkosi maintainers before implementing a new distribution. - fedora = enum.auto() - debian = enum.auto() - kali = enum.auto() - ubuntu = enum.auto() - arch = enum.auto() - opensuse = enum.auto() - mageia = enum.auto() - centos = enum.auto() - rhel = enum.auto() - rhel_ubi = enum.auto() + fedora = enum.auto() + debian = enum.auto() + kali = enum.auto() + ubuntu = enum.auto() + arch = enum.auto() + opensuse = enum.auto() + mageia = enum.auto() + centos = enum.auto() + rhel = enum.auto() + rhel_ubi = enum.auto() openmandriva = enum.auto() - rocky = enum.auto() - alma = enum.auto() - azure = enum.auto() - custom = enum.auto() + rocky = enum.auto() + alma = enum.auto() + azure = enum.auto() + custom = enum.auto() def is_centos_variant(self) -> bool: return self in ( @@ -156,7 +156,7 @@ def createrepo(self, context: "Context") -> None: return self.installer().package_manager(context.config).createrepo(context) def installer(self) -> type[DistributionInstaller]: - modname = str(self).replace('-', '_') + modname = str(self).replace("-", "_") mod = importlib.import_module(f"mkosi.distributions.{modname}") installer = getattr(mod, "Installer") assert issubclass(installer, DistributionInstaller) diff --git a/mkosi/distributions/arch.py b/mkosi/distributions/arch.py index aaab71fc7..86711f88b 100644 --- a/mkosi/distributions/arch.py +++ b/mkosi/distributions/arch.py @@ -65,7 +65,8 @@ def repositories(cls, context: Context) -> Iterable[PacmanRepository]: # Testing repositories have to go before regular ones to to take precedence. 
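The `systemd_tool_version()` chain reformatted a few hunks up assumes `--version` output of the shape `<tool> <major> (v<version>)`; the same parsing on a canned string (illustrative input, not captured output):

    output = "systemd-repart 256 (v256.1-1)"
    version = output.split()[2].strip("()").removeprefix("v")
    assert version == "256.1-1"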
repos = [ - repo for repo in ( + repo + for repo in ( "core-testing", "core-testing-debug", "extra-testing", @@ -74,7 +75,8 @@ def repositories(cls, context: Context) -> Iterable[PacmanRepository]: "extra-debug", "multilib-testing", "multilib", - ) if repo in context.config.repositories + ) + if repo in context.config.repositories ] + ["core", "extra"] if context.config.architecture.is_arm_variant(): @@ -86,13 +88,12 @@ def repositories(cls, context: Context) -> Iterable[PacmanRepository]: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - Architecture.arm : "armv7h", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + Architecture.arm: "armv7h", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Arch Linux") return a - diff --git a/mkosi/distributions/azure.py b/mkosi/distributions/azure.py index 985251ee9..77ca1dc9e 100644 --- a/mkosi/distributions/azure.py +++ b/mkosi/distributions/azure.py @@ -98,9 +98,9 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.arm64 : "aarch64", - Architecture.x86_64 : "x86_64", - }.get(arch) + Architecture.arm64: "aarch64", + Architecture.x86_64: "x86_64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") diff --git a/mkosi/distributions/centos.py b/mkosi/distributions/centos.py index 67e309bbd..76be2d57f 100644 --- a/mkosi/distributions/centos.py +++ b/mkosi/distributions/centos.py @@ -53,8 +53,8 @@ def dbpath(cls, context: Context) -> str: # The Hyperscale SIG uses /usr/lib/sysimage/rpm in its rebuild of rpm for C9S that's shipped in the # hyperscale-packages-experimental repository. if ( - GenericVersion(context.config.release) > 9 or - "hyperscale-packages-experimental" in context.config.repositories + GenericVersion(context.config.release) > 9 + or "hyperscale-packages-experimental" in context.config.repositories ): return "/usr/lib/sysimage/rpm" @@ -84,11 +84,11 @@ def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.ppc64_le : "ppc64le", - Architecture.s390x : "s390x", - Architecture.arm64 : "aarch64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.ppc64_le: "ppc64le", + Architecture.s390x: "s390x", + Architecture.arm64: "aarch64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") @@ -206,7 +206,7 @@ def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: ("epel", "epel"), ("epel-next", "epel/next"), ("epel-testing", "epel/testing"), - ("epel-next-testing", "epel/testing/next") + ("epel-next-testing", "epel/testing/next"), ): # For EPEL we make the assumption that epel is mirrored in the parent directory of the mirror URL and # path we were given. 
Since this doesn't work for all scenarios, we also allow overriding the mirror @@ -235,41 +235,19 @@ def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: for repo in ("epel", "epel-next"): yield RpmRepository(repo, f"{url}&repo={repo}-$releasever", gpgurls, enabled=False) yield RpmRepository( - f"{repo}-debuginfo", - f"{url}&repo={repo}-debug-$releasever", - gpgurls, - enabled=False - ) - yield RpmRepository( - f"{repo}-source", - f"{url}&repo={repo}-source-$releasever", - gpgurls, - enabled=False + f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False ) + yield RpmRepository(f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False) + yield RpmRepository("epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False) yield RpmRepository( - "epel-testing", - f"{url}&repo=testing-epel$releasever", - gpgurls, - enabled=False - ) - yield RpmRepository( - "epel-testing-debuginfo", - f"{url}&repo=testing-debug-epel$releasever", - gpgurls, - enabled=False + "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False ) yield RpmRepository( - "epel-testing-source", - f"{url}&repo=testing-source-epel$releasever", - gpgurls, - enabled=False + "epel-testing-source", f"{url}&repo=testing-source-epel$releasever", gpgurls, enabled=False ) yield RpmRepository( - "epel-next-testing", - f"{url}&repo=epel-testing-next-$releasever", - gpgurls, - enabled=False + "epel-next-testing", f"{url}&repo=epel-testing-next-$releasever", gpgurls, enabled=False ) yield RpmRepository( "epel-next-testing-debuginfo", diff --git a/mkosi/distributions/debian.py b/mkosi/distributions/debian.py index 4b1d029e1..a3c83a303 100644 --- a/mkosi/distributions/debian.py +++ b/mkosi/distributions/debian.py @@ -122,7 +122,7 @@ def install(cls, context: Context) -> None: "sparc" : ["lib64"], "sparc64" : ["lib32", "lib64"], "x32" : ["lib32", "lib64", "libx32"], - }.get(context.config.distribution.architecture(context.config.architecture), []) + }.get(context.config.distribution.architecture(context.config.architecture), []) # fmt: skip with umask(~0o755): for d in subdirs: @@ -180,7 +180,7 @@ def install(cls, context: Context) -> None: if not context.config.with_docs else [] ), - sandbox=context.sandbox + sandbox=context.sandbox, ) # Finally, run apt to properly install packages in the chroot without having to worry that maintainer @@ -213,7 +213,6 @@ def install_packages(cls, context: Context, packages: Sequence[str], apivfs: boo # Let's make sure it is enabled by default in our images. 
(context.root / "etc/systemd/system-generators/systemd-gpt-auto-generator").unlink(missing_ok=True) - @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Apt.invoke(context, "purge", packages, apivfs=True) @@ -221,22 +220,22 @@ def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.arm64 : "arm64", - Architecture.arm : "armhf", - Architecture.alpha : "alpha", - Architecture.x86_64 : "amd64", - Architecture.x86 : "i386", - Architecture.ia64 : "ia64", - Architecture.loongarch64 : "loongarch64", - Architecture.mips64_le : "mips64el", - Architecture.mips_le : "mipsel", - Architecture.parisc : "hppa", - Architecture.ppc64_le : "ppc64el", - Architecture.ppc64 : "ppc64", - Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.s390 : "s390", - }.get(arch) + Architecture.arm64: "arm64", + Architecture.arm: "armhf", + Architecture.alpha: "alpha", + Architecture.x86_64: "amd64", + Architecture.x86: "i386", + Architecture.ia64: "ia64", + Architecture.loongarch64: "loongarch64", + Architecture.mips64_le: "mips64el", + Architecture.mips_le: "mipsel", + Architecture.parisc: "hppa", + Architecture.ppc64_le: "ppc64el", + Architecture.ppc64: "ppc64", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.s390: "s390", + }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by Debian") @@ -275,7 +274,7 @@ def fixup_os_release(context: Context) -> None: with osrelease.open("r") as old, newosrelease.open("w") as new: for line in old.readlines(): if line.startswith("VERSION_CODENAME="): - new.write('VERSION_CODENAME=sid\n') + new.write("VERSION_CODENAME=sid\n") else: new.write(line) @@ -285,16 +284,19 @@ def fixup_os_release(context: Context) -> None: # precedence over /usr/lib/os-release, and ignore the latter and assume that if an usr-only # image is built then the package manager will not run on it. if candidate == "etc/os-release": - run([ - "dpkg-divert", - "--quiet", - "--root=/buildroot", - "--local", - "--add", - "--rename", - "--divert", - f"/{candidate}.dpkg", - f"/{candidate}", - ], sandbox=context.sandbox(binary="dpkg-divert", options=["--bind", context.root, "/buildroot"])) + run( + [ + "dpkg-divert", + "--quiet", + "--root=/buildroot", + "--local", + "--add", + "--rename", + "--divert", + f"/{candidate}.dpkg", + f"/{candidate}", + ], + sandbox=context.sandbox(binary="dpkg-divert", options=["--bind", context.root, "/buildroot"]), + ) newosrelease.rename(osrelease) diff --git a/mkosi/distributions/fedora.py b/mkosi/distributions/fedora.py index ddd2abd94..9696e6a25 100644 --- a/mkosi/distributions/fedora.py +++ b/mkosi/distributions/fedora.py @@ -46,8 +46,10 @@ def find_fedora_rpm_gpgkeys(context: Context) -> Iterable[str]: if not key1 and not key2: if not context.config.repository_key_fetch: - die("Fedora GPG keys not found in /usr/share/distribution-gpg-keys", - hint="Make sure the distribution-gpg-keys package is installed") + die( + "Fedora GPG keys not found in /usr/share/distribution-gpg-keys", + hint="Make sure the distribution-gpg-keys package is installed", + ) if context.config.release == "rawhide": # https://fedoraproject.org/fedora.gpg is always outdated when the rawhide key changes. 
Instead, let's @@ -118,13 +120,15 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: return if context.config.release == "eln": - mirror = context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" + mirror = ( + context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" + ) for repo in ("Appstream", "BaseOS", "Extras", "CRB"): url = f"baseurl={join_mirror(mirror, repo)}" yield RpmRepository(repo.lower(), f"{url}/$basearch/os", gpgurls) yield RpmRepository(repo.lower(), f"{url}/$basearch/debug/tree", gpgurls, enabled=False) yield RpmRepository(repo.lower(), f"{url}/source/tree", gpgurls, enabled=False) - elif (m := context.config.mirror): + elif m := context.config.mirror: directory = "development" if context.config.release == "rawhide" else "releases" url = f"baseurl={join_mirror(m, f'linux/{directory}/$releasever/Everything')}" yield RpmRepository("fedora", f"{url}/$basearch/os", gpgurls) @@ -156,16 +160,10 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: enabled=False, ) yield RpmRepository( - "updates-source", - f"{url}&repo=updates-released-source-f$releasever", - gpgurls, - enabled=False + "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False ) yield RpmRepository( - "updates-testing", - f"{url}&repo=updates-testing-f$releasever", - gpgurls, - enabled=False + "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False ) yield RpmRepository( "updates-testing-debuginfo", @@ -183,14 +181,14 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.arm64 : "aarch64", - Architecture.mips64_le : "mips64el", - Architecture.mips_le : "mipsel", - Architecture.ppc64_le : "ppc64le", - Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.x86_64 : "x86_64", - }.get(arch) + Architecture.arm64: "aarch64", + Architecture.mips64_le: "mips64el", + Architecture.mips_le: "mipsel", + Architecture.ppc64_le: "ppc64le", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.x86_64: "x86_64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Fedora") diff --git a/mkosi/distributions/mageia.py b/mkosi/distributions/mageia.py index 6e91853a8..d461cc392 100644 --- a/mkosi/distributions/mageia.py +++ b/mkosi/distributions/mageia.py @@ -52,9 +52,9 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Mageia") diff --git a/mkosi/distributions/openmandriva.py b/mkosi/distributions/openmandriva.py index a153d68de..1e0de8b54 100644 --- a/mkosi/distributions/openmandriva.py +++ b/mkosi/distributions/openmandriva.py @@ -49,10 +49,10 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - Architecture.riscv64 : "riscv64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + Architecture.riscv64: "riscv64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} 
is not supported by OpenMandriva") diff --git a/mkosi/distributions/opensuse.py b/mkosi/distributions/opensuse.py index 30a91be2c..4e3356ee7 100644 --- a/mkosi/distributions/opensuse.py +++ b/mkosi/distributions/opensuse.py @@ -72,7 +72,8 @@ def install_packages(cls, context: Context, packages: Sequence[str], apivfs: boo "--recommends" if context.config.with_recommends else "--no-recommends", *sort_packages(packages), ], - apivfs=apivfs) + apivfs=apivfs, + ) # fmt: skip else: Dnf.invoke(context, "install", sort_packages(packages), apivfs=apivfs) @@ -100,8 +101,10 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: ) if not gpgkeys and not context.config.repository_key_fetch: - die("OpenSUSE GPG keys not found in /usr/share/distribution-gpg-keys", - hint="Make sure the distribution-gpg-keys package is installed") + die( + "OpenSUSE GPG keys not found in /usr/share/distribution-gpg-keys", + hint="Make sure the distribution-gpg-keys package is installed", + ) if zypper and gpgkeys: run( @@ -112,8 +115,8 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: "--bind", context.root, "/buildroot", *finalize_crypto_mounts(context.config), ], - ) - ) + ), + ) # fmt: skip if context.config.release == "tumbleweed": if context.config.architecture == Architecture.x86_64: @@ -162,11 +165,13 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: ) else: if ( - context.config.release in ("current", "stable", "leap") and - context.config.architecture != Architecture.x86_64 + context.config.release in ("current", "stable", "leap") + and context.config.architecture != Architecture.x86_64 ): - die(f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture", - hint="Specify either tumbleweed or a specific leap release such as 15.6") + die( + f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture", + hint="Specify either tumbleweed or a specific leap release such as 15.6", + ) if context.config.release in ("current", "stable", "leap"): release = "openSUSE-current" @@ -225,9 +230,9 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by OpenSUSE") diff --git a/mkosi/distributions/ubuntu.py b/mkosi/distributions/ubuntu.py index 803a91f69..f9cd81374 100644 --- a/mkosi/distributions/ubuntu.py +++ b/mkosi/distributions/ubuntu.py @@ -79,4 +79,3 @@ def repositories(cls, context: Context, local: bool = True) -> Iterable[AptRepos components=components, signedby=signedby, ) - diff --git a/mkosi/initrd.py b/mkosi/initrd.py index 3b6a9d95f..97ec49153 100644 --- a/mkosi/initrd.py +++ b/mkosi/initrd.py @@ -36,19 +36,22 @@ def main() -> None: default=platform.uname().release, ) parser.add_argument( - "-t", "--format", + "-t", + "--format", choices=[str(OutputFormat.cpio), str(OutputFormat.uki), str(OutputFormat.directory)], help="Output format (CPIO archive, UKI or local directory)", default="cpio", ) parser.add_argument( - "-o", "--output", + "-o", + "--output", metavar="NAME", help="Output name", default="initrd", ) parser.add_argument( - "-O", "--output-dir", + "-O", + "--output-dir", metavar="DIR", help="Output directory", default="", @@ -66,7 +69,8 @@ def main() -> None: default=False, ) 
parser.add_argument( - "-D", "--show-documentation", + "-D", + "--show-documentation", help="Show the man page", action="store_true", default=False, @@ -98,7 +102,7 @@ def main() -> None: "--kernel-modules-include=host", "--build-sources", "", "--include=mkosi-initrd", - ] + ] # fmt: skip if args.debug: cmdline += ["--debug"] @@ -145,8 +149,9 @@ def main() -> None: if (Path("/etc") / p).resolve().is_file(): shutil.copy2(Path("/etc") / p, Path(d) / "etc" / p) else: - shutil.copytree(Path("/etc") / p, Path(d) / "etc" / p, - ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True) + shutil.copytree( + Path("/etc") / p, Path(d) / "etc" / p, ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True + ) cmdline += ["--sandbox-tree", d] @@ -156,7 +161,7 @@ def main() -> None: cmdline, stdin=sys.stdin, stdout=sys.stdout, - env={"MKOSI_DNF": dnf.name} if (dnf := find_binary("dnf", "dnf5")) else {} + env={"MKOSI_DNF": dnf.name} if (dnf := find_binary("dnf", "dnf5")) else {}, ) diff --git a/mkosi/installer/__init__.py b/mkosi/installer/__init__.py index 47914ddae..fd9930f93 100644 --- a/mkosi/installer/__init__.py +++ b/mkosi/installer/__init__.py @@ -28,7 +28,7 @@ def cache_subdirs(cls, cache: Path) -> list[Path]: @classmethod def state_subdirs(cls, state: Path) -> list[Path]: - return [] + return [] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: @@ -37,7 +37,7 @@ def scripts(cls, context: Context) -> dict[str, list[PathString]]: @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { - "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. + "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. # systemd's chroot detection doesn't work when unprivileged so tell it explicitly. "SYSTEMD_IN_CHROOT": "1", } @@ -46,8 +46,8 @@ def finalize_environment(cls, context: Context) -> dict[str, str]: env["SYSTEMD_HWDB_UPDATE_BYPASS"] = "1" if ( - "KERNEL_INSTALL_BYPASS" not in context.config.environment and - context.config.bootable != ConfigFeature.disabled + "KERNEL_INSTALL_BYPASS" not in context.config.environment + and context.config.bootable != ConfigFeature.disabled ): env["KERNEL_INSTALL_BYPASS"] = "1" else: @@ -70,7 +70,7 @@ def mounts(cls, context: Context) -> list[PathString]: mounts = [ *finalize_crypto_mounts(context.config), "--bind", context.repository, "/repository", - ] + ] # fmt: skip if context.config.local_mirror and (mirror := startswith(context.config.local_mirror, "file://")): mounts += ["--ro-bind", mirror, mirror] @@ -111,7 +111,7 @@ def options(cls, *, root: PathString, apivfs: bool = True) -> list[PathString]: # original root won't be available anymore. If we're not in the sandbox yet, we want to pick up the passwd # files from the original root. 
*finalize_passwd_mounts(root), - ] + ] # fmt: skip @classmethod def apivfs_script_cmd(cls, context: Context) -> list[PathString]: @@ -123,7 +123,7 @@ def apivfs_script_cmd(cls, context: Context) -> list[PathString]: *apivfs_options(), *cls.options(root="/buildroot"), "--", - ] + ] # fmt: skip @classmethod def sandbox( @@ -142,7 +142,7 @@ def sandbox( *cls.options(root=context.root, apivfs=apivfs), *options, ], - ) + ) # fmt: skip @classmethod def sync(cls, context: Context, force: bool) -> None: @@ -168,9 +168,9 @@ def clean_package_manager_metadata(context: Context) -> None: if context.config.clean_package_metadata == ConfigFeature.disabled: return - if ( - context.config.clean_package_metadata == ConfigFeature.auto and - context.config.output_format in (OutputFormat.directory, OutputFormat.tar) + if context.config.clean_package_metadata == ConfigFeature.auto and context.config.output_format in ( + OutputFormat.directory, + OutputFormat.tar, ): return @@ -181,10 +181,12 @@ def clean_package_manager_metadata(context: Context) -> None: executable = context.config.distribution.package_manager(context.config).executable(context.config) remove = [] - for tool, paths in (("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]), - ("dnf5", ["usr/lib/sysimage/libdnf5"]), - ("dpkg", ["var/lib/dpkg"]), - (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"])): + for tool, paths in ( + ("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]), + ("dnf5", ["usr/lib/sysimage/libdnf5"]), + ("dpkg", ["var/lib/dpkg"]), + (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"]), + ): # fmt: skip if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary(tool, root=context.root): remove += [context.root / p for p in paths if (context.root / p).exists()] diff --git a/mkosi/installer/apt.py b/mkosi/installer/apt.py index c7c983f99..23435becf 100644 --- a/mkosi/installer/apt.py +++ b/mkosi/installer/apt.py @@ -71,7 +71,8 @@ def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { **{ - command: cmd + cls.env_cmd(context) + cls.cmd(context, command) for command in ( + command: cmd + cls.env_cmd(context) + cls.cmd(context, command) + for command in ( "apt", "apt-cache", "apt-cdrom", @@ -84,16 +85,17 @@ def scripts(cls, context: Context) -> dict[str, list[PathString]]: ) }, **{ - command: cmd + cls.dpkg_cmd(command) for command in( + command: cmd + cls.dpkg_cmd(command) + for command in ( "dpkg", "dpkg-query", ) }, - "mkosi-install" : ["apt-get", "install"], - "mkosi-upgrade" : ["apt-get", "upgrade"], - "mkosi-remove" : ["apt-get", "purge"], + "mkosi-install": ["apt-get", "install"], + "mkosi-upgrade": ["apt-get", "upgrade"], + "mkosi-remove": ["apt-get", "purge"], "mkosi-reinstall": ["apt-get", "install", "--reinstall"], - } + } # fmt: skip @classmethod def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None: @@ -130,7 +132,7 @@ def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None: die( f"Keyring for repo {repo.url} not found at {repo.signedby}", hint="Make sure the right keyring package (e.g. 
debian-archive-keyring, kali-archive-keyring " - "or ubuntu-keyring) is installed", + "or ubuntu-keyring) is installed", ) with sources.open("w") as f: @@ -141,7 +143,7 @@ def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None: def finalize_environment(cls, context: Context) -> dict[str, str]: env = { "APT_CONFIG": "/etc/apt.conf", - "DEBIAN_FRONTEND" : "noninteractive", + "DEBIAN_FRONTEND": "noninteractive", "DEBCONF_INTERACTIVE_SEEN": "true", } @@ -180,14 +182,14 @@ def cmd(cls, context: Context, command: str = "apt-get") -> list[PathString]: "-o", "DPkg::Use-Pty=false", "-o", "DPkg::Install::Recursive::Minimum=1000", "-o", "pkgCacheGen::ForceEssential=,", - ] + ] # fmt: skip if not context.config.repository_key_check: cmdline += [ "-o", "Acquire::AllowInsecureRepositories=true", "-o", "Acquire::AllowDowngradeToInsecureRepositories=true", "-o", "APT::Get::AllowUnauthenticated=true", - ] + ] # fmt: skip if not context.config.with_docs: cmdline += [f"--option=DPkg::Options::=--path-exclude=/{glob}" for glob in cls.documentation_exclude_globs] @@ -197,7 +199,7 @@ def cmd(cls, context: Context, command: str = "apt-get") -> list[PathString]: cmdline += [ "-o", f"Acquire::http::Proxy={context.config.proxy_url}", "-o", f"Acquire::https::Proxy={context.config.proxy_url}", - ] + ] # fmt: skip return cmdline @@ -276,4 +278,4 @@ def createrepo(cls, context: Context) -> None: "-o", "Dir::Etc::sourceparts=-", "-o", "APT::Get::List-Cleanup=0", ], - ) + ) # fmt: skip diff --git a/mkosi/installer/dnf.py b/mkosi/installer/dnf.py index 39bbbe800..83fb48ad0 100644 --- a/mkosi/installer/dnf.py +++ b/mkosi/installer/dnf.py @@ -25,22 +25,18 @@ def subdir(cls, config: Config) -> Path: @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: - return [ - p / "packages" - for p in cache.iterdir() - if p.is_dir() and "-" in p.name and "mkosi" not in p.name - ] + return [p / "packages" for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "dnf": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(), - "mkosi-install" : ["dnf", "install"], - "mkosi-upgrade" : ["dnf", "upgrade"], - "mkosi-remove" : ["dnf", "remove"], + "mkosi-install": ["dnf", "install"], + "mkosi-upgrade": ["dnf", "upgrade"], + "mkosi-remove": ["dnf", "remove"], "mkosi-reinstall": ["dnf", "reinstall"], - } + } # fmt: skip @classmethod def setup(cls, context: Context, repositories: Sequence[RpmRepository], filelists: bool = True) -> None: @@ -112,9 +108,9 @@ def finalize_environment(cls, context: Context) -> dict[str, str]: @classmethod def cmd( - cls, - context: Context, - cached_metadata: bool = True, + cls, + context: Context, + cached_metadata: bool = True, ) -> list[PathString]: dnf = cls.executable(context.config) @@ -131,7 +127,7 @@ def cmd( f"--setopt=install_weak_deps={int(context.config.with_recommends)}", "--setopt=check_config_file_age=0", "--disable-plugin=*" if dnf.endswith("dnf5") else "--disableplugin=*", - ] + ] # fmt: skip for plugin in ("builddep", "versionlock"): cmdline += ["--enable-plugin", plugin] if dnf.endswith("dnf5") else ["--enableplugin", plugin] @@ -216,8 +212,10 @@ def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> N @classmethod def createrepo(cls, context: Context) -> None: - run(["createrepo_c", context.repository], - sandbox=context.sandbox(binary="createrepo_c", 
options=["--bind", context.repository, context.repository])) + run( + ["createrepo_c", context.repository], + sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]), + ) (context.sandbox_tree / "etc/yum.repos.d/mkosi-local.repo").write_text( textwrap.dedent( diff --git a/mkosi/installer/pacman.py b/mkosi/installer/pacman.py index f8ba927c1..e8e43c589 100644 --- a/mkosi/installer/pacman.py +++ b/mkosi/installer/pacman.py @@ -42,11 +42,11 @@ def state_subdirs(cls, state: Path) -> list[Path]: def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "pacman": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), - "mkosi-install" : ["pacman", "--sync", "--needed"], - "mkosi-upgrade" : ["pacman", "--sync", "--sysupgrade", "--needed"], - "mkosi-remove" : ["pacman", "--remove", "--recursive", "--nosave"], + "mkosi-install": ["pacman", "--sync", "--needed"], + "mkosi-upgrade": ["pacman", "--sync", "--sysupgrade", "--needed"], + "mkosi-remove": ["pacman", "--remove", "--recursive", "--nosave"], "mkosi-reinstall": ["pacman", "--sync"], - } + } # fmt: skip @classmethod def mounts(cls, context: Context) -> list[PathString]: @@ -55,7 +55,7 @@ def mounts(cls, context: Context) -> list[PathString]: # pacman writes downloaded packages to the first writable cache directory. We don't want it to write to our # local repository directory so we expose it as a read-only directory to pacman. "--ro-bind", context.repository, "/var/cache/pacman/mkosi", - ] + ] # fmt: skip if (context.root / "var/lib/pacman/local").exists(): # pacman reuses the same directory for the sync databases and the local database containing the list of @@ -151,7 +151,7 @@ def cmd(cls, context: Context) -> list[PathString]: "--arch", context.config.distribution.architecture(context.config.architecture), "--color", "auto", "--noconfirm", - ] + ] # fmt: skip @classmethod def invoke( @@ -181,7 +181,7 @@ def createrepo(cls, context: Context) -> None: "repo-add", "--quiet", context.repository / "mkosi.db.tar", - *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name)) + *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name)), ], sandbox=context.sandbox(binary="repo-add", options=["--bind", context.repository, context.repository]), ) @@ -198,7 +198,4 @@ def createrepo(cls, context: Context) -> None: ) # pacman can't sync a single repository, so we go behind its back and do it ourselves. - shutil.move( - context.repository / "mkosi.db.tar", - context.metadata_dir / "lib/pacman/sync/mkosi.db" - ) + shutil.move(context.repository / "mkosi.db.tar", context.metadata_dir / "lib/pacman/sync/mkosi.db") diff --git a/mkosi/installer/rpm.py b/mkosi/installer/rpm.py index 39ef644f0..ff99f0d32 100644 --- a/mkosi/installer/rpm.py +++ b/mkosi/installer/rpm.py @@ -36,20 +36,12 @@ def find_rpm_gpgkey( @overload def find_rpm_gpgkey( - context: Context, - key: str, - fallback: Optional[str] = None, - *, - required: Literal[False] + context: Context, key: str, fallback: Optional[str] = None, *, required: Literal[False] ) -> Optional[str]: ... 
def find_rpm_gpgkey( - context: Context, - key: str, - fallback: Optional[str] = None, - *, - required: bool = True + context: Context, key: str, fallback: Optional[str] = None, *, required: bool = True ) -> Optional[str]: root = context.config.tools() if context.config.tools_tree_certificates else Path("/") @@ -63,8 +55,10 @@ def find_rpm_gpgkey( return fallback if required: - die(f"{key} GPG key not found in /usr/share/distribution-gpg-keys", - hint="Make sure the distribution-gpg-keys package is installed") + die( + f"{key} GPG key not found in /usr/share/distribution-gpg-keys", + hint="Make sure the distribution-gpg-keys package is installed", + ) return None @@ -78,8 +72,11 @@ def setup_rpm(context: Context, *, dbpath: str = "/usr/lib/sysimage/rpm") -> Non if not (confdir / "macros.dbpath").exists(): (confdir / "macros.dbpath").write_text(f"%_dbpath {dbpath}") - plugindir = Path(run(["rpm", "--eval", "%{__plugindir}"], - sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE).stdout.strip()) + plugindir = Path( + run( + ["rpm", "--eval", "%{__plugindir}"], sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE + ).stdout.strip() + ) if (plugindir := context.config.tools() / plugindir.relative_to("/")).exists(): with (confdir / "macros.disable-plugins").open("w") as f: for plugin in plugindir.iterdir(): diff --git a/mkosi/installer/zypper.py b/mkosi/installer/zypper.py index 98de90fb0..ff8708786 100644 --- a/mkosi/installer/zypper.py +++ b/mkosi/installer/zypper.py @@ -32,16 +32,16 @@ def scripts(cls, context: Context) -> dict[str, list[PathString]]: "install", "--download", "in-advance", "--recommends" if context.config.with_recommends else "--no-recommends", - ] + ] # fmt: skip return { "zypper": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), - "rpm" : cls.apivfs_script_cmd(context) + rpm_cmd(), - "mkosi-install" : install, - "mkosi-upgrade" : ["zypper", "update"], - "mkosi-remove" : ["zypper", "remove", "--clean-deps"], + "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(), + "mkosi-install": install, + "mkosi-upgrade": ["zypper", "update"], + "mkosi-remove": ["zypper", "remove", "--clean-deps"], "mkosi-reinstall": install + ["--force"], - } + } # fmt: skip @classmethod def setup(cls, context: Context, repositories: Sequence[RpmRepository]) -> None: @@ -138,8 +138,10 @@ def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> N @classmethod def createrepo(cls, context: Context) -> None: - run(["createrepo_c", context.repository], - sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository])) + run( + ["createrepo_c", context.repository], + sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]), + ) (context.sandbox_tree / "etc/zypp/repos.d/mkosi-local.repo").write_text( textwrap.dedent( diff --git a/mkosi/kmod.py b/mkosi/kmod.py index 9e38b4a78..e04d4fea9 100644 --- a/mkosi/kmod.py +++ b/mkosi/kmod.py @@ -15,7 +15,7 @@ def loaded_modules() -> list[str]: # Loaded modules are listed with underscores but the filenames might use dashes instead. 
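# For example, "snd_hda_intel" in /proc/modules becomes the pattern r"/snd[_-]hda[_-]intel\.ko",
# which matches both snd_hda_intel.ko and snd-hda-intel.ko on disk.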
- return [fr"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines()] + return [rf"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines()] def filter_kernel_modules(root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str]) -> list[Path]: @@ -79,7 +79,7 @@ def resolve_module_dependencies( # modules than the max number of accepted CLI arguments, we split the modules list up into chunks. info = "" for i in range(0, len(nametofile.keys()), 8500): - chunk = list(nametofile.keys())[i:i+8500] + chunk = list(nametofile.keys())[i : i + 8500] info += run( ["modinfo", "--set-version", kver, "--null", *chunk], stdout=subprocess.PIPE, @@ -201,8 +201,7 @@ def process_kernel_modules( firmwared = Path("usr/lib/firmware") with complete_step("Applying kernel module filters"): - required = set( - gen_required_kernel_modules(root, kver, include=include, exclude=exclude)) + required = set(gen_required_kernel_modules(root, kver, include=include, exclude=exclude)) with chdir(root): modules = sorted(modulesd.rglob("*.ko*"), reverse=True) diff --git a/mkosi/log.py b/mkosi/log.py index 3895170b9..bb000759e 100644 --- a/mkosi/log.py +++ b/mkosi/log.py @@ -22,17 +22,17 @@ def terminal_is_dumb() -> bool: class Style: - bold: Final[str] = "\033[0;1;39m" if not terminal_is_dumb() else "" - blue: Final[str] = "\033[0;1;34m" if not terminal_is_dumb() else "" - gray: Final[str] = "\033[0;38;5;245m" if not terminal_is_dumb() else "" - red: Final[str] = "\033[31;1m" if not terminal_is_dumb() else "" - yellow: Final[str] = "\033[33;1m" if not terminal_is_dumb() else "" - reset: Final[str] = "\033[0m" if not terminal_is_dumb() else "" + # fmt: off + bold: Final[str] = "\033[0;1;39m" if not terminal_is_dumb() else "" + blue: Final[str] = "\033[0;1;34m" if not terminal_is_dumb() else "" + gray: Final[str] = "\033[0;38;5;245m" if not terminal_is_dumb() else "" + red: Final[str] = "\033[31;1m" if not terminal_is_dumb() else "" + yellow: Final[str] = "\033[33;1m" if not terminal_is_dumb() else "" + reset: Final[str] = "\033[0m" if not terminal_is_dumb() else "" + # fmt: on -def die(message: str, - *, - hint: Optional[str] = None) -> NoReturn: +def die(message: str, *, hint: Optional[str] = None) -> NoReturn: logging.error(f"{message}") if hint: logging.info(f"({hint})") @@ -84,7 +84,7 @@ def __init__(self, fmt: Optional[str] = None, *args: Any, **kwargs: Any) -> None logging.WARNING: logging.Formatter(f"‣ {Style.yellow}{fmt}{Style.reset}"), logging.ERROR: logging.Formatter(f"‣ {Style.red}{fmt}{Style.reset}"), logging.CRITICAL: logging.Formatter(f"‣ {Style.red}{Style.bold}{fmt}{Style.reset}"), - } + } # fmt: skip super().__init__(fmt, *args, **kwargs) diff --git a/mkosi/manifest.py b/mkosi/manifest.py index eebecff60..ed31b9caa 100644 --- a/mkosi/manifest.py +++ b/mkosi/manifest.py @@ -111,7 +111,7 @@ def record_rpm_packages(self) -> None: ], stdout=subprocess.PIPE, sandbox=self.context.sandbox(binary="rpm", options=["--ro-bind", self.context.root, "/buildroot"]), - ) + ) # fmt: skip packages = sorted(c.stdout.splitlines()) @@ -133,8 +133,8 @@ def record_rpm_packages(self) -> None: # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. 
if ( - self.context.config.base_trees and - datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp + self.context.config.base_trees + and datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp ): continue @@ -173,15 +173,14 @@ def record_deb_packages(self) -> None: "dpkg-query", "--admindir=/buildroot/var/lib/dpkg", "--show", - "--showformat", - r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n', + "--showformat", r"${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n", ], stdout=subprocess.PIPE, sandbox=self.context.sandbox( binary="dpkg-query", options=["--ro-bind", self.context.root, "/buildroot"], ), - ) + ) # fmt: skip packages = sorted(c.stdout.splitlines()) @@ -196,8 +195,8 @@ def record_deb_packages(self) -> None: # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. if ( - self.context.config.base_trees and - datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) < self._init_timestamp + self.context.config.base_trees + and datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) < self._init_timestamp ): continue diff --git a/mkosi/mounts.py b/mkosi/mounts.py index 01ec0cb47..a2ddb60f8 100644 --- a/mkosi/mounts.py +++ b/mkosi/mounts.py @@ -77,7 +77,7 @@ def finalize_source_mounts(config: Config, *, ephemeral: bool) -> Iterator[list[ "--overlay-upperdir", upperdir, "--overlay-workdir", workdir, "--overlay", dst, - ] + ] # fmt: skip else: options += ["--bind", src, dst] @@ -104,6 +104,5 @@ def finalize_crypto_mounts(config: Config) -> list[PathString]: return flatten( ("--symlink", src.readlink(), target) if src.is_symlink() else ("--ro-bind", src, target) - for src, target - in sorted(set(mounts), key=lambda s: s[1]) + for src, target in sorted(set(mounts), key=lambda s: s[1]) ) diff --git a/mkosi/qemu.py b/mkosi/qemu.py index 39671d42f..6189d3645 100644 --- a/mkosi/qemu.py +++ b/mkosi/qemu.py @@ -49,7 +49,7 @@ from mkosi.versioncomp import GenericVersion QEMU_KVM_DEVICE_VERSION = GenericVersion("9.0") -VHOST_VSOCK_SET_GUEST_CID = 0x4008af60 +VHOST_VSOCK_SET_GUEST_CID = 0x4008AF60 class QemuDeviceNode(StrEnum): @@ -72,7 +72,7 @@ def feature(self, config: Config) -> ConfigFeature: }[self] def open(self) -> int: - return os.open(self.device(), os.O_RDWR|os.O_CLOEXEC|os.O_NONBLOCK) + return os.open(self.device(), os.O_RDWR | os.O_CLOEXEC | os.O_NONBLOCK) def available(self, log: bool = False) -> bool: try: @@ -102,7 +102,7 @@ def hash_output(config: Config) -> "hashlib._Hash": def hash_to_vsock_cid(hash: "hashlib._Hash") -> int: - cid = int.from_bytes(hash.digest()[:4], byteorder='little') + cid = int.from_bytes(hash.digest()[:4], byteorder="little") # Make sure we don't return any of the well-known CIDs. 
return max(3, min(cid, 0xFFFFFFFF - 1)) @@ -128,7 +128,7 @@ def find_unused_vsock_cid(config: Config, vfd: int) -> int: if not vsock_cid_in_use(vfd, cid): return cid - hash.update(i.to_bytes(length=4, byteorder='little')) + hash.update(i.to_bytes(length=4, byteorder="little")) for i in range(64): cid = random.randint(0, 0xFFFFFFFF - 1) @@ -140,8 +140,8 @@ def find_unused_vsock_cid(config: Config, vfd: int) -> int: class KernelType(StrEnum): - pe = enum.auto() - uki = enum.auto() + pe = enum.auto() + uki = enum.auto() unknown = enum.auto() @classmethod @@ -328,7 +328,7 @@ def start_virtiofsd( "--no-announce-submounts", "--sandbox=chroot", f"--inode-file-handles={'prefer' if os.getuid() == 0 and not uidmap else 'never'}", - ] + ] # fmt: skip if selinux: cmdline += ["--security-label"] @@ -393,7 +393,7 @@ def start_virtiofsd( ], setup=scope + become_root_in_subuid_range_cmd() if scope and not uidmap else [], ), - ) as proc: + ) as proc: # fmt: skip yield path proc.terminate() @@ -426,7 +426,7 @@ async def notify() -> None: with s: data = [] try: - while (buf := await loop.sock_recv(s, 4096)): + while buf := await loop.sock_recv(s, 4096): data.append(buf) except ConnectionResetError: logging.debug("vsock notify listener connection reset by peer") @@ -511,12 +511,11 @@ def start_journal_remote(config: Config, sockfd: int) -> Iterator[None]: user=user if not scope else None, group=group if not scope else None, foreground=False, - ) as proc: + ) as proc: # fmt: skip yield proc.terminate() - @contextlib.contextmanager def start_journal_remote_vsock(config: Config) -> Iterator[str]: with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as sock: @@ -549,6 +548,7 @@ def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]: tmp = src.parent / f"{src.name}-{uuid.uuid4().hex[:16]}" try: + def copy() -> None: if config.output_format == OutputFormat.directory: become_root_in_subuid_range() @@ -567,7 +567,8 @@ def copy() -> None: ) copy_tree( - src, tmp, + src, + tmp, # Make sure the ownership is changed to the (fake) root user if the directory was not built as root. 
preserve=config.output_format == OutputFormat.directory and src.stat().st_uid == 0, use_subvolumes=config.use_subvolumes, @@ -578,6 +579,7 @@ def copy() -> None: fork_and_wait(copy) yield tmp finally: + def rm() -> None: if config.output_format == OutputFormat.directory: become_root_in_subuid_range() @@ -599,8 +601,8 @@ def qemu_version(config: Config, binary: Path) -> GenericVersion: def want_scratch(config: Config) -> bool: return config.runtime_scratch == ConfigFeature.enabled or ( - config.runtime_scratch == ConfigFeature.auto and - config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None + config.runtime_scratch == ConfigFeature.auto + and config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None ) @@ -613,7 +615,7 @@ def generate_scratch_fs(config: Config) -> Iterator[Path]: run( [f"mkfs.{fs}", "-L", "scratch", *extra.split(), scratch.name], stdout=subprocess.DEVNULL, - sandbox=config.sandbox(binary= f"mkfs.{fs}", options=["--bind", scratch.name, scratch.name]), + sandbox=config.sandbox(binary=f"mkfs.{fs}", options=["--bind", scratch.name, scratch.name]), ) yield Path(scratch.name) @@ -627,8 +629,7 @@ def finalize_qemu_firmware(config: Config, kernel: Optional[Path]) -> QemuFirmwa else QemuFirmware.linux ) elif ( - config.output_format in (OutputFormat.cpio, OutputFormat.directory) or - config.architecture.to_efi() is None + config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None ): return QemuFirmware.linux else: @@ -671,7 +672,7 @@ def finalize_firmware_variables( "--ro-bind", config.secure_boot_certificate, config.secure_boot_certificate, ], ), - ) + ) # fmt: skip else: tools = Path("/") if any(qemu.is_relative_to(d) for d in config.extra_search_paths) else config.tools() vars = ( @@ -700,7 +701,7 @@ def apply_runtime_size(config: Config, image: Path) -> None: image, ], sandbox=config.sandbox(binary="systemd-repart", options=["--bind", image, image]), - ) + ) # fmt: skip @contextlib.contextmanager @@ -716,8 +717,10 @@ def finalize_state(config: Config, cid: int) -> Iterator[None]: with flock(INVOKING_USER.runtime_dir() / "machine"): if (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists(): - die(f"Another virtual machine named {config.machine_or_name()} is already running", - hint="Use --machine to specify a different virtual machine name") + die( + f"Another virtual machine named {config.machine_or_name()} is already running", + hint="Use --machine to specify a different virtual machine name", + ) p.write_text( json.dumps( @@ -751,7 +754,7 @@ def scope_cmd( if os.getuid() != 0 and "DBUS_SESSION_BUS_ADDRESS" in os.environ and "XDG_RUNTIME_DIR" in os.environ: env = { "DBUS_SESSION_BUS_ADDRESS": os.environ["DBUS_SESSION_BUS_ADDRESS"], - "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"] + "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"], } elif os.getuid() == 0: if "DBUS_SYSTEM_ADDRESS" in os.environ: @@ -777,13 +780,12 @@ def scope_cmd( *(["--uid", str(user)] if user is not None else []), *(["--gid", str(group)] if group is not None else []), *([f"--property={p}" for p in properties]), - ] + ] # fmt: skip def register_machine(config: Config, pid: int, fname: Path) -> None: - if ( - os.getuid() != 0 or - ("DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").exists()) + if os.getuid() != 0 or ( + "DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").exists() ): return @@ -803,7 +805,7 @@ def 
register_machine(config: Config, pid: int, fname: Path) -> None: "vm", str(pid), fname if fname.is_dir() else "", - ], + ], # fmt: skip foreground=False, env=os.environ | config.environment, sandbox=config.sandbox(binary="busctl", relaxed=True), @@ -824,9 +826,9 @@ def run_qemu(args: Args, config: Config) -> None: die(f"{config.output_format} images cannot be booted in qemu") if ( - config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) and - config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux) and - not config.qemu_firmware.is_uefi() + config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) + and config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux) + and not config.qemu_firmware.is_uefi() ): die(f"{config.output_format} images cannot be booted with the '{config.qemu_firmware}' firmware") @@ -844,16 +846,15 @@ def run_qemu(args: Args, config: Config) -> None: # after unsharing the user namespace. To get around this, open all those device nodes early so we can pass them as file # descriptors to qemu later. Note that we can't pass the kvm file descriptor to qemu until version 9.0. qemu_device_fds = { - d: d.open() - for d in QemuDeviceNode - if d.feature(config) != ConfigFeature.disabled and d.available(log=True) + d: d.open() for d in QemuDeviceNode if d.feature(config) != ConfigFeature.disabled and d.available(log=True) } if not (qemu := config.find_binary(f"qemu-system-{config.architecture.to_qemu()}")): die("qemu not found.", hint=f"Is qemu-system-{config.architecture.to_qemu()} installed on the host system?") - have_kvm = ((qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or - (qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds)) + have_kvm = (qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or ( + qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds + ) if config.qemu_kvm == ConfigFeature.enabled and not have_kvm: die("KVM acceleration requested but cannot access /dev/kvm") @@ -878,12 +879,9 @@ def run_qemu(args: Args, config: Config) -> None: firmware = finalize_qemu_firmware(config, kernel) - if ( - not kernel and - ( - firmware == QemuFirmware.linux or - config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki) - ) + if not kernel and ( + firmware == QemuFirmware.linux + or config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki) ): if firmware.is_uefi(): name = config.output if config.output_format == OutputFormat.uki else config.output_split_uki @@ -901,10 +899,10 @@ def run_qemu(args: Args, config: Config) -> None: # A shared memory backend might increase ram usage so only add one if actually necessary for virtiofsd.
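# (virtiofsd is a vhost-user device, and vhost-user requires guest RAM to live in a shared
# memory object that the external process can map, hence the memfd backend with share=on.)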
shm = [] if ( - config.runtime_trees or - config.runtime_build_sources or - config.runtime_home or - config.output_format == OutputFormat.directory + config.runtime_trees + or config.runtime_build_sources + or config.runtime_home + or config.output_format == OutputFormat.directory ): shm = ["-object", f"memory-backend-memfd,id=mem,size={config.qemu_mem // 1024**2}M,share=on"] @@ -924,7 +922,7 @@ def run_qemu(args: Args, config: Config) -> None: "-device", "virtio-balloon,free-page-reporting=on", "-no-user-config", *shm, - ] + ] # fmt: skip if config.runtime_network == Network.user: cmdline += ["-nic", f"user,model={config.architecture.default_qemu_nic_model()}"] @@ -957,14 +955,13 @@ def run_qemu(args: Args, config: Config) -> None: cid = config.qemu_vsock_cid if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid): - die(f"VSock connection ID {cid} is already in use by another virtual machine", - hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID") + die( + f"VSock connection ID {cid} is already in use by another virtual machine", + hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID", + ) index = list(qemu_device_fds.keys()).index(QemuDeviceNode.vhost_vsock) - cmdline += [ - "-device", - f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}" - ] + cmdline += ["-device", f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}"] cmdline += ["-cpu", "max"] @@ -980,7 +977,7 @@ def run_qemu(args: Args, config: Config) -> None: "-device", "virtio-serial-pci,id=mkosi-virtio-serial-pci", "-device", "virtconsole,chardev=console", "-mon", "console", - ] + ] # fmt: skip # QEMU has built-in logic to look for the BIOS firmware so we don't need to do anything special for that. if firmware.is_uefi(): @@ -998,7 +995,7 @@ def run_qemu(args: Args, config: Config) -> None: cmdline += [ "-global", "ICH9-LPC.disable_s3=1", "-global", "driver=cfi.pflash01,property=secure,value=on", - ] + ] # fmt: skip if config.qemu_cdrom and config.output_format in (OutputFormat.disk, OutputFormat.esp): # CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size 2048. 
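# (CD-ROM media use 2048-byte logical sectors, which is why systemd-repart rebuilds the image
# with --sector-size=2048 below before it is attached as a CD drive.)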
@@ -1016,7 +1013,7 @@ def run_qemu(args: Args, config: Config) -> None: "--sector-size=2048", "--copy-from", workdir(src), workdir(fname), - ], + ], # fmt: skip sandbox=config.sandbox( binary="systemd-repart", options=[ @@ -1024,7 +1021,7 @@ def run_qemu(args: Args, config: Config) -> None: "--ro-bind", src, workdir(src), ], ), - ) + ) # fmt: skip stack.callback(lambda: fname.unlink()) else: fname = stack.enter_context( @@ -1033,12 +1030,8 @@ def run_qemu(args: Args, config: Config) -> None: apply_runtime_size(config, fname) - if ( - kernel and - ( - KernelType.identify(config, kernel) != KernelType.uki or - not config.architecture.supports_smbios(firmware) - ) + if kernel and ( + KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) ): kcl = config.kernel_command_line + config.kernel_command_line_extra else: @@ -1064,27 +1057,24 @@ def run_qemu(args: Args, config: Config) -> None: fname, name=config.machine_or_name(), uidmap=False, - selinux=bool(want_selinux_relabel(config, fname, fatal=False))), + selinux=bool(want_selinux_relabel(config, fname, fatal=False)), + ), ) cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag=root", - ] + ] # fmt: skip kcl += ["root=root", "rootfstype=virtiofs"] credentials = dict(config.credentials) def add_virtiofs_mount( - sock: Path, - dst: PathString, - cmdline: list[PathString], - credentials: dict[str, str], - *, tag: str + sock: Path, dst: PathString, cmdline: list[PathString], credentials: dict[str, str], *, tag: str ) -> None: cmdline += [ "-chardev", f"socket,id={sock.name},path={sock}", "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag={tag}", - ] + ] # fmt: skip if "fstab.extra" not in credentials: credentials["fstab.extra"] = "" @@ -1133,15 +1123,16 @@ def add_virtiofs_mount( cmdline += [ "-drive", f"if=none,id=scratch,file={scratch},format=raw,discard=on,{cache}", "-device", "scsi-hd,drive=scratch", - ] + ] # fmt: skip kcl += [f"systemd.mount-extra=LABEL=scratch:/var/tmp:{config.distribution.filesystem()}"] if config.output_format == OutputFormat.cpio: cmdline += ["-initrd", fname] elif ( - kernel and KernelType.identify(config, kernel) != KernelType.uki and - "-initrd" not in args.cmdline and - (config.output_dir_or_cwd() / config.output_split_initrd).exists() + kernel + and KernelType.identify(config, kernel) != KernelType.uki + and "-initrd" not in args.cmdline + and (config.output_dir_or_cwd() / config.output_split_initrd).exists() ): cmdline += ["-initrd", config.output_dir_or_cwd() / config.output_split_initrd] @@ -1149,20 +1140,19 @@ def add_virtiofs_mount( direct = fname.stat().st_size % resource.getpagesize() == 0 ephemeral = config.ephemeral cache = f"cache.writeback=on,cache.direct={yes_no(direct)},cache.no-flush={yes_no(ephemeral)},aio=io_uring" - cmdline += ["-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}", - "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1"] - - if ( - config.qemu_swtpm == ConfigFeature.enabled or - ( - config.qemu_swtpm == ConfigFeature.auto and - firmware.is_uefi() and - config.find_binary("swtpm") is not None - ) + cmdline += [ + "-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}", + "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1", + ] # fmt: skip + + if config.qemu_swtpm == ConfigFeature.enabled or ( + config.qemu_swtpm == ConfigFeature.auto and 
firmware.is_uefi() and config.find_binary("swtpm") is not None ): sock = stack.enter_context(start_swtpm(config)) - cmdline += ["-chardev", f"socket,id=chrtpm,path={sock}", - "-tpmdev", "emulator,id=tpm0,chardev=chrtpm"] + cmdline += [ + "-chardev", f"socket,id=chrtpm,path={sock}", + "-tpmdev", "emulator,id=tpm0,chardev=chrtpm", + ] # fmt: skip if config.architecture.is_x86_variant(): cmdline += ["-device", "tpm-tis,tpmdev=tpm0"] @@ -1189,12 +1179,8 @@ def add_virtiofs_mount( elif kernel: kcl += [f"systemd.set_credential_binary={k}:{payload}"] - if ( - kernel and - ( - KernelType.identify(config, kernel) != KernelType.uki or - not config.architecture.supports_smbios(firmware) - ) + if kernel and ( + KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) ): cmdline += ["-append", " ".join(kcl)] elif config.architecture.supports_smbios(firmware): @@ -1277,14 +1263,18 @@ def add_virtiofs_mount( def run_ssh(args: Args, config: Config) -> None: with flock(INVOKING_USER.runtime_dir() / "machine"): if not (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists(): - die(f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}", - hint="Is the machine running and was it built with Ssh=yes and QemuVsock=yes?") + die( + f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}", + hint="Is the machine running and was it built with Ssh=yes and QemuVsock=yes?", + ) state = json.loads(p.read_text()) if not state["SshKey"]: - die("An SSH key must be configured when booting the image to use 'mkosi ssh'", - hint="Use 'mkosi genkey' to generate a new SSH key and certificate") + die( + "An SSH key must be configured when booting the image to use 'mkosi ssh'", + hint="Use 'mkosi genkey' to generate a new SSH key and certificate", + ) cmd: list[PathString] = [ "ssh", @@ -1296,7 +1286,7 @@ def run_ssh(args: Args, config: Config) -> None: "-o", "LogLevel=ERROR", "-o", f"ProxyCommand={state['ProxyCommand']}", "root@mkosi", - ] + ] # fmt: skip cmd += args.cmdline diff --git a/mkosi/run.py b/mkosi/run.py index 261c017a2..644becf28 100644 --- a/mkosi/run.py +++ b/mkosi/run.py @@ -82,10 +82,10 @@ def uncaught_exception_handler(exit: Callable[[int], NoReturn] = sys.exit) -> It # Failures from self come from the forks we spawn to build images in a user namespace. We've already done all # the logging for those failures so we don't log stacktraces for those either. if ( - ARG_DEBUG.get() and - e.cmd and - str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn") and - "qemu-system" not in str(e.cmd[0]) + ARG_DEBUG.get() + and e.cmd + and str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn") + and "qemu-system" not in str(e.cmd[0]) ): sys.excepthook(*ensure_exc_info()) except BaseException: @@ -125,7 +125,7 @@ def log_process_failure(sandbox: Sequence[str], cmdline: Sequence[str], returnco logging.error(f"{cmdline[0]} not found.") else: logging.error( - f"\"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}\" returned non-zero exit code " + f'"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}" returned non-zero exit code ' f"{returncode}." ) @@ -411,8 +411,7 @@ def finalize_passwd_mounts(root: PathString) -> list[PathString]: directory instead of from the host. 
""" return flatten( - ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}") - for f in ("passwd", "group", "shadow", "gshadow") + ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}") for f in ("passwd", "group", "shadow", "gshadow") ) @@ -420,7 +419,7 @@ def network_options(*, network: bool) -> list[PathString]: return [ "--setenv", "SYSTEMD_OFFLINE", one_zero(network), *(["--unshare-net"] if not network else []), - ] + ] # fmt: skip @contextlib.contextmanager @@ -444,6 +443,7 @@ def vartmpdir() -> Iterator[Path]: (d / "work").rmdir() except OSError as e: if e.errno == errno.ENOTEMPTY: + def remove() -> None: acquire_privileges() shutil.rmtree(d) @@ -480,14 +480,14 @@ def sandbox_cmd( # apivfs_script_cmd() and chroot_script_cmd() are executed from within the sandbox, but they still use # sandbox.py, so we make sure it is available inside the sandbox so it can be executed there as well. "--ro-bind", Path(mkosi.sandbox.__file__), "/sandbox.py", - ] + ] # fmt: skip if overlay and (overlay / "usr").exists(): cmdline += [ "--overlay-lowerdir", tools / "usr" "--overlay-lowerdir", overlay / "usr", "--overlay", "/usr", - ] + ] # fmt: skip else: cmdline += ["--ro-bind", tools / "usr", "/usr"] @@ -534,7 +534,7 @@ def sandbox_cmd( "--dir", "/var/log", "--unshare-ipc", "--symlink", "../proc/self/mounts", "/etc/mtab", - ] + ] # fmt: skip if devices: cmdline += ["--bind", "/sys", "/sys", "--bind", "/dev", "/dev"] @@ -574,7 +574,7 @@ def sandbox_cmd( "--overlay-upperdir", tmp or "tmpfs", *(["--overlay-workdir", str(work)] if work else []), "--overlay", Path("/") / d, - ] + ] # fmt: skip elif not relaxed: if tmp: cmdline += ["--bind", tmp, Path("/") / d] @@ -602,7 +602,7 @@ def apivfs_options(*, root: Path = Path("/buildroot")) -> list[PathString]: # Make sure anything running in the root directory thinks it's in a container. $container can't always # be accessed so we write /run/host/container-manager as well which is always accessible. 
"--write", "mkosi", root / "run/host/container-manager", - ] + ] # fmt: skip def chroot_options() -> list[PathString]: @@ -618,7 +618,7 @@ def chroot_options() -> list[PathString]: "--setenv", "HOME", "/", "--setenv", "PATH", "/usr/bin:/usr/sbin", "--setenv", "BUILDROOT", "/", - ] + ] # fmt: skip @contextlib.contextmanager @@ -636,7 +636,7 @@ def chroot_cmd( *network_options(network=network), *apivfs_options(root=Path("/")), *chroot_options(), - ] + ] # fmt: skip if network and Path("/etc/resolv.conf").exists(): cmdline += ["--ro-bind", "/etc/resolv.conf", "/etc/resolv.conf"] diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py index 581bbd7a1..34fbde4f3 100755 --- a/mkosi/sandbox.py +++ b/mkosi/sandbox.py @@ -54,6 +54,7 @@ SCMP_ACT_ALLOW = 0x7FFF0000 SCMP_ACT_ERRNO = 0x00050000 + class mount_attr(ctypes.Structure): _fields_ = [ ("attr_set", ctypes.c_uint64), @@ -231,12 +232,21 @@ def mount_rbind(src: str, dst: str, attrs: int = 0) -> None: try: libc.mount_setattr.argtypes = ( - ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_uint, + ctypes.c_void_p, + ctypes.c_size_t, ) r = libc.mount_setattr(fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) except AttributeError: libc.syscall.argtypes = ( - ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, + ctypes.c_long, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_uint, + ctypes.c_void_p, + ctypes.c_size_t, ) r = libc.syscall(NR_mount_setattr, fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) @@ -248,7 +258,12 @@ def mount_rbind(src: str, dst: str, attrs: int = 0) -> None: r = libc.move_mount(fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) except AttributeError: libc.syscall.argtypes = ( - ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, + ctypes.c_long, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_uint, ) r = libc.syscall(NR_move_mount, fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) @@ -387,14 +402,15 @@ def optimize(cls, fsops: list["FSOperation"]) -> list["FSOperation"]: # Drop all bind mounts that are mounted from beneath another bind mount to the same # location within the new rootfs. optimized = [ - m for m in binds + m + for m in binds if not any( - m != n and - m.readonly == n.readonly and - m.required == n.required and - is_relative_to(m.src, n.src) and - is_relative_to(m.dst, n.dst) and - os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst) + m != n + and m.readonly == n.readonly + and m.required == n.required + and is_relative_to(m.src, n.src) + and is_relative_to(m.dst, n.dst) + and os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst) for n in binds ) ] @@ -602,8 +618,8 @@ def execute(self, oldroot: str, newroot: str) -> None: mount("overlayfs", dst, "overlay", 0, ",".join(options)) -ANSI_HIGHLIGHT = "\x1B[0;1;39m" if os.isatty(2) else "" -ANSI_NORMAL = "\x1B[0m" if os.isatty(2) else "" +ANSI_HIGHLIGHT = "\x1b[0;1;39m" if os.isatty(2) else "" +ANSI_NORMAL = "\x1b[0m" if os.isatty(2) else "" HELP = f"""\ mkosi-sandbox [OPTIONS...] COMMAND [ARGUMENTS...] 
@@ -638,6 +654,7 @@ def execute(self, oldroot: str, newroot: str) -> None: See the mkosi-sandbox(1) man page for details.\ """ + def main() -> None: # We don't use argparse as it takes +- 10ms to import and since this is purely for internal # use, it's not necessary to have good UX for this CLI interface so it's trivial to write @@ -764,8 +781,8 @@ def main() -> None: os.chdir("/tmp") with umask(~0o755): - os.mkdir("newroot") # This is where we set up the sandbox rootfs - os.mkdir("oldroot") # This is the old rootfs which is used as the source for mounts in the new rootfs. + os.mkdir("newroot") # This is where we set up the sandbox rootfs + os.mkdir("oldroot") # This is the old rootfs which is used as the source for mounts in the new rootfs. # Make sure that newroot is a mountpoint. mount("newroot", "newroot", "", MS_BIND | MS_REC, "") diff --git a/mkosi/sysupdate.py b/mkosi/sysupdate.py index efb3cf7ef..b2201bab7 100644 --- a/mkosi/sysupdate.py +++ b/mkosi/sysupdate.py @@ -15,8 +15,10 @@ def run_sysupdate(args: Args, config: Config) -> None: die("SplitArtifacts= must be enabled to be able to use mkosi sysupdate") if not config.sysupdate_dir: - die("No sysupdate definitions directory specified", - hint="Specify a directory containing systemd-sysupdate transfer definitions with SysupdateDirectory=") + die( + "No sysupdate definitions directory specified", + hint="Specify a directory containing systemd-sysupdate transfer definitions with SysupdateDirectory=", + ) if not (sysupdate := config.find_binary("systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate")): die("Could not find systemd-sysupdate") @@ -26,7 +28,7 @@ def run_sysupdate(args: Args, config: Config) -> None: "--definitions", config.sysupdate_dir, "--transfer-source", config.output_dir_or_cwd(), *args.cmdline, - ] + ] # fmt: skip run( cmd, @@ -42,6 +44,6 @@ def run_sysupdate(args: Args, config: Config) -> None: options=[ *(["--bind", "/boot", "/boot"] if Path("/boot").exists() else []), *(["--bind", "/efi", "/efi"] if Path("/efi").exists() else []), - ] + ], ), ) diff --git a/mkosi/tree.py b/mkosi/tree.py index fb7d29631..214511539 100644 --- a/mkosi/tree.py +++ b/mkosi/tree.py @@ -28,7 +28,9 @@ def cp_version(*, sandbox: SandboxProtocol = nosandbox) -> GenericVersion: ["cp", "--version"], sandbox=sandbox(binary="cp"), stdout=subprocess.PIPE, - ).stdout.splitlines()[0].split()[3] + ) + .stdout.splitlines()[0] + .split()[3] ) @@ -51,7 +53,7 @@ def make_tree( result = run( ["btrfs", "subvolume", "create", workdir(path, sandbox)], sandbox=sandbox(binary="btrfs", options=["--bind", path.parent, workdir(path.parent, sandbox)]), - check=use_subvolumes == ConfigFeature.enabled + check=use_subvolumes == ConfigFeature.enabled, ).returncode else: result = 1 @@ -92,7 +94,7 @@ def copy_tree( options: list[PathString] = [ "--ro-bind", src, workdir(src, sandbox), "--bind", dst.parent, workdir(dst.parent, sandbox), - ] + ] # fmt: skip def copy() -> None: cmdline: list[PathString] = [ @@ -102,7 +104,8 @@ def copy() -> None: f"--preserve=mode,links{',timestamps,ownership,xattr' if preserve else ''}", "--reflink=auto", "--copy-contents", - workdir(src, sandbox), workdir(dst, sandbox), + workdir(src, sandbox), + workdir(dst, sandbox), ] if dst.exists() and dst.is_dir() and any(dst.iterdir()) and cp_version(sandbox=sandbox) >= "9.5": @@ -118,16 +121,12 @@ def copy() -> None: # Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume. 
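# (256 is BTRFS_FIRST_FREE_OBJECTID, the object ID every btrfs subvolume root is created with,
# so a plain stat() on the directory suffices for the check.)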
if ( - use_subvolumes == ConfigFeature.disabled or - not preserve or - not is_subvolume(src) or - (dst.exists() and (not dst.is_dir() or any(dst.iterdir()))) + use_subvolumes == ConfigFeature.disabled + or not preserve + or not is_subvolume(src) + or (dst.exists() and (not dst.is_dir() or any(dst.iterdir()))) ): - with ( - preserve_target_directories_stat(src, dst) - if not preserve - else contextlib.nullcontext() - ): + with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext(): copy() return dst @@ -143,11 +142,7 @@ def copy() -> None: ).returncode if result != 0: - with ( - preserve_target_directories_stat(src, dst) - if not preserve - else contextlib.nullcontext() - ): + with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext(): copy() return dst @@ -162,14 +157,15 @@ def rmtree(*paths: Path, sandbox: SandboxProtocol = nosandbox) -> None: if subvolumes := sorted({p for p in paths if p.exists() and is_subvolume(p)}): # Silence and ignore failures since when not running as root, this will fail with a permission error unless the # btrfs filesystem is mounted with user_subvol_rm_allowed. - run(["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)], + run( + ["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)], check=False, sandbox=sandbox( - binary="btrfs", - options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes) + binary="btrfs", options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes) ), stdout=subprocess.DEVNULL if not ARG_DEBUG.get() else None, - stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None) + stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None, + ) filtered = sorted({p for p in paths if p.exists() or p.is_symlink()}) if filtered: @@ -187,7 +183,7 @@ def move_tree( dst: Path, *, use_subvolumes: ConfigFeature = ConfigFeature.disabled, - sandbox: SandboxProtocol = nosandbox + sandbox: SandboxProtocol = nosandbox, ) -> Path: src = src.absolute() dst = dst.absolute() diff --git a/mkosi/types.py b/mkosi/types.py index aa7b7b8fb..3b3b6077f 100644 --- a/mkosi/types.py +++ b/mkosi/types.py @@ -24,5 +24,6 @@ # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L224 _T_co = TypeVar("_T_co", covariant=True) + class SupportsRead(Protocol[_T_co]): def read(self, __length: int = ...) -> _T_co: ... diff --git a/mkosi/user.py b/mkosi/user.py index 407d981c2..e74d4d182 100644 --- a/mkosi/user.py +++ b/mkosi/user.py @@ -73,7 +73,7 @@ def chown(cls, path: Path) -> None: # If we created a file/directory in a parent directory owned by a regular user, make sure the path and any # parent directories are owned by the invoking user as well. 
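# (next() over the generator with a None default returns the first regular-user-owned parent,
# if any, in a single walrus-assigned expression.)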
- if (q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None)): + if q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None): st = q.stat() os.chown(path, st.st_uid, st.st_gid) @@ -133,14 +133,14 @@ def become_root_in_subuid_range() -> None: 0, subuid, SUBRANGE - 100, SUBRANGE - 100, os.getuid(), 1, SUBRANGE - 100 + 1, subuid + SUBRANGE - 100 + 1, 99 - ] + ] # fmt: skip newgidmap = [ "flock", "--exclusive", "--close", lock, "newgidmap", pid, 0, subgid, SUBRANGE - 100, SUBRANGE - 100, os.getgid(), 1, SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99 - ] + ] # fmt: skip # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping to the # process in the user namespace. The mapping can only be assigned after the user namespace has been unshared. @@ -183,6 +183,6 @@ def become_root_in_subuid_range_cmd() -> list[str]: "--map-groups", f"{SUBRANGE - 100}:{os.getgid()}:1", "--map-groups", f"{SUBRANGE - 100 + 1}:{subgid + SUBRANGE - 100 + 1}:99", "--keep-caps", - ] + ] # fmt: skip return [str(x) for x in cmd] diff --git a/mkosi/util.py b/mkosi/util.py index 1afabdb0a..1cd804385 100644 --- a/mkosi/util.py +++ b/mkosi/util.py @@ -121,7 +121,7 @@ def make_executable(*paths: Path) -> None: @contextlib.contextmanager def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]: - fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY) + fd = os.open(path, os.O_CLOEXEC | os.O_RDONLY) try: fcntl.fcntl(fd, fcntl.FD_CLOEXEC) logging.debug(f"Acquiring lock on {path}") @@ -135,15 +135,17 @@ def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]: @contextlib.contextmanager def flock_or_die(path: Path) -> Iterator[Path]: try: - with flock(path, fcntl.LOCK_EX|fcntl.LOCK_NB): + with flock(path, fcntl.LOCK_EX | fcntl.LOCK_NB): yield path except OSError as e: if e.errno != errno.EWOULDBLOCK: raise e - die(f"Cannot lock {path} as it is locked by another process", + die( + f"Cannot lock {path} as it is locked by another process", hint="Maybe another mkosi process is still using it? Use Ephemeral=yes to enable booting multiple " - "instances of the same image") + "instances of the same image", + ) @contextlib.contextmanager @@ -183,7 +185,7 @@ def choices(cls) -> list[str]: def parents_below(path: Path, below: Path) -> list[Path]: parents = list(path.parents) - return parents[:parents.index(below)] + return parents[: parents.index(below)] @contextlib.contextmanager @@ -192,10 +194,7 @@ def resource_path(mod: ModuleType) -> Iterator[Path]: with as_file(t) as p: # Make sure any temporary directory that the resources are unpacked in is accessible to the invoking user so # that any commands executed as the invoking user can access files within it. - if ( - p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and - stat.S_IMODE(p.parent.stat().st_mode) == 0o700 - ): + if p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and stat.S_IMODE(p.parent.stat().st_mode) == 0o700: p.parent.chmod(0o755) yield p @@ -204,7 +203,7 @@ def resource_path(mod: ModuleType) -> Iterator[Path]: def hash_file(path: Path) -> str: # TODO Replace with hashlib.file_digest after dropping support for Python 3.10. 
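# (hashlib.file_digest(f, "sha256"), added in Python 3.11, performs the same chunked-read loop.)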
h = hashlib.sha256() - b = bytearray(16 * 1024**2) + b = bytearray(16 * 1024**2) mv = memoryview(b) with path.open("rb", buffering=0) as f: diff --git a/mkosi/versioncomp.py b/mkosi/versioncomp.py index 8e2e06423..a6c45865a 100644 --- a/mkosi/versioncomp.py +++ b/mkosi/versioncomp.py @@ -21,6 +21,7 @@ def __init__(self, version: str): @classmethod def compare_versions(cls, v1: str, v2: str) -> int: """Implements comparison according to UAPI Group Version Format Specification""" + def rstrip_invalid_version_chars(s: str) -> str: valid_version_chars = {*string.ascii_letters, *string.digits, "~", "-", "^", "."} for i, c in enumerate(s): @@ -84,9 +85,9 @@ def letter_prefix(s: str) -> str: v2 = v2.removeprefix("^") elif v1.startswith("^"): # TODO: bug? - return cls._LEFT_SMALLER #cls._RIGHT_SMALLER + return cls._LEFT_SMALLER # cls._RIGHT_SMALLER elif v2.startswith("^"): - return cls._RIGHT_SMALLER #cls._LEFT_SMALLER + return cls._RIGHT_SMALLER # cls._LEFT_SMALLER # If the remaining part of one of strings starts with ".": if the other remaining part # does not start with ., the string with . compares lower. Otherwise, both dot diff --git a/mkosi/vmspawn.py b/mkosi/vmspawn.py index 78bbaec4d..77aadbc75 100644 --- a/mkosi/vmspawn.py +++ b/mkosi/vmspawn.py @@ -45,7 +45,7 @@ def run_vmspawn(args: Args, config: Config) -> None: if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}", - hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn" + hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn", ) cmdline: list[PathString] = [ @@ -56,7 +56,7 @@ def run_vmspawn(args: Args, config: Config) -> None: "--vsock", config.qemu_vsock.to_tristate(), "--tpm", config.qemu_swtpm.to_tristate(), "--secure-boot", yes_no(config.secure_boot), - ] + ] # fmt: skip if config.runtime_network == Network.user: cmdline += ["--network-user-mode"] diff --git a/tests/__init__.py b/tests/__init__.py index fb31afc5a..d643d2eec 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -77,7 +77,7 @@ def mkosi( user=user, group=group, env=os.environ, - ) + ) # fmt: skip def build(self, options: Sequence[PathString] = (), args: Sequence[str] = ()) -> CompletedProcess: kcl = [ @@ -107,7 +107,7 @@ def build(self, options: Sequence[PathString] = (), args: Sequence[str] = ()) -> "--output-dir", self.output_dir, *(["--debug-shell"] if self.config.debug_shell else []), *options, - ] + ] # fmt: skip self.mkosi("summary", options, user=self.uid, group=self.uid) diff --git a/tests/test_boot.py b/tests/test_boot.py index 3ee3af7bf..a0cdff3cd 100644 --- a/tests/test_boot.py +++ b/tests/test_boot.py @@ -19,8 +19,7 @@ def have_vmspawn() -> bool: return ( find_binary("systemd-vmspawn") is not None - and GenericVersion(run(["systemd-vmspawn", "--version"], - stdout=subprocess.PIPE).stdout.strip()) >= 256 + and GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256 ) diff --git a/tests/test_config.py b/tests/test_config.py index 3cbdce993..51c40ea77 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -50,11 +50,11 @@ def test_compression_enum_bool() -> None: def test_compression_enum_str() -> None: assert str(Compression.none) == "none" assert str(Compression.zstd) == "zstd" - assert str(Compression.zst) == "zstd" - assert str(Compression.xz) == "xz" - assert str(Compression.bz2) == "bz2" - assert str(Compression.gz) == "gz" - assert str(Compression.lz4) == "lz4" + assert str(Compression.zst) == 
"zstd" + assert str(Compression.xz) == "xz" + assert str(Compression.bz2) == "bz2" + assert str(Compression.gz) == "gz" + assert str(Compression.lz4) == "lz4" assert str(Compression.lzma) == "lzma" @@ -129,7 +129,7 @@ def test_parse_config(tmp_path: Path) -> None: "--credential", "my.cred=cli.value", "--repositories", "universe", ] - ) + ) # fmt: skip # Values from the CLI should take priority. assert config.distribution == Distribution.fedora @@ -145,7 +145,7 @@ def test_parse_config(tmp_path: Path) -> None: "--credential", "", "--repositories", "", ] - ) + ) # fmt: skip # Empty values on the CLIs resets non-collection based settings to their defaults and collection based settings to # empty collections. @@ -708,9 +708,7 @@ def test_match_distribution(tmp_path: Path, dist1: Distribution, dist2: Distribu assert "testpkg3" in conf.packages -@pytest.mark.parametrize( - "release1,release2", itertools.combinations_with_replacement([36, 37, 38], 2) -) +@pytest.mark.parametrize("release1,release2", itertools.combinations_with_replacement([36, 37, 38], 2)) def test_match_release(tmp_path: Path, release1: int, release2: int) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") @@ -805,9 +803,7 @@ def test_match_repositories(tmp_path: Path) -> None: @pytest.mark.parametrize( - "image1,image2", itertools.combinations_with_replacement( - ["image_a", "image_b", "image_c"], 2 - ) + "image1,image2", itertools.combinations_with_replacement(["image_a", "image_b", "image_c"], 2) ) def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: with chdir(tmp_path): @@ -877,10 +873,11 @@ def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: @pytest.mark.parametrize( - "op,version", itertools.product( + "op,version", + itertools.product( ["", "==", "<", ">", "<=", ">="], [122, 123, 124], - ) + ), ) def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: opfunc = { @@ -890,7 +887,7 @@ def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: "<=": operator.le, ">": operator.gt, ">=": operator.ge, - }.get(op, operator.eq,) + }.get(op, operator.eq) with chdir(tmp_path): parent = Path("mkosi.conf") @@ -1135,7 +1132,7 @@ def test_specifiers(tmp_path: Path) -> None: def test_kernel_specifiers(tmp_path: Path) -> None: - kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638 + kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638 token = "MySystemImage" roothash = "67e893261799236dcf20529115ba9fae4fd7c2269e1e658d42269503e5760d38" boot_count = "3" @@ -1217,10 +1214,10 @@ def test_environment(tmp_path: Path) -> None: _, [sub, config] = parse_config() expected = { - "TestValue1": "100", # from other.env - "TestValue2": "300", # from mkosi.conf - "TestValue3": "400", # from mkosi.conf - "TestValue4": "99", # from mkosi.env + "TestValue1": "100", # from other.env + "TestValue2": "300", # from mkosi.conf + "TestValue3": "400", # from mkosi.conf + "TestValue4": "99", # from mkosi.env } # Only check values for keys from expected, as config.environment contains other items as well diff --git a/tests/test_initrd.py b/tests/test_initrd.py index 0e39b19ce..ae61cb161 100644 --- a/tests/test_initrd.py +++ b/tests/test_initrd.py @@ -92,12 +92,14 @@ def test_initrd_lvm(config: ImageConfig) -> None: lvm.rename(Path(image.output_dir) / "image.raw") - image.qemu([ - "--qemu-firmware=linux", - # LVM confuses systemd-repart so we mask it for this test. 
- "--kernel-command-line-extra=systemd.mask=systemd-repart.service", - "--kernel-command-line-extra=root=LABEL=root", - ]) + image.qemu( + [ + "--qemu-firmware=linux", + # LVM confuses systemd-repart so we mask it for this test. + "--kernel-command-line-extra=systemd.mask=systemd-repart.service", + "--kernel-command-line-extra=root=LABEL=root", + ] + ) def test_initrd_luks(config: ImageConfig, passphrase: Path) -> None: @@ -172,7 +174,7 @@ def test_initrd_luks_lvm(config: ImageConfig, passphrase: Path) -> None: "luksFormat", f"{lodev}p1", ] - ) + ) # fmt: skip run(["cryptsetup", "--key-file", passphrase, "luksOpen", f"{lodev}p1", "lvm_root"]) stack.callback(lambda: run(["cryptsetup", "close", "lvm_root"])) luks_uuid = run(["cryptsetup", "luksUUID", f"{lodev}p1"], stdout=subprocess.PIPE).stdout.strip() @@ -200,13 +202,15 @@ def test_initrd_luks_lvm(config: ImageConfig, passphrase: Path) -> None: lvm.rename(Path(image.output_dir) / "image.raw") - image.qemu([ - "--format=disk", - "--credential=cryptsetup.passphrase=mkosi", - "--qemu-firmware=linux", - "--kernel-command-line-extra=root=LABEL=root", - f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}", - ]) + image.qemu( + [ + "--format=disk", + "--credential=cryptsetup.passphrase=mkosi", + "--qemu-firmware=linux", + "--kernel-command-line-extra=root=LABEL=root", + f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}", + ] + ) def test_initrd_size(config: ImageConfig) -> None: diff --git a/tests/test_json.py b/tests/test_json.py index 6217d73ba..2a6cae076 100644 --- a/tests/test_json.py +++ b/tests/test_json.py @@ -379,13 +379,13 @@ def test_config() -> None: build_sources_ephemeral=True, cache_dir=Path("/is/this/the/cachedir"), cacheonly=Cacheonly.always, - checksum= False, + checksum=False, clean_package_metadata=ConfigFeature.auto, clean_scripts=[Path("/clean")], compress_level=3, compress_output=Compression.bz2, configure_scripts=[Path("/configure")], - credentials= {"credkey": "credval"}, + credentials={"credkey": "credval"}, dependencies=["dep1"], distribution=Distribution.fedora, environment={"foo": "foo", "BAR": "BAR", "Qux": "Qux"}, @@ -521,7 +521,7 @@ def test_config() -> None: with_docs=True, with_network=False, with_recommends=True, - with_tests= True, + with_tests=True, workspace_dir=Path("/cwd"), ) diff --git a/tests/test_sysext.py b/tests/test_sysext.py index 6650aa50a..c8c3b7300 100644 --- a/tests/test_sysext.py +++ b/tests/test_sysext.py @@ -14,12 +14,15 @@ def test_sysext(config: ImageConfig) -> None: image.build(["--clean-package-metadata=no", "--format=directory"]) with Image(image.config) as sysext: - sysext.build([ - "--directory", "", - "--incremental=no", - "--base-tree", Path(image.output_dir) / "image", - "--overlay", - "--package=dnsmasq", - "--format=disk", - ]) - + sysext.build( + [ + "--directory", + "", + "--incremental=no", + "--base-tree", + Path(image.output_dir) / "image", + "--overlay", + "--package=dnsmasq", + "--format=disk", + ] + ) diff --git a/tests/test_versioncomp.py b/tests/test_versioncomp.py index 6743f8fc9..d715d3e87 100644 --- a/tests/test_versioncomp.py +++ b/tests/test_versioncomp.py @@ -72,8 +72,8 @@ def test_generic_version_spec() -> None: GenericVersion("124-1"), ], ), - 2 - ) + 2, + ), ) def test_generic_version_strverscmp_improved_doc( s1: tuple[int, GenericVersion], @@ -86,9 +86,9 @@ def test_generic_version_strverscmp_improved_doc( i1, v1 = s1 i2, v2 = s2 assert (v1 == v2) == (i1 == i2) - assert (v1 < v2) == (i1 < i2) + assert (v1 < v2) == (i1 < i2) assert (v1 <= v2) == (i1 <= i2) 
- assert (v1 > v2) == (i1 > i2) + assert (v1 > v2) == (i1 > i2) assert (v1 >= v2) == (i1 >= i2) assert (v1 != v2) == (i1 != i2) @@ -122,8 +122,8 @@ def test_generic_version_rpmvercmp() -> None: RPMVERCMP("5.5p1", "5.5p10", -1) RPMVERCMP("5.5p10", "5.5p1", 1) - RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */ - RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */ + RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */ + RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("xyz10", "xyz10", 0) RPMVERCMP("xyz10", "xyz10.1", -1) @@ -165,8 +165,8 @@ def test_generic_version_rpmvercmp() -> None: RPMVERCMP("20101122", "20101121", 1) RPMVERCMP("2_0", "2_0", 0) - RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal - RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal + RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal + RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal # RhBug:178798 case */ RPMVERCMP("a", "a", 0) @@ -224,7 +224,7 @@ def test_generic_version_rpmvercmp() -> None: print("/* RPM version comparison oddities */") # RhBug:811992 case RPMVERCMP("1b.fc17", "1b.fc17", 0) - RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */ + RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */ RPMVERCMP("1.fc17", "1b.fc17", -1) RPMVERCMP("1g.fc17", "1g.fc17", 0) RPMVERCMP("1g.fc17", "1.fc17", 1) From 380279c552369f13a739c5e4c0a367e6b660d68c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Behrmann?= Date: Fri, 20 Sep 2024 10:27:38 +0200 Subject: [PATCH 2/6] Reformat to 109 columns --- kernel-install/50-mkosi.install | 6 +- mkosi/__init__.py | 224 +++++++++++++++++++++++--------- mkosi/bootloader.py | 50 +++---- mkosi/completion.py | 20 ++- mkosi/config.py | 204 +++++++++++++++++++---------- mkosi/distributions/__init__.py | 3 +- mkosi/distributions/centos.py | 24 +++- mkosi/distributions/debian.py | 50 +++---- mkosi/distributions/fedora.py | 36 +++-- mkosi/distributions/mageia.py | 4 +- mkosi/distributions/opensuse.py | 10 +- mkosi/initrd.py | 16 ++- mkosi/installer/__init__.py | 28 ++-- mkosi/installer/apt.py | 18 ++- mkosi/installer/dnf.py | 20 ++- mkosi/installer/pacman.py | 19 +-- mkosi/installer/rpm.py | 8 +- mkosi/installer/zypper.py | 15 ++- mkosi/kmod.py | 51 +++++--- mkosi/manifest.py | 34 ++--- mkosi/mounts.py | 4 +- mkosi/qemu.py | 163 ++++++++++++++--------- mkosi/run.py | 98 +++++++------- mkosi/sandbox.py | 25 +++- mkosi/sysupdate.py | 3 +- mkosi/tree.py | 10 +- mkosi/user.py | 31 ++--- mkosi/util.py | 13 +- mkosi/vmspawn.py | 3 +- pyproject.toml | 2 +- tests/__init__.py | 8 +- tests/conftest.py | 4 +- tests/test_boot.py | 22 ++-- tests/test_config.py | 19 ++- tests/test_initrd.py | 8 +- tests/test_json.py | 5 +- 36 files changed, 818 insertions(+), 440 deletions(-) diff --git a/kernel-install/50-mkosi.install b/kernel-install/50-mkosi.install index f8b77f601..1270d3857 100644 --- a/kernel-install/50-mkosi.install +++ b/kernel-install/50-mkosi.install @@ -139,7 +139,11 @@ def main() -> None: if context.layout != "uki" and context.initrds: return - format = OutputFormat.uki if context.layout == "uki" and context.uki_generator == "mkosi" else OutputFormat.cpio + if context.layout == "uki" and context.uki_generator == "mkosi": + format = OutputFormat.uki + else: + format = OutputFormat.cpio + output = "initrd" if format == OutputFormat.cpio else "uki" cmdline: 
list[PathString] = [ diff --git a/mkosi/__init__.py b/mkosi/__init__.py index 9e0cbae03..0edc8bf97 100644 --- a/mkosi/__init__.py +++ b/mkosi/__init__.py @@ -186,7 +186,10 @@ def install_distribution(context: Context) -> None: with complete_step(f"Installing extra packages for {str(context.config.distribution).capitalize()}"): context.config.distribution.install_packages(context, context.config.packages) else: - if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext): + if context.config.overlay or context.config.output_format in ( + OutputFormat.sysext, + OutputFormat.confext, + ): if context.config.packages: die( "Cannot install packages in extension images without a base tree", @@ -359,7 +362,10 @@ def configure_extension_release(context: Context) -> None: f.write(f"{prefix}_VERSION_ID={context.config.image_version}\n") if f"{prefix}_SCOPE" not in extrelease: - f.write(f"{prefix}_SCOPE={context.config.environment.get(f'{prefix}_SCOPE', 'initrd system portable')}\n") + f.write( + f"{prefix}_SCOPE=" + f"{context.config.environment.get(f'{prefix}_SCOPE', 'initrd system portable')}\n" + ) if "ARCHITECTURE" not in extrelease: f.write(f"ARCHITECTURE={context.config.architecture}\n") @@ -394,7 +400,9 @@ def configure_autologin(context: Context) -> None: context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600" ) configure_autologin_service(context, "getty@tty1.service", "--noclear -") - configure_autologin_service(context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -") + configure_autologin_service( + context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -" + ) @contextlib.contextmanager @@ -555,7 +563,9 @@ def run_sync_scripts(config: Config) -> None: with ( finalize_source_mounts(config, ephemeral=False) as sources, finalize_config_json(config) as json, - tempfile.TemporaryDirectory(dir=config.workspace_dir_or_default(), prefix="mkosi-metadata-") as sandbox_tree, + tempfile.TemporaryDirectory( + dir=config.workspace_dir_or_default(), prefix="mkosi-metadata-" + ) as sandbox_tree, ): install_sandbox_trees(config, Path(sandbox_tree)) @@ -1153,7 +1163,9 @@ def finalize_default_initrd( else: rootpwopt = None - relabel = ConfigFeature.auto if config.selinux_relabel == ConfigFeature.enabled else config.selinux_relabel + relabel = ( + ConfigFeature.auto if config.selinux_relabel == ConfigFeature.enabled else config.selinux_relabel + ) # Default values are assigned via the parser so we go via the argument parser to construct # the config for the initrd. @@ -1381,7 +1393,10 @@ def build_kernel_modules_initrd(context: Context, kver: str) -> Path: # Older Debian and Ubuntu releases do not compress their kernel modules, so we compress the # initramfs instead. Note that this is not ideal since the compressed kernel modules will # all be decompressed on boot which requires significant memory. 
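    # (A compressed initramfs is unpacked into memory wholesale at boot,
    # whereas per-module compression only decompresses modules as they are
    # actually loaded.)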
- elif context.config.distribution == Distribution.debian and context.config.release in ("sid", "testing"): + elif context.config.distribution == Distribution.debian and context.config.release in ( + "sid", + "testing", + ): compression = Compression.none else: compression = Compression.zstd @@ -1500,7 +1515,7 @@ def build_uki( "--pcr-public-key", context.config.secure_boot_certificate, ] # fmt: skip options += [ - "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, + "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, # noqa ] # fmt: skip cmd += ["build", "--linux", kimg] @@ -1560,7 +1575,11 @@ def systemd_stub_version(context: Context, stub: Path) -> Optional[GenericVersio if not sdmagic_text: return None - if not (version := re.match(r"#### LoaderInfo: systemd-stub (?P[.~^a-zA-Z0-9-+]+) ####", sdmagic_text)): + if not ( + version := re.match( + r"#### LoaderInfo: systemd-stub (?P[.~^a-zA-Z0-9-+]+) ####", sdmagic_text + ) + ): die(f"Unable to determine systemd-stub version, found {sdmagic_text!r}") return GenericVersion(version.group("version")) @@ -1584,7 +1603,9 @@ def find_entry_token(context: Context) -> str: or ( "--version" not in run( - ["kernel-install", "--help"], stdout=subprocess.PIPE, sandbox=context.sandbox(binary="kernel-install") + ["kernel-install", "--help"], + stdout=subprocess.PIPE, + sandbox=context.sandbox(binary="kernel-install"), ).stdout ) or systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1" @@ -1594,7 +1615,9 @@ def find_entry_token(context: Context) -> str: output = json.loads( run( ["kernel-install", "--root=/buildroot", "--json=pretty", "inspect"], - sandbox=context.sandbox(binary="kernel-install", options=["--ro-bind", context.root, "/buildroot"]), + sandbox=context.sandbox( + binary="kernel-install", options=["--ro-bind", context.root, "/buildroot"] + ), stdout=subprocess.PIPE, env={"BOOT_ROOT": "/boot"}, ).stdout @@ -1604,7 +1627,9 @@ def find_entry_token(context: Context) -> str: return cast(str, output["EntryToken"]) -def finalize_cmdline(context: Context, partitions: Sequence[Partition], roothash: Optional[str]) -> list[str]: +def finalize_cmdline( + context: Context, partitions: Sequence[Partition], roothash: Optional[str] +) -> list[str]: if (context.root / "etc/kernel/cmdline").exists(): cmdline = [(context.root / "etc/kernel/cmdline").read_text().strip()] elif (context.root / "usr/lib/kernel/cmdline").exists(): @@ -1722,7 +1747,7 @@ def install_type1( linux /{kimg.relative_to(context.root / "boot")} {" ".join(cmdline)} initrd {" ".join(os.fspath(Path("/") / i.relative_to(context.root / "boot")) for i in initrds)} }} - """ + """ # noqa ) ) @@ -1749,7 +1774,9 @@ def replacer(match: re.Match[str]) -> str: return re.sub(r"&(?P[&a-zA-Z])", replacer, text) -def install_uki(context: Context, kver: str, kimg: Path, token: str, partitions: Sequence[Partition]) -> None: +def install_uki( + context: Context, kver: str, kimg: Path, token: str, partitions: Sequence[Partition] +) -> None: bootloader_entry_format = context.config.unified_kernel_image_format or "&e-&k" roothash_value = "" @@ -1858,7 +1885,10 @@ def install_kernel(context: Context, partitions: Sequence[Partition]) -> None: stub = systemd_stub_binary(context) if want_uki(context) and not stub.exists(): - die(f"Unified kernel image(s) requested but systemd-stub not found at /{stub.relative_to(context.root)}") + die( + "Unified kernel image(s) requested but systemd-stub not found at " + 
f"/{stub.relative_to(context.root)}" + ) if context.config.bootable == ConfigFeature.enabled and not any(gen_kernel_images(context)): die("A bootable image was requested but no kernel was found") @@ -1875,9 +1905,13 @@ def install_kernel(context: Context, partitions: Sequence[Partition]) -> None: break -def make_uki(context: Context, stub: Path, kver: str, kimg: Path, microcode: list[Path], output: Path) -> None: +def make_uki( + context: Context, stub: Path, kver: str, kimg: Path, microcode: list[Path], output: Path +) -> None: make_cpio(context.root, context.workspace / "initrd", sandbox=context.sandbox) - maybe_compress(context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd") + maybe_compress( + context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd" + ) initrds = [context.workspace / "initrd"] @@ -1899,7 +1933,9 @@ def compressor_command(context: Context, compression: Compression) -> list[PathS die(f"Unknown compression {compression}") -def maybe_compress(context: Context, compression: Compression, src: Path, dst: Optional[Path] = None) -> None: +def maybe_compress( + context: Context, compression: Compression, src: Path, dst: Optional[Path] = None +) -> None: if not compression or src.is_dir(): if dst: move_tree( @@ -1917,7 +1953,8 @@ def maybe_compress(context: Context, compression: Compression, src: Path, dst: O with complete_step(f"Compressing {src} with {compression}"): with src.open("rb") as i: - src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. + # if src == dst, make sure dst doesn't truncate the src file but creates a new file. + src.unlink() with dst.open("wb") as o: run(cmd, stdin=i, stdout=o, sandbox=context.sandbox(binary=cmd[0])) @@ -2006,7 +2043,9 @@ def calculate_sha256sum(context: Context) -> None: print(hash_file(p) + " *" + p.name, file=f) - (context.workspace / context.config.output_checksum).rename(context.staging / context.config.output_checksum) + (context.workspace / context.config.output_checksum).rename( + context.staging / context.config.output_checksum + ) def calculate_signature(context: Context) -> None: @@ -2134,7 +2173,12 @@ def check_inputs(config: Config) -> None: if not tree.source.exists(): die(f"{name.capitalize()} tree {tree.source} not found") - if tree.source.is_file() and tree.source.suffix == ".raw" and not tree.target and os.getuid() != 0: + if ( + tree.source.is_file() + and tree.source.suffix == ".raw" + and not tree.target + and os.getuid() != 0 + ): die(f"Must run as root to use disk images in {name} trees") if config.output_format != OutputFormat.none and config.bootable != ConfigFeature.disabled: @@ -2219,8 +2263,8 @@ def check_tools(config: Config, verb: Verb) -> None: config, version="254", reason="build bootable images", - hint="Use ToolsTree=default to download most required tools including ukify automatically or use " - "Bootable=no to create a non-bootable image which doesn't require ukify", + hint="Use ToolsTree=default to download most required tools including ukify automatically " + "or use Bootable=no to create a non-bootable image which doesn't require ukify", ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): @@ -2341,7 +2385,10 @@ def configure_initrd(context: Context) -> None: if not context.config.make_initrd: return - if not (context.root / "etc/initrd-release").exists() and not (context.root / "etc/initrd-release").is_symlink(): + if ( + not (context.root / 
"etc/initrd-release").exists() + and not (context.root / "etc/initrd-release").is_symlink() + ): (context.root / "etc/initrd-release").symlink_to("/etc/os-release") @@ -2367,7 +2414,11 @@ def run_depmod(context: Context, *, cache: bool = False) -> None: for kver, _ in gen_kernel_images(context): modulesd = context.root / "usr/lib/modules" / kver - if not cache and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs): + if ( + not cache + and not context.config.kernel_modules_exclude + and all((modulesd / o).exists() for o in outputs) + ): mtime = (modulesd / "modules.dep").stat().st_mtime if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")): continue @@ -2399,7 +2450,9 @@ def run_sysusers(context: Context) -> None: with complete_step("Generating system users"): run( ["systemd-sysusers", "--root=/buildroot"], - sandbox=context.sandbox(binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"]), + sandbox=context.sandbox( + binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"] + ), ) @@ -2529,7 +2582,9 @@ def run_firstboot(context: Context) -> None: with complete_step("Applying first boot settings"): run( ["systemd-firstboot", "--root=/buildroot", "--force", *options], - sandbox=context.sandbox(binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"]), + sandbox=context.sandbox( + binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"] + ), ) # Initrds generally don't ship with only /usr so there's not much point in putting the @@ -2655,7 +2710,9 @@ def reuse_cache(context: Context) -> bool: return True -def save_uki_components(context: Context) -> tuple[Optional[Path], Optional[str], Optional[Path], list[Path]]: +def save_uki_components( + context: Context, +) -> tuple[Optional[Path], Optional[str], Optional[Path], list[Path]]: if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): return None, None, None, [] @@ -2726,7 +2783,11 @@ def make_image( cmdline += ["--private-key", context.config.verity_key] if context.config.verity_certificate: cmdline += ["--certificate", workdir(context.config.verity_certificate)] - options += ["--ro-bind", context.config.verity_certificate, workdir(context.config.verity_certificate)] + options += [ + "--ro-bind", + context.config.verity_certificate, + workdir(context.config.verity_certificate), + ] if skip: cmdline += ["--defer-partitions", ",".join(skip)] if split: @@ -2844,7 +2905,9 @@ def make_disk( definitions = [defaults] - return make_image(context, msg=msg, skip=skip, split=split, tabs=tabs, root=context.root, definitions=definitions) + return make_image( + context, msg=msg, skip=skip, split=split, tabs=tabs, root=context.root, definitions=definitions + ) def make_oci(context: Context, root_layer: Path, dst: Path) -> None: @@ -3146,11 +3209,13 @@ def copy_repository_metadata(config: Config, dst: Path) -> None: exclude: list[PathString] if d == "cache": exclude = flatten( - ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).cache_subdirs(src) + ("--ro-bind", tmp, p) + for p in config.distribution.package_manager(config).cache_subdirs(src) ) else: exclude = flatten( - ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).state_subdirs(src) + ("--ro-bind", tmp, p) + for p in config.distribution.package_manager(config).state_subdirs(src) ) subdst = dst / d / subdir @@ -3204,7 +3269,11 @@ def build_image(context: Context) -> None: wantrepo = ( ( not cached - and 
(context.config.packages or context.config.build_packages or context.config.prepare_scripts) + and ( + context.config.packages + or context.config.build_packages + or context.config.prepare_scripts + ) ) or context.config.volatile_packages or context.config.postinst_scripts @@ -3383,7 +3452,9 @@ def run_shell(args: Args, config: Config) -> None: # Make sure the latest nspawn settings are always used. if config.nspawn_settings: if not (config.output_dir_or_cwd() / f"{name}.nspawn").exists(): - stack.callback(lambda: (config.output_dir_or_cwd() / f"{name}.nspawn").unlink(missing_ok=True)) + stack.callback( + lambda: (config.output_dir_or_cwd() / f"{name}.nspawn").unlink(missing_ok=True) + ) shutil.copy2(config.nspawn_settings, config.output_dir_or_cwd() / f"{name}.nspawn") # If we're booting a directory image that wasn't built by root, we always make an ephemeral @@ -3428,7 +3499,11 @@ def run_shell(args: Args, config: Config) -> None: # Let's allow running a shell in a non-ephemeral image but in that case only map a # single user into the image so it can't get polluted with files or directories # owned by other users. - if args.verb == Verb.shell and config.output_format == OutputFormat.directory and not config.ephemeral: + if ( + args.verb == Verb.shell + and config.output_format == OutputFormat.directory + and not config.ephemeral + ): range = 1 else: range = 65536 @@ -3470,7 +3545,9 @@ def run_shell(args: Args, config: Config) -> None: if args.verb == Verb.boot and config.forward_journal: with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: - addr = Path(os.getenv("TMPDIR", "/tmp")) / f"mkosi-journal-remote-unix-{uuid.uuid4().hex[:16]}" + addr = ( + Path(os.getenv("TMPDIR", "/tmp")) / f"mkosi-journal-remote-unix-{uuid.uuid4().hex[:16]}" + ) sock.bind(os.fspath(addr)) sock.listen() if config.output_format == OutputFormat.directory and (stat := os.stat(fname)).st_uid != 0: @@ -3518,7 +3595,10 @@ def run_shell(args: Args, config: Config) -> None: def run_systemd_tool(tool: str, args: Args, config: Config) -> None: - if config.output_format not in (OutputFormat.disk, OutputFormat.directory) and not config.forward_journal: + if ( + config.output_format not in (OutputFormat.disk, OutputFormat.directory) + and not config.forward_journal + ): die(f"{config.output_format} images cannot be inspected with {tool}") if ( @@ -3538,9 +3618,14 @@ def run_systemd_tool(tool: str, args: Args, config: Config) -> None: output = config.output_dir_or_cwd() / config.output if config.forward_journal and not config.forward_journal.exists(): - die(f"Journal directory/file configured with ForwardJournal= does not exist, cannot inspect with {tool}") + die( + "Journal directory/file configured with ForwardJournal= does not exist, " + f"cannot inspect with {tool}" + ) elif not output.exists(): - die(f"Output {config.output_dir_or_cwd() / config.output} does not exist, cannot inspect with {tool}") + die( + f"Output {config.output_dir_or_cwd() / config.output} does not exist, cannot inspect with {tool}" + ) cmd: list[PathString] = [tool_path] @@ -3700,7 +3785,7 @@ def finalize_default_tools(args: Args, config: Config, *, resources: Path) -> Co "--incremental", str(config.incremental), *([f"--package={package}" for package in config.tools_tree_packages]), "--output", f"{config.tools_tree_distribution}-tools", - *(["--source-date-epoch", str(config.source_date_epoch)] if config.source_date_epoch is not None else []), + *(["--source-date-epoch", str(config.source_date_epoch)] if config.source_date_epoch is not 
None else []), # noqa *([f"--environment={k}='{v}'" for k, v in config.environment.items()]), *([f"--extra-search-path={p}" for p in config.extra_search_paths]), *(["--proxy-url", config.proxy_url] if config.proxy_url else []), @@ -3727,9 +3812,10 @@ def check_workspace_directory(config: Config) -> None: for tree in config.build_sources: if wd.is_relative_to(tree.source): die( - f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})", - hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure a different " - "workspace directory", + f"The workspace directory ({wd}) cannot be a subdirectory of " + f"any source directory ({tree.source})", + hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure " + "a different workspace directory", ) @@ -3774,7 +3860,7 @@ def run_clean_scripts(config: Config) -> None: "--dir", "/work/out", "--ro-bind", script, "/work/clean", "--ro-bind", json, "/work/config.json", - *(["--bind", str(o), "/work/out"] if (o := config.output_dir_or_cwd()).exists() else []), + *(["--bind", str(o), "/work/out"] if (o := config.output_dir_or_cwd()).exists() else []), # noqa *sources, ], ), @@ -3819,7 +3905,10 @@ def run_clean(args: Args, config: Config, *, resources: Path) -> None: outputs = { config.output_dir_or_cwd() / output for output in config.outputs - if (config.output_dir_or_cwd() / output).exists() or (config.output_dir_or_cwd() / output).is_symlink() + if ( + (config.output_dir_or_cwd() / output).exists() + or (config.output_dir_or_cwd() / output).is_symlink() + ) } # Make sure we resolve the symlink we create in the output directory and remove its target @@ -3836,7 +3925,12 @@ def run_clean(args: Args, config: Config, *, resources: Path) -> None: ): rmtree(*outputs, sandbox=sandbox) - if remove_build_cache and config.build_dir and config.build_dir.exists() and any(config.build_dir.iterdir()): + if ( + remove_build_cache + and config.build_dir + and config.build_dir.exists() + and any(config.build_dir.iterdir()) + ): with complete_step(f"Clearing out build directory of {config.name()} image…"): rmtree(*config.build_dir.iterdir(), sandbox=sandbox) @@ -3851,7 +3945,10 @@ def run_clean(args: Args, config: Config, *, resources: Path) -> None: if any(p.exists() for p in itertools.chain(cache_tree_paths(config), initrd, metadata)): with complete_step(f"Removing cache entries of {config.name()} image…"): - rmtree(*(p for p in itertools.chain(cache_tree_paths(config), initrd) if p.exists()), sandbox=sandbox) + rmtree( + *(p for p in itertools.chain(cache_tree_paths(config), initrd) if p.exists()), + sandbox=sandbox, + ) if remove_package_cache and any(config.package_cache_dir_or_default().glob("*")): subdir = config.distribution.package_manager(config).subdir(config) @@ -3899,8 +3996,8 @@ def metadata_cache(config: Config) -> Path: def sync_repository_metadata(args: Args, images: Sequence[Config], *, resources: Path, dst: Path) -> None: last = images[-1] - # If we have a metadata cache and any cached image and using cached metadata is not explicitly disabled, reuse the - # metadata cache. + # If we have a metadata cache and any cached image and using cached metadata is not explicitly disabled, + # reuse the metadata cache. 
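    # (As with the cache-write path further down, "explicitly disabled" means
    # Cacheonly=never.)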
if ( last.incremental and metadata_cache(last).exists() @@ -3944,8 +4041,8 @@ def sync_repository_metadata(args: Args, images: Sequence[Config], *, resources: for p in last.distribution.package_manager(last).cache_subdirs(src): p.mkdir(parents=True, exist_ok=True) - # If we're in incremental mode and caching metadata is not explicitly disabled, cache the synced repository - # metadata so we can reuse it later. + # If we're in incremental mode and caching metadata is not explicitly disabled, cache the synced + # repository metadata so we can reuse it later. if last.incremental and last.cacheonly != Cacheonly.never: rmtree(metadata_cache(last), sandbox=last.sandbox) make_tree(metadata_cache(last), use_subvolumes=last.use_subvolumes, sandbox=last.sandbox) @@ -3971,9 +4068,9 @@ def run_build( if os.getuid() == 0: mount("", "/", "", MS_SLAVE | MS_REC, "") - # For extra safety when running as root, remount a bunch of stuff read-only. - # Because some build systems use output directories in /usr, we only remount - # /usr read-only if the output directory is not relative to it. + # For extra safety when running as root, remount a bunch of stuff read-only. Because some build systems + # use output directories in /usr, we only remount /usr read-only if the output directory is not relative + # to it. if os.getuid() == 0: remount = ["/etc", "/opt", "/boot", "/efi", "/media"] if not config.output_dir_or_cwd().is_relative_to("/usr"): @@ -4016,7 +4113,9 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: if args.verb == Verb.documentation: manual = args.cmdline[0] if args.cmdline else "mkosi" - formats: list[DocFormat] = [args.doc_format] if args.doc_format != DocFormat.auto else DocFormat.all() + formats: list[DocFormat] = ( + [args.doc_format] if args.doc_format != DocFormat.auto else DocFormat.all() + ) return show_docs(manual, formats, resources=resources, pager=args.pager) if args.verb == Verb.genkey: @@ -4037,12 +4136,17 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: return if all(config == Config.default() for config in images): - die("No configuration found", hint="Make sure mkosi is run from a directory with configuration files") + die( + "No configuration found", hint="Make sure mkosi is run from a directory with configuration files" + ) if args.verb == Verb.summary: if args.json: text = json.dumps( - {"Images": [config.to_dict() for config in images]}, cls=JsonEncoder, indent=4, sort_keys=True + {"Images": [config.to_dict() for config in images]}, + cls=JsonEncoder, + indent=4, + sort_keys=True, ) else: text = "\n".join(summary(config) for config in images) @@ -4163,8 +4267,12 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: ensure_directories_exist(last) with ( - tempfile.TemporaryDirectory(dir=last.workspace_dir_or_default(), prefix="mkosi-metadata-") as metadata_dir, - tempfile.TemporaryDirectory(dir=last.workspace_dir_or_default(), prefix="mkosi-packages-") as package_dir, + tempfile.TemporaryDirectory( + dir=last.workspace_dir_or_default(), prefix="mkosi-metadata-" + ) as metadata_dir, + tempfile.TemporaryDirectory( + dir=last.workspace_dir_or_default(), prefix="mkosi-packages-" + ) as package_dir, ): sync_repository_metadata(args, images, resources=resources, dst=Path(metadata_dir)) diff --git a/mkosi/bootloader.py b/mkosi/bootloader.py index 2fdd8afcb..47434f8bb 100644 --- a/mkosi/bootloader.py +++ b/mkosi/bootloader.py @@ -34,9 +34,8 @@ def want_efi(config: Config) -> bool: # Do we want 
to make the image bootable on EFI firmware? - # Note that this returns True also in the case where autodetection might later - # cause the system to not be made bootable on EFI firmware after the filesystem - # has been populated. + # Note that this returns True also in the case where autodetection might later cause the system to not be + # made bootable on EFI firmware after the filesystem has been populated. if config.output_format in (OutputFormat.uki, OutputFormat.esp): return True @@ -48,7 +47,9 @@ def want_efi(config: Config) -> bool: return False if ( - config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay + config.output_format == OutputFormat.cpio + or config.output_format.is_extension_image() + or config.overlay ) and config.bootable == ConfigFeature.auto: return False @@ -148,8 +149,9 @@ def prepare_grub_config(context: Context) -> Optional[Path]: f.write("set timeout=0\n") if want_grub_efi(context): - # Signed EFI grub shipped by distributions reads its configuration from /EFI//grub.cfg (except - # in OpenSUSE) in the ESP so let's put a shim there to redirect to the actual configuration file. + # Signed EFI grub shipped by distributions reads its configuration from /EFI//grub.cfg + # (except in OpenSUSE) in the ESP so let's put a shim there to redirect to the actual configuration + # file. if context.config.distribution == Distribution.opensuse: earlyconfig = context.root / "efi/EFI/BOOT/grub.cfg" else: @@ -371,10 +373,10 @@ def grub_bios_setup(context: Context, partitions: Sequence[Partition]) -> None: tempfile.NamedTemporaryFile(mode="w") as mountinfo, ): # grub-bios-setup insists on being able to open the root device that --directory is located on, which - # needs root privileges. However, it only uses the root device when it is unable to embed itself in the - # bios boot partition. To make installation work unprivileged, we trick grub to think that the root - # device is our image by mounting over its /proc/self/mountinfo file (where it gets its information from) - # with our own file correlating the root directory to our image file. + # needs root privileges. However, it only uses the root device when it is unable to embed itself in + # the bios boot partition. To make installation work unprivileged, we trick grub to think that the + # root device is our image by mounting over its /proc/self/mountinfo file (where it gets its + # information from) with our own file correlating the root directory to our image file. 
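        # The synthetic row written below claims that a FAT filesystem backed
        # by our image file is mounted at /, which is exactly the lookup
        # grub-bios-setup performs for the root device.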
mountinfo.write(f"1 0 1:1 / / - fat {context.staging / context.config.output_with_format}\n") mountinfo.flush() @@ -468,7 +470,7 @@ def pesign_prepare(context: Context) -> None: binary="openssl", options=[ "--ro-bind", context.config.secure_boot_key, context.config.secure_boot_key, - "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, + "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, # noqa ], ), ) # fmt: skip @@ -508,7 +510,7 @@ def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: "--output", workdir(output), ] # fmt: skip options: list[PathString] = [ - "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), + "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), # noqa "--ro-bind", input, workdir(input), "--bind", output.parent, workdir(output.parent), ] # fmt: skip @@ -570,7 +572,9 @@ def find_and_install_shim_binary( for pattern in signed: for p in context.root.glob(pattern): if p.is_symlink() and p.readlink().is_absolute(): - logging.warning(f"Ignoring signed {name} EFI binary which is an absolute path to {p.readlink()}") + logging.warning( + f"Ignoring signed {name} EFI binary which is an absolute path to {p.readlink()}" + ) continue rel = p.relative_to(context.root) @@ -587,7 +591,9 @@ def find_and_install_shim_binary( for pattern in unsigned: for p in context.root.glob(pattern): if p.is_symlink() and p.readlink().is_absolute(): - logging.warning(f"Ignoring unsigned {name} EFI binary which is an absolute path to {p.readlink()}") + logging.warning( + f"Ignoring unsigned {name} EFI binary which is an absolute path to {p.readlink()}" + ) continue rel = p.relative_to(context.root) @@ -616,12 +622,10 @@ def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]: key=lambda k: GenericVersion(k.name), reverse=True, ): - # Make sure we look for anything that remotely resembles vmlinuz, as - # the arch specific install scripts in the kernel source tree sometimes - # do weird stuff. But let's make sure we're not returning UKIs as the - # UKI on Fedora is named vmlinuz-virt.efi. Also look for uncompressed - # images (vmlinux) as some architectures ship those. Prefer vmlinuz if - # both are present. + # Make sure we look for anything that remotely resembles vmlinuz, as the arch specific install + # scripts in the kernel source tree sometimes do weird stuff. But let's make sure we're not returning + # UKIs as the UKI on Fedora is named vmlinuz-virt.efi. Also look for uncompressed images (vmlinux) as + # some architectures ship those. Prefer vmlinuz if both are present. 
for kimg in kver.glob("vmlinuz*"): if KernelType.identify(context.config, kimg) != KernelType.uki: yield kver.name, kimg @@ -722,7 +726,7 @@ def install_systemd_boot(context: Context) -> None: binary="sbsiglist", options=[ "--bind", context.workspace, workdir(context.workspace), - "--ro-bind", context.workspace / "mkosi.der", workdir(context.workspace / "mkosi.der"), + "--ro-bind", context.workspace / "mkosi.der", workdir(context.workspace / "mkosi.der"), # noqa ] ), ) # fmt: skip @@ -741,7 +745,7 @@ def install_systemd_boot(context: Context) -> None: "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), - "--ro-bind", context.workspace / "mkosi.esl", workdir(context.workspace / "mkosi.esl"), + "--ro-bind", context.workspace / "mkosi.esl", workdir(context.workspace / "mkosi.esl"), # noqa "--bind", keys, workdir(keys), ] # fmt: skip if context.config.secure_boot_key_source.type == KeySourceType.engine: @@ -749,7 +753,7 @@ def install_systemd_boot(context: Context) -> None: if context.config.secure_boot_key.exists(): cmd += ["--key", workdir(context.config.secure_boot_key)] options += [ - "--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key), + "--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key), # noqa ] # fmt: skip else: cmd += ["--key", context.config.secure_boot_key] diff --git a/mkosi/completion.py b/mkosi/completion.py index 72f202e30..50ded3728 100644 --- a/mkosi/completion.py +++ b/mkosi/completion.py @@ -123,19 +123,26 @@ def to_bash_hasharray(name: str, entries: Mapping[str, Union[str, int]]) -> str: c.write(to_bash_array("_mkosi_options", options_by_key.keys())) c.write("\n\n") - nargs = to_bash_hasharray("_mkosi_nargs", {optname: v.nargs for optname, v in options_by_key.items()}) + nargs = to_bash_hasharray( + "_mkosi_nargs", {optname: v.nargs for optname, v in options_by_key.items()} + ) c.write(nargs) c.write("\n\n") choices = to_bash_hasharray( - "_mkosi_choices", {optname: " ".join(v.choices) for optname, v in options_by_key.items() if v.choices} + "_mkosi_choices", + {optname: " ".join(v.choices) for optname, v in options_by_key.items() if v.choices}, ) c.write(choices) c.write("\n\n") compgen = to_bash_hasharray( "_mkosi_compgen", - {optname: v.compgen.to_bash() for optname, v in options_by_key.items() if v.compgen != CompGen.default}, + { + optname: v.compgen.to_bash() + for optname, v in options_by_key.items() + if v.compgen != CompGen.default + }, ) c.write(compgen) c.write("\n\n") @@ -182,7 +189,9 @@ def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> def finalize_completion_zsh(options: list[CompletionItem], resources: Path) -> str: def to_zsh_array(name: str, entries: Iterable[str]) -> str: - return f"declare -a {name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" + return ( + f"declare -a {name.replace('-', '_')}=(" + " ".join(shlex.quote(str(e)) for e in entries) + ")" + ) completion = resources / "completion.zsh" @@ -240,7 +249,8 @@ def print_completion(args: config.Args, *, resources: Path) -> None: func = finalize_completion_zsh else: die( - f"{shell!r} is not supported for completion scripts.", hint="Please specify either one of: bash, fish, zsh" + f"{shell!r} is not supported for completion scripts.", + hint="Please specify either one of: bash, fish, zsh", ) completion_args = collect_completion_arguments() diff --git a/mkosi/config.py b/mkosi/config.py index 3306f37a0..ebf088c8f 100644 --- 
a/mkosi/config.py +++ b/mkosi/config.py @@ -135,7 +135,10 @@ class ConfigTree: target: Optional[Path] def with_prefix(self, prefix: PathString = "/") -> tuple[Path, Path]: - return (self.source, Path(prefix) / os.fspath(self.target).lstrip("/") if self.target else Path(prefix)) + return ( + self.source, + Path(prefix) / os.fspath(self.target).lstrip("/") if self.target else Path(prefix), + ) def __str__(self) -> str: return f"{self.source}:{self.target}" if self.target else f"{self.source}" @@ -769,7 +772,10 @@ def config_default_repository_key_fetch(namespace: argparse.Namespace) -> bool: return cast( bool, - (namespace.tools_tree_distribution == Distribution.ubuntu and namespace.distribution.is_rpm_distribution()) + ( + namespace.tools_tree_distribution == Distribution.ubuntu + and namespace.distribution.is_rpm_distribution() + ) or namespace.tools_tree_distribution.is_rpm_distribution(), ) @@ -815,7 +821,9 @@ def config_parse_enum(value: Optional[str], old: Optional[StrEnum]) -> Optional[ return config_parse_enum -def config_make_enum_parser_with_boolean(type: type[StrEnum], *, yes: StrEnum, no: StrEnum) -> ConfigParseCallback: +def config_make_enum_parser_with_boolean( + type: type[StrEnum], *, yes: StrEnum, no: StrEnum +) -> ConfigParseCallback: def config_parse_enum(value: Optional[str], old: Optional[StrEnum]) -> Optional[StrEnum]: if not value: return None @@ -1029,7 +1037,9 @@ def match_path_exists(value: str) -> bool: return Path(value).exists() -def config_parse_root_password(value: Optional[str], old: Optional[tuple[str, bool]]) -> Optional[tuple[str, bool]]: +def config_parse_root_password( + value: Optional[str], old: Optional[tuple[str, bool]] +) -> Optional[tuple[str, bool]]: if not value: return None @@ -1099,7 +1109,8 @@ def config_parse_profile(value: Optional[str], old: Optional[int] = None) -> Opt if not is_valid_filename(value): die( - f"{value!r} is not a valid profile", hint="Profile= or --profile= requires a name with no path components." 
+ f"{value!r} is not a valid profile", + hint="Profile= or --profile= requires a name with no path components.", ) return value @@ -1168,7 +1179,9 @@ def config_parse_vsock_cid(value: Optional[str], old: Optional[int]) -> Optional return cid -def config_parse_minimum_version(value: Optional[str], old: Optional[GenericVersion]) -> Optional[GenericVersion]: +def config_parse_minimum_version( + value: Optional[str], old: Optional[GenericVersion] +) -> Optional[GenericVersion]: if not value: return old @@ -1189,7 +1202,10 @@ def file_run_or_read(file: Path) -> str: content = file.read_text() if content.startswith("#!/"): - die(f"{file} starts with a shebang ({content.splitlines()[0]})", hint="This file should be executable") + die( + f"{file} starts with a shebang ({content.splitlines()[0]})", + hint="This file should be executable", + ) return content @@ -1296,7 +1312,9 @@ def _split_lines(self, text: str, width: int) -> list[str]: lines = text.splitlines() subindent = " " if lines[0].endswith(":") else "" return flatten( - textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent) + textwrap.wrap( + line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent + ) for line in lines ) @@ -1414,7 +1432,9 @@ def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead elif hasattr(s, "read"): j = json.load(s) else: - raise ValueError(f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files.") + raise ValueError( + f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files." + ) def key_transformer(k: str) -> str: return "_".join(part.lower() for part in FALLBACK_NAME_TO_DEST_SPLITTER.split(k)) @@ -1637,7 +1657,11 @@ def workspace_dir_or_default(self) -> Path: if self.workspace_dir: return self.workspace_dir - if (cache := INVOKING_USER.cache_dir()) and cache != Path("/var/cache/mkosi") and os.access(cache, os.W_OK): + if ( + (cache := INVOKING_USER.cache_dir()) + and cache != Path("/var/cache/mkosi") + and os.access(cache, os.W_OK) + ): return cache return Path("/var/tmp") @@ -1746,8 +1770,8 @@ def cache_manifest(self) -> dict[str, Any]: "prepare_scripts": sorted( base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts ), - # We don't use the full path here since tests will often use temporary directories for the output directory - # which would trigger a rebuild every time. + # We don't use the full path here since tests will often use temporary directories for the output + # directory which would trigger a rebuild every time. "tools_tree": self.tools_tree.name if self.tools_tree else None, "tools_tree_distribution": self.tools_tree_distribution, "tools_tree_release": self.tools_tree_release, @@ -1772,7 +1796,9 @@ def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead elif hasattr(s, "read"): j = json.load(s) else: - raise ValueError(f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files.") + raise ValueError( + f"{cls.__name__} can only be constructed from JSON from strings, dictionaries and files." + ) def key_transformer(k: str) -> str: if (s := SETTINGS_LOOKUP_BY_NAME.get(k)) is not None: @@ -1873,7 +1899,8 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple if line[-1] != "]": die(f"{line} is not a valid section") - # Yield the section name with an empty key and value to indicate we've finished the current section. 
+ # Yield the section name with an empty key and value to indicate we've finished the current + # section. if section: yield section, "", "" @@ -2547,7 +2574,8 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple metavar="BOOL", section="Content", parse=config_parse_boolean, - help="When building a kernel modules initrd, include the currently loaded modules on the host in the image", + help="When building a kernel modules initrd, include the currently loaded modules " + "on the host in the image", ), ConfigSetting( dest="kernel_modules_initrd_exclude", @@ -2729,7 +2757,8 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple metavar="FEATURE", section="Validation", parse=config_parse_feature, - help="Measure the components of the unified kernel image (UKI) and embed the PCR signature into the UKI", + help="Measure the components of the unified kernel image (UKI) and " + "embed the PCR signature into the UKI", ), ConfigSetting( dest="passphrase", @@ -2795,7 +2824,9 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple metavar="MIRROR", section="Build", default_factory_depends=("distribution", "mirror", "tools_tree_distribution"), - default_factory=lambda ns: ns.mirror if ns.mirror and ns.distribution == ns.tools_tree_distribution else None, + default_factory=( + lambda ns: ns.mirror if ns.mirror and ns.distribution == ns.tools_tree_distribution else None + ), help="Set the mirror to use for the default tools tree", ), ConfigSetting( @@ -3012,7 +3043,9 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple long="--credential", metavar="NAME=VALUE", section="Host", - parse=config_make_dict_parser(delimiter=" ", parse=parse_credential, allow_paths=True, unescape=True), + parse=config_make_dict_parser( + delimiter=" ", parse=parse_credential, allow_paths=True, unescape=True + ), help="Pass a systemd credential to systemd-nspawn or qemu", paths=("mkosi.credentials",), ), @@ -3535,7 +3568,8 @@ def expand_specifiers(self, text: str, path: Path) -> str: elif setting := SETTINGS_LOOKUP_BY_SPECIFIER.get(c): if (v := self.finalize_value(setting)) is None: logging.warning( - f"Setting {setting.name} specified by specifier '%{c}' in {text} is not yet set, ignoring" + f"Setting {setting.name} specified by specifier '%{c}' " + f"in {text} is not yet set, ignoring" ) continue @@ -3543,8 +3577,8 @@ def expand_specifiers(self, text: str, path: Path) -> str: elif specifier := SPECIFIERS_LOOKUP_BY_CHAR.get(c): specifierns = argparse.Namespace() - # Some specifier methods might want to access the image name or directory mkosi was invoked in so - # let's make sure those are available. + # Some specifier methods might want to access the image name or directory mkosi was + # invoked in so let's make sure those are available. 
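                # (The specifier callbacks receive this bare namespace and may
                # read .image and .directory from it, so plain attributes are
                # all that is needed here.)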
setattr(specifierns, "image", getattr(self.config, "image", None)) setattr(specifierns, "directory", self.cli.directory) @@ -3553,8 +3587,8 @@ def expand_specifiers(self, text: str, path: Path) -> str: if (v := self.finalize_value(setting)) is None: logging.warning( - f"Setting {setting.name} which specifier '%{c}' in {text} depends on is not yet set, " - "ignoring" + f"Setting {setting.name} which specifier '%{c}' in {text} depends on " + "is not yet set, ignoring" ) break @@ -3607,9 +3641,9 @@ def parse_new_includes(self) -> None: self.parse_config_one(path if path.is_file() else Path(".")) def finalize_value(self, setting: ConfigSetting) -> Optional[Any]: - # If a value was specified on the CLI, it always takes priority. If the setting is a collection of values, we - # merge the value from the CLI with the value from the configuration, making sure that the value from the CLI - # always takes priority. + # If a value was specified on the CLI, it always takes priority. If the setting is a collection of + # values, we merge the value from the CLI with the value from the configuration, making sure that the + # value from the CLI always takes priority. if hasattr(self.cli, setting.dest) and (v := getattr(self.cli, setting.dest)) is not None: if isinstance(v, list): return (getattr(self.config, setting.dest, None) or []) + v @@ -3621,8 +3655,8 @@ def finalize_value(self, setting: ConfigSetting) -> Optional[Any]: return v # If the setting was assigned the empty string on the CLI, we don't use any value configured in the - # configuration file. Additionally, if the setting is a collection of values, we won't use any default - # value either if the setting is set to the empty string on the command line. + # configuration file. Additionally, if the setting is a collection of values, we won't use any + # default value either if the setting is set to the empty string on the command line. if ( not hasattr(self.cli, setting.dest) @@ -3638,15 +3672,18 @@ def finalize_value(self, setting: ConfigSetting) -> Optional[Any]: elif hasattr(self.defaults, setting.dest): default = getattr(self.defaults, setting.dest) elif setting.default_factory: - # To determine default values, we need the final values of various settings in - # a namespace object, but we don't want to copy the final values into the config - # namespace object just yet so we create a new namespace object instead. + # To determine default values, we need the final values of various settings in a namespace + # object, but we don't want to copy the final values into the config namespace object just yet so + # we create a new namespace object instead. factoryns = argparse.Namespace( - **{d: self.finalize_value(SETTINGS_LOOKUP_BY_DEST[d]) for d in setting.default_factory_depends} + **{ + d: self.finalize_value(SETTINGS_LOOKUP_BY_DEST[d]) + for d in setting.default_factory_depends + } ) - # Some default factory methods want to access the image name or directory mkosi - # was invoked in so let's make sure those are available. + # Some default factory methods want to access the image name or directory mkosi was invoked in so + # let's make sure those are available. 
setattr(factoryns, "image", getattr(self.config, "image", None)) setattr(factoryns, "directory", self.cli.directory) @@ -3702,8 +3739,8 @@ def match_config(self, path: Path) -> bool: if k != s.name: logging.warning(f"Setting {k} is deprecated, please use {s.name} instead.") - # If we encounter a setting that has not been explicitly configured yet, we assign the default value - # first so that we can match on default values for settings. + # If we encounter a setting that has not been explicitly configured yet, we assign the + # default value first so that we can match on default values for settings. if (value := self.finalize_value(s)) is None: result = False else: @@ -3742,15 +3779,18 @@ def parse_config_one(self, path: Path, profiles: bool = False, local: bool = Fal if local and (path.parent / "mkosi.local.conf").exists(): self.parse_config_one(path.parent / "mkosi.local.conf") - # Configuration from mkosi.local.conf should override other file based configuration but not the CLI - # itself so move the finalized values to the CLI namespace. + # Configuration from mkosi.local.conf should override other file based configuration but not + # the CLI itself so move the finalized values to the CLI namespace. for s in SETTINGS: if hasattr(self.config, s.dest): setattr(self.cli, s.dest, self.finalize_value(s)) delattr(self.config, s.dest) for s in SETTINGS: - if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None: + if ( + s.scope == SettingScope.universal + and (image := getattr(self.config, "image", None)) is not None + ): continue if self.only_sections and s.section not in self.only_sections: @@ -3781,7 +3821,9 @@ def parse_config_one(self, path: Path, profiles: bool = False, local: bool = Fal files = getattr(self.config, "files") files += [abs_path] - for section, k, v in parse_ini(path, only_sections=self.only_sections or {s.section for s in SETTINGS}): + for section, k, v in parse_ini( + path, only_sections=self.only_sections or {s.section for s in SETTINGS} + ): if not k and not v: continue @@ -3791,13 +3833,18 @@ def parse_config_one(self, path: Path, profiles: bool = False, local: bool = Fal if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)): die(f"Unknown setting {name}") - if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None: + if ( + s.scope == SettingScope.universal + and (image := getattr(self.config, "image", None)) is not None + ): die(f"Setting {name} cannot be configured in subimage {image}") if name in self.immutable: die(f"Setting {name} cannot be modified anymore at this point") if section != s.section: - logging.warning(f"Setting {name} should be configured in [{s.section}], not [{section}].") + logging.warning( + f"Setting {name} should be configured in [{s.section}], not [{section}]." + ) if name != s.name: logging.warning(f"Setting {name} is deprecated, please use {s.name} instead.") @@ -3833,7 +3880,9 @@ def parse_config_one(self, path: Path, profiles: bool = False, local: bool = Fal return True -def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tuple[Args, tuple[Config, ...]]: +def parse_config( + argv: Sequence[str] = (), *, resources: Path = Path("/") +) -> tuple[Args, tuple[Config, ...]]: argv = list(argv) # Make sure the verb command gets explicitly passed. 
Insert a -- before the positional verb argument @@ -3857,9 +3906,8 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu context = ParseContext(resources) - # The "image" field does not directly map to a setting but is required - # to determine some default values for settings, so let's set it on the - # config namespace immediately so it's available. + # The "image" field does not directly map to a setting but is required to determine some default values + # for settings, so let's set it on the config namespace immediately so it's available. setattr(context.config, "image", None) # First, we parse the command line arguments into a separate namespace. @@ -3887,15 +3935,17 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu prev = Config.from_json(Path(".mkosi-private/history/latest.json").read_text()) # If we're operating on a previously built image (qemu, boot, shell, ...), we're not rebuilding the - # image and the configuration of the latest build is available, we load the config that was used to build the - # previous image from there instead of parsing configuration files, except for the Host section settings which - # we allow changing without requiring a rebuild of the image. + # image and the configuration of the latest build is available, we load the config that was used to + # build the previous image from there instead of parsing configuration files, except for the Host + # section settings which we allow changing without requiring a rebuild of the image. for s in SETTINGS: if s.section in ("Include", "Host"): continue if hasattr(context.cli, s.dest) and getattr(context.cli, s.dest) != getattr(prev, s.dest): - logging.warning(f"Ignoring {s.long} from the CLI. Run with -f to rebuild the image with this setting") + logging.warning( + f"Ignoring {s.long} from the CLI. Run with -f to rebuild the image with this setting" + ) setattr(context.cli, s.dest, getattr(prev, s.dest)) if hasattr(context.config, s.dest): @@ -3917,9 +3967,9 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu config = copy.deepcopy(context.config) - # After we've finished parsing the configuration, we'll have values in both - # namespaces (context.cli, context.config). To be able to parse the values from a - # single namespace, we merge the final values of each setting into one namespace. + # After we've finished parsing the configuration, we'll have values in both namespaces (context.cli, + # context.config). To be able to parse the values from a single namespace, we merge the final values of + # each setting into one namespace. for s in SETTINGS: setattr(config, s.dest, context.finalize_value(s)) @@ -4130,7 +4180,11 @@ def load_environment(args: argparse.Namespace) -> dict[str, str]: if gnupghome := os.getenv("GNUPGHOME"): env["GNUPGHOME"] = gnupghome - env |= dict(parse_environment(line) for f in args.environment_files for line in f.read_text().strip().splitlines()) + env |= dict( + parse_environment(line) + for f in args.environment_files + for line in f.read_text().strip().splitlines() + ) env |= args.environment return env @@ -4152,7 +4206,10 @@ def load_config(config: argparse.Namespace) -> Config: # Make sure we don't modify the input namespace. 
config = copy.deepcopy(config) - if config.build_dir and config.build_dir.name != f"{config.distribution}~{config.release}~{config.architecture}": + if ( + config.build_dir + and config.build_dir.name != f"{config.distribution}~{config.release}~{config.architecture}" + ): config.build_dir /= f"{config.distribution}~{config.release}~{config.architecture}" if config.sign: @@ -4476,7 +4533,9 @@ def path_list_transformer(pathlist: list[str], fieldtype: type[list[Path]]) -> l def uuid_transformer(uuidstr: str, fieldtype: type[uuid.UUID]) -> uuid.UUID: return uuid.UUID(uuidstr) - def optional_uuid_transformer(uuidstr: Optional[str], fieldtype: type[Optional[uuid.UUID]]) -> Optional[uuid.UUID]: + def optional_uuid_transformer( + uuidstr: Optional[str], fieldtype: type[Optional[uuid.UUID]] + ) -> Optional[uuid.UUID]: return uuid.UUID(uuidstr) if uuidstr is not None else None def root_password_transformer( @@ -4486,7 +4545,9 @@ def root_password_transformer( return None return (cast(str, rootpw[0]), cast(bool, rootpw[1])) - def config_tree_transformer(trees: list[dict[str, Any]], fieldtype: type[ConfigTree]) -> list[ConfigTree]: + def config_tree_transformer( + trees: list[dict[str, Any]], fieldtype: type[ConfigTree] + ) -> list[ConfigTree]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] for d in trees: @@ -4510,7 +4571,9 @@ def enum_list_transformer(enumlist: list[str], fieldtype: type[list[E]]) -> list enumtype = fieldtype.__args__[0] # type: ignore return [enumtype[e] for e in enumlist] - def config_drive_transformer(drives: list[dict[str, Any]], fieldtype: type[QemuDrive]) -> list[QemuDrive]: + def config_drive_transformer( + drives: list[dict[str, Any]], fieldtype: type[QemuDrive] + ) -> list[QemuDrive]: # TODO: exchange for TypeGuard and list comprehension once on 3.10 ret = [] @@ -4540,7 +4603,10 @@ def key_source_transformer(keysource: dict[str, Any], fieldtype: type[KeySource] return KeySource(type=KeySourceType(keysource["Type"]), source=keysource.get("Source", "")) # The type of this should be - # dict[type, Callable[a stringy JSON object (str, null, list or dict of str), type of the key], type of the key] + # dict[ + # type, + # Callable[a stringy JSON object (str, null, list or dict of str), type of the key], type of the key + # ] # though this seems impossible to express, since e.g. 
mypy will make this a # builtins.dict[builtins.object, builtins.function] # whereas pyright gives the type of the dict keys as the proper union of @@ -4578,8 +4644,8 @@ def key_source_transformer(keysource: dict[str, Any], fieldtype: type[KeySource] def json_transformer(key: str, val: Any) -> Any: fieldtype: Optional[dataclasses.Field[Any]] = fields_by_name.get(key) - # It is unlikely that the type of a field will be None only, so let's not bother with a different sentinel - # value + # It is unlikely that the type of a field will be None only, so let's not bother with a different + # sentinel value if fieldtype is None: raise ValueError(f"{refcls} has no field {key}") @@ -4588,14 +4654,18 @@ def json_transformer(key: str, val: Any) -> Any: try: return transformer(val, fieldtype.type) except (ValueError, IndexError, AssertionError) as e: - raise ValueError(f"Unable to parse {val:r} for attribute {key:r} for {refcls.__name__}") from e + raise ValueError( + f"Unable to parse {val!r} for attribute {key!r} for {refcls.__name__}" + ) from e return val return json_transformer -def want_selinux_relabel(config: Config, root: Path, fatal: bool = True) -> Optional[tuple[Path, str, Path, Path]]: +def want_selinux_relabel( + config: Config, root: Path, fatal: bool = True +) -> Optional[tuple[Path, str, Path, Path]]: if config.selinux_relabel == ConfigFeature.disabled: return None @@ -4629,9 +4699,11 @@ def want_selinux_relabel(config: Config, root: Path, fatal: bool = True) -> Opti binpolicydir = root / "etc/selinux" / policy / "policy" # The policy file is named policy.XX where XX is the policy version that indicates what features are - # available. We check for string.digits instead of using isdecimal() as the latter checks for more than just - # digits. + # available. We check for string.digits instead of using isdecimal() as the latter checks for more than + # just digits. policies = [ p for p in binpolicydir.glob("*") if p.suffix and all(c in string.digits for c in p.suffix[1:]) ] if not policies: if fatal and config.selinux_relabel == ConfigFeature.enabled: die(f"SELinux relabel is requested but SELinux binary policy not found in {binpolicydir}") diff --git a/mkosi/distributions/__init__.py b/mkosi/distributions/__init__.py index a04b7dc61..d883c826e 100644 --- a/mkosi/distributions/__init__.py +++ b/mkosi/distributions/__init__.py @@ -194,7 +194,8 @@ def detect_distribution() -> tuple[Optional[Distribution], Optional[str]]: def join_mirror(mirror: str, link: str) -> str: - # urljoin() behaves weirdly if the base does not end with a / or the path starts with a / so fix them up as needed. + # urljoin() behaves weirdly if the base does not end with a / or the path starts with a / so fix them up + # as needed.
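# A minimal sketch of the urljoin() behavior that join_mirror() works around, with
# hypothetical URLs: without a trailing "/" on the base its last component is dropped, and a
# leading "/" on the link discards the base path entirely.
from urllib.parse import urljoin

assert urljoin("https://example.com/fedora", "updates") == "https://example.com/updates"
assert urljoin("https://example.com/fedora/", "/updates") == "https://example.com/updates"
# With the fix-ups applied, the two halves join as intended:
assert urljoin("https://example.com/fedora/", "updates") == "https://example.com/fedora/updates"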
if not mirror.endswith("/"): mirror = f"{mirror}/" link = link.removeprefix("/") diff --git a/mkosi/distributions/centos.py b/mkosi/distributions/centos.py index 76be2d57f..5beafe33b 100644 --- a/mkosi/distributions/centos.py +++ b/mkosi/distributions/centos.py @@ -97,7 +97,11 @@ def architecture(cls, arch: Architecture) -> str: @staticmethod def gpgurls(context: Context) -> tuple[str, ...]: - rel = "RPM-GPG-KEY-CentOS-Official" if context.config.release == "9" else "RPM-GPG-KEY-CentOS-Official-SHA256" + if context.config.release == "9": + rel = "RPM-GPG-KEY-CentOS-Official" + else: + rel = "RPM-GPG-KEY-CentOS-Official-SHA256" + return tuple( find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in (rel, "RPM-GPG-KEY-CentOS-SIG-Extras") @@ -208,9 +212,9 @@ def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: ("epel-testing", "epel/testing"), ("epel-next-testing", "epel/testing/next"), ): - # For EPEL we make the assumption that epel is mirrored in the parent directory of the mirror URL and - # path we were given. Since this doesn't work for all scenarios, we also allow overriding the mirror - # via an environment variable. + # For EPEL we make the assumption that epel is mirrored in the parent directory of the mirror + # URL and path we were given. Since this doesn't work for all scenarios, we also allow + # overriding the mirror via an environment variable. url = context.config.environment.get("EPEL_MIRROR", join_mirror(mirror, "../fedora")) yield RpmRepository( repo, @@ -237,9 +241,13 @@ def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: yield RpmRepository( f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False ) - yield RpmRepository(f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False) + yield RpmRepository( + f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False + ) - yield RpmRepository("epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False) + yield RpmRepository( + "epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False + ) yield RpmRepository( "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False ) @@ -276,7 +284,9 @@ def sig_repositories(cls, context: Context) -> Iterable[RpmRepository]: ) for sig, components, keys in sigs: - gpgurls = tuple(find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys) + gpgurls = tuple( + find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys + ) for c in components: if mirror := context.config.mirror: diff --git a/mkosi/distributions/debian.py b/mkosi/distributions/debian.py index a3c83a303..80875f85a 100644 --- a/mkosi/distributions/debian.py +++ b/mkosi/distributions/debian.py @@ -106,8 +106,8 @@ def install(cls, context: Context) -> None: # unpacked yet, causing the script to fail. To avoid these issues, we have to extract all the # essential debs first, and only then run the maintainer scripts for them. - # First, we set up merged usr. - # This list is taken from https://salsa.debian.org/installer-team/debootstrap/-/blob/master/functions#L1369. + # First, we set up merged usr. This list is taken from + # https://salsa.debian.org/installer-team/debootstrap/-/blob/master/functions#L1369. 
subdirs = ["bin", "sbin", "lib"] + { "amd64" : ["lib32", "lib64", "libx32"], "i386" : ["lib64", "libx32"], @@ -129,11 +129,11 @@ def install(cls, context: Context) -> None: (context.root / d).symlink_to(f"usr/{d}") (context.root / f"usr/{d}").mkdir(parents=True, exist_ok=True) - # Next, we invoke apt-get install to download all the essential packages. With DPkg::Pre-Install-Pkgs, - # we specify a shell command that will receive the list of packages that will be installed on stdin. - # By configuring Debug::pkgDpkgPm=1, apt-get install will not actually execute any dpkg commands, so - # all it does is download the essential debs and tell us their full in the apt cache without actually - # installing them. + # Next, we invoke apt-get install to download all the essential packages. With + # DPkg::Pre-Install-Pkgs, we specify a shell command that will receive the list of packages that will + # be installed on stdin. By configuring Debug::pkgDpkgPm=1, apt-get install will not actually + # execute any dpkg commands, so all it does is download the essential debs and tell us their full in + # the apt cache without actually installing them. with tempfile.NamedTemporaryFile(mode="r") as f: Apt.invoke( context, @@ -154,9 +154,9 @@ def install(cls, context: Context) -> None: # then extracting the tar file into the chroot. for deb in essential: - # If a deb path is in the form of "/var/cache/apt/", we transform it to the corresponding path in - # mkosi's package cache directory. If it's relative to /repository, we transform it to the corresponding - # path in mkosi's local package repository. Otherwise, we use the path as is. + # If a deb path is in the form of "/var/cache/apt/", we transform it to the corresponding + # path in mkosi's package cache directory. If it's relative to /repository, we transform it to + # the corresponding path in mkosi's local package repository. Otherwise, we use the path as is. if Path(deb).is_relative_to("/var/cache"): path = context.config.package_cache_dir_or_default() / Path(deb).relative_to("/var") elif Path(deb).is_relative_to("/repository"): @@ -186,17 +186,19 @@ def install(cls, context: Context) -> None: # Finally, run apt to properly install packages in the chroot without having to worry that maintainer # scripts won't find basic tools that they depend on. - cls.install_packages(context, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential]) + cls.install_packages( + context, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential] + ) fixup_os_release(context) @classmethod def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None: - # Debian policy is to start daemons by default. The policy-rc.d script can be used choose which ones to - # start. Let's install one that denies all daemon startups. + # Debian policy is to start daemons by default. The policy-rc.d script can be used choose which ones + # to start. Let's install one that denies all daemon startups. # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt for more information. - # Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be managed by - # the admin. + # Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be + # managed by the admin. 
policyrcd = context.root / "usr/sbin/policy-rc.d" with umask(~0o755): policyrcd.parent.mkdir(parents=True, exist_ok=True) @@ -259,8 +261,8 @@ def fixup_os_release(context: Context) -> None: return # Debian being Debian means we need to special case handling os-release. Fix the content to actually - # match what we are building, and set up a diversion so that dpkg doesn't overwrite it on package updates. - # Upstream bug report: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1008735. + # match what we are building, and set up a diversion so that dpkg doesn't overwrite it on package + # updates. Upstream bug report: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1008735. for candidate in ["etc/os-release", "usr/lib/os-release", "usr/lib/initrd-release"]: osrelease = context.root / candidate newosrelease = osrelease.with_suffix(".new") @@ -278,11 +280,11 @@ def fixup_os_release(context: Context) -> None: else: new.write(line) - # On dpkg distributions we cannot simply overwrite /etc/os-release as it is owned by a package. - # We need to set up a diversion first, so that it is not overwritten by package updates. - # We do this for /etc/os-release as that will be overwritten on package updates and has - # precedence over /usr/lib/os-release, and ignore the latter and assume that if an usr-only - # image is built then the package manager will not run on it. + # On dpkg distributions we cannot simply overwrite /etc/os-release as it is owned by a package. We + # need to set up a diversion first, so that it is not overwritten by package updates. We do this for + # /etc/os-release as that will be overwritten on package updates and has precedence over + # /usr/lib/os-release, and ignore the latter and assume that if a usr-only image is built then the + # package manager will not run on it. if candidate == "etc/os-release": run( [ @@ -296,7 +298,9 @@ def fixup_os_release(context: Context) -> None: f"/{candidate}.dpkg", f"/{candidate}", ], - sandbox=context.sandbox(binary="dpkg-divert", options=["--bind", context.root, "/buildroot"]), + sandbox=context.sandbox( + binary="dpkg-divert", options=["--bind", context.root, "/buildroot"] + ), ) newosrelease.rename(osrelease)
keys = "https://raw.githubusercontent.com/rpm-software-management/distribution-gpg-keys/main/keys/fedora" - # The rawhide key is a symlink and github doesn't redirect those to the actual file for some reason, so we - # fetch the file and read the release it points to ourselves. + # The rawhide key is a symlink and github doesn't redirect those to the actual file for some + # reason, so we fetch the file and read the release it points to ourselves. with tempfile.TemporaryDirectory() as d: curl(context.config, f"{keys}/RPM-GPG-KEY-fedora-rawhide-primary", Path(d)) key = (Path(d) / "RPM-GPG-KEY-fedora-rawhide-primary").read_text() @@ -121,7 +125,8 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: if context.config.release == "eln": mirror = ( - context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" + context.config.mirror + or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" ) for repo in ("Appstream", "BaseOS", "Extras", "CRB"): url = f"baseurl={join_mirror(mirror, repo)}" @@ -143,13 +148,19 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: url = f"baseurl={join_mirror(m, 'linux/updates/testing/$releasever/Everything')}" yield RpmRepository("updates-testing", f"{url}/$basearch", gpgurls, enabled=False) - yield RpmRepository("updates-testing-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False) + yield RpmRepository( + "updates-testing-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False + ) yield RpmRepository("updates-testing-source", f"{url}/source/tree", gpgurls, enabled=False) else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" yield RpmRepository("fedora", f"{url}&repo=fedora-$releasever", gpgurls) - yield RpmRepository("fedora-debuginfo", f"{url}&repo=fedora-debug-$releasever", gpgurls, enabled=False) - yield RpmRepository("fedora-source", f"{url}&repo=fedora-source-$releasever", gpgurls, enabled=False) + yield RpmRepository( + "fedora-debuginfo", f"{url}&repo=fedora-debug-$releasever", gpgurls, enabled=False + ) + yield RpmRepository( + "fedora-source", f"{url}&repo=fedora-source-$releasever", gpgurls, enabled=False + ) if context.config.release != "rawhide": yield RpmRepository("updates", f"{url}&repo=updates-released-f$releasever", gpgurls) @@ -160,7 +171,10 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: enabled=False, ) yield RpmRepository( - "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False + "updates-source", + f"{url}&repo=updates-released-source-f$releasever", + gpgurls, + enabled=False, ) yield RpmRepository( "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False diff --git a/mkosi/distributions/mageia.py b/mkosi/distributions/mageia.py index d461cc392..ce8317dc6 100644 --- a/mkosi/distributions/mageia.py +++ b/mkosi/distributions/mageia.py @@ -41,7 +41,9 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: return if context.config.mirror: - url = f"baseurl={join_mirror(context.config.mirror, 'distrib/$releasever/$basearch/media/core/')}" + url = ( + f"baseurl={join_mirror(context.config.mirror, 'distrib/$releasever/$basearch/media/core/')}" + ) yield RpmRepository("core-release", f"{url}/release", gpgurls) yield RpmRepository("core-updates", f"{url}/updates/", gpgurls) else: diff --git a/mkosi/distributions/opensuse.py b/mkosi/distributions/opensuse.py index 4e3356ee7..fbbd7b465 100644 --- 
a/mkosi/distributions/opensuse.py +++ b/mkosi/distributions/opensuse.py @@ -108,7 +108,12 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: if zypper and gpgkeys: run( - ["rpm", "--root=/buildroot", "--import", *(key.removeprefix("file://") for key in gpgkeys)], + [ + "rpm", + "--root=/buildroot", + "--import", + *(key.removeprefix("file://") for key in gpgkeys), + ], sandbox=context.sandbox( binary="rpm", options=[ @@ -169,7 +174,8 @@ def repositories(cls, context: Context) -> Iterable[RpmRepository]: and context.config.architecture != Architecture.x86_64 ): die( - f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture", + f"{cls.pretty_name()} only supports current and stable releases " + "for the x86-64 architecture", hint="Specify either tumbleweed or a specific leap release such as 15.6", ) diff --git a/mkosi/initrd.py b/mkosi/initrd.py index 97ec49153..075a9ae18 100644 --- a/mkosi/initrd.py +++ b/mkosi/initrd.py @@ -118,7 +118,12 @@ def main() -> None: if args.format != OutputFormat.directory.value: cmdline += ["--output-mode=600"] - for d in ("/usr/lib/mkosi-initrd", "/usr/local/lib/mkosi-initrd", "/run/mkosi-initrd", "/etc/mkosi-initrd"): + for d in ( + "/usr/lib/mkosi-initrd", + "/usr/local/lib/mkosi-initrd", + "/run/mkosi-initrd", + "/etc/mkosi-initrd", + ): if Path(d).exists(): cmdline += ["--include", d] @@ -150,13 +155,16 @@ def main() -> None: shutil.copy2(Path("/etc") / p, Path(d) / "etc" / p) else: shutil.copytree( - Path("/etc") / p, Path(d) / "etc" / p, ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True + Path("/etc") / p, + Path(d) / "etc" / p, + ignore=shutil.ignore_patterns("gnupg"), + dirs_exist_ok=True, ) cmdline += ["--sandbox-tree", d] - # Prefer dnf as dnf5 has not yet officially replaced it and there's a much bigger chance that there will be a - # populated dnf cache directory. + # Prefer dnf as dnf5 has not yet officially replaced it and there's a much bigger chance that there + # will be a populated dnf cache directory. run( cmdline, stdin=sys.stdin, diff --git a/mkosi/installer/__init__.py b/mkosi/installer/__init__.py index fd9930f93..1b331d004 100644 --- a/mkosi/installer/__init__.py +++ b/mkosi/installer/__init__.py @@ -81,10 +81,10 @@ def mounts(cls, context: Context) -> list[PathString]: src = context.metadata_dir / d / subdir mounts += ["--bind", src, Path("/var") / d / subdir] - # If we're not operating on the configured package cache directory, we're operating on a snapshot of the - # repository metadata. To make sure any downloaded packages are still cached in the configured package - # cache directory in this scenario, we mount in the relevant directories from the configured package cache - # directory. + # If we're not operating on the configured package cache directory, we're operating on a snapshot + # of the repository metadata. To make sure any downloaded packages are still cached in the + # configured package cache directory in this scenario, we mount in the relevant directories from + # the configured package cache directory. 
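# A rough sketch of the cache overlay described in the comment above, with hypothetical
# paths: even when the package manager operates on a metadata snapshot, the real package
# cache directory is still bound into the sandbox so downloads land in the persistent cache.
example_mounts = [
    "--bind", "/home/user/.cache/mkosi/cache/dnf/packages",  # hypothetical host cache path
    "/var/cache/dnf/packages",                               # its location inside the sandbox
]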
if d == "cache" and context.metadata_dir != context.config.package_cache_dir_or_default(): caches = context.config.distribution.package_manager(context.config).cache_subdirs(src) mounts += flatten( @@ -94,7 +94,9 @@ def mounts(cls, context: Context) -> list[PathString]: Path("/var") / d / subdir / p.relative_to(src), ) for p in caches - if (context.config.package_cache_dir_or_default() / d / subdir / p.relative_to(src)).exists() + if ( + context.config.package_cache_dir_or_default() / d / subdir / p.relative_to(src) + ).exists() ) return mounts @@ -107,9 +109,9 @@ def options(cls, *, root: PathString, apivfs: bool = True) -> list[PathString]: "--suppress-chown", # Make sure /etc/machine-id is not overwritten by any package manager post install scripts. "--ro-bind-try", Path(root) / "etc/machine-id", "/buildroot/etc/machine-id", - # If we're already in the sandbox, we want to pick up use the passwd files from /buildroot since the - # original root won't be available anymore. If we're not in the sandbox yet, we want to pick up the passwd - # files from the original root. + # If we're already in the sandbox, we want to pick up use the passwd files from /buildroot since + # the original root won't be available anymore. If we're not in the sandbox yet, we want to pick + # up the passwd files from the original root. *finalize_passwd_mounts(root), ] # fmt: skip @@ -174,9 +176,9 @@ def clean_package_manager_metadata(context: Context) -> None: ): return - # If cleaning is not explicitly requested, keep the repository metadata if we're building a directory or tar image - # (which are often used as a base tree for extension images and thus should retain package manager metadata) or if - # the corresponding package manager is installed in the image. + # If cleaning is not explicitly requested, keep the repository metadata if we're building a directory or + # tar image (which are often used as a base tree for extension images and thus should retain package + # manager metadata) or if the corresponding package manager is installed in the image. executable = context.config.distribution.package_manager(context.config).executable(context.config) remove = [] @@ -187,7 +189,9 @@ def clean_package_manager_metadata(context: Context) -> None: ("dpkg", ["var/lib/dpkg"]), (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"]), ): # fmt: skip - if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary(tool, root=context.root): + if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary( + tool, root=context.root + ): remove += [context.root / p for p in paths if (context.root / p).exists()] rmtree(*remove, sandbox=context.sandbox) diff --git a/mkosi/installer/apt.py b/mkosi/installer/apt.py index 23435becf..ab71473b6 100644 --- a/mkosi/installer/apt.py +++ b/mkosi/installer/apt.py @@ -111,10 +111,11 @@ def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None: (context.root / "var/lib/dpkg/available").touch() - # We have a special apt.conf outside of the sandbox tree that only configures "Dir::Etc" that we pass to - # APT_CONFIG to tell apt it should read config files from /etc/apt in case this is overridden by distributions. - # This is required because apt parses CLI configuration options after parsing its configuration files and as - # such we can't use CLI options to tell apt where to look for configuration files. 
+ # We have a special apt.conf outside of the sandbox tree that only configures "Dir::Etc" that we pass + # to APT_CONFIG to tell apt it should read config files from /etc/apt in case this is overridden by + # distributions. This is required because apt parses CLI configuration options after parsing its + # configuration files and as such we can't use CLI options to tell apt where to look for + # configuration files. config = context.sandbox_tree / "etc/apt.conf" if not config.exists(): config.write_text( @@ -131,8 +132,8 @@ def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None: if repo.signedby and not repo.signedby.exists(): die( f"Keyring for repo {repo.url} not found at {repo.signedby}", - hint="Make sure the right keyring package (e.g. debian-archive-keyring, kali-archive-keyring " - "or ubuntu-keyring) is installed", + hint="Make sure the right keyring package (e.g. debian-archive-keyring, " + "kali-archive-keyring or ubuntu-keyring) is installed", ) with sources.open("w") as f: @@ -192,7 +193,10 @@ def cmd(cls, context: Context, command: str = "apt-get") -> list[PathString]: ] # fmt: skip if not context.config.with_docs: - cmdline += [f"--option=DPkg::Options::=--path-exclude=/{glob}" for glob in cls.documentation_exclude_globs] + cmdline += [ + f"--option=DPkg::Options::=--path-exclude=/{glob}" + for glob in cls.documentation_exclude_globs + ] cmdline += ["--option=DPkg::Options::=--path-include=/usr/share/doc/*/copyright"] if context.config.proxy_url: diff --git a/mkosi/installer/dnf.py b/mkosi/installer/dnf.py index 83fb48ad0..5b4c4db7b 100644 --- a/mkosi/installer/dnf.py +++ b/mkosi/installer/dnf.py @@ -25,7 +25,9 @@ def subdir(cls, config: Config) -> Path: @classmethod def cache_subdirs(cls, cache: Path) -> list[Path]: - return [p / "packages" for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name] + return [ + p / "packages" for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name + ] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: @@ -53,8 +55,8 @@ def setup(cls, context: Context, repositories: Sequence[RpmRepository], filelist if cls.executable(context.config).endswith("dnf5") and filelists: f.write("[main]\noptional_metadata_types=filelists\n") - # The versionlock plugin will fail if enabled without a configuration file so lets' write a noop configuration - # file to make it happy which can be overridden by users. + # The versionlock plugin will fail if enabled without a configuration file so let's write a noop + # configuration file to make it happy which can be overridden by users. versionlock = context.sandbox_tree / "etc/dnf/plugins/versionlock.conf" if not versionlock.exists(): versionlock.parent.mkdir(parents=True, exist_ok=True) @@ -150,7 +152,9 @@ def cmd( cmdline += ["--setopt=cacheonly=metadata"] if not context.config.architecture.is_native(): - cmdline += [f"--forcearch={context.config.distribution.architecture(context.config.architecture)}"] + cmdline += [ + f"--forcearch={context.config.distribution.architecture(context.config.architecture)}" + ] if not context.config.with_docs: cmdline += ["--no-docs" if dnf.endswith("dnf5") else "--nodocs"] @@ -194,8 +198,8 @@ def invoke( stdout=stdout, ) finally: - # dnf interprets the log directory relative to the install root so there's nothing we can do but to remove - # the log files from the install root afterwards.
+ # dnf interprets the log directory relative to the install root so there's nothing we can do but + # remove the log files from the install root afterwards. if (context.root / "var/log").exists(): for p in (context.root / "var/log").iterdir(): if any(p.name.startswith(prefix) for prefix in ("dnf", "hawkey", "yum")): @@ -214,7 +218,9 @@ def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> N def createrepo(cls, context: Context) -> None: run( ["createrepo_c", context.repository], - sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]), + sandbox=context.sandbox( + binary="createrepo_c", options=["--bind", context.repository, context.repository] + ), ) (context.sandbox_tree / "etc/yum.repos.d/mkosi-local.repo").write_text( diff --git a/mkosi/installer/pacman.py b/mkosi/installer/pacman.py index e8e43c589..729bbac8c 100644 --- a/mkosi/installer/pacman.py +++ b/mkosi/installer/pacman.py @@ -52,15 +52,15 @@ def scripts(cls, context: Context) -> dict[str, list[PathString]]: def mounts(cls, context: Context) -> list[PathString]: mounts = [ *super().mounts(context), - # pacman writes downloaded packages to the first writable cache directory. We don't want it to write to our - # local repository directory so we expose it as a read-only directory to pacman. + # pacman writes downloaded packages to the first writable cache directory. We don't want it to + # write to our local repository directory so we expose it as a read-only directory to pacman. "--ro-bind", context.repository, "/var/cache/pacman/mkosi", ] # fmt: skip if (context.root / "var/lib/pacman/local").exists(): - # pacman reuses the same directory for the sync databases and the local database containing the list of - # installed packages. The former should go in the cache directory, the latter should go in the image, so we - # bind mount the local directory from the image to make sure that happens. + # pacman reuses the same directory for the sync databases and the local database containing the + # list of installed packages. The former should go in the cache directory, the latter should go + # in the image, so we bind mount the local directory from the image to make sure that happens. mounts += ["--bind", context.root / "var/lib/pacman/local", "/var/lib/pacman/local"] return mounts @@ -143,8 +143,9 @@ def cmd(cls, context: Context) -> list[PathString]: "--root=/buildroot", "--logfile=/dev/null", "--dbpath=/var/lib/pacman", - # Make sure pacman looks at our local repository first by putting it as the first cache directory. We mount - # it read-only so the second directory will still be used for writing new cache entries. + # Make sure pacman looks at our local repository first by putting it as the first cache + # directory. We mount it read-only so the second directory will still be used for writing new + # cache entries.
"--cachedir=/var/cache/pacman/mkosi", "--cachedir=/var/cache/pacman/pkg", "--hookdir=/buildroot/etc/pacman.d/hooks", @@ -183,7 +184,9 @@ def createrepo(cls, context: Context) -> None: context.repository / "mkosi.db.tar", *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name)), ], - sandbox=context.sandbox(binary="repo-add", options=["--bind", context.repository, context.repository]), + sandbox=context.sandbox( + binary="repo-add", options=["--bind", context.repository, context.repository] + ), ) (context.sandbox_tree / "etc/mkosi-local.conf").write_text( diff --git a/mkosi/installer/rpm.py b/mkosi/installer/rpm.py index ff99f0d32..e7c3e37f5 100644 --- a/mkosi/installer/rpm.py +++ b/mkosi/installer/rpm.py @@ -74,7 +74,9 @@ def setup_rpm(context: Context, *, dbpath: str = "/usr/lib/sysimage/rpm") -> Non plugindir = Path( run( - ["rpm", "--eval", "%{__plugindir}"], sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE + ["rpm", "--eval", "%{__plugindir}"], + sandbox=context.sandbox(binary="rpm"), + stdout=subprocess.PIPE, ).stdout.strip() ) if (plugindir := context.config.tools() / plugindir.relative_to("/")).exists(): @@ -82,8 +84,8 @@ def setup_rpm(context: Context, *, dbpath: str = "/usr/lib/sysimage/rpm") -> Non for plugin in plugindir.iterdir(): f.write(f"%__transaction_{plugin.stem} %{{nil}}\n") - # Write an rpm sequoia policy that allows SHA1 as various distribution GPG keys (OpenSUSE) still use SHA1 for - # various things. + # Write an rpm sequoia policy that allows SHA1 as various distribution GPG keys (OpenSUSE) still use SHA1 + # for various things. # TODO: Remove when all rpm distribution GPG keys have stopped using SHA1. if not (p := context.sandbox_tree / "etc/crypto-policies/back-ends/rpm-sequoia.config").exists(): p.parent.mkdir(parents=True, exist_ok=True) diff --git a/mkosi/installer/zypper.py b/mkosi/installer/zypper.py index ff8708786..90f78a990 100644 --- a/mkosi/installer/zypper.py +++ b/mkosi/installer/zypper.py @@ -48,9 +48,8 @@ def setup(cls, context: Context, repositories: Sequence[RpmRepository]) -> None: config = context.sandbox_tree / "etc/zypp/zypp.conf" config.parent.mkdir(exist_ok=True, parents=True) - # rpm.install.excludedocs can only be configured in zypp.conf so we append - # to any user provided config file. Let's also bump the refresh delay to - # the same default as dnf which is 48 hours. + # rpm.install.excludedocs can only be configured in zypp.conf so we append to any user provided + # config file. Let's also bump the refresh delay to the same default as dnf which is 48 hours. with config.open("a") as f: f.write( textwrap.dedent( @@ -67,9 +66,9 @@ def setup(cls, context: Context, repositories: Sequence[RpmRepository]) -> None: repofile.parent.mkdir(exist_ok=True, parents=True) with repofile.open("w") as f: for repo in repositories: - # zypper uses the repo ID as its cache key which is unsafe so add a hash of the url used to it to - # make sure a unique cache is used for each repository. We use roughly the same algorithm here that - # dnf uses as well. + # zypper uses the repo ID as its cache key which is unsafe so add a hash of the url used + # to it to make sure a unique cache is used for each repository. We use roughly the same + # algorithm here that dnf uses as well. 
key = hashlib.sha256(repo.url.encode()).hexdigest()[:16] f.write( @@ -140,7 +139,9 @@ def sync(cls, context: Context, force: bool, arguments: Sequence[str] = ()) -> N def createrepo(cls, context: Context) -> None: run( ["createrepo_c", context.repository], - sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]), + sandbox=context.sandbox( + binary="createrepo_c", options=["--bind", context.repository, context.repository] + ), ) (context.sandbox_tree / "etc/zypp/repos.d/mkosi-local.repo").write_text( diff --git a/mkosi/kmod.py b/mkosi/kmod.py index e04d4fea9..b3a8ec1f1 100644 --- a/mkosi/kmod.py +++ b/mkosi/kmod.py @@ -15,10 +15,15 @@ def loaded_modules() -> list[str]: # Loaded modules are listed with underscores but the filenames might use dashes instead. - return [rf"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines()] + return [ + rf"/{line.split()[0].replace('_', '[_-]')}\.ko" + for line in Path("/proc/modules").read_text().splitlines() + ] -def filter_kernel_modules(root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str]) -> list[Path]: +def filter_kernel_modules( + root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str] +) -> list[Path]: modulesd = Path("usr/lib/modules") / kver with chdir(root): modules = set(modulesd.rglob("*.ko*")) @@ -73,10 +78,11 @@ def resolve_module_dependencies( log_step("Running modinfo to fetch kernel module dependencies") - # We could run modinfo once for each module but that's slow. Luckily we can pass multiple modules to modinfo and - # it'll process them all in a single go. We get the modinfo for all modules to build two maps that map the path of - # the module to its module dependencies and its firmware dependencies respectively. Because there's more kernel - # modules than the max number of accepted CLI arguments, we split the modules list up into chunks. + # We could run modinfo once for each module but that's slow. Luckily we can pass multiple modules to + # modinfo and it'll process them all in a single go. We get the modinfo for all modules to build two maps + # that map the path of the module to its module dependencies and its firmware dependencies + # respectively. Because there are more kernel modules than the max number of accepted CLI arguments, we + # split the modules list up into chunks. info = "" for i in range(0, len(nametofile.keys()), 8500): chunk = list(nametofile.keys())[i : i + 8500] @@ -104,9 +110,11 @@ def resolve_module_dependencies( depends.update(normalize_module_name(d) for d in value.strip().split(",") if d) elif key == "softdep": - # softdep is delimited by spaces and can contain strings like pre: and post: so discard anything that - # ends with a colon. + # softdep is delimited by spaces and can contain strings like pre: and post: so discard + # anything that ends with a colon. depends.update( normalize_module_name(d) for d in value.strip().split() if not d.endswith(":") ) elif key == "firmware": fw = [f for f in Path("usr/lib/firmware").glob(f"{value.strip()}*")] @@ -116,9 +124,8 @@ def resolve_module_dependencies( firmware.update(fw) elif key == "name": - # The file names use dashes, but the module names use underscores. We track the names - # in terms of the file names, since the depends use dashes and therefore filenames as - # well.
+ # The file names use dashes, but the module names use underscores. We track the names in + # terms of the file names, since the depends use dashes and therefore filenames as well. name = normalize_module_name(value.strip()) moddep[name] = depends @@ -157,22 +164,28 @@ def gen_required_kernel_modules( ) -> Iterator[Path]: modulesd = Path("usr/lib/modules") / kver - # There is firmware in /usr/lib/firmware that is not depended on by any modules so if any firmware was installed - # we have to take the slow path to make sure we don't copy firmware into the initrd that is not depended on by any - # kernel modules. + # There is firmware in /usr/lib/firmware that is not depended on by any modules so if any firmware was + # installed we have to take the slow path to make sure we don't copy firmware into the initrd that is not + # depended on by any kernel modules. if exclude or (root / "usr/lib/firmware").glob("*"): modules = filter_kernel_modules(root, kver, include=include, exclude=exclude) names = [module_path_to_name(m) for m in modules] mods, firmware = resolve_module_dependencies(root, kver, names) else: - logging.debug("No modules excluded and no firmware installed, using kernel modules generation fast path") + logging.debug( + "No modules excluded and no firmware installed, using kernel modules generation fast path" + ) with chdir(root): mods = set(modulesd.rglob("*.ko*")) firmware = set() yield from sorted( itertools.chain( - {p.relative_to(root) for f in mods | firmware for p in parents_below(root / f, root / "usr/lib")}, + { + p.relative_to(root) + for f in mods | firmware + for p in parents_below(root / f, root / "usr/lib") + }, mods, firmware, (p.relative_to(root) for p in (root / modulesd).glob("modules*")), @@ -181,7 +194,9 @@ def gen_required_kernel_modules( if (modulesd / "vdso").exists(): if not mods: - yield from (p.relative_to(root) for p in parents_below(root / modulesd / "vdso", root / "usr/lib")) + yield from ( + p.relative_to(root) for p in parents_below(root / modulesd / "vdso", root / "usr/lib") + ) yield modulesd / "vdso" yield from sorted(p.relative_to(root) for p in (root / modulesd / "vdso").iterdir()) diff --git a/mkosi/manifest.py b/mkosi/manifest.py index ed31b9caa..dd047354b 100644 --- a/mkosi/manifest.py +++ b/mkosi/manifest.py @@ -110,7 +110,9 @@ def record_rpm_packages(self) -> None: "--queryformat", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{LONGSIZE}\t%{INSTALLTIME}\n", ], stdout=subprocess.PIPE, - sandbox=self.context.sandbox(binary="rpm", options=["--ro-bind", self.context.root, "/buildroot"]), + sandbox=( + self.context.sandbox(binary="rpm", options=["--ro-bind", self.context.root, "/buildroot"]) + ), ) # fmt: skip packages = sorted(c.stdout.splitlines()) @@ -173,7 +175,7 @@ def record_deb_packages(self) -> None: "dpkg-query", "--admindir=/buildroot/var/lib/dpkg", "--show", - "--showformat", r"${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n", + "--showformat", r"${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n", # noqa ], stdout=subprocess.PIPE, sandbox=self.context.sandbox( @@ -187,17 +189,14 @@ def record_deb_packages(self) -> None: for package in packages: name, source, version, arch, size, installtime = package.split("\t") - # dpkg records the size in KBs, the field is optional - # db-fsys:Last-Modified is not available in very old dpkg, so just skip creating - # the manifest for sysext when building on very old distributions by 
setting the - # timestamp to epoch. This only affects Ubuntu Bionic which is nearing EOL. - # If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by - # packages that were installed in this execution of mkosi. We assume that the - # upper layer is put together in one go, which currently is always true. - if ( - self.context.config.base_trees - and datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) < self._init_timestamp - ): + # dpkg records the size in KBs; the field is optional. db-fsys:Last-Modified is not available in + # very old dpkg, so just skip creating the manifest for sysext when building on very old + # distributions by setting the timestamp to epoch. This only affects Ubuntu Bionic which is + # nearing EOL. If we are creating a layer based on a BaseImage=, e.g. a sysext, filter by + # packages that were installed in this execution of mkosi. We assume that the upper layer is put + # together in one go, which currently is always true. + install_timestamp = datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) + if self.context.config.base_trees and install_timestamp < self._init_timestamp: continue manifest = PackageManifest("deb", name, version, arch, int(size or 0) * 1024) @@ -208,10 +207,11 @@ def record_deb_packages(self) -> None: source_package = self.source_packages.get(source) if source_package is None: - # Yes, --quiet is specified twice, to avoid output about download stats. Note that the argument of the - # 'changelog' verb is the binary package name, not the source package name. We also have to set "Dir" - # explicitly because apt has no separate option to configure the changelog directory. Apt.invoke() - # sets all options that are interpreted relative to Dir to absolute paths by default so this is safe. + # Yes, --quiet is specified twice, to avoid output about download stats. Note that the + # argument of the 'changelog' verb is the binary package name, not the source package + # name. We also have to set "Dir" explicitly because apt has no separate option to configure + # the changelog directory. Apt.invoke() sets all options that are interpreted relative to Dir + # to absolute paths by default so this is safe. result = Apt.invoke( self.context, "changelog", diff --git a/mkosi/mounts.py b/mkosi/mounts.py index a2ddb60f8..2de1b6638 100644 --- a/mkosi/mounts.py +++ b/mkosi/mounts.py @@ -44,7 +44,9 @@ def mount_overlay( os.chmod(upperdir, st.st_mode) workdir = Path( - stack.enter_context(tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir")) + stack.enter_context( + tempfile.TemporaryDirectory(dir=upperdir.parent, prefix=f"{upperdir.name}-workdir") + ) ) try: diff --git a/mkosi/qemu.py b/mkosi/qemu.py index 6189d3645..3e8fa7610 100644 --- a/mkosi/qemu.py +++ b/mkosi/qemu.py @@ -82,7 +82,9 @@ def available(self, log: bool = False) -> bool: raise e if log and e.errno in (errno.ENOENT, errno.ENODEV): - logging.warning(f"{self.device()} not found. Not adding {self.description()} to the virtual machine.") + logging.warning( + f"{self.device()} not found. Not adding {self.description()} to the virtual machine."
+ ) if log and e.errno in (errno.EPERM, errno.EACCES): logging.warning( @@ -203,13 +205,14 @@ def find_ovmf_firmware(config: Config, qemu: Path, firmware: QemuFirmware) -> Op if target["architecture"] != arch: continue - # We cannot use fnmatch as for example our default machine for x86-64 is q35 and the firmware description - # lists "pc-q35-*" so we use a substring check instead. + # We cannot use fnmatch as for example our default machine for x86-64 is q35 and the firmware + # description lists "pc-q35-*" so we use a substring check instead. if any(machine in glob for glob in target["machines"]): break else: logging.debug( - f"{p.name} firmware description does not target architecture {arch} or machine {machine}, skipping" + f"{p.name} firmware description does not target architecture {arch} or " + f"machine {machine}, skipping" ) continue @@ -247,7 +250,14 @@ def start_swtpm(config: Config) -> Iterator[Path]: with tempfile.TemporaryDirectory(prefix="mkosi-swtpm-") as state: # swtpm_setup is noisy and doesn't have a --quiet option so we pipe its stdout to /dev/null. run( - ["swtpm_setup", "--tpm-state", state, "--tpm2", "--pcr-banks", "sha256", "--config", "/dev/null"], + [ + "swtpm_setup", + "--tpm-state", state, + "--tpm2", + "--pcr-banks", + "sha256", + "--config", "/dev/null", + ], sandbox=config.sandbox( binary="swtpm_setup", options=["--bind", state, state], @@ -257,12 +267,12 @@ def start_swtpm(config: Config) -> Iterator[Path]: ), ), stdout=None if ARG_DEBUG.get() else subprocess.DEVNULL, - ) + ) # fmt: skip cmdline = ["swtpm", "socket", "--tpm2", "--tpmstate", f"dir={state}"] - # We create the socket ourselves and pass the fd to swtpm to avoid race conditions where we start qemu before - # swtpm has had the chance to create the socket (or where we try to chown it first). + # We create the socket ourselves and pass the fd to swtpm to avoid race conditions where we start + # qemu before swtpm has had the chance to create the socket (or where we try to chown it first). with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock: path = Path(state) / Path("sock") sock.bind(os.fspath(path)) @@ -301,7 +311,9 @@ def systemd_escape(config: Config, s: PathString, path: bool = False) -> str: if path: cmdline += ["--path"] - return run(cmdline, stdout=subprocess.PIPE, sandbox=config.sandbox(binary="systemd-escape")).stdout.strip() + return run( + cmdline, stdout=subprocess.PIPE, sandbox=config.sandbox(binary="systemd-escape") + ).stdout.strip() @contextlib.contextmanager @@ -324,7 +336,8 @@ def start_virtiofsd( virtiofsd, "--shared-dir", workdir(directory), "--xattr", - # qemu's client doesn't seem to support announcing submounts so disable the feature to avoid the warning. + # qemu's client doesn't seem to support announcing submounts so disable the feature to avoid the + # warning. "--no-announce-submounts", "--sandbox=chroot", f"--inode-file-handles={'prefer' if os.getuid() == 0 and not uidmap else 'never'}", @@ -337,13 +350,13 @@ def start_virtiofsd( if uidmap: st = Path(directory).stat() - # If we're already running as the same user that we'll be running virtiofsd as, don't bother doing any explicit - # user switching or chown()'ing as it's not needed in this case. + # If we're already running as the same user that we'll be running virtiofsd as, don't bother doing + # any explicit user switching or chown()'ing as it's not needed in this case.
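# A minimal sketch of the bind-before-spawn pattern used by start_swtpm() above and
# start_virtiofsd() below (the path here is hypothetical): because the listening socket
# exists before the service is spawned, a client connecting early queues instead of failing.
import socket

sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
sock.bind("/tmp/mkosi-example.sock")
sock.listen()
# The service is then spawned with pass_fds=(sock.fileno(),) and told about the inherited fd,
# e.g. via the systemd socket-activation convention (LISTEN_FDS=1, first fd at 3).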
if st.st_uid == os.getuid() and st.st_gid == os.getgid(): st = None - # We create the socket ourselves and pass the fd to virtiofsd to avoid race conditions where we start qemu - # before virtiofsd has had the chance to create the socket (or where we try to chown it first). + # We create the socket ourselves and pass the fd to virtiofsd to avoid race conditions where we start + # qemu before virtiofsd has had the chance to create the socket (or where we try to chown it first). with ( tempfile.TemporaryDirectory(prefix="mkosi-virtiofsd-") as context, socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock, @@ -352,8 +365,8 @@ def start_virtiofsd( # Make sure virtiofsd can access the socket in this directory. os.chown(context, st.st_uid, st.st_gid) - # Make sure we can use the socket name as a unique identifier for the fs as well but make sure it's not too - # long as virtiofs tag names are limited to 36 bytes. + # Make sure we can use the socket name as a unique identifier for the fs as well but make sure it's + # not too long as virtiofs tag names are limited to 36 bytes. path = Path(context) / f"sock-{uuid.uuid4().hex}"[:35] sock.bind(os.fspath(path)) sock.listen() @@ -364,9 +377,9 @@ def start_virtiofsd( cmdline += ["--fd", str(SD_LISTEN_FDS_START)] - # We want RuntimeBuildSources= and RuntimeTrees= to do the right thing even when running mkosi qemu as root - # without the source directories necessarily being owned by root. We achieve this by running virtiofsd as the - # owner of the source directory and then mapping that uid to root. + # We want RuntimeBuildSources= and RuntimeTrees= to do the right thing even when running mkosi qemu + # as root without the source directories necessarily being owned by root. We achieve this by running + # virtiofsd as the owner of the source directory and then mapping that uid to root. name = f"mkosi-virtiofsd-{name}" description = f"virtiofsd for {directory}" @@ -381,9 +394,10 @@ def start_virtiofsd( pass_fds=(sock.fileno(),), user=st.st_uid if st and not scope else None, group=st.st_gid if st and not scope else None, - # If we're booting from virtiofs and unshare is too old, we don't set up a scope so we can use our own - # function to become root in the subuid range. - # TODO: Drop this as soon as we drop CentOS Stream 9 support and can rely on newer unshare features. + # If we're booting from virtiofs and unshare is too old, we don't set up a scope so we can use + # our own function to become root in the subuid range. + # TODO: Drop this as soon as we drop CentOS Stream 9 support and can rely on newer unshare + # features. 
preexec_fn=become_root_in_subuid_range if not scope and not uidmap else None, sandbox=config.sandbox( binary=virtiofsd, @@ -443,7 +457,9 @@ async def notify() -> None: try: yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{vsock.getsockname()[1]}", messages finally: - logging.debug(f"Received {num_messages} notify messages totalling {format_bytes(num_bytes)} bytes") + logging.debug( + f"Received {num_messages} notify messages totalling {format_bytes(num_bytes)} bytes" + ) for k, v in messages.items(): logging.debug(f"- {k}={v}") @@ -456,10 +472,14 @@ def start_journal_remote(config: Config, sockfd: int) -> Iterator[None]: if not bin: die("systemd-journal-remote must be installed to forward logs from the virtual machine") - d = config.forward_journal.parent if config.forward_journal.suffix == ".journal" else config.forward_journal + if config.forward_journal.suffix == ".journal": + d = config.forward_journal.parent + else: + d = config.forward_journal + if not d.exists(): - # Pass exist_ok=True because multiple mkosi processes might be trying to create the parent directory at the - # same time. + # Pass exist_ok=True because multiple mkosi processes might be trying to create the parent directory + # at the same time. d.mkdir(exist_ok=True, parents=True) # Make sure COW is disabled so systemd-journal-remote doesn't complain on btrfs filesystems. run(["chattr", "+C", d], check=False, stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None) @@ -468,8 +488,8 @@ def start_journal_remote(config: Config, sockfd: int) -> Iterator[None]: with tempfile.NamedTemporaryFile(mode="w", prefix="mkosi-journal-remote-config-") as f: os.chmod(f.name, 0o644) - # Make sure we capture all the logs by bumping the limits. We set MaxFileSize=4G because with the compact mode - # enabled the files cannot grow any larger anyway. + # Make sure we capture all the logs by bumping the limits. We set MaxFileSize=4G because with the + # compact mode enabled the files cannot grow any larger anyway. f.write( textwrap.dedent( f"""\ @@ -532,9 +552,9 @@ def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]: yield src return - # If we're booting a directory image that was not built as root, we have to make an ephemeral copy so that we can - # ensure the files in the directory are either owned by the actual root user or a fake one in a subuid user - # namespace which we'll run virtiofsd as. + # If we're booting a directory image that was not built as root, we have to make an ephemeral copy so + # that we can ensure the files in the directory are either owned by the actual root user or a fake one in + # a subuid user namespace which we'll run virtiofsd as. if not config.ephemeral and (config.output_format != OutputFormat.directory or src.stat().st_uid == 0): with flock_or_die(src): yield src @@ -543,8 +563,8 @@ def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]: src = src.resolve() # tempfile doesn't provide an API to get a random filename in an arbitrary directory so we do this - # instead. Limit the size to 16 characters as the output name might be used in a unix socket path by vmspawn and - # needs to fit in 108 characters. + # instead. Limit the size to 16 characters as the output name might be used in a unix socket path by + # vmspawn and needs to fit in 108 characters. tmp = src.parent / f"{src.name}-{uuid.uuid4().hex[:16]}" try: @@ -569,7 +589,8 @@ def copy() -> None: copy_tree( src, tmp, - # Make sure the ownership is changed to the (fake) root user if the directory was not built as root. 
+                # Make sure the ownership is changed to the (fake) root user if the directory was not built
+                # as root.
                 preserve=config.output_format == OutputFormat.directory and src.stat().st_uid == 0,
                 use_subvolumes=config.use_subvolumes,
                 sandbox=config.sandbox,
@@ -629,13 +650,16 @@ def finalize_qemu_firmware(config: Config, kernel: Optional[Path]) -> QemuFirmwa
                 else QemuFirmware.linux
             )
         elif (
-            config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None
+            config.output_format in (OutputFormat.cpio, OutputFormat.directory)
+            or config.architecture.to_efi() is None
         ):
             return QemuFirmware.linux
         else:
             # At the moment there are no qemu firmware descriptions for non-x86 architectures that advertise
             # secure-boot support so let's default to no secure boot for non-x86 architectures.
-            return QemuFirmware.uefi_secure_boot if config.architecture.is_x86_variant() else QemuFirmware.uefi
+            return (
+                QemuFirmware.uefi_secure_boot if config.architecture.is_x86_variant() else QemuFirmware.uefi
+            )
     else:
         return config.qemu_firmware
@@ -674,7 +698,9 @@ def finalize_firmware_variables(
             ),
         )  # fmt: skip
     else:
-        tools = Path("/") if any(qemu.is_relative_to(d) for d in config.extra_search_paths) else config.tools()
+        tools = (
+            Path("/") if any(qemu.is_relative_to(d) for d in config.extra_search_paths) else config.tools()
+        )
         vars = (
             tools / ovmf.vars.relative_to("/")
             if config.qemu_firmware_variables == Path("microsoft") or not config.qemu_firmware_variables
@@ -706,7 +732,9 @@ def apply_runtime_size(config: Config, image: Path) -> None:
 @contextlib.contextmanager
 def finalize_drive(drive: QemuDrive) -> Iterator[Path]:
-    with tempfile.NamedTemporaryFile(dir=drive.directory or "/var/tmp", prefix=f"mkosi-drive-{drive.id}") as file:
+    with tempfile.NamedTemporaryFile(
+        dir=drive.directory or "/var/tmp", prefix=f"mkosi-drive-{drive.id}"
+    ) as file:
         file.truncate(drive.size)
         yield Path(file.name)
@@ -836,21 +864,30 @@ def run_qemu(args: Args, config: Config) -> None:
             die("RuntimeTrees= cannot be used when booting in BIOS firmware")
     if config.qemu_kvm == ConfigFeature.enabled and not config.architecture.is_native():
-        die(f"KVM acceleration requested but {config.architecture} does not match the native host architecture")
+        die(
+            f"KVM acceleration requested but {config.architecture} does not match "
+            "the native host architecture"
+        )
     if config.qemu_firmware_variables == Path("custom") and not config.secure_boot_certificate:
         die("SecureBootCertificate= must be configured to use QemuFirmwareVariables=custom")
-    # After we unshare the user namespace to sandbox qemu, we might not have access to /dev/kvm or related device nodes
-    # anymore as access to these might be gated behind the kvm group and we won't be part of the kvm group anymore
-    # after unsharing the user namespace. To get around this, open all those device nodes early can pass them as file
-    # descriptors to qemu later. Note that we can't pass the kvm file descriptor to qemu until version 9.0.
+    # After we unshare the user namespace to sandbox qemu, we might not have access to /dev/kvm or related
+    # device nodes anymore as access to these might be gated behind the kvm group and we won't be part of the
+    # kvm group anymore after unsharing the user namespace. To get around this, open all those device nodes
+    # early so we can pass them as file descriptors to qemu later. Note that we can't pass the kvm file
+    # descriptor to qemu until version 9.0.
qemu_device_fds = { - d: d.open() for d in QemuDeviceNode if d.feature(config) != ConfigFeature.disabled and d.available(log=True) + d: d.open() + for d in QemuDeviceNode + if d.feature(config) != ConfigFeature.disabled and d.available(log=True) } if not (qemu := config.find_binary(f"qemu-system-{config.architecture.to_qemu()}")): - die("qemu not found.", hint=f"Is qemu-system-{config.architecture.to_qemu()} installed on the host system?") + die( + "qemu not found.", + hint=f"Is qemu-system-{config.architecture.to_qemu()} installed on the host system?", + ) have_kvm = (qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or ( qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds @@ -870,7 +907,8 @@ def run_qemu(args: Args, config: Config) -> None: if config.output_format in (OutputFormat.uki, OutputFormat.esp) and kernel: logging.warning( - f"Booting UKI output, kernel {kernel} configured with QemuKernel= or passed with -kernel will not be used" + f"Booting UKI output, kernel {kernel} configured with QemuKernel= or " + "passed with -kernel will not be used" ) kernel = None @@ -957,7 +995,8 @@ def run_qemu(args: Args, config: Config) -> None: if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid): die( f"VSock connection ID {cid} is already in use by another virtual machine", - hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID", + hint="Use QemuVsockConnectionId=auto to have mkosi automatically " + "find a free vsock connection ID", ) index = list(qemu_device_fds.keys()).index(QemuDeviceNode.vhost_vsock) @@ -998,7 +1037,8 @@ def run_qemu(args: Args, config: Config) -> None: ] # fmt: skip if config.qemu_cdrom and config.output_format in (OutputFormat.disk, OutputFormat.esp): - # CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size 2048. + # CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size + # 2048. 
src = (config.output_dir_or_cwd() / config.output_with_compression).resolve() fname = src.parent / f"{src.name}-{uuid.uuid4().hex}" run( @@ -1031,7 +1071,8 @@ def run_qemu(args: Args, config: Config) -> None: apply_runtime_size(config, fname) if kernel and ( - KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) + KernelType.identify(config, kernel) != KernelType.uki + or not config.architecture.supports_smbios(firmware) ): kcl = config.kernel_command_line + config.kernel_command_line_extra else: @@ -1139,14 +1180,16 @@ def add_virtiofs_mount( if config.output_format in (OutputFormat.disk, OutputFormat.esp): direct = fname.stat().st_size % resource.getpagesize() == 0 ephemeral = config.ephemeral - cache = f"cache.writeback=on,cache.direct={yes_no(direct)},cache.no-flush={yes_no(ephemeral)},aio=io_uring" + cache = f"cache.writeback=on,cache.direct={yes_no(direct)},cache.no-flush={yes_no(ephemeral)},aio=io_uring" # noqa cmdline += [ "-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}", "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1", ] # fmt: skip if config.qemu_swtpm == ConfigFeature.enabled or ( - config.qemu_swtpm == ConfigFeature.auto and firmware.is_uefi() and config.find_binary("swtpm") is not None + config.qemu_swtpm == ConfigFeature.auto + and firmware.is_uefi() + and config.find_binary("swtpm") is not None ): sock = stack.enter_context(start_swtpm(config)) cmdline += [ @@ -1164,7 +1207,9 @@ def add_virtiofs_mount( credentials["vmm.notify_socket"] = addr if config.forward_journal: - credentials["journal.forward_to_socket"] = stack.enter_context(start_journal_remote_vsock(config)) + credentials["journal.forward_to_socket"] = stack.enter_context( + start_journal_remote_vsock(config) + ) for k, v in credentials.items(): payload = base64.b64encode(v.encode()).decode() @@ -1180,7 +1225,8 @@ def add_virtiofs_mount( kcl += [f"systemd.set_credential_binary={k}:{payload}"] if kernel and ( - KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware) + KernelType.identify(config, kernel) != KernelType.uki + or not config.architecture.supports_smbios(firmware) ): cmdline += ["-append", " ".join(kcl)] elif config.architecture.supports_smbios(firmware): @@ -1207,11 +1253,9 @@ def add_virtiofs_mount( if cid is not None: stack.enter_context(finalize_state(config, cid)) - # Reopen stdin, stdout and stderr to give qemu a private copy of them. - # This is a mitigation for the case when running mkosi under meson and - # one or two of the three are redirected and their pipe might block, - # but qemu opens all of them non-blocking because at least one of them - # is opened this way. + # Reopen stdin, stdout and stderr to give qemu a private copy of them. This is a mitigation for the + # case when running mkosi under meson and one or two of the three are redirected and their pipe might + # block, but qemu opens all of them non-blocking because at least one of them is opened this way. stdin = try_or( lambda: os.open(f"/proc/self/fd/{sys.stdin.fileno()}", os.O_RDONLY), OSError, @@ -1250,7 +1294,8 @@ def add_virtiofs_mount( ), ), ) as proc: - # We have to close these before we wait for qemu otherwise we'll deadlock as qemu will never exit. + # We have to close these before we wait for qemu otherwise we'll deadlock as qemu will never + # exit. 
for fd in qemu_device_fds.values(): os.close(fd) diff --git a/mkosi/run.py b/mkosi/run.py index 644becf28..557c8ef4a 100644 --- a/mkosi/run.py +++ b/mkosi/run.py @@ -79,8 +79,8 @@ def uncaught_exception_handler(exit: Callable[[int], NoReturn] = sys.exit) -> It rc = e.returncode # Failures from qemu, ssh and systemd-nspawn are expected and we won't log stacktraces for those. - # Failures from self come from the forks we spawn to build images in a user namespace. We've already done all - # the logging for those failures so we don't log stacktraces for those either. + # Failures from self come from the forks we spawn to build images in a user namespace. We've already + # done all the logging for those failures so we don't log stacktraces for those either. if ( ARG_DEBUG.get() and e.cmd @@ -125,8 +125,8 @@ def log_process_failure(sandbox: Sequence[str], cmdline: Sequence[str], returnco logging.error(f"{cmdline[0]} not found.") else: logging.error( - f'"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}" returned non-zero exit code ' - f"{returncode}." + f'"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}"' + f" returned non-zero exit code {returncode}." ) @@ -193,9 +193,8 @@ def spawn( logging.info(f"+ {shlex.join(cmd)}") if not stdout and not stderr: - # Unless explicit redirection is done, print all subprocess - # output on stderr, since we do so as well for mkosi's own - # output. + # Unless explicit redirection is done, print all subprocess output on stderr, since we do so as well + # for mkosi's own output. stdout = sys.stderr if stdin is None: @@ -232,16 +231,17 @@ def preexec() -> None: return # The systemd socket activation interface requires any passed file descriptors to start at '3' and - # incrementally increase from there. The file descriptors we got from the caller might be arbitrary, so we need - # to move them around to make sure they start at '3' and incrementally increase. + # incrementally increase from there. The file descriptors we got from the caller might be arbitrary, + # so we need to move them around to make sure they start at '3' and incrementally increase. for i, fd in enumerate(pass_fds): # Don't do anything if the file descriptor is already what we need it to be. if fd == SD_LISTEN_FDS_START + i: continue - # Close any existing file descriptor that occupies the id that we want to move to. This is safe because - # using pass_fds implies using close_fds as well, except that file descriptors are closed by python after - # running the preexec function, so we have to close a few of those manually here to make room if needed. + # Close any existing file descriptor that occupies the id that we want to move to. This is safe + # because using pass_fds implies using close_fds as well, except that file descriptors are closed + # by python after running the preexec function, so we have to close a few of those manually here + # to make room if needed. try: os.close(SD_LISTEN_FDS_START + i) except OSError as e: @@ -249,8 +249,8 @@ def preexec() -> None: raise nfd = fcntl.fcntl(fd, fcntl.F_DUPFD, SD_LISTEN_FDS_START + i) - # fcntl.F_DUPFD uses the closest available file descriptor ID, so make sure it actually picked the ID we - # expect it to pick. + # fcntl.F_DUPFD uses the closest available file descriptor ID, so make sure it actually picked + # the ID we expect it to pick. 
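+            # For example, with pass_fds == (7,) and fd 3 just closed above, F_DUPFD(7, 3) returns 3 (the
+            # lowest free descriptor at or above 3), so the assert below holds.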
assert nfd == SD_LISTEN_FDS_START + i with sandbox as sbx: @@ -265,8 +265,9 @@ def preexec() -> None: text=True, user=user, group=group, - # pass_fds only comes into effect after python has invoked the preexec function, so we make sure that - # pass_fds contains the file descriptors to keep open after we've done our transformation in preexec(). + # pass_fds only comes into effect after python has invoked the preexec function, so we make + # sure that pass_fds contains the file descriptors to keep open after we've done our + # transformation in preexec(). pass_fds=[SD_LISTEN_FDS_START + i for i in range(len(pass_fds))], env=env, preexec_fn=preexec, @@ -411,7 +412,8 @@ def finalize_passwd_mounts(root: PathString) -> list[PathString]: directory instead of from the host. """ return flatten( - ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}") for f in ("passwd", "group", "shadow", "gshadow") + ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}") + for f in ("passwd", "group", "shadow", "gshadow") ) @@ -431,14 +433,15 @@ def vartmpdir() -> Iterator[Path]: try: yield d finally: - # A directory that's used as an overlayfs workdir will contain a "work" subdirectory after the overlayfs is - # unmounted. This "work" subdirectory will have permissions 000 and as such can't be opened or searched unless - # the user has the CAP_DAC_OVERRIDE capability. shutil.rmtree() will try to search the "work" subdirectory to - # remove anything in it which will fail with a permission error. To circumvent this, if the work directory - # exists and is not empty, let's fork off a subprocess where we acquire extra privileges and then invoke - # shutil.rmtree(). If the work directory exists but is empty, let's just delete the "work" subdirectory first - # and then invoke shutil.rmtree(). Deleting the subdirectory when it is empty is not a problem because deleting - # a subdirectory depends on the permissions of the parent directory and not the directory itself. + # A directory that's used as an overlayfs workdir will contain a "work" subdirectory after the + # overlayfs is unmounted. This "work" subdirectory will have permissions 000 and as such can't be + # opened or searched unless the user has the CAP_DAC_OVERRIDE capability. shutil.rmtree() will try to + # search the "work" subdirectory to remove anything in it which will fail with a permission error. To + # circumvent this, if the work directory exists and is not empty, let's fork off a subprocess where + # we acquire extra privileges and then invoke shutil.rmtree(). If the work directory exists but is + # empty, let's just delete the "work" subdirectory first and then invoke shutil.rmtree(). Deleting + # the subdirectory when it is empty is not a problem because deleting a subdirectory depends on the + # permissions of the parent directory and not the directory itself. try: (d / "work").rmdir() except OSError as e: @@ -474,11 +477,13 @@ def sandbox_cmd( *setup, sys.executable, "-SI", mkosi.sandbox.__file__, "--proc", "/proc", - # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are used instead. + # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are + # used instead. "--unsetenv", "TMPDIR", *network_options(network=network), - # apivfs_script_cmd() and chroot_script_cmd() are executed from within the sandbox, but they still use - # sandbox.py, so we make sure it is available inside the sandbox so it can be executed there as well. 
+        # apivfs_script_cmd() and chroot_script_cmd() are executed from within the sandbox, but they still
+        # use sandbox.py, so we make sure it is available inside the sandbox so it can be executed there as
+        # well.
         "--ro-bind", Path(mkosi.sandbox.__file__), "/sandbox.py",
     ]  # fmt: skip
@@ -497,10 +502,10 @@ def sandbox_cmd(
         elif p.is_dir():
             cmdline += ["--ro-bind", p, Path("/") / p.relative_to(tools)]
-    # If we're using /usr from a tools tree, we have to use /etc/alternatives and /etc/ld.so.cache from the tools tree
-    # as well if they exists since those are directly related to /usr. In relaxed mode, we only do this if
-    # the mountpoint already exists on the host as otherwise we'd modify the host's /etc by creating the mountpoint
-    # ourselves (or fail when trying to create it).
+    # If we're using /usr from a tools tree, we have to use /etc/alternatives and /etc/ld.so.cache from the
+    # tools tree as well if they exist since those are directly related to /usr. In relaxed mode, we only do
+    # this if the mountpoint already exists on the host as otherwise we'd modify the host's /etc by creating
+    # the mountpoint ourselves (or fail when trying to create it).
     for p in (Path("etc/alternatives"), Path("etc/ld.so.cache")):
         if (tools / p).exists() and (not relaxed or (Path("/") / p).exists()):
             cmdline += ["--ro-bind", tools / p, Path("/") / p]
@@ -544,7 +549,11 @@ def sandbox_cmd(
     if network and Path("/etc/resolv.conf").exists():
         cmdline += ["--ro-bind", "/etc/resolv.conf", "/etc/resolv.conf"]
-    cmdline += ["--setenv", "PATH", f"/scripts:{'/usr/bin:/usr/sbin' if tools != Path('/') else os.environ['PATH']}"]
+    cmdline += [
+        "--setenv",
+        "PATH",
+        f"/scripts:{'/usr/bin:/usr/sbin' if tools != Path('/') else os.environ['PATH']}",
+    ]
     if scripts:
         cmdline += ["--ro-bind", scripts, "/scripts"]
@@ -581,9 +590,9 @@ def sandbox_cmd(
             else:
                 cmdline += ["--dir", Path("/") / d]
-        # If we put an overlayfs on /var, and /var/tmp is not in the sandbox tree, make sure /var/tmp is a bind mount
-        # of a regular empty directory instead of the overlays so tools like systemd-repart can use the underlying
-        # filesystem features from btrfs when using /var/tmp.
+        # If we put an overlayfs on /var, and /var/tmp is not in the sandbox tree, make sure /var/tmp is a
+        # bind mount of a regular empty directory instead of the overlays so tools like systemd-repart can
+        # use the underlying filesystem features from btrfs when using /var/tmp.
         if overlay and not (overlay / "var/tmp").exists():
             tmp = stack.enter_context(vartmpdir())
             cmdline += ["--bind", tmp, "/var/tmp"]
@@ -599,20 +608,20 @@ def apivfs_options(*, root: Path = Path("/buildroot")) -> list[PathString]:
         "--dev", root / "dev",
         # Nudge gpg to create its sockets in /run by making sure /run/user/0 exists.
         "--dir", root / "run/user/0",
-        # Make sure anything running in the root directory thinks it's in a container. $container can't always
-        # be accessed so we write /run/host/container-manager as well which is always accessible.
+        # Make sure anything running in the root directory thinks it's in a container. $container can't
+        # always be accessed so we write /run/host/container-manager as well which is always accessible.
         "--write", "mkosi", root / "run/host/container-manager",
     ]  # fmt: skip
 def chroot_options() -> list[PathString]:
     return [
-        # Let's always run as (fake) root when we chroot inside the image as tools executed within the image could
-        # have builtin assumptions about files being owned by root.
+ # Let's always run as (fake) root when we chroot inside the image as tools executed within the image + # could have builtin assumptions about files being owned by root. "--become-root", - # Unshare IPC namespace so any tests that exercise IPC related features don't fail with permission errors as - # --become-root implies unsharing a user namespace which won't have access to the parent's IPC namespace - # anymore. + # Unshare IPC namespace so any tests that exercise IPC related features don't fail with permission + # errors as --become-root implies unsharing a user namespace which won't have access to the parent's + # IPC namespace anymore. "--unshare-ipc", "--setenv", "container", "mkosi", "--setenv", "HOME", "/", @@ -631,7 +640,8 @@ def chroot_cmd( cmdline: list[PathString] = [ sys.executable, "-SI", mkosi.sandbox.__file__, "--bind", root, "/", - # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are used instead. + # We mounted a subdirectory of TMPDIR to /var/tmp so we unset TMPDIR so that /tmp or /var/tmp are + # used instead. "--unsetenv", "TMPDIR", *network_options(network=network), *apivfs_options(root=Path("/")), diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py index 34fbde4f3..bc9b6a0b8 100755 --- a/mkosi/sandbox.py +++ b/mkosi/sandbox.py @@ -190,7 +190,12 @@ def seccomp_suppress_chown() -> None: libseccomp.seccomp_release.argtypes = (ctypes.c_void_p,) libseccomp.seccomp_release.restype = None libseccomp.seccomp_syscall_resolve_name.argtypes = (ctypes.c_char_p,) - libseccomp.seccomp_rule_add_exact.argtypes = (ctypes.c_void_p, ctypes.c_uint32, ctypes.c_int, ctypes.c_uint) + libseccomp.seccomp_rule_add_exact.argtypes = ( + ctypes.c_void_p, + ctypes.c_uint32, + ctypes.c_int, + ctypes.c_uint, + ) libseccomp.seccomp_load.argtypes = (ctypes.c_void_p,) seccomp = libseccomp.seccomp_init(SCMP_ACT_ALLOW) @@ -254,7 +259,13 @@ def mount_rbind(src: str, dst: str, attrs: int = 0) -> None: oserror(src) try: - libc.move_mount.argtypes = (ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint) + libc.move_mount.argtypes = ( + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_uint, + ) r = libc.move_mount(fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) except AttributeError: libc.syscall.argtypes = ( @@ -574,7 +585,9 @@ def __exit__(self, *args: object, **kwargs: object) -> None: def execute(self, oldroot: str, newroot: str) -> None: lowerdirs = tuple(chase(oldroot, p) for p in self.lowerdirs) - upperdir = chase(oldroot, self.upperdir) if self.upperdir and self.upperdir != "tmpfs" else self.upperdir + upperdir = ( + chase(oldroot, self.upperdir) if self.upperdir and self.upperdir != "tmpfs" else self.upperdir + ) workdir = chase(oldroot, self.workdir) if self.workdir else None dst = chase(newroot, self.dst) @@ -781,8 +794,10 @@ def main() -> None: os.chdir("/tmp") with umask(~0o755): - os.mkdir("newroot") # This is where we set up the sandbox rootfs - os.mkdir("oldroot") # This is the old rootfs which is used as the source for mounts in the new rootfs. + # This is where we set up the sandbox rootfs + os.mkdir("newroot") + # This is the old rootfs which is used as the source for mounts in the new rootfs. + os.mkdir("oldroot") # Make sure that newroot is a mountpoint. 
mount("newroot", "newroot", "", MS_BIND | MS_REC, "") diff --git a/mkosi/sysupdate.py b/mkosi/sysupdate.py index b2201bab7..197d3a314 100644 --- a/mkosi/sysupdate.py +++ b/mkosi/sysupdate.py @@ -17,7 +17,8 @@ def run_sysupdate(args: Args, config: Config) -> None: if not config.sysupdate_dir: die( "No sysupdate definitions directory specified", - hint="Specify a directory containing systemd-sysupdate transfer definitions with SysupdateDirectory=", + hint="Specify a directory containing systemd-sysupdate transfer definitions with " + "SysupdateDirectory=", ) if not (sysupdate := config.find_binary("systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate")): diff --git a/mkosi/tree.py b/mkosi/tree.py index 214511539..df3e8247c 100644 --- a/mkosi/tree.py +++ b/mkosi/tree.py @@ -155,13 +155,14 @@ def rmtree(*paths: Path, sandbox: SandboxProtocol = nosandbox) -> None: paths = tuple(p.absolute() for p in paths) if subvolumes := sorted({p for p in paths if p.exists() and is_subvolume(p)}): - # Silence and ignore failures since when not running as root, this will fail with a permission error unless the - # btrfs filesystem is mounted with user_subvol_rm_allowed. + # Silence and ignore failures since when not running as root, this will fail with a permission error + # unless the btrfs filesystem is mounted with user_subvol_rm_allowed. run( ["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)], check=False, sandbox=sandbox( - binary="btrfs", options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes) + binary="btrfs", + options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes), ), stdout=subprocess.DEVNULL if not ARG_DEBUG.get() else None, stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None, @@ -201,7 +202,8 @@ def move_tree( raise e logging.info( - f"Could not rename {src} to {dst} as they are located on different devices, falling back to copying" + f"Could not rename {src} to {dst} as they are located on different devices, " + "falling back to copying" ) copy_tree(src, dst, use_subvolumes=use_subvolumes, sandbox=sandbox) rmtree(src, sandbox=sandbox) diff --git a/mkosi/user.py b/mkosi/user.py index e74d4d182..2fa7870ca 100644 --- a/mkosi/user.py +++ b/mkosi/user.py @@ -70,8 +70,8 @@ def runtime_dir(cls) -> Path: @classmethod def chown(cls, path: Path) -> None: - # If we created a file/directory in a parent directory owned by a regular user, make sure the path and any - # parent directories are owned by the invoking user as well. + # If we created a file/directory in a parent directory owned by a regular user, make sure the path + # and any parent directories are owned by the invoking user as well. if q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None): st = q.stat() @@ -109,8 +109,8 @@ def become_root_in_subuid_range() -> None: """ Set up a new user namespace mapping using /etc/subuid and /etc/subgid. - The current process becomes the root user in the new user namespace and the current user and group will be mapped - to 65436. The other IDs will be mapped through. + The current process becomes the root user in the new user namespace and the current user and group will + be mapped to 65436. The other IDs will be mapped through. 
""" if os.getuid() == 0: return @@ -123,11 +123,11 @@ def become_root_in_subuid_range() -> None: with tempfile.NamedTemporaryFile(prefix="mkosi-uidmap-lock-") as lockfile: lock = Path(lockfile.name) - # We map the private UID range configured in /etc/subuid and /etc/subgid into the user namespace using - # newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi so that - # we can access files and directories from the current user from within the user namespace. We don't map to the - # last user in the range as the last user is sometimes used in tests as a default value and mapping to that - # user might break those tests. + # We map the private UID range configured in /etc/subuid and /etc/subgid into the user namespace + # using newuidmap and newgidmap. On top of that, we also make sure to map in the user running mkosi + # so that we can access files and directories from the current user from within the user + # namespace. We don't map to the last user in the range as the last user is sometimes used in tests + # as a default value and mapping to that user might break those tests. newuidmap = [ "flock", "--exclusive", "--close", lock, "newuidmap", pid, 0, subuid, SUBRANGE - 100, @@ -142,12 +142,13 @@ def become_root_in_subuid_range() -> None: SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99 ] # fmt: skip - # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping to the - # process in the user namespace. The mapping can only be assigned after the user namespace has been unshared. - # To make this work, we first lock a temporary file, then spawn the newuidmap and newgidmap processes, which we - # execute using flock so they don't execute before they can get a lock on the same temporary file, then we - # unshare the user namespace and finally we unlock the temporary file, which allows the newuidmap and newgidmap - # processes to execute. we then wait for the processes to finish before continuing. + # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid + # mapping to the process in the user namespace. The mapping can only be assigned after the user + # namespace has been unshared. To make this work, we first lock a temporary file, then spawn the + # newuidmap and newgidmap processes, which we execute using flock so they don't execute before they + # can get a lock on the same temporary file, then we unshare the user namespace and finally we unlock + # the temporary file, which allows the newuidmap and newgidmap processes to execute. we then wait for + # the processes to finish before continuing. with ( flock(lock) as fd, spawn([str(x) for x in newuidmap]) as uidmap, diff --git a/mkosi/util.py b/mkosi/util.py index 1cd804385..82499ea35 100644 --- a/mkosi/util.py +++ b/mkosi/util.py @@ -143,8 +143,8 @@ def flock_or_die(path: Path) -> Iterator[Path]: die( f"Cannot lock {path} as it is locked by another process", - hint="Maybe another mkosi process is still using it? Use Ephemeral=yes to enable booting multiple " - "instances of the same image", + hint="Maybe another mkosi process is still using it? 
Use Ephemeral=yes to enable booting " + "multiple instances of the same image", ) @@ -192,9 +192,12 @@ def parents_below(path: Path, below: Path) -> list[Path]: def resource_path(mod: ModuleType) -> Iterator[Path]: t = importlib.resources.files(mod) with as_file(t) as p: - # Make sure any temporary directory that the resources are unpacked in is accessible to the invoking user so - # that any commands executed as the invoking user can access files within it. - if p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and stat.S_IMODE(p.parent.stat().st_mode) == 0o700: + # Make sure any temporary directory that the resources are unpacked in is accessible to the invoking + # user so that any commands executed as the invoking user can access files within it. + if ( + p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) + and stat.S_IMODE(p.parent.stat().st_mode) == 0o700 + ): p.parent.chmod(0o755) yield p diff --git a/mkosi/vmspawn.py b/mkosi/vmspawn.py index 77aadbc75..56f132b69 100644 --- a/mkosi/vmspawn.py +++ b/mkosi/vmspawn.py @@ -45,7 +45,8 @@ def run_vmspawn(args: Args, config: Config) -> None: if not kernel.exists(): die( f"Kernel or UKI not found at {kernel}", - hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn", + hint="Please install a kernel in the image or provide a --qemu-kernel" + " argument to mkosi vmspawn", ) cmdline: list[PathString] = [ diff --git a/pyproject.toml b/pyproject.toml index fa296ce2d..e0ade1770 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,7 +74,7 @@ strict_equality = true [tool.ruff] target-version = "py39" -line-length = 119 +line-length = 109 lint.select = ["E", "F", "I", "UP"] [tool.pytest.ini_options] diff --git a/tests/__init__.py b/tests/__init__.py index d643d2eec..e794f8226 100644 --- a/tests/__init__.py +++ b/tests/__init__.py @@ -100,7 +100,7 @@ def build(self, options: Sequence[PathString] = (), args: Sequence[str] = ()) -> if self.config.tools_tree_distribution else [] ), - *(["--tools-tree-release", self.config.tools_tree_release] if self.config.tools_tree_release else []), + *(["--tools-tree-release", self.config.tools_tree_release] if self.config.tools_tree_release else []), # noqa *(f"--kernel-command-line={i}" for i in kcl), "--force", "--incremental", @@ -176,8 +176,10 @@ def genkey(self) -> CompletedProcess: @pytest.fixture(scope="session", autouse=True) def suspend_capture_stdin(pytestconfig: Any) -> Iterator[None]: """ - When --capture=no (or -s) is specified, pytest will still intercept stdin. Let's explicitly make it not capture - stdin when --capture=no is specified so we can debug image boot failures by logging into the emergency shell. + When --capture=no (or -s) is specified, pytest will still intercept + stdin. Let's explicitly make it not capture stdin when --capture=no is + specified so we can debug image boot failures by logging into the emergency + shell. 
""" capmanager: Any = pytestconfig.pluginmanager.getplugin("capturemanager") diff --git a/tests/conftest.py b/tests/conftest.py index 46843e951..cc419fa40 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -49,7 +49,9 @@ def pytest_addoption(parser: Any) -> None: @pytest.fixture(scope="session") def config(request: Any) -> ImageConfig: distribution = cast(Distribution, request.config.getoption("--distribution")) - release = cast(str, request.config.getoption("--release") or parse_config(["-d", str(distribution)])[1][0].release) + release = cast( + str, request.config.getoption("--release") or parse_config(["-d", str(distribution)])[1][0].release + ) return ImageConfig( distribution=distribution, release=release, diff --git a/tests/test_boot.py b/tests/test_boot.py index a0cdff3cd..7e660ad94 100644 --- a/tests/test_boot.py +++ b/tests/test_boot.py @@ -17,16 +17,20 @@ def have_vmspawn() -> bool: - return ( - find_binary("systemd-vmspawn") is not None - and GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256 + return find_binary("systemd-vmspawn") is not None and ( + GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256 ) -@pytest.mark.parametrize("format", [f for f in OutputFormat if f not in (OutputFormat.confext, OutputFormat.sysext)]) +@pytest.mark.parametrize( + "format", [f for f in OutputFormat if f not in (OutputFormat.confext, OutputFormat.sysext)] +) def test_format(config: ImageConfig, format: OutputFormat) -> None: with Image(config) as image: - if image.config.distribution == Distribution.rhel_ubi and format in (OutputFormat.esp, OutputFormat.uki): + if image.config.distribution == Distribution.rhel_ubi and format in ( + OutputFormat.esp, + OutputFormat.uki, + ): pytest.skip("Cannot build RHEL-UBI images with format 'esp' or 'uki'") image.genkey() @@ -57,8 +61,8 @@ def test_format(config: ImageConfig, format: OutputFormat) -> None: if have_vmspawn() and format in (OutputFormat.disk, OutputFormat.directory): image.vmspawn() - # TODO: Remove the opensuse check again when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is resolved - # and we install the grub tools in the openSUSE tools tree again. + # TODO: Remove the opensuse check again when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is + # resolved and we install the grub tools in the openSUSE tools tree again. if format != OutputFormat.disk or config.tools_tree_distribution == Distribution.opensuse: return @@ -70,8 +74,8 @@ def test_bootloader(config: ImageConfig, bootloader: Bootloader) -> None: if config.distribution == Distribution.rhel_ubi: return - # TODO: Remove this again when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is resolved and we install - # the grub tools in the openSUSE tools tree again. + # TODO: Remove this again when https://bugzilla.opensuse.org/show_bug.cgi?id=1227464 is resolved and we + # install the grub tools in the openSUSE tools tree again. if bootloader == Bootloader.grub and config.tools_tree_distribution == Distribution.opensuse: return diff --git a/tests/test_config.py b/tests/test_config.py index 51c40ea77..cbbbbd706 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -147,8 +147,8 @@ def test_parse_config(tmp_path: Path) -> None: ] ) # fmt: skip - # Empty values on the CLIs resets non-collection based settings to their defaults and collection based settings to - # empty collections. 
+    # Empty values on the CLI reset non-collection based settings to their defaults and collection based
+    # settings to empty collections.
     assert config.distribution == (detect_distribution()[0] or Distribution.custom)
     assert "MY_KEY" not in config.environment
     assert "my.cred" not in config.credentials
@@ -176,8 +176,8 @@ def test_parse_config(tmp_path: Path) -> None:
     # Setting a value explicitly in a dropin should override the default from mkosi.conf.
     assert config.distribution == Distribution.debian
-    # Lists should be merged by appending the new values to the existing values. Any values from the CLI should be
-    # appended to the values from the configuration files.
+    # Lists should be merged by appending the new values to the existing values. Any values from the CLI
+    # should be appended to the values from the configuration files.
     assert config.packages == ["abc", "qed", "def", "last"]
     assert config.output_format == OutputFormat.cpio
     assert config.image_id == "00-dropin"
@@ -269,7 +269,9 @@ def test_parse_config(tmp_path: Path) -> None:
     )
     with chdir(d):
-        _, [one, two, config] = parse_config(["--package", "qed", "--build-package", "def", "--repositories", "cli"])
+        _, [one, two, config] = parse_config(
+            ["--package", "qed", "--build-package", "def", "--repositories", "cli"]
+        )
     # Universal settings should always come from the main image.
     assert one.distribution == config.distribution
@@ -298,7 +300,8 @@ def test_parse_config(tmp_path: Path) -> None:
     with chdir(d):
         _, [one, two, config] = parse_config(["--image-version", "7.8.9"])
-    # Inherited settings specified on the CLI should not override subimages that configure the setting explicitly.
+    # Inherited settings specified on the CLI should not override subimages that configure the setting
+    # explicitly.
assert config.image_version == "7.8.9" assert one.image_version == "7.8.9" assert two.image_version == "4.5.6" @@ -1153,7 +1156,9 @@ def test_expand_kernel_specifiers(text: str) -> str: assert test_expand_kernel_specifiers("&c") == boot_count assert test_expand_kernel_specifiers("Image_1.0.3") == "Image_1.0.3" - assert test_expand_kernel_specifiers("Image~&c+&h-&k-&e") == f"Image~{boot_count}+{roothash}-{kver}-{token}" + assert ( + test_expand_kernel_specifiers("Image~&c+&h-&k-&e") == f"Image~{boot_count}+{roothash}-{kver}-{token}" + ) def test_output_id_version(tmp_path: Path) -> None: diff --git a/tests/test_initrd.py b/tests/test_initrd.py index ae61cb161..9d1854a5a 100644 --- a/tests/test_initrd.py +++ b/tests/test_initrd.py @@ -65,7 +65,9 @@ def test_initrd_lvm(config: ImageConfig) -> None: lvm.touch() os.truncate(lvm, 5000 * 1024**2) - lodev = run(["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE).stdout.strip() + lodev = run( + ["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE + ).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run(["lvm", "pvcreate", f"{lodev}p1"]) @@ -161,7 +163,9 @@ def test_initrd_luks_lvm(config: ImageConfig, passphrase: Path) -> None: lvm.touch() os.truncate(lvm, 5000 * 1024**2) - lodev = run(["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE).stdout.strip() + lodev = run( + ["losetup", "--show", "--find", "--partscan", lvm], stdout=subprocess.PIPE + ).stdout.strip() stack.callback(lambda: run(["losetup", "--detach", lodev])) run(["sfdisk", "--label", "gpt", lodev], input="type=E6D6D379-F507-44C2-A23C-238F2A3DF928 bootable") run( diff --git a/tests/test_json.py b/tests/test_json.py index 2a6cae076..cb8673a41 100644 --- a/tests/test_json.py +++ b/tests/test_json.py @@ -479,7 +479,10 @@ def test_config() -> None: runtime_network=Network.interface, runtime_scratch=ConfigFeature.enabled, runtime_size=8589934592, - runtime_trees=[ConfigTree(Path("/foo/bar"), Path("/baz")), ConfigTree(Path("/bar/baz"), Path("/qux"))], + runtime_trees=[ + ConfigTree(Path("/foo/bar"), Path("/baz")), + ConfigTree(Path("/bar/baz"), Path("/qux")), + ], sector_size=None, secure_boot=True, secure_boot_auto_enroll=True, From 407ddb39cce768443d85149889a19e25ade592ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Behrmann?= Date: Thu, 19 Sep 2024 23:24:09 +0200 Subject: [PATCH 3/6] ci: add ruff format check --- .github/workflows/ci.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fa25f5705..4acf226b8 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -27,11 +27,16 @@ jobs: python3 -m pip install --break-system-packages codespell mypy reuse ruff npm install -g pyright - - name: Run ruff + - name: Run ruff check run: | ruff --version ruff check mkosi/ tests/ kernel-install/50-mkosi.install + - name: Run ruff format + run: | + ruff --version + ruff format --check mkosi/ tests/ kernel-install/50-mkosi.install + - name: Check that tabs are not used in code run: sh -c '! 
git grep -P "\\t" "*.py"' From 91155e90c7bcd06ee0f0a0691f5d322f47fb1024 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Behrmann?= Date: Fri, 20 Sep 2024 11:15:17 +0200 Subject: [PATCH 4/6] editorconfig: configure line length more widely --- .dir-locals.el | 8 ++++++-- .editorconfig | 3 +++ 2 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.dir-locals.el b/.dir-locals.el index 1c3fbcaa0..6223f7c60 100644 --- a/.dir-locals.el +++ b/.dir-locals.el @@ -9,9 +9,13 @@ ((python-mode . ((indent-tabs-mode . nil) (tab-width . 4) - (fill-column . 99))) + (fill-column . 109))) + (python-ts-mode . ((indent-tabs-mode . nil) + (tab-width . 4) + (fill-column . 109))) (sh-mode . ((sh-basic-offset . 4) (sh-indentation . 4))) + (markdown-mode . ((fill-column . 109))) (nil . ((indent-tabs-mode . nil) (tab-width . 4) - (fill-column . 79))) ) + (fill-column . 79)))) diff --git a/.editorconfig b/.editorconfig index 4c7a5f214..a093b65ce 100644 --- a/.editorconfig +++ b/.editorconfig @@ -8,5 +8,8 @@ charset = utf-8 indent_style = space indent_size = 4 +[*.{py,md}] +max_line_length = 109 + [*.yaml,*.yml] indent_size = 2 From d4a1d2f9feca00eb66a5f6a77d824bcde523037d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Behrmann?= Date: Sat, 21 Sep 2024 07:08:22 +0200 Subject: [PATCH 5/6] Force multiline formatting in more places --- mkosi/__init__.py | 28 +++++++++++++++++++------- mkosi/config.py | 11 +++++++++-- mkosi/distributions/centos.py | 37 ++++++++++++++++++++++++++++------- 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/mkosi/__init__.py b/mkosi/__init__.py index 0edc8bf97..3bfae473f 100644 --- a/mkosi/__init__.py +++ b/mkosi/__init__.py @@ -397,11 +397,19 @@ def configure_autologin(context: Context) -> None: with complete_step("Setting up autologin…"): configure_autologin_service( - context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600" + context, + "console-getty.service", + "--noclear --keep-baud console 115200,38400,9600", + ) + configure_autologin_service( + context, + "getty@tty1.service", + "--noclear -", ) - configure_autologin_service(context, "getty@tty1.service", "--noclear -") configure_autologin_service( - context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -" + context, + "serial-getty@hvc0.service", + "--keep-baud 115200,57600,38400,9600 -", ) @@ -2263,8 +2271,10 @@ def check_tools(config: Config, verb: Verb) -> None: config, version="254", reason="build bootable images", - hint="Use ToolsTree=default to download most required tools including ukify automatically " - "or use Bootable=no to create a non-bootable image which doesn't require ukify", + hint=( + "Use ToolsTree=default to download most required tools including ukify automatically " + "or use Bootable=no to create a non-bootable image which doesn't require ukify" + ), ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): @@ -3682,7 +3692,10 @@ def generate_key_cert_pair(args: Args) -> None: for f in ("mkosi.key", "mkosi.crt"): if Path(f).exists() and not args.force: - die(f"{f} already exists", hint=("To generate new keys, first remove mkosi.key and mkosi.crt")) + die( + f"{f} already exists", + hint="To generate new keys, first remove mkosi.key and mkosi.crt", + ) log_step(f"Generating keys rsa:{keylength} for CN {cn!r}.") logging.info( @@ -4137,7 +4150,8 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: if all(config == Config.default() for config in images): die( - "No configuration found", 
hint="Make sure mkosi is run from a directory with configuration files" + "No configuration found", + hint="Make sure mkosi is run from a directory with configuration files", ) if args.verb == Verb.summary: diff --git a/mkosi/config.py b/mkosi/config.py index ebf088c8f..236dc1907 100644 --- a/mkosi/config.py +++ b/mkosi/config.py @@ -844,7 +844,11 @@ def config_match_enum(match: str, value: StrEnum) -> bool: def config_make_list_parser( - delimiter: str, *, parse: Callable[[str], Any] = str, unescape: bool = False, reset: bool = True + delimiter: str, + *, + parse: Callable[[str], Any] = str, + unescape: bool = False, + reset: bool = True, ) -> ConfigParseCallback: def config_parse_list(value: Optional[str], old: Optional[list[Any]]) -> Optional[list[Any]]: new = old.copy() if old else [] @@ -1023,7 +1027,10 @@ def config_parse_filename(value: Optional[str], old: Optional[str]) -> Optional[ return None if not is_valid_filename(value): - die(f"{value!r} is not a valid filename.", hint=hint) + die( + f"{value!r} is not a valid filename.", + hint=hint, + ) return value diff --git a/mkosi/distributions/centos.py b/mkosi/distributions/centos.py index 5beafe33b..75ef35c71 100644 --- a/mkosi/distributions/centos.py +++ b/mkosi/distributions/centos.py @@ -237,25 +237,48 @@ def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]: else: url = "metalink=https://mirrors.fedoraproject.org/metalink?arch=$basearch" for repo in ("epel", "epel-next"): - yield RpmRepository(repo, f"{url}&repo={repo}-$releasever", gpgurls, enabled=False) yield RpmRepository( - f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False + repo, + f"{url}&repo={repo}-$releasever", + gpgurls, + enabled=False, ) yield RpmRepository( - f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False + f"{repo}-debuginfo", + f"{url}&repo={repo}-debug-$releasever", + gpgurls, + enabled=False, + ) + yield RpmRepository( + f"{repo}-source", + f"{url}&repo={repo}-source-$releasever", + gpgurls, + enabled=False, ) yield RpmRepository( - "epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False + "epel-testing", + f"{url}&repo=testing-epel$releasever", + gpgurls, + enabled=False, ) yield RpmRepository( - "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False + "epel-testing-debuginfo", + f"{url}&repo=testing-debug-epel$releasever", + gpgurls, + enabled=False, ) yield RpmRepository( - "epel-testing-source", f"{url}&repo=testing-source-epel$releasever", gpgurls, enabled=False + "epel-testing-source", + f"{url}&repo=testing-source-epel$releasever", + gpgurls, + enabled=False, ) yield RpmRepository( - "epel-next-testing", f"{url}&repo=epel-testing-next-$releasever", gpgurls, enabled=False + "epel-next-testing", + f"{url}&repo=epel-testing-next-$releasever", + gpgurls, + enabled=False, ) yield RpmRepository( "epel-next-testing-debuginfo", From 296d13ffdbb3d2ccf05fe893b2dfeef443ceac65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=B6rg=20Behrmann?= Date: Sat, 21 Sep 2024 07:08:35 +0200 Subject: [PATCH 6/6] Add comments demarking config sections --- mkosi/config.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/mkosi/config.py b/mkosi/config.py index 236dc1907..e1ca77522 100644 --- a/mkosi/config.py +++ b/mkosi/config.py @@ -1941,6 +1941,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple SETTINGS = ( + # Include section ConfigSetting( dest="include", short="-I", @@ -1952,6 +1953,7 @@ 
def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple ), help="Include configuration from the specified file or directory", ), + # Config section ConfigSetting( dest="profile", section="Config", @@ -1990,6 +1992,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple parse=config_make_list_parser(delimiter=" "), help="Environment variables to pass to subimages", ), + # Distribution section ConfigSetting( dest="distribution", short="-d", @@ -2068,6 +2071,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple help="Repositories to use", scope=SettingScope.universal, ), + # Output section ConfigSetting( dest="output_format", short="-t", @@ -2210,6 +2214,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple paths=("mkosi.clean",), help="Clean script to run after cleanup", ), + # Content section ConfigSetting( dest="packages", short="-p", @@ -2684,6 +2689,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple parse=config_parse_feature, help="Specify whether to relabel all files with setfiles", ), + # Validation section ConfigSetting( dest="secure_boot", metavar="BOOL", @@ -2796,6 +2802,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple section="Validation", help="GPG key to use for signing", ), + # Build section ConfigSetting( dest="tools_tree", metavar="PATH", @@ -2971,6 +2978,7 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple parse=config_parse_boolean, help="Whether mkosi can store information about previous builds", ), + # Host section ConfigSetting( dest="proxy_url", section="Host",
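
As an aside, the flock ordering that the reflowed comment block in become_root_in_subuid_range() walks
through can be condensed into a minimal standalone sketch. This is illustrative only, not mkosi code: it
assumes util-linux flock(1) and shadow's newuidmap(1) are installed, requires Python >= 3.12 for
os.unshare(), and maps only uid 0 instead of the full uid/gid mapping (including newgidmap and the
invoking user at 65436) that the real function sets up:

    import fcntl
    import os
    import subprocess
    import tempfile

    def become_root_sketch(subuid: int, count: int = 65536) -> None:
        pid = os.getpid()
        with tempfile.NamedTemporaryFile(prefix="uidmap-lock-") as lockfile:
            fd = os.open(lockfile.name, os.O_RDONLY)
            # 1. Take the lock before spawning newuidmap.
            fcntl.flock(fd, fcntl.LOCK_EX)
            # 2. newuidmap runs under flock(1), so it blocks until we unlock below.
            uidmap = subprocess.Popen([
                "flock", "--exclusive", "--close", lockfile.name,
                "newuidmap", str(pid), "0", str(subuid), str(count),
            ])
            # 3. Unshare the user namespace; only now can a mapping be assigned to this process.
            os.unshare(os.CLONE_NEWUSER)
            # 4. Release the lock so newuidmap can write the mapping, then wait for it to finish.
            fcntl.flock(fd, fcntl.LOCK_UN)
            rc = uidmap.wait()
            os.close(fd)
            assert rc == 0

The point of the dance is purely ordering: newuidmap must start outside the namespace but must not write
the mapping until after the unshare, and the file lock provides that synchronization without polling.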