From: Jörg Behrmann
Date: Wed, 18 Sep 2024 13:31:43 +0000 (+0200)
Subject: Format with ruff
X-Git-Tag: v25~276^2~5
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9222b51efaca13120f02e6aa09b25fdd4b9273df;p=thirdparty%2Fmkosi.git

Format with ruff

This change reformats mkosi using the ruff formatter with the default
settings, except for a line width of 119 columns.

Deviating from the default ruff formatting, "fmt: skip" comments were added
where it semantically makes sense, mainly:

- lists representing cmdlines, where options and their arguments should not
  be split
- when alignment improves readability (by easing comparisons with lines
  above and below)

Deviations from the above two guidelines are:

- alignment was discarded for semantically empty statements (enum.auto())
- when all positional arguments were on the same line and options were on
  different lines, the positional arguments were put on separate lines as
  well, to minimize the difference from vanilla ruff.

In collections that fit on a single line, trailing commas were removed,
since they force ruff to use multi-line formatting.
---

diff --git a/kernel-install/50-mkosi.install b/kernel-install/50-mkosi.install
index d59f64f6f..f8b77f601 100644
--- a/kernel-install/50-mkosi.install
+++ b/kernel-install/50-mkosi.install
@@ -76,9 +76,9 @@ def main() -> None:
     log_setup()

     parser = argparse.ArgumentParser(
-        description='kernel-install plugin to build initrds or Unified Kernel Images using mkosi',
+        description="kernel-install plugin to build initrds or Unified Kernel Images using mkosi",
         allow_abbrev=False,
-        usage='50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…',
+        usage="50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…",
     )

     parser.add_argument(
@@ -148,7 +148,7 @@ def main() -> None:
         "--format", str(format),
         "--output", output,
         "--output-dir", context.staging_area,
-    ]
+    ]  # fmt: skip

     if context.verbose:
         cmdline += ["--debug"]
@@ -167,5 +167,5 @@ def main() -> None:
         (context.staging_area / f"{output}.initrd").unlink()


-if __name__ == '__main__':
+if __name__ == "__main__":
     main()
diff --git a/mkosi/__init__.py b/mkosi/__init__.py
index 43270ace8..9e0cbae03 100644
--- a/mkosi/__init__.py
+++ b/mkosi/__init__.py
@@ -155,7 +155,8 @@ def mount_base_trees(context: Context) -> Iterator[None]:
             elif path.suffix == ".raw":
                 run(
                     ["systemd-dissect", "--mount", "--mkdir", path, d],
-                    env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"))
+                    env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"),
+                )
                 stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", d]))
                 bases += [d]
             else:
@@ -187,8 +188,10 @@ def install_distribution(context: Context) -> None:
     else:
         if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext):
             if context.config.packages:
-                die("Cannot install packages in extension images without a base tree",
-                    hint="Configure a base tree with the BaseTrees= setting")
+                die(
+                    "Cannot install packages in extension images without a base tree",
+                    hint="Configure a base tree with the BaseTrees= setting",
+                )
             return

     with complete_step(f"Installing {str(context.config.distribution).capitalize()}"):
@@ -222,10 +225,12 @@ def install_distribution(context: Context) -> None:
         if context.config.packages:
             context.config.distribution.install_packages(context, context.config.packages)

-    for f in ("var/lib/systemd/random-seed",
-              "var/lib/systemd/credential.secret",
-
"etc/machine-info", - "var/lib/dbus/machine-id"): + for f in ( + "var/lib/systemd/random-seed", + "var/lib/systemd/credential.secret", + "etc/machine-info", + "var/lib/dbus/machine-id", + ): # Using missing_ok=True still causes an OSError if the mount is read-only even if the # file doesn't exist so do an explicit exists() check first. if (context.root / f).exists(): @@ -276,7 +281,7 @@ def check_root_populated(context: Context) -> None: hint=( "The root must be populated by the distribution, or from base trees, " "skeleton trees, and prepare scripts." - ) + ), ) @@ -385,13 +390,11 @@ def configure_autologin(context: Context) -> None: return with complete_step("Setting up autologin…"): - configure_autologin_service(context, "console-getty.service", - "--noclear --keep-baud console 115200,38400,9600") - configure_autologin_service(context, "getty@tty1.service", - "--noclear -") - configure_autologin_service(context, - "serial-getty@hvc0.service", - "--keep-baud 115200,57600,38400,9600 -") + configure_autologin_service( + context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600" + ) + configure_autologin_service(context, "getty@tty1.service", "--noclear -") + configure_autologin_service(context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -") @contextlib.contextmanager @@ -518,7 +521,7 @@ def run_configure_scripts(config: Config) -> Config: ), input=config.to_json(indent=None), stdout=subprocess.PIPE, - ) + ) # fmt: skip config = Config.from_json(result.stdout) @@ -564,7 +567,7 @@ def run_sync_scripts(config: Config) -> None: "--dir", "/work/src", "--chdir", "/work/src", *sources, - ] + ] # fmt: skip if (p := INVOKING_USER.home()).exists() and p != Path("/"): # We use a writable mount here to keep git worktrees working which encode absolute @@ -615,7 +618,7 @@ def script_maybe_chroot_sandbox( ], "mkosi-as-caller": mkosi_as_caller(), **context.config.distribution.package_manager(context.config).scripts(context), - } + } # fmt: skip with finalize_host_scripts(context, helpers) as hd: if script.suffix != ".chroot": @@ -628,7 +631,7 @@ def script_maybe_chroot_sandbox( *context.config.distribution.package_manager(context.config).mounts(context), ], scripts=hd, - ) as sandbox: + ) as sandbox: # fmt: skip yield sandbox else: if suppress_chown: @@ -702,7 +705,7 @@ def run_prepare_scripts(context: Context, build: bool) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/prepare", arg], @@ -779,7 +782,7 @@ def run_build_scripts(context: Context) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/build-script", *cmdline], @@ -845,7 +848,7 @@ def run_postinst_scripts(context: Context) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/postinst", "final"], @@ -911,7 +914,7 @@ def run_finalize_scripts(context: Context) -> None: else [] ), *sources, - ] + ] # fmt: skip run( ["/work/finalize"], @@ -922,7 +925,7 @@ def run_finalize_scripts(context: Context) -> None: script=script, options=options, network=context.config.with_network, - ) + ), ) @@ -967,10 +970,10 @@ def run_postoutput_scripts(context: Context) -> None: "--dir", "/work/out", "--become-root", *sources, - ] + ], ), stdin=sys.stdin, - ) + ) # fmt: skip def install_tree( @@ -992,7 +995,8 @@ def install_tree( def copy() -> None: copy_tree( - src, t, + src, + t, preserve=preserve, use_subvolumes=config.use_subvolumes, sandbox=config.sandbox, @@ -1015,7 +1019,7 @@ def install_tree( "--bind", t.parent, workdir(t.parent), ], ), - ) + ) # fmt: skip else: # If we get an 
unknown file without a target, we just copy it into /. copy() @@ -1050,11 +1054,12 @@ def install_sandbox_trees(config: Config, dst: Path) -> None: if (p := config.tools() / "etc/crypto-policies").exists(): copy_tree( - p, dst / "etc/crypto-policies", + p, + dst / "etc/crypto-policies", preserve=False, dereference=True, sandbox=config.sandbox, - ) + ) # fmt: skip if not config.sandbox_trees: return @@ -1091,7 +1096,8 @@ def install_build_dest(context: Context) -> None: with complete_step("Copying in build tree…"): copy_tree( - context.install_dir, context.root, + context.install_dir, + context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -1203,7 +1209,7 @@ def finalize_default_initrd( "--selinux-relabel", str(relabel), *(["-f"] * args.force), "--include=mkosi-initrd", - ] + ] # fmt: skip _, [config] = parse_config(cmdline + ["build"], resources=resources) @@ -1353,9 +1359,11 @@ def build_kernel_modules_initrd(context: Context, kver: str) -> Path: return kmods make_cpio( - context.root, kmods, + context.root, + kmods, files=gen_required_kernel_modules( - context.root, kver, + context.root, + kver, include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_initrd_include, @@ -1366,7 +1374,6 @@ def build_kernel_modules_initrd(context: Context, kver: str) -> Path: sandbox=context.sandbox, ) - if context.config.distribution.is_apt_distribution(): # Ubuntu Focal's kernel does not support zstd-compressed initrds so use xz instead. if context.config.distribution == Distribution.ubuntu and context.config.release == "focal": @@ -1395,7 +1402,7 @@ def join_initrds(initrds: Sequence[Path], output: Path) -> Path: for p in initrds: initrd = p.read_bytes() n = len(initrd) - padding = b'\0' * (round_up(n, 4) - n) # pad to 32 bit alignment + padding = b"\0" * (round_up(n, 4) - n) # pad to 32 bit alignment seq.write(initrd) seq.write(padding) @@ -1404,12 +1411,9 @@ def join_initrds(initrds: Sequence[Path], output: Path) -> Path: def want_signed_pcrs(config: Config) -> bool: - return ( - config.sign_expected_pcr == ConfigFeature.enabled or - ( - config.sign_expected_pcr == ConfigFeature.auto and - config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None - ) + return config.sign_expected_pcr == ConfigFeature.enabled or ( + config.sign_expected_pcr == ConfigFeature.auto + and config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None ) @@ -1442,14 +1446,14 @@ def build_uki( "--output", workdir(output), "--efi-arch", arch, "--uname", kver, - ] + ] # fmt: skip options: list[PathString] = [ "--bind", output.parent, workdir(output.parent), "--ro-bind", context.workspace / "cmdline", context.workspace / "cmdline", "--ro-bind", context.root / "usr/lib/os-release", context.root / "usr/lib/os-release", "--ro-bind", stub, stub, - ] + ] # fmt: skip if context.config.secure_boot: assert context.config.secure_boot_key @@ -1460,14 +1464,12 @@ def build_uki( if context.config.secure_boot_sign_tool != SecureBootSignTool.pesign: cmd += [ "--signtool", "sbsign", - "--secureboot-private-key", - context.config.secure_boot_key, - "--secureboot-certificate", - context.config.secure_boot_certificate, - ] + "--secureboot-private-key", context.config.secure_boot_key, + "--secureboot-certificate", context.config.secure_boot_certificate, + ] # fmt: skip options += [ "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, - ] + ] # fmt: skip if 
context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--signing-engine", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): @@ -1480,7 +1482,7 @@ def build_uki( context.workspace / "pesign", "--secureboot-certificate-name", certificate_common_name(context, context.config.secure_boot_certificate), - ] + ] # fmt: skip options += ["--ro-bind", context.workspace / "pesign", context.workspace / "pesign"] if want_signed_pcrs(context.config): @@ -1489,17 +1491,17 @@ def build_uki( # SHA1 might be disabled in OpenSSL depending on the distro so we opt to not sign # for SHA1 to avoid having to manage a bunch of configuration to re-enable SHA1. "--pcr-banks", "sha256", - ] + ] # fmt: skip if context.config.secure_boot_key.exists(): options += ["--bind", context.config.secure_boot_key, context.config.secure_boot_key] if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += [ "--signing-engine", context.config.secure_boot_key_source.source, "--pcr-public-key", context.config.secure_boot_certificate, - ] + ] # fmt: skip options += [ "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, - ] + ] # fmt: skip cmd += ["build", "--linux", kimg] options += ["--ro-bind", kimg, kimg] @@ -1511,9 +1513,10 @@ def build_uki( python_binary(context.config, binary=ukify), ukify, sandbox=context.sandbox, - ) >= "256" and - (version := systemd_stub_version(context, stub)) and - version >= "256" + ) + >= "256" + and (version := systemd_stub_version(context, stub)) + and version >= "256" ): for microcode in microcodes: cmd += ["--microcode", microcode] @@ -1565,21 +1568,26 @@ def systemd_stub_version(context: Context, stub: Path) -> Optional[GenericVersio def want_uki(context: Context) -> bool: return want_efi(context.config) and ( - context.config.bootloader == Bootloader.uki or - context.config.unified_kernel_images == ConfigFeature.enabled or ( - context.config.unified_kernel_images == ConfigFeature.auto and - systemd_stub_binary(context).exists() and - context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None - ) + context.config.bootloader == Bootloader.uki + or context.config.unified_kernel_images == ConfigFeature.enabled + or ( + context.config.unified_kernel_images == ConfigFeature.auto + and systemd_stub_binary(context).exists() + and context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None + ) ) def find_entry_token(context: Context) -> str: if ( - not context.config.find_binary("kernel-install") or - "--version" not in run(["kernel-install", "--help"], - stdout=subprocess.PIPE, sandbox=context.sandbox(binary="kernel-install")).stdout or - systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1" + not context.config.find_binary("kernel-install") + or ( + "--version" + not in run( + ["kernel-install", "--help"], stdout=subprocess.PIPE, sandbox=context.sandbox(binary="kernel-install") + ).stdout + ) + or systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1" ): return context.config.image_id or context.config.distribution.name @@ -1654,10 +1662,10 @@ def install_type1( with umask(~0o600): if ( - want_efi(context.config) and - context.config.secure_boot and - context.config.shim_bootloader != ShimBootloader.signed and - KernelType.identify(context.config, kimg) == KernelType.pe + want_efi(context.config) + and context.config.secure_boot + and context.config.shim_bootloader != ShimBootloader.signed + and 
KernelType.identify(context.config, kimg) == KernelType.pe ): kimg = sign_efi_binary(context, kimg, dst / "vmlinuz") else: @@ -1689,9 +1697,9 @@ def install_type1( assert config if ( - not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line) and - not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line) and - (root := finalize_root(partitions)) + not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line) + and not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line) + and (root := finalize_root(partitions)) ): cmdline = [root] + cmdline @@ -1727,7 +1735,7 @@ def expand_kernel_specifiers(text: str, kver: str, token: str, roothash: str, bo "e": token, "k": kver, "h": roothash, - "c": boot_count + "c": boot_count, } def replacer(match: re.Match[str]) -> str: @@ -1842,9 +1850,9 @@ def install_kernel(context: Context, partitions: Sequence[Partition]) -> None: return if context.config.bootable == ConfigFeature.auto and ( - context.config.output_format == OutputFormat.cpio or - context.config.output_format.is_extension_image() or - context.config.overlay + context.config.output_format == OutputFormat.cpio + or context.config.output_format.is_extension_image() + or context.config.overlay ): return @@ -1895,7 +1903,8 @@ def maybe_compress(context: Context, compression: Compression, src: Path, dst: O if not compression or src.is_dir(): if dst: move_tree( - src, dst, + src, + dst, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -1908,7 +1917,7 @@ def maybe_compress(context: Context, compression: Compression, src: Path, dst: O with complete_step(f"Compressing {src} with {compression}"): with src.open("rb") as i: - src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. + src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. 
with dst.open("wb") as o: run(cmd, stdin=i, stdout=o, sandbox=context.sandbox(binary=cmd[0])) @@ -1927,13 +1936,11 @@ def copy_uki(context: Context) -> None: reverse=True, ) - if ( - (uki := context.root / efi_boot_binary(context)).exists() and + if (uki := context.root / efi_boot_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass - elif ( - (uki := context.root / shim_second_stage_binary(context)).exists() and + elif (uki := context.root / shim_second_stage_binary(context)).exists() and ( KernelType.identify(context.config, uki) == KernelType.uki ): pass @@ -2013,7 +2020,8 @@ def calculate_signature(context: Context) -> None: cmdline += ["--default-key", context.config.key] cmdline += [ - "--output", workdir(context.staging / context.config.output_signature), + "--output", + workdir(context.staging / context.config.output_signature), workdir(context.staging / context.config.output_checksum), ] @@ -2029,16 +2037,16 @@ def calculate_signature(context: Context) -> None: "--bind", home, home, "--bind", context.staging, workdir(context.staging), "--bind", "/run", "/run", - ] + ] # fmt: skip - with (complete_step("Signing SHA256SUMS…")): + with complete_step("Signing SHA256SUMS…"): run( cmdline, env=env, sandbox=context.sandbox( binary="gpg", options=options, - ) + ), ) @@ -2064,12 +2072,12 @@ def save_manifest(context: Context, manifest: Optional[Manifest]) -> None: if manifest.has_data(): if ManifestFormat.json in context.config.manifest_format: with complete_step(f"Saving manifest {context.config.output_manifest}"): - with open(context.staging / context.config.output_manifest, 'w') as f: + with open(context.staging / context.config.output_manifest, "w") as f: manifest.write_json(f) if ManifestFormat.changelog in context.config.manifest_format: with complete_step(f"Saving report {context.config.output_changelog}"): - with open(context.staging / context.config.output_changelog, 'w') as f: + with open(context.staging / context.config.output_changelog, "w") as f: manifest.write_package_report(f) @@ -2088,7 +2096,7 @@ def cache_tree_paths(config: Config) -> tuple[Path, Path, Path]: if config.image: fragments += [config.image] - key = '~'.join(str(s) for s in fragments) + key = "~".join(str(s) for s in fragments) assert config.cache_dir return ( @@ -2148,12 +2156,16 @@ def check_inputs(config: Config) -> None: die(f"{script} is not executable") if config.secure_boot and not config.secure_boot_key: - die("SecureBoot= is enabled but no secure boot key is configured", - hint="Run mkosi genkey to generate a secure boot key/certificate pair") + die( + "SecureBoot= is enabled but no secure boot key is configured", + hint="Run mkosi genkey to generate a secure boot key/certificate pair", + ) if config.secure_boot and not config.secure_boot_certificate: - die("SecureBoot= is enabled but no secure boot key is configured", - hint="Run mkosi genkey to generate a secure boot key/certificate pair") + die( + "SecureBoot= is enabled but no secure boot key is configured", + hint="Run mkosi genkey to generate a secure boot key/certificate pair", + ) def check_tool(config: Config, *tools: PathString, reason: str, hint: Optional[str] = None) -> Path: @@ -2175,8 +2187,10 @@ def check_systemd_tool( v = systemd_tool_version(tool, sandbox=config.sandbox) if v < version: - die(f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.", - hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.") + die( + f"Found '{tool}' with 
version {v} but version {version} or newer is required to {reason}.", + hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.", + ) def check_ukify( @@ -2189,8 +2203,10 @@ def check_ukify( v = systemd_tool_version(python_binary(config, binary=ukify), ukify, sandbox=config.sandbox) if v < version: - die(f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.", - hint="Use ToolsTree=default to get a newer version of 'ukify'.") + die( + f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.", + hint="Use ToolsTree=default to get a newer version of 'ukify'.", + ) def check_tools(config: Config, verb: Verb) -> None: @@ -2204,7 +2220,7 @@ def check_tools(config: Config, verb: Verb) -> None: version="254", reason="build bootable images", hint="Use ToolsTree=default to download most required tools including ukify automatically or use " - "Bootable=no to create a non-bootable image which doesn't require ukify", + "Bootable=no to create a non-bootable image which doesn't require ukify", ) if config.output_format in (OutputFormat.disk, OutputFormat.esp): @@ -2316,9 +2332,9 @@ def configure_initrd(context: Context) -> None: return if ( - not (context.root / "init").exists() and - not (context.root / "init").is_symlink() and - (context.root / "usr/lib/systemd/systemd").exists() + not (context.root / "init").exists() + and not (context.root / "init").is_symlink() + and (context.root / "usr/lib/systemd/systemd").exists() ): (context.root / "init").symlink_to("/usr/lib/systemd/systemd") @@ -2351,18 +2367,15 @@ def run_depmod(context: Context, *, cache: bool = False) -> None: for kver, _ in gen_kernel_images(context): modulesd = context.root / "usr/lib/modules" / kver - if ( - not cache and - not context.config.kernel_modules_exclude and - all((modulesd / o).exists() for o in outputs) - ): + if not cache and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs): mtime = (modulesd / "modules.dep").stat().st_mtime if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")): continue if not cache: process_kernel_modules( - context.root, kver, + context.root, + kver, include=finalize_kernel_modules_include( context, include=context.config.kernel_modules_include, @@ -2384,8 +2397,10 @@ def run_sysusers(context: Context) -> None: return with complete_step("Generating system users"): - run(["systemd-sysusers", "--root=/buildroot"], - sandbox=context.sandbox(binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"])) + run( + ["systemd-sysusers", "--root=/buildroot"], + sandbox=context.sandbox(binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"]), + ) def run_tmpfiles(context: Context) -> None: @@ -2428,7 +2443,7 @@ def run_tmpfiles(context: Context) -> None: "--become-root", ], ), - ) + ) # fmt: skip def run_preset(context: Context) -> None: @@ -2440,10 +2455,14 @@ def run_preset(context: Context) -> None: return with complete_step("Applying presets…"): - run(["systemctl", "--root=/buildroot", "preset-all"], - sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"])) - run(["systemctl", "--root=/buildroot", "--global", "preset-all"], - sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"])) + run( + ["systemctl", "--root=/buildroot", "preset-all"], + sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]), + ) + run( + ["systemctl", 
"--root=/buildroot", "--global", "preset-all"], + sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]), + ) def run_hwdb(context: Context) -> None: @@ -2455,8 +2474,10 @@ def run_hwdb(context: Context) -> None: return with complete_step("Generating hardware database"): - run(["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"], - sandbox=context.sandbox(binary="systemd-hwdb", options=["--bind", context.root, "/buildroot"])) + run( + ["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"], + sandbox=context.sandbox(binary="systemd-hwdb", options=["--bind", context.root, "/buildroot"]), + ) # Remove any existing hwdb in /etc in favor of the one we just put in /usr. (context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True) @@ -2487,7 +2508,7 @@ def run_firstboot(context: Context) -> None: ("--hostname", None, context.config.hostname), ("--root-password-hashed", "passwd.hashed-password.root", password), ("--root-shell", "passwd.shell.root", context.config.root_shell), - ) + ) # fmt: skip options = [] creds = [] @@ -2506,8 +2527,10 @@ def run_firstboot(context: Context) -> None: return with complete_step("Applying first boot settings"): - run(["systemd-firstboot", "--root=/buildroot", "--force", *options], - sandbox=context.sandbox(binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"])) + run( + ["systemd-firstboot", "--root=/buildroot", "--force", *options], + sandbox=context.sandbox(binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"]), + ) # Initrds generally don't ship with only /usr so there's not much point in putting the # credentials in /usr/lib/credstore. @@ -2529,9 +2552,11 @@ def run_selinux_relabel(context: Context) -> None: binpolicy = Path("/buildroot") / binpolicy.relative_to(context.root) with complete_step(f"Relabeling files using {policy} policy"): - run([setfiles, "-mFr", "/buildroot", "-c", binpolicy, fc, "/buildroot"], + run( + [setfiles, "-mFr", "/buildroot", "-c", binpolicy, fc, "/buildroot"], sandbox=context.sandbox(binary=setfiles, options=["--bind", context.root, "/buildroot"]), - check=context.config.selinux_relabel == ConfigFeature.enabled) + check=context.config.selinux_relabel == ConfigFeature.enabled, + ) def need_build_overlay(config: Config) -> bool: @@ -2548,7 +2573,8 @@ def save_cache(context: Context) -> None: rmtree(final, sandbox=context.sandbox) move_tree( - context.root, final, + context.root, + final, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -2556,7 +2582,8 @@ def save_cache(context: Context) -> None: if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists(): rmtree(build, sandbox=context.sandbox) move_tree( - context.workspace / "build-overlay", build, + context.workspace / "build-overlay", + build, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -2590,8 +2617,12 @@ def have_cache(config: Config) -> bool: if prev != json.loads(new): logging.info("Cache manifest mismatch, not reusing cached images") if ARG_DEBUG.get(): - run(["diff", manifest, "-"], input=new, check=False, - sandbox=config.sandbox(binary="diff", options=["--bind", manifest, manifest])) + run( + ["diff", manifest, "-"], + input=new, + check=False, + sandbox=config.sandbox(binary="diff", options=["--bind", manifest, manifest]), + ) return False else: @@ -2612,7 +2643,8 @@ def reuse_cache(context: Context) -> bool: with complete_step("Copying cached trees"): copy_tree( - final, 
context.root, + final, + context.root, use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -2666,13 +2698,13 @@ def make_image( f"--offline={yes_no(context.config.repart_offline)}", "--seed", str(context.config.seed), workdir(context.staging / context.config.output_with_format), - ] + ] # fmt: skip options: list[PathString] = [ # Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. "--become-root", "--bind", context.staging, workdir(context.staging), - ] + ] # fmt: skip if root: cmdline += ["--root=/buildroot"] @@ -2720,8 +2752,8 @@ def make_image( sandbox=context.sandbox( binary="systemd-repart", devices=( - not context.config.repart_offline or - context.config.verity_key_source.type != KeySourceType.file + not context.config.repart_offline + or context.config.verity_key_source.type != KeySourceType.file ), options=options, ), @@ -2761,11 +2793,10 @@ def make_disk( else: bootloader = None - esp = ( - context.config.bootable == ConfigFeature.enabled or - (context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists()) + esp = context.config.bootable == ConfigFeature.enabled or ( + context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists() ) - bios = (context.config.bootable != ConfigFeature.disabled and want_grub_bios(context)) + bios = context.config.bootable != ConfigFeature.disabled and want_grub_bios(context) if esp or bios: # Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds @@ -2884,10 +2915,14 @@ def make_oci(context: Context, root_layer: Path, dst: Path) -> None: ], "annotations": { "io.systemd.mkosi.version": __version__, - **({ - "org.opencontainers.image.version": context.config.image_version, - } if context.config.image_version else {}), - } + **( + { + "org.opencontainers.image.version": context.config.image_version, + } + if context.config.image_version + else {} + ), + }, } oci_manifest_blob = json.dumps(oci_manifest) oci_manifest_digest = hashlib.sha256(oci_manifest_blob.encode()).hexdigest() @@ -2966,7 +3001,7 @@ def make_extension_image(context: Context, output: Path) -> None: "--size=auto", "--definitions", r, workdir(output), - ] + ] # fmt: skip options: list[PathString] = [ # Make sure we're root so that the mkfs tools invoked by systemd-repart think the files # that go into the disk image are owned by root. 
@@ -2974,7 +3009,7 @@ def make_extension_image(context: Context, output: Path) -> None: "--bind", output.parent, workdir(output.parent), "--ro-bind", context.root, "/buildroot", "--ro-bind", r, r, - ] + ] # fmt: skip if not context.config.architecture.is_native(): cmdline += ["--architecture", str(context.config.architecture)] @@ -3004,8 +3039,8 @@ def make_extension_image(context: Context, output: Path) -> None: sandbox=context.sandbox( binary="systemd-repart", devices=( - not context.config.repart_offline or - context.config.verity_key_source.type != KeySourceType.file + not context.config.repart_offline + or context.config.verity_key_source.type != KeySourceType.file ), options=options, ), @@ -3032,7 +3067,8 @@ def finalize_staging(context: Context) -> None: os.chmod(f, context.config.output_mode) move_tree( - f, context.config.output_dir_or_cwd(), + f, + context.config.output_dir_or_cwd(), use_subvolumes=context.config.use_subvolumes, sandbox=context.sandbox, ) @@ -3042,7 +3078,7 @@ def clamp_mtime(path: Path, mtime: int) -> None: st = os.stat(path, follow_symlinks=False) orig = (st.st_atime_ns, st.st_mtime_ns) updated = (min(orig[0], mtime * 1_000_000_000), - min(orig[1], mtime * 1_000_000_000)) + min(orig[1], mtime * 1_000_000_000)) # fmt: skip if orig != updated: os.utime(path, ns=updated, follow_symlinks=False) @@ -3065,11 +3101,11 @@ def setup_workspace(args: Args, config: Config) -> Iterator[Path]: with contextlib.ExitStack() as stack: workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace-")) # Discard setuid/setgid bits as these are inherited and can leak into the image. - workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID|stat.S_ISUID)) + workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID | stat.S_ISUID)) stack.callback(lambda: rmtree(workspace, sandbox=config.sandbox)) (workspace / "tmp").mkdir(mode=0o1777) - with scopedenv({"TMPDIR" : os.fspath(workspace / "tmp")}): + with scopedenv({"TMPDIR": os.fspath(workspace / "tmp")}): try: yield Path(workspace) except BaseException: @@ -3110,13 +3146,11 @@ def copy_repository_metadata(config: Config, dst: Path) -> None: exclude: list[PathString] if d == "cache": exclude = flatten( - ("--ro-bind", tmp, p) - for p in config.distribution.package_manager(config).cache_subdirs(src) + ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).cache_subdirs(src) ) else: exclude = flatten( - ("--ro-bind", tmp, p) - for p in config.distribution.package_manager(config).state_subdirs(src) + ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).state_subdirs(src) ) subdst = dst / d / subdir @@ -3170,11 +3204,7 @@ def build_image(context: Context) -> None: wantrepo = ( ( not cached - and ( - context.config.packages - or context.config.build_packages - or context.config.prepare_scripts - ) + and (context.config.packages or context.config.build_packages or context.config.prepare_scripts) ) or context.config.volatile_packages or context.config.postinst_scripts @@ -3288,9 +3318,12 @@ def build_image(context: Context) -> None: context.root.rename(context.staging / context.config.output_with_format) if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp): - maybe_compress(context, context.config.compress_output, - context.staging / context.config.output_with_format, - context.staging / context.config.output_with_compression) + maybe_compress( + context, + context.config.compress_output, + context.staging / 
context.config.output_with_format, + context.staging / context.config.output_with_compression, + ) calculate_sha256sum(context) calculate_signature(context) @@ -3357,9 +3390,9 @@ def run_shell(args: Args, config: Config) -> None: # copy to avoid ending up with files not owned by the directory image owner in the # directory image. if config.ephemeral or ( - config.output_format == OutputFormat.directory and - args.verb == Verb.boot and - (config.output_dir_or_cwd() / config.output).stat().st_uid != 0 + config.output_format == OutputFormat.directory + and args.verb == Verb.boot + and (config.output_dir_or_cwd() / config.output).stat().st_uid != 0 ): fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output)) else: @@ -3385,7 +3418,7 @@ def run_shell(args: Args, config: Config) -> None: devices=True, options=["--bind", fname, fname], ), - ) + ) # fmt: skip if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] @@ -3429,8 +3462,7 @@ def run_shell(args: Args, config: Config) -> None: cmdline += ["--bind", f"{path}:/root:norbind,{uidmap}"] if config.runtime_scratch == ConfigFeature.enabled or ( - config.runtime_scratch == ConfigFeature.auto and - config.output_format == OutputFormat.disk + config.runtime_scratch == ConfigFeature.auto and config.output_format == OutputFormat.disk ): scratch = stack.enter_context(tempfile.TemporaryDirectory(dir="/var/tmp")) os.chmod(scratch, 0o1777) @@ -3447,7 +3479,7 @@ def run_shell(args: Args, config: Config) -> None: cmdline += [ "--bind", f"{addr}:/run/host/journal/socket", "--set-credential=journal.forward_to_socket:/run/host/journal/socket", - ] + ] # fmt: skip for p in config.unit_properties: cmdline += ["--property", p] @@ -3545,7 +3577,8 @@ def run_serve(args: Args, config: Config) -> None: run( [python_binary(config, binary=None), "-m", "http.server", "8081"], - stdin=sys.stdin, stdout=sys.stdout, + stdin=sys.stdin, + stdout=sys.stdout, sandbox=config.sandbox( binary=python_binary(config, binary=None), network=True, @@ -3564,8 +3597,7 @@ def generate_key_cert_pair(args: Args) -> None: for f in ("mkosi.key", "mkosi.crt"): if Path(f).exists() and not args.force: - die(f"{f} already exists", - hint=("To generate new keys, first remove mkosi.key and mkosi.crt")) + die(f"{f} already exists", hint=("To generate new keys, first remove mkosi.key and mkosi.crt")) log_step(f"Generating keys rsa:{keylength} for CN {cn!r}.") logging.info( @@ -3591,7 +3623,7 @@ def generate_key_cert_pair(args: Args) -> None: "-nodes" ], env=dict(OPENSSL_CONF="/dev/null"), - ) + ) # fmt: skip def bump_image_version() -> None: @@ -3630,7 +3662,6 @@ def prepend_to_environ_path(config: Config) -> Iterator[None]: return with tempfile.TemporaryDirectory(prefix="mkosi.path-") as d: - for path in config.extra_search_paths: if not path.is_dir(): (Path(d) / path.name).symlink_to(path.absolute()) @@ -3647,8 +3678,10 @@ def prepend_to_environ_path(config: Config) -> Iterator[None]: def finalize_default_tools(args: Args, config: Config, *, resources: Path) -> Config: if not config.tools_tree_distribution: - die(f"{config.distribution} does not have a default tools tree distribution", - hint="use ToolsTreeDistribution= to set one explicitly") + die( + f"{config.distribution} does not have a default tools tree distribution", + hint="use ToolsTreeDistribution= to set one explicitly", + ) cmdline = [ "--directory", "", @@ -3676,7 +3709,7 @@ def finalize_default_tools(args: Args, config: Config, *, resources: Path) -> Co 
*(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []), *(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []), *(["-f"] * args.force), - ] + ] # fmt: skip _, [tools] = parse_config( cmdline + ["--include=mkosi-tools", "build"], @@ -3693,9 +3726,11 @@ def check_workspace_directory(config: Config) -> None: for tree in config.build_sources: if wd.is_relative_to(tree.source): - die(f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})", + die( + f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})", hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure a different " - "workspace directory") + "workspace directory", + ) def run_clean_scripts(config: Config) -> None: @@ -3741,16 +3776,17 @@ def run_clean_scripts(config: Config) -> None: "--ro-bind", json, "/work/config.json", *(["--bind", str(o), "/work/out"] if (o := config.output_dir_or_cwd()).exists() else []), *sources, - ] + ], ), stdin=sys.stdin, - ) + ) # fmt: skip def needs_build(args: Args, config: Config, force: int = 1) -> bool: return ( - args.force >= force or - not (config.output_dir_or_cwd() / config.output_with_compression).exists() or + args.force >= force + or not (config.output_dir_or_cwd() / config.output_with_compression).exists() + or # When the output is a directory, its name is the same as the symlink we create that points # to the actual output when not building a directory. So if the full output path exists, we # have to check that it's not a symlink as well. @@ -3796,7 +3832,7 @@ def run_clean(args: Args, config: Config, *, resources: Path) -> None: complete_step(f"Removing output files of {config.name()} image…"), flock_or_die(config.output_dir_or_cwd() / config.output) if (config.output_dir_or_cwd() / config.output).exists() - else contextlib.nullcontext() + else contextlib.nullcontext(), ): rmtree(*outputs, sandbox=sandbox) @@ -3825,10 +3861,7 @@ def run_clean(args: Args, config: Config, *, resources: Path) -> None: lock_repository_metadata(config), ): rmtree( - *( - config.package_cache_dir_or_default() / d / subdir - for d in ("cache", "lib") - ), + *(config.package_cache_dir_or_default() / d / subdir for d in ("cache", "lib")), sandbox=sandbox, ) @@ -3852,8 +3885,8 @@ def ensure_directories_exist(config: Config) -> None: st = config.build_dir.stat() # Discard setuid/setgid bits if set as these are inherited and can leak into the image. - if stat.S_IMODE(st.st_mode) & (stat.S_ISGID|stat.S_ISUID): - config.build_dir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID|stat.S_ISUID)) + if stat.S_IMODE(st.st_mode) & (stat.S_ISGID | stat.S_ISUID): + config.build_dir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID | stat.S_ISUID)) def metadata_cache(config: Config) -> Path: @@ -3869,10 +3902,10 @@ def sync_repository_metadata(args: Args, images: Sequence[Config], *, resources: # If we have a metadata cache and any cached image and using cached metadata is not explicitly disabled, reuse the # metadata cache. 
if ( - last.incremental and - metadata_cache(last).exists() and - last.cacheonly != Cacheonly.never and - any(have_cache(config) for config in images) + last.incremental + and metadata_cache(last).exists() + and last.cacheonly != Cacheonly.never + and any(have_cache(config) for config in images) ): with complete_step("Copying cached package manager metadata"): copy_tree(metadata_cache(last), dst, use_subvolumes=last.use_subvolumes, sandbox=last.sandbox) @@ -3936,7 +3969,7 @@ def run_build( unshare(CLONE_NEWNS) if os.getuid() == 0: - mount("", "/", "", MS_SLAVE|MS_REC, "") + mount("", "/", "", MS_SLAVE | MS_REC, "") # For extra safety when running as root, remount a bunch of stuff read-only. # Because some build systems use output directories in /usr, we only remount @@ -3952,7 +3985,7 @@ def run_build( attrs = MOUNT_ATTR_RDONLY if d not in ("/usr", "/opt"): - attrs |= MOUNT_ATTR_NOSUID|MOUNT_ATTR_NODEV|MOUNT_ATTR_NOEXEC + attrs |= MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | MOUNT_ATTR_NOEXEC mount_rbind(d, d, attrs) @@ -4004,16 +4037,12 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: return if all(config == Config.default() for config in images): - die("No configuration found", - hint="Make sure mkosi is run from a directory with configuration files") + die("No configuration found", hint="Make sure mkosi is run from a directory with configuration files") if args.verb == Verb.summary: if args.json: text = json.dumps( - {"Images": [config.to_dict() for config in images]}, - cls=JsonEncoder, - indent=4, - sort_keys=True + {"Images": [config.to_dict() for config in images]}, cls=JsonEncoder, indent=4, sort_keys=True ) else: text = "\n".join(summary(config) for config in images) @@ -4059,13 +4088,15 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: assert args.verb.needs_build() if ( - tools and - not (tools.output_dir_or_cwd() / tools.output).exists() and - args.verb != Verb.build and - not args.force + tools + and not (tools.output_dir_or_cwd() / tools.output).exists() + and args.verb != Verb.build + and not args.force ): - die(f"Default tools tree requested for image '{last.name()}' but it has not been built yet", - hint="Make sure to build the image first with 'mkosi build' or use '--force'") + die( + f"Default tools tree requested for image '{last.name()}' but it has not been built yet", + hint="Make sure to build the image first with 'mkosi build' or use '--force'", + ) if not last.repart_offline and os.getuid() != 0: die(f"Must be root to build {last.name()} image configured with RepartOffline=no") @@ -4077,20 +4108,19 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: return if args.verb != Verb.build and not args.force and not output.exists(): - die(f"Image '{last.name()}' has not been built yet", - hint="Make sure to build the image first with 'mkosi build' or use '--force'") + die( + f"Image '{last.name()}' has not been built yet", + hint="Make sure to build the image first with 'mkosi build' or use '--force'", + ) check_workspace_directory(last) # If we're doing an incremental build and the cache is not out of date, don't clean up the # tools tree so that we can reuse the previous one. 
- if ( - tools and - ( - not tools.incremental or - ((args.verb == Verb.build or args.force > 0) and not have_cache(tools)) or - needs_build(args, tools, force=2) - ) + if tools and ( + not tools.incremental + or ((args.verb == Verb.build or args.force > 0) and not have_cache(tools)) + or needs_build(args, tools, force=2) ): run_clean(args, tools, resources=resources) @@ -4119,7 +4149,7 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: tools.output_dir_or_cwd() / tools.output if tools and config.tools_tree == Path("default") else config.tools_tree - ) + ), ) with prepend_to_environ_path(config): @@ -4173,12 +4203,14 @@ def run_verb(args: Args, images: Sequence[Config], *, resources: Path) -> None: return if ( - last.output_format == OutputFormat.directory and - (last.output_dir_or_cwd() / last.output).stat().st_uid == 0 and - os.getuid() != 0 + last.output_format == OutputFormat.directory + and (last.output_dir_or_cwd() / last.output).stat().st_uid == 0 + and os.getuid() != 0 ): - die("Cannot operate on directory images built as root when running unprivileged", - hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image") + die( + "Cannot operate on directory images built as root when running unprivileged", + hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image", + ) with prepend_to_environ_path(last): run_vm = { diff --git a/mkosi/archive.py b/mkosi/archive.py index 0ba828aa1..6ad8df48a 100644 --- a/mkosi/archive.py +++ b/mkosi/archive.py @@ -20,7 +20,7 @@ def tar_exclude_apivfs_tmp() -> list[str]: "--exclude", "./tmp/*", "--exclude", "./run/*", "--exclude", "./var/tmp/*", - ] + ] # fmt: skip def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> None: @@ -50,7 +50,7 @@ def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> N stdout=f, # Make sure tar uses user/group information from the root directory instead of the host. sandbox=sandbox(binary="tar", options=["--ro-bind", src, src, *finalize_passwd_mounts(src)]), - ) + ) # fmt: skip def can_extract_tar(src: Path) -> bool: @@ -92,9 +92,9 @@ def extract_tar( sandbox=sandbox( binary="tar", # Make sure tar uses user/group information from the root directory instead of the host. - options=["--ro-bind", src, src, "--bind", dst, dst, *finalize_passwd_mounts(dst)] + options=["--ro-bind", src, src, "--bind", dst, dst, *finalize_passwd_mounts(dst)], ), - ) + ) # fmt: skip def make_cpio( @@ -128,4 +128,4 @@ def make_cpio( input="\0".join(os.fspath(f) for f in files), stdout=f, sandbox=sandbox(binary="cpio", options=["--ro-bind", src, src, *finalize_passwd_mounts(src)]), - ) + ) # fmt: skip diff --git a/mkosi/backport.py b/mkosi/backport.py index 2a542c4e7..e03fcf42c 100644 --- a/mkosi/backport.py +++ b/mkosi/backport.py @@ -16,7 +16,7 @@ from typing import no_type_check @contextlib.contextmanager def _tempfile( reader, - suffix='', + suffix="", # gh-93353: Keep a reference to call os.remove() in late Python # finalization. 
*, @@ -38,10 +38,12 @@ def _tempfile( except FileNotFoundError: pass + @no_type_check def _temp_file(path): return _tempfile(path.read_bytes, suffix=path.name) + @no_type_check def _is_present_dir(path) -> bool: """ @@ -55,6 +57,7 @@ def _is_present_dir(path) -> bool: return path.is_dir() return False + @no_type_check @functools.singledispatch def as_file(path): @@ -64,6 +67,7 @@ def as_file(path): """ return _temp_dir(path) if _is_present_dir(path) else _temp_file(path) + @no_type_check @contextlib.contextmanager def _temp_path(dir: tempfile.TemporaryDirectory): @@ -73,6 +77,7 @@ def _temp_path(dir: tempfile.TemporaryDirectory): with dir as result: yield Path(result) + @no_type_check @contextlib.contextmanager def _temp_dir(path): @@ -84,6 +89,7 @@ def _temp_dir(path): with _temp_path(tempfile.TemporaryDirectory()) as temp_dir: yield _write_contents(temp_dir, path) + @no_type_check def _write_contents(target, source): child = target.joinpath(source.name) diff --git a/mkosi/bootloader.py b/mkosi/bootloader.py index c61adc2b8..2fdd8afcb 100644 --- a/mkosi/bootloader.py +++ b/mkosi/bootloader.py @@ -48,9 +48,8 @@ def want_efi(config: Config) -> bool: return False if ( - (config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay) - and config.bootable == ConfigFeature.auto - ): + config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay + ) and config.bootable == ConfigFeature.auto: return False if config.architecture.to_efi() is None: @@ -181,7 +180,7 @@ def grub_mkimage( with ( complete_step(f"Generating grub image for {target}"), - tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig + tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig, ): earlyconfig.write( textwrap.dedent( @@ -236,16 +235,16 @@ def grub_mkimage( *(["--ro-bind", str(sbat), str(sbat)] if sbat else []), ], ), - ) + ) # fmt: skip def find_signed_grub_image(context: Context) -> Optional[Path]: arch = context.config.architecture.to_efi() patterns = [ - f"usr/lib/grub/*-signed/grub{arch}.efi.signed", # Debian/Ubuntu - f"boot/efi/EFI/*/grub{arch}.efi", # Fedora/CentOS - "usr/share/efi/*/grub.efi", # OpenSUSE + f"usr/lib/grub/*-signed/grub{arch}.efi.signed", # Debian/Ubuntu + f"boot/efi/EFI/*/grub{arch}.efi", # Fedora/CentOS + "usr/share/efi/*/grub.efi", # OpenSUSE ] for p in flatten(context.root.glob(pattern) for pattern in patterns): @@ -260,9 +259,9 @@ def find_signed_grub_image(context: Context) -> Optional[Path]: def python_binary(config: Config, *, binary: Optional[PathString]) -> PathString: tools = ( - not binary or - not (path := config.find_binary(binary)) or - not any(path.is_relative_to(d) for d in config.extra_search_paths) + not binary + or not (path := config.find_binary(binary)) + or not any(path.is_relative_to(d) for d in config.extra_search_paths) ) # If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. 
If there is a tools @@ -393,7 +392,7 @@ def grub_bios_setup(context: Context, partitions: Sequence[Partition]) -> None: "--bind", mountinfo.name, "/proc/self/mountinfo", ], ), - ) + ) # fmt: skip def efi_boot_binary(context: Context) -> Path: @@ -423,7 +422,7 @@ def certificate_common_name(context: Context, certificate: Path) -> str: ], stdout=subprocess.PIPE, sandbox=context.sandbox(binary="openssl", options=["--ro-bind", certificate, certificate]), - ).stdout + ).stdout # fmt: skip for line in output.splitlines(): if not line.strip().startswith("commonName"): @@ -438,7 +437,6 @@ def certificate_common_name(context: Context, certificate: Path) -> str: die(f"Certificate {certificate} is missing Common Name") - def pesign_prepare(context: Context) -> None: assert context.config.secure_boot_key assert context.config.secure_boot_certificate @@ -473,7 +471,7 @@ def pesign_prepare(context: Context) -> None: "--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate, ], ), - ) + ) # fmt: skip (context.workspace / "pesign").mkdir(exist_ok=True) @@ -492,7 +490,7 @@ def pesign_prepare(context: Context) -> None: "--ro-bind", context.workspace / "pesign", context.workspace / "pesign", ], ), - ) + ) # fmt: skip def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: @@ -500,20 +498,20 @@ def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: assert context.config.secure_boot_certificate if ( - context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or - context.config.secure_boot_sign_tool == SecureBootSignTool.auto and - context.config.find_binary("sbsign") is not None + context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign + or context.config.secure_boot_sign_tool == SecureBootSignTool.auto + and context.config.find_binary("sbsign") is not None ): cmd: list[PathString] = [ "sbsign", "--cert", workdir(context.config.secure_boot_certificate), "--output", workdir(output), - ] + ] # fmt: skip options: list[PathString] = [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), "--ro-bind", input, workdir(input), "--bind", output.parent, workdir(output.parent), - ] + ] # fmt: skip if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--engine", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): @@ -528,12 +526,12 @@ def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: binary="sbsign", options=options, devices=context.config.secure_boot_key_source.type != KeySourceType.file, - ) + ), ) elif ( - context.config.secure_boot_sign_tool == SecureBootSignTool.pesign or - context.config.secure_boot_sign_tool == SecureBootSignTool.auto and - context.config.find_binary("pesign") is not None + context.config.secure_boot_sign_tool == SecureBootSignTool.pesign + or context.config.secure_boot_sign_tool == SecureBootSignTool.auto + and context.config.find_binary("pesign") is not None ): pesign_prepare(context) run( @@ -554,7 +552,7 @@ def sign_efi_binary(context: Context, input: Path, output: Path) -> Path: "--bind", output.parent, workdir(output), ] ), - ) + ) # fmt: skip else: die("One of sbsign or pesign is required to use SecureBoot=") @@ -616,7 +614,7 @@ def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]: for kver in sorted( (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()), key=lambda k: GenericVersion(k.name), - reverse=True + reverse=True, ): # Make 
sure we look for anything that remotely resembles vmlinuz, as # the arch specific install scripts in the kernel source tree sometimes @@ -654,13 +652,15 @@ def install_systemd_boot(context: Context) -> None: signed = context.config.shim_bootloader == ShimBootloader.signed if not directory.glob("*.efi.signed" if signed else "*.efi"): if context.config.bootable == ConfigFeature.enabled: - die(f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}" - f"systemd-boot binary was not found at {directory.relative_to(context.root)}") + die( + f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}" + f"systemd-boot binary was not found at {directory.relative_to(context.root)}" + ) return if context.config.secure_boot and not signed: with complete_step("Signing systemd-boot binaries…"): - for input in itertools.chain(directory.glob('*.efi'), directory.glob('*.EFI')): + for input in itertools.chain(directory.glob("*.efi"), directory.glob("*.EFI")): output = directory / f"{input}.signed" sign_efi_binary(context, input, output) @@ -707,7 +707,7 @@ def install_systemd_boot(context: Context) -> None: "--bind", context.workspace, workdir(context.workspace), ], ), - ) + ) # fmt: skip with umask(~0o600): run( @@ -725,7 +725,7 @@ def install_systemd_boot(context: Context) -> None: "--ro-bind", context.workspace / "mkosi.der", workdir(context.workspace / "mkosi.der"), ] ), - ) + ) # fmt: skip # We reuse the key for all secure boot databases to keep things simple. for db in ["PK", "KEK", "db"]: @@ -736,21 +736,21 @@ def install_systemd_boot(context: Context) -> None: "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS", "--cert", workdir(context.config.secure_boot_certificate), "--output", workdir(keys / f"{db}.auth"), - ] + ] # fmt: skip options: list[PathString] = [ "--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate), "--ro-bind", context.workspace / "mkosi.esl", workdir(context.workspace / "mkosi.esl"), "--bind", keys, workdir(keys), - ] + ] # fmt: skip if context.config.secure_boot_key_source.type == KeySourceType.engine: cmd += ["--engine", context.config.secure_boot_key_source.source] if context.config.secure_boot_key.exists(): - cmd += ["--key", workdir(context.config.secure_boot_key),] + cmd += ["--key", workdir(context.config.secure_boot_key)] options += [ "--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key), - ] + ] # fmt: skip else: cmd += ["--key", context.config.secure_boot_key] cmd += [db, workdir(context.workspace / "mkosi.esl")] @@ -781,31 +781,31 @@ def install_shim(context: Context) -> None: arch = context.config.architecture.to_efi() signed = [ - f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu - f"usr/lib/shim/shim{arch}.efi.signed", # Debian - f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS - "usr/share/efi/*/shim.efi", # OpenSUSE + f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu + f"usr/lib/shim/shim{arch}.efi.signed", # Debian + f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS + "usr/share/efi/*/shim.efi", # OpenSUSE ] unsigned = [ - f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu - f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS - f"usr/share/shim/shim{arch}.efi", # Arch + f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu + f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS + f"usr/share/shim/shim{arch}.efi", # Arch ] find_and_install_shim_binary(context, "shim", signed, 
unsigned, dst) signed = [ - f"usr/lib/shim/mm{arch}.efi.signed", # Debian - f"usr/lib/shim/mm{arch}.efi", # Ubuntu - f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS - "usr/share/efi/*/MokManager.efi", # OpenSUSE + f"usr/lib/shim/mm{arch}.efi.signed", # Debian + f"usr/lib/shim/mm{arch}.efi", # Ubuntu + f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS + "usr/share/efi/*/MokManager.efi", # OpenSUSE ] unsigned = [ - f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu - f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS - f"usr/share/shim/mm{arch}.efi", # Arch + f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu + f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS + f"usr/share/shim/mm{arch}.efi", # Arch ] find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent) diff --git a/mkosi/completion.py b/mkosi/completion.py index 0a96c1a01..72f202e30 100644 --- a/mkosi/completion.py +++ b/mkosi/completion.py @@ -17,8 +17,8 @@ from mkosi.util import StrEnum class CompGen(StrEnum): default = enum.auto() - files = enum.auto() - dirs = enum.auto() + files = enum.auto() + dirs = enum.auto() @staticmethod def from_action(action: argparse.Action) -> "CompGen": @@ -81,9 +81,11 @@ def collect_completion_arguments() -> list[CompletionItem]: compgen=CompGen.from_action(action), ) for action in parser._actions - if (action.option_strings and - action.help != argparse.SUPPRESS and - action.dest not in config.SETTINGS_LOOKUP_BY_DEST) + if ( + action.option_strings + and action.help != argparse.SUPPRESS + and action.dest not in config.SETTINGS_LOOKUP_BY_DEST + ) ] options += [ @@ -107,8 +109,9 @@ def finalize_completion_bash(options: list[CompletionItem], resources: Path) -> def to_bash_hasharray(name: str, entries: Mapping[str, Union[str, int]]) -> str: return ( - f"{name.replace('-', '_')}=(" + - " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + ")" + f"{name.replace('-', '_')}=(" + + " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + + ")" ) completion = resources / "completion.bash" @@ -151,7 +154,7 @@ def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> c.write("complete -c mkosi -n '__fish_is_first_token' -a \"") c.write(" ".join(str(v) for v in config.Verb)) - c.write("\"\n") + c.write('"\n') for option in options: if not option.short and not option.long: @@ -165,12 +168,12 @@ def finalize_completion_fish(options: list[CompletionItem], resources: Path) -> if isinstance(option.nargs, int) and option.nargs > 0: c.write("-r ") if option.choices: - c.write("-a \"") + c.write('-a "') c.write(" ".join(option.choices)) - c.write("\" ") + c.write('" ') if option.help is not None: help = option.help.replace("'", "\\'") - c.write(f"-d \"{help}\" ") + c.write(f'-d "{help}" ') c.write(option.compgen.to_fish()) c.write("\n") @@ -225,7 +228,7 @@ def print_completion(args: config.Args, *, resources: Path) -> None: if not args.cmdline: die( "No shell to generate completion script for specified", - hint="Please specify either one of: bash, fish, zsh" + hint="Please specify either one of: bash, fish, zsh", ) shell = args.cmdline[0] @@ -237,8 +240,7 @@ def print_completion(args: config.Args, *, resources: Path) -> None: func = finalize_completion_zsh else: die( - f"{shell!r} is not supported for completion scripts.", - hint="Please specify either one of: bash, fish, zsh" + f"{shell!r} is not supported for completion scripts.", hint="Please specify either one of: bash, fish, zsh" ) completion_args = 
collect_completion_arguments() diff --git a/mkosi/config.py b/mkosi/config.py index c17889d07..3306f37a0 100644 --- a/mkosi/config.py +++ b/mkosi/config.py @@ -57,25 +57,25 @@ BUILTIN_CONFIGS = ("mkosi-tools", "mkosi-initrd") class Verb(StrEnum): - build = enum.auto() - clean = enum.auto() - summary = enum.auto() - cat_config = enum.auto() - shell = enum.auto() - boot = enum.auto() - qemu = enum.auto() - ssh = enum.auto() - serve = enum.auto() - bump = enum.auto() - help = enum.auto() - genkey = enum.auto() + build = enum.auto() + clean = enum.auto() + summary = enum.auto() + cat_config = enum.auto() + shell = enum.auto() + boot = enum.auto() + qemu = enum.auto() + ssh = enum.auto() + serve = enum.auto() + bump = enum.auto() + help = enum.auto() + genkey = enum.auto() documentation = enum.auto() - journalctl = enum.auto() - coredumpctl = enum.auto() - burn = enum.auto() - dependencies = enum.auto() - completion = enum.auto() - sysupdate = enum.auto() + journalctl = enum.auto() + coredumpctl = enum.auto() + burn = enum.auto() + dependencies = enum.auto() + completion = enum.auto() + sysupdate = enum.auto() def supports_cmdline(self) -> bool: return self in ( @@ -117,8 +117,8 @@ class Verb(StrEnum): class ConfigFeature(StrEnum): - auto = enum.auto() - enabled = enum.auto() + auto = enum.auto() + enabled = enum.auto() disabled = enum.auto() def to_tristate(self) -> str: @@ -168,23 +168,23 @@ class QemuVsockCID(enum.IntEnum): class SecureBootSignTool(StrEnum): - auto = enum.auto() + auto = enum.auto() sbsign = enum.auto() pesign = enum.auto() class OutputFormat(StrEnum): - confext = enum.auto() - cpio = enum.auto() + confext = enum.auto() + cpio = enum.auto() directory = enum.auto() - disk = enum.auto() - esp = enum.auto() - none = enum.auto() - portable = enum.auto() - sysext = enum.auto() - tar = enum.auto() - uki = enum.auto() - oci = enum.auto() + disk = enum.auto() + esp = enum.auto() + none = enum.auto() + portable = enum.auto() + sysext = enum.auto() + tar = enum.auto() + uki = enum.auto() + oci = enum.auto() def extension(self) -> str: return { @@ -196,7 +196,7 @@ class OutputFormat(StrEnum): OutputFormat.sysext: ".raw", OutputFormat.tar: ".tar", OutputFormat.uki: ".efi", - }.get(self, "") + }.get(self, "") # fmt: skip def use_outer_compression(self) -> bool: return self in (OutputFormat.tar, OutputFormat.cpio, OutputFormat.disk) or self.is_extension_image() @@ -206,11 +206,12 @@ class OutputFormat(StrEnum): class ManifestFormat(StrEnum): - json = enum.auto() # the standard manifest in json format + json = enum.auto() # the standard manifest in json format changelog = enum.auto() # human-readable text file with package changelogs class Compression(StrEnum): + # fmt: off none = enum.auto() zstd = enum.auto() zst = zstd @@ -220,21 +221,20 @@ class Compression(StrEnum): gzip = gz lz4 = enum.auto() lzma = enum.auto() + # fmt: on def __bool__(self) -> bool: return self != Compression.none def extension(self) -> str: - return { - Compression.zstd: ".zst" - }.get(self, f".{self}") + return {Compression.zstd: ".zst"}.get(self, f".{self}") def oci_media_type_suffix(self) -> str: suffix = { Compression.none: "", Compression.gz: "+gzip", Compression.zstd: "+zstd", - }.get(self) + }.get(self) # fmt: skip if not suffix: die(f"Compression {self} not supported for OCI layers") @@ -243,11 +243,11 @@ class Compression(StrEnum): class DocFormat(StrEnum): - auto = enum.auto() + auto = enum.auto() markdown = enum.auto() - man = enum.auto() - pandoc = enum.auto() - system = enum.auto() + man = enum.auto() 
+ pandoc = enum.auto() + system = enum.auto() @classmethod def all(cls) -> list["DocFormat"]: @@ -257,10 +257,10 @@ class DocFormat(StrEnum): class Bootloader(StrEnum): - none = enum.auto() - uki = enum.auto() + none = enum.auto() + uki = enum.auto() systemd_boot = enum.auto() - grub = enum.auto() + grub = enum.auto() class BiosBootloader(StrEnum): @@ -269,25 +269,25 @@ class BiosBootloader(StrEnum): class ShimBootloader(StrEnum): - none = enum.auto() - signed = enum.auto() + none = enum.auto() + signed = enum.auto() unsigned = enum.auto() class Cacheonly(StrEnum): - always = enum.auto() - auto = enum.auto() - none = auto + always = enum.auto() + auto = enum.auto() + none = auto metadata = enum.auto() - never = enum.auto() + never = enum.auto() class QemuFirmware(StrEnum): - auto = enum.auto() - linux = enum.auto() - uefi = enum.auto() + auto = enum.auto() + linux = enum.auto() + uefi = enum.auto() uefi_secure_boot = enum.auto() - bios = enum.auto() + bios = enum.auto() def is_uefi(self) -> bool: return self in (QemuFirmware.uefi, QemuFirmware.uefi_secure_boot) @@ -295,83 +295,83 @@ class QemuFirmware(StrEnum): class Network(StrEnum): interface = enum.auto() - user = enum.auto() - none = enum.auto() + user = enum.auto() + none = enum.auto() class Vmm(StrEnum): - qemu = enum.auto() + qemu = enum.auto() vmspawn = enum.auto() class Architecture(StrEnum): - alpha = enum.auto() - arc = enum.auto() - arm = enum.auto() - arm64 = enum.auto() - ia64 = enum.auto() + alpha = enum.auto() + arc = enum.auto() + arm = enum.auto() + arm64 = enum.auto() + ia64 = enum.auto() loongarch64 = enum.auto() - mips_le = enum.auto() - mips64_le = enum.auto() - parisc = enum.auto() - ppc = enum.auto() - ppc64 = enum.auto() - ppc64_le = enum.auto() - riscv32 = enum.auto() - riscv64 = enum.auto() - s390 = enum.auto() - s390x = enum.auto() - tilegx = enum.auto() - x86 = enum.auto() - x86_64 = enum.auto() + mips_le = enum.auto() + mips64_le = enum.auto() + parisc = enum.auto() + ppc = enum.auto() + ppc64 = enum.auto() + ppc64_le = enum.auto() + riscv32 = enum.auto() + riscv64 = enum.auto() + s390 = enum.auto() + s390x = enum.auto() + tilegx = enum.auto() + x86 = enum.auto() + x86_64 = enum.auto() @staticmethod def from_uname(s: str) -> "Architecture": a = { - "aarch64" : Architecture.arm64, - "aarch64_be" : Architecture.arm64, - "armv8l" : Architecture.arm, - "armv8b" : Architecture.arm, - "armv7ml" : Architecture.arm, - "armv7mb" : Architecture.arm, - "armv7l" : Architecture.arm, - "armv7b" : Architecture.arm, - "armv6l" : Architecture.arm, - "armv6b" : Architecture.arm, - "armv5tl" : Architecture.arm, - "armv5tel" : Architecture.arm, - "armv5tejl" : Architecture.arm, - "armv5tejb" : Architecture.arm, - "armv5teb" : Architecture.arm, - "armv5tb" : Architecture.arm, - "armv4tl" : Architecture.arm, - "armv4tb" : Architecture.arm, - "armv4l" : Architecture.arm, - "armv4b" : Architecture.arm, - "alpha" : Architecture.alpha, - "arc" : Architecture.arc, - "arceb" : Architecture.arc, - "x86_64" : Architecture.x86_64, - "i686" : Architecture.x86, - "i586" : Architecture.x86, - "i486" : Architecture.x86, - "i386" : Architecture.x86, - "ia64" : Architecture.ia64, - "parisc64" : Architecture.parisc, - "parisc" : Architecture.parisc, - "loongarch64" : Architecture.loongarch64, - "mips64" : Architecture.mips64_le, - "mips" : Architecture.mips_le, - "ppc64le" : Architecture.ppc64_le, - "ppc64" : Architecture.ppc64, - "ppc" : Architecture.ppc, - "riscv64" : Architecture.riscv64, - "riscv32" : Architecture.riscv32, - "riscv" : 
Architecture.riscv64, - "s390x" : Architecture.s390x, - "s390" : Architecture.s390, - "tilegx" : Architecture.tilegx, - }.get(s) + "aarch64": Architecture.arm64, + "aarch64_be": Architecture.arm64, + "armv8l": Architecture.arm, + "armv8b": Architecture.arm, + "armv7ml": Architecture.arm, + "armv7mb": Architecture.arm, + "armv7l": Architecture.arm, + "armv7b": Architecture.arm, + "armv6l": Architecture.arm, + "armv6b": Architecture.arm, + "armv5tl": Architecture.arm, + "armv5tel": Architecture.arm, + "armv5tejl": Architecture.arm, + "armv5tejb": Architecture.arm, + "armv5teb": Architecture.arm, + "armv5tb": Architecture.arm, + "armv4tl": Architecture.arm, + "armv4tb": Architecture.arm, + "armv4l": Architecture.arm, + "armv4b": Architecture.arm, + "alpha": Architecture.alpha, + "arc": Architecture.arc, + "arceb": Architecture.arc, + "x86_64": Architecture.x86_64, + "i686": Architecture.x86, + "i586": Architecture.x86, + "i486": Architecture.x86, + "i386": Architecture.x86, + "ia64": Architecture.ia64, + "parisc64": Architecture.parisc, + "parisc": Architecture.parisc, + "loongarch64": Architecture.loongarch64, + "mips64": Architecture.mips64_le, + "mips": Architecture.mips_le, + "ppc64le": Architecture.ppc64_le, + "ppc64": Architecture.ppc64, + "ppc": Architecture.ppc, + "riscv64": Architecture.riscv64, + "riscv32": Architecture.riscv32, + "riscv": Architecture.riscv64, + "s390x": Architecture.s390x, + "s390": Architecture.s390, + "tilegx": Architecture.tilegx, + }.get(s) # fmt: skip if not a: die(f"Architecture {s} is not supported") @@ -380,32 +380,32 @@ class Architecture(StrEnum): def to_efi(self) -> Optional[str]: return { - Architecture.x86_64 : "x64", - Architecture.x86 : "ia32", - Architecture.arm64 : "aa64", - Architecture.arm : "arm", - Architecture.riscv64 : "riscv64", - Architecture.loongarch64 : "loongarch64", - }.get(self) + Architecture.x86_64: "x64", + Architecture.x86: "ia32", + Architecture.arm64: "aa64", + Architecture.arm: "arm", + Architecture.riscv64: "riscv64", + Architecture.loongarch64: "loongarch64", + }.get(self) # fmt: skip def to_qemu(self) -> str: a = { - Architecture.alpha : "alpha", - Architecture.arm : "arm", - Architecture.arm64 : "aarch64", - Architecture.loongarch64 : "loongarch64", - Architecture.mips64_le : "mips", - Architecture.mips_le : "mips", - Architecture.parisc : "hppa", - Architecture.ppc : "ppc", - Architecture.ppc64 : "ppc64", - Architecture.ppc64_le : "ppc64", - Architecture.riscv32 : "riscv32", - Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.x86 : "i386", - Architecture.x86_64 : "x86_64", - }.get(self) + Architecture.alpha: "alpha", + Architecture.arm: "arm", + Architecture.arm64: "aarch64", + Architecture.loongarch64: "loongarch64", + Architecture.mips64_le: "mips", + Architecture.mips_le: "mips", + Architecture.parisc: "hppa", + Architecture.ppc: "ppc", + Architecture.ppc64: "ppc64", + Architecture.ppc64_le: "ppc64", + Architecture.riscv32: "riscv32", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.x86: "i386", + Architecture.x86_64: "x86_64", + }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by QEMU") @@ -414,20 +414,20 @@ class Architecture(StrEnum): def to_oci(self) -> str: a = { - Architecture.arm : "arm", - Architecture.arm64 : "arm64", - Architecture.loongarch64 : "loong64", - Architecture.mips64_le : "mips64le", - Architecture.mips_le : "mipsle", - Architecture.ppc : "ppc", - Architecture.ppc64 : "ppc64", - Architecture.ppc64_le : 
"ppc64le", - Architecture.riscv32 : "riscv", - Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.x86 : "386", - Architecture.x86_64 : "amd64", - }.get(self) + Architecture.arm: "arm", + Architecture.arm64: "arm64", + Architecture.loongarch64: "loong64", + Architecture.mips64_le: "mips64le", + Architecture.mips_le: "mipsle", + Architecture.ppc: "ppc", + Architecture.ppc64: "ppc64", + Architecture.ppc64_le: "ppc64le", + Architecture.riscv32: "riscv", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.x86: "386", + Architecture.x86_64: "amd64", + }.get(self) # fmt: skip if not a: die(f"Architecture {self} not supported by OCI") @@ -447,23 +447,22 @@ class Architecture(StrEnum): return self.is_x86_variant() def can_kvm(self) -> bool: - return ( - self == Architecture.native() or - (Architecture.native() == Architecture.x86_64 and self == Architecture.x86) + return self == Architecture.native() or ( + Architecture.native() == Architecture.x86_64 and self == Architecture.x86 ) def default_qemu_machine(self) -> str: m = { - Architecture.x86 : "q35", - Architecture.x86_64 : "q35", - Architecture.arm : "virt", - Architecture.arm64 : "virt", - Architecture.s390 : "s390-ccw-virtio", - Architecture.s390x : "s390-ccw-virtio", - Architecture.ppc : "pseries", - Architecture.ppc64 : "pseries", - Architecture.ppc64_le : "pseries", - } + Architecture.x86: "q35", + Architecture.x86_64: "q35", + Architecture.arm: "virt", + Architecture.arm64: "virt", + Architecture.s390: "s390-ccw-virtio", + Architecture.s390x: "s390-ccw-virtio", + Architecture.ppc: "pseries", + Architecture.ppc64: "pseries", + Architecture.ppc64_le: "pseries", + } # fmt: skip if self not in m: die(f"No qemu machine defined for architecture {self}") @@ -472,9 +471,9 @@ class Architecture(StrEnum): def default_qemu_nic_model(self) -> str: return { - Architecture.s390 : "virtio", - Architecture.s390x : "virtio", - }.get(self, "virtio-net-pci") + Architecture.s390: "virtio", + Architecture.s390x: "virtio", + }.get(self, "virtio-net-pci") # fmt: skip def is_native(self) -> bool: return self == self.native() @@ -503,15 +502,17 @@ def parse_boolean(s: str) -> bool: die(f"Invalid boolean literal: {s!r}") -def parse_path(value: str, - *, - required: bool = True, - resolve: bool = True, - expanduser: bool = True, - expandvars: bool = True, - secret: bool = False, - absolute: bool = False, - constants: Sequence[str] = ()) -> Path: +def parse_path( + value: str, + *, + required: bool = True, + resolve: bool = True, + expanduser: bool = True, + expandvars: bool = True, + secret: bool = False, + absolute: bool = False, + constants: Sequence[str] = (), +) -> Path: if value in constants: return Path(value) @@ -535,10 +536,12 @@ def parse_path(value: str, if secret and path.exists(): mode = path.stat().st_mode & 0o777 if mode & 0o007: - die(textwrap.dedent(f"""\ + die( + textwrap.dedent(f"""\ Permissions of '{path}' of '{mode:04o}' are too open. When creating secret files use an access mode that restricts access to the owner only. 
- """)) + """) + ) return path @@ -552,7 +555,7 @@ def config_parse_key(value: Optional[str], old: Optional[str]) -> Optional[Path] def make_tree_parser(absolute: bool = True, required: bool = False) -> Callable[[str], ConfigTree]: def parse_tree(value: str) -> ConfigTree: - src, sep, tgt = value.partition(':') + src, sep, tgt = value.partition(":") return ConfigTree( source=parse_path(src, required=required), @@ -562,7 +565,9 @@ def make_tree_parser(absolute: bool = True, required: bool = False) -> Callable[ resolve=False, expanduser=False, absolute=absolute, - ) if sep else None, + ) + if sep + else None, ) return parse_tree @@ -764,8 +769,8 @@ def config_default_repository_key_fetch(namespace: argparse.Namespace) -> bool: return cast( bool, - (namespace.tools_tree_distribution == Distribution.ubuntu and namespace.distribution.is_rpm_distribution()) or - namespace.tools_tree_distribution.is_rpm_distribution() + (namespace.tools_tree_distribution == Distribution.ubuntu and namespace.distribution.is_rpm_distribution()) + or namespace.tools_tree_distribution.is_rpm_distribution(), ) @@ -830,11 +835,9 @@ def config_make_enum_matcher(type: type[StrEnum]) -> ConfigMatchCallback: return config_match_enum -def config_make_list_parser(delimiter: str, - *, - parse: Callable[[str], Any] = str, - unescape: bool = False, - reset: bool = True) -> ConfigParseCallback: +def config_make_list_parser( + delimiter: str, *, parse: Callable[[str], Any] = str, unescape: bool = False, reset: bool = True +) -> ConfigParseCallback: def config_parse_list(value: Optional[str], old: Optional[list[Any]]) -> Optional[list[Any]]: new = old.copy() if old else [] @@ -888,12 +891,14 @@ def config_match_version(match: str, value: str) -> bool: return True -def config_make_dict_parser(delimiter: str, - *, - parse: Callable[[str], tuple[str, Any]], - unescape: bool = False, - allow_paths: bool = False, - reset: bool = True) -> ConfigParseCallback: +def config_make_dict_parser( + delimiter: str, + *, + parse: Callable[[str], tuple[str, Any]], + unescape: bool = False, + allow_paths: bool = False, + reset: bool = True, +) -> ConfigParseCallback: def config_parse_dict(value: Optional[str], old: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]: new = old.copy() if old else {} @@ -953,13 +958,15 @@ def parse_credential(value: str) -> tuple[str, str]: return (key, value) -def make_path_parser(*, - required: bool = True, - resolve: bool = True, - expanduser: bool = True, - expandvars: bool = True, - secret: bool = False, - constants: Sequence[str] = ()) -> Callable[[str], Path]: +def make_path_parser( + *, + required: bool = True, + resolve: bool = True, + expanduser: bool = True, + expandvars: bool = True, + secret: bool = False, + constants: Sequence[str] = (), +) -> Callable[[str], Path]: return functools.partial( parse_path, required=required, @@ -971,13 +978,15 @@ def make_path_parser(*, ) -def config_make_path_parser(*, - required: bool = True, - resolve: bool = True, - expanduser: bool = True, - expandvars: bool = True, - secret: bool = False, - constants: Sequence[str] = ()) -> ConfigParseCallback: +def config_make_path_parser( + *, + required: bool = True, + resolve: bool = True, + expanduser: bool = True, + expandvars: bool = True, + secret: bool = False, + constants: Sequence[str] = (), +) -> ConfigParseCallback: def config_parse_path(value: Optional[str], old: Optional[Path]) -> Optional[Path]: if not value: return None @@ -1089,8 +1098,9 @@ def config_parse_profile(value: Optional[str], old: Optional[int] = None) 
-> Opt return None if not is_valid_filename(value): - die(f"{value!r} is not a valid profile", - hint="Profile= or --profile= requires a name with no path components.") + die( + f"{value!r} is not a valid profile", hint="Profile= or --profile= requires a name with no path components." + ) return value @@ -1179,14 +1189,13 @@ def file_run_or_read(file: Path) -> str: content = file.read_text() if content.startswith("#!/"): - die(f"{file} starts with a shebang ({content.splitlines()[0]})", - hint="This file should be executable") + die(f"{file} starts with a shebang ({content.splitlines()[0]})", hint="This file should be executable") return content class KeySourceType(StrEnum): - file = enum.auto() + file = enum.auto() engine = enum.auto() @@ -1252,7 +1261,7 @@ class ConfigSetting: def __post_init__(self) -> None: if not self.name: - object.__setattr__(self, 'name', ''.join(x.capitalize() for x in self.dest.split('_') if x)) + object.__setattr__(self, "name", "".join(x.capitalize() for x in self.dest.split("_") if x)) if not self.long: object.__setattr__(self, "long", f"--{self.dest.replace('_', '-')}") @@ -1285,9 +1294,11 @@ class CustomHelpFormatter(argparse.HelpFormatter): Otherwise, the text is wrapped without indentation. """ lines = text.splitlines() - subindent = ' ' if lines[0].endswith(':') else '' - return flatten(textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, - subsequent_indent=subindent) for line in lines) + subindent = " " if lines[0].endswith(":") else "" + return flatten( + textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent) + for line in lines + ) def parse_chdir(path: str) -> Optional[Path]: @@ -1326,7 +1337,7 @@ class IgnoreAction(argparse.Action): parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], - option_string: Optional[str] = None + option_string: Optional[str] = None, ) -> None: logging.warning(f"{option_string} is no longer supported") @@ -1337,7 +1348,7 @@ class PagerHelpAction(argparse._HelpAction): parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None] = None, - option_string: Optional[str] = None + option_string: Optional[str] = None, ) -> None: page(parser.format_help(), namespace.pager) parser.exit() @@ -1383,10 +1394,7 @@ class Args: @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Args": - return cls(**{ - k: v for k, v in vars(ns).items() - if k in inspect.signature(cls).parameters - }) + return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters}) def to_dict(self) -> dict[str, Any]: return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory) @@ -1415,16 +1423,17 @@ class Args: k = key_transformer(k) if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v): - die(f"Serialized JSON has unknown field {k} with value {v}", - hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON") + die( + f"Serialized JSON has unknown field {k} with value {v}", + hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON", + ) value_transformer = json_type_transformer(cls) j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} - return dataclasses.replace(cls.default(), **{ - k: v for k, v in j.items() - if k in inspect.signature(cls).parameters - }) + return dataclasses.replace( + cls.default(), **{k: v for k, v 
in j.items() if k in inspect.signature(cls).parameters} + ) PACKAGE_GLOBS = ( @@ -1655,10 +1664,7 @@ class Config: @classmethod def from_namespace(cls, ns: argparse.Namespace) -> "Config": - return cls(**{ - k: v for k, v in vars(ns).items() - if k in inspect.signature(cls).parameters - }) + return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters}) @property def output_with_format(self) -> str: @@ -1738,8 +1744,7 @@ class Config: "repositories": sorted(self.repositories), "overlay": self.overlay, "prepare_scripts": sorted( - base64.b64encode(script.read_bytes()).decode() - for script in self.prepare_scripts + base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts ), # We don't use the full path here since tests will often use temporary directories for the output directory # which would trigger a rebuild every time. @@ -1778,16 +1783,17 @@ class Config: k = key_transformer(k) if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v): - die(f"Serialized JSON has unknown field {k} with value {v}", - hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON") + die( + f"Serialized JSON has unknown field {k} with value {v}", + hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON", + ) value_transformer = json_type_transformer(cls) j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()} - return dataclasses.replace(cls.default(), **{ - k: v for k, v in j.items() - if k in inspect.signature(cls).parameters - }) + return dataclasses.replace( + cls.default(), **{k: v for k, v in j.items() if k in inspect.signature(cls).parameters} + ) def find_binary(self, *names: PathString, tools: bool = True) -> Optional[Path]: return find_binary(*names, root=self.tools() if tools else Path("/"), extra=self.extra_search_paths) @@ -1813,9 +1819,9 @@ class Config: ] if ( - binary and - (path := self.find_binary(binary, tools=tools)) and - any(path.is_relative_to(d) for d in self.extra_search_paths) + binary + and (path := self.find_binary(binary, tools=tools)) + and any(path.is_relative_to(d) for d in self.extra_search_paths) ): tools = False opt += flatten(("--ro-bind", d, d) for d in self.extra_search_paths if not relaxed) @@ -1863,8 +1869,8 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple line = line.strip() - if line[0] == '[': - if line[-1] != ']': + if line[0] == "[": + if line[-1] != "]": die(f"{line} is not a valid section") # Yield the section name with an empty key and value to indicate we've finished the current section. @@ -2028,7 +2034,6 @@ SETTINGS = ( help="Repositories to use", scope=SettingScope.universal, ), - ConfigSetting( dest="output_format", short="-t", @@ -2171,7 +2176,6 @@ SETTINGS = ( paths=("mkosi.clean",), help="Clean script to run after cleanup", ), - ConfigSetting( dest="packages", short="-p", @@ -2235,11 +2239,11 @@ SETTINGS = ( ), ConfigSetting( dest="base_trees", - long='--base-tree', - metavar='PATH', + long="--base-tree", + metavar="PATH", section="Content", parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)), - help='Use the given tree as base tree (e.g. lower sysext layer)', + help="Use the given tree as base tree (e.g. lower sysext layer)", ), ConfigSetting( dest="skeleton_trees", @@ -2458,7 +2462,7 @@ SETTINGS = ( # The default value is set in `__init__.py` in `install_uki`. 
# `None` is used to determine if the roothash and boot count format # should be appended to the filename if they are found. - #default= + # default= help="Specify the format used for the UKI filename", ), ConfigSetting( @@ -2645,7 +2649,6 @@ SETTINGS = ( parse=config_parse_feature, help="Specify whether to relabel all files with setfiles", ), - ConfigSetting( dest="secure_boot", metavar="BOOL", @@ -2757,7 +2760,6 @@ SETTINGS = ( section="Validation", help="GPG key to use for signing", ), - ConfigSetting( dest="tools_tree", metavar="PATH", @@ -2861,7 +2863,12 @@ SETTINGS = ( section="Build", parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)), help="Use a sandbox tree to configure the various tools that mkosi executes", - paths=("mkosi.sandbox", "mkosi.sandbox.tar", "mkosi.pkgmngr", "mkosi.pkgmngr.tar",), + paths=( + "mkosi.sandbox", + "mkosi.sandbox.tar", + "mkosi.pkgmngr", + "mkosi.pkgmngr.tar", + ), scope=SettingScope.universal, ), ConfigSetting( @@ -2926,7 +2933,6 @@ SETTINGS = ( parse=config_parse_boolean, help="Whether mkosi can store information about previous builds", ), - ConfigSetting( dest="proxy_url", section="Host", @@ -2995,8 +3001,10 @@ SETTINGS = ( metavar="BOOL", section="Host", parse=config_parse_boolean, - help=('If specified, the container/VM is run with a temporary snapshot of the output ' - 'image that is removed immediately when the container/VM terminates'), + help=( + "If specified, the container/VM is run with a temporary snapshot of the output " + "image that is removed immediately when the container/VM terminates" + ), nargs="?", ), ConfigSetting( @@ -3277,7 +3285,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: prog="mkosi", description="Build Bespoke OS Images", # the synopsis below is supposed to be indented by two spaces - usage="\n " + textwrap.dedent("""\ + usage="\n " + + textwrap.dedent("""\ mkosi [options…] {b}summary{e} mkosi [options…] {b}cat-config{e} mkosi [options…] {b}build{e} [command line…] @@ -3309,14 +3318,16 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: help=argparse.SUPPRESS, ) parser.add_argument( - "-f", "--force", + "-f", + "--force", action="count", dest="force", default=0, help="Remove existing image file before operation", ) parser.add_argument( - "-C", "--directory", + "-C", + "--directory", type=parse_chdir if chdir else str, default=Path.cwd(), help="Change to specified directory before doing anything", @@ -3360,7 +3371,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: default="mkosi of %u", ) parser.add_argument( - "-B", "--auto-bump", + "-B", + "--auto-bump", help="Automatically bump image version after building", action="store_true", default=False, @@ -3379,7 +3391,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: default=False, ) parser.add_argument( - "-w", "--wipe-build-dir", + "-w", + "--wipe-build-dir", help="Remove the build directory before building the image", action="store_true", default=False, @@ -3413,7 +3426,8 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: help=argparse.SUPPRESS, ) parser.add_argument( - "-h", "--help", + "-h", + "--help", action=PagerHelpAction, help=argparse.SUPPRESS, ) @@ -3428,12 +3442,12 @@ def create_argument_parser(chdir: bool = True) -> argparse.ArgumentParser: for long in [s.long, *s.compat_longs]: opts = [s.short, long] if s.short and long == s.long else [long] - group.add_argument( # type: ignore + 
group.add_argument( # type: ignore *opts, dest=s.dest, choices=s.choices, metavar=s.metavar, - nargs=s.nargs, # type: ignore + nargs=s.nargs, # type: ignore const=s.const, help=s.help if long == s.long else argparse.SUPPRESS, action=ConfigAction, @@ -3474,7 +3488,7 @@ class ConfigAction(argparse.Action): parser: argparse.ArgumentParser, namespace: argparse.Namespace, values: Union[str, Sequence[Any], None], - option_string: Optional[str] = None + option_string: Optional[str] = None, ) -> None: assert option_string is not None @@ -3500,7 +3514,7 @@ class ParseContext: # specified in configuration files. self.cli = argparse.Namespace() self.config = argparse.Namespace( - files = [], + files=[], ) self.defaults = argparse.Namespace() # Compare inodes instead of paths so we can't get tricked by bind mounts and such. @@ -3596,10 +3610,7 @@ class ParseContext: # If a value was specified on the CLI, it always takes priority. If the setting is a collection of values, we # merge the value from the CLI with the value from the configuration, making sure that the value from the CLI # always takes priority. - if ( - hasattr(self.cli, setting.dest) and - (v := getattr(self.cli, setting.dest)) is not None - ): + if hasattr(self.cli, setting.dest) and (v := getattr(self.cli, setting.dest)) is not None: if isinstance(v, list): return (getattr(self.config, setting.dest, None) or []) + v elif isinstance(v, dict): @@ -3614,15 +3625,14 @@ class ParseContext: # value either if the setting is set to the empty string on the command line. if ( - not hasattr(self.cli, setting.dest) and - hasattr(self.config, setting.dest) and - (v := getattr(self.config, setting.dest)) is not None + not hasattr(self.cli, setting.dest) + and hasattr(self.config, setting.dest) + and (v := getattr(self.config, setting.dest)) is not None ): return v - if ( - (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and - isinstance(setting.parse(None, None), (dict, list, set)) + if (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and isinstance( + setting.parse(None, None), (dict, list, set) ): default = setting.parse(None, None) elif hasattr(self.defaults, setting.dest): @@ -3719,7 +3729,7 @@ class ParseContext: return match_triggered is not False def parse_config_one(self, path: Path, profiles: bool = False, local: bool = False) -> bool: - s: Optional[ConfigSetting] # Make mypy happy + s: Optional[ConfigSetting] # Make mypy happy extras = path.is_dir() if path.is_dir(): @@ -3740,10 +3750,7 @@ class ParseContext: delattr(self.config, s.dest) for s in SETTINGS: - if ( - s.scope == SettingScope.universal and - (image := getattr(self.config, "image", None)) is not None - ): + if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None: continue if self.only_sections and s.section not in self.only_sections: @@ -3764,14 +3771,14 @@ class ParseContext: s.dest, s.parse( file_run_or_read(extra).rstrip("\n") if s.path_read_text else f, - getattr(self.config, s.dest, None) + getattr(self.config, s.dest, None), ), ) if path.exists(): abs_path = Path.cwd() / path logging.debug(f"Loading configuration file {abs_path}") - files = getattr(self.config, 'files') + files = getattr(self.config, "files") files += [abs_path] for section, k, v in parse_ini(path, only_sections=self.only_sections or {s.section for s in SETTINGS}): @@ -3784,10 +3791,7 @@ class ParseContext: if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)): die(f"Unknown setting {name}") - if ( - s.scope == 
SettingScope.universal and - (image := getattr(self.config, "image", None)) is not None - ): + if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None: die(f"Setting {name} cannot be configured in subimage {image}") if name in self.immutable: die(f"Setting {name} cannot be modified anymore at this point") @@ -3875,10 +3879,10 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu return args, () if ( - args.verb.needs_build() and - args.verb != Verb.build and - not args.force and - Path(".mkosi-private/history/latest.json").exists() + args.verb.needs_build() + and args.verb != Verb.build + and not args.force + and Path(".mkosi-private/history/latest.json").exists() ): prev = Config.from_json(Path(".mkosi-private/history/latest.json").read_text()) @@ -3897,7 +3901,7 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu if hasattr(context.config, s.dest): delattr(context.config, s.dest) - context.only_sections = ("Include", "Host",) + context.only_sections = ("Include", "Host") else: prev = None @@ -3931,9 +3935,7 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu # we check here to see if dependencies were explicitly provided and if not we gather # the list of default dependencies while we parse the subimages. dependencies: Optional[list[str]] = ( - None - if hasattr(context.cli, "dependencies") or hasattr(context.config, "dependencies") - else [] + None if hasattr(context.cli, "dependencies") or hasattr(context.config, "dependencies") else [] ) if args.directory is not None and Path("mkosi.images").exists(): @@ -3955,7 +3957,7 @@ def parse_config(argv: Sequence[str] = (), *, resources: Path = Path("/")) -> tu name: getattr(config, "environment")[name] for name in getattr(config, "pass_environment", {}) if name in getattr(config, "environment", {}) - } + }, ) for p in sorted(Path("mkosi.images").iterdir()): @@ -4023,14 +4025,20 @@ def load_credentials(args: argparse.Namespace) -> dict[str, str]: if "ssh.authorized_keys.root" not in creds: if args.ssh_certificate: - pubkey = run(["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"], - stdout=subprocess.PIPE, env=dict(OPENSSL_CONF="/dev/null")).stdout.strip() - sshpubkey = run(["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], - input=pubkey, stdout=subprocess.PIPE).stdout.strip() + pubkey = run( + ["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"], + stdout=subprocess.PIPE, + env=dict(OPENSSL_CONF="/dev/null"), + ).stdout.strip() + sshpubkey = run( + ["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], input=pubkey, stdout=subprocess.PIPE + ).stdout.strip() creds["ssh.authorized_keys.root"] = sshpubkey elif args.ssh: - die("Ssh= is enabled but no SSH certificate was found", - hint="Run 'mkosi genkey' to automatically create one") + die( + "Ssh= is enabled but no SSH certificate was found", + hint="Run 'mkosi genkey' to automatically create one", + ) return creds @@ -4122,11 +4130,7 @@ def load_environment(args: argparse.Namespace) -> dict[str, str]: if gnupghome := os.getenv("GNUPGHOME"): env["GNUPGHOME"] = gnupghome - env |= dict( - parse_environment(line) - for f in args.environment_files - for line in f.read_text().strip().splitlines() - ) + env |= dict(parse_environment(line) for f in args.environment_files for line in f.read_text().strip().splitlines()) env |= args.environment return env @@ -4169,9 +4173,9 @@ def load_config(config: argparse.Namespace) -> 
Config: # For unprivileged builds we need the userxattr OverlayFS mount option, which is only available # in Linux v5.11 and later. if ( - (config.build_scripts or config.base_trees) and - GenericVersion(platform.release()) < GenericVersion("5.11") and - os.geteuid() != 0 + (config.build_scripts or config.base_trees) + and GenericVersion(platform.release()) < GenericVersion("5.11") + and os.geteuid() != 0 ): die("This unprivileged build configuration requires at least Linux v5.11") @@ -4241,7 +4245,7 @@ def cat_config(images: Sequence[Config]) -> str: # Display the paths as relative to ., if underneath. if path.is_relative_to(Path.cwd()): path = path.relative_to(Path.cwd()) - print(f'{Style.blue}# {path}{Style.reset}', file=c) + print(f"{Style.blue}# {path}{Style.reset}", file=c) print(path.read_text(), file=c) return c.getvalue() @@ -4601,9 +4605,11 @@ def want_selinux_relabel(config: Config, root: Path, fatal: bool = True) -> Opti die("SELinux relabel is requested but could not find selinux config at /etc/selinux/config") return None - policy = run(["sh", "-c", f". {selinux} && echo $SELINUXTYPE"], - sandbox=config.sandbox(binary="sh", options=["--ro-bind", selinux, selinux]), - stdout=subprocess.PIPE).stdout.strip() + policy = run( + ["sh", "-c", f". {selinux} && echo $SELINUXTYPE"], + sandbox=config.sandbox(binary="sh", options=["--ro-bind", selinux, selinux]), + stdout=subprocess.PIPE, + ).stdout.strip() if not policy: if fatal and config.selinux_relabel == ConfigFeature.enabled: die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config") @@ -4642,5 +4648,8 @@ def systemd_tool_version(*tool: PathString, sandbox: SandboxProtocol = nosandbox [*tool, "--version"], stdout=subprocess.PIPE, sandbox=sandbox(binary=tool[-1]), - ).stdout.split()[2].strip("()").removeprefix("v") + ) + .stdout.split()[2] + .strip("()") + .removeprefix("v") ) diff --git a/mkosi/curl.py b/mkosi/curl.py index 900c392e9..5d792af13 100644 --- a/mkosi/curl.py +++ b/mkosi/curl.py @@ -28,4 +28,4 @@ def curl(config: Config, url: str, output_dir: Path) -> None: network=True, options=["--bind", output_dir, output_dir, *finalize_crypto_mounts(config)], ), - ) + ) # fmt: skip diff --git a/mkosi/distributions/__init__.py b/mkosi/distributions/__init__.py index 10df0902a..a04b7dc61 100644 --- a/mkosi/distributions/__init__.py +++ b/mkosi/distributions/__init__.py @@ -15,10 +15,10 @@ if TYPE_CHECKING: class PackageType(StrEnum): - none = enum.auto() - rpm = enum.auto() - deb = enum.auto() - pkg = enum.auto() + none = enum.auto() + rpm = enum.auto() + deb = enum.auto() + pkg = enum.auto() class DistributionInstaller: @@ -74,21 +74,21 @@ class DistributionInstaller: class Distribution(StrEnum): # Please consult docs/distribution-policy.md and contact one # of the mkosi maintainers before implementing a new distribution. 
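Much of the churn in the enum hunks below comes from dropping the hand-aligned enum.auto() assignments. Alignment carries no payload here because a str-valued enum derives each member's value from its name. A minimal sketch using the stdlib enum.StrEnum (Python 3.11+) as a stand-in for mkosi's own StrEnum from mkosi.util; note the installer() hunk below (str(self).replace("-", "_")) suggests mkosi's variant renders underscores as dashes, which the stdlib class does not:

import enum

class Verb(enum.StrEnum):
    # With a str-valued enum, auto() resolves to the lowercased member
    # name, so vertical alignment of the "=" signs is purely cosmetic.
    build = enum.auto()
    summary = enum.auto()
    documentation = enum.auto()

assert Verb.build == "build"
assert str(Verb.documentation) == "documentation"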
- fedora = enum.auto() - debian = enum.auto() - kali = enum.auto() - ubuntu = enum.auto() - arch = enum.auto() - opensuse = enum.auto() - mageia = enum.auto() - centos = enum.auto() - rhel = enum.auto() - rhel_ubi = enum.auto() + fedora = enum.auto() + debian = enum.auto() + kali = enum.auto() + ubuntu = enum.auto() + arch = enum.auto() + opensuse = enum.auto() + mageia = enum.auto() + centos = enum.auto() + rhel = enum.auto() + rhel_ubi = enum.auto() openmandriva = enum.auto() - rocky = enum.auto() - alma = enum.auto() - azure = enum.auto() - custom = enum.auto() + rocky = enum.auto() + alma = enum.auto() + azure = enum.auto() + custom = enum.auto() def is_centos_variant(self) -> bool: return self in ( @@ -156,7 +156,7 @@ class Distribution(StrEnum): return self.installer().package_manager(context.config).createrepo(context) def installer(self) -> type[DistributionInstaller]: - modname = str(self).replace('-', '_') + modname = str(self).replace("-", "_") mod = importlib.import_module(f"mkosi.distributions.{modname}") installer = getattr(mod, "Installer") assert issubclass(installer, DistributionInstaller) diff --git a/mkosi/distributions/arch.py b/mkosi/distributions/arch.py index aaab71fc7..86711f88b 100644 --- a/mkosi/distributions/arch.py +++ b/mkosi/distributions/arch.py @@ -65,7 +65,8 @@ class Installer(DistributionInstaller): # Testing repositories have to go before regular ones to to take precedence. repos = [ - repo for repo in ( + repo + for repo in ( "core-testing", "core-testing-debug", "extra-testing", @@ -74,7 +75,8 @@ class Installer(DistributionInstaller): "extra-debug", "multilib-testing", "multilib", - ) if repo in context.config.repositories + ) + if repo in context.config.repositories ] + ["core", "extra"] if context.config.architecture.is_arm_variant(): @@ -86,13 +88,12 @@ class Installer(DistributionInstaller): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - Architecture.arm : "armv7h", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + Architecture.arm: "armv7h", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Arch Linux") return a - diff --git a/mkosi/distributions/azure.py b/mkosi/distributions/azure.py index 985251ee9..77ca1dc9e 100644 --- a/mkosi/distributions/azure.py +++ b/mkosi/distributions/azure.py @@ -98,9 +98,9 @@ class Installer(fedora.Installer): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.arm64 : "aarch64", - Architecture.x86_64 : "x86_64", - }.get(arch) + Architecture.arm64: "aarch64", + Architecture.x86_64: "x86_64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") diff --git a/mkosi/distributions/centos.py b/mkosi/distributions/centos.py index 67e309bbd..76be2d57f 100644 --- a/mkosi/distributions/centos.py +++ b/mkosi/distributions/centos.py @@ -53,8 +53,8 @@ class Installer(DistributionInstaller): # The Hyperscale SIG uses /usr/lib/sysimage/rpm in its rebuild of rpm for C9S that's shipped in the # hyperscale-packages-experimental repository. 
if ( - GenericVersion(context.config.release) > 9 or - "hyperscale-packages-experimental" in context.config.repositories + GenericVersion(context.config.release) > 9 + or "hyperscale-packages-experimental" in context.config.repositories ): return "/usr/lib/sysimage/rpm" @@ -84,11 +84,11 @@ class Installer(DistributionInstaller): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.ppc64_le : "ppc64le", - Architecture.s390x : "s390x", - Architecture.arm64 : "aarch64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.ppc64_le: "ppc64le", + Architecture.s390x: "s390x", + Architecture.arm64: "aarch64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by {cls.pretty_name()}") @@ -206,7 +206,7 @@ class Installer(DistributionInstaller): ("epel", "epel"), ("epel-next", "epel/next"), ("epel-testing", "epel/testing"), - ("epel-next-testing", "epel/testing/next") + ("epel-next-testing", "epel/testing/next"), ): # For EPEL we make the assumption that epel is mirrored in the parent directory of the mirror URL and # path we were given. Since this doesn't work for all scenarios, we also allow overriding the mirror @@ -235,41 +235,19 @@ class Installer(DistributionInstaller): for repo in ("epel", "epel-next"): yield RpmRepository(repo, f"{url}&repo={repo}-$releasever", gpgurls, enabled=False) yield RpmRepository( - f"{repo}-debuginfo", - f"{url}&repo={repo}-debug-$releasever", - gpgurls, - enabled=False - ) - yield RpmRepository( - f"{repo}-source", - f"{url}&repo={repo}-source-$releasever", - gpgurls, - enabled=False + f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False ) + yield RpmRepository(f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False) + yield RpmRepository("epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False) yield RpmRepository( - "epel-testing", - f"{url}&repo=testing-epel$releasever", - gpgurls, - enabled=False - ) - yield RpmRepository( - "epel-testing-debuginfo", - f"{url}&repo=testing-debug-epel$releasever", - gpgurls, - enabled=False + "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False ) yield RpmRepository( - "epel-testing-source", - f"{url}&repo=testing-source-epel$releasever", - gpgurls, - enabled=False + "epel-testing-source", f"{url}&repo=testing-source-epel$releasever", gpgurls, enabled=False ) yield RpmRepository( - "epel-next-testing", - f"{url}&repo=epel-testing-next-$releasever", - gpgurls, - enabled=False + "epel-next-testing", f"{url}&repo=epel-testing-next-$releasever", gpgurls, enabled=False ) yield RpmRepository( "epel-next-testing-debuginfo", diff --git a/mkosi/distributions/debian.py b/mkosi/distributions/debian.py index 4b1d029e1..a3c83a303 100644 --- a/mkosi/distributions/debian.py +++ b/mkosi/distributions/debian.py @@ -122,7 +122,7 @@ class Installer(DistributionInstaller): "sparc" : ["lib64"], "sparc64" : ["lib32", "lib64"], "x32" : ["lib32", "lib64", "libx32"], - }.get(context.config.distribution.architecture(context.config.architecture), []) + }.get(context.config.distribution.architecture(context.config.architecture), []) # fmt: skip with umask(~0o755): for d in subdirs: @@ -180,7 +180,7 @@ class Installer(DistributionInstaller): if not context.config.with_docs else [] ), - sandbox=context.sandbox + sandbox=context.sandbox, ) # Finally, run apt to properly install packages in the chroot without having to worry that maintainer @@ 
-213,7 +213,6 @@ class Installer(DistributionInstaller): # Let's make sure it is enabled by default in our images. (context.root / "etc/systemd/system-generators/systemd-gpt-auto-generator").unlink(missing_ok=True) - @classmethod def remove_packages(cls, context: Context, packages: Sequence[str]) -> None: Apt.invoke(context, "purge", packages, apivfs=True) @@ -221,22 +220,22 @@ class Installer(DistributionInstaller): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.arm64 : "arm64", - Architecture.arm : "armhf", - Architecture.alpha : "alpha", - Architecture.x86_64 : "amd64", - Architecture.x86 : "i386", - Architecture.ia64 : "ia64", - Architecture.loongarch64 : "loongarch64", - Architecture.mips64_le : "mips64el", - Architecture.mips_le : "mipsel", - Architecture.parisc : "hppa", - Architecture.ppc64_le : "ppc64el", - Architecture.ppc64 : "ppc64", - Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.s390 : "s390", - }.get(arch) + Architecture.arm64: "arm64", + Architecture.arm: "armhf", + Architecture.alpha: "alpha", + Architecture.x86_64: "amd64", + Architecture.x86: "i386", + Architecture.ia64: "ia64", + Architecture.loongarch64: "loongarch64", + Architecture.mips64_le: "mips64el", + Architecture.mips_le: "mipsel", + Architecture.parisc: "hppa", + Architecture.ppc64_le: "ppc64el", + Architecture.ppc64: "ppc64", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.s390: "s390", + }.get(arch) # fmt: skip if not a: die(f"Architecture {arch} is not supported by Debian") @@ -275,7 +274,7 @@ def fixup_os_release(context: Context) -> None: with osrelease.open("r") as old, newosrelease.open("w") as new: for line in old.readlines(): if line.startswith("VERSION_CODENAME="): - new.write('VERSION_CODENAME=sid\n') + new.write("VERSION_CODENAME=sid\n") else: new.write(line) @@ -285,16 +284,19 @@ def fixup_os_release(context: Context) -> None: # precedence over /usr/lib/os-release, and ignore the latter and assume that if an usr-only # image is built then the package manager will not run on it. if candidate == "etc/os-release": - run([ - "dpkg-divert", - "--quiet", - "--root=/buildroot", - "--local", - "--add", - "--rename", - "--divert", - f"/{candidate}.dpkg", - f"/{candidate}", - ], sandbox=context.sandbox(binary="dpkg-divert", options=["--bind", context.root, "/buildroot"])) + run( + [ + "dpkg-divert", + "--quiet", + "--root=/buildroot", + "--local", + "--add", + "--rename", + "--divert", + f"/{candidate}.dpkg", + f"/{candidate}", + ], + sandbox=context.sandbox(binary="dpkg-divert", options=["--bind", context.root, "/buildroot"]), + ) newosrelease.rename(osrelease) diff --git a/mkosi/distributions/fedora.py b/mkosi/distributions/fedora.py index ddd2abd94..9696e6a25 100644 --- a/mkosi/distributions/fedora.py +++ b/mkosi/distributions/fedora.py @@ -46,8 +46,10 @@ def find_fedora_rpm_gpgkeys(context: Context) -> Iterable[str]: if not key1 and not key2: if not context.config.repository_key_fetch: - die("Fedora GPG keys not found in /usr/share/distribution-gpg-keys", - hint="Make sure the distribution-gpg-keys package is installed") + die( + "Fedora GPG keys not found in /usr/share/distribution-gpg-keys", + hint="Make sure the distribution-gpg-keys package is installed", + ) if context.config.release == "rawhide": # https://fedoraproject.org/fedora.gpg is always outdated when the rawhide key changes. 
Instead, let's @@ -118,13 +120,15 @@ class Installer(DistributionInstaller): return if context.config.release == "eln": - mirror = context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" + mirror = ( + context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose" + ) for repo in ("Appstream", "BaseOS", "Extras", "CRB"): url = f"baseurl={join_mirror(mirror, repo)}" yield RpmRepository(repo.lower(), f"{url}/$basearch/os", gpgurls) yield RpmRepository(repo.lower(), f"{url}/$basearch/debug/tree", gpgurls, enabled=False) yield RpmRepository(repo.lower(), f"{url}/source/tree", gpgurls, enabled=False) - elif (m := context.config.mirror): + elif m := context.config.mirror: directory = "development" if context.config.release == "rawhide" else "releases" url = f"baseurl={join_mirror(m, f'linux/{directory}/$releasever/Everything')}" yield RpmRepository("fedora", f"{url}/$basearch/os", gpgurls) @@ -156,16 +160,10 @@ class Installer(DistributionInstaller): enabled=False, ) yield RpmRepository( - "updates-source", - f"{url}&repo=updates-released-source-f$releasever", - gpgurls, - enabled=False + "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False ) yield RpmRepository( - "updates-testing", - f"{url}&repo=updates-testing-f$releasever", - gpgurls, - enabled=False + "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False ) yield RpmRepository( "updates-testing-debuginfo", @@ -183,14 +181,14 @@ class Installer(DistributionInstaller): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.arm64 : "aarch64", - Architecture.mips64_le : "mips64el", - Architecture.mips_le : "mipsel", - Architecture.ppc64_le : "ppc64le", - Architecture.riscv64 : "riscv64", - Architecture.s390x : "s390x", - Architecture.x86_64 : "x86_64", - }.get(arch) + Architecture.arm64: "aarch64", + Architecture.mips64_le: "mips64el", + Architecture.mips_le: "mipsel", + Architecture.ppc64_le: "ppc64le", + Architecture.riscv64: "riscv64", + Architecture.s390x: "s390x", + Architecture.x86_64: "x86_64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Fedora") diff --git a/mkosi/distributions/mageia.py b/mkosi/distributions/mageia.py index 6e91853a8..d461cc392 100644 --- a/mkosi/distributions/mageia.py +++ b/mkosi/distributions/mageia.py @@ -52,9 +52,9 @@ class Installer(fedora.Installer): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by Mageia") diff --git a/mkosi/distributions/openmandriva.py b/mkosi/distributions/openmandriva.py index a153d68de..1e0de8b54 100644 --- a/mkosi/distributions/openmandriva.py +++ b/mkosi/distributions/openmandriva.py @@ -49,10 +49,10 @@ class Installer(fedora.Installer): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - Architecture.riscv64 : "riscv64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + Architecture.riscv64: "riscv64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by OpenMandriva") diff --git a/mkosi/distributions/opensuse.py b/mkosi/distributions/opensuse.py index 30a91be2c..4e3356ee7 100644 
--- a/mkosi/distributions/opensuse.py +++ b/mkosi/distributions/opensuse.py @@ -72,7 +72,8 @@ class Installer(DistributionInstaller): "--recommends" if context.config.with_recommends else "--no-recommends", *sort_packages(packages), ], - apivfs=apivfs) + apivfs=apivfs, + ) # fmt: skip else: Dnf.invoke(context, "install", sort_packages(packages), apivfs=apivfs) @@ -100,8 +101,10 @@ class Installer(DistributionInstaller): ) if not gpgkeys and not context.config.repository_key_fetch: - die("OpenSUSE GPG keys not found in /usr/share/distribution-gpg-keys", - hint="Make sure the distribution-gpg-keys package is installed") + die( + "OpenSUSE GPG keys not found in /usr/share/distribution-gpg-keys", + hint="Make sure the distribution-gpg-keys package is installed", + ) if zypper and gpgkeys: run( @@ -112,8 +115,8 @@ class Installer(DistributionInstaller): "--bind", context.root, "/buildroot", *finalize_crypto_mounts(context.config), ], - ) - ) + ), + ) # fmt: skip if context.config.release == "tumbleweed": if context.config.architecture == Architecture.x86_64: @@ -162,11 +165,13 @@ class Installer(DistributionInstaller): ) else: if ( - context.config.release in ("current", "stable", "leap") and - context.config.architecture != Architecture.x86_64 + context.config.release in ("current", "stable", "leap") + and context.config.architecture != Architecture.x86_64 ): - die(f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture", - hint="Specify either tumbleweed or a specific leap release such as 15.6") + die( + f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture", + hint="Specify either tumbleweed or a specific leap release such as 15.6", + ) if context.config.release in ("current", "stable", "leap"): release = "openSUSE-current" @@ -225,9 +230,9 @@ class Installer(DistributionInstaller): @classmethod def architecture(cls, arch: Architecture) -> str: a = { - Architecture.x86_64 : "x86_64", - Architecture.arm64 : "aarch64", - }.get(arch) + Architecture.x86_64: "x86_64", + Architecture.arm64: "aarch64", + }.get(arch) # fmt: skip if not a: die(f"Architecture {a} is not supported by OpenSUSE") diff --git a/mkosi/distributions/ubuntu.py b/mkosi/distributions/ubuntu.py index 803a91f69..f9cd81374 100644 --- a/mkosi/distributions/ubuntu.py +++ b/mkosi/distributions/ubuntu.py @@ -79,4 +79,3 @@ class Installer(debian.Installer): components=components, signedby=signedby, ) - diff --git a/mkosi/initrd.py b/mkosi/initrd.py index 3b6a9d95f..97ec49153 100644 --- a/mkosi/initrd.py +++ b/mkosi/initrd.py @@ -36,19 +36,22 @@ def main() -> None: default=platform.uname().release, ) parser.add_argument( - "-t", "--format", + "-t", + "--format", choices=[str(OutputFormat.cpio), str(OutputFormat.uki), str(OutputFormat.directory)], help="Output format (CPIO archive, UKI or local directory)", default="cpio", ) parser.add_argument( - "-o", "--output", + "-o", + "--output", metavar="NAME", help="Output name", default="initrd", ) parser.add_argument( - "-O", "--output-dir", + "-O", + "--output-dir", metavar="DIR", help="Output directory", default="", @@ -66,7 +69,8 @@ def main() -> None: default=False, ) parser.add_argument( - "-D", "--show-documentation", + "-D", + "--show-documentation", help="Show the man page", action="store_true", default=False, @@ -98,7 +102,7 @@ def main() -> None: "--kernel-modules-include=host", "--build-sources", "", "--include=mkosi-initrd", - ] + ] # fmt: skip if args.debug: cmdline += ["--debug"] @@ -145,8 +149,9 @@ def 
main() -> None: if (Path("/etc") / p).resolve().is_file(): shutil.copy2(Path("/etc") / p, Path(d) / "etc" / p) else: - shutil.copytree(Path("/etc") / p, Path(d) / "etc" / p, - ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True) + shutil.copytree( + Path("/etc") / p, Path(d) / "etc" / p, ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True + ) cmdline += ["--sandbox-tree", d] @@ -156,7 +161,7 @@ def main() -> None: cmdline, stdin=sys.stdin, stdout=sys.stdout, - env={"MKOSI_DNF": dnf.name} if (dnf := find_binary("dnf", "dnf5")) else {} + env={"MKOSI_DNF": dnf.name} if (dnf := find_binary("dnf", "dnf5")) else {}, ) diff --git a/mkosi/installer/__init__.py b/mkosi/installer/__init__.py index 47914ddae..fd9930f93 100644 --- a/mkosi/installer/__init__.py +++ b/mkosi/installer/__init__.py @@ -28,7 +28,7 @@ class PackageManager: @classmethod def state_subdirs(cls, state: Path) -> list[Path]: - return [] + return [] @classmethod def scripts(cls, context: Context) -> dict[str, list[PathString]]: @@ -37,7 +37,7 @@ class PackageManager: @classmethod def finalize_environment(cls, context: Context) -> dict[str, str]: env = { - "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. + "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc. # systemd's chroot detection doesn't work when unprivileged so tell it explicitly. "SYSTEMD_IN_CHROOT": "1", } @@ -46,8 +46,8 @@ class PackageManager: env["SYSTEMD_HWDB_UPDATE_BYPASS"] = "1" if ( - "KERNEL_INSTALL_BYPASS" not in context.config.environment and - context.config.bootable != ConfigFeature.disabled + "KERNEL_INSTALL_BYPASS" not in context.config.environment + and context.config.bootable != ConfigFeature.disabled ): env["KERNEL_INSTALL_BYPASS"] = "1" else: @@ -70,7 +70,7 @@ class PackageManager: mounts = [ *finalize_crypto_mounts(context.config), "--bind", context.repository, "/repository", - ] + ] # fmt: skip if context.config.local_mirror and (mirror := startswith(context.config.local_mirror, "file://")): mounts += ["--ro-bind", mirror, mirror] @@ -111,7 +111,7 @@ class PackageManager: # original root won't be available anymore. If we're not in the sandbox yet, we want to pick up the passwd # files from the original root. 
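The bracketed option lists in the installer hunks below keep their "--flag value" pairs two to a line only because of the trailing # fmt: skip; without it the formatter would reflow the elements by its own line-packing rules. A short sketch of the two suppression comments the ruff formatter honors (the same pragmas Black uses): # fmt: skip preserves one statement, while # fmt: off / # fmt: on fence a whole region. The list contents here are illustrative, not mkosi's:

# A trailing "# fmt: skip" on the closing line preserves the layout
# of the entire multi-line statement it terminates.
mounts = [
    "--ro-bind", "/etc/passwd", "/etc/passwd",
    "--bind", "/repository", "/repository",
]  # fmt: skip

# fmt: off
IDENTITY = [
    1, 0,
    0, 1,
]
# fmt: on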
             *finalize_passwd_mounts(root),
-        ]
+        ]  # fmt: skip
 
     @classmethod
     def apivfs_script_cmd(cls, context: Context) -> list[PathString]:
@@ -123,7 +123,7 @@ class PackageManager:
             *apivfs_options(),
             *cls.options(root="/buildroot"),
             "--",
-        ]
+        ]  # fmt: skip
 
     @classmethod
     def sandbox(
@@ -142,7 +142,7 @@ class PackageManager:
                 *cls.options(root=context.root, apivfs=apivfs),
                 *options,
             ],
-        )
+        )  # fmt: skip
 
     @classmethod
     def sync(cls, context: Context, force: bool) -> None:
@@ -168,9 +168,9 @@ def clean_package_manager_metadata(context: Context) -> None:
     if context.config.clean_package_metadata == ConfigFeature.disabled:
         return
 
-    if (
-        context.config.clean_package_metadata == ConfigFeature.auto and
-        context.config.output_format in (OutputFormat.directory, OutputFormat.tar)
+    if context.config.clean_package_metadata == ConfigFeature.auto and context.config.output_format in (
+        OutputFormat.directory,
+        OutputFormat.tar,
     ):
         return
 
@@ -181,10 +181,12 @@ def clean_package_manager_metadata(context: Context) -> None:
     executable = context.config.distribution.package_manager(context.config).executable(context.config)
     remove = []
 
-    for tool, paths in (("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]),
-                        ("dnf5", ["usr/lib/sysimage/libdnf5"]),
-                        ("dpkg", ["var/lib/dpkg"]),
-                        (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"])):
+    for tool, paths in (
+        ("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]),
+        ("dnf5", ["usr/lib/sysimage/libdnf5"]),
+        ("dpkg", ["var/lib/dpkg"]),
+        (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"]),
+    ):  # fmt: skip
         if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary(tool, root=context.root):
             remove += [context.root / p for p in paths if (context.root / p).exists()]
 
diff --git a/mkosi/installer/apt.py b/mkosi/installer/apt.py
index c7c983f99..23435becf 100644
--- a/mkosi/installer/apt.py
+++ b/mkosi/installer/apt.py
@@ -71,7 +71,8 @@ class Apt(PackageManager):
 
         return {
             **{
-                command: cmd + cls.env_cmd(context) + cls.cmd(context, command) for command in (
+                command: cmd + cls.env_cmd(context) + cls.cmd(context, command)
+                for command in (
                     "apt",
                     "apt-cache",
                     "apt-cdrom",
@@ -84,16 +85,17 @@ class Apt(PackageManager):
                 )
             },
             **{
-                command: cmd + cls.dpkg_cmd(command) for command in(
+                command: cmd + cls.dpkg_cmd(command)
+                for command in (
                     "dpkg",
                     "dpkg-query",
                 )
             },
-            "mkosi-install"  : ["apt-get", "install"],
-            "mkosi-upgrade"  : ["apt-get", "upgrade"],
-            "mkosi-remove"   : ["apt-get", "purge"],
+            "mkosi-install": ["apt-get", "install"],
+            "mkosi-upgrade": ["apt-get", "upgrade"],
+            "mkosi-remove": ["apt-get", "purge"],
             "mkosi-reinstall": ["apt-get", "install", "--reinstall"],
-        }
+        }  # fmt: skip
 
     @classmethod
     def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None:
@@ -130,7 +132,7 @@ class Apt(PackageManager):
                 die(
                     f"Keyring for repo {repo.url} not found at {repo.signedby}",
                     hint="Make sure the right keyring package (e.g. debian-archive-keyring, kali-archive-keyring "
-                         "or ubuntu-keyring) is installed",
+                    "or ubuntu-keyring) is installed",
                 )
 
         with sources.open("w") as f:
@@ -141,7 +143,7 @@ class Apt(PackageManager):
     def finalize_environment(cls, context: Context) -> dict[str, str]:
         env = {
             "APT_CONFIG": "/etc/apt.conf",
-            "DEBIAN_FRONTEND" : "noninteractive",
+            "DEBIAN_FRONTEND": "noninteractive",
             "DEBCONF_INTERACTIVE_SEEN": "true",
         }
 
@@ -180,14 +182,14 @@ class Apt(PackageManager):
             "-o", "DPkg::Use-Pty=false",
             "-o", "DPkg::Install::Recursive::Minimum=1000",
             "-o", "pkgCacheGen::ForceEssential=,",
-        ]
+        ]  # fmt: skip
 
         if not context.config.repository_key_check:
             cmdline += [
                 "-o", "Acquire::AllowInsecureRepositories=true",
                 "-o", "Acquire::AllowDowngradeToInsecureRepositories=true",
                 "-o", "APT::Get::AllowUnauthenticated=true",
-            ]
+            ]  # fmt: skip
 
         if not context.config.with_docs:
             cmdline += [f"--option=DPkg::Options::=--path-exclude=/{glob}" for glob in cls.documentation_exclude_globs]
@@ -197,7 +199,7 @@ class Apt(PackageManager):
             cmdline += [
                 "-o", f"Acquire::http::Proxy={context.config.proxy_url}",
                 "-o", f"Acquire::https::Proxy={context.config.proxy_url}",
-            ]
+            ]  # fmt: skip
 
         return cmdline
 
@@ -276,4 +278,4 @@ class Apt(PackageManager):
                 "-o", "Dir::Etc::sourceparts=-",
                 "-o", "APT::Get::List-Cleanup=0",
             ],
-        )
+        )  # fmt: skip
diff --git a/mkosi/installer/dnf.py b/mkosi/installer/dnf.py
index 39bbbe800..83fb48ad0 100644
--- a/mkosi/installer/dnf.py
+++ b/mkosi/installer/dnf.py
@@ -25,22 +25,18 @@ class Dnf(PackageManager):
 
     @classmethod
     def cache_subdirs(cls, cache: Path) -> list[Path]:
-        return [
-            p / "packages"
-            for p in cache.iterdir()
-            if p.is_dir() and "-" in p.name and "mkosi" not in p.name
-        ]
+        return [p / "packages" for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name]
 
     @classmethod
     def scripts(cls, context: Context) -> dict[str, list[PathString]]:
         return {
             "dnf": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context),
             "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(),
-            "mkosi-install"  : ["dnf", "install"],
-            "mkosi-upgrade"  : ["dnf", "upgrade"],
-            "mkosi-remove"   : ["dnf", "remove"],
+            "mkosi-install": ["dnf", "install"],
+            "mkosi-upgrade": ["dnf", "upgrade"],
+            "mkosi-remove": ["dnf", "remove"],
             "mkosi-reinstall": ["dnf", "reinstall"],
-        }
+        }  # fmt: skip
 
     @classmethod
     def setup(cls, context: Context, repositories: Sequence[RpmRepository], filelists: bool = True) -> None:
@@ -112,9 +108,9 @@ class Dnf(PackageManager):
 
     @classmethod
     def cmd(
-            cls,
-            context: Context,
-            cached_metadata: bool = True,
+        cls,
+        context: Context,
+        cached_metadata: bool = True,
     ) -> list[PathString]:
         dnf = cls.executable(context.config)
 
@@ -131,7 +127,7 @@ class Dnf(PackageManager):
             f"--setopt=install_weak_deps={int(context.config.with_recommends)}",
             "--setopt=check_config_file_age=0",
             "--disable-plugin=*" if dnf.endswith("dnf5") else "--disableplugin=*",
-        ]
+        ]  # fmt: skip
 
         for plugin in ("builddep", "versionlock"):
             cmdline += ["--enable-plugin", plugin] if dnf.endswith("dnf5") else ["--enableplugin", plugin]
@@ -216,8 +212,10 @@ class Dnf(PackageManager):
 
     @classmethod
     def createrepo(cls, context: Context) -> None:
-        run(["createrepo_c", context.repository],
-            sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]))
+        run(
+            ["createrepo_c", context.repository],
+            sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]),
+        )
 
"etc/yum.repos.d/mkosi-local.repo").write_text( textwrap.dedent( diff --git a/mkosi/installer/pacman.py b/mkosi/installer/pacman.py index f8ba927c1..e8e43c589 100644 --- a/mkosi/installer/pacman.py +++ b/mkosi/installer/pacman.py @@ -42,11 +42,11 @@ class Pacman(PackageManager): def scripts(cls, context: Context) -> dict[str, list[PathString]]: return { "pacman": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context), - "mkosi-install" : ["pacman", "--sync", "--needed"], - "mkosi-upgrade" : ["pacman", "--sync", "--sysupgrade", "--needed"], - "mkosi-remove" : ["pacman", "--remove", "--recursive", "--nosave"], + "mkosi-install": ["pacman", "--sync", "--needed"], + "mkosi-upgrade": ["pacman", "--sync", "--sysupgrade", "--needed"], + "mkosi-remove": ["pacman", "--remove", "--recursive", "--nosave"], "mkosi-reinstall": ["pacman", "--sync"], - } + } # fmt: skip @classmethod def mounts(cls, context: Context) -> list[PathString]: @@ -55,7 +55,7 @@ class Pacman(PackageManager): # pacman writes downloaded packages to the first writable cache directory. We don't want it to write to our # local repository directory so we expose it as a read-only directory to pacman. "--ro-bind", context.repository, "/var/cache/pacman/mkosi", - ] + ] # fmt: skip if (context.root / "var/lib/pacman/local").exists(): # pacman reuses the same directory for the sync databases and the local database containing the list of @@ -151,7 +151,7 @@ class Pacman(PackageManager): "--arch", context.config.distribution.architecture(context.config.architecture), "--color", "auto", "--noconfirm", - ] + ] # fmt: skip @classmethod def invoke( @@ -181,7 +181,7 @@ class Pacman(PackageManager): "repo-add", "--quiet", context.repository / "mkosi.db.tar", - *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name)) + *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name)), ], sandbox=context.sandbox(binary="repo-add", options=["--bind", context.repository, context.repository]), ) @@ -198,7 +198,4 @@ class Pacman(PackageManager): ) # pacman can't sync a single repository, so we go behind its back and do it ourselves. - shutil.move( - context.repository / "mkosi.db.tar", - context.metadata_dir / "lib/pacman/sync/mkosi.db" - ) + shutil.move(context.repository / "mkosi.db.tar", context.metadata_dir / "lib/pacman/sync/mkosi.db") diff --git a/mkosi/installer/rpm.py b/mkosi/installer/rpm.py index 39ef644f0..ff99f0d32 100644 --- a/mkosi/installer/rpm.py +++ b/mkosi/installer/rpm.py @@ -36,20 +36,12 @@ def find_rpm_gpgkey( @overload def find_rpm_gpgkey( - context: Context, - key: str, - fallback: Optional[str] = None, - *, - required: Literal[False] + context: Context, key: str, fallback: Optional[str] = None, *, required: Literal[False] ) -> Optional[str]: ... 
 def find_rpm_gpgkey(
-    context: Context,
-    key: str,
-    fallback: Optional[str] = None,
-    *,
-    required: bool = True
+    context: Context, key: str, fallback: Optional[str] = None, *, required: bool = True
 ) -> Optional[str]:
     root = context.config.tools() if context.config.tools_tree_certificates else Path("/")
 
@@ -63,8 +55,10 @@ def find_rpm_gpgkey(
         return fallback
 
     if required:
-        die(f"{key} GPG key not found in /usr/share/distribution-gpg-keys",
-            hint="Make sure the distribution-gpg-keys package is installed")
+        die(
+            f"{key} GPG key not found in /usr/share/distribution-gpg-keys",
+            hint="Make sure the distribution-gpg-keys package is installed",
+        )
 
     return None
 
@@ -78,8 +72,11 @@ def setup_rpm(context: Context, *, dbpath: str = "/usr/lib/sysimage/rpm") -> Non
     if not (confdir / "macros.dbpath").exists():
         (confdir / "macros.dbpath").write_text(f"%_dbpath {dbpath}")
 
-    plugindir = Path(run(["rpm", "--eval", "%{__plugindir}"],
-                         sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE).stdout.strip())
+    plugindir = Path(
+        run(
+            ["rpm", "--eval", "%{__plugindir}"], sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE
+        ).stdout.strip()
+    )
     if (plugindir := context.config.tools() / plugindir.relative_to("/")).exists():
         with (confdir / "macros.disable-plugins").open("w") as f:
             for plugin in plugindir.iterdir():
diff --git a/mkosi/installer/zypper.py b/mkosi/installer/zypper.py
index 98de90fb0..ff8708786 100644
--- a/mkosi/installer/zypper.py
+++ b/mkosi/installer/zypper.py
@@ -32,16 +32,16 @@ class Zypper(PackageManager):
             "install",
             "--download", "in-advance",
             "--recommends" if context.config.with_recommends else "--no-recommends",
-        ]
+        ]  # fmt: skip
 
         return {
             "zypper": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context),
-            "rpm"   : cls.apivfs_script_cmd(context) + rpm_cmd(),
-            "mkosi-install"  : install,
-            "mkosi-upgrade"  : ["zypper", "update"],
-            "mkosi-remove"   : ["zypper", "remove", "--clean-deps"],
+            "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(),
+            "mkosi-install": install,
+            "mkosi-upgrade": ["zypper", "update"],
+            "mkosi-remove": ["zypper", "remove", "--clean-deps"],
             "mkosi-reinstall": install + ["--force"],
-        }
+        }  # fmt: skip
 
     @classmethod
     def setup(cls, context: Context, repositories: Sequence[RpmRepository]) -> None:
@@ -138,8 +138,10 @@ class Zypper(PackageManager):
 
     @classmethod
     def createrepo(cls, context: Context) -> None:
-        run(["createrepo_c", context.repository],
-            sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]))
+        run(
+            ["createrepo_c", context.repository],
+            sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]),
+        )
 
         (context.sandbox_tree / "etc/zypp/repos.d/mkosi-local.repo").write_text(
             textwrap.dedent(
diff --git a/mkosi/kmod.py b/mkosi/kmod.py
index 9e38b4a78..e04d4fea9 100644
--- a/mkosi/kmod.py
+++ b/mkosi/kmod.py
@@ -15,7 +15,7 @@ from mkosi.util import chdir, parents_below
 
 def loaded_modules() -> list[str]:
     # Loaded modules are listed with underscores but the filenames might use dashes instead.
- return [fr"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines()] + return [rf"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines()] def filter_kernel_modules(root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str]) -> list[Path]: @@ -79,7 +79,7 @@ def resolve_module_dependencies( # modules than the max number of accepted CLI arguments, we split the modules list up into chunks. info = "" for i in range(0, len(nametofile.keys()), 8500): - chunk = list(nametofile.keys())[i:i+8500] + chunk = list(nametofile.keys())[i : i + 8500] info += run( ["modinfo", "--set-version", kver, "--null", *chunk], stdout=subprocess.PIPE, @@ -201,8 +201,7 @@ def process_kernel_modules( firmwared = Path("usr/lib/firmware") with complete_step("Applying kernel module filters"): - required = set( - gen_required_kernel_modules(root, kver, include=include, exclude=exclude)) + required = set(gen_required_kernel_modules(root, kver, include=include, exclude=exclude)) with chdir(root): modules = sorted(modulesd.rglob("*.ko*"), reverse=True) diff --git a/mkosi/log.py b/mkosi/log.py index 3895170b9..bb000759e 100644 --- a/mkosi/log.py +++ b/mkosi/log.py @@ -22,17 +22,17 @@ def terminal_is_dumb() -> bool: class Style: - bold: Final[str] = "\033[0;1;39m" if not terminal_is_dumb() else "" - blue: Final[str] = "\033[0;1;34m" if not terminal_is_dumb() else "" - gray: Final[str] = "\033[0;38;5;245m" if not terminal_is_dumb() else "" - red: Final[str] = "\033[31;1m" if not terminal_is_dumb() else "" - yellow: Final[str] = "\033[33;1m" if not terminal_is_dumb() else "" - reset: Final[str] = "\033[0m" if not terminal_is_dumb() else "" + # fmt: off + bold: Final[str] = "\033[0;1;39m" if not terminal_is_dumb() else "" + blue: Final[str] = "\033[0;1;34m" if not terminal_is_dumb() else "" + gray: Final[str] = "\033[0;38;5;245m" if not terminal_is_dumb() else "" + red: Final[str] = "\033[31;1m" if not terminal_is_dumb() else "" + yellow: Final[str] = "\033[33;1m" if not terminal_is_dumb() else "" + reset: Final[str] = "\033[0m" if not terminal_is_dumb() else "" + # fmt: on -def die(message: str, - *, - hint: Optional[str] = None) -> NoReturn: +def die(message: str, *, hint: Optional[str] = None) -> NoReturn: logging.error(f"{message}") if hint: logging.info(f"({hint})") @@ -84,7 +84,7 @@ class Formatter(logging.Formatter): logging.WARNING: logging.Formatter(f"‣ {Style.yellow}{fmt}{Style.reset}"), logging.ERROR: logging.Formatter(f"‣ {Style.red}{fmt}{Style.reset}"), logging.CRITICAL: logging.Formatter(f"‣ {Style.red}{Style.bold}{fmt}{Style.reset}"), - } + } # fmt: skip super().__init__(fmt, *args, **kwargs) diff --git a/mkosi/manifest.py b/mkosi/manifest.py index eebecff60..ed31b9caa 100644 --- a/mkosi/manifest.py +++ b/mkosi/manifest.py @@ -111,7 +111,7 @@ class Manifest: ], stdout=subprocess.PIPE, sandbox=self.context.sandbox(binary="rpm", options=["--ro-bind", self.context.root, "/buildroot"]), - ) + ) # fmt: skip packages = sorted(c.stdout.splitlines()) @@ -133,8 +133,8 @@ class Manifest: # packages that were installed in this execution of mkosi. We assume that the # upper layer is put together in one go, which currently is always true. 
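The log.py hunk above is the one place in this change that guards a whole region with "# fmt: off"/"# fmt: on" instead of a per-statement "# fmt: skip". A minimal sketch of how the two ruff directives differ; the names below are made up for illustration and are not part of the patch:

    # fmt: off
    # The formatter leaves everything between off/on untouched, so column
    # alignment across several assignments survives reformatting.
    bold   = "\033[0;1;39m"
    yellow = "\033[33;1m"
    # fmt: on

    # A trailing "# fmt: skip" exempts only the statement the comment is
    # attached to, here the assignment the closing bracket belongs to.
    styles = [
        bold, yellow,
    ]  # fmt: skip
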
diff --git a/mkosi/manifest.py b/mkosi/manifest.py
index eebecff60..ed31b9caa 100644
--- a/mkosi/manifest.py
+++ b/mkosi/manifest.py
@@ -111,7 +111,7 @@ class Manifest:
             ],
             stdout=subprocess.PIPE,
             sandbox=self.context.sandbox(binary="rpm", options=["--ro-bind", self.context.root, "/buildroot"]),
-        )
+        )  # fmt: skip
 
         packages = sorted(c.stdout.splitlines())
 
@@ -133,8 +133,8 @@ class Manifest:
                 # packages that were installed in this execution of mkosi. We assume that the
                 # upper layer is put together in one go, which currently is always true.
                 if (
-                    self.context.config.base_trees and
-                    datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp
+                    self.context.config.base_trees
+                    and datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp
                 ):
                     continue
 
@@ -173,15 +173,14 @@ class Manifest:
                 "dpkg-query",
                 "--admindir=/buildroot/var/lib/dpkg",
                 "--show",
-                "--showformat",
-                r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n',
+                "--showformat", r"${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n",
             ],
             stdout=subprocess.PIPE,
             sandbox=self.context.sandbox(
                 binary="dpkg-query",
                 options=["--ro-bind", self.context.root, "/buildroot"],
             ),
-        )
+        )  # fmt: skip
 
         packages = sorted(c.stdout.splitlines())
 
@@ -196,8 +195,8 @@ class Manifest:
                 # packages that were installed in this execution of mkosi. We assume that the
                 # upper layer is put together in one go, which currently is always true.
                 if (
-                    self.context.config.base_trees and
-                    datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) < self._init_timestamp
+                    self.context.config.base_trees
+                    and datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) < self._init_timestamp
                 ):
                     continue
 
diff --git a/mkosi/mounts.py b/mkosi/mounts.py
index 01ec0cb47..a2ddb60f8 100644
--- a/mkosi/mounts.py
+++ b/mkosi/mounts.py
@@ -77,7 +77,7 @@ def finalize_source_mounts(config: Config, *, ephemeral: bool) -> Iterator[list[
                     "--overlay-upperdir", upperdir,
                     "--overlay-workdir", workdir,
                     "--overlay", dst,
-                ]
+                ]  # fmt: skip
             else:
                 options += ["--bind", src, dst]
 
@@ -104,6 +104,5 @@ def finalize_crypto_mounts(config: Config) -> list[PathString]:
 
     return flatten(
         ("--symlink", src.readlink(), target) if src.is_symlink() else ("--ro-bind", src, target)
-        for src, target
-        in sorted(set(mounts), key=lambda s: s[1])
+        for src, target in sorted(set(mounts), key=lambda s: s[1])
     )
diff --git a/mkosi/qemu.py b/mkosi/qemu.py
index 39671d42f..6189d3645 100644
--- a/mkosi/qemu.py
+++ b/mkosi/qemu.py
@@ -49,7 +49,7 @@ from mkosi.util import StrEnum, current_home_dir, flock, flock_or_die, groupby,
 from mkosi.versioncomp import GenericVersion
 
 QEMU_KVM_DEVICE_VERSION = GenericVersion("9.0")
-VHOST_VSOCK_SET_GUEST_CID = 0x4008af60
+VHOST_VSOCK_SET_GUEST_CID = 0x4008AF60
 
 
 class QemuDeviceNode(StrEnum):
@@ -72,7 +72,7 @@ class QemuDeviceNode(StrEnum):
         }[self]
 
     def open(self) -> int:
-        return os.open(self.device(), os.O_RDWR|os.O_CLOEXEC|os.O_NONBLOCK)
+        return os.open(self.device(), os.O_RDWR | os.O_CLOEXEC | os.O_NONBLOCK)
 
     def available(self, log: bool = False) -> bool:
         try:
@@ -102,7 +102,7 @@ def hash_output(config: Config) -> "hashlib._Hash":
 
 
 def hash_to_vsock_cid(hash: "hashlib._Hash") -> int:
-    cid = int.from_bytes(hash.digest()[:4], byteorder='little')
+    cid = int.from_bytes(hash.digest()[:4], byteorder="little")
 
     # Make sure we don't return any of the well-known CIDs.
     return max(3, min(cid, 0xFFFFFFFF - 1))
 
@@ -128,7 +128,7 @@ def find_unused_vsock_cid(config: Config, vfd: int) -> int:
         if not vsock_cid_in_use(vfd, cid):
             return cid
 
-        hash.update(i.to_bytes(length=4, byteorder='little'))
+        hash.update(i.to_bytes(length=4, byteorder="little"))
 
     for i in range(64):
         cid = random.randint(0, 0xFFFFFFFF - 1)
@@ -140,8 +140,8 @@ def find_unused_vsock_cid(config: Config, vfd: int) -> int:
 
 
 class KernelType(StrEnum):
-    pe      = enum.auto()
-    uki     = enum.auto()
+    pe = enum.auto()
+    uki = enum.auto()
     unknown = enum.auto()
 
     @classmethod
@@ -328,7 +328,7 @@ def start_virtiofsd(
         "--no-announce-submounts",
         "--sandbox=chroot",
         f"--inode-file-handles={'prefer' if os.getuid() == 0 and not uidmap else 'never'}",
-    ]
+    ]  # fmt: skip
 
     if selinux:
         cmdline += ["--security-label"]
@@ -393,7 +393,7 @@ def start_virtiofsd(
             ],
             setup=scope + become_root_in_subuid_range_cmd() if scope and not uidmap else [],
         ),
-    ) as proc:
+    ) as proc:  # fmt: skip
         yield path
         proc.terminate()
@@ -426,7 +426,7 @@ def vsock_notify_handler() -> Iterator[tuple[str, dict[str, str]]]:
             with s:
                 data = []
                 try:
-                    while (buf := await loop.sock_recv(s, 4096)):
+                    while buf := await loop.sock_recv(s, 4096):
                         data.append(buf)
                 except ConnectionResetError:
                     logging.debug("vsock notify listener connection reset by peer")
@@ -511,12 +511,11 @@ def start_journal_remote(config: Config, sockfd: int) -> Iterator[None]:
         user=user if not scope else None,
         group=group if not scope else None,
         foreground=False,
-    ) as proc:
+    ) as proc:  # fmt: skip
         yield
         proc.terminate()
 
 
-
 @contextlib.contextmanager
 def start_journal_remote_vsock(config: Config) -> Iterator[str]:
     with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as sock:
@@ -549,6 +548,7 @@ def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]:
     tmp = src.parent / f"{src.name}-{uuid.uuid4().hex[:16]}"
 
     try:
+
         def copy() -> None:
             if config.output_format == OutputFormat.directory:
                 become_root_in_subuid_range()
@@ -567,7 +567,8 @@ def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]:
             )
 
             copy_tree(
-                src, tmp,
+                src,
+                tmp,
                 # Make sure the ownership is changed to the (fake) root user if the directory was not built as root.
                 preserve=config.output_format == OutputFormat.directory and src.stat().st_uid == 0,
                 use_subvolumes=config.use_subvolumes,
@@ -578,6 +579,7 @@ def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]:
         fork_and_wait(copy)
         yield tmp
     finally:
+
         def rm() -> None:
             if config.output_format == OutputFormat.directory:
                 become_root_in_subuid_range()
@@ -599,8 +601,8 @@ def qemu_version(config: Config, binary: Path) -> GenericVersion:
 
 def want_scratch(config: Config) -> bool:
     return config.runtime_scratch == ConfigFeature.enabled or (
-        config.runtime_scratch == ConfigFeature.auto and
-        config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None
+        config.runtime_scratch == ConfigFeature.auto
+        and config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None
     )
 
@@ -613,7 +615,7 @@ def generate_scratch_fs(config: Config) -> Iterator[Path]:
         run(
             [f"mkfs.{fs}", "-L", "scratch", *extra.split(), scratch.name],
             stdout=subprocess.DEVNULL,
-            sandbox=config.sandbox(binary= f"mkfs.{fs}", options=["--bind", scratch.name, scratch.name]),
+            sandbox=config.sandbox(binary=f"mkfs.{fs}", options=["--bind", scratch.name, scratch.name]),
         )
 
         yield Path(scratch.name)
@@ -627,8 +629,7 @@ def finalize_qemu_firmware(config: Config, kernel: Optional[Path]) -> QemuFirmwa
             else QemuFirmware.linux
         )
     elif (
-        config.output_format in (OutputFormat.cpio, OutputFormat.directory) or
-        config.architecture.to_efi() is None
+        config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None
     ):
         return QemuFirmware.linux
     else:
@@ -671,7 +672,7 @@ def finalize_firmware_variables(
                     "--ro-bind", config.secure_boot_certificate, config.secure_boot_certificate,
                 ],
             ),
-        )
+        )  # fmt: skip
     else:
         tools = Path("/") if any(qemu.is_relative_to(d) for d in config.extra_search_paths) else config.tools()
         vars = (
@@ -700,7 +701,7 @@ def apply_runtime_size(config: Config, image: Path) -> None:
             image,
         ],
         sandbox=config.sandbox(binary="systemd-repart", options=["--bind", image, image]),
-    )
+    )  # fmt: skip
 
 
 @contextlib.contextmanager
@@ -716,8 +717,10 @@ def finalize_state(config: Config, cid: int) -> Iterator[None]:
 
     with flock(INVOKING_USER.runtime_dir() / "machine"):
         if (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists():
-            die(f"Another virtual machine named {config.machine_or_name()} is already running",
-                hint="Use --machine to specify a different virtual machine name")
+            die(
+                f"Another virtual machine named {config.machine_or_name()} is already running",
+                hint="Use --machine to specify a different virtual machine name",
+            )
 
         p.write_text(
             json.dumps(
@@ -751,7 +754,7 @@ def scope_cmd(
     if os.getuid() != 0 and "DBUS_SESSION_BUS_ADDRESS" in os.environ and "XDG_RUNTIME_DIR" in os.environ:
         env = {
             "DBUS_SESSION_BUS_ADDRESS": os.environ["DBUS_SESSION_BUS_ADDRESS"],
-            "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"]
+            "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"],
         }
     elif os.getuid() == 0:
         if "DBUS_SYSTEM_ADDRESS" in os.environ:
@@ -777,13 +780,12 @@ def scope_cmd(
         *(["--uid", str(user)] if user is not None else []),
         *(["--gid", str(group)] if group is not None else []),
         *([f"--property={p}" for p in properties]),
-    ]
+    ]  # fmt: skip
 
 
 def register_machine(config: Config, pid: int, fname: Path) -> None:
-    if (
-        os.getuid() != 0 or
-        ("DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").exists())
+    if os.getuid() != 0 or (
+        "DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").exists()
     ):
         return
 
@@ -803,7 +805,7 @@ def register_machine(config: Config, pid: int, fname: Path) -> None:
                 "vm",
                 str(pid),
                 fname if fname.is_dir() else "",
-            ],
+            ],  # fmt: skip
             foreground=False,
             env=os.environ | config.environment,
             sandbox=config.sandbox(binary="busctl", relaxed=True),
@@ -824,9 +826,9 @@ def run_qemu(args: Args, config: Config) -> None:
         die(f"{config.output_format} images cannot be booted in qemu")
 
     if (
-        config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) and
-        config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux) and
-        not config.qemu_firmware.is_uefi()
+        config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp)
+        and config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux)
+        and not config.qemu_firmware.is_uefi()
     ):
         die(f"{config.output_format} images cannot be booted with the '{config.qemu_firmware}' firmware")
 
@@ -844,16 +846,15 @@ def run_qemu(args: Args, config: Config) -> None:
     # after unsharing the user namespace. To get around this, open all those device nodes early and pass them as file
    # descriptors to qemu later. Note that we can't pass the kvm file descriptor to qemu until version 9.0.
     qemu_device_fds = {
-        d: d.open()
-        for d in QemuDeviceNode
-        if d.feature(config) != ConfigFeature.disabled and d.available(log=True)
+        d: d.open() for d in QemuDeviceNode if d.feature(config) != ConfigFeature.disabled and d.available(log=True)
     }
 
     if not (qemu := config.find_binary(f"qemu-system-{config.architecture.to_qemu()}")):
         die("qemu not found.", hint=f"Is qemu-system-{config.architecture.to_qemu()} installed on the host system?")
 
-    have_kvm = ((qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or
-                (qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds))
+    have_kvm = (qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or (
+        qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds
+    )
 
     if config.qemu_kvm == ConfigFeature.enabled and not have_kvm:
         die("KVM acceleration requested but cannot access /dev/kvm")
@@ -878,12 +879,9 @@ def run_qemu(args: Args, config: Config) -> None:
 
     firmware = finalize_qemu_firmware(config, kernel)
 
-    if (
-        not kernel and
-        (
-            firmware == QemuFirmware.linux or
-            config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki)
-        )
+    if not kernel and (
+        firmware == QemuFirmware.linux
+        or config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki)
     ):
         if firmware.is_uefi():
             name = config.output if config.output_format == OutputFormat.uki else config.output_split_uki
@@ -901,10 +899,10 @@ def run_qemu(args: Args, config: Config) -> None:
     # A shared memory backend might increase ram usage so only add one if actually necessary for virtiofsd.
     shm = []
     if (
-        config.runtime_trees or
-        config.runtime_build_sources or
-        config.runtime_home or
-        config.output_format == OutputFormat.directory
+        config.runtime_trees
+        or config.runtime_build_sources
+        or config.runtime_home
+        or config.output_format == OutputFormat.directory
    ):
         shm = ["-object", f"memory-backend-memfd,id=mem,size={config.qemu_mem // 1024**2}M,share=on"]
 
@@ -924,7 +922,7 @@ def run_qemu(args: Args, config: Config) -> None:
         "-device", "virtio-balloon,free-page-reporting=on",
         "-no-user-config",
         *shm,
-    ]
+    ]  # fmt: skip
 
     if config.runtime_network == Network.user:
         cmdline += ["-nic", f"user,model={config.architecture.default_qemu_nic_model()}"]
@@ -957,14 +955,13 @@ def run_qemu(args: Args, config: Config) -> None:
             cid = config.qemu_vsock_cid
 
         if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid):
-            die(f"VSock connection ID {cid} is already in use by another virtual machine",
-                hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID")
+            die(
+                f"VSock connection ID {cid} is already in use by another virtual machine",
+                hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID",
+            )
 
         index = list(qemu_device_fds.keys()).index(QemuDeviceNode.vhost_vsock)
-        cmdline += [
-            "-device",
-            f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}"
-        ]
+        cmdline += ["-device", f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}"]
 
     cmdline += ["-cpu", "max"]
 
@@ -980,7 +977,7 @@ def run_qemu(args: Args, config: Config) -> None:
             "-device", "virtio-serial-pci,id=mkosi-virtio-serial-pci",
             "-device", "virtconsole,chardev=console",
             "-mon", "console",
-        ]
+        ]  # fmt: skip
 
     # QEMU has built-in logic to look for the BIOS firmware so we don't need to do anything special for that.
     if firmware.is_uefi():
@@ -998,7 +995,7 @@ def run_qemu(args: Args, config: Config) -> None:
         cmdline += [
             "-global", "ICH9-LPC.disable_s3=1",
             "-global", "driver=cfi.pflash01,property=secure,value=on",
-        ]
+        ]  # fmt: skip
 
     if config.qemu_cdrom and config.output_format in (OutputFormat.disk, OutputFormat.esp):
         # CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size 2048.
@@ -1016,7 +1013,7 @@ def run_qemu(args: Args, config: Config) -> None:
                     "--sector-size=2048",
                     "--copy-from", workdir(src),
                     workdir(fname),
-                ],
+                ],  # fmt: skip
                 sandbox=config.sandbox(
                     binary="systemd-repart",
                     options=[
@@ -1024,7 +1021,7 @@ def run_qemu(args: Args, config: Config) -> None:
                         "--ro-bind", src, workdir(src),
                     ],
                 ),
-            )
+            )  # fmt: skip
             stack.callback(lambda: fname.unlink())
         else:
             fname = stack.enter_context(
@@ -1033,12 +1030,8 @@ def run_qemu(args: Args, config: Config) -> None:
 
         apply_runtime_size(config, fname)
 
-        if (
-            kernel and
-            (
-                KernelType.identify(config, kernel) != KernelType.uki or
-                not config.architecture.supports_smbios(firmware)
-            )
+        if kernel and (
+            KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware)
         ):
             kcl = config.kernel_command_line + config.kernel_command_line_extra
         else:
@@ -1064,27 +1057,24 @@ def run_qemu(args: Args, config: Config) -> None:
                     fname,
                     name=config.machine_or_name(),
                     uidmap=False,
-                    selinux=bool(want_selinux_relabel(config, fname, fatal=False))),
+                    selinux=bool(want_selinux_relabel(config, fname, fatal=False)),
+                ),
             )
 
             cmdline += [
                 "-chardev", f"socket,id={sock.name},path={sock}",
                 "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag=root",
-            ]
+            ]  # fmt: skip
             kcl += ["root=root", "rootfstype=virtiofs"]
 
         credentials = dict(config.credentials)
 
         def add_virtiofs_mount(
-            sock: Path,
-            dst: PathString,
-            cmdline: list[PathString],
-            credentials: dict[str, str],
-            *, tag: str
+            sock: Path, dst: PathString, cmdline: list[PathString], credentials: dict[str, str], *, tag: str
         ) -> None:
             cmdline += [
                 "-chardev", f"socket,id={sock.name},path={sock}",
                 "-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag={tag}",
-            ]
+            ]  # fmt: skip
 
             if "fstab.extra" not in credentials:
                 credentials["fstab.extra"] = ""
@@ -1133,15 +1123,16 @@ def run_qemu(args: Args, config: Config) -> None:
             cmdline += [
                 "-drive", f"if=none,id=scratch,file={scratch},format=raw,discard=on,{cache}",
                 "-device", "scsi-hd,drive=scratch",
-            ]
+            ]  # fmt: skip
             kcl += [f"systemd.mount-extra=LABEL=scratch:/var/tmp:{config.distribution.filesystem()}"]
 
         if config.output_format == OutputFormat.cpio:
             cmdline += ["-initrd", fname]
         elif (
-            kernel and KernelType.identify(config, kernel) != KernelType.uki and
-            "-initrd" not in args.cmdline and
-            (config.output_dir_or_cwd() / config.output_split_initrd).exists()
+            kernel
+            and KernelType.identify(config, kernel) != KernelType.uki
+            and "-initrd" not in args.cmdline
+            and (config.output_dir_or_cwd() / config.output_split_initrd).exists()
         ):
             cmdline += ["-initrd", config.output_dir_or_cwd() / config.output_split_initrd]
 
@@ -1149,20 +1140,19 @@ def run_qemu(args: Args, config: Config) -> None:
             direct = fname.stat().st_size % resource.getpagesize() == 0
             ephemeral = config.ephemeral
             cache = f"cache.writeback=on,cache.direct={yes_no(direct)},cache.no-flush={yes_no(ephemeral)},aio=io_uring"
-            cmdline += ["-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}",
-                        "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1"]
-
-        if (
-            config.qemu_swtpm == ConfigFeature.enabled or
-            (
-                config.qemu_swtpm == ConfigFeature.auto and
-                firmware.is_uefi() and
-                config.find_binary("swtpm") is not None
-            )
+            cmdline += [
+                "-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}",
+                "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1",
+            ]  # fmt: skip
+
+        if config.qemu_swtpm == ConfigFeature.enabled or (
+            config.qemu_swtpm == ConfigFeature.auto and firmware.is_uefi() and config.find_binary("swtpm") is not None
         ):
             sock = stack.enter_context(start_swtpm(config))
-            cmdline += ["-chardev", f"socket,id=chrtpm,path={sock}",
-                        "-tpmdev", "emulator,id=tpm0,chardev=chrtpm"]
+            cmdline += [
+                "-chardev", f"socket,id=chrtpm,path={sock}",
+                "-tpmdev", "emulator,id=tpm0,chardev=chrtpm",
+            ]  # fmt: skip
 
             if config.architecture.is_x86_variant():
                 cmdline += ["-device", "tpm-tis,tpmdev=tpm0"]
@@ -1189,12 +1179,8 @@ def run_qemu(args: Args, config: Config) -> None:
             elif kernel:
                 kcl += [f"systemd.set_credential_binary={k}:{payload}"]
 
-        if (
-            kernel and
-            (
-                KernelType.identify(config, kernel) != KernelType.uki or
-                not config.architecture.supports_smbios(firmware)
-            )
+        if kernel and (
+            KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware)
         ):
             cmdline += ["-append", " ".join(kcl)]
         elif config.architecture.supports_smbios(firmware):
@@ -1277,14 +1263,18 @@ def run_qemu(args: Args, config: Config) -> None:
 def run_ssh(args: Args, config: Config) -> None:
     with flock(INVOKING_USER.runtime_dir() / "machine"):
         if not (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists():
-            die(f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}",
-                hint="Is the machine running and was it built with Ssh=yes and QemuVsock=yes?")
+            die(
+                f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}",
+                hint="Is the machine running and was it built with Ssh=yes and QemuVsock=yes?",
+            )
 
         state = json.loads(p.read_text())
 
     if not state["SshKey"]:
-        die("An SSH key must be configured when booting the image to use 'mkosi ssh'",
-            hint="Use 'mkosi genkey' to generate a new SSH key and certificate")
+        die(
+            "An SSH key must be configured when booting the image to use 'mkosi ssh'",
+            hint="Use 'mkosi genkey' to generate a new SSH key and certificate",
+        )
 
     cmd: list[PathString] = [
         "ssh",
@@ -1296,7 +1286,7 @@ def run_ssh(args: Args, config: Config) -> None:
         "-o", "LogLevel=ERROR",
         "-o", f"ProxyCommand={state['ProxyCommand']}",
         "root@mkosi",
-    ]
+    ]  # fmt: skip
 
     cmd += args.cmdline
 
diff --git a/mkosi/run.py b/mkosi/run.py
index 261c017a2..644becf28 100644
--- a/mkosi/run.py
+++ b/mkosi/run.py
@@ -82,10 +82,10 @@ def uncaught_exception_handler(exit: Callable[[int], NoReturn] = sys.exit) -> It
         # Failures from self come from the forks we spawn to build images in a user namespace. We've already done all
         # the logging for those failures so we don't log stacktraces for those either.
         if (
-            ARG_DEBUG.get() and
-            e.cmd and
-            str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn") and
-            "qemu-system" not in str(e.cmd[0])
+            ARG_DEBUG.get()
+            and e.cmd
+            and str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn")
+            and "qemu-system" not in str(e.cmd[0])
         ):
             sys.excepthook(*ensure_exc_info())
     except BaseException:
@@ -125,7 +125,7 @@ def log_process_failure(sandbox: Sequence[str], cmdline: Sequence[str], returnco
         logging.error(f"{cmdline[0]} not found.")
     else:
         logging.error(
-            f"\"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}\" returned non-zero exit code "
+            f'"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}" returned non-zero exit code '
             f"{returncode}."
         )
 
@@ -411,8 +411,7 @@ def finalize_passwd_mounts(root: PathString) -> list[PathString]:
     directory instead of from the host.
""" return flatten( - ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}") - for f in ("passwd", "group", "shadow", "gshadow") + ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}") for f in ("passwd", "group", "shadow", "gshadow") ) @@ -420,7 +419,7 @@ def network_options(*, network: bool) -> list[PathString]: return [ "--setenv", "SYSTEMD_OFFLINE", one_zero(network), *(["--unshare-net"] if not network else []), - ] + ] # fmt: skip @contextlib.contextmanager @@ -444,6 +443,7 @@ def vartmpdir() -> Iterator[Path]: (d / "work").rmdir() except OSError as e: if e.errno == errno.ENOTEMPTY: + def remove() -> None: acquire_privileges() shutil.rmtree(d) @@ -480,14 +480,14 @@ def sandbox_cmd( # apivfs_script_cmd() and chroot_script_cmd() are executed from within the sandbox, but they still use # sandbox.py, so we make sure it is available inside the sandbox so it can be executed there as well. "--ro-bind", Path(mkosi.sandbox.__file__), "/sandbox.py", - ] + ] # fmt: skip if overlay and (overlay / "usr").exists(): cmdline += [ "--overlay-lowerdir", tools / "usr" "--overlay-lowerdir", overlay / "usr", "--overlay", "/usr", - ] + ] # fmt: skip else: cmdline += ["--ro-bind", tools / "usr", "/usr"] @@ -534,7 +534,7 @@ def sandbox_cmd( "--dir", "/var/log", "--unshare-ipc", "--symlink", "../proc/self/mounts", "/etc/mtab", - ] + ] # fmt: skip if devices: cmdline += ["--bind", "/sys", "/sys", "--bind", "/dev", "/dev"] @@ -574,7 +574,7 @@ def sandbox_cmd( "--overlay-upperdir", tmp or "tmpfs", *(["--overlay-workdir", str(work)] if work else []), "--overlay", Path("/") / d, - ] + ] # fmt: skip elif not relaxed: if tmp: cmdline += ["--bind", tmp, Path("/") / d] @@ -602,7 +602,7 @@ def apivfs_options(*, root: Path = Path("/buildroot")) -> list[PathString]: # Make sure anything running in the root directory thinks it's in a container. $container can't always # be accessed so we write /run/host/container-manager as well which is always accessible. 
"--write", "mkosi", root / "run/host/container-manager", - ] + ] # fmt: skip def chroot_options() -> list[PathString]: @@ -618,7 +618,7 @@ def chroot_options() -> list[PathString]: "--setenv", "HOME", "/", "--setenv", "PATH", "/usr/bin:/usr/sbin", "--setenv", "BUILDROOT", "/", - ] + ] # fmt: skip @contextlib.contextmanager @@ -636,7 +636,7 @@ def chroot_cmd( *network_options(network=network), *apivfs_options(root=Path("/")), *chroot_options(), - ] + ] # fmt: skip if network and Path("/etc/resolv.conf").exists(): cmdline += ["--ro-bind", "/etc/resolv.conf", "/etc/resolv.conf"] diff --git a/mkosi/sandbox.py b/mkosi/sandbox.py index 581bbd7a1..34fbde4f3 100755 --- a/mkosi/sandbox.py +++ b/mkosi/sandbox.py @@ -54,6 +54,7 @@ PR_CAP_AMBIENT_RAISE = 2 SCMP_ACT_ALLOW = 0x7FFF0000 SCMP_ACT_ERRNO = 0x00050000 + class mount_attr(ctypes.Structure): _fields_ = [ ("attr_set", ctypes.c_uint64), @@ -231,12 +232,21 @@ def mount_rbind(src: str, dst: str, attrs: int = 0) -> None: try: libc.mount_setattr.argtypes = ( - ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_uint, + ctypes.c_void_p, + ctypes.c_size_t, ) r = libc.mount_setattr(fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) except AttributeError: libc.syscall.argtypes = ( - ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t, + ctypes.c_long, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_uint, + ctypes.c_void_p, + ctypes.c_size_t, ) r = libc.syscall(NR_mount_setattr, fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0) @@ -248,7 +258,12 @@ def mount_rbind(src: str, dst: str, attrs: int = 0) -> None: r = libc.move_mount(fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) except AttributeError: libc.syscall.argtypes = ( - ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, + ctypes.c_long, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_int, + ctypes.c_char_p, + ctypes.c_uint, ) r = libc.syscall(NR_move_mount, fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH) @@ -387,14 +402,15 @@ class FSOperation: # Drop all bind mounts that are mounted from beneath another bind mount to the same # location within the new rootfs. optimized = [ - m for m in binds + m + for m in binds if not any( - m != n and - m.readonly == n.readonly and - m.required == n.required and - is_relative_to(m.src, n.src) and - is_relative_to(m.dst, n.dst) and - os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst) + m != n + and m.readonly == n.readonly + and m.required == n.required + and is_relative_to(m.src, n.src) + and is_relative_to(m.dst, n.dst) + and os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst) for n in binds ) ] @@ -602,8 +618,8 @@ class OverlayOperation(FSOperation): mount("overlayfs", dst, "overlay", 0, ",".join(options)) -ANSI_HIGHLIGHT = "\x1B[0;1;39m" if os.isatty(2) else "" -ANSI_NORMAL = "\x1B[0m" if os.isatty(2) else "" +ANSI_HIGHLIGHT = "\x1b[0;1;39m" if os.isatty(2) else "" +ANSI_NORMAL = "\x1b[0m" if os.isatty(2) else "" HELP = f"""\ mkosi-sandbox [OPTIONS...] COMMAND [ARGUMENTS...] @@ -638,6 +654,7 @@ mkosi-sandbox [OPTIONS...] COMMAND [ARGUMENTS...] 
 See the mkosi-sandbox(1) man page for details.\
 """
 
+
 def main() -> None:
     # We don't use argparse as it takes +- 10ms to import and since this is purely for internal
     # use, it's not necessary to have good UX for this CLI interface so it's trivial to write
@@ -764,8 +781,8 @@ def main() -> None:
     os.chdir("/tmp")
 
     with umask(~0o755):
-        os.mkdir("newroot") # This is where we set up the sandbox rootfs
-        os.mkdir("oldroot") # This is the old rootfs which is used as the source for mounts in the new rootfs.
+        os.mkdir("newroot")  # This is where we set up the sandbox rootfs
+        os.mkdir("oldroot")  # This is the old rootfs which is used as the source for mounts in the new rootfs.
 
     # Make sure that newroot is a mountpoint.
     mount("newroot", "newroot", "", MS_BIND | MS_REC, "")
diff --git a/mkosi/sysupdate.py b/mkosi/sysupdate.py
index efb3cf7ef..b2201bab7 100644
--- a/mkosi/sysupdate.py
+++ b/mkosi/sysupdate.py
@@ -15,8 +15,10 @@ def run_sysupdate(args: Args, config: Config) -> None:
         die("SplitArtifacts= must be enabled to be able to use mkosi sysupdate")
 
     if not config.sysupdate_dir:
-        die("No sysupdate definitions directory specified",
-            hint="Specify a directory containing systemd-sysupdate transfer definitions with SysupdateDirectory=")
+        die(
+            "No sysupdate definitions directory specified",
+            hint="Specify a directory containing systemd-sysupdate transfer definitions with SysupdateDirectory=",
+        )
 
     if not (sysupdate := config.find_binary("systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate")):
         die("Could not find systemd-sysupdate")
@@ -26,7 +28,7 @@ def run_sysupdate(args: Args, config: Config) -> None:
         "--definitions", config.sysupdate_dir,
         "--transfer-source", config.output_dir_or_cwd(),
         *args.cmdline,
-    ]
+    ]  # fmt: skip
 
     run(
         cmd,
@@ -42,6 +44,6 @@ def run_sysupdate(args: Args, config: Config) -> None:
             options=[
                 *(["--bind", "/boot", "/boot"] if Path("/boot").exists() else []),
                 *(["--bind", "/efi", "/efi"] if Path("/efi").exists() else []),
-            ]
+            ],
         ),
     )
diff --git a/mkosi/tree.py b/mkosi/tree.py
index fb7d29631..214511539 100644
--- a/mkosi/tree.py
+++ b/mkosi/tree.py
@@ -28,7 +28,9 @@ def cp_version(*, sandbox: SandboxProtocol = nosandbox) -> GenericVersion:
             ["cp", "--version"],
             sandbox=sandbox(binary="cp"),
             stdout=subprocess.PIPE,
-        ).stdout.splitlines()[0].split()[3]
+        )
+        .stdout.splitlines()[0]
+        .split()[3]
     )
 
 
@@ -51,7 +53,7 @@ def make_tree(
         result = run(
             ["btrfs", "subvolume", "create", workdir(path, sandbox)],
             sandbox=sandbox(binary="btrfs", options=["--bind", path.parent, workdir(path.parent, sandbox)]),
-            check=use_subvolumes == ConfigFeature.enabled
+            check=use_subvolumes == ConfigFeature.enabled,
         ).returncode
     else:
         result = 1
@@ -92,7 +94,7 @@ def copy_tree(
     options: list[PathString] = [
         "--ro-bind", src, workdir(src, sandbox),
         "--bind", dst.parent, workdir(dst.parent, sandbox),
-    ]
+    ]  # fmt: skip
 
     def copy() -> None:
         cmdline: list[PathString] = [
@@ -102,7 +104,8 @@ def copy_tree(
             f"--preserve=mode,links{',timestamps,ownership,xattr' if preserve else ''}",
             "--reflink=auto",
             "--copy-contents",
-            workdir(src, sandbox), workdir(dst, sandbox),
+            workdir(src, sandbox),
+            workdir(dst, sandbox),
         ]
 
         if dst.exists() and dst.is_dir() and any(dst.iterdir()) and cp_version(sandbox=sandbox) >= "9.5":
@@ -118,16 +121,12 @@ def copy_tree(
 
     # Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume.
     if (
-        use_subvolumes == ConfigFeature.disabled or
-        not preserve or
-        not is_subvolume(src) or
-        (dst.exists() and (not dst.is_dir() or any(dst.iterdir())))
+        use_subvolumes == ConfigFeature.disabled
+        or not preserve
+        or not is_subvolume(src)
+        or (dst.exists() and (not dst.is_dir() or any(dst.iterdir())))
     ):
-        with (
-            preserve_target_directories_stat(src, dst)
-            if not preserve
-            else contextlib.nullcontext()
-        ):
+        with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext():
             copy()
 
         return dst
@@ -143,11 +142,7 @@ def copy_tree(
     ).returncode
 
     if result != 0:
-        with (
-            preserve_target_directories_stat(src, dst)
-            if not preserve
-            else contextlib.nullcontext()
-        ):
+        with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext():
             copy()
 
     return dst
@@ -162,14 +157,15 @@ def rmtree(*paths: Path, sandbox: SandboxProtocol = nosandbox) -> None:
     if subvolumes := sorted({p for p in paths if p.exists() and is_subvolume(p)}):
         # Silence and ignore failures since when not running as root, this will fail with a permission error unless the
         # btrfs filesystem is mounted with user_subvol_rm_allowed.
-        run(["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)],
+        run(
+            ["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)],
             check=False,
             sandbox=sandbox(
-                binary="btrfs",
-                options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes)
+                binary="btrfs", options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes)
             ),
             stdout=subprocess.DEVNULL if not ARG_DEBUG.get() else None,
-            stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None)
+            stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None,
+        )
 
     filtered = sorted({p for p in paths if p.exists() or p.is_symlink()})
     if filtered:
@@ -187,7 +183,7 @@ def move_tree(
     dst: Path,
     *,
     use_subvolumes: ConfigFeature = ConfigFeature.disabled,
-    sandbox: SandboxProtocol = nosandbox
+    sandbox: SandboxProtocol = nosandbox,
 ) -> Path:
     src = src.absolute()
     dst = dst.absolute()
diff --git a/mkosi/types.py b/mkosi/types.py
index aa7b7b8fb..3b3b6077f 100644
--- a/mkosi/types.py
+++ b/mkosi/types.py
@@ -24,5 +24,6 @@ PathString = Union[Path, str]
 # https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L224
 _T_co = TypeVar("_T_co", covariant=True)
 
+
 class SupportsRead(Protocol[_T_co]):
     def read(self, __length: int = ...) -> _T_co: ...
diff --git a/mkosi/user.py b/mkosi/user.py
index 407d981c2..e74d4d182 100644
--- a/mkosi/user.py
+++ b/mkosi/user.py
@@ -73,7 +73,7 @@ class INVOKING_USER:
         # If we created a file/directory in a parent directory owned by a regular user, make sure the path and any
         # parent directories are owned by the invoking user as well.
-        if (q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None)):
+        if q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None):
             st = q.stat()
             os.chown(path, st.st_uid, st.st_gid)
 
@@ -133,14 +133,14 @@ def become_root_in_subuid_range() -> None:
         0, subuid, SUBRANGE - 100,
         SUBRANGE - 100, os.getuid(), 1,
         SUBRANGE - 100 + 1, subuid + SUBRANGE - 100 + 1, 99
-    ]
+    ]  # fmt: skip
     newgidmap = [
         "flock", "--exclusive", "--close", lock, "newgidmap", pid,
         0, subgid, SUBRANGE - 100,
         SUBRANGE - 100, os.getgid(), 1,
         SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99
-    ]
+    ]  # fmt: skip
 
     # newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping to the
     # process in the user namespace. The mapping can only be assigned after the user namespace has been unshared.
@@ -183,6 +183,6 @@ def become_root_in_subuid_range_cmd() -> list[str]:
         "--map-groups", f"{SUBRANGE - 100}:{os.getgid()}:1",
         "--map-groups", f"{SUBRANGE - 100 + 1}:{subgid + SUBRANGE - 100 + 1}:99",
         "--keep-caps",
-    ]
+    ]  # fmt: skip
 
     return [str(x) for x in cmd]
diff --git a/mkosi/util.py b/mkosi/util.py
index 1afabdb0a..1cd804385 100644
--- a/mkosi/util.py
+++ b/mkosi/util.py
@@ -121,7 +121,7 @@ def make_executable(*paths: Path) -> None:
 
 @contextlib.contextmanager
 def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]:
-    fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY)
+    fd = os.open(path, os.O_CLOEXEC | os.O_RDONLY)
     try:
         fcntl.fcntl(fd, fcntl.FD_CLOEXEC)
         logging.debug(f"Acquiring lock on {path}")
@@ -135,15 +135,17 @@ def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]:
 @contextlib.contextmanager
 def flock_or_die(path: Path) -> Iterator[Path]:
     try:
-        with flock(path, fcntl.LOCK_EX|fcntl.LOCK_NB):
+        with flock(path, fcntl.LOCK_EX | fcntl.LOCK_NB):
             yield path
     except OSError as e:
         if e.errno != errno.EWOULDBLOCK:
             raise e
 
-        die(f"Cannot lock {path} as it is locked by another process",
+        die(
+            f"Cannot lock {path} as it is locked by another process",
             hint="Maybe another mkosi process is still using it? Use Ephemeral=yes to enable booting multiple "
-            "instances of the same image")
+            "instances of the same image",
+        )
 
 
 @contextlib.contextmanager
@@ -183,7 +185,7 @@ class StrEnum(enum.Enum):
 
 def parents_below(path: Path, below: Path) -> list[Path]:
     parents = list(path.parents)
-    return parents[:parents.index(below)]
+    return parents[: parents.index(below)]
 
 
 @contextlib.contextmanager
@@ -192,10 +194,7 @@ def resource_path(mod: ModuleType) -> Iterator[Path]:
     with as_file(t) as p:
         # Make sure any temporary directory that the resources are unpacked in is accessible to the invoking user so
         # that any commands executed as the invoking user can access files within it.
-        if (
-            p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and
-            stat.S_IMODE(p.parent.stat().st_mode) == 0o700
-        ):
+        if p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and stat.S_IMODE(p.parent.stat().st_mode) == 0o700:
             p.parent.chmod(0o755)
 
         yield p
@@ -204,7 +203,7 @@ def resource_path(mod: ModuleType) -> Iterator[Path]:
 
 def hash_file(path: Path) -> str:
     # TODO Replace with hashlib.file_digest after dropping support for Python 3.10.
     h = hashlib.sha256()
-    b  = bytearray(16 * 1024**2)
+    b = bytearray(16 * 1024**2)
     mv = memoryview(b)
 
     with path.open("rb", buffering=0) as f:
diff --git a/mkosi/versioncomp.py b/mkosi/versioncomp.py
index 8e2e06423..a6c45865a 100644
--- a/mkosi/versioncomp.py
+++ b/mkosi/versioncomp.py
@@ -21,6 +21,7 @@ class GenericVersion:
     @classmethod
     def compare_versions(cls, v1: str, v2: str) -> int:
         """Implements comparison according to UAPI Group Version Format Specification"""
+
         def rstrip_invalid_version_chars(s: str) -> str:
             valid_version_chars = {*string.ascii_letters, *string.digits, "~", "-", "^", "."}
             for i, c in enumerate(s):
@@ -84,9 +85,9 @@ class GenericVersion:
                 v2 = v2.removeprefix("^")
             elif v1.startswith("^"):
                 # TODO: bug?
-                return cls._LEFT_SMALLER #cls._RIGHT_SMALLER
+                return cls._LEFT_SMALLER  # cls._RIGHT_SMALLER
             elif v2.startswith("^"):
-                return cls._RIGHT_SMALLER #cls._LEFT_SMALLER
+                return cls._RIGHT_SMALLER  # cls._LEFT_SMALLER
 
         # If the remaining part of one of strings starts with ".": if the other remaining part
         # does not start with ., the string with . compares lower. Otherwise, both dot
diff --git a/mkosi/vmspawn.py b/mkosi/vmspawn.py
index 78bbaec4d..77aadbc75 100644
--- a/mkosi/vmspawn.py
+++ b/mkosi/vmspawn.py
@@ -45,7 +45,7 @@ def run_vmspawn(args: Args, config: Config) -> None:
     if not kernel.exists():
         die(
             f"Kernel or UKI not found at {kernel}",
-            hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn"
+            hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn",
         )
 
     cmdline: list[PathString] = [
@@ -56,7 +56,7 @@ def run_vmspawn(args: Args, config: Config) -> None:
         "--vsock", config.qemu_vsock.to_tristate(),
         "--tpm", config.qemu_swtpm.to_tristate(),
         "--secure-boot", yes_no(config.secure_boot),
-    ]
+    ]  # fmt: skip
 
     if config.runtime_network == Network.user:
         cmdline += ["--network-user-mode"]
diff --git a/tests/__init__.py b/tests/__init__.py
index fb31afc5a..d643d2eec 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -77,7 +77,7 @@ class Image:
             user=user,
             group=group,
            env=os.environ,
-        )
+        )  # fmt: skip
 
     def build(self, options: Sequence[PathString] = (), args: Sequence[str] = ()) -> CompletedProcess:
         kcl = [
@@ -107,7 +107,7 @@ class Image:
             "--output-dir", self.output_dir,
             *(["--debug-shell"] if self.config.debug_shell else []),
             *options,
-        ]
+        ]  # fmt: skip
 
         self.mkosi("summary", options, user=self.uid, group=self.uid)
 
diff --git a/tests/test_boot.py b/tests/test_boot.py
index 3ee3af7bf..a0cdff3cd 100644
--- a/tests/test_boot.py
+++ b/tests/test_boot.py
@@ -19,8 +19,7 @@ pytestmark = pytest.mark.integration
 def have_vmspawn() -> bool:
     return (
         find_binary("systemd-vmspawn") is not None
-        and GenericVersion(run(["systemd-vmspawn", "--version"],
-                               stdout=subprocess.PIPE).stdout.strip()) >= 256
+        and GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256
     )
 
 
diff --git a/tests/test_config.py b/tests/test_config.py
index 3cbdce993..51c40ea77 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -50,11 +50,11 @@ def test_compression_enum_bool() -> None:
 def test_compression_enum_str() -> None:
     assert str(Compression.none) == "none"
     assert str(Compression.zstd) == "zstd"
-    assert str(Compression.zst)  == "zstd"
-    assert str(Compression.xz)   == "xz"
-    assert str(Compression.bz2)  == "bz2"
-    assert str(Compression.gz)   == "gz"
-    assert str(Compression.lz4)  == "lz4"
+    assert str(Compression.zst) == "zstd"
+    assert str(Compression.xz) == "xz"
str(Compression.bz2) == "bz2" + assert str(Compression.gz) == "gz" + assert str(Compression.lz4) == "lz4" assert str(Compression.lzma) == "lzma" @@ -129,7 +129,7 @@ def test_parse_config(tmp_path: Path) -> None: "--credential", "my.cred=cli.value", "--repositories", "universe", ] - ) + ) # fmt: skip # Values from the CLI should take priority. assert config.distribution == Distribution.fedora @@ -145,7 +145,7 @@ def test_parse_config(tmp_path: Path) -> None: "--credential", "", "--repositories", "", ] - ) + ) # fmt: skip # Empty values on the CLIs resets non-collection based settings to their defaults and collection based settings to # empty collections. @@ -708,9 +708,7 @@ def test_match_distribution(tmp_path: Path, dist1: Distribution, dist2: Distribu assert "testpkg3" in conf.packages -@pytest.mark.parametrize( - "release1,release2", itertools.combinations_with_replacement([36, 37, 38], 2) -) +@pytest.mark.parametrize("release1,release2", itertools.combinations_with_replacement([36, 37, 38], 2)) def test_match_release(tmp_path: Path, release1: int, release2: int) -> None: with chdir(tmp_path): parent = Path("mkosi.conf") @@ -805,9 +803,7 @@ def test_match_repositories(tmp_path: Path) -> None: @pytest.mark.parametrize( - "image1,image2", itertools.combinations_with_replacement( - ["image_a", "image_b", "image_c"], 2 - ) + "image1,image2", itertools.combinations_with_replacement(["image_a", "image_b", "image_c"], 2) ) def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: with chdir(tmp_path): @@ -877,10 +873,11 @@ def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None: @pytest.mark.parametrize( - "op,version", itertools.product( + "op,version", + itertools.product( ["", "==", "<", ">", "<=", ">="], [122, 123, 124], - ) + ), ) def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: opfunc = { @@ -890,7 +887,7 @@ def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None: "<=": operator.le, ">": operator.gt, ">=": operator.ge, - }.get(op, operator.eq,) + }.get(op, operator.eq) with chdir(tmp_path): parent = Path("mkosi.conf") @@ -1135,7 +1132,7 @@ def test_specifiers(tmp_path: Path) -> None: def test_kernel_specifiers(tmp_path: Path) -> None: - kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638 + kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638 token = "MySystemImage" roothash = "67e893261799236dcf20529115ba9fae4fd7c2269e1e658d42269503e5760d38" boot_count = "3" @@ -1217,10 +1214,10 @@ def test_environment(tmp_path: Path) -> None: _, [sub, config] = parse_config() expected = { - "TestValue1": "100", # from other.env - "TestValue2": "300", # from mkosi.conf - "TestValue3": "400", # from mkosi.conf - "TestValue4": "99", # from mkosi.env + "TestValue1": "100", # from other.env + "TestValue2": "300", # from mkosi.conf + "TestValue3": "400", # from mkosi.conf + "TestValue4": "99", # from mkosi.env } # Only check values for keys from expected, as config.environment contains other items as well diff --git a/tests/test_initrd.py b/tests/test_initrd.py index 0e39b19ce..ae61cb161 100644 --- a/tests/test_initrd.py +++ b/tests/test_initrd.py @@ -92,12 +92,14 @@ def test_initrd_lvm(config: ImageConfig) -> None: lvm.rename(Path(image.output_dir) / "image.raw") - image.qemu([ - "--qemu-firmware=linux", - # LVM confuses systemd-repart so we mask it for this test. 
- "--kernel-command-line-extra=systemd.mask=systemd-repart.service", - "--kernel-command-line-extra=root=LABEL=root", - ]) + image.qemu( + [ + "--qemu-firmware=linux", + # LVM confuses systemd-repart so we mask it for this test. + "--kernel-command-line-extra=systemd.mask=systemd-repart.service", + "--kernel-command-line-extra=root=LABEL=root", + ] + ) def test_initrd_luks(config: ImageConfig, passphrase: Path) -> None: @@ -172,7 +174,7 @@ def test_initrd_luks_lvm(config: ImageConfig, passphrase: Path) -> None: "luksFormat", f"{lodev}p1", ] - ) + ) # fmt: skip run(["cryptsetup", "--key-file", passphrase, "luksOpen", f"{lodev}p1", "lvm_root"]) stack.callback(lambda: run(["cryptsetup", "close", "lvm_root"])) luks_uuid = run(["cryptsetup", "luksUUID", f"{lodev}p1"], stdout=subprocess.PIPE).stdout.strip() @@ -200,13 +202,15 @@ def test_initrd_luks_lvm(config: ImageConfig, passphrase: Path) -> None: lvm.rename(Path(image.output_dir) / "image.raw") - image.qemu([ - "--format=disk", - "--credential=cryptsetup.passphrase=mkosi", - "--qemu-firmware=linux", - "--kernel-command-line-extra=root=LABEL=root", - f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}", - ]) + image.qemu( + [ + "--format=disk", + "--credential=cryptsetup.passphrase=mkosi", + "--qemu-firmware=linux", + "--kernel-command-line-extra=root=LABEL=root", + f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}", + ] + ) def test_initrd_size(config: ImageConfig) -> None: diff --git a/tests/test_json.py b/tests/test_json.py index 6217d73ba..2a6cae076 100644 --- a/tests/test_json.py +++ b/tests/test_json.py @@ -379,13 +379,13 @@ def test_config() -> None: build_sources_ephemeral=True, cache_dir=Path("/is/this/the/cachedir"), cacheonly=Cacheonly.always, - checksum= False, + checksum=False, clean_package_metadata=ConfigFeature.auto, clean_scripts=[Path("/clean")], compress_level=3, compress_output=Compression.bz2, configure_scripts=[Path("/configure")], - credentials= {"credkey": "credval"}, + credentials={"credkey": "credval"}, dependencies=["dep1"], distribution=Distribution.fedora, environment={"foo": "foo", "BAR": "BAR", "Qux": "Qux"}, @@ -521,7 +521,7 @@ def test_config() -> None: with_docs=True, with_network=False, with_recommends=True, - with_tests= True, + with_tests=True, workspace_dir=Path("/cwd"), ) diff --git a/tests/test_sysext.py b/tests/test_sysext.py index 6650aa50a..c8c3b7300 100644 --- a/tests/test_sysext.py +++ b/tests/test_sysext.py @@ -14,12 +14,15 @@ def test_sysext(config: ImageConfig) -> None: image.build(["--clean-package-metadata=no", "--format=directory"]) with Image(image.config) as sysext: - sysext.build([ - "--directory", "", - "--incremental=no", - "--base-tree", Path(image.output_dir) / "image", - "--overlay", - "--package=dnsmasq", - "--format=disk", - ]) - + sysext.build( + [ + "--directory", + "", + "--incremental=no", + "--base-tree", + Path(image.output_dir) / "image", + "--overlay", + "--package=dnsmasq", + "--format=disk", + ] + ) diff --git a/tests/test_versioncomp.py b/tests/test_versioncomp.py index 6743f8fc9..d715d3e87 100644 --- a/tests/test_versioncomp.py +++ b/tests/test_versioncomp.py @@ -72,8 +72,8 @@ def test_generic_version_spec() -> None: GenericVersion("124-1"), ], ), - 2 - ) + 2, + ), ) def test_generic_version_strverscmp_improved_doc( s1: tuple[int, GenericVersion], @@ -86,9 +86,9 @@ def test_generic_version_strverscmp_improved_doc( i1, v1 = s1 i2, v2 = s2 assert (v1 == v2) == (i1 == i2) - assert (v1 < v2) == (i1 < i2) + assert (v1 < v2) == (i1 < i2) assert (v1 <= v2) == (i1 <= i2) 
- assert (v1 > v2) == (i1 > i2) + assert (v1 > v2) == (i1 > i2) assert (v1 >= v2) == (i1 >= i2) assert (v1 != v2) == (i1 != i2) @@ -122,8 +122,8 @@ def test_generic_version_rpmvercmp() -> None: RPMVERCMP("5.5p1", "5.5p10", -1) RPMVERCMP("5.5p10", "5.5p1", 1) - RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */ - RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */ + RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */ + RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */ RPMVERCMP("xyz10", "xyz10", 0) RPMVERCMP("xyz10", "xyz10.1", -1) @@ -165,8 +165,8 @@ def test_generic_version_rpmvercmp() -> None: RPMVERCMP("20101122", "20101121", 1) RPMVERCMP("2_0", "2_0", 0) - RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal - RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal + RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal + RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal # RhBug:178798 case */ RPMVERCMP("a", "a", 0) @@ -224,7 +224,7 @@ def test_generic_version_rpmvercmp() -> None: print("/* RPM version comparison oddities */") # RhBug:811992 case RPMVERCMP("1b.fc17", "1b.fc17", 0) - RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */ + RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */ RPMVERCMP("1.fc17", "1b.fc17", -1) RPMVERCMP("1g.fc17", "1g.fc17", 0) RPMVERCMP("1g.fc17", "1.fc17", 1)