git.ipfire.org Git - thirdparty/mkosi.git/commitdiff
Rename various symbols
author    Daan De Meyer <daan.j.demeyer@gmail.com>
Mon, 1 Jan 2024 16:49:08 +0000 (17:49 +0100)
committer Daan De Meyer <daan.j.demeyer@gmail.com>
Wed, 3 Jan 2024 15:24:20 +0000 (16:24 +0100)
- Let's get rid of the Mkosi prefix everywhere. Python has namespaced
  modules for a reason; let's make use of them.
- Let's also rename State to Context, to match systemd, where Context
  is generally used instead of State.
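
For code built on top of mkosi, the rename is mechanical: the import paths and
type names change, but the attributes on the objects do not. A minimal sketch
under that assumption (describe() is an illustrative helper, not part of mkosi):

    # Old names: MkosiArgs / MkosiConfig in mkosi.config, MkosiState in mkosi/state.py
    from mkosi.config import Args, Config
    from mkosi.context import Context

    def describe(context: Context) -> tuple[Args, Config]:
        # Context keeps the attributes MkosiState had (config, root, workspace,
        # staging, install_dir, args); only the type names lost their prefix.
        return context.args, context.config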

33 files changed:
mkosi/__init__.py
mkosi/archive.py
mkosi/bubblewrap.py
mkosi/burn.py
mkosi/config.py
mkosi/context.py [moved from mkosi/state.py with 89% similarity]
mkosi/distributions/__init__.py
mkosi/distributions/alma.py
mkosi/distributions/arch.py
mkosi/distributions/centos.py
mkosi/distributions/custom.py
mkosi/distributions/debian.py
mkosi/distributions/fedora.py
mkosi/distributions/gentoo.py
mkosi/distributions/mageia.py
mkosi/distributions/openmandriva.py
mkosi/distributions/opensuse.py
mkosi/distributions/rhel.py
mkosi/distributions/rhel_ubi.py
mkosi/distributions/rocky.py
mkosi/distributions/ubuntu.py
mkosi/installer/__init__.py
mkosi/installer/apt.py
mkosi/installer/dnf.py
mkosi/installer/pacman.py
mkosi/installer/rpm.py
mkosi/installer/zypper.py
mkosi/log.py
mkosi/manifest.py
mkosi/qemu.py
mkosi/run.py
tests/test_config.py
tests/test_json.py

diff --git a/mkosi/__init__.py b/mkosi/__init__.py
index 6b3f5e1d21957910da7f39b13592490f5c729073..ffb26d1f1d4317d78d5471949f63f99cca86f82b 100644
--- a/mkosi/__init__.py
+++ b/mkosi/__init__.py
@@ -26,15 +26,15 @@ from mkosi.archive import extract_tar, make_cpio, make_tar
 from mkosi.bubblewrap import bwrap, chroot_cmd
 from mkosi.burn import run_burn
 from mkosi.config import (
+    Args,
     BiosBootloader,
     Bootloader,
     Compression,
+    Config,
     ConfigFeature,
     DocFormat,
+    JsonEncoder,
     ManifestFormat,
-    MkosiArgs,
-    MkosiConfig,
-    MkosiJsonEncoder,
     OutputFormat,
     SecureBootSignTool,
     ShimBootloader,
@@ -46,6 +46,7 @@ from mkosi.config import (
     summary,
     yes_no,
 )
+from mkosi.context import Context
 from mkosi.distributions import Distribution
 from mkosi.installer import clean_package_manager_metadata, package_manager_scripts
 from mkosi.kmod import gen_required_kernel_modules, process_kernel_modules
@@ -56,7 +57,6 @@ from mkosi.pager import page
 from mkosi.partition import Partition, finalize_root, finalize_roothash
 from mkosi.qemu import KernelType, QemuDeviceNode, copy_ephemeral, run_qemu, run_ssh
 from mkosi.run import become_root, find_binary, fork_and_wait, init_mount_namespace, run
-from mkosi.state import MkosiState
 from mkosi.tree import copy_tree, move_tree, rmtree
 from mkosi.types import PathString
 from mkosi.util import (
@@ -84,73 +84,73 @@ MKOSI_AS_CALLER = (
 )
 
 @contextlib.contextmanager
-def mount_base_trees(state: MkosiState) -> Iterator[None]:
-    if not state.config.base_trees or not state.config.overlay:
+def mount_base_trees(context: Context) -> Iterator[None]:
+    if not context.config.base_trees or not context.config.overlay:
         yield
         return
 
     with complete_step("Mounting base trees…"), contextlib.ExitStack() as stack:
         bases = []
-        (state.workspace / "bases").mkdir(exist_ok=True)
+        (context.workspace / "bases").mkdir(exist_ok=True)
 
-        for path in state.config.base_trees:
-            d = state.workspace / f"bases/{path.name}-{uuid.uuid4().hex}"
+        for path in context.config.base_trees:
+            d = context.workspace / f"bases/{path.name}-{uuid.uuid4().hex}"
 
             if path.is_dir():
                 bases += [path]
             elif path.suffix == ".tar":
-                extract_tar(state, path, d)
+                extract_tar(context, path, d)
                 bases += [d]
             elif path.suffix == ".raw":
-                bwrap(state, ["systemd-dissect", "-M", path, d])
-                stack.callback(lambda: bwrap(state, ["systemd-dissect", "-U", d]))
+                bwrap(context, ["systemd-dissect", "-M", path, d])
+                stack.callback(lambda: bwrap(context, ["systemd-dissect", "-U", d]))
                 bases += [d]
             else:
                 die(f"Unsupported base tree source {path}")
 
-        stack.enter_context(mount_overlay(bases, state.root, state.root))
+        stack.enter_context(mount_overlay(bases, context.root, context.root))
 
         yield
 
 
-def remove_files(state: MkosiState) -> None:
+def remove_files(context: Context) -> None:
     """Remove files based on user-specified patterns"""
 
-    if not state.config.remove_files:
+    if not context.config.remove_files:
         return
 
     with complete_step("Removing files…"):
-        for pattern in state.config.remove_files:
-            for p in state.root.glob(pattern.lstrip("/")):
+        for pattern in context.config.remove_files:
+            for p in context.root.glob(pattern.lstrip("/")):
                 rmtree(p)
 
 
-def install_distribution(state: MkosiState) -> None:
-    if state.config.base_trees:
-        if not state.config.packages:
+def install_distribution(context: Context) -> None:
+    if context.config.base_trees:
+        if not context.config.packages:
             return
 
-        with complete_step(f"Installing extra packages for {str(state.config.distribution).capitalize()}"):
-            state.config.distribution.install_packages(state, state.config.packages)
+        with complete_step(f"Installing extra packages for {str(context.config.distribution).capitalize()}"):
+            context.config.distribution.install_packages(context, context.config.packages)
     else:
-        with complete_step(f"Installing {str(state.config.distribution).capitalize()}"):
-            state.config.distribution.install(state)
+        with complete_step(f"Installing {str(context.config.distribution).capitalize()}"):
+            context.config.distribution.install(context)
 
-            if not state.config.overlay:
-                if not (state.root / "etc/machine-id").exists():
+            if not context.config.overlay:
+                if not (context.root / "etc/machine-id").exists():
                     # Uninitialized means we want it to get initialized on first boot.
                     with umask(~0o444):
-                        (state.root / "etc/machine-id").write_text("uninitialized\n")
+                        (context.root / "etc/machine-id").write_text("uninitialized\n")
 
                 # Ensure /efi exists so that the ESP is mounted there, as recommended by
                 # https://0pointer.net/blog/linux-boot-partitions.html. Use the most restrictive access mode we
                 # can without tripping up mkfs tools since this directory is only meant to be overmounted and
                 # should not be read from or written to.
                 with umask(~0o500):
-                    (state.root / "efi").mkdir(exist_ok=True)
+                    (context.root / "efi").mkdir(exist_ok=True)
 
-            if state.config.packages:
-                state.config.distribution.install_packages(state, state.config.packages)
+            if context.config.packages:
+                context.config.distribution.install_packages(context, context.config.packages)
 
     for f in ("var/lib/systemd/random-seed",
               "var/lib/systemd/credential.secret",
@@ -158,36 +158,36 @@ def install_distribution(state: MkosiState) -> None:
               "var/lib/dbus/machine-id"):
         # Using missing_ok=True still causes an OSError if the mount is read-only even if the
         # file doesn't exist so do an explicit exists() check first.
-        if (state.root / f).exists():
-            (state.root / f).unlink()
+        if (context.root / f).exists():
+            (context.root / f).unlink()
 
 
-def install_build_packages(state: MkosiState) -> None:
-    if not state.config.build_scripts or not state.config.build_packages:
+def install_build_packages(context: Context) -> None:
+    if not context.config.build_scripts or not context.config.build_packages:
         return
 
     # TODO: move to parenthesised context managers once on 3.10
-    pd = str(state.config.distribution).capitalize()
-    with complete_step(f"Installing build packages for {pd}"), mount_build_overlay(state):
-        state.config.distribution.install_packages(state, state.config.build_packages)
+    pd = str(context.config.distribution).capitalize()
+    with complete_step(f"Installing build packages for {pd}"), mount_build_overlay(context):
+        context.config.distribution.install_packages(context, context.config.build_packages)
 
 
-def remove_packages(state: MkosiState) -> None:
+def remove_packages(context: Context) -> None:
     """Remove packages listed in config.remove_packages"""
 
-    if not state.config.remove_packages:
+    if not context.config.remove_packages:
         return
 
-    with complete_step(f"Removing {len(state.config.remove_packages)} packages…"):
+    with complete_step(f"Removing {len(context.config.remove_packages)} packages…"):
         try:
-            state.config.distribution.remove_packages(state, state.config.remove_packages)
+            context.config.distribution.remove_packages(context, context.config.remove_packages)
         except NotImplementedError:
-            die(f"Removing packages is not supported for {state.config.distribution}")
+            die(f"Removing packages is not supported for {context.config.distribution}")
 
 
-def check_root_populated(state: MkosiState) -> None:
+def check_root_populated(context: Context) -> None:
     """Check that the root was populated by looking for a os-release file."""
-    osrelease = state.root / "usr/lib/os-release"
+    osrelease = context.root / "usr/lib/os-release"
     if not osrelease.exists():
         die(
             f"{osrelease} not found.",
@@ -198,16 +198,16 @@ def check_root_populated(state: MkosiState) -> None:
         )
 
 
-def configure_os_release(state: MkosiState) -> None:
+def configure_os_release(context: Context) -> None:
     """Write IMAGE_ID and IMAGE_VERSION to /usr/lib/os-release in the image."""
-    if not state.config.image_id and not state.config.image_version:
+    if not context.config.image_id and not context.config.image_version:
         return
 
-    if state.config.overlay or state.config.output_format in (OutputFormat.sysext, OutputFormat.confext):
+    if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext):
         return
 
     for candidate in ["usr/lib/os-release", "etc/os-release", "usr/lib/initrd-release", "etc/initrd-release"]:
-        osrelease = state.root / candidate
+        osrelease = context.root / candidate
         # at this point we know we will either change or add to the file
         newosrelease = osrelease.with_suffix(".new")
 
@@ -218,34 +218,34 @@ def configure_os_release(state: MkosiState) -> None:
         with osrelease.open("r") as old, newosrelease.open("w") as new:
             # fix existing values
             for line in old.readlines():
-                if state.config.image_id and line.startswith("IMAGE_ID="):
-                    new.write(f'IMAGE_ID="{state.config.image_id}"\n')
+                if context.config.image_id and line.startswith("IMAGE_ID="):
+                    new.write(f'IMAGE_ID="{context.config.image_id}"\n')
                     image_id_written = True
-                elif state.config.image_version and line.startswith("IMAGE_VERSION="):
-                    new.write(f'IMAGE_VERSION="{state.config.image_version}"\n')
+                elif context.config.image_version and line.startswith("IMAGE_VERSION="):
+                    new.write(f'IMAGE_VERSION="{context.config.image_version}"\n')
                     image_version_written = True
                 else:
                     new.write(line)
 
             # append if they were missing
-            if state.config.image_id and not image_id_written:
-                new.write(f'IMAGE_ID="{state.config.image_id}"\n')
-            if state.config.image_version and not image_version_written:
-                new.write(f'IMAGE_VERSION="{state.config.image_version}"\n')
+            if context.config.image_id and not image_id_written:
+                new.write(f'IMAGE_ID="{context.config.image_id}"\n')
+            if context.config.image_version and not image_version_written:
+                new.write(f'IMAGE_VERSION="{context.config.image_version}"\n')
 
         newosrelease.rename(osrelease)
 
 
-def configure_extension_release(state: MkosiState) -> None:
-    if state.config.output_format not in (OutputFormat.sysext, OutputFormat.confext):
+def configure_extension_release(context: Context) -> None:
+    if context.config.output_format not in (OutputFormat.sysext, OutputFormat.confext):
         return
 
-    prefix = "SYSEXT" if state.config.output_format == OutputFormat.sysext else "CONFEXT"
-    d = "usr/lib" if state.config.output_format == OutputFormat.sysext else "etc"
-    p = state.root / d / f"extension-release.d/extension-release.{state.config.output}"
+    prefix = "SYSEXT" if context.config.output_format == OutputFormat.sysext else "CONFEXT"
+    d = "usr/lib" if context.config.output_format == OutputFormat.sysext else "etc"
+    p = context.root / d / f"extension-release.d/extension-release.{context.config.output}"
     p.parent.mkdir(parents=True, exist_ok=True)
 
-    osrelease = read_os_release(state.root)
+    osrelease = read_os_release(context.root)
     extrelease = read_env_file(p) if p.exists() else {}
     new = p.with_suffix(".new")
 
@@ -259,23 +259,23 @@ def configure_extension_release(state: MkosiState) -> None:
         if "VERSION_ID" not in extrelease and (version := osrelease.get("VERSION_ID")):
             f.write(f"VERSION_ID={version}\n")
 
-        if f"{prefix}_ID" not in extrelease and state.config.image_id:
-            f.write(f"{prefix}_ID={state.config.image_id}\n")
+        if f"{prefix}_ID" not in extrelease and context.config.image_id:
+            f.write(f"{prefix}_ID={context.config.image_id}\n")
 
-        if f"{prefix}_VERSION_ID" not in extrelease and state.config.image_version:
-            f.write(f"{prefix}_VERSION_ID={state.config.image_version}\n")
+        if f"{prefix}_VERSION_ID" not in extrelease and context.config.image_version:
+            f.write(f"{prefix}_VERSION_ID={context.config.image_version}\n")
 
         if f"{prefix}_SCOPE" not in extrelease:
             f.write(f"{prefix}_SCOPE=initrd system portable\n")
 
         if "ARCHITECTURE" not in extrelease:
-            f.write(f"ARCHITECTURE={state.config.architecture}\n")
+            f.write(f"ARCHITECTURE={context.config.architecture}\n")
 
     new.rename(p)
 
 
-def configure_autologin_service(state: MkosiState, service: str, extra: str) -> None:
-    dropin = state.root / f"usr/lib/systemd/system/{service}.d/autologin.conf"
+def configure_autologin_service(context: Context, service: str, extra: str) -> None:
+    dropin = context.root / f"usr/lib/systemd/system/{service}.d/autologin.conf"
     with umask(~0o755):
         dropin.parent.mkdir(parents=True, exist_ok=True)
     with umask(~0o644):
@@ -292,47 +292,47 @@ def configure_autologin_service(state: MkosiState, service: str, extra: str) ->
         )
 
 
-def configure_autologin(state: MkosiState) -> None:
-    if not state.config.autologin:
+def configure_autologin(context: Context) -> None:
+    if not context.config.autologin:
         return
 
     with complete_step("Setting up autologin…"):
-        configure_autologin_service(state, "console-getty.service",
+        configure_autologin_service(context, "console-getty.service",
                                     "--noclear --keep-baud console 115200,38400,9600")
-        configure_autologin_service(state, "getty@tty1.service",
+        configure_autologin_service(context, "getty@tty1.service",
                                     "--noclear -")
-        configure_autologin_service(state, "serial-getty@ttyS0.service",
+        configure_autologin_service(context, "serial-getty@ttyS0.service",
                                     "--keep-baud 115200,57600,38400,9600 -")
 
-        if state.config.architecture.default_serial_tty() != "ttyS0":
-            configure_autologin_service(state,
-                                        f"serial-getty@{state.config.architecture.default_serial_tty()}.service",
+        if context.config.architecture.default_serial_tty() != "ttyS0":
+            configure_autologin_service(context,
+                                        f"serial-getty@{context.config.architecture.default_serial_tty()}.service",
                                         "--keep-baud 115200,57600,38400,9600 -")
 
 
 @contextlib.contextmanager
-def mount_cache_overlay(state: MkosiState) -> Iterator[None]:
-    if not state.config.incremental or not any(state.root.iterdir()):
+def mount_cache_overlay(context: Context) -> Iterator[None]:
+    if not context.config.incremental or not any(context.root.iterdir()):
         yield
         return
 
-    d = state.workspace / "cache-overlay"
+    d = context.workspace / "cache-overlay"
     with umask(~0o755):
         d.mkdir(exist_ok=True)
 
-    with mount_overlay([state.root], d, state.root):
+    with mount_overlay([context.root], d, context.root):
         yield
 
 
 @contextlib.contextmanager
-def mount_build_overlay(state: MkosiState, volatile: bool = False) -> Iterator[Path]:
-    d = state.workspace / "build-overlay"
+def mount_build_overlay(context: Context, volatile: bool = False) -> Iterator[Path]:
+    d = context.workspace / "build-overlay"
     if not d.is_symlink():
         with umask(~0o755):
             d.mkdir(exist_ok=True)
 
     with contextlib.ExitStack() as stack:
-        lower = [state.root]
+        lower = [context.root]
 
         if volatile:
             lower += [d]
@@ -340,13 +340,13 @@ def mount_build_overlay(state: MkosiState, volatile: bool = False) -> Iterator[P
         else:
             upper = d
 
-        stack.enter_context(mount_overlay(lower, upper, state.root))
+        stack.enter_context(mount_overlay(lower, upper, context.root))
 
-        yield state.root
+        yield context.root
 
 
 @contextlib.contextmanager
-def finalize_source_mounts(config: MkosiConfig) -> Iterator[list[PathString]]:
+def finalize_source_mounts(config: Config) -> Iterator[list[PathString]]:
     with contextlib.ExitStack() as stack:
         mounts = [
             (stack.enter_context(mount_overlay([source])) if config.build_sources_ephemeral else source, target)
@@ -390,58 +390,58 @@ def finalize_scripts(scripts: Mapping[str, Sequence[PathString]] = {}) -> Iterat
 
 
 def finalize_host_scripts(
-    state: MkosiState,
+    context: Context,
     helpers: dict[str, Sequence[PathString]],  # FIXME: change dict to Mapping when PyRight is fixed
 ) -> contextlib.AbstractContextManager[Path]:
     scripts: dict[str, Sequence[PathString]] = {}
     if find_binary("git"):
         scripts["git"] = ("git", "-c", "safe.directory=*")
     if find_binary("useradd"):
-        scripts["useradd"] = ("useradd", "--root", state.root)
-    return finalize_scripts(scripts | helpers | package_manager_scripts(state))
+        scripts["useradd"] = ("useradd", "--root", context.root)
+    return finalize_scripts(scripts | helpers | package_manager_scripts(context))
 
 
-def finalize_chroot_scripts(state: MkosiState) -> contextlib.AbstractContextManager[Path]:
-    git = {"git": ("git", "-c", "safe.directory=*")} if find_binary("git", root=state.root) else {}
+def finalize_chroot_scripts(context: Context) -> contextlib.AbstractContextManager[Path]:
+    git = {"git": ("git", "-c", "safe.directory=*")} if find_binary("git", root=context.root) else {}
     return finalize_scripts(git)
 
 
-def run_prepare_scripts(state: MkosiState, build: bool) -> None:
-    if not state.config.prepare_scripts:
+def run_prepare_scripts(context: Context, build: bool) -> None:
+    if not context.config.prepare_scripts:
         return
-    if build and not state.config.build_scripts:
+    if build and not context.config.build_scripts:
         return
 
     env = dict(
-        ARCHITECTURE=str(state.config.architecture),
-        BUILDROOT=str(state.root),
+        ARCHITECTURE=str(context.config.architecture),
+        BUILDROOT=str(context.root),
         CHROOT_SCRIPT="/work/prepare",
         CHROOT_SRCDIR="/work/src",
         MKOSI_UID=str(INVOKING_USER.uid),
         MKOSI_GID=str(INVOKING_USER.gid),
         SCRIPT="/work/prepare",
         SRCDIR=str(Path.cwd()),
-        WITH_DOCS=one_zero(state.config.with_docs),
-        WITH_NETWORK=one_zero(state.config.with_network),
-        WITH_TESTS=one_zero(state.config.with_tests),
+        WITH_DOCS=one_zero(context.config.with_docs),
+        WITH_NETWORK=one_zero(context.config.with_network),
+        WITH_TESTS=one_zero(context.config.with_tests),
     )
 
     with contextlib.ExitStack() as stack:
         if build:
-            stack.enter_context(mount_build_overlay(state))
+            stack.enter_context(mount_build_overlay(context))
             step_msg = "Running prepare script {} in build overlay…"
             arg = "build"
         else:
             step_msg = "Running prepare script {}…"
             arg = "final"
 
-        sources = stack.enter_context(finalize_source_mounts(state.config))
-        cd = stack.enter_context(finalize_chroot_scripts(state))
+        sources = stack.enter_context(finalize_source_mounts(context.config))
+        cd = stack.enter_context(finalize_chroot_scripts(context))
 
-        for script in state.config.prepare_scripts:
+        for script in context.config.prepare_scripts:
             helpers = {
                 "mkosi-chroot": chroot_cmd(
-                    state.root,
+                    context.root,
                     resolve=True,
                     options=[
                         "--bind", script, "/work/prepare",
@@ -455,122 +455,126 @@ def run_prepare_scripts(state: MkosiState, build: bool) -> None:
                 "mkosi-as-caller" : MKOSI_AS_CALLER,
             }
 
-            hd = stack.enter_context(finalize_host_scripts(state, helpers))
+            hd = stack.enter_context(finalize_host_scripts(context, helpers))
 
             with complete_step(step_msg.format(script)):
                 bwrap(
-                    state,
+                    context,
                     script_maybe_chroot(script, "/work/prepare") + [arg],
                     network=True,
                     options=sources + ["--ro-bind", script, script],
                     scripts=hd,
-                    env=env | state.config.environment,
+                    env=env | context.config.environment,
                     stdin=sys.stdin,
                 )
 
 
-def run_build_scripts(state: MkosiState) -> None:
-    if not state.config.build_scripts:
+def run_build_scripts(context: Context) -> None:
+    if not context.config.build_scripts:
         return
 
     env = dict(
-        ARCHITECTURE=str(state.config.architecture),
-        BUILDROOT=str(state.root),
+        ARCHITECTURE=str(context.config.architecture),
+        BUILDROOT=str(context.root),
         CHROOT_DESTDIR="/work/dest",
         CHROOT_OUTPUTDIR="/work/out",
         CHROOT_SCRIPT="/work/build-script",
         CHROOT_SRCDIR="/work/src",
-        DESTDIR=str(state.install_dir),
+        DESTDIR=str(context.install_dir),
         MKOSI_UID=str(INVOKING_USER.uid),
         MKOSI_GID=str(INVOKING_USER.gid),
-        OUTPUTDIR=str(state.staging),
+        OUTPUTDIR=str(context.staging),
         SCRIPT="/work/build-script",
         SRCDIR=str(Path.cwd()),
-        WITH_DOCS=one_zero(state.config.with_docs),
-        WITH_NETWORK=one_zero(state.config.with_network),
-        WITH_TESTS=one_zero(state.config.with_tests),
+        WITH_DOCS=one_zero(context.config.with_docs),
+        WITH_NETWORK=one_zero(context.config.with_network),
+        WITH_TESTS=one_zero(context.config.with_tests),
     )
 
-    if state.config.build_dir is not None:
+    if context.config.build_dir is not None:
         env |= dict(
-            BUILDDIR=str(state.config.build_dir),
+            BUILDDIR=str(context.config.build_dir),
             CHROOT_BUILDDIR="/work/build",
         )
 
     with (
-        mount_build_overlay(state, volatile=True),
-        finalize_chroot_scripts(state) as cd,
-        finalize_source_mounts(state.config) as sources,
+        mount_build_overlay(context, volatile=True),
+        finalize_chroot_scripts(context) as cd,
+        finalize_source_mounts(context.config) as sources,
     ):
-        for script in state.config.build_scripts:
+        for script in context.config.build_scripts:
             helpers = {
                 "mkosi-chroot": chroot_cmd(
-                    state.root,
-                    resolve=state.config.with_network,
+                    context.root,
+                    resolve=context.config.with_network,
                     options=[
                         "--bind", script, "/work/build-script",
-                        "--bind", state.install_dir, "/work/dest",
-                        "--bind", state.staging, "/work/out",
+                        "--bind", context.install_dir, "/work/dest",
+                        "--bind", context.staging, "/work/out",
                         "--bind", Path.cwd(), "/work/src",
                         "--bind", cd, "/work/scripts",
-                        *(["--bind", str(state.config.build_dir), "/work/build"] if state.config.build_dir else []),
+                        *([
+                            "--bind", os.fspath(context.config.build_dir), "/work/build"]
+                            if context.config.build_dir
+                            else []
+                        ),
                         "--chdir", "/work/src",
                         "--setenv", "SRCDIR", "/work/src",
                         "--setenv", "DESTDIR", "/work/dest",
                         "--setenv", "OUTPUTDIR", "/work/out",
                         "--setenv", "BUILDROOT", "/",
-                        *(["--setenv", "BUILDDIR", "/work/build"] if state.config.build_dir else []),
+                        *(["--setenv", "BUILDDIR", "/work/build"] if context.config.build_dir else []),
                     ],
                 ),
                 "mkosi-as-caller" : MKOSI_AS_CALLER,
             }
 
-            cmdline = state.args.cmdline if state.args.verb == Verb.build else []
+            cmdline = context.args.cmdline if context.args.verb == Verb.build else []
 
             with (
-                finalize_host_scripts(state, helpers) as hd,
+                finalize_host_scripts(context, helpers) as hd,
                 complete_step(f"Running build script {script}…"),
             ):
                 bwrap(
-                    state,
+                    context,
                     script_maybe_chroot(script, "/work/build-script") + cmdline,
-                    network=state.config.with_network,
+                    network=context.config.with_network,
                     options=sources + ["--ro-bind", script, script],
                     scripts=hd,
-                    env=env | state.config.environment,
+                    env=env | context.config.environment,
                     stdin=sys.stdin,
                 )
 
 
-def run_postinst_scripts(state: MkosiState) -> None:
-    if not state.config.postinst_scripts:
+def run_postinst_scripts(context: Context) -> None:
+    if not context.config.postinst_scripts:
         return
 
     env = dict(
-        ARCHITECTURE=str(state.config.architecture),
-        BUILDROOT=str(state.root),
+        ARCHITECTURE=str(context.config.architecture),
+        BUILDROOT=str(context.root),
         CHROOT_OUTPUTDIR="/work/out",
         CHROOT_SCRIPT="/work/postinst",
         CHROOT_SRCDIR="/work/src",
         MKOSI_UID=str(INVOKING_USER.uid),
         MKOSI_GID=str(INVOKING_USER.gid),
-        OUTPUTDIR=str(state.staging),
+        OUTPUTDIR=str(context.staging),
         SCRIPT="/work/postinst",
         SRCDIR=str(Path.cwd()),
     )
 
     with (
-        finalize_chroot_scripts(state) as cd,
-        finalize_source_mounts(state.config) as sources,
+        finalize_chroot_scripts(context) as cd,
+        finalize_source_mounts(context.config) as sources,
     ):
-        for script in state.config.postinst_scripts:
+        for script in context.config.postinst_scripts:
             helpers = {
                 "mkosi-chroot": chroot_cmd(
-                    state.root,
-                    resolve=state.config.with_network,
+                    context.root,
+                    resolve=context.config.with_network,
                     options=[
                         "--bind", script, "/work/postinst",
-                        "--bind", state.staging, "/work/out",
+                        "--bind", context.staging, "/work/out",
                         "--bind", Path.cwd(), "/work/src",
                         "--bind", cd, "/work/scripts",
                         "--chdir", "/work/src",
@@ -583,49 +587,49 @@ def run_postinst_scripts(state: MkosiState) -> None:
             }
 
             with (
-                finalize_host_scripts(state, helpers) as hd,
+                finalize_host_scripts(context, helpers) as hd,
                 complete_step(f"Running postinstall script {script}…"),
             ):
                 bwrap(
-                    state,
+                    context,
                     script_maybe_chroot(script, "/work/postinst") + ["final"],
-                    network=state.config.with_network,
+                    network=context.config.with_network,
                     options=sources + ["--ro-bind", script, script],
                     scripts=hd,
-                    env=env | state.config.environment,
+                    env=env | context.config.environment,
                     stdin=sys.stdin,
                 )
 
 
-def run_finalize_scripts(state: MkosiState) -> None:
-    if not state.config.finalize_scripts:
+def run_finalize_scripts(context: Context) -> None:
+    if not context.config.finalize_scripts:
         return
 
     env = dict(
-        ARCHITECTURE=str(state.config.architecture),
-        BUILDROOT=str(state.root),
+        ARCHITECTURE=str(context.config.architecture),
+        BUILDROOT=str(context.root),
         CHROOT_OUTPUTDIR="/work/out",
         CHROOT_SCRIPT="/work/finalize",
         CHROOT_SRCDIR="/work/src",
         MKOSI_UID=str(INVOKING_USER.uid),
         MKOSI_GID=str(INVOKING_USER.gid),
-        OUTPUTDIR=str(state.staging),
+        OUTPUTDIR=str(context.staging),
         SCRIPT="/work/finalize",
         SRCDIR=str(Path.cwd()),
     )
 
     with (
-        finalize_chroot_scripts(state) as cd,
-        finalize_source_mounts(state.config) as sources,
+        finalize_chroot_scripts(context) as cd,
+        finalize_source_mounts(context.config) as sources,
     ):
-        for script in state.config.finalize_scripts:
+        for script in context.config.finalize_scripts:
             helpers = {
                 "mkosi-chroot": chroot_cmd(
-                    state.root,
-                    resolve=state.config.with_network,
+                    context.root,
+                    resolve=context.config.with_network,
                     options=[
                         "--bind", script, "/work/finalize",
-                        "--bind", state.staging, "/work/out",
+                        "--bind", context.staging, "/work/out",
                         "--bind", Path.cwd(), "/work/src",
                         "--bind", cd, "/work/scripts",
                         "--chdir", "/work/src",
@@ -638,23 +642,23 @@ def run_finalize_scripts(state: MkosiState) -> None:
             }
 
             with (
-                finalize_host_scripts(state, helpers) as hd,
+                finalize_host_scripts(context, helpers) as hd,
                 complete_step(f"Running finalize script {script}…"),
             ):
                 bwrap(
-                    state,
+                    context,
                     script_maybe_chroot(script, "/work/finalize"),
-                    network=state.config.with_network,
+                    network=context.config.with_network,
                     options=sources + ["--ro-bind", script, script],
                     scripts=hd,
-                    env=env | state.config.environment,
+                    env=env | context.config.environment,
                     stdin=sys.stdin,
                 )
 
 
-def certificate_common_name(state: MkosiState, certificate: Path) -> str:
+def certificate_common_name(context: Context, certificate: Path) -> str:
     output = bwrap(
-        state,
+        context,
         [
             "openssl",
             "x509",
@@ -679,20 +683,20 @@ def certificate_common_name(state: MkosiState, certificate: Path) -> str:
     die(f"Certificate {certificate} is missing Common Name")
 
 
-def pesign_prepare(state: MkosiState) -> None:
-    assert state.config.secure_boot_key
-    assert state.config.secure_boot_certificate
+def pesign_prepare(context: Context) -> None:
+    assert context.config.secure_boot_key
+    assert context.config.secure_boot_certificate
 
-    if (state.workspace / "pesign").exists():
+    if (context.workspace / "pesign").exists():
         return
 
-    (state.workspace / "pesign").mkdir()
+    (context.workspace / "pesign").mkdir()
 
     # pesign takes a certificate directory and a certificate common name as input arguments, so we have
     # to transform our input key and cert into that format. Adapted from
     # https://www.mankier.com/1/pesign#Examples-Signing_with_the_certificate_and_private_key_in_individual_files
     bwrap(
-        state,
+        context,
         [
             "openssl",
             "pkcs12",
@@ -702,70 +706,70 @@ def pesign_prepare(state: MkosiState) -> None:
             "-certpbe", "NONE",
             "-nomaciter",
             "-passout", "pass:",
-            "-out", state.workspace / "secure-boot.p12",
-            "-inkey", state.config.secure_boot_key,
-            "-in", state.config.secure_boot_certificate,
+            "-out", context.workspace / "secure-boot.p12",
+            "-inkey", context.config.secure_boot_key,
+            "-in", context.config.secure_boot_certificate,
         ],
     )
 
     bwrap(
-        state,
+        context,
         [
             "pk12util",
             "-K", "",
             "-W", "",
-            "-i", state.workspace / "secure-boot.p12",
-            "-d", state.workspace / "pesign",
+            "-i", context.workspace / "secure-boot.p12",
+            "-d", context.workspace / "pesign",
         ],
     )
 
 
-def efi_boot_binary(state: MkosiState) -> Path:
-    arch = state.config.architecture.to_efi()
+def efi_boot_binary(context: Context) -> Path:
+    arch = context.config.architecture.to_efi()
     assert arch
     return Path(f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI")
 
 
-def shim_second_stage_binary(state: MkosiState) -> Path:
-    arch = state.config.architecture.to_efi()
+def shim_second_stage_binary(context: Context) -> Path:
+    arch = context.config.architecture.to_efi()
     assert arch
-    if state.config.distribution == Distribution.opensuse:
+    if context.config.distribution == Distribution.opensuse:
         return Path("efi/EFI/BOOT/grub.EFI")
     else:
         return Path(f"efi/EFI/BOOT/grub{arch}.EFI")
 
 
-def sign_efi_binary(state: MkosiState, input: Path, output: Path) -> None:
-    assert state.config.secure_boot_key
-    assert state.config.secure_boot_certificate
+def sign_efi_binary(context: Context, input: Path, output: Path) -> None:
+    assert context.config.secure_boot_key
+    assert context.config.secure_boot_certificate
 
     if (
-        state.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or
-        state.config.secure_boot_sign_tool == SecureBootSignTool.auto and
+        context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or
+        context.config.secure_boot_sign_tool == SecureBootSignTool.auto and
         shutil.which("sbsign") is not None
     ):
         bwrap(
-            state,
+            context,
             [
                 "sbsign",
-                "--key", state.config.secure_boot_key,
-                "--cert", state.config.secure_boot_certificate,
+                "--key", context.config.secure_boot_key,
+                "--cert", context.config.secure_boot_certificate,
                 "--output", output,
                 input,
             ],
         )
     elif (
-        state.config.secure_boot_sign_tool == SecureBootSignTool.pesign or
-        state.config.secure_boot_sign_tool == SecureBootSignTool.auto and
+        context.config.secure_boot_sign_tool == SecureBootSignTool.pesign or
+        context.config.secure_boot_sign_tool == SecureBootSignTool.auto and
         shutil.which("pesign") is not None
     ):
-        pesign_prepare(state)
+        pesign_prepare(context)
         bwrap(
-            state,
+            context,
             [
                 "pesign",
-                "--certdir", state.workspace / "pesign",
-                "--certificate", certificate_common_name(state, state.config.secure_boot_certificate),
+                "--certdir", context.workspace / "pesign",
+                "--certificate", certificate_common_name(context, context.config.secure_boot_certificate),
                 "--sign",
                 "--force",
                 "--in", input,
@@ -776,153 +780,153 @@ def sign_efi_binary(state: MkosiState, input: Path, output: Path) -> None:
         die("One of sbsign or pesign is required to use SecureBoot=")
 
 
-def install_systemd_boot(state: MkosiState) -> None:
-    if not want_efi(state.config):
+def install_systemd_boot(context: Context) -> None:
+    if not want_efi(context.config):
         return
 
-    if state.config.bootloader != Bootloader.systemd_boot:
+    if context.config.bootloader != Bootloader.systemd_boot:
         return
 
-    if not any(gen_kernel_images(state)) and state.config.bootable == ConfigFeature.auto:
+    if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto:
         return
 
     if not shutil.which("bootctl"):
-        if state.config.bootable == ConfigFeature.enabled:
+        if context.config.bootable == ConfigFeature.enabled:
             die("An EFI bootable image with systemd-boot was requested but bootctl was not found")
         return
 
-    directory = state.root / "usr/lib/systemd/boot/efi"
+    directory = context.root / "usr/lib/systemd/boot/efi"
     if not directory.exists() or not any(directory.iterdir()):
-        if state.config.bootable == ConfigFeature.enabled:
+        if context.config.bootable == ConfigFeature.enabled:
             die("A EFI bootable image with systemd-boot was requested but systemd-boot was not found at "
-                f"{directory.relative_to(state.root)}")
+                f"{directory.relative_to(context.root)}")
         return
 
-    if state.config.secure_boot:
+    if context.config.secure_boot:
         with complete_step("Signing systemd-boot binaries…"):
             for input in itertools.chain(directory.glob('*.efi'), directory.glob('*.EFI')):
                 output = directory / f"{input}.signed"
-                sign_efi_binary(state, input, output)
+                sign_efi_binary(context, input, output)
 
     with complete_step("Installing systemd-boot…"):
         bwrap(
-            state,
-            ["bootctl", "install", "--root", state.root, "--all-architectures", "--no-variables"],
+            context,
+            ["bootctl", "install", "--root", context.root, "--all-architectures", "--no-variables"],
             env={"SYSTEMD_ESP_PATH": "/efi"},
         )
 
-        if state.config.shim_bootloader != ShimBootloader.none:
+        if context.config.shim_bootloader != ShimBootloader.none:
             shutil.copy2(
-                state.root / f"efi/EFI/systemd/systemd-boot{state.config.architecture.to_efi()}.efi",
-                state.root / shim_second_stage_binary(state),
+                context.root / f"efi/EFI/systemd/systemd-boot{context.config.architecture.to_efi()}.efi",
+                context.root / shim_second_stage_binary(context),
             )
 
-    if state.config.secure_boot and state.config.secure_boot_auto_enroll:
-        assert state.config.secure_boot_key
-        assert state.config.secure_boot_certificate
+    if context.config.secure_boot and context.config.secure_boot_auto_enroll:
+        assert context.config.secure_boot_key
+        assert context.config.secure_boot_certificate
 
         with complete_step("Setting up secure boot auto-enrollment…"):
-            keys = state.root / "efi/loader/keys/auto"
+            keys = context.root / "efi/loader/keys/auto"
             with umask(~0o700):
                 keys.mkdir(parents=True, exist_ok=True)
 
             # sbsiglist expects a DER certificate.
             bwrap(
-                state,
+                context,
                 [
                     "openssl",
                     "x509",
                     "-outform", "DER",
-                    "-in", state.config.secure_boot_certificate,
-                    "-out", state.workspace / "mkosi.der",
+                    "-in", context.config.secure_boot_certificate,
+                    "-out", context.workspace / "mkosi.der",
                 ],
             )
 
             bwrap(
-                state,
+                context,
                 [
                     "sbsiglist",
                     "--owner", str(uuid.uuid4()),
                     "--type", "x509",
-                    "--output", state.workspace / "mkosi.esl",
-                    state.workspace / "mkosi.der",
+                    "--output", context.workspace / "mkosi.esl",
+                    context.workspace / "mkosi.der",
                 ],
             )
 
             # We reuse the key for all secure boot databases to keep things simple.
             for db in ["PK", "KEK", "db"]:
                 bwrap(
-                    state,
+                    context,
                     [
                         "sbvarsign",
                         "--attr",
                             "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS",
-                        "--key", state.config.secure_boot_key,
-                        "--cert", state.config.secure_boot_certificate,
+                        "--key", context.config.secure_boot_key,
+                        "--cert", context.config.secure_boot_certificate,
                         "--output", keys / f"{db}.auth",
                         db,
-                        state.workspace / "mkosi.esl",
+                        context.workspace / "mkosi.esl",
                     ],
                 )
 
 
 def find_and_install_shim_binary(
-    state: MkosiState,
+    context: Context,
     name: str,
     signed: Sequence[str],
     unsigned: Sequence[str],
     output: Path,
 ) -> None:
-    if state.config.shim_bootloader == ShimBootloader.signed:
+    if context.config.shim_bootloader == ShimBootloader.signed:
         for pattern in signed:
-            for p in state.root.glob(pattern):
+            for p in context.root.glob(pattern):
                 if p.is_symlink() and p.readlink().is_absolute():
                     logging.warning(f"Ignoring signed {name} EFI binary which is an absolute path to {p.readlink()}")
                     continue
 
-                rel = p.relative_to(state.root)
+                rel = p.relative_to(context.root)
                 log_step(f"Installing signed {name} EFI binary from /{rel} to /{output}")
-                shutil.copy2(p, state.root / output)
+                shutil.copy2(p, context.root / output)
                 return
 
-        if state.config.bootable == ConfigFeature.enabled:
+        if context.config.bootable == ConfigFeature.enabled:
             die(f"Couldn't find signed {name} EFI binary installed in the image")
     else:
         for pattern in unsigned:
-            for p in state.root.glob(pattern):
+            for p in context.root.glob(pattern):
                 if p.is_symlink() and p.readlink().is_absolute():
                     logging.warning(f"Ignoring unsigned {name} EFI binary which is an absolute path to {p.readlink()}")
                     continue
 
-                rel = p.relative_to(state.root)
-                if state.config.secure_boot:
+                rel = p.relative_to(context.root)
+                if context.config.secure_boot:
                     log_step(f"Signing and installing unsigned {name} EFI binary from /{rel} to /{output}")
-                    sign_efi_binary(state, p, state.root / output)
+                    sign_efi_binary(context, p, context.root / output)
                 else:
                     log_step(f"Installing unsigned {name} EFI binary /{rel} to /{output}")
-                    shutil.copy2(p, state.root / output)
+                    shutil.copy2(p, context.root / output)
 
                 return
 
-        if state.config.bootable == ConfigFeature.enabled:
+        if context.config.bootable == ConfigFeature.enabled:
             die(f"Couldn't find unsigned {name} EFI binary installed in the image")
 
 
-def install_shim(state: MkosiState) -> None:
-    if not want_efi(state.config):
+def install_shim(context: Context) -> None:
+    if not want_efi(context.config):
         return
 
-    if state.config.shim_bootloader == ShimBootloader.none:
+    if context.config.shim_bootloader == ShimBootloader.none:
         return
 
-    if not any(gen_kernel_images(state)) and state.config.bootable == ConfigFeature.auto:
+    if not any(gen_kernel_images(context)) and context.config.bootable == ConfigFeature.auto:
         return
 
-    dst = efi_boot_binary(state)
+    dst = efi_boot_binary(context)
     with umask(~0o700):
-        (state.root / dst).parent.mkdir(parents=True, exist_ok=True)
+        (context.root / dst).parent.mkdir(parents=True, exist_ok=True)
 
-    arch = state.config.architecture.to_efi()
+    arch = context.config.architecture.to_efi()
 
     signed = [
         f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu
@@ -937,7 +941,7 @@ def install_shim(state: MkosiState) -> None:
         f"usr/share/shim/shim{arch}.efi", # Arch
     ]
 
-    find_and_install_shim_binary(state, "shim", signed, unsigned, dst)
+    find_and_install_shim_binary(context, "shim", signed, unsigned, dst)
 
     signed = [
         f"usr/lib/shim/mm{arch}.efi.signed", # Debian
@@ -952,42 +956,42 @@ def install_shim(state: MkosiState) -> None:
         f"usr/share/shim/mm{arch}.efi", # Arch
     ]
 
-    find_and_install_shim_binary(state, "mok", signed, unsigned, dst.parent)
+    find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent)
 
 
-def find_grub_bios_directory(state: MkosiState) -> Optional[Path]:
+def find_grub_bios_directory(context: Context) -> Optional[Path]:
     for d in ("usr/lib/grub/i386-pc", "usr/share/grub2/i386-pc"):
-        if (p := state.root / d).exists() and any(p.iterdir()):
+        if (p := context.root / d).exists() and any(p.iterdir()):
             return p
 
     return None
 
 
-def find_grub_binary(state: MkosiState, binary: str) -> Optional[Path]:
+def find_grub_binary(context: Context, binary: str) -> Optional[Path]:
     assert "grub" in binary and "grub2" not in binary
-    return find_binary(binary, root=state.root) or find_binary(binary.replace("grub", "grub2"), root=state.root)
+    return find_binary(binary, root=context.root) or find_binary(binary.replace("grub", "grub2"), root=context.root)
 
 
-def find_grub_prefix(state: MkosiState) -> Optional[str]:
-    path = find_grub_binary(state, "grub-mkimage")
+def find_grub_prefix(context: Context) -> Optional[str]:
+    path = find_grub_binary(context, "grub-mkimage")
     if path is None:
         return None
 
     return "grub2" if "grub2" in os.fspath(path) else "grub"
 
 
-def want_grub_efi(state: MkosiState) -> bool:
-    if state.config.bootable == ConfigFeature.disabled:
+def want_grub_efi(context: Context) -> bool:
+    if context.config.bootable == ConfigFeature.disabled:
         return False
 
-    if state.config.bootloader != Bootloader.grub:
+    if context.config.bootloader != Bootloader.grub:
         return False
 
-    if state.config.overlay or state.config.output_format.is_extension_image():
+    if context.config.overlay or context.config.output_format.is_extension_image():
         return False
 
-    if not any((state.root / "efi").rglob("grub*.efi")):
-        if state.config.bootable == ConfigFeature.enabled:
+    if not any((context.root / "efi").rglob("grub*.efi")):
+        if context.config.bootable == ConfigFeature.enabled:
             die("A bootable EFI image with grub was requested but grub for EFI is not installed in /efi")
 
         return False
@@ -995,43 +999,43 @@ def want_grub_efi(state: MkosiState) -> bool:
     return True
 
 
-def want_grub_bios(state: MkosiState, partitions: Sequence[Partition] = ()) -> bool:
-    if state.config.bootable == ConfigFeature.disabled:
+def want_grub_bios(context: Context, partitions: Sequence[Partition] = ()) -> bool:
+    if context.config.bootable == ConfigFeature.disabled:
         return False
 
-    if state.config.output_format != OutputFormat.disk:
+    if context.config.output_format != OutputFormat.disk:
         return False
 
-    if state.config.bios_bootloader != BiosBootloader.grub:
+    if context.config.bios_bootloader != BiosBootloader.grub:
         return False
 
-    if state.config.overlay:
+    if context.config.overlay:
         return False
 
-    have = find_grub_bios_directory(state) is not None
-    if not have and state.config.bootable == ConfigFeature.enabled:
+    have = find_grub_bios_directory(context) is not None
+    if not have and context.config.bootable == ConfigFeature.enabled:
         die("A BIOS bootable image with grub was requested but grub for BIOS is not installed")
 
     bios = any(p.type == Partition.GRUB_BOOT_PARTITION_UUID for p in partitions)
-    if partitions and not bios and state.config.bootable == ConfigFeature.enabled:
+    if partitions and not bios and context.config.bootable == ConfigFeature.enabled:
         die("A BIOS bootable image with grub was requested but no BIOS Boot Partition was configured")
 
     esp = any(p.type == "esp" for p in partitions)
-    if partitions and not esp and state.config.bootable == ConfigFeature.enabled:
+    if partitions and not esp and context.config.bootable == ConfigFeature.enabled:
         die("A BIOS bootable image with grub was requested but no ESP partition was configured")
 
     root = any(p.type.startswith("root") or p.type.startswith("usr") for p in partitions)
-    if partitions and not root and state.config.bootable == ConfigFeature.enabled:
+    if partitions and not root and context.config.bootable == ConfigFeature.enabled:
         die("A BIOS bootable image with grub was requested but no root or usr partition was configured")
 
     installed = True
 
     for binary in ("grub-mkimage", "grub-bios-setup"):
-        path = find_grub_binary(state, binary)
+        path = find_grub_binary(context, binary)
         if path is not None:
             continue
 
-        if state.config.bootable == ConfigFeature.enabled:
+        if context.config.bootable == ConfigFeature.enabled:
             die(f"A BIOS bootable image with grub was requested but {binary} was not found")
 
         installed = False
@@ -1039,12 +1043,12 @@ def want_grub_bios(state: MkosiState, partitions: Sequence[Partition] = ()) -> b
     return (have and bios and esp and root and installed) if partitions else have
 
 
-def prepare_grub_config(state: MkosiState) -> Optional[Path]:
-    prefix = find_grub_prefix(state)
+def prepare_grub_config(context: Context) -> Optional[Path]:
+    prefix = find_grub_prefix(context)
     if not prefix:
         return None
 
-    config = state.root / "efi" / prefix / "grub.cfg"
+    config = context.root / "efi" / prefix / "grub.cfg"
     with umask(~0o700):
         config.parent.mkdir(exist_ok=True)
 
@@ -1057,34 +1061,34 @@ def prepare_grub_config(state: MkosiState) -> Optional[Path]:
     return config
 
 
-def prepare_grub_efi(state: MkosiState) -> None:
-    if not want_grub_efi(state):
+def prepare_grub_efi(context: Context) -> None:
+    if not want_grub_efi(context):
         return
 
-    prefix = find_grub_prefix(state)
+    prefix = find_grub_prefix(context)
     assert prefix
 
     # Signed EFI grub shipped by distributions reads its configuration from /EFI/<distribution>/grub.cfg in
     # the ESP so let's put a shim there to redirect to the actual configuration file.
-    earlyconfig = state.root / "efi/EFI" / state.config.distribution.name / "grub.cfg"
+    earlyconfig = context.root / "efi/EFI" / context.config.distribution.name / "grub.cfg"
     with umask(~0o700):
         earlyconfig.parent.mkdir(parents=True, exist_ok=True)
 
     # Read the actual config file from the root of the ESP.
     earlyconfig.write_text(f"configfile /{prefix}/grub.cfg\n")
 
-    config = prepare_grub_config(state)
+    config = prepare_grub_config(context)
     assert config
 
     with config.open("a") as f:
         f.write('if [ "${grub_platform}" == "efi" ]; then\n')
 
-        for uki in (state.root / "boot/EFI/Linux").glob("*.efi"):
+        for uki in (context.root / "boot/EFI/Linux").glob("*.efi"):
             f.write(
                 textwrap.dedent(
                     f"""\
                     menuentry "{uki.stem}" {{
-                        chainloader /{uki.relative_to(state.root / "boot")}
+                        chainloader /{uki.relative_to(context.root / "boot")}
                     }}
                     """
                 )
@@ -1093,46 +1097,46 @@ def prepare_grub_efi(state: MkosiState) -> None:
         f.write("fi\n")
 
 
-def prepare_grub_bios(state: MkosiState, partitions: Sequence[Partition]) -> None:
-    if not want_grub_bios(state, partitions):
+def prepare_grub_bios(context: Context, partitions: Sequence[Partition]) -> None:
+    if not want_grub_bios(context, partitions):
         return
 
-    config = prepare_grub_config(state)
+    config = prepare_grub_config(context)
     assert config
 
     root = finalize_root(partitions)
     assert root
 
-    token = find_entry_token(state)
+    token = find_entry_token(context)
 
-    dst = state.root / "boot" / token
+    dst = context.root / "boot" / token
     with umask(~0o700):
         dst.mkdir(exist_ok=True)
 
     with config.open("a") as f:
         f.write('if [ "${grub_platform}" == "pc" ]; then\n')
 
-        for kver, kimg in gen_kernel_images(state):
+        for kver, kimg in gen_kernel_images(context):
             kdst = dst / kver
             with umask(~0o700):
                 kdst.mkdir(exist_ok=True)
 
-            microcode = build_microcode_initrd(state)
-            kmods = build_kernel_modules_initrd(state, kver)
+            microcode = build_microcode_initrd(context)
+            kmods = build_kernel_modules_initrd(context, kver)
 
             with umask(~0o600):
-                kimg = Path(shutil.copy2(state.root / kimg, kdst / "vmlinuz"))
+                kimg = Path(shutil.copy2(context.root / kimg, kdst / "vmlinuz"))
                 initrds = [Path(shutil.copy2(microcode, kdst / "microcode"))] if microcode else []
                 initrds += [
                     Path(shutil.copy2(initrd, dst / initrd.name))
-                    for initrd in (state.config.initrds or [build_initrd(state)])
+                    for initrd in (context.config.initrds or [build_initrd(context)])
                 ]
                 initrds += [Path(shutil.copy2(kmods, kdst / "kmods"))]
 
-                image = Path("/") / kimg.relative_to(state.root / "boot")
-                cmdline = " ".join(state.config.kernel_command_line)
+                image = Path("/") / kimg.relative_to(context.root / "boot")
+                cmdline = " ".join(context.config.kernel_command_line)
                 initrds = " ".join(
-                    [os.fspath(Path("/") / initrd.relative_to(state.root / "boot")) for initrd in initrds]
+                    [os.fspath(Path("/") / initrd.relative_to(context.root / "boot")) for initrd in initrds]
                 )
 
                 f.write(
@@ -1152,16 +1156,16 @@ def prepare_grub_bios(state: MkosiState, partitions: Sequence[Partition]) -> Non
     # so we're forced to reimplement its functionality. Luckily that's pretty simple, run grub-mkimage to
     # generate the required core.img and copy the relevant files to the ESP.
 
-    mkimage = find_grub_binary(state, "grub-mkimage")
+    mkimage = find_grub_binary(context, "grub-mkimage")
     assert mkimage
 
-    directory = find_grub_bios_directory(state)
+    directory = find_grub_bios_directory(context)
     assert directory
 
-    prefix = find_grub_prefix(state)
+    prefix = find_grub_prefix(context)
     assert prefix
 
-    dst = state.root / "efi" / prefix / "i386-pc"
+    dst = context.root / "efi" / prefix / "i386-pc"
     dst.mkdir(parents=True, exist_ok=True)
 
     with tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig:
@@ -1177,7 +1181,7 @@ def prepare_grub_bios(state: MkosiState, partitions: Sequence[Partition]) -> Non
         earlyconfig.flush()
 
         bwrap(
-            state,
+            context,
             [
                 mkimage,
                 "--directory", directory,
@@ -1193,7 +1197,7 @@ def prepare_grub_bios(state: MkosiState, partitions: Sequence[Partition]) -> Non
                 "search",
                 "search_fs_file",
             ],
-            options=["--bind", state.root / "usr", "/usr"],
+            options=["--bind", context.root / "usr", "/usr"],
         )
 
     for p in directory.glob("*.mod"):
@@ -1205,24 +1209,24 @@ def prepare_grub_bios(state: MkosiState, partitions: Sequence[Partition]) -> Non
     shutil.copy2(directory / "modinfo.sh", dst)
     shutil.copy2(directory / "boot.img", dst)
 
-    dst = state.root / "efi" / prefix / "fonts"
+    dst = context.root / "efi" / prefix / "fonts"
     with umask(~0o700):
         dst.mkdir(exist_ok=True)
 
     for prefix in ("grub", "grub2"):
-        unicode = state.root / "usr/share" / prefix / "unicode.pf2"
+        unicode = context.root / "usr/share" / prefix / "unicode.pf2"
         if unicode.exists():
             shutil.copy2(unicode, dst)
 
 
-def install_grub_bios(state: MkosiState, partitions: Sequence[Partition]) -> None:
-    if not want_grub_bios(state, partitions):
+def install_grub_bios(context: Context, partitions: Sequence[Partition]) -> None:
+    if not want_grub_bios(context, partitions):
         return
 
-    setup = find_grub_binary(state, "grub-bios-setup")
+    setup = find_grub_binary(context, "grub-bios-setup")
     assert setup
 
-    prefix = find_grub_prefix(state)
+    prefix = find_grub_prefix(context)
     assert prefix
 
     # grub-bios-setup insists on being able to open the root device that --directory is located on, which
@@ -1230,27 +1234,27 @@ def install_grub_bios(state: MkosiState, partitions: Sequence[Partition]) -> Non
     # bios boot partition. To make installation work unprivileged, we trick grub into thinking that the root
     # device is our image by mounting over its /proc/self/mountinfo file (where it gets its information from)
     # with our own file correlating the root directory to our image file.
-    mountinfo = state.workspace / "mountinfo"
-    mountinfo.write_text(f"1 0 1:1 / / - fat {state.staging / state.config.output_with_format}\n")
+    mountinfo = context.workspace / "mountinfo"
+    mountinfo.write_text(f"1 0 1:1 / / - fat {context.staging / context.config.output_with_format}\n")
 
     with complete_step("Installing grub boot loader…"):
         # We don't setup the mountinfo bind mount with bwrap because we need to know the child process pid to
         # be able to do the mount and we don't know the pid beforehand.
         bwrap(
-            state,
+            context,
             [
                 "sh", "-c", f"mount --bind {mountinfo} /proc/$$/mountinfo && exec $0 \"$@\"",
                 setup,
-                "--directory", state.root / "efi" / prefix / "i386-pc",
+                "--directory", context.root / "efi" / prefix / "i386-pc",
                 *(["--verbose"] if ARG_DEBUG.get() else []),
-                state.staging / state.config.output_with_format,
+                context.staging / context.config.output_with_format,
             ],
-            options=["--bind", state.root / "usr", "/usr"],
+            options=["--bind", context.root / "usr", "/usr"],
         )
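
The mountinfo workaround used by install_grub_bios above is worth a standalone illustration. The sketch below shows the same idea outside of mkosi's bwrap() helper; the function name, the fake device numbers in the mountinfo line, and the assumption that the caller is already allowed to mount (root, or a user namespace such as the one bwrap sets up) are illustrative and not part of this commit.

    import subprocess
    from pathlib import Path

    def run_with_fake_mountinfo(image: Path, workspace: Path, cmd: list[str]) -> None:
        # Fabricate a one-line mountinfo that claims "/" lives on our image file.
        mountinfo = workspace / "mountinfo"
        mountinfo.write_text(f"1 0 1:1 / / - fat {image}\n")
        # The bind mount has to happen in the child itself: $$ expands to the shell's
        # PID, and exec keeps that PID, so the covered path is exactly the
        # /proc/<pid>/mountinfo that the re-executed tool will read as /proc/self/mountinfo.
        subprocess.run(
            ["sh", "-c", f'mount --bind {mountinfo} /proc/$$/mountinfo && exec "$0" "$@"', *cmd],
            check=True,
        )
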
 
 
 def install_tree(
-    state: MkosiState,
+    context: Context,
     src: Path,
     dst: Path,
     target: Optional[Path] = None,
@@ -1265,77 +1269,77 @@ def install_tree(
         t.parent.mkdir(parents=True, exist_ok=True)
 
     if src.is_dir() or (src.is_file() and target):
-        copy_tree(src, t, preserve_owner=preserve_owner, use_subvolumes=state.config.use_subvolumes)
+        copy_tree(src, t, preserve_owner=preserve_owner, use_subvolumes=context.config.use_subvolumes)
     elif src.suffix == ".tar":
-        extract_tar(state, src, t)
+        extract_tar(context, src, t)
     elif src.suffix == ".raw":
         run(["systemd-dissect", "--copy-from", src, "/", t])
     else:
         # If we get an unknown file without a target, we just copy it into /.
-        copy_tree(src, t, preserve_owner=preserve_owner, use_subvolumes=state.config.use_subvolumes)
+        copy_tree(src, t, preserve_owner=preserve_owner, use_subvolumes=context.config.use_subvolumes)
 
 
-def install_base_trees(state: MkosiState) -> None:
-    if not state.config.base_trees or state.config.overlay:
+def install_base_trees(context: Context) -> None:
+    if not context.config.base_trees or context.config.overlay:
         return
 
     with complete_step("Copying in base trees…"):
-        for path in state.config.base_trees:
-            install_tree(state, path, state.root)
+        for path in context.config.base_trees:
+            install_tree(context, path, context.root)
 
 
-def install_skeleton_trees(state: MkosiState) -> None:
-    if not state.config.skeleton_trees:
+def install_skeleton_trees(context: Context) -> None:
+    if not context.config.skeleton_trees:
         return
 
     with complete_step("Copying in skeleton file trees…"):
-        for tree in state.config.skeleton_trees:
-            install_tree(state, tree.source, state.root, tree.target, preserve_owner=False)
+        for tree in context.config.skeleton_trees:
+            install_tree(context, tree.source, context.root, tree.target, preserve_owner=False)
 
 
-def install_package_manager_trees(state: MkosiState) -> None:
+def install_package_manager_trees(context: Context) -> None:
     # Ensure /etc exists in the package manager tree
-    (state.pkgmngr / "etc").mkdir(exist_ok=True)
+    (context.pkgmngr / "etc").mkdir(exist_ok=True)
 
     # Required to be able to access certificates in the sandbox when running from nix.
     if Path("/etc/static").is_symlink():
-        (state.pkgmngr / "etc/static").symlink_to(Path("/etc/static").readlink())
+        (context.pkgmngr / "etc/static").symlink_to(Path("/etc/static").readlink())
 
-    if not state.config.package_manager_trees:
+    if not context.config.package_manager_trees:
         return
 
     with complete_step("Copying in package manager file trees…"):
-        for tree in state.config.package_manager_trees:
-            install_tree(state, tree.source, state.workspace / "pkgmngr", tree.target, preserve_owner=False)
+        for tree in context.config.package_manager_trees:
+            install_tree(context, tree.source, context.workspace / "pkgmngr", tree.target, preserve_owner=False)
 
 
-def install_extra_trees(state: MkosiState) -> None:
-    if not state.config.extra_trees:
+def install_extra_trees(context: Context) -> None:
+    if not context.config.extra_trees:
         return
 
     with complete_step("Copying in extra file trees…"):
-        for tree in state.config.extra_trees:
-            install_tree(state, tree.source, state.root, tree.target, preserve_owner=False)
+        for tree in context.config.extra_trees:
+            install_tree(context, tree.source, context.root, tree.target, preserve_owner=False)
 
 
-def install_build_dest(state: MkosiState) -> None:
-    if not any(state.install_dir.iterdir()):
+def install_build_dest(context: Context) -> None:
+    if not any(context.install_dir.iterdir()):
         return
 
     with complete_step("Copying in build tree…"):
-        copy_tree(state.install_dir, state.root, use_subvolumes=state.config.use_subvolumes)
+        copy_tree(context.install_dir, context.root, use_subvolumes=context.config.use_subvolumes)
 
 
 def gzip_binary() -> str:
     return "pigz" if shutil.which("pigz") else "gzip"
 
 
-def gen_kernel_images(state: MkosiState) -> Iterator[tuple[str, Path]]:
-    if not (state.root / "usr/lib/modules").exists():
+def gen_kernel_images(context: Context) -> Iterator[tuple[str, Path]]:
+    if not (context.root / "usr/lib/modules").exists():
         return
 
     for kver in sorted(
-        (k for k in (state.root / "usr/lib/modules").iterdir() if k.is_dir()),
+        (k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()),
         key=lambda k: GenericVersion(k.name),
         reverse=True
     ):
@@ -1349,63 +1353,63 @@ def gen_kernel_images(state: MkosiState) -> Iterator[tuple[str, Path]]:
                 break
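
gen_kernel_images sorts the directories under usr/lib/modules with GenericVersion and yields the newest kernel first. A quick illustration of why a version-aware key is needed instead of plain string order; the regex key below is a simplified stand-in for GenericVersion, which implements a more complete version comparison.

    import re

    versions = ["6.9.1-100.fc39", "6.10.0-200.fc40"]

    # Lexicographic order puts "6.9" above "6.10", which would pick the older kernel:
    assert sorted(versions, reverse=True)[0] == "6.9.1-100.fc39"

    def numeric_key(v: str) -> list[int]:
        # Simplified stand-in: compare only the numeric fields.
        return [int(x) for x in re.findall(r"\d+", v)]

    # A numeric comparison puts 6.10 first, as intended:
    assert sorted(versions, key=numeric_key, reverse=True)[0] == "6.10.0-200.fc40"
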
 
 
-def build_initrd(state: MkosiState) -> Path:
-    if state.config.distribution == Distribution.custom:
+def build_initrd(context: Context) -> Path:
+    if context.config.distribution == Distribution.custom:
         die("Building a default initrd is not supported for custom distributions")
 
     # Default values are assigned via the parser so we go via the argument parser to construct
     # the config for the initrd.
 
-    if state.config.root_password:
-        password, hashed = state.config.root_password
+    if context.config.root_password:
+        password, hashed = context.config.root_password
         rootpwopt = f"hashed:{password}" if hashed else password
     else:
         rootpwopt = None
 
     cmdline = [
         "--directory", "",
-        "--distribution", str(state.config.distribution),
-        "--release", state.config.release,
-        "--architecture", str(state.config.architecture),
-        *(["--mirror", state.config.mirror] if state.config.mirror else []),
-        "--repository-key-check", str(state.config.repository_key_check),
-        "--repositories", ",".join(state.config.repositories),
-        "--package-manager-tree", ",".join(format_tree(t) for t in state.config.package_manager_trees),
+        "--distribution", str(context.config.distribution),
+        "--release", context.config.release,
+        "--architecture", str(context.config.architecture),
+        *(["--mirror", context.config.mirror] if context.config.mirror else []),
+        "--repository-key-check", str(context.config.repository_key_check),
+        "--repositories", ",".join(context.config.repositories),
+        "--package-manager-tree", ",".join(format_tree(t) for t in context.config.package_manager_trees),
         # Note that when compress_output == Compression.none == 0 we don't pass --compress-output which means the
         # default compression will get picked. This is exactly what we want so that initrds are always compressed.
-        *(["--compress-output", str(state.config.compress_output)] if state.config.compress_output else []),
-        "--with-network", str(state.config.with_network),
-        "--cache-only", str(state.config.cache_only),
-        "--output-dir", str(state.workspace / "initrd"),
-        *(["--workspace-dir", str(state.config.workspace_dir)] if state.config.workspace_dir else []),
-        "--cache-dir", str(state.cache_dir),
-        *(["--local-mirror", str(state.config.local_mirror)] if state.config.local_mirror else []),
-        "--incremental", str(state.config.incremental),
-        "--acl", str(state.config.acl),
-        *flatten(["--package", package] for package in state.config.initrd_packages),
-        "--output", f"{state.config.output}-initrd",
-        *(["--image-id", state.config.image_id] if state.config.image_id else []),
-        *(["--image-version", state.config.image_version] if state.config.image_version else []),
+        *(["--compress-output", str(context.config.compress_output)] if context.config.compress_output else []),
+        "--with-network", str(context.config.with_network),
+        "--cache-only", str(context.config.cache_only),
+        "--output-dir", str(context.workspace / "initrd"),
+        *(["--workspace-dir", str(context.config.workspace_dir)] if context.config.workspace_dir else []),
+        "--cache-dir", str(context.cache_dir),
+        *(["--local-mirror", str(context.config.local_mirror)] if context.config.local_mirror else []),
+        "--incremental", str(context.config.incremental),
+        "--acl", str(context.config.acl),
+        *flatten(["--package", package] for package in context.config.initrd_packages),
+        "--output", f"{context.config.output}-initrd",
+        *(["--image-id", context.config.image_id] if context.config.image_id else []),
+        *(["--image-version", context.config.image_version] if context.config.image_version else []),
         *(
-            ["--source-date-epoch", str(state.config.source_date_epoch)]
-            if state.config.source_date_epoch is not None else
+            ["--source-date-epoch", str(context.config.source_date_epoch)]
+            if context.config.source_date_epoch is not None else
             []
         ),
-        *(["--locale", state.config.locale] if state.config.locale else []),
-        *(["--locale-messages", state.config.locale_messages] if state.config.locale_messages else []),
-        *(["--keymap", state.config.keymap] if state.config.keymap else []),
-        *(["--timezone", state.config.timezone] if state.config.timezone else []),
-        *(["--hostname", state.config.hostname] if state.config.hostname else []),
+        *(["--locale", context.config.locale] if context.config.locale else []),
+        *(["--locale-messages", context.config.locale_messages] if context.config.locale_messages else []),
+        *(["--keymap", context.config.keymap] if context.config.keymap else []),
+        *(["--timezone", context.config.timezone] if context.config.timezone else []),
+        *(["--hostname", context.config.hostname] if context.config.hostname else []),
         *(["--root-password", rootpwopt] if rootpwopt else []),
-        *([f"--environment={k}='{v}'" for k, v in state.config.environment.items()]),
-        *(["--tools-tree", str(state.config.tools_tree)] if state.config.tools_tree else []),
-        *(["-f"] * state.args.force),
+        *([f"--environment={k}='{v}'" for k, v in context.config.environment.items()]),
+        *(["--tools-tree", str(context.config.tools_tree)] if context.config.tools_tree else []),
+        *(["-f"] * context.args.force),
     ]
 
     with resource_path(mkosi.resources) as r:
         cmdline += ["--include", os.fspath(r / "mkosi-initrd")]
 
-        for include in state.config.initrd_include:
+        for include in context.config.initrd_include:
             cmdline += ["--include", os.fspath(include)]
 
         args, [config] = parse_config(cmdline + ["build"])
@@ -1424,18 +1428,18 @@ def build_initrd(state: MkosiState) -> Path:
     return config.output_dir / config.output
 
 
-def build_microcode_initrd(state: MkosiState) -> Optional[Path]:
-    microcode = state.workspace / "initrd-microcode.img"
+def build_microcode_initrd(context: Context) -> Optional[Path]:
+    microcode = context.workspace / "initrd-microcode.img"
     if microcode.exists():
         return microcode
 
-    amd = state.root / "usr/lib/firmware/amd-ucode"
-    intel = state.root / "usr/lib/firmware/intel-ucode"
+    amd = context.root / "usr/lib/firmware/amd-ucode"
+    intel = context.root / "usr/lib/firmware/intel-ucode"
 
     if not amd.exists() and not intel.exists():
         return None
 
-    root = state.workspace / "initrd-microcode-root"
+    root = context.workspace / "initrd-microcode-root"
     destdir = root / "kernel/x86/microcode"
 
     with umask(~0o755):
@@ -1451,31 +1455,31 @@ def build_microcode_initrd(state: MkosiState) -> Optional[Path]:
             for p in intel.iterdir():
                 f.write(p.read_bytes())
 
-    make_cpio(state, root, microcode)
+    make_cpio(context, root, microcode)
 
     return microcode
 
 
-def build_kernel_modules_initrd(state: MkosiState, kver: str) -> Path:
-    kmods = state.workspace / f"initrd-kernel-modules-{kver}.img"
+def build_kernel_modules_initrd(context: Context, kver: str) -> Path:
+    kmods = context.workspace / f"initrd-kernel-modules-{kver}.img"
     if kmods.exists():
         return kmods
 
     make_cpio(
-        state, state.root, kmods,
+        context, context.root, kmods,
         gen_required_kernel_modules(
-            state.root, kver,
-            state.config.kernel_modules_initrd_include,
-            state.config.kernel_modules_initrd_exclude,
-            state.config.kernel_modules_initrd_include_host,
+            context.root, kver,
+            context.config.kernel_modules_initrd_include,
+            context.config.kernel_modules_initrd_exclude,
+            context.config.kernel_modules_initrd_include_host,
         )
     )
 
     # Debian/Ubuntu do not compress their kernel modules, so we compress the initramfs instead. Note that
     # this is not ideal since the compressed kernel modules will all be decompressed on boot which
     # requires significant memory.
-    if state.config.distribution.is_apt_distribution():
-        maybe_compress(state, Compression.zstd, kmods, kmods)
+    if context.config.distribution.is_apt_distribution():
+        maybe_compress(context, Compression.zstd, kmods, kmods)
 
     return kmods
 
@@ -1499,13 +1503,13 @@ def join_initrds(initrds: Sequence[Path], output: Path) -> Path:
     return output
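
Only the tail of join_initrds is visible in this hunk. The property it relies on is that the kernel's initramfs unpacker processes several cpio archives that are simply concatenated one after another, skipping any zero padding between them, which is also what makes the separate microcode and kernel-modules initrds work. A minimal sketch of such a concatenation, offered as an illustration rather than a copy of the elided function body:

    from pathlib import Path
    from typing import Sequence

    def concat_initrds(initrds: Sequence[Path], output: Path) -> Path:
        with output.open("wb") as out:
            for initrd in initrds:
                data = initrd.read_bytes()
                out.write(data)
                # Pad each segment to a 4-byte boundary so the unpacker can find
                # the start of the next archive (zero padding is ignored).
                out.write(b"\0" * (-len(data) % 4))
        return output
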
 
 
-def python_binary(config: MkosiConfig) -> str:
+def python_binary(config: Config) -> str:
     # If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. If there is a tools
     # tree, just use the default python3 interpreter.
     return "python3" if config.tools_tree else os.getenv("MKOSI_INTERPRETER", "python3")
 
 
-def extract_pe_section(state: MkosiState, binary: Path, section: str, output: Path) -> None:
+def extract_pe_section(context: Context, binary: Path, section: str, output: Path) -> None:
     # When using a tools tree, we want to use the pefile module from the tools tree instead of requiring that
     # python-pefile is installed on the host. So we execute python as a subprocess to make sure we load
     # pefile from the tools tree if one is used.
@@ -1521,11 +1525,11 @@ def extract_pe_section(state: MkosiState, binary: Path, section: str, output: Pa
         """
     )
 
-    bwrap(state, [python_binary(state.config)], input=pefile)
+    bwrap(context, [python_binary(context.config)], input=pefile)
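
The inline program that extract_pe_section pipes into the chosen interpreter is elided by the hunk above. Purely to illustrate the pefile-based approach the comment describes, a standalone equivalent could look like the following; this is not the script mkosi actually embeds, and the argument handling is made up for the example.

    import sys
    from pathlib import Path

    import pefile  # third-party module, available from the tools tree

    def extract_section(binary: Path, name: str, output: Path) -> None:
        pe = pefile.PE(str(binary))
        for section in pe.sections:
            # Section names are fixed-width bytes, padded with NUL characters.
            if section.Name.rstrip(b"\0").decode() == name:
                # Trim the raw-size padding back to the section's virtual size.
                output.write_bytes(section.get_data()[:section.Misc_VirtualSize])
                return
        sys.exit(f"Section {name} not found in {binary}")

    if __name__ == "__main__":
        extract_section(Path(sys.argv[1]), sys.argv[2], Path(sys.argv[3]))
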
 
 
 def build_uki(
-    state: MkosiState,
+    context: Context,
     stub: Path,
     kver: str,
     kimg: Path,
@@ -1539,56 +1543,60 @@ def build_uki(
     if roothash:
         cmdline += [roothash]
 
-    cmdline += state.config.kernel_command_line
+    cmdline += context.config.kernel_command_line
 
     # Older versions of systemd-stub expect the cmdline section to be null terminated. We can't embed
     # nul terminators in argv so let's communicate the cmdline via a file instead.
-    (state.workspace / "cmdline").write_text(f"{' '.join(cmdline).strip()}\x00")
+    (context.workspace / "cmdline").write_text(f"{' '.join(cmdline).strip()}\x00")
 
-    if not (arch := state.config.architecture.to_efi()):
-        die(f"Architecture {state.config.architecture} does not support UEFI")
+    if not (arch := context.config.architecture.to_efi()):
+        die(f"Architecture {context.config.architecture} does not support UEFI")
 
     cmd: list[PathString] = [
         shutil.which("ukify") or "/usr/lib/systemd/ukify",
-        "--cmdline", f"@{state.workspace / 'cmdline'}",
-        "--os-release", f"@{state.root / 'usr/lib/os-release'}",
+        "--cmdline", f"@{context.workspace / 'cmdline'}",
+        "--os-release", f"@{context.root / 'usr/lib/os-release'}",
         "--stub", stub,
         "--output", output,
         "--efi-arch", arch,
         "--uname", kver,
     ]
 
-    if not state.config.tools_tree:
-        for p in state.config.extra_search_paths:
+    if not context.config.tools_tree:
+        for p in context.config.extra_search_paths:
             cmd += ["--tools", p]
 
-    if state.config.secure_boot:
-        assert state.config.secure_boot_key
-        assert state.config.secure_boot_certificate
+    if context.config.secure_boot:
+        assert context.config.secure_boot_key
+        assert context.config.secure_boot_certificate
 
         cmd += ["--sign-kernel"]
 
-        if state.config.secure_boot_sign_tool != SecureBootSignTool.pesign:
+        if context.config.secure_boot_sign_tool != SecureBootSignTool.pesign:
             cmd += [
                 "--signtool", "sbsign",
-                "--secureboot-private-key", state.config.secure_boot_key,
-                "--secureboot-certificate", state.config.secure_boot_certificate,
+                "--secureboot-private-key",
+                context.config.secure_boot_key,
+                "--secureboot-certificate",
+                context.config.secure_boot_certificate,
             ]
         else:
-            pesign_prepare(state)
+            pesign_prepare(context)
             cmd += [
                 "--signtool", "pesign",
-                "--secureboot-certificate-dir", state.workspace / "pesign",
-                "--secureboot-certificate-name", certificate_common_name(state, state.config.secure_boot_certificate),
+                "--secureboot-certificate-dir",
+                context.workspace / "pesign",
+                "--secureboot-certificate-name",
+                certificate_common_name(context, context.config.secure_boot_certificate),
             ]
 
-        sign_expected_pcr = (state.config.sign_expected_pcr == ConfigFeature.enabled or
-                            (state.config.sign_expected_pcr == ConfigFeature.auto and
+        sign_expected_pcr = (context.config.sign_expected_pcr == ConfigFeature.enabled or
+                            (context.config.sign_expected_pcr == ConfigFeature.auto and
                                 shutil.which("systemd-measure") is not None))
 
         if sign_expected_pcr:
             cmd += [
-                "--pcr-private-key", state.config.secure_boot_key,
+                "--pcr-private-key", context.config.secure_boot_key,
                 "--pcr-banks", "sha1,sha256",
             ]
 
@@ -1598,10 +1606,10 @@ def build_uki(
         cmd += ["--initrd", initrd]
 
     with complete_step(f"Generating unified kernel image for kernel version {kver}"):
-        bwrap(state, cmd)
+        bwrap(context, cmd)
 
 
-def want_efi(config: MkosiConfig) -> bool:
+def want_efi(config: Config) -> bool:
     # Do we want to make the image bootable on EFI firmware?
     # Note that this returns True also in the case where autodetection might later
     # cause the system to not be made bootable on EFI firmware after the filesystem
@@ -1631,108 +1639,111 @@ def want_efi(config: MkosiConfig) -> bool:
     return True
 
 
-def find_entry_token(state: MkosiState) -> str:
+def find_entry_token(context: Context) -> str:
     if (
         "--version" not in run(["kernel-install", "--help"], stdout=subprocess.PIPE).stdout or
         systemd_tool_version("kernel-install") < "255.1"
     ):
-        return state.config.image_id or state.config.distribution.name
+        return context.config.image_id or context.config.distribution.name
 
-    output = json.loads(bwrap(state, ["kernel-install", "--root", state.root, "--json=pretty", "inspect"],
+    output = json.loads(bwrap(context, ["kernel-install", "--root", context.root, "--json=pretty", "inspect"],
                         stdout=subprocess.PIPE).stdout)
     logging.debug(json.dumps(output, indent=4))
     return cast(str, output["EntryToken"])
 
 
-def install_uki(state: MkosiState, partitions: Sequence[Partition]) -> None:
+def install_uki(context: Context, partitions: Sequence[Partition]) -> None:
     # Iterates through all kernel versions included in the image and generates a combined
     # kernel+initrd+cmdline+osrelease EFI file from it and places it in the /EFI/Linux directory of the ESP.
     # sd-boot iterates through them and shows them in the menu. These "unified" single-file images have the
     # benefit that they can be signed like normal EFI binaries, and can encode everything necessary to boot a
     # specific root device, including the root hash.
 
-    if not want_efi(state.config) or state.config.output_format in (OutputFormat.uki, OutputFormat.esp):
+    if not want_efi(context.config) or context.config.output_format in (OutputFormat.uki, OutputFormat.esp):
         return
 
-    arch = state.config.architecture.to_efi()
-    stub = state.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub"
-    if not stub.exists() and state.config.bootable == ConfigFeature.auto:
+    arch = context.config.architecture.to_efi()
+    stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub"
+    if not stub.exists() and context.config.bootable == ConfigFeature.auto:
         return
 
     roothash = finalize_roothash(partitions)
 
-    for kver, kimg in gen_kernel_images(state):
+    for kver, kimg in gen_kernel_images(context):
         # See https://systemd.io/AUTOMATIC_BOOT_ASSESSMENT/#boot-counting
         boot_count = ""
-        if (state.root / "etc/kernel/tries").exists():
-            boot_count = f'+{(state.root / "etc/kernel/tries").read_text().strip()}'
+        if (context.root / "etc/kernel/tries").exists():
+            boot_count = f'+{(context.root / "etc/kernel/tries").read_text().strip()}'
 
-        if state.config.bootloader == Bootloader.uki:
-            if state.config.shim_bootloader != ShimBootloader.none:
-                boot_binary = state.root / shim_second_stage_binary(state)
+        if context.config.bootloader == Bootloader.uki:
+            if context.config.shim_bootloader != ShimBootloader.none:
+                boot_binary = context.root / shim_second_stage_binary(context)
             else:
-                boot_binary = state.root / efi_boot_binary(state)
+                boot_binary = context.root / efi_boot_binary(context)
         else:
-            token = find_entry_token(state)
+            token = find_entry_token(context)
             if roothash:
                 _, _, h = roothash.partition("=")
-                boot_binary = state.root / f"boot/EFI/Linux/{token}-{kver}-{h}{boot_count}.efi"
+                boot_binary = context.root / f"boot/EFI/Linux/{token}-{kver}-{h}{boot_count}.efi"
             else:
-                boot_binary = state.root / f"boot/EFI/Linux/{token}-{kver}{boot_count}.efi"
+                boot_binary = context.root / f"boot/EFI/Linux/{token}-{kver}{boot_count}.efi"
 
-        microcode = build_microcode_initrd(state)
+        microcode = build_microcode_initrd(context)
 
         initrds = [microcode] if microcode else []
-        initrds += state.config.initrds or [build_initrd(state)]
+        initrds += context.config.initrds or [build_initrd(context)]
 
-        if state.config.kernel_modules_initrd:
-            initrds += [build_kernel_modules_initrd(state, kver)]
+        if context.config.kernel_modules_initrd:
+            initrds += [build_kernel_modules_initrd(context, kver)]
 
         # Make sure the parent directory where we'll be writing the UKI exists.
         with umask(~0o700):
             boot_binary.parent.mkdir(parents=True, exist_ok=True)
 
-        if (state.root / "etc/kernel/cmdline").exists():
-            cmdline = [(state.root / "etc/kernel/cmdline").read_text().strip()]
-        elif (state.root / "usr/lib/kernel/cmdline").exists():
-            cmdline = [(state.root / "usr/lib/kernel/cmdline").read_text().strip()]
+        if (context.root / "etc/kernel/cmdline").exists():
+            cmdline = [(context.root / "etc/kernel/cmdline").read_text().strip()]
+        elif (context.root / "usr/lib/kernel/cmdline").exists():
+            cmdline = [(context.root / "usr/lib/kernel/cmdline").read_text().strip()]
         else:
             cmdline = []
 
-        build_uki(state, stub, kver, state.root / kimg, initrds, cmdline, boot_binary, roothash=roothash)
+        build_uki(context, stub, kver, context.root / kimg, initrds, cmdline, boot_binary, roothash=roothash)
 
-        if not (state.staging / state.config.output_split_initrd).exists():
+        if not (context.staging / context.config.output_split_initrd).exists():
             # Extract the combined initrds from the UKI so we can use it for direct kernel boot with qemu
             # if needed.
-            extract_pe_section(state, boot_binary, ".initrd", state.staging / state.config.output_split_initrd)
+            extract_pe_section(context, boot_binary, ".initrd", context.staging / context.config.output_split_initrd)
 
-        if not (state.staging / state.config.output_split_uki).exists():
-            shutil.copy(boot_binary, state.staging / state.config.output_split_uki)
+        if not (context.staging / context.config.output_split_uki).exists():
+            shutil.copy(boot_binary, context.staging / context.config.output_split_uki)
 
             # ukify will have signed the kernel image as well. Let's make sure we put the signed kernel
             # image in the output directory instead of the unsigned one by reading it from the UKI.
-            extract_pe_section(state, boot_binary, ".linux", state.staging / state.config.output_split_kernel)
+            extract_pe_section(context, boot_binary, ".linux", context.staging / context.config.output_split_kernel)
 
         print_output_size(boot_binary)
 
-        if state.config.bootloader == Bootloader.uki:
+        if context.config.bootloader == Bootloader.uki:
             break
 
-    if state.config.bootable == ConfigFeature.enabled and not (state.staging / state.config.output_split_uki).exists():
+    if (
+        context.config.bootable == ConfigFeature.enabled and
+        not (context.staging / context.config.output_split_uki).exists()
+    ):
         die("A bootable image was requested but no kernel was found")
 
 
-def make_uki(state: MkosiState, stub: Path, kver: str, kimg: Path, output: Path) -> None:
-    microcode = build_microcode_initrd(state)
-    make_cpio(state, state.root, state.workspace / "initrd")
-    maybe_compress(state, state.config.compress_output, state.workspace / "initrd", state.workspace / "initrd")
+def make_uki(context: Context, stub: Path, kver: str, kimg: Path, output: Path) -> None:
+    microcode = build_microcode_initrd(context)
+    make_cpio(context, context.root, context.workspace / "initrd")
+    maybe_compress(context, context.config.compress_output, context.workspace / "initrd", context.workspace / "initrd")
 
     initrds = [microcode] if microcode else []
-    initrds += [state.workspace / "initrd"]
+    initrds += [context.workspace / "initrd"]
 
-    build_uki(state, stub, kver, kimg, initrds, [], output)
-    extract_pe_section(state, output, ".linux", state.staging / state.config.output_split_kernel)
-    extract_pe_section(state, output, ".initrd", state.staging / state.config.output_split_initrd)
+    build_uki(context, stub, kver, kimg, initrds, [], output)
+    extract_pe_section(context, output, ".linux", context.staging / context.config.output_split_kernel)
+    extract_pe_section(context, output, ".initrd", context.staging / context.config.output_split_initrd)
 
 
 def compressor_command(compression: Compression) -> list[PathString]:
@@ -1748,10 +1759,10 @@ def compressor_command(compression: Compression) -> list[PathString]:
         die(f"Unknown compression {compression}")
 
 
-def maybe_compress(state: MkosiState, compression: Compression, src: Path, dst: Optional[Path] = None) -> None:
+def maybe_compress(context: Context, compression: Compression, src: Path, dst: Optional[Path] = None) -> None:
     if not compression or src.is_dir():
         if dst:
-            move_tree(src, dst, use_subvolumes=state.config.use_subvolumes)
+            move_tree(src, dst, use_subvolumes=context.config.use_subvolumes)
         return
 
     if not dst:
@@ -1762,36 +1773,36 @@ def maybe_compress(state: MkosiState, compression: Compression, src: Path, dst:
             src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file.
 
             with dst.open("wb") as o:
-                bwrap(state, compressor_command(compression), stdin=i, stdout=o)
+                bwrap(context, compressor_command(compression), stdin=i, stdout=o)
 
 
-def copy_vmlinuz(state: MkosiState) -> None:
-    if (state.staging / state.config.output_split_kernel).exists():
+def copy_vmlinuz(context: Context) -> None:
+    if (context.staging / context.config.output_split_kernel).exists():
         return
 
-    for _, kimg in gen_kernel_images(state):
-        shutil.copy(state.root / kimg, state.staging / state.config.output_split_kernel)
+    for _, kimg in gen_kernel_images(context):
+        shutil.copy(context.root / kimg, context.staging / context.config.output_split_kernel)
         break
 
 
-def copy_initrd(state: MkosiState) -> None:
-    if (state.staging / state.config.output_split_initrd).exists():
+def copy_initrd(context: Context) -> None:
+    if (context.staging / context.config.output_split_initrd).exists():
         return
 
-    if state.config.bootable == ConfigFeature.disabled:
+    if context.config.bootable == ConfigFeature.disabled:
         return
 
-    if state.config.output_format not in (OutputFormat.disk, OutputFormat.directory):
+    if context.config.output_format not in (OutputFormat.disk, OutputFormat.directory):
         return
 
-    for kver, _ in gen_kernel_images(state):
-        microcode = build_microcode_initrd(state)
+    for kver, _ in gen_kernel_images(context):
+        microcode = build_microcode_initrd(context)
         initrds = [microcode] if microcode else []
-        initrds += state.config.initrds or [build_initrd(state)]
-        if state.config.kernel_modules_initrd:
-            kver = next(gen_kernel_images(state))[0]
-            initrds += [build_kernel_modules_initrd(state, kver)]
-        join_initrds(initrds, state.staging / state.config.output_split_initrd)
+        initrds += context.config.initrds or [build_initrd(context)]
+        if context.config.kernel_modules_initrd:
+            kver = next(gen_kernel_images(context))[0]
+            initrds += [build_kernel_modules_initrd(context, kver)]
+        join_initrds(initrds, context.staging / context.config.output_split_initrd)
         break
 
 
@@ -1806,35 +1817,35 @@ def hash_file(of: TextIO, path: Path) -> None:
     of.write(h.hexdigest() + " *" + path.name + "\n")
 
 
-def calculate_sha256sum(state: MkosiState) -> None:
-    if state.config.output_format == OutputFormat.directory:
+def calculate_sha256sum(context: Context) -> None:
+    if context.config.output_format == OutputFormat.directory:
         return None
 
-    if not state.config.checksum:
+    if not context.config.checksum:
         return None
 
     with complete_step("Calculating SHA256SUMS…"):
-        with open(state.workspace / state.config.output_checksum, "w") as f:
-            for p in state.staging.iterdir():
+        with open(context.workspace / context.config.output_checksum, "w") as f:
+            for p in context.staging.iterdir():
                 hash_file(f, p)
 
-        (state.workspace / state.config.output_checksum).rename(state.staging / state.config.output_checksum)
+        (context.workspace / context.config.output_checksum).rename(context.staging / context.config.output_checksum)
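
hash_file, whose tail is shown at the top of this hunk, emits the coreutils checksum format: the " *" before the file name marks binary mode, so the resulting SHA256SUMS file can be verified with sha256sum --check. A streaming variant of the same idea, hedged as an illustration since the original's read loop falls outside the hunk:

    import hashlib
    from pathlib import Path
    from typing import TextIO

    def hash_file_sketch(of: TextIO, path: Path, bs: int = 1 << 20) -> None:
        h = hashlib.sha256()
        # Hash in chunks so large disk images never need to fit in memory.
        with path.open("rb") as f:
            while chunk := f.read(bs):
                h.update(chunk)
        of.write(h.hexdigest() + " *" + path.name + "\n")
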
 
 
-def calculate_signature(state: MkosiState) -> None:
-    if not state.config.sign:
+def calculate_signature(context: Context) -> None:
+    if not context.config.sign:
         return None
 
     with complete_step("Signing SHA256SUMS…"):
         cmdline: list[PathString] = ["gpg", "--detach-sign"]
 
         # Need to specify key before file to sign
-        if state.config.key is not None:
-            cmdline += ["--default-key", state.config.key]
+        if context.config.key is not None:
+            cmdline += ["--default-key", context.config.key]
 
         cmdline += [
-            "--output", state.staging / state.config.output_signature,
-            state.staging / state.config.output_checksum,
+            "--output", context.staging / context.config.output_signature,
+            context.staging / context.config.output_checksum,
         ]
 
         # Set the path of the keyring to use based on the environment if possible and fallback to the default
@@ -1845,7 +1856,7 @@ def calculate_signature(state: MkosiState) -> None:
             env |= dict(GPGTTY=os.ttyname(sys.stderr.fileno()))
 
         # Do not output warnings about keyring permissions
-        bwrap(state, cmdline, stderr=subprocess.DEVNULL, env=env)
+        bwrap(context, cmdline, stderr=subprocess.DEVNULL, env=env)
 
 
 def dir_size(path: Union[Path, os.DirEntry[str]]) -> int:
@@ -1863,19 +1874,19 @@ def dir_size(path: Union[Path, os.DirEntry[str]]) -> int:
     return dir_sum
 
 
-def save_manifest(state: MkosiState, manifest: Optional[Manifest]) -> None:
+def save_manifest(context: Context, manifest: Optional[Manifest]) -> None:
     if not manifest:
         return
 
     if manifest.has_data():
-        if ManifestFormat.json in state.config.manifest_format:
-            with complete_step(f"Saving manifest {state.config.output_manifest}"):
-                with open(state.staging / state.config.output_manifest, 'w') as f:
+        if ManifestFormat.json in context.config.manifest_format:
+            with complete_step(f"Saving manifest {context.config.output_manifest}"):
+                with open(context.staging / context.config.output_manifest, 'w') as f:
                     manifest.write_json(f)
 
-        if ManifestFormat.changelog in state.config.manifest_format:
-            with complete_step(f"Saving report {state.config.output_changelog}"):
-                with open(state.staging / state.config.output_changelog, 'w') as f:
+        if ManifestFormat.changelog in context.config.manifest_format:
+            with complete_step(f"Saving report {context.config.output_changelog}"):
+                with open(context.staging / context.config.output_changelog, 'w') as f:
                     manifest.write_package_report(f)
 
 
@@ -1895,7 +1906,7 @@ def empty_directory(path: Path) -> None:
         pass
 
 
-def unlink_output(args: MkosiArgs, config: MkosiConfig) -> None:
+def unlink_output(args: Args, config: Config) -> None:
     # We remove any cached images if the user either used --force twice, or called "clean" with it
     # passed once. Let's also remove the downloaded package cache if the user specified one additional
     # "--force".
@@ -1934,7 +1945,7 @@ def unlink_output(args: MkosiArgs, config: MkosiConfig) -> None:
                 ))
 
 
-def cache_tree_paths(config: MkosiConfig) -> tuple[Path, Path, Path]:
+def cache_tree_paths(config: Config) -> tuple[Path, Path, Path]:
     fragments = [config.distribution, config.release, config.architecture]
 
     if config.image:
@@ -1950,7 +1961,7 @@ def cache_tree_paths(config: MkosiConfig) -> tuple[Path, Path, Path]:
     )
 
 
-def check_inputs(config: MkosiConfig) -> None:
+def check_inputs(config: Config) -> None:
     """
     Make sure all the inputs exist that aren't checked during config parsing because they might be created by an
     earlier build.
@@ -1977,7 +1988,7 @@ def check_inputs(config: MkosiConfig) -> None:
                 die(f"Initrd {p} is not a file")
 
 
-def check_outputs(config: MkosiConfig) -> None:
+def check_outputs(config: Config) -> None:
     for f in (
         config.output_with_compression,
         config.output_checksum if config.checksum else None,
@@ -2008,7 +2019,7 @@ def check_systemd_tool(*tools: PathString, version: str, reason: str, hint: Opti
             hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.")
 
 
-def check_tools(verb: Verb, config: MkosiConfig) -> None:
+def check_tools(verb: Verb, config: Config) -> None:
     if verb == Verb.build:
         if want_efi(config):
             check_systemd_tool(
@@ -2029,11 +2040,11 @@ def check_tools(verb: Verb, config: MkosiConfig) -> None:
         check_systemd_tool("systemd-nspawn", version="254", reason="boot images")
 
 
-def configure_ssh(state: MkosiState) -> None:
-    if not state.config.ssh:
+def configure_ssh(context: Context) -> None:
+    if not context.config.ssh:
         return
 
-    unitdir = state.root / "usr/lib/systemd/system"
+    unitdir = context.root / "usr/lib/systemd/system"
     with umask(~0o755):
         unitdir.mkdir(parents=True, exist_ok=True)
 
@@ -2077,76 +2088,76 @@ def configure_ssh(state: MkosiState) -> None:
             )
         )
 
-    preset = state.root / "usr/lib/systemd/system-preset/80-mkosi-ssh.preset"
+    preset = context.root / "usr/lib/systemd/system-preset/80-mkosi-ssh.preset"
     with umask(~0o755):
         preset.parent.mkdir(parents=True, exist_ok=True)
     with umask(~0o644):
         preset.write_text("enable ssh.socket\n")
 
 
-def configure_initrd(state: MkosiState) -> None:
-    if state.config.overlay or state.config.output_format.is_extension_image():
+def configure_initrd(context: Context) -> None:
+    if context.config.overlay or context.config.output_format.is_extension_image():
         return
 
     if (
-        not (state.root / "init").exists() and
-        not (state.root / "init").is_symlink() and
-        (state.root / "usr/lib/systemd/systemd").exists()
+        not (context.root / "init").exists() and
+        not (context.root / "init").is_symlink() and
+        (context.root / "usr/lib/systemd/systemd").exists()
     ):
-        (state.root / "init").symlink_to("/usr/lib/systemd/systemd")
+        (context.root / "init").symlink_to("/usr/lib/systemd/systemd")
 
-    if not state.config.make_initrd:
+    if not context.config.make_initrd:
         return
 
-    if not (state.root / "etc/initrd-release").exists() and not (state.root / "etc/initrd-release").is_symlink():
-        (state.root / "etc/initrd-release").symlink_to("/etc/os-release")
+    if not (context.root / "etc/initrd-release").exists() and not (context.root / "etc/initrd-release").is_symlink():
+        (context.root / "etc/initrd-release").symlink_to("/etc/os-release")
 
 
-def configure_clock(state: MkosiState) -> None:
-    if state.config.overlay or state.config.output_format.is_extension_image():
+def configure_clock(context: Context) -> None:
+    if context.config.overlay or context.config.output_format.is_extension_image():
         return
 
     with umask(~0o644):
-        (state.root / "usr/lib/clock-epoch").touch()
+        (context.root / "usr/lib/clock-epoch").touch()
 
 
-def run_depmod(state: MkosiState) -> None:
-    if state.config.overlay or state.config.output_format.is_extension_image():
+def run_depmod(context: Context) -> None:
+    if context.config.overlay or context.config.output_format.is_extension_image():
         return
 
-    for kver, _ in gen_kernel_images(state):
+    for kver, _ in gen_kernel_images(context):
         process_kernel_modules(
-            state.root, kver,
-            state.config.kernel_modules_include,
-            state.config.kernel_modules_exclude,
-            state.config.kernel_modules_include_host,
+            context.root, kver,
+            context.config.kernel_modules_include,
+            context.config.kernel_modules_exclude,
+            context.config.kernel_modules_include_host,
         )
 
         with complete_step(f"Running depmod for {kver}"):
-            bwrap(state, ["depmod", "--all", "--basedir", state.root, kver])
+            bwrap(context, ["depmod", "--all", "--basedir", context.root, kver])
 
 
-def run_sysusers(state: MkosiState) -> None:
+def run_sysusers(context: Context) -> None:
     if not shutil.which("systemd-sysusers"):
         logging.info("systemd-sysusers is not installed, not generating system users")
         return
 
     with complete_step("Generating system users"):
-        bwrap(state, ["systemd-sysusers", "--root", state.root])
+        bwrap(context, ["systemd-sysusers", "--root", context.root])
 
 
-def run_preset(state: MkosiState) -> None:
+def run_preset(context: Context) -> None:
     if not shutil.which("systemctl"):
         logging.info("systemctl is not installed, not applying presets")
         return
 
     with complete_step("Applying presets…"):
-        bwrap(state, ["systemctl", "--root", state.root, "preset-all"])
-        bwrap(state, ["systemctl", "--root", state.root, "--global", "preset-all"])
+        bwrap(context, ["systemctl", "--root", context.root, "preset-all"])
+        bwrap(context, ["systemctl", "--root", context.root, "--global", "preset-all"])
 
 
-def run_hwdb(state: MkosiState) -> None:
-    if state.config.overlay or state.config.output_format.is_extension_image():
+def run_hwdb(context: Context) -> None:
+    if context.config.overlay or context.config.output_format.is_extension_image():
         return
 
     if not shutil.which("systemd-hwdb"):
@@ -2154,28 +2165,28 @@ def run_hwdb(state: MkosiState) -> None:
         return
 
     with complete_step("Generating hardware database"):
-        bwrap(state, ["systemd-hwdb", "--root", state.root, "--usr", "--strict", "update"])
+        bwrap(context, ["systemd-hwdb", "--root", context.root, "--usr", "--strict", "update"])
 
     # Remove any existing hwdb in /etc in favor of the one we just put in /usr.
-    (state.root / "etc/udev/hwdb.bin").unlink(missing_ok=True)
+    (context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True)
 
 
-def run_firstboot(state: MkosiState) -> None:
-    if state.config.overlay or state.config.output_format.is_extension_image():
+def run_firstboot(context: Context) -> None:
+    if context.config.overlay or context.config.output_format.is_extension_image():
         return
 
-    password, hashed = state.config.root_password or (None, False)
+    password, hashed = context.config.root_password or (None, False)
     pwopt = "--root-password-hashed" if hashed else "--root-password"
     pwcred = "passwd.hashed-password.root" if hashed else "passwd.plaintext-password.root"
 
     settings = (
-        ("--locale",          "firstboot.locale",          state.config.locale),
-        ("--locale-messages", "firstboot.locale-messages", state.config.locale_messages),
-        ("--keymap",          "firstboot.keymap",          state.config.keymap),
-        ("--timezone",        "firstboot.timezone",        state.config.timezone),
-        ("--hostname",        None,                        state.config.hostname),
+        ("--locale",          "firstboot.locale",          context.config.locale),
+        ("--locale-messages", "firstboot.locale-messages", context.config.locale_messages),
+        ("--keymap",          "firstboot.keymap",          context.config.keymap),
+        ("--timezone",        "firstboot.timezone",        context.config.timezone),
+        ("--hostname",        None,                        context.config.hostname),
         (pwopt,               pwcred,                      password),
-        ("--root-shell",      "passwd.shell.root",         state.config.root_shell),
+        ("--root-shell",      "passwd.shell.root",         context.config.root_shell),
     )
 
     options = []
@@ -2195,32 +2206,32 @@ def run_firstboot(state: MkosiState) -> None:
         return
 
     with complete_step("Applying first boot settings"):
-        bwrap(state, ["systemd-firstboot", "--root", state.root, "--force", *options])
+        bwrap(context, ["systemd-firstboot", "--root", context.root, "--force", *options])
 
         # Initrds generally don't ship with only /usr so there's not much point in putting the credentials in
         # /usr/lib/credstore.
-        if state.config.output_format != OutputFormat.cpio or not state.config.make_initrd:
+        if context.config.output_format != OutputFormat.cpio or not context.config.make_initrd:
             with umask(~0o755):
-                (state.root / "usr/lib/credstore").mkdir(exist_ok=True)
+                (context.root / "usr/lib/credstore").mkdir(exist_ok=True)
 
             for cred, value in creds:
                 with umask(~0o600 if "password" in cred else ~0o644):
-                    (state.root / "usr/lib/credstore" / cred).write_text(value)
+                    (context.root / "usr/lib/credstore" / cred).write_text(value)
 
 
-def run_selinux_relabel(state: MkosiState) -> None:
-    if state.config.selinux_relabel == ConfigFeature.disabled:
+def run_selinux_relabel(context: Context) -> None:
+    if context.config.selinux_relabel == ConfigFeature.disabled:
         return
 
-    selinux = state.root / "etc/selinux/config"
+    selinux = context.root / "etc/selinux/config"
     if not selinux.exists():
-        if state.config.selinux_relabel == ConfigFeature.enabled:
+        if context.config.selinux_relabel == ConfigFeature.enabled:
             die("SELinux relabel is requested but could not find selinux config at /etc/selinux/config")
         return
 
-    policy = bwrap(state, ["sh", "-c", f". {selinux} && echo $SELINUXTYPE"], stdout=subprocess.PIPE).stdout.strip()
+    policy = bwrap(context, ["sh", "-c", f". {selinux} && echo $SELINUXTYPE"], stdout=subprocess.PIPE).stdout.strip()
     if not policy:
-        if state.config.selinux_relabel == ConfigFeature.enabled:
+        if context.config.selinux_relabel == ConfigFeature.enabled:
             die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config")
         return
 
@@ -2228,8 +2239,8 @@ def run_selinux_relabel(state: MkosiState) -> None:
         logging.info("setfiles is not installed, not relabeling files")
         return
 
-    fc = state.root / "etc/selinux" / policy / "contexts/files/file_contexts"
-    binpolicydir = state.root / "etc/selinux" / policy / "policy"
+    fc = context.root / "etc/selinux" / policy / "contexts/files/file_contexts"
+    binpolicydir = context.root / "etc/selinux" / policy / "policy"
 
     try:
         # The policy file is named policy.XX where XX is the policy version that indicates what features are
@@ -2239,55 +2250,55 @@ def run_selinux_relabel(state: MkosiState) -> None:
         die(f"SELinux binary policy not found in {binpolicydir}")
 
     with complete_step(f"Relabeling files using {policy} policy"):
-        bwrap(state, ["setfiles", "-mFr", state.root, "-c", binpolicy, fc, state.root],
-              check=state.config.selinux_relabel == ConfigFeature.enabled)
+        bwrap(context, ["setfiles", "-mFr", context.root, "-c", binpolicy, fc, context.root],
+              check=context.config.selinux_relabel == ConfigFeature.enabled)
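
The try/except block earlier in this function is interrupted by the hunk, so the code that locates the binary policy file is not shown; the preceding comment only notes that it is named policy.XX, with XX encoding the policy version. One plausible way to pick the newest such file, given purely as an illustration (the helper name and error handling are not taken from the commit):

    from pathlib import Path

    def newest_binary_policy(binpolicydir: Path) -> Path:
        candidates = [p for p in binpolicydir.glob("policy.*") if p.suffix[1:].isdigit()]
        if not candidates:
            raise FileNotFoundError(f"No binary policy found in {binpolicydir}")
        # policy.33 beats policy.32: higher numbers mean newer policy versions.
        return max(candidates, key=lambda p: int(p.suffix[1:]))
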
 
 
-def need_build_overlay(config: MkosiConfig) -> bool:
+def need_build_overlay(config: Config) -> bool:
     return bool(config.build_scripts and (config.build_packages or config.prepare_scripts))
 
 
-def save_cache(state: MkosiState) -> None:
-    if not state.config.incremental:
+def save_cache(context: Context) -> None:
+    if not context.config.incremental:
         return
 
-    final, build, manifest = cache_tree_paths(state.config)
+    final, build, manifest = cache_tree_paths(context.config)
 
     with complete_step("Installing cache copies"):
         rmtree(final)
 
         # We only use the cache-overlay directory for caching if we have a base tree, otherwise we just
         # cache the root directory.
-        if (state.workspace / "cache-overlay").exists():
-            move_tree(state.workspace / "cache-overlay", final, use_subvolumes=state.config.use_subvolumes)
+        if (context.workspace / "cache-overlay").exists():
+            move_tree(context.workspace / "cache-overlay", final, use_subvolumes=context.config.use_subvolumes)
         else:
-            move_tree(state.root, final, use_subvolumes=state.config.use_subvolumes)
+            move_tree(context.root, final, use_subvolumes=context.config.use_subvolumes)
 
-        if need_build_overlay(state.config) and (state.workspace / "build-overlay").exists():
+        if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists():
             rmtree(build)
-            move_tree(state.workspace / "build-overlay", build, use_subvolumes=state.config.use_subvolumes)
+            move_tree(context.workspace / "build-overlay", build, use_subvolumes=context.config.use_subvolumes)
 
         manifest.write_text(
             json.dumps(
-                state.config.cache_manifest(),
-                cls=MkosiJsonEncoder,
+                context.config.cache_manifest(),
+                cls=JsonEncoder,
                 indent=4,
                 sort_keys=True,
             )
         )
 
 
-def reuse_cache(state: MkosiState) -> bool:
-    if not state.config.incremental:
+def reuse_cache(context: Context) -> bool:
+    if not context.config.incremental:
         return False
 
-    final, build, manifest = cache_tree_paths(state.config)
-    if not final.exists() or (need_build_overlay(state.config) and not build.exists()):
+    final, build, manifest = cache_tree_paths(context.config)
+    if not final.exists() or (need_build_overlay(context.config) and not build.exists()):
         return False
 
     if manifest.exists():
         prev = json.loads(manifest.read_text())
-        if prev != json.loads(json.dumps(state.config.cache_manifest(), cls=MkosiJsonEncoder)):
+        if prev != json.loads(json.dumps(context.config.cache_manifest(), cls=JsonEncoder)):
             return False
     else:
         return False
@@ -2300,38 +2311,38 @@ def reuse_cache(state: MkosiState) -> bool:
             return False
 
     with complete_step("Copying cached trees"):
-        copy_tree(final, state.root, use_subvolumes=state.config.use_subvolumes)
-        if need_build_overlay(state.config):
-            (state.workspace / "build-overlay").symlink_to(build)
+        copy_tree(final, context.root, use_subvolumes=context.config.use_subvolumes)
+        if need_build_overlay(context.config):
+            (context.workspace / "build-overlay").symlink_to(build)
 
     return True
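
reuse_cache compares the stored manifest not against the freshly computed dict itself but against a json.loads(json.dumps(..., cls=JsonEncoder)) round trip of it. Pushing the fresh manifest through the same encoder that wrote the cache file normalises Paths, enums and similar values to plain JSON types, so both sides of the comparison end up in the same representation. A small demonstration with a stand-in encoder (JsonEncoder itself is defined in mkosi/config.py and is not reproduced here):

    import json
    from enum import Enum
    from pathlib import Path

    class DemoEncoder(json.JSONEncoder):
        def default(self, o: object) -> object:
            if isinstance(o, Path):
                return str(o)
            if isinstance(o, Enum):
                return o.value
            return super().default(o)

    fresh = {"base_tree": Path("/srv/base"), "feature": "enabled"}
    stored = json.loads(json.dumps(fresh, cls=DemoEncoder))  # what the cache file holds

    assert fresh != stored                                           # Path vs str
    assert json.loads(json.dumps(fresh, cls=DemoEncoder)) == stored  # equal after the round trip
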
 
 
-def save_uki_components(state: MkosiState) -> tuple[Optional[Path], Optional[str], Optional[Path]]:
-    if state.config.output_format not in (OutputFormat.uki, OutputFormat.esp):
+def save_uki_components(context: Context) -> tuple[Optional[Path], Optional[str], Optional[Path]]:
+    if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp):
         return None, None, None
 
     try:
-        kver, kimg = next(gen_kernel_images(state))
+        kver, kimg = next(gen_kernel_images(context))
     except StopIteration:
         die("A kernel must be installed in the image to build a UKI")
 
-    kimg = shutil.copy2(state.root / kimg, state.workspace)
+    kimg = shutil.copy2(context.root / kimg, context.workspace)
 
-    if not (arch := state.config.architecture.to_efi()):
-        die(f"Architecture {state.config.architecture} does not support UEFI")
+    if not (arch := context.config.architecture.to_efi()):
+        die(f"Architecture {context.config.architecture} does not support UEFI")
 
-    stub = state.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub"
+    stub = context.root / f"usr/lib/systemd/boot/efi/linux{arch}.efi.stub"
     if not stub.exists():
-        die(f"sd-stub not found at /{stub.relative_to(state.root)} in the image")
+        die(f"sd-stub not found at /{stub.relative_to(context.root)} in the image")
 
-    stub = shutil.copy2(stub, state.workspace)
+    stub = shutil.copy2(stub, context.workspace)
 
     return stub, kver, kimg
 
 
 def make_image(
-    state: MkosiState,
+    context: Context,
     msg: str,
     skip: Sequence[str] = [],
     split: bool = False,
@@ -2345,42 +2356,42 @@ def make_image(
         "--dry-run=no",
         "--json=pretty",
         "--no-pager",
-        f"--offline={yes_no(state.config.repart_offline)}",
-        "--seed", str(state.config.seed) if state.config.seed else "random",
-        state.staging / state.config.output_with_format,
+        f"--offline={yes_no(context.config.repart_offline)}",
+        "--seed", str(context.config.seed) if context.config.seed else "random",
+        context.staging / context.config.output_with_format,
     ]
 
     if root:
         cmdline += ["--root", root]
-    if not state.config.architecture.is_native():
-        cmdline += ["--architecture", str(state.config.architecture)]
-    if not (state.staging / state.config.output_with_format).exists():
+    if not context.config.architecture.is_native():
+        cmdline += ["--architecture", str(context.config.architecture)]
+    if not (context.staging / context.config.output_with_format).exists():
         cmdline += ["--empty=create"]
-    if state.config.passphrase:
-        cmdline += ["--key-file", state.config.passphrase]
-    if state.config.verity_key:
-        cmdline += ["--private-key", state.config.verity_key]
-    if state.config.verity_certificate:
-        cmdline += ["--certificate", state.config.verity_certificate]
+    if context.config.passphrase:
+        cmdline += ["--key-file", context.config.passphrase]
+    if context.config.verity_key:
+        cmdline += ["--private-key", context.config.verity_key]
+    if context.config.verity_certificate:
+        cmdline += ["--certificate", context.config.verity_certificate]
     if skip:
         cmdline += ["--defer-partitions", ",".join(skip)]
     if split:
         cmdline += ["--split=yes"]
-    if state.config.sector_size:
-        cmdline += ["--sector-size", str(state.config.sector_size)]
+    if context.config.sector_size:
+        cmdline += ["--sector-size", str(context.config.sector_size)]
 
     for d in definitions:
         cmdline += ["--definitions", d]
 
     env = {
         option: value
-        for option, value in state.config.environment.items()
+        for option, value in context.config.environment.items()
         if option.startswith("SYSTEMD_REPART_MKFS_OPTIONS_") or option == "SOURCE_DATE_EPOCH"
     }
 
     with complete_step(msg):
         output = json.loads(
-            bwrap(state, cmdline, devices=not state.config.repart_offline, stdout=subprocess.PIPE, env=env).stdout
+            bwrap(context, cmdline, devices=not context.config.repart_offline, stdout=subprocess.PIPE, env=env).stdout
         )
 
     logging.debug(json.dumps(output, indent=4))
@@ -2390,36 +2401,36 @@ def make_image(
     if split:
         for p in partitions:
             if p.split_path:
-                maybe_compress(state, state.config.compress_output, p.split_path)
+                maybe_compress(context, context.config.compress_output, p.split_path)
 
     return partitions
 
 
 def make_disk(
-    state: MkosiState,
+    context: Context,
     msg: str,
     skip: Sequence[str] = [],
     split: bool = False,
 ) -> list[Partition]:
-    if state.config.output_format != OutputFormat.disk:
+    if context.config.output_format != OutputFormat.disk:
         return []
 
-    if state.config.repart_dirs:
-        definitions = state.config.repart_dirs
+    if context.config.repart_dirs:
+        definitions = context.config.repart_dirs
     else:
-        defaults = state.workspace / "repart-definitions"
+        defaults = context.workspace / "repart-definitions"
         if not defaults.exists():
             defaults.mkdir()
-            if (arch := state.config.architecture.to_efi()):
-                bootloader = state.root / f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI"
+            if (arch := context.config.architecture.to_efi()):
+                bootloader = context.root / f"efi/EFI/BOOT/BOOT{arch.upper()}.EFI"
             else:
                 bootloader = None
 
             esp = (
-                state.config.bootable == ConfigFeature.enabled or
-                (state.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists())
+                context.config.bootable == ConfigFeature.enabled or
+                (context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists())
             )
-            bios = (state.config.bootable != ConfigFeature.disabled and want_grub_bios(state))
+            bios = (context.config.bootable != ConfigFeature.disabled and want_grub_bios(context))
 
             if esp or bios:
                 # Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds and grub
@@ -2457,7 +2468,7 @@ def make_disk(
                     f"""\
                     [Partition]
                     Type=root
-                    Format={state.config.distribution.filesystem()}
+                    Format={context.config.distribution.filesystem()}
                     CopyFiles=/
                     Minimize=guess
                     """
@@ -2466,20 +2477,20 @@ def make_disk(
 
         definitions = [defaults]
 
-    return make_image(state, msg=msg, skip=skip, split=split, root=state.root, definitions=definitions)
+    return make_image(context, msg=msg, skip=skip, split=split, root=context.root, definitions=definitions)
 
 
-def make_esp(state: MkosiState, uki: Path) -> list[Partition]:
-    if not (arch := state.config.architecture.to_efi()):
-        die(f"Architecture {state.config.architecture} does not support UEFI")
+def make_esp(context: Context, uki: Path) -> list[Partition]:
+    if not (arch := context.config.architecture.to_efi()):
+        die(f"Architecture {context.config.architecture} does not support UEFI")
 
-    definitions = state.workspace / "esp-definitions"
+    definitions = context.workspace / "esp-definitions"
     definitions.mkdir(exist_ok=True)
 
     # Use a minimum of 36MB or 260MB depending on sector size because otherwise the generated FAT filesystem will have
     # too few clusters to be considered a FAT32 filesystem by OVMF which will refuse to boot from it.
     # See https://superuser.com/questions/1702331/what-is-the-minimum-size-of-a-4k-native-partition-when-formatted-with-fat32/1717643#1717643
-    if state.config.sector_size == 512:
+    if context.config.sector_size == 512:
         m = 36
     # TODO: Figure out minimum size for 2K sector size
     else:
@@ -2503,70 +2514,70 @@ def make_esp(state: MkosiState, uki: Path) -> list[Partition]:
         )
     )
 
-    return make_image(state, msg="Generating ESP image", definitions=[definitions])
+    return make_image(context, msg="Generating ESP image", definitions=[definitions])
 
 
-def make_extension_image(state: MkosiState, output: Path) -> None:
+def make_extension_image(context: Context, output: Path) -> None:
     cmdline: list[PathString] = [
         "systemd-repart",
-        "--root", state.root,
+        "--root", context.root,
         "--dry-run=no",
         "--no-pager",
-        f"--offline={yes_no(state.config.repart_offline)}",
-        "--seed", str(state.config.seed) if state.config.seed else "random",
+        f"--offline={yes_no(context.config.repart_offline)}",
+        "--seed", str(context.config.seed) if context.config.seed else "random",
         "--empty=create",
         "--size=auto",
         output,
     ]
 
-    if not state.config.architecture.is_native():
-        cmdline += ["--architecture", str(state.config.architecture)]
-    if state.config.passphrase:
-        cmdline += ["--key-file", state.config.passphrase]
-    if state.config.verity_key:
-        cmdline += ["--private-key", state.config.verity_key]
-    if state.config.verity_certificate:
-        cmdline += ["--certificate", state.config.verity_certificate]
-    if state.config.sector_size:
-        cmdline += ["--sector-size", str(state.config.sector_size)]
+    if not context.config.architecture.is_native():
+        cmdline += ["--architecture", str(context.config.architecture)]
+    if context.config.passphrase:
+        cmdline += ["--key-file", context.config.passphrase]
+    if context.config.verity_key:
+        cmdline += ["--private-key", context.config.verity_key]
+    if context.config.verity_certificate:
+        cmdline += ["--certificate", context.config.verity_certificate]
+    if context.config.sector_size:
+        cmdline += ["--sector-size", str(context.config.sector_size)]
 
     env = {
         option: value
-        for option, value in state.config.environment.items()
+        for option, value in context.config.environment.items()
         if option.startswith("SYSTEMD_REPART_MKFS_OPTIONS_") or option == "SOURCE_DATE_EPOCH"
     }
 
     with (
         resource_path(mkosi.resources) as r,
-        complete_step(f"Building {state.config.output_format} extension image")
+        complete_step(f"Building {context.config.output_format} extension image")
     ):
         bwrap(
-            state,
-            cmdline + ["--definitions", r / f"repart/definitions/{state.config.output_format}.repart.d"],
-            devices=not state.config.repart_offline,
+            context,
+            cmdline + ["--definitions", r / f"repart/definitions/{context.config.output_format}.repart.d"],
+            devices=not context.config.repart_offline,
             env=env,
         )
 
 
-def finalize_staging(state: MkosiState) -> None:
+def finalize_staging(context: Context) -> None:
     # Our output unlinking logic removes everything prefixed with the name of the image, so let's make
     # sure that everything we put into the output directory is prefixed with the name of the output.
-    for f in state.staging.iterdir():
+    for f in context.staging.iterdir():
         # Skip the symlink we create without the version that points to the output with the version.
-        if f.name.startswith(state.config.output) and f.is_symlink():
+        if f.name.startswith(context.config.output) and f.is_symlink():
             continue
 
         name = f.name
-        if not name.startswith(state.config.output):
-            name = f"{state.config.output}-{name}"
+        if not name.startswith(context.config.output):
+            name = f"{context.config.output}-{name}"
         if name != f.name:
-            f.rename(state.staging / name)
+            f.rename(context.staging / name)
 
-    for f in state.staging.iterdir():
+    for f in context.staging.iterdir():
         # Make sure all build outputs that are not directories are owned by the user running mkosi.
         if not f.is_dir():
             os.chown(f, INVOKING_USER.uid, INVOKING_USER.gid, follow_symlinks=False)
-        move_tree(f, state.config.output_dir_or_cwd(), use_subvolumes=state.config.use_subvolumes)
+        move_tree(f, context.config.output_dir_or_cwd(), use_subvolumes=context.config.use_subvolumes)
 
 
 def normalize_mtime(root: Path, mtime: Optional[int], directory: Optional[Path] = None) -> None:
@@ -2582,7 +2593,7 @@ def normalize_mtime(root: Path, mtime: Optional[int], directory: Optional[Path]
 
 
 @contextlib.contextmanager
-def setup_workspace(args: MkosiArgs, config: MkosiConfig) -> Iterator[Path]:
+def setup_workspace(args: Args, config: Config) -> Iterator[Path]:
     with contextlib.ExitStack() as stack:
         workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace"))
         stack.callback(lambda: rmtree(workspace))
@@ -2599,119 +2610,119 @@ def setup_workspace(args: MkosiArgs, config: MkosiConfig) -> Iterator[Path]:
                 raise
 
 
-def build_image(args: MkosiArgs, config: MkosiConfig) -> None:
+def build_image(args: Args, config: Config) -> None:
     manifest = Manifest(config) if config.manifest_format else None
 
     with setup_workspace(args, config) as workspace:
-        state = MkosiState(args, config, workspace)
-        install_package_manager_trees(state)
+        context = Context(args, config, workspace)
+        install_package_manager_trees(context)
 
-        with mount_base_trees(state):
-            install_base_trees(state)
-            install_skeleton_trees(state)
-            cached = reuse_cache(state)
+        with mount_base_trees(context):
+            install_base_trees(context)
+            install_skeleton_trees(context)
+            cached = reuse_cache(context)
 
-            state.config.distribution.setup(state)
+            context.config.distribution.setup(context)
 
             if not cached:
-                with mount_cache_overlay(state):
-                    install_distribution(state)
-                    run_prepare_scripts(state, build=False)
-                    install_build_packages(state)
-                    run_prepare_scripts(state, build=True)
+                with mount_cache_overlay(context):
+                    install_distribution(context)
+                    run_prepare_scripts(context, build=False)
+                    install_build_packages(context)
+                    run_prepare_scripts(context, build=True)
 
-                save_cache(state)
-                reuse_cache(state)
+                save_cache(context)
+                reuse_cache(context)
 
-            check_root_populated(state)
-            run_build_scripts(state)
+            check_root_populated(context)
+            run_build_scripts(context)
 
-            if state.config.output_format == OutputFormat.none:
+            if context.config.output_format == OutputFormat.none:
                 # Touch an empty file to indicate the image was built.
-                (state.staging / state.config.output).touch()
-                finalize_staging(state)
+                (context.staging / context.config.output).touch()
+                finalize_staging(context)
                 return
 
-            install_build_dest(state)
-            install_extra_trees(state)
-            run_postinst_scripts(state)
+            install_build_dest(context)
+            install_extra_trees(context)
+            run_postinst_scripts(context)
 
-            configure_autologin(state)
-            configure_os_release(state)
-            configure_initrd(state)
-            configure_ssh(state)
-            configure_clock(state)
+            configure_autologin(context)
+            configure_os_release(context)
+            configure_initrd(context)
+            configure_ssh(context)
+            configure_clock(context)
 
-            install_systemd_boot(state)
-            install_shim(state)
-            run_sysusers(state)
-            run_preset(state)
-            run_depmod(state)
-            run_firstboot(state)
-            run_hwdb(state)
+            install_systemd_boot(context)
+            install_shim(context)
+            run_sysusers(context)
+            run_preset(context)
+            run_depmod(context)
+            run_firstboot(context)
+            run_hwdb(context)
 
             # These might be removed by the next steps,
             # so let's save them for later if needed.
-            stub, kver, kimg = save_uki_components(state)
+            stub, kver, kimg = save_uki_components(context)
 
-            remove_packages(state)
+            remove_packages(context)
 
             if manifest:
                 with complete_step("Recording packages in manifest…"):
-                    manifest.record_packages(state.root)
-
-            clean_package_manager_metadata(state)
-            remove_files(state)
-            run_selinux_relabel(state)
-            run_finalize_scripts(state)
-
-        normalize_mtime(state.root, state.config.source_date_epoch)
-        partitions = make_disk(state, skip=("esp", "xbootldr"), msg="Generating disk image")
-        install_uki(state, partitions)
-        prepare_grub_efi(state)
-        prepare_grub_bios(state, partitions)
-        normalize_mtime(state.root, state.config.source_date_epoch, directory=Path("boot"))
-        normalize_mtime(state.root, state.config.source_date_epoch, directory=Path("efi"))
-        partitions = make_disk(state, msg="Formatting ESP/XBOOTLDR partitions")
-        install_grub_bios(state, partitions)
-
-        if state.config.split_artifacts:
-            make_disk(state, split=True, msg="Extracting partitions")
-
-        copy_vmlinuz(state)
-        copy_initrd(state)
-
-        if state.config.output_format == OutputFormat.tar:
-            make_tar(state, state.root, state.staging / state.config.output_with_format)
-        elif state.config.output_format == OutputFormat.cpio:
-            make_cpio(state, state.root, state.staging / state.config.output_with_format)
-        elif state.config.output_format == OutputFormat.uki:
+                    manifest.record_packages(context.root)
+
+            clean_package_manager_metadata(context)
+            remove_files(context)
+            run_selinux_relabel(context)
+            run_finalize_scripts(context)
+
+        normalize_mtime(context.root, context.config.source_date_epoch)
+        partitions = make_disk(context, skip=("esp", "xbootldr"), msg="Generating disk image")
+        install_uki(context, partitions)
+        prepare_grub_efi(context)
+        prepare_grub_bios(context, partitions)
+        normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("boot"))
+        normalize_mtime(context.root, context.config.source_date_epoch, directory=Path("efi"))
+        partitions = make_disk(context, msg="Formatting ESP/XBOOTLDR partitions")
+        install_grub_bios(context, partitions)
+
+        if context.config.split_artifacts:
+            make_disk(context, split=True, msg="Extracting partitions")
+
+        copy_vmlinuz(context)
+        copy_initrd(context)
+
+        if context.config.output_format == OutputFormat.tar:
+            make_tar(context, context.root, context.staging / context.config.output_with_format)
+        elif context.config.output_format == OutputFormat.cpio:
+            make_cpio(context, context.root, context.staging / context.config.output_with_format)
+        elif context.config.output_format == OutputFormat.uki:
             assert stub and kver and kimg
-            make_uki(state, stub, kver, kimg, state.staging / state.config.output_with_format)
-        elif state.config.output_format == OutputFormat.esp:
+            make_uki(context, stub, kver, kimg, context.staging / context.config.output_with_format)
+        elif context.config.output_format == OutputFormat.esp:
             assert stub and kver and kimg
-            make_uki(state, stub, kver, kimg, state.staging / state.config.output_split_uki)
-            make_esp(state, state.staging / state.config.output_split_uki)
-        elif state.config.output_format.is_extension_image():
-            make_extension_image(state, state.staging / state.config.output_with_format)
-        elif state.config.output_format == OutputFormat.directory:
-            state.root.rename(state.staging / state.config.output_with_format)
+            make_uki(context, stub, kver, kimg, context.staging / context.config.output_split_uki)
+            make_esp(context, context.staging / context.config.output_split_uki)
+        elif context.config.output_format.is_extension_image():
+            make_extension_image(context, context.staging / context.config.output_with_format)
+        elif context.config.output_format == OutputFormat.directory:
+            context.root.rename(context.staging / context.config.output_with_format)
 
         if config.output_format not in (OutputFormat.uki, OutputFormat.esp):
-            maybe_compress(state, state.config.compress_output,
-                           state.staging / state.config.output_with_format,
-                           state.staging / state.config.output_with_compression)
+            maybe_compress(context, context.config.compress_output,
+                           context.staging / context.config.output_with_format,
+                           context.staging / context.config.output_with_compression)
 
-        calculate_sha256sum(state)
-        calculate_signature(state)
-        save_manifest(state, manifest)
+        calculate_sha256sum(context)
+        calculate_signature(context)
+        save_manifest(context, manifest)
 
-        output_base = state.staging / state.config.output
+        output_base = context.staging / context.config.output
         if not output_base.exists() or output_base.is_symlink():
             output_base.unlink(missing_ok=True)
-            output_base.symlink_to(state.config.output_with_compression)
+            output_base.symlink_to(context.config.output_with_compression)
 
-        finalize_staging(state)
+        finalize_staging(context)
 
     print_output_size(config.output_dir_or_cwd() / config.output)
 
@@ -2728,7 +2739,7 @@ def setfacl(root: Path, uid: int, allow: bool) -> None:
 
 
 @contextlib.contextmanager
-def acl_maybe_toggle(config: MkosiConfig, root: Path, uid: int, *, always: bool) -> Iterator[None]:
+def acl_maybe_toggle(config: Config, root: Path, uid: int, *, always: bool) -> Iterator[None]:
     if not config.acl:
         yield
         return
@@ -2757,7 +2768,7 @@ def acl_maybe_toggle(config: MkosiConfig, root: Path, uid: int, *, always: bool)
 
 
 @contextlib.contextmanager
-def acl_toggle_build(config: MkosiConfig, uid: int) -> Iterator[None]:
+def acl_toggle_build(config: Config, uid: int) -> Iterator[None]:
     if not config.acl:
         yield
         return
@@ -2781,7 +2792,7 @@ def acl_toggle_build(config: MkosiConfig, uid: int) -> Iterator[None]:
 
 
 @contextlib.contextmanager
-def acl_toggle_boot(config: MkosiConfig, uid: int) -> Iterator[None]:
+def acl_toggle_boot(config: Config, uid: int) -> Iterator[None]:
     if not config.acl or config.output_format != OutputFormat.directory:
         yield
         return
@@ -2790,7 +2801,7 @@ def acl_toggle_boot(config: MkosiConfig, uid: int) -> Iterator[None]:
         yield
 
 
-def run_shell(args: MkosiArgs, config: MkosiConfig) -> None:
+def run_shell(args: Args, config: Config) -> None:
     cmdline: list[PathString] = ["systemd-nspawn", "--quiet"]
 
     # If we copied in a .nspawn file, make sure it's actually honoured
@@ -2864,7 +2875,7 @@ def run_shell(args: MkosiArgs, config: MkosiConfig) -> None:
         run(cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False)
 
 
-def run_systemd_tool(tool: str, args: MkosiArgs, config: MkosiConfig) -> None:
+def run_systemd_tool(tool: str, args: Args, config: Config) -> None:
     if config.output_format not in (OutputFormat.disk, OutputFormat.directory):
         die(f"{config.output_format} images cannot be inspected with {tool}")
 
@@ -2888,15 +2899,15 @@ def run_systemd_tool(tool: str, args: MkosiArgs, config: MkosiConfig) -> None:
     )
 
 
-def run_journalctl(args: MkosiArgs, config: MkosiConfig) -> None:
+def run_journalctl(args: Args, config: Config) -> None:
     run_systemd_tool("journalctl", args, config)
 
 
-def run_coredumpctl(args: MkosiArgs, config: MkosiConfig) -> None:
+def run_coredumpctl(args: Args, config: Config) -> None:
     run_systemd_tool("coredumpctl", args, config)
 
 
-def run_serve(config: MkosiConfig) -> None:
+def run_serve(config: Config) -> None:
     """Serve the output directory via a tiny HTTP server"""
 
     port = "8081"
@@ -2906,7 +2917,7 @@ def run_serve(config: MkosiConfig) -> None:
             user=INVOKING_USER.uid, group=INVOKING_USER.gid, stdin=sys.stdin, stdout=sys.stdout)
 
 
-def generate_key_cert_pair(args: MkosiArgs) -> None:
+def generate_key_cert_pair(args: Args) -> None:
     """Generate a private key and accompanying X509 certificate using openssl"""
 
     keylength = 2048
@@ -2966,7 +2977,7 @@ def bump_image_version() -> None:
     os.chown("mkosi.version", INVOKING_USER.uid, INVOKING_USER.gid)
 
 
-def show_docs(args: MkosiArgs) -> None:
+def show_docs(args: Args) -> None:
     if args.doc_format == DocFormat.auto:
         formats = [DocFormat.man, DocFormat.pandoc, DocFormat.markdown, DocFormat.system]
     else:
@@ -3007,7 +3018,7 @@ def expand_specifier(s: str) -> str:
     return s.replace("%u", INVOKING_USER.name())
 
 
-def needs_build(args: MkosiArgs, config: MkosiConfig) -> bool:
+def needs_build(args: Args, config: Config) -> bool:
     return (
         args.verb.needs_build() and
         (args.force > 0 or not (config.output_dir_or_cwd() / config.output_with_compression).exists())
@@ -3015,7 +3026,7 @@ def needs_build(args: MkosiArgs, config: MkosiConfig) -> bool:
 
 
 @contextlib.contextmanager
-def prepend_to_environ_path(config: MkosiConfig) -> Iterator[None]:
+def prepend_to_environ_path(config: Config) -> Iterator[None]:
     if config.tools_tree or not config.extra_search_paths:
         yield
         return
@@ -3036,7 +3047,7 @@ def prepend_to_environ_path(config: MkosiConfig) -> Iterator[None]:
             os.environ["PATH"] = ":".join(olds)
 
 
-def finalize_tools(args: MkosiArgs, images: Sequence[MkosiConfig]) -> Sequence[MkosiConfig]:
+def finalize_tools(args: Args, images: Sequence[Config]) -> Sequence[Config]:
     new = []
 
     for config in images:
@@ -3088,7 +3099,7 @@ def finalize_tools(args: MkosiArgs, images: Sequence[MkosiConfig]) -> Sequence[M
     return new
 
 
-def check_workspace_directory(config: MkosiConfig) -> None:
+def check_workspace_directory(config: Config) -> None:
     wd = config.workspace_dir_or_default()
 
     if wd.is_relative_to(Path.cwd()):
@@ -3101,7 +3112,7 @@ def check_workspace_directory(config: MkosiConfig) -> None:
                 hint="Use WorkspaceDirectory= to configure a different workspace directory")
 
 
-def run_verb(args: MkosiArgs, images: Sequence[MkosiConfig]) -> None:
+def run_verb(args: Args, images: Sequence[Config]) -> None:
     if args.verb.needs_root() and os.getuid() != 0:
         die(f"Must be root to run the {args.verb} command")
 
@@ -3111,7 +3122,7 @@ def run_verb(args: MkosiArgs, images: Sequence[MkosiConfig]) -> None:
     if args.verb == Verb.genkey:
         return generate_key_cert_pair(args)
 
-    if all(config == MkosiConfig.default() for config in images):
+    if all(config == Config.default() for config in images):
         die("No configuration found",
             hint="Make sure you're running mkosi from a directory with configuration files")
 
@@ -3122,7 +3133,7 @@ def run_verb(args: MkosiArgs, images: Sequence[MkosiConfig]) -> None:
         if args.json:
             text = json.dumps(
                 {"Images": [config.to_dict() for config in images]},
-                cls=MkosiJsonEncoder,
+                cls=JsonEncoder,
                 indent=4,
                 sort_keys=True
             )
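The renamed encoder is used exactly like the old MkosiJsonEncoder in the json.dumps() call above. A minimal sketch of that pattern follows; the type coverage shown is an assumption for illustration only, the authoritative JsonEncoder lives in mkosi/config.py:

import dataclasses
import json
import os
from enum import Enum
from pathlib import Path

class SketchJsonEncoder(json.JSONEncoder):
    # Hypothetical stand-in for mkosi.config.JsonEncoder: serialize the
    # non-JSON-native values that typically show up in Args/Config dumps.
    def default(self, o: object) -> object:
        if isinstance(o, Enum):
            return str(o)
        if isinstance(o, Path):
            return os.fspath(o)
        if dataclasses.is_dataclass(o) and not isinstance(o, type):
            return dataclasses.asdict(o)
        return super().default(o)

# Mirrors the shape of the json.dumps() call in run_verb() above.
print(json.dumps({"Output": Path("image.raw"), "Force": 1},
                 cls=SketchJsonEncoder, indent=4, sort_keys=True))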
index 12a9434794ae6387ba2b040eaf412d0a29b2eb22..dd1a463004b7e7a42fcbca8330518e544b516e42 100644 (file)
@@ -7,9 +7,9 @@ from pathlib import Path
 from typing import Optional
 
 from mkosi.bubblewrap import bwrap
+from mkosi.context import Context
 from mkosi.log import log_step
 from mkosi.mounts import finalize_passwd_mounts
-from mkosi.state import MkosiState
 
 
 def tar_binary() -> str:
@@ -38,10 +38,10 @@ def tar_exclude_apivfs_tmp() -> list[str]:
     ]
 
 
-def make_tar(state: MkosiState, src: Path, dst: Path) -> None:
+def make_tar(context: Context, src: Path, dst: Path) -> None:
     log_step(f"Creating tar archive {dst}…")
     bwrap(
-        state,
+        context,
         [
             tar_binary(),
             "--create",
@@ -64,11 +64,11 @@ def make_tar(state: MkosiState, src: Path, dst: Path) -> None:
     )
 
 
-def extract_tar(state: MkosiState, src: Path, dst: Path, log: bool = True) -> None:
+def extract_tar(context: Context, src: Path, dst: Path, log: bool = True) -> None:
     if log:
         log_step(f"Extracting tar archive {src}…")
     bwrap(
-        state,
+        context,
         [
             tar_binary(),
             "--extract",
@@ -90,14 +90,14 @@ def extract_tar(state: MkosiState, src: Path, dst: Path, log: bool = True) -> No
     )
 
 
-def make_cpio(state: MkosiState, src: Path, dst: Path, files: Optional[Iterable[Path]] = None) -> None:
+def make_cpio(context: Context, src: Path, dst: Path, files: Optional[Iterable[Path]] = None) -> None:
     if not files:
         files = src.rglob("*")
     files = sorted(files)
 
     log_step(f"Creating cpio archive {dst}…")
     bwrap(
-        state,
+        context,
         [
             cpio_binary(),
             "--create",
index 56c5db1da6f3295fff9fe1c422cc7f6e33e4c6df..2a5c75dacec8ef7476c5f7a690ec63ca0577282c 100644 (file)
@@ -9,10 +9,10 @@ from collections.abc import Mapping, Sequence
 from pathlib import Path
 from typing import Optional
 
+from mkosi.context import Context
 from mkosi.log import ARG_DEBUG_SHELL
 from mkosi.mounts import finalize_passwd_mounts, mount_overlay
 from mkosi.run import find_binary, log_process_failure, run
-from mkosi.state import MkosiState
 from mkosi.types import _FILE, CompletedProcess, PathString
 from mkosi.util import flatten, one_zero
 
@@ -34,9 +34,9 @@ def have_effective_cap(capability: Capability) -> bool:
     return (int(hexcap, 16) & (1 << capability.value)) != 0
 
 
-def finalize_mounts(state: MkosiState) -> list[str]:
+def finalize_mounts(context: Context) -> list[str]:
     mounts = [
-        ((state.config.tools_tree or Path("/")) / subdir, Path("/") / subdir, True)
+        ((context.config.tools_tree or Path("/")) / subdir, Path("/") / subdir, True)
         for subdir in (
             Path("etc/pki"),
             Path("etc/ssl"),
@@ -45,16 +45,16 @@ def finalize_mounts(state: MkosiState) -> list[str]:
             Path("etc/pacman.d/gnupg"),
             Path("var/lib/ca-certificates"),
         )
-        if ((state.config.tools_tree or Path("/")) / subdir).exists()
+        if ((context.config.tools_tree or Path("/")) / subdir).exists()
     ]
 
     mounts += [
         (d, d, False)
-        for d in (state.workspace, state.config.cache_dir, state.config.output_dir, state.config.build_dir)
+        for d in (context.workspace, context.config.cache_dir, context.config.output_dir, context.config.build_dir)
         if d
     ]
 
-    mounts += [(d, d, True) for d in state.config.extra_search_paths]
+    mounts += [(d, d, True) for d in context.config.extra_search_paths]
 
     return flatten(
         ["--ro-bind" if readonly else "--bind", os.fspath(src), os.fspath(target)]
@@ -64,7 +64,7 @@ def finalize_mounts(state: MkosiState) -> list[str]:
 
 
 def bwrap(
-    state: MkosiState,
+    context: Context,
     cmd: Sequence[PathString],
     *,
     network: bool = False,
@@ -85,7 +85,7 @@ def bwrap(
         "--ro-bind-try", "/nix/store", "/nix/store",
         # This mount is writable so bwrap can create extra directories or symlinks inside of it as needed. This isn't a
         # problem as the package manager directory is created by mkosi and thrown away when the build finishes.
-        "--bind", state.pkgmngr / "etc", "/etc",
+        "--bind", context.pkgmngr / "etc", "/etc",
         "--ro-bind-try", "/etc/alternatives", "/etc/alternatives",
         "--bind", "/var/tmp", "/var/tmp",
         "--bind", "/tmp", "/tmp",
@@ -115,7 +115,7 @@ def bwrap(
     if network:
         cmdline += ["--bind", "/etc/resolv.conf", "/etc/resolv.conf"]
 
-    cmdline += finalize_mounts(state) + [
+    cmdline += finalize_mounts(context) + [
         "--setenv", "PATH", f"{scripts or ''}:{os.environ['PATH']}",
         *options,
         "sh", "-c", "chmod 1777 /dev/shm && exec $0 \"$@\"",
@@ -126,8 +126,8 @@ def bwrap(
 
     try:
         with (
-            mount_overlay([Path("/usr"), state.pkgmngr / "usr"], where=Path("/usr"), lazy=True)
-            if (state.pkgmngr / "usr").exists()
+            mount_overlay([Path("/usr"), context.pkgmngr / "usr"], where=Path("/usr"), lazy=True)
+            if (context.pkgmngr / "usr").exists()
             else contextlib.nullcontext()
         ):
             return run(
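finalize_mounts() above reduces a list of (source, target, readonly) triples to bwrap arguments via mkosi.util.flatten. A standalone sketch of that reduction, with flatten() inlined as a nested comprehension (an assumption about its behaviour):

import os
from pathlib import Path

def flatten_mounts(mounts: list[tuple[Path, Path, bool]]) -> list[str]:
    # Each triple becomes either "--ro-bind SRC TARGET" or "--bind SRC TARGET",
    # matching the flag selection in finalize_mounts().
    return [
        arg
        for src, target, readonly in mounts
        for arg in ("--ro-bind" if readonly else "--bind", os.fspath(src), os.fspath(target))
    ]

# Example: a read-only tools-tree path and a writable workspace directory.
print(flatten_mounts([
    (Path("/etc/ssl"), Path("/etc/ssl"), True),
    (Path("/var/tmp/mkosi-workspace"), Path("/var/tmp/mkosi-workspace"), False),
]))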
index ff4c813e8a779b58e93e48763ae37685bba26ef1..7a8d95ad62efd79900f1266553605dcaeefdb712 100644 (file)
@@ -3,12 +3,12 @@
 import os
 import sys
 
-from mkosi.config import MkosiArgs, MkosiConfig, OutputFormat
+from mkosi.config import Args, Config, OutputFormat
 from mkosi.log import complete_step, die
 from mkosi.run import run
 
 
-def run_burn(args: MkosiArgs, config: MkosiConfig) -> None:
+def run_burn(args: Args, config: Config) -> None:
     if config.output_format not in (OutputFormat.disk, OutputFormat.esp):
         die(f"{config.output_format} images cannot be burned to disk")
 
index 452b75d6782ae403aaff5a70945fa16183f03169..b766298e2f207c4c22c8ea802608f005fc822fc4 100644 (file)
@@ -879,7 +879,7 @@ def config_parse_minimum_version(value: Optional[str], old: Optional[GenericVers
 
 
 @dataclasses.dataclass(frozen=True)
-class MkosiConfigSetting:
+class ConfigSetting:
     dest: str
     section: str
     parse: ConfigParseCallback = config_parse_string
@@ -914,7 +914,7 @@ class MkosiConfigSetting:
 
 
 @dataclasses.dataclass(frozen=True)
-class MkosiMatch:
+class Match:
     name: str
     match: Callable[[str], bool]
 
@@ -992,7 +992,7 @@ class PagerHelpAction(argparse._HelpAction):
 
 
 @dataclasses.dataclass(frozen=True)
-class MkosiArgs:
+class Args:
     verb: Verb
     cmdline: list[str]
     force: int
@@ -1008,7 +1008,7 @@ class MkosiArgs:
     json: bool
 
     @classmethod
-    def default(cls) -> "MkosiArgs":
+    def default(cls) -> "Args":
         """Alternative constructor to generate an all-default MkosiArgs.
 
         This prevents MkosiArgs being generated with default values implicitly.
@@ -1020,7 +1020,7 @@ class MkosiArgs:
         return args
 
     @classmethod
-    def from_namespace(cls, ns: argparse.Namespace) -> "MkosiArgs":
+    def from_namespace(cls, ns: argparse.Namespace) -> "Args":
         return cls(**{
             k: v for k, v in vars(ns).items()
             if k in inspect.signature(cls).parameters
@@ -1034,7 +1034,7 @@ class MkosiArgs:
 
     def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str:
         """Dump MkosiArgs as JSON string."""
-        return json.dumps(self.to_dict(), cls=MkosiJsonEncoder, indent=indent, sort_keys=sort_keys)
+        return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys)
 
     @classmethod
     def _load_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> dict[str, Any]:
@@ -1055,25 +1055,25 @@ class MkosiArgs:
         return {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()}
 
     @classmethod
-    def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "MkosiArgs":
+    def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args":
         """Instantiate a MkosiArgs object from a full JSON dump."""
         j = cls._load_json(s)
         return cls(**j)
 
     @classmethod
-    def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "MkosiArgs":
+    def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Args":
         """Return a new MkosiArgs with defaults overwritten by the attributes from passed in JSON."""
         j = cls._load_json(s)
         return dataclasses.replace(cls.default(), **j)
 
 
 @dataclasses.dataclass(frozen=True)
-class MkosiConfig:
+class Config:
     """Type-hinted storage for command line arguments.
 
     Only user configuration is stored here while dynamic state exists in
-    MkosiState. If a field of the same name exists in both classes always
-    access the value from state.
+    Context. If a field of the same name exists in both classes, always
+    access the value from context.
     """
 
     profile: Optional[str]
@@ -1241,7 +1241,7 @@ class MkosiConfig:
         return Path("/var/tmp")
 
     @classmethod
-    def default(cls) -> "MkosiConfig":
+    def default(cls) -> "Config":
         """Alternative constructor to generate an all-default MkosiArgs.
 
         This prevents MkosiArgs being generated with default values implicitly.
@@ -1253,7 +1253,7 @@ class MkosiConfig:
         return config
 
     @classmethod
-    def from_namespace(cls, ns: argparse.Namespace) -> "MkosiConfig":
+    def from_namespace(cls, ns: argparse.Namespace) -> "Config":
         return cls(**{
             k: v for k, v in vars(ns).items()
             if k in inspect.signature(cls).parameters
@@ -1332,7 +1332,7 @@ class MkosiConfig:
 
     def to_json(self, *, indent: Optional[int] = 4, sort_keys: bool = True) -> str:
         """Dump MkosiConfig as JSON string."""
-        return json.dumps(self.to_dict(), cls=MkosiJsonEncoder, indent=indent, sort_keys=sort_keys)
+        return json.dumps(self.to_dict(), cls=JsonEncoder, indent=indent, sort_keys=sort_keys)
 
     @classmethod
     def _load_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> dict[str, Any]:
@@ -1355,13 +1355,13 @@ class MkosiConfig:
         return {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()}
 
     @classmethod
-    def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "MkosiConfig":
+    def from_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config":
         """Instantiate a MkosiConfig object from a full JSON dump."""
         j = cls._load_json(s)
         return cls(**j)
 
     @classmethod
-    def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "MkosiConfig":
+    def from_partial_json(cls, s: Union[str, dict[str, Any], SupportsRead[str], SupportsRead[bytes]]) -> "Config":
         """Return a new MkosiConfig with defaults overwritten by the attributes from passed in JSON."""
         j = cls._load_json(s)
         return dataclasses.replace(cls.default(), **j)
@@ -1435,26 +1435,26 @@ def parse_ini(path: Path, only_sections: Collection[str] = ()) -> Iterator[tuple
 
 
 SETTINGS = (
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="include",
         section="Config",
         parse=config_make_list_parser(delimiter=",", reset=False, parse=make_path_parser()),
         help="Include configuration from the specified file or directory",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="initrd_include",
         section="Config",
         parse=config_make_list_parser(delimiter=",", reset=False, parse=make_path_parser()),
         help="Include configuration from the specified file or directory when building the initrd",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="profile",
         section="Config",
         help="Build the specified profile",
         parse=config_parse_profile,
         match=config_make_string_matcher(),
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="images",
         compat_names=("Presets",),
         long="--image",
@@ -1462,20 +1462,20 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=","),
         help="Specify which images to build",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="dependencies",
         long="--dependency",
         section="Config",
         parse=config_make_list_parser(delimiter=","),
         help="Specify other images that this image depends on",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="minimum_version",
         section="Config",
         parse=config_parse_minimum_version,
         help="Specify the minimum required mkosi version",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="distribution",
         short="-d",
         section="Distribution",
@@ -1486,7 +1486,7 @@ SETTINGS = (
         choices=Distribution.values(),
         help="Distribution to install",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="release",
         short="-r",
         section="Distribution",
@@ -1497,7 +1497,7 @@ SETTINGS = (
         default_factory_depends=("distribution",),
         help="Distribution release to install",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="architecture",
         section="Distribution",
         specifier="a",
@@ -1507,18 +1507,18 @@ SETTINGS = (
         choices=Architecture.values(),
         help="Override the architecture of installation",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="mirror",
         short="-m",
         section="Distribution",
         help="Distribution mirror to use",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="local_mirror",
         section="Distribution",
         help="Use a single local, flat and plain mirror to build the image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="repository_key_check",
         metavar="BOOL",
         nargs="?",
@@ -1527,21 +1527,21 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Controls signature and key checks on repositories",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="repositories",
         metavar="REPOS",
         section="Distribution",
         parse=config_make_list_parser(delimiter=","),
         help="Repositories to use",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="cache_only",
         metavar="BOOL",
         section="Distribution",
         parse=config_parse_boolean,
         help="Only use the package cache when installing packages",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="package_manager_trees",
         long="--package-manager-tree",
         metavar="PATH",
@@ -1552,7 +1552,7 @@ SETTINGS = (
         help="Use a package manager tree to configure the package manager",
     ),
 
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="output_format",
         short="-t",
         long="--format",
@@ -1566,14 +1566,14 @@ SETTINGS = (
         choices=OutputFormat.values(),
         help="Output Format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="manifest_format",
         metavar="FORMAT",
         section="Output",
         parse=config_make_list_parser(delimiter=",", parse=make_enum_parser(ManifestFormat)),
         help="Manifest Format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="output",
         short="-o",
         metavar="NAME",
@@ -1584,7 +1584,7 @@ SETTINGS = (
         default_factory_depends=("image_id", "image_version"),
         help="Output name",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="compress_output",
         metavar="ALG",
         nargs="?",
@@ -1594,7 +1594,7 @@ SETTINGS = (
         default_factory_depends=("distribution", "release", "output_format"),
         help="Enable whole-output compression (with images or archives)",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="output_dir",
         short="-O",
         metavar="DIR",
@@ -1605,7 +1605,7 @@ SETTINGS = (
         paths=("mkosi.output",),
         help="Output directory",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="workspace_dir",
         metavar="DIR",
         name="WorkspaceDirectory",
@@ -1613,7 +1613,7 @@ SETTINGS = (
         parse=config_make_path_parser(required=False),
         help="Workspace directory",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="cache_dir",
         metavar="PATH",
         name="CacheDirectory",
@@ -1622,7 +1622,7 @@ SETTINGS = (
         paths=("mkosi.cache",),
         help="Package cache path",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="build_dir",
         metavar="PATH",
         name="BuildDirectory",
@@ -1631,7 +1631,7 @@ SETTINGS = (
         paths=("mkosi.builddir",),
         help="Path to use as persistent build directory",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="image_version",
         match=config_match_version,
         section="Output",
@@ -1640,14 +1640,14 @@ SETTINGS = (
         paths=("mkosi.version",),
         path_read_text=True,
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="image_id",
         match=config_make_string_matcher(allow_globs=True),
         section="Output",
         specifier="i",
         help="Set ID for image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="split_artifacts",
         metavar="BOOL",
         nargs="?",
@@ -1655,7 +1655,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Generate split partitions",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="repart_dirs",
         long="--repart-dir",
         metavar="PATH",
@@ -1665,20 +1665,20 @@ SETTINGS = (
         paths=("mkosi.repart",),
         help="Directory containing systemd-repart partition definitions",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="sector_size",
         section="Output",
         parse=config_parse_sector_size,
         help="Set the disk image sector size",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="repart_offline",
         section="Output",
         parse=config_parse_boolean,
         help="Build disk images without using loopback devices",
         default=True,
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="overlay",
         metavar="BOOL",
         nargs="?",
@@ -1686,7 +1686,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Only output the additions on top of the given base trees",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="use_subvolumes",
         metavar="FEATURE",
         nargs="?",
@@ -1694,7 +1694,7 @@ SETTINGS = (
         parse=config_parse_feature,
         help="Use btrfs subvolumes for faster directory operations where possible",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="seed",
         metavar="UUID",
         section="Output",
@@ -1702,7 +1702,7 @@ SETTINGS = (
         help="Set the seed for systemd-repart",
     ),
 
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="packages",
         short="-p",
         long="--package",
@@ -1711,7 +1711,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=","),
         help="Add an additional package to the OS image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="build_packages",
         long="--build-package",
         metavar="PACKAGE",
@@ -1719,7 +1719,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=","),
         help="Additional packages needed for build scripts",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="with_recommends",
         metavar="BOOL",
         nargs="?",
@@ -1727,7 +1727,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Install recommended packages",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="with_docs",
         metavar="BOOL",
         nargs="?",
@@ -1736,7 +1736,7 @@ SETTINGS = (
         default=True,
         help="Install documentation",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="base_trees",
         long='--base-tree',
         metavar='PATH',
@@ -1744,7 +1744,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)),
         help='Use the given tree as base tree (e.g. lower sysext layer)',
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="skeleton_trees",
         long="--skeleton-tree",
         metavar="PATH",
@@ -1754,7 +1754,7 @@ SETTINGS = (
         path_default=False,
         help="Use a skeleton tree to bootstrap the image before installing anything",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="extra_trees",
         long="--extra-tree",
         metavar="PATH",
@@ -1764,7 +1764,7 @@ SETTINGS = (
         path_default=False,
         help="Copy an extra tree on top of image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="remove_packages",
         long="--remove-package",
         metavar="PACKAGE",
@@ -1772,21 +1772,21 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=","),
         help="Remove package from the image OS image after installation",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="remove_files",
         metavar="GLOB",
         section="Content",
         parse=config_make_list_parser(delimiter=","),
         help="Remove files from built image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="clean_package_metadata",
         metavar="FEATURE",
         section="Content",
         parse=config_parse_feature,
         help="Remove package manager database and other files",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="source_date_epoch",
         metavar="TIMESTAMP",
         section="Content",
@@ -1795,7 +1795,7 @@ SETTINGS = (
         default_factory_depends=("environment",),
         help="Set the $SOURCE_DATE_EPOCH timestamp",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="prepare_scripts",
         long="--prepare-script",
         metavar="PATH",
@@ -1806,7 +1806,7 @@ SETTINGS = (
         help="Prepare script to run inside the image before it is cached",
         compat_names=("PrepareScript",),
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="build_scripts",
         long="--build-script",
         metavar="PATH",
@@ -1817,7 +1817,7 @@ SETTINGS = (
         help="Build script to run inside image",
         compat_names=("BuildScript",),
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="postinst_scripts",
         long="--postinst-script",
         metavar="PATH",
@@ -1829,7 +1829,7 @@ SETTINGS = (
         help="Postinstall script to run inside image",
         compat_names=("PostInstallationScript",),
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="finalize_scripts",
         long="--finalize-script",
         metavar="PATH",
@@ -1840,7 +1840,7 @@ SETTINGS = (
         help="Postinstall script to run outside image",
         compat_names=("FinalizeScript",),
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="build_sources",
         metavar="PATH",
         section="Content",
@@ -1848,14 +1848,14 @@ SETTINGS = (
         match=config_match_build_sources,
         help="Path for sources to build",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="build_sources_ephemeral",
         metavar="BOOL",
         section="Content",
         parse=config_parse_boolean,
         help="Make build sources ephemeral when running scripts",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="environment",
         short="-E",
         metavar="NAME[=VALUE]",
@@ -1863,7 +1863,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=" ", unescape=True),
         help="Set an environment variable when running scripts",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="environment_files",
         long="--env-file",
         metavar="PATH",
@@ -1873,7 +1873,7 @@ SETTINGS = (
         path_default=False,
         help="Enviroment files to set when running scripts",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="with_tests",
         short="-T",
         long="--without-tests",
@@ -1884,7 +1884,7 @@ SETTINGS = (
         default=True,
         help="Do not run tests as part of build scripts, if supported",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="with_network",
         metavar="BOOL",
         nargs="?",
@@ -1892,7 +1892,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Run build and postinst scripts with network access (instead of private network)",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="bootable",
         metavar="FEATURE",
         nargs="?",
@@ -1901,7 +1901,7 @@ SETTINGS = (
         match=config_match_feature,
         help="Generate ESP partition with systemd-boot and UKIs for installed kernels",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="bootloader",
         metavar="BOOTLOADER",
         section="Content",
@@ -1910,7 +1910,7 @@ SETTINGS = (
         default=Bootloader.systemd_boot,
         help="Specify which UEFI bootloader to use",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="bios_bootloader",
         metavar="BOOTLOADER",
         section="Content",
@@ -1919,7 +1919,7 @@ SETTINGS = (
         default=BiosBootloader.none,
         help="Specify which BIOS bootloader to use",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="shim_bootloader",
         metavar="BOOTLOADER",
         section="Content",
@@ -1928,7 +1928,7 @@ SETTINGS = (
         default=ShimBootloader.none,
         help="Specify whether to use shim",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="initrds",
         long="--initrd",
         metavar="PATH",
@@ -1936,7 +1936,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)),
         help="Add a user-provided initrd to image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="initrd_packages",
         long="--initrd-package",
         metavar="PACKAGE",
@@ -1944,7 +1944,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=","),
         help="Add additional packages to the default initrd",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_command_line",
         metavar="OPTIONS",
         section="Content",
@@ -1953,28 +1953,28 @@ SETTINGS = (
         default_factory=config_default_kernel_command_line,
         help="Set the kernel command line (only bootable images)",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_modules_include",
         metavar="REGEX",
         section="Content",
         parse=config_make_list_parser(delimiter=","),
         help="Include the specified kernel modules in the image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_modules_include_host",
         metavar="BOOL",
         section="Content",
         parse=config_parse_boolean,
         help="Include the currently loaded modules on the host in the image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_modules_exclude",
         metavar="REGEX",
         section="Content",
         parse=config_make_list_parser(delimiter=","),
         help="Exclude the specified kernel modules from the image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_modules_initrd",
         metavar="BOOL",
         nargs="?",
@@ -1983,62 +1983,62 @@ SETTINGS = (
         default=True,
         help="When building a bootable image, add an extra initrd containing the kernel modules",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_modules_initrd_include",
         metavar="REGEX",
         section="Content",
         parse=config_make_list_parser(delimiter=","),
         help="When building a kernel modules initrd, include the specified kernel modules",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_modules_initrd_include_host",
         metavar="BOOL",
         section="Content",
         parse=config_parse_boolean,
         help="When building a kernel modules initrd, include the currently loaded modules on the host in the image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_modules_initrd_exclude",
         metavar="REGEX",
         section="Content",
         parse=config_make_list_parser(delimiter=","),
         help="When building a kernel modules initrd, exclude the specified kernel modules",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="locale",
         section="Content",
         parse=config_parse_string,
         help="Set the system locale",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="locale_messages",
         metavar="LOCALE",
         section="Content",
         parse=config_parse_string,
         help="Set the messages locale",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="keymap",
         metavar="KEYMAP",
         section="Content",
         parse=config_parse_string,
         help="Set the system keymap",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="timezone",
         metavar="TIMEZONE",
         section="Content",
         parse=config_parse_string,
         help="Set the system timezone",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="hostname",
         metavar="HOSTNAME",
         section="Content",
         parse=config_parse_string,
         help="Set the system hostname",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="root_password",
         metavar="PASSWORD",
         section="Content",
@@ -2048,14 +2048,14 @@ SETTINGS = (
         path_secret=True,
         help="Set the password for root",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="root_shell",
         metavar="SHELL",
         section="Content",
         parse=config_parse_string,
         help="Set the shell for root",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="autologin",
         short="-a",
         metavar="BOOL",
@@ -2064,7 +2064,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Enable root autologin",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="make_initrd",
         metavar="BOOL",
         nargs="?",
@@ -2072,7 +2072,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Make sure the image can be used as an initramfs",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="ssh",
         metavar="BOOL",
         nargs="?",
@@ -2080,7 +2080,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Set up SSH access from the host to the final image via 'mkosi ssh'",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="selinux_relabel",
         name="SELinuxRelabel",
         metavar="FEATURE",
@@ -2089,7 +2089,7 @@ SETTINGS = (
         help="Specify whether to relabel all files with setfiles",
     ),
 
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="secure_boot",
         metavar="BOOL",
         nargs="?",
@@ -2097,7 +2097,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Sign the resulting kernel/initrd image for UEFI SecureBoot",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="secure_boot_auto_enroll",
         metavar="BOOL",
         section="Validation",
@@ -2105,7 +2105,7 @@ SETTINGS = (
         default=True,
         help="Automatically enroll the secureboot signing key on first boot",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="secure_boot_key",
         metavar="PATH",
         section="Validation",
@@ -2113,7 +2113,7 @@ SETTINGS = (
         paths=("mkosi.key",),
         help="UEFI SecureBoot private key in PEM format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="secure_boot_certificate",
         metavar="PATH",
         section="Validation",
@@ -2121,7 +2121,7 @@ SETTINGS = (
         paths=("mkosi.crt",),
         help="UEFI SecureBoot certificate in X509 format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="secure_boot_sign_tool",
         metavar="TOOL",
         section="Validation",
@@ -2130,7 +2130,7 @@ SETTINGS = (
         choices=SecureBootSignTool.values(),
         help="Tool to use for signing PE binaries for secure boot",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="verity_key",
         metavar="PATH",
         section="Validation",
@@ -2138,7 +2138,7 @@ SETTINGS = (
         paths=("mkosi.key",),
         help="Private key for signing verity signature in PEM format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="verity_certificate",
         metavar="PATH",
         section="Validation",
@@ -2146,14 +2146,14 @@ SETTINGS = (
         paths=("mkosi.crt",),
         help="Certificate for signing verity signature in X509 format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="sign_expected_pcr",
         metavar="FEATURE",
         section="Validation",
         parse=config_parse_feature,
         help="Measure the components of the unified kernel image (UKI) and embed the PCR signature into the UKI",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="passphrase",
         metavar="PATH",
         section="Validation",
@@ -2161,7 +2161,7 @@ SETTINGS = (
         paths=("mkosi.passphrase",),
         help="Path to a file containing the passphrase to use when LUKS encryption is selected",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="checksum",
         metavar="BOOL",
         nargs="?",
@@ -2169,7 +2169,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Write SHA256SUMS file",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="sign",
         metavar="BOOL",
         nargs="?",
@@ -2177,13 +2177,13 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Write and sign SHA256SUMS file",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="key",
         section="Validation",
         help="GPG key to use for signing",
     ),
 
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="incremental",
         short="-i",
         metavar="BOOL",
@@ -2192,7 +2192,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Make use of and generate intermediary cache images",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="nspawn_settings",
         name="NSpawnSettings",
         long="--settings",
@@ -2202,7 +2202,7 @@ SETTINGS = (
         paths=("mkosi.nspawn",),
         help="Add in .nspawn settings file",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="extra_search_paths",
         long="--extra-search-path",
         metavar="PATH",
@@ -2210,7 +2210,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=",", parse=make_path_parser()),
         help="List of comma-separated paths to look for programs before looking in PATH",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="ephemeral",
         metavar="BOOL",
         section="Host",
@@ -2219,7 +2219,7 @@ SETTINGS = (
                 'image that is removed immediately when the container/VM terminates'),
         nargs="?",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="credentials",
         long="--credential",
         metavar="NAME=VALUE",
@@ -2227,14 +2227,14 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=" "),
         help="Pass a systemd credential to systemd-nspawn or qemu",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="kernel_command_line_extra",
         metavar="OPTIONS",
         section="Host",
         parse=config_make_list_parser(delimiter=" "),
         help="Append extra entries to the kernel command line when booting the image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="acl",
         metavar="BOOL",
         nargs="?",
@@ -2242,7 +2242,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Set ACLs on generated directories to permit the user running mkosi to remove them",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="tools_tree",
         metavar="PATH",
         section="Host",
@@ -2250,27 +2250,27 @@ SETTINGS = (
         paths=("mkosi.tools",),
         help="Look up programs to execute inside the given tree",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="tools_tree_distribution",
         metavar="DISTRIBUTION",
         section="Host",
         parse=config_make_enum_parser(Distribution),
         help="Set the distribution to use for the default tools tree",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="tools_tree_release",
         metavar="RELEASE",
         section="Host",
         parse=config_parse_string,
         help="Set the release to use for the default tools tree",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="tools_tree_mirror",
         metavar="MIRROR",
         section="Host",
         help="Set the mirror to use for the default tools tree",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="tools_tree_packages",
         long="--tools-tree-package",
         metavar="PACKAGE",
@@ -2278,7 +2278,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=","),
         help="Add additional packages to the default tools tree",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="runtime_trees",
         long="--runtime-tree",
         metavar="SOURCE:[TARGET]",
@@ -2286,14 +2286,14 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(absolute=False)),
         help="Additional mounts to add when booting the image",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="runtime_size",
         metavar="SIZE",
         section="Host",
         parse=config_parse_bytes,
         help="Grow disk images to the specified size before booting them",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="ssh_key",
         metavar="PATH",
         section="Host",
@@ -2301,7 +2301,7 @@ SETTINGS = (
         paths=("mkosi.key",),
         help="Private key for use with mkosi ssh in PEM format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="ssh_certificate",
         metavar="PATH",
         section="Host",
@@ -2309,7 +2309,7 @@ SETTINGS = (
         paths=("mkosi.crt",),
         help="Certificate for use with mkosi ssh in X509 format",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_gui",
         metavar="BOOL",
         nargs="?",
@@ -2317,21 +2317,21 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Start QEMU in graphical mode",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_smp",
         metavar="SMP",
         section="Host",
         default="1",
         help="Configure guest's SMP settings",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_mem",
         metavar="MEM",
         section="Host",
         default="2G",
         help="Configure guest's RAM size",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_kvm",
         metavar="FEATURE",
         nargs="?",
@@ -2339,7 +2339,7 @@ SETTINGS = (
         parse=config_parse_feature,
         help="Configure whether to use KVM or not",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_vsock",
         metavar="FEATURE",
         nargs="?",
@@ -2347,7 +2347,7 @@ SETTINGS = (
         parse=config_parse_feature,
         help="Configure whether to use qemu with a vsock or not",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_vsock_cid",
         name="QemuVsockConnectionId",
         long="--qemu-vsock-cid",
@@ -2357,7 +2357,7 @@ SETTINGS = (
         default=QemuVsockCID.hash,
         help="Specify the VSock connection ID to use",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_swtpm",
         metavar="FEATURE",
         nargs="?",
@@ -2365,7 +2365,7 @@ SETTINGS = (
         parse=config_parse_feature,
         help="Configure whether to use qemu with swtpm or not",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_cdrom",
         metavar="BOOLEAN",
         nargs="?",
@@ -2373,7 +2373,7 @@ SETTINGS = (
         parse=config_parse_boolean,
         help="Attach the image as a CD-ROM to the virtual machine",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_firmware",
         metavar="FIRMWARE",
         section="Host",
@@ -2382,21 +2382,21 @@ SETTINGS = (
         help="Set qemu firmware to use",
         choices=QemuFirmware.values(),
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_firmware_variables",
         metavar="PATH",
         section="Host",
         parse=config_make_path_parser(),
         help="Set the path to the qemu firmware variables file to use",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_kernel",
         metavar="PATH",
         section="Host",
         parse=config_make_path_parser(),
         help="Specify the kernel to use for qemu direct kernel boot",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_drives",
         long="--qemu-drive",
         metavar="DRIVE",
@@ -2404,7 +2404,7 @@ SETTINGS = (
         parse=config_make_list_parser(delimiter=" ", parse=parse_drive),
         help="Specify a qemu drive that mkosi should create and pass to qemu",
     ),
-    MkosiConfigSetting(
+    ConfigSetting(
         dest="qemu_args",
         metavar="ARGS",
         section="Host",
@@ -2419,15 +2419,15 @@ SETTINGS_LOOKUP_BY_DEST = {s.dest: s for s in SETTINGS}
 SETTINGS_LOOKUP_BY_SPECIFIER = {s.specifier: s for s in SETTINGS if s.specifier}
 
 MATCHES = (
-    MkosiMatch(
+    Match(
         name="PathExists",
         match=match_path_exists,
     ),
-    MkosiMatch(
+    Match(
         name="SystemdVersion",
         match=match_systemd_version,
     ),
-    MkosiMatch(
+    Match(
         name="HostArchitecture",
         match=match_host_architecture,
     ),
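The SETTINGS_LOOKUP_BY_DEST and SETTINGS_LOOKUP_BY_SPECIFIER tables shown as context at the top of this hunk index the SETTINGS tuple once, so later option handling is a plain dictionary lookup instead of a scan. A minimal, self-contained sketch of that pattern, using a simplified stand-in for the renamed ConfigSetting class:

    from dataclasses import dataclass

    @dataclass(frozen=True)
    class Setting:                       # simplified stand-in for ConfigSetting
        dest: str
        specifier: str = ""

    SETTINGS = (
        Setting("hostname", specifier="h"),
        Setting("timezone"),
    )

    # Index once by destination attribute and once by specifier character.
    SETTINGS_LOOKUP_BY_DEST = {s.dest: s for s in SETTINGS}
    SETTINGS_LOOKUP_BY_SPECIFIER = {s.specifier: s for s in SETTINGS if s.specifier}

    print(SETTINGS_LOOKUP_BY_DEST["timezone"])
    print(SETTINGS_LOOKUP_BY_SPECIFIER["h"])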
@@ -2612,7 +2612,7 @@ def create_argument_parser(action: type[argparse.Action]) -> argparse.ArgumentPa
     return parser
 
 
-def resolve_deps(images: Sequence[MkosiConfig], include: Sequence[str]) -> list[MkosiConfig]:
+def resolve_deps(images: Sequence[Config], include: Sequence[str]) -> list[Config]:
     graph = {config.image: config.dependencies for config in images}
 
     if include:
@@ -2639,7 +2639,7 @@ def resolve_deps(images: Sequence[MkosiConfig], include: Sequence[str]) -> list[
     return sorted(images, key=lambda i: order.index(i.image))
 
 
-def parse_config(argv: Sequence[str] = ()) -> tuple[MkosiArgs, tuple[MkosiConfig, ...]]:
+def parse_config(argv: Sequence[str] = ()) -> tuple[Args, tuple[Config, ...]]:
     # Compare inodes instead of paths so we can't get tricked by bind mounts and such.
     parsed_includes: set[tuple[int, int]] = set()
     immutable_settings: set[str] = set()
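The comment above notes that already-parsed include files are tracked by inode rather than by path, so two paths that reach the same file via a bind mount or symlink are only parsed once. A hedged, standalone sketch of that deduplication (the "parse" step here is just a print):

    import tempfile
    from pathlib import Path

    parsed_includes: set[tuple[int, int]] = set()

    def parse_once(path: Path) -> None:
        st = path.stat()
        # Two different paths to the same underlying file share (st_dev, st_ino),
        # so the second call returns without re-parsing.
        if (st.st_dev, st.st_ino) in parsed_includes:
            return
        parsed_includes.add((st.st_dev, st.st_ino))
        print(f"parsing {path}")

    with tempfile.NamedTemporaryFile(suffix=".conf") as f:
        parse_once(Path(f.name))
        parse_once(Path(f.name))         # second call is a no-op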
@@ -2698,7 +2698,7 @@ def parse_config(argv: Sequence[str] = ()) -> tuple[MkosiArgs, tuple[MkosiConfig
                     parse_config(p if p.is_file() else Path("."), namespace, defaults)
                 parsed_includes.add((st.st_dev, st.st_ino))
 
-    class MkosiAction(argparse.Action):
+    class ConfigAction(argparse.Action):
         def __call__(
             self,
             parser: argparse.ArgumentParser,
@@ -2725,7 +2725,7 @@ def parse_config(argv: Sequence[str] = ()) -> tuple[MkosiArgs, tuple[MkosiConfig
                         setattr(namespace, s.dest, s.parse(v, getattr(namespace, self.dest, None)))
 
     def finalize_default(
-        setting: MkosiConfigSetting,
+        setting: ConfigSetting,
         namespace: argparse.Namespace,
         defaults: argparse.Namespace
     ) -> Optional[Any]:
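The ConfigAction class renamed above is an argparse.Action that routes the raw command line value through the matching setting's parse() callback before storing it on the namespace. A rough sketch of that pattern; parse_bool and the --autologin wiring are illustrative, not mkosi's real setting table:

    import argparse
    from typing import Any, Optional

    def parse_bool(value: str, old: Optional[bool]) -> bool:
        # Stand-in for a ConfigSetting.parse callback.
        return value.lower() in ("1", "yes", "true")

    class ConfigAction(argparse.Action):
        def __call__(
            self,
            parser: argparse.ArgumentParser,
            namespace: argparse.Namespace,
            values: Any,
            option_string: Optional[str] = None,
        ) -> None:
            # Route the raw string through the setting's parser before storing it.
            setattr(namespace, self.dest, parse_bool(values, getattr(namespace, self.dest, None)))

    parser = argparse.ArgumentParser()
    parser.add_argument("--autologin", action=ConfigAction)
    print(parser.parse_args(["--autologin", "yes"]))     # Namespace(autologin=True)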
@@ -2809,7 +2809,7 @@ def parse_config(argv: Sequence[str] = ()) -> tuple[MkosiArgs, tuple[MkosiConfig
         defaults: argparse.Namespace,
         profiles: bool = False,
     ) -> bool:
-        s: Optional[MkosiConfigSetting] # Make mypy happy
+        s: Optional[ConfigSetting] # Make mypy happy
         extras = path.is_dir()
 
         if path.is_dir():
@@ -2919,7 +2919,7 @@ def parse_config(argv: Sequence[str] = ()) -> tuple[MkosiArgs, tuple[MkosiConfig
         argv += ["--", "build"]
 
     namespace = argparse.Namespace()
-    argparser = create_argument_parser(MkosiAction)
+    argparser = create_argument_parser(ConfigAction)
     argparser.parse_args(argv, namespace)
     cli_ns = copy.deepcopy(namespace)
 
@@ -3123,7 +3123,7 @@ def load_environment(args: argparse.Namespace) -> dict[str, str]:
     return env
 
 
-def load_args(args: argparse.Namespace) -> MkosiArgs:
+def load_args(args: argparse.Namespace) -> Args:
     if args.cmdline and not args.verb.supports_cmdline():
         die(f"Arguments after verb are not supported for {args.verb}.")
 
@@ -3132,10 +3132,10 @@ def load_args(args: argparse.Namespace) -> MkosiArgs:
     if args.debug_shell:
         ARG_DEBUG_SHELL.set(args.debug_shell)
 
-    return MkosiArgs.from_namespace(args)
+    return Args.from_namespace(args)
 
 
-def load_config(args: MkosiArgs, config: argparse.Namespace) -> MkosiConfig:
+def load_config(args: Args, config: argparse.Namespace) -> Config:
     if config.build_dir:
         config.build_dir = config.build_dir / f"{config.distribution}~{config.release}~{config.architecture}"
 
@@ -3179,7 +3179,7 @@ def load_config(args: MkosiArgs, config: argparse.Namespace) -> MkosiConfig:
     ):
         die("This unprivileged build configuration requires at least Linux v5.11")
 
-    return MkosiConfig.from_namespace(config)
+    return Config.from_namespace(config)
 
 
 def yes_no(b: bool) -> str:
@@ -3237,7 +3237,7 @@ def format_bytes_or_none(num_bytes: Optional[int]) -> str:
     return format_bytes(num_bytes) if num_bytes is not None else "none"
 
 
-def summary(config: MkosiConfig) -> str:
+def summary(config: Config) -> str:
     def bold(s: Any) -> str:
         return f"{Style.bold}{s}{Style.reset}"
 
@@ -3398,7 +3398,7 @@ def summary(config: MkosiConfig) -> str:
     return summary
 
 
-class MkosiJsonEncoder(json.JSONEncoder):
+class JsonEncoder(json.JSONEncoder):
     def default(self, o: Any) -> Any:
         if isinstance(o, StrEnum):
             return str(o)
@@ -3408,7 +3408,7 @@ class MkosiJsonEncoder(json.JSONEncoder):
             return os.fspath(o)
         elif isinstance(o, uuid.UUID):
             return str(o)
-        elif isinstance(o, (MkosiArgs, MkosiConfig)):
+        elif isinstance(o, (Args, Config)):
             return o.to_dict()
         return json.JSONEncoder.default(self, o)
 
@@ -3416,7 +3416,7 @@ class MkosiJsonEncoder(json.JSONEncoder):
 E = TypeVar("E", bound=StrEnum)
 
 
-def json_type_transformer(refcls: Union[type[MkosiArgs], type[MkosiConfig]]) -> Callable[[str, Any], Any]:
+def json_type_transformer(refcls: Union[type[Args], type[Config]]) -> Callable[[str, Any], Any]:
     fields_by_name = {field.name: field for field in dataclasses.fields(refcls)}
 
     def path_transformer(path: str, fieldtype: type[Path]) -> Path:
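The JsonEncoder hunk above dispatches on the value type: enum members and UUIDs are rendered via str(), Paths via os.fspath(), and Args/Config objects via their to_dict() method. A self-contained sketch of the same dispatch, using a hypothetical enum instead of mkosi's StrEnum and leaving out the Args/Config branch:

    import json
    import os
    import uuid
    from enum import Enum
    from pathlib import Path

    class Compression(Enum):             # hypothetical stand-in enum
        zstd = "zstd"

        def __str__(self) -> str:
            return self.value

    class DemoEncoder(json.JSONEncoder):
        def default(self, o: object) -> object:
            if isinstance(o, Enum):
                return str(o)            # same str() dispatch as the encoder above
            elif isinstance(o, Path):
                return os.fspath(o)
            elif isinstance(o, uuid.UUID):
                return str(o)
            return super().default(o)

    print(json.dumps(
        {"compression": Compression.zstd, "key": Path("mkosi.key"), "id": uuid.uuid4()},
        cls=DemoEncoder, indent=2,
    ))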
similarity index 89%
rename from mkosi/state.py
rename to mkosi/context.py
index 25f0b57b7684a608e37c6900b5d5dacef13ef5e2..f6138936ad2464503e0c9aca26e29f74ce63cd7a 100644 (file)
@@ -2,15 +2,15 @@
 
 from pathlib import Path
 
-from mkosi.config import MkosiArgs, MkosiConfig
+from mkosi.config import Args, Config
 from mkosi.tree import make_tree
 from mkosi.util import umask
 
 
-class MkosiState:
+class Context:
     """State related properties."""
 
-    def __init__(self, args: MkosiArgs, config: MkosiConfig, workspace: Path) -> None:
+    def __init__(self, args: Args, config: Config, workspace: Path) -> None:
         self.args = args
         self.config = config
         self.workspace = workspace
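Most of this commit is mechanical: helpers that previously took state: MkosiState now take context: Context and read context.config and context.workspace (plus context.root and context.pkgmngr in later hunks). A tiny hedged sketch of the calling convention, with stand-in dataclasses instead of the real Args/Config:

    from dataclasses import dataclass
    from pathlib import Path

    @dataclass
    class DemoArgs:                      # stand-in for mkosi.config.Args
        verb: str

    @dataclass
    class DemoConfig:                    # stand-in for mkosi.config.Config
        distribution: str
        release: str

    class Context:
        """Bundle the parsed CLI arguments, the image configuration and the
        per-build workspace directory, as in the renamed class above."""

        def __init__(self, args: DemoArgs, config: DemoConfig, workspace: Path) -> None:
            self.args = args
            self.config = config
            self.workspace = workspace

    def root_dir(context: Context) -> Path:
        # Helpers receive the whole Context and pick out what they need.
        return context.workspace / f"root-{context.config.distribution}"

    ctx = Context(DemoArgs("build"), DemoConfig("fedora", "39"), Path("/tmp/mkosi.workspace"))
    print(root_dir(ctx))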
index 993a346a2580dece206b743326a3e8c265aed06d..b714afb84be8fb79adba3df9df70ecb595592801 100644 (file)
@@ -11,7 +11,7 @@ from mkosi.util import StrEnum, read_os_release
 
 if TYPE_CHECKING:
     from mkosi.config import Architecture
-    from mkosi.state import MkosiState
+    from mkosi.context import Context
 
 
 class PackageType(StrEnum):
@@ -28,19 +28,19 @@ class DistributionInstaller:
         raise NotImplementedError
 
     @classmethod
-    def setup(cls, state: "MkosiState") -> None:
+    def setup(cls, context: "Context") -> None:
         raise NotImplementedError
 
     @classmethod
-    def install(cls, state: "MkosiState") -> None:
+    def install(cls, context: "Context") -> None:
         raise NotImplementedError
 
     @classmethod
-    def install_packages(cls, state: "MkosiState", packages: Sequence[str]) -> None:
+    def install_packages(cls, context: "Context", packages: Sequence[str]) -> None:
         raise NotImplementedError
 
     @classmethod
-    def remove_packages(cls, state: "MkosiState", packages: Sequence[str]) -> None:
+    def remove_packages(cls, context: "Context", packages: Sequence[str]) -> None:
         raise NotImplementedError
 
     @classmethod
@@ -106,17 +106,17 @@ class Distribution(StrEnum):
     def is_apt_distribution(self) -> bool:
         return self in (Distribution.debian, Distribution.ubuntu)
 
-    def setup(self, state: "MkosiState") -> None:
-        return self.installer().setup(state)
+    def setup(self, context: "Context") -> None:
+        return self.installer().setup(context)
 
-    def install(self, state: "MkosiState") -> None:
-        return self.installer().install(state)
+    def install(self, context: "Context") -> None:
+        return self.installer().install(context)
 
-    def install_packages(self, state: "MkosiState", packages: Sequence[str]) -> None:
-        return self.installer().install_packages(state, packages)
+    def install_packages(self, context: "Context", packages: Sequence[str]) -> None:
+        return self.installer().install_packages(context, packages)
 
-    def remove_packages(self, state: "MkosiState", packages: Sequence[str]) -> None:
-        return self.installer().remove_packages(state, packages)
+    def remove_packages(self, context: "Context", packages: Sequence[str]) -> None:
+        return self.installer().remove_packages(context, packages)
 
     def filesystem(self) -> str:
         return self.installer().filesystem()
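As the hunk above shows, the Distribution enum does not implement package operations itself: setup(), install(), install_packages() and remove_packages() forward to the per-distribution installer class together with the Context. A minimal sketch of that delegation pattern, with hypothetical stand-ins:

    from collections.abc import Sequence
    from enum import Enum

    class DemoInstaller:                 # stand-in for a DistributionInstaller subclass
        @classmethod
        def install_packages(cls, context: str, packages: Sequence[str]) -> None:
            print(f"[{context}] installing: {', '.join(packages)}")

    class DemoDistribution(Enum):
        fedora = "fedora"

        def installer(self) -> type[DemoInstaller]:
            # The real enum looks up the matching installer module per member.
            return DemoInstaller

        def install_packages(self, context: str, packages: Sequence[str]) -> None:
            return self.installer().install_packages(context, packages)

    DemoDistribution.fedora.install_packages("ctx", ["systemd", "util-linux"])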
index f35bdd7ae62e58bb1ee15b7be439d48eb1e2c8f9..e01c99bc89fb3f989e9dff43b887e437210e99da 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: LGPL-2.1+
 
+from mkosi.context import Context
 from mkosi.distributions import centos, join_mirror
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
-from mkosi.state import MkosiState
 
 
 class Installer(centos.Installer):
@@ -11,24 +11,24 @@ class Installer(centos.Installer):
         return "AlmaLinux"
 
     @staticmethod
-    def gpgurls(state: MkosiState) -> tuple[str, ...]:
+    def gpgurls(context: Context) -> tuple[str, ...]:
         return (
             find_rpm_gpgkey(
-                state,
-                f"RPM-GPG-KEY-AlmaLinux-{state.config.release}",
-                f"https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{state.config.release}",
+                context,
+                f"RPM-GPG-KEY-AlmaLinux-{context.config.release}",
+                f"https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux-{context.config.release}",
             ),
         )
 
     @classmethod
-    def repository_variants(cls, state: MkosiState, repo: str) -> list[RpmRepository]:
-        if state.config.mirror:
-            url = f"baseurl={join_mirror(state.config.mirror, f'almalinux/$releasever/{repo}/$basearch/os')}"
+    def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]:
+        if context.config.mirror:
+            url = f"baseurl={join_mirror(context.config.mirror, f'almalinux/$releasever/{repo}/$basearch/os')}"
         else:
             url = f"mirrorlist=https://mirrors.almalinux.org/mirrorlist/$releasever/{repo.lower()}"
 
-        return [RpmRepository(repo, url, cls.gpgurls(state))]
+        return [RpmRepository(repo, url, cls.gpgurls(context))]
 
     @classmethod
-    def sig_repositories(cls, state: MkosiState) -> list[RpmRepository]:
+    def sig_repositories(cls, context: Context) -> list[RpmRepository]:
         return []
index 0761c0017e5c83d71924623e3c21b15cdd571711..697a34b526b2d2fc2ab2a836f6ba962b3d9c2c0c 100644 (file)
@@ -3,10 +3,10 @@
 from collections.abc import Sequence
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import Distribution, DistributionInstaller, PackageType
 from mkosi.installer.pacman import PacmanRepository, invoke_pacman, setup_pacman
 from mkosi.log import die
-from mkosi.state import MkosiState
 
 
 class Installer(DistributionInstaller):
@@ -31,16 +31,16 @@ class Installer(DistributionInstaller):
         return Distribution.arch
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
-        if state.config.local_mirror:
-            repos = [PacmanRepository("core", state.config.local_mirror)]
+    def setup(cls, context: Context) -> None:
+        if context.config.local_mirror:
+            repos = [PacmanRepository("core", context.config.local_mirror)]
         else:
             repos = []
 
-            if state.config.architecture == Architecture.arm64:
-                url = f"{state.config.mirror or 'http://mirror.archlinuxarm.org'}/$arch/$repo"
+            if context.config.architecture == Architecture.arm64:
+                url = f"{context.config.mirror or 'http://mirror.archlinuxarm.org'}/$arch/$repo"
             else:
-                url = f"{state.config.mirror or 'https://geo.mirror.pkgbuild.com'}/$repo/os/$arch"
+                url = f"{context.config.mirror or 'https://geo.mirror.pkgbuild.com'}/$repo/os/$arch"
 
             # Testing repositories have to go before regular ones to take precedence.
             for id in (
@@ -51,22 +51,22 @@ class Installer(DistributionInstaller):
                 "core-debug",
                 "extra-debug",
             ):
-                if id in state.config.repositories:
+                if id in context.config.repositories:
                     repos += [PacmanRepository(id, url)]
 
             for id in ("core", "extra"):
                 repos += [PacmanRepository(id, url)]
 
-        setup_pacman(state, repos)
+        setup_pacman(context, repos)
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
-        cls.install_packages(state, ["filesystem"], apivfs=False)
+    def install(cls, context: Context) -> None:
+        cls.install_packages(context, ["filesystem"], apivfs=False)
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
         invoke_pacman(
-            state,
+            context,
             "--sync",
             ["--refresh", "--needed", "--assume-installed", "initramfs"],
             packages,
@@ -74,8 +74,8 @@ class Installer(DistributionInstaller):
         )
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
-        invoke_pacman(state, "--remove", ["--nosave", "--recursive"], packages)
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
+        invoke_pacman(context, "--remove", ["--nosave", "--recursive"], packages)
 
     @classmethod
     def architecture(cls, arch: Architecture) -> str:
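The setup() hunk above appends the optional *-testing repositories before the regular core/extra ones, because pacman prefers whichever repository is listed first, and only the testing repositories that the user actually enabled are added. A hedged, standalone sketch of that ordering logic:

    # Hypothetical stand-in for context.config.repositories.
    enabled_repositories = ["core-testing"]

    repos: list[str] = []

    # Testing repositories go first so they take precedence over the stable ones.
    for repo in ("core-testing", "extra-testing", "core-testing-debug",
                 "extra-testing-debug", "core-debug", "extra-debug"):
        if repo in enabled_repositories:
            repos.append(repo)

    # The regular repositories are always added afterwards.
    for repo in ("core", "extra"):
        repos.append(repo)

    print(repos)     # ['core-testing', 'core', 'extra']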
index 5a168b3f5822f42b3a0b1585f30f7ff435b5af8e..5f0a883660271561283deead3c3e30fd94cc9174 100644 (file)
@@ -6,6 +6,7 @@ from collections.abc import Iterable, Sequence
 from pathlib import Path
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import (
     Distribution,
     DistributionInstaller,
@@ -15,7 +16,6 @@ from mkosi.distributions import (
 from mkosi.installer.dnf import invoke_dnf, setup_dnf
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
 from mkosi.log import complete_step, die
-from mkosi.state import MkosiState
 from mkosi.tree import rmtree
 from mkosi.versioncomp import GenericVersion
 
@@ -56,29 +56,29 @@ class Installer(DistributionInstaller):
         return Distribution.fedora
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
-        if GenericVersion(state.config.release) <= 7:
+    def setup(cls, context: Context) -> None:
+        if GenericVersion(context.config.release) <= 7:
             die(f"{cls.pretty_name()} 7 or earlier variants are not supported")
 
-        setup_dnf(state, cls.repositories(state))
-        (state.pkgmngr / "etc/dnf/vars/stream").write_text(f"{state.config.release}-stream\n")
+        setup_dnf(context, cls.repositories(context))
+        (context.pkgmngr / "etc/dnf/vars/stream").write_text(f"{context.config.release}-stream\n")
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
+    def install(cls, context: Context) -> None:
         # Make sure glibc-minimal-langpack is installed instead of glibc-all-langpacks.
-        cls.install_packages(state, ["filesystem", "glibc-minimal-langpack"], apivfs=False)
+        cls.install_packages(context, ["filesystem", "glibc-minimal-langpack"], apivfs=False)
 
         # On Fedora, the default rpmdb has moved to /usr/lib/sysimage/rpm so if that's the case we
         # need to move it back to /var/lib/rpm on CentOS.
-        move_rpm_db(state.root)
+        move_rpm_db(context.root)
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
-        invoke_dnf(state, "install", packages, apivfs=apivfs)
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
+        invoke_dnf(context, "install", packages, apivfs=apivfs)
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
-        invoke_dnf(state, "remove", packages)
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
+        invoke_dnf(context, "remove", packages)
 
     @classmethod
     def architecture(cls, arch: Architecture) -> str:
@@ -95,85 +95,85 @@ class Installer(DistributionInstaller):
         return a
 
     @staticmethod
-    def gpgurls(state: MkosiState) -> tuple[str, ...]:
+    def gpgurls(context: Context) -> tuple[str, ...]:
         keys = ("RPM-GPG-KEY-CentOS-Official", "RPM-GPG-KEY-CentOS-SIG-Extras")
-        return tuple(find_rpm_gpgkey(state, key, f"https://www.centos.org/keys/{key}") for key in keys)
+        return tuple(find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys)
 
     @classmethod
-    def repository_variants(cls, state: MkosiState, repo: str) -> Iterable[RpmRepository]:
-        if state.config.local_mirror:
-            yield RpmRepository(repo, f"baseurl={state.config.local_mirror}", cls.gpgurls(state))
+    def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]:
+        if context.config.local_mirror:
+            yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context))
 
-        elif state.config.mirror:
-            if GenericVersion(state.config.release) <= 8:
+        elif mirror := context.config.mirror:
+            if GenericVersion(context.config.release) <= 8:
                 yield RpmRepository(
                     repo.lower(),
-                    f"baseurl={join_mirror(state.config.mirror, f'centos/$stream/{repo}/$basearch/os')}",
-                    cls.gpgurls(state),
+                    f"baseurl={join_mirror(mirror, f'centos/$stream/{repo}/$basearch/os')}",
+                    cls.gpgurls(context),
                 )
                 yield RpmRepository(
                     f"{repo.lower()}-debuginfo",
-                    f"baseurl={join_mirror(state.config.mirror, 'centos-debuginfo/$stream/$basearch')}",
-                    cls.gpgurls(state),
+                    f"baseurl={join_mirror(mirror, 'centos-debuginfo/$stream/$basearch')}",
+                    cls.gpgurls(context),
                     enabled=False,
                 )
                 yield RpmRepository(
                     f"{repo.lower()}-source",
-                    f"baseurl={join_mirror(state.config.mirror, f'centos/$stream/{repo}/Source')}",
-                    cls.gpgurls(state),
+                    f"baseurl={join_mirror(mirror, f'centos/$stream/{repo}/Source')}",
+                    cls.gpgurls(context),
                     enabled=False,
                 )
             else:
                 if repo == "extras":
                     yield RpmRepository(
                         repo.lower(),
-                        f"baseurl={join_mirror(state.config.mirror, f'SIGs/$stream/{repo}/$basearch/extras-common')}",
-                        cls.gpgurls(state),
+                        f"baseurl={join_mirror(mirror, f'SIGs/$stream/{repo}/$basearch/extras-common')}",
+                        cls.gpgurls(context),
                     )
                     yield RpmRepository(
                         f"{repo.lower()}-source",
-                        f"baseurl={join_mirror(state.config.mirror, f'SIGs/$stream/{repo}/source/extras-common')}",
-                        cls.gpgurls(state),
+                        f"baseurl={join_mirror(mirror, f'SIGs/$stream/{repo}/source/extras-common')}",
+                        cls.gpgurls(context),
                         enabled=False,
                     )
 
                 else:
                     yield RpmRepository(
                         repo.lower(),
-                        f"baseurl={join_mirror(state.config.mirror, f'$stream/{repo}/$basearch/os')}",
-                        cls.gpgurls(state),
+                        f"baseurl={join_mirror(mirror, f'$stream/{repo}/$basearch/os')}",
+                        cls.gpgurls(context),
                     )
                     yield RpmRepository(
                         f"{repo.lower()}-debuginfo",
-                        f"baseurl={join_mirror(state.config.mirror, f'$stream/{repo}/$basearch/debug/tree')}",
-                        cls.gpgurls(state),
+                        f"baseurl={join_mirror(mirror, f'$stream/{repo}/$basearch/debug/tree')}",
+                        cls.gpgurls(context),
                         enabled=False,
                     )
                     yield RpmRepository(
                         f"{repo.lower()}-source",
-                        f"baseurl={join_mirror(state.config.mirror, f'$stream/{repo}/source/tree')}",
-                        cls.gpgurls(state),
+                        f"baseurl={join_mirror(mirror, f'$stream/{repo}/source/tree')}",
+                        cls.gpgurls(context),
                         enabled=False,
                     )
 
         else:
-            if GenericVersion(state.config.release) <= 8:
+            if GenericVersion(context.config.release) <= 8:
                 yield RpmRepository(
                     repo.lower(),
                     f"mirrorlist=http://mirrorlist.centos.org/?release=$stream&arch=$basearch&repo={repo}",
-                    cls.gpgurls(state),
+                    cls.gpgurls(context),
                 )
                 # These can't be retrieved from the mirrorlist.
                 yield RpmRepository(
                     f"{repo.lower()}-debuginfo",
                     "baseurl=http://debuginfo.centos.org/$stream/$basearch",
-                    cls.gpgurls(state),
+                    cls.gpgurls(context),
                     enabled=False,
                 )
                 yield RpmRepository(
                     f"{repo.lower()}-source",
                     f"baseurl=https://vault.centos.org/centos/$stream/{repo}/Source",
-                    cls.gpgurls(state),
+                    cls.gpgurls(context),
                     enabled=False,
                 )
             else:
@@ -183,64 +183,64 @@ class Installer(DistributionInstaller):
                     yield RpmRepository(
                         repo.lower(),
                         f"{url}?arch=$basearch&repo=centos-extras-sig-extras-common-$stream",
-                        cls.gpgurls(state),
+                        cls.gpgurls(context),
                     )
                     yield RpmRepository(
                         f"{repo.lower()}-source",
                         f"{url}?arch=source&repo=centos-extras-sig-extras-common-source-$stream",
-                        cls.gpgurls(state),
+                        cls.gpgurls(context),
                         enabled=False,
                     )
                 else:
                     yield RpmRepository(
                         repo.lower(),
                         f"{url}?arch=$basearch&repo=centos-{repo.lower()}-$stream",
-                        cls.gpgurls(state),
+                        cls.gpgurls(context),
                     )
                     yield RpmRepository(
                         f"{repo.lower()}-debuginfo",
                         f"{url}?arch=$basearch&repo=centos-{repo.lower()}-debug-$stream",
-                        cls.gpgurls(state),
+                        cls.gpgurls(context),
                         enabled=False,
                     )
                     yield RpmRepository(
                         f"{repo.lower()}-source",
                         f"{url}?arch=source&repo=centos-{repo.lower()}-source-$stream",
-                        cls.gpgurls(state),
+                        cls.gpgurls(context),
                         enabled=False,
                     )
 
     @classmethod
-    def repositories(cls, state: MkosiState) -> Iterable[RpmRepository]:
-        if state.config.local_mirror:
-            yield from cls.repository_variants(state, "AppStream")
+    def repositories(cls, context: Context) -> Iterable[RpmRepository]:
+        if context.config.local_mirror:
+            yield from cls.repository_variants(context, "AppStream")
         else:
-            yield from cls.repository_variants(state, "BaseOS")
-            yield from cls.repository_variants(state, "AppStream")
-            yield from cls.repository_variants(state, "extras")
+            yield from cls.repository_variants(context, "BaseOS")
+            yield from cls.repository_variants(context, "AppStream")
+            yield from cls.repository_variants(context, "extras")
 
-        if GenericVersion(state.config.release) >= 9:
-            yield from cls.repository_variants(state, "CRB")
+        if GenericVersion(context.config.release) >= 9:
+            yield from cls.repository_variants(context, "CRB")
         else:
-            yield from cls.repository_variants(state, "PowerTools")
+            yield from cls.repository_variants(context, "PowerTools")
 
-        yield from cls.epel_repositories(state)
-        yield from cls.sig_repositories(state)
+        yield from cls.epel_repositories(context)
+        yield from cls.sig_repositories(context)
 
     @classmethod
-    def epel_repositories(cls, state: MkosiState) -> Iterable[RpmRepository]:
+    def epel_repositories(cls, context: Context) -> Iterable[RpmRepository]:
         gpgurls = (
             find_rpm_gpgkey(
-                state,
-                f"RPM-GPG-KEY-EPEL-{state.config.release}",
-                f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{state.config.release}",
+                context,
+                f"RPM-GPG-KEY-EPEL-{context.config.release}",
+                f"https://dl.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-{context.config.release}",
             ),
         )
 
-        if state.config.local_mirror:
+        if context.config.local_mirror:
             return
 
-        if state.config.mirror:
+        if mirror := context.config.mirror:
             for repo, dir in (
                 ("epel", "epel"),
                 ("epel-next", "epel/next"),
@@ -249,19 +249,19 @@ class Installer(DistributionInstaller):
             ):
                 yield RpmRepository(
                     repo,
-                    f"baseurl={join_mirror(state.config.mirror, f'{dir}/$releasever/Everything/$basearch')}",
+                    f"baseurl={join_mirror(mirror, f'{dir}/$releasever/Everything/$basearch')}",
                     gpgurls,
                     enabled=False,
                 )
                 yield RpmRepository(
                     f"{repo}-debuginfo",
-                    f"baseurl={join_mirror(state.config.mirror, f'{dir}/$releasever/Everything/$basearch/debug')}",
+                    f"baseurl={join_mirror(mirror, f'{dir}/$releasever/Everything/$basearch/debug')}",
                     gpgurls,
                     enabled=False,
                 )
                 yield RpmRepository(
                     f"{repo}-source",
-                    f"baseurl={join_mirror(state.config.mirror, f'{dir}/$releasever/Everything/source/tree')}",
+                    f"baseurl={join_mirror(mirror, f'{dir}/$releasever/Everything/source/tree')}",
                     gpgurls,
                     enabled=False,
                 )
@@ -320,8 +320,8 @@ class Installer(DistributionInstaller):
             )
 
     @classmethod
-    def sig_repositories(cls, state: MkosiState) -> Iterable[RpmRepository]:
-        if state.config.local_mirror:
+    def sig_repositories(cls, context: Context) -> Iterable[RpmRepository]:
+        if context.config.local_mirror:
             return
 
         sigs = (
@@ -333,50 +333,50 @@ class Installer(DistributionInstaller):
         )
 
         for sig, components, keys in sigs:
-            gpgurls = tuple(find_rpm_gpgkey(state, key, f"https://www.centos.org/keys/{key}") for key in keys)
+            gpgurls = tuple(find_rpm_gpgkey(context, key, f"https://www.centos.org/keys/{key}") for key in keys)
 
             for c in components:
-                if state.config.mirror:
-                    if GenericVersion(state.config.release) <= 8:
+                if mirror := context.config.mirror:
+                    if GenericVersion(context.config.release) <= 8:
                         yield RpmRepository(
                             f"{sig}-{c}",
-                            f"baseurl={join_mirror(state.config.mirror, f'centos/$stream/{sig}/$basearch/{c}')}",
+                            f"baseurl={join_mirror(mirror, f'centos/$stream/{sig}/$basearch/{c}')}",
                             gpgurls,
                             enabled=False,
                         )
                         yield RpmRepository(
                             f"{sig}-{c}-debuginfo",
-                            f"baseurl={join_mirror(state.config.mirror, f'$stream/{sig}/$basearch')}",
+                            f"baseurl={join_mirror(mirror, f'$stream/{sig}/$basearch')}",
                             gpgurls,
                             enabled=False,
                         )
                         yield RpmRepository(
                             f"{sig}-{c}-source",
-                            f"baseurl={join_mirror(state.config.mirror, f'centos/$stream/{sig}/Source')}",
+                            f"baseurl={join_mirror(mirror, f'centos/$stream/{sig}/Source')}",
                             gpgurls,
                             enabled=False,
                         )
                     else:
                         yield RpmRepository(
                             f"{sig}-{c}",
-                            f"baseurl={join_mirror(state.config.mirror, f'SIGs/$stream/{sig}/$basearch/{c}')}",
+                            f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}')}",
                             gpgurls,
                             enabled=False,
                         )
                         yield RpmRepository(
                             f"{sig}-{c}-debuginfo",
-                            f"baseurl={join_mirror(state.config.mirror, f'SIGs/$stream/{sig}/$basearch/{c}/debug')}",
+                            f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/$basearch/{c}/debug')}",
                             gpgurls,
                             enabled=False,
                         )
                         yield RpmRepository(
                             f"{sig}-{c}-source",
-                            f"baseurl={join_mirror(state.config.mirror, f'SIGs/$stream/{sig}/source/{c}')}",
+                            f"baseurl={join_mirror(mirror, f'SIGs/$stream/{sig}/source/{c}')}",
                             gpgurls,
                             enabled=False,
                         )
                 else:
-                    if GenericVersion(state.config.release) <= 8:
+                    if GenericVersion(context.config.release) <= 8:
                         yield RpmRepository(
                             f"{sig}-{c}",
                             f"mirrorlist=http://mirrorlist.centos.org/?release=$stream&arch=$basearch&repo={sig}-{c}",
@@ -424,7 +424,7 @@ class Installer(DistributionInstaller):
                         enabled=False,
                     )
 
-                    if GenericVersion(state.config.release) >= 9:
+                    if GenericVersion(context.config.release) >= 9:
                         yield RpmRepository(
                             f"{sig}-{c}-testing-debuginfo",
                             f"baseurl=https://buildlogs.centos.org/centos/$stream/{sig}/$basearch/{c}",
index bdcf3477759485bf8994e71d162d6c8c062478ea..3306c29ea3b83bf6a4f37365d42f951d6d270e93 100644 (file)
@@ -3,9 +3,9 @@
 from collections.abc import Sequence
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import DistributionInstaller
 from mkosi.log import die
-from mkosi.state import MkosiState
 
 
 class Installer(DistributionInstaller):
@@ -14,19 +14,19 @@ class Installer(DistributionInstaller):
         return str(arch)
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
+    def setup(cls, context: Context) -> None:
         pass
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
+    def install(cls, context: Context) -> None:
         pass
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
+    def install_packages(cls, context: Context, packages: Sequence[str]) -> None:
         if packages:
             die("Installing packages is not supported for custom distributions'")
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
         if packages:
             die("Removing packages is not supported for custom distributions")
index 9a3b897b9cc0c99a7fe14fe2018d2789e44d9373..9503d5831bf23a1a16492ec69b3404d5c28249d0 100644 (file)
@@ -8,10 +8,10 @@ from pathlib import Path
 from mkosi.archive import extract_tar
 from mkosi.bubblewrap import bwrap
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import Distribution, DistributionInstaller, PackageType
 from mkosi.installer.apt import invoke_apt, setup_apt
 from mkosi.log import die
-from mkosi.state import MkosiState
 from mkosi.util import umask
 
 
@@ -37,48 +37,48 @@ class Installer(DistributionInstaller):
         return Distribution.debian
 
     @staticmethod
-    def repositories(state: MkosiState, local: bool = True) -> list[str]:
+    def repositories(context: Context, local: bool = True) -> list[str]:
         archives = ("deb", "deb-src")
-        components = ' '.join(("main", *state.config.repositories))
+        components = ' '.join(("main", *context.config.repositories))
 
-        if state.config.local_mirror and local:
-            return [f"deb [trusted=yes] {state.config.local_mirror} {state.config.release} {components}"]
+        if context.config.local_mirror and local:
+            return [f"deb [trusted=yes] {context.config.local_mirror} {context.config.release} {components}"]
 
-        mirror = state.config.mirror or "http://deb.debian.org/debian"
+        mirror = context.config.mirror or "http://deb.debian.org/debian"
         signedby = "[signed-by=/usr/share/keyrings/debian-archive-keyring.gpg]"
 
         repos = [
-            f"{archive} {signedby} {mirror} {state.config.release} {components}"
+            f"{archive} {signedby} {mirror} {context.config.release} {components}"
             for archive in archives
         ]
 
         # Debug repos are typically not mirrored.
         url = "http://deb.debian.org/debian-debug"
-        repos += [f"deb {signedby} {url} {state.config.release}-debug {components}"]
+        repos += [f"deb {signedby} {url} {context.config.release}-debug {components}"]
 
-        if state.config.release in ("unstable", "sid"):
+        if context.config.release in ("unstable", "sid"):
             return repos
 
         repos += [
-            f"{archive} {signedby} {mirror} {state.config.release}-updates {components}"
+            f"{archive} {signedby} {mirror} {context.config.release}-updates {components}"
             for archive in archives
         ]
 
         # Security updates repos are never mirrored.
         url = "http://security.debian.org/debian-security "
         repos += [
-            f"{archive} {signedby} {url} {state.config.release}-security {components}"
+            f"{archive} {signedby} {url} {context.config.release}-security {components}"
             for archive in archives
         ]
 
         return repos
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
-        setup_apt(state, cls.repositories(state))
+    def setup(cls, context: Context) -> None:
+        setup_apt(context, cls.repositories(context))
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
+    def install(cls, context: Context) -> None:
         # Instead of using debootstrap, we replicate its core functionality here. Because dpkg does not have
         # an option to delay running pre-install maintainer scripts when it installs a package, it's
         # impossible to use apt directly to bootstrap a Debian chroot since dpkg will try to run a maintainer
@@ -102,12 +102,12 @@ class Installer(DistributionInstaller):
             "sparc"       : ["lib64"],
             "sparc64"     : ["lib32", "lib64"],
             "x32"         : ["lib32", "lib64", "libx32"],
-        }.get(state.config.distribution.architecture(state.config.architecture), [])
+        }.get(context.config.distribution.architecture(context.config.architecture), [])
 
         with umask(~0o755):
             for d in subdirs:
-                (state.root / d).symlink_to(f"usr/{d}")
-                (state.root / f"usr/{d}").mkdir(parents=True, exist_ok=True)
+                (context.root / d).symlink_to(f"usr/{d}")
+                (context.root / f"usr/{d}").mkdir(parents=True, exist_ok=True)
 
         # Next, we invoke apt-get install to download all the essential packages. With DPkg::Pre-Install-Pkgs,
         # we specify a shell command that will receive the list of packages that will be installed on stdin.
@@ -115,7 +115,7 @@ class Installer(DistributionInstaller):
         # all it does is download the essential debs and tell us their full path in the apt cache without actually
         # installing them.
         with tempfile.NamedTemporaryFile(mode="r") as f:
-            cls.install_packages(state, [
+            cls.install_packages(context, [
                 "-oDebug::pkgDPkgPm=1",
                 f"-oDPkg::Pre-Install-Pkgs::=cat >{f.name}",
                 "?essential", "?name(usr-is-merged)",
@@ -128,41 +128,41 @@ class Installer(DistributionInstaller):
 
         for deb in essential:
             with tempfile.NamedTemporaryFile() as f:
-                bwrap(state, ["dpkg-deb", "--fsys-tarfile", deb], stdout=f)
-                extract_tar(state, Path(f.name), state.root, log=False)
+                bwrap(context, ["dpkg-deb", "--fsys-tarfile", deb], stdout=f)
+                extract_tar(context, Path(f.name), context.root, log=False)
 
         # Finally, run apt to properly install packages in the chroot without having to worry that maintainer
         # scripts won't find basic tools that they depend on.
 
-        cls.install_packages(state, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential])
+        cls.install_packages(context, [Path(deb).name.partition("_")[0].removesuffix(".deb") for deb in essential])
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
         # Debian policy is to start daemons by default. The policy-rc.d script can be used to choose which ones to
         # start. Let's install one that denies all daemon startups.
         # See https://people.debian.org/~hmh/invokerc.d-policyrc.d-specification.txt for more information.
         # Note: despite writing in /usr/sbin, this file is not shipped by the OS and instead should be managed by
         # the admin.
-        policyrcd = state.root / "usr/sbin/policy-rc.d"
+        policyrcd = context.root / "usr/sbin/policy-rc.d"
         with umask(~0o644):
             policyrcd.write_text("#!/bin/sh\nexit 101\n")
 
-        invoke_apt(state, "apt-get", "update", apivfs=False)
-        invoke_apt(state, "apt-get", "install", packages, apivfs=apivfs)
-        install_apt_sources(state, cls.repositories(state, local=False))
+        invoke_apt(context, "apt-get", "update", apivfs=False)
+        invoke_apt(context, "apt-get", "install", packages, apivfs=apivfs)
+        install_apt_sources(context, cls.repositories(context, local=False))
 
         policyrcd.unlink()
 
-        for d in state.root.glob("boot/vmlinuz-*"):
+        for d in context.root.glob("boot/vmlinuz-*"):
             kver = d.name.removeprefix("vmlinuz-")
-            vmlinuz = state.root / "usr/lib/modules" / kver / "vmlinuz"
+            vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz"
             if not vmlinuz.exists():
                 shutil.copy2(d, vmlinuz)
 
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
-        invoke_apt(state, "apt-get", "purge", packages)
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
+        invoke_apt(context, "apt-get", "purge", packages)
 
     @classmethod
     def architecture(cls, arch: Architecture) -> str:
@@ -190,11 +190,11 @@ class Installer(DistributionInstaller):
         return a
 
 
-def install_apt_sources(state: MkosiState, repos: Sequence[str]) -> None:
-    if not (state.root / "usr/bin/apt").exists():
+def install_apt_sources(context: Context, repos: Sequence[str]) -> None:
+    if not (context.root / "usr/bin/apt").exists():
         return
 
-    sources = state.root / "etc/apt/sources.list"
+    sources = context.root / "etc/apt/sources.list"
     if not sources.exists():
         with sources.open("w") as f:
             for repo in repos:
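repositories() above assembles classic one-line apt sources of the form "deb [signed-by=…] <mirror> <suite> <components>", skipping the -updates and -security suites for unstable/sid. A standalone sketch of the same string assembly, with hypothetical release and component values:

    # Hypothetical inputs; the real values come from context.config.
    release = "bookworm"
    extra_components = ("contrib", "non-free-firmware")
    mirror = "http://deb.debian.org/debian"

    signedby = "[signed-by=/usr/share/keyrings/debian-archive-keyring.gpg]"
    components = " ".join(("main", *extra_components))

    repos = [
        f"{archive} {signedby} {mirror} {release} {components}"
        for archive in ("deb", "deb-src")
    ]

    if release not in ("unstable", "sid"):
        repos += [
            f"{archive} {signedby} {mirror} {release}-updates {components}"
            for archive in ("deb", "deb-src")
        ]

    print("\n".join(repos))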
index 014caaff804f0e6a02e5c543b7dbc7857f0c4eec..45d9d11d20f46eb0988dbc88ed67336d9bd513af 100644 (file)
@@ -3,6 +3,7 @@
 from collections.abc import Sequence
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import (
     Distribution,
     DistributionInstaller,
@@ -12,7 +13,6 @@ from mkosi.distributions import (
 from mkosi.installer.dnf import invoke_dnf, setup_dnf
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
 from mkosi.log import die
-from mkosi.state import MkosiState
 
 
 class Installer(DistributionInstaller):
@@ -37,21 +37,21 @@ class Installer(DistributionInstaller):
         return Distribution.fedora
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
+    def setup(cls, context: Context) -> None:
         gpgurls = (
             find_rpm_gpgkey(
-                state,
-                key=f"RPM-GPG-KEY-fedora-{state.config.release}-primary",
+                context,
+                key=f"RPM-GPG-KEY-fedora-{context.config.release}-primary",
                 url="https://fedoraproject.org/fedora.gpg",
             ),
         )
 
         repos = []
 
-        if state.config.local_mirror:
-            repos += [RpmRepository("fedora", f"baseurl={state.config.local_mirror}", gpgurls)]
-        elif state.config.release == "eln":
-            mirror = state.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose"
+        if context.config.local_mirror:
+            repos += [RpmRepository("fedora", f"baseurl={context.config.local_mirror}", gpgurls)]
+        elif context.config.release == "eln":
+            mirror = context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose"
             for repo in ("Appstream", "BaseOS", "Extras", "CRB"):
                 url = f"baseurl={join_mirror(mirror, repo)}"
                 repos += [
@@ -59,24 +59,24 @@ class Installer(DistributionInstaller):
                     RpmRepository(repo.lower(), f"{url}/$basearch/debug/tree", gpgurls, enabled=False),
                     RpmRepository(repo.lower(), f"{url}/source/tree", gpgurls, enabled=False),
                 ]
-        elif state.config.mirror:
-            directory = "development" if state.config.release == "rawhide" else "releases"
-            url = f"baseurl={join_mirror(state.config.mirror, f'{directory}/$releasever/Everything')}"
+        elif context.config.mirror:
+            directory = "development" if context.config.release == "rawhide" else "releases"
+            url = f"baseurl={join_mirror(context.config.mirror, f'{directory}/$releasever/Everything')}"
             repos += [
                 RpmRepository("fedora", f"{url}/$basearch/os", gpgurls),
                 RpmRepository("fedora-debuginfo", f"{url}/$basearch/debug/tree", gpgurls, enabled=False),
                 RpmRepository("fedora-source", f"{url}/source/tree", gpgurls, enabled=False),
             ]
 
-            if state.config.release != "rawhide":
-                url = f"baseurl={join_mirror(state.config.mirror, 'updates/$releasever/Everything')}"
+            if context.config.release != "rawhide":
+                url = f"baseurl={join_mirror(context.config.mirror, 'updates/$releasever/Everything')}"
                 repos += [
                     RpmRepository("updates", f"{url}/$basearch", gpgurls),
                     RpmRepository("updates-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False),
                     RpmRepository("updates-source", f"{url}/source/tree", gpgurls, enabled=False),
                 ]
 
-                url = f"baseurl={join_mirror(state.config.mirror, 'updates/testing/$releasever/Everything')}"
+                url = f"baseurl={join_mirror(context.config.mirror, 'updates/testing/$releasever/Everything')}"
                 repos += [
                     RpmRepository("updates-testing", f"{url}/$basearch", gpgurls, enabled=False),
                     RpmRepository("updates-testing-debuginfo", f"{url}/$basearch/debug", gpgurls, enabled=False),
@@ -90,7 +90,7 @@ class Installer(DistributionInstaller):
                 RpmRepository("fedora-source", f"{url}&repo=fedora-source-$releasever", gpgurls, enabled=False),
             ]
 
-            if state.config.release != "rawhide":
+            if context.config.release != "rawhide":
                 repos += [
                     RpmRepository("updates", f"{url}&repo=updates-released-f$releasever", gpgurls),
                     RpmRepository(
@@ -126,19 +126,19 @@ class Installer(DistributionInstaller):
                 ]
 
         # TODO: Use `filelists=True` when F37 goes EOL.
-        setup_dnf(state, repos, filelists=fedora_release_at_most(state.config.release, "37"))
+        setup_dnf(context, repos, filelists=fedora_release_at_most(context.config.release, "37"))
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
-        cls.install_packages(state, ["filesystem"], apivfs=False)
+    def install(cls, context: Context) -> None:
+        cls.install_packages(context, ["filesystem"], apivfs=False)
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
-        invoke_dnf(state, "install", packages, apivfs=apivfs)
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
+        invoke_dnf(context, "install", packages, apivfs=apivfs)
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
-        invoke_dnf(state, "remove", packages)
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
+        invoke_dnf(context, "remove", packages)
 
     @classmethod
     def architecture(cls, arch: Architecture) -> str:
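Note: every hunk in this commit dereferences the same handful of attributes on the new Context object. A minimal sketch of what that object carries, purely for orientation; the real definition lives in mkosi/context.py (moved from mkosi/state.py), and the args field is an assumption carried over from MkosiState:

    from pathlib import Path
    from mkosi.config import Args, Config  # renamed from MkosiArgs/MkosiConfig in this commit

    class Context:
        args: Args          # parsed command line options (assumed, as in MkosiState)
        config: Config      # per-image settings: release, mirror, environment, ...
        workspace: Path     # scratch directory for the current build
        root: Path          # image root tree being populated
        cache_dir: Path     # package/download cache
        pkgmngr: Path       # overlay holding package manager configuration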
index 6aff128cd3ec4a37a5eaab8ba47169b2b4359aea..1e9f9e7f0768c6df337df1b41ab946701e06bfdf 100644 (file)
@@ -10,6 +10,7 @@ from pathlib import Path
 from mkosi.archive import extract_tar
 from mkosi.bubblewrap import apivfs_cmd, bwrap, chroot_cmd
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import (
     Distribution,
     DistributionInstaller,
@@ -18,22 +19,21 @@ from mkosi.distributions import (
 )
 from mkosi.log import ARG_DEBUG, complete_step, die
 from mkosi.run import run
-from mkosi.state import MkosiState
 from mkosi.tree import copy_tree, rmtree
 from mkosi.types import PathString
 from mkosi.util import sort_packages
 
 
-def invoke_emerge(state: MkosiState, packages: Sequence[str] = (), apivfs: bool = True) -> None:
+def invoke_emerge(context: Context, packages: Sequence[str] = (), apivfs: bool = True) -> None:
     bwrap(
-        state,
-        cmd=apivfs_cmd(state.root) + [
+        context,
+        cmd=apivfs_cmd(context.root) + [
             # We can't mount the stage 3 /usr using `options`, because bwrap isn't available in the stage 3
             # tarball which is required by apivfs_cmd(), so we have to mount /usr from the tarball later
             # using another bwrap exec.
             "bwrap",
             "--dev-bind", "/", "/",
-            "--bind", state.cache_dir / "stage3/usr", "/usr",
+            "--bind", context.cache_dir / "stage3/usr", "/usr",
             "emerge",
             "--buildpkg=y",
             "--usepkg=y",
@@ -46,21 +46,21 @@ def invoke_emerge(state: MkosiState, packages: Sequence[str] = (), apivfs: bool
             "--verbose-conflicts",
             "--noreplace",
             *(["--verbose", "--quiet=n", "--quiet-fail=n"] if ARG_DEBUG.get() else ["--quiet-build", "--quiet"]),
-            f"--root={state.root}",
+            f"--root={context.root}",
             *sort_packages(packages),
         ],
         network=True,
         options=[
             # TODO: Get rid of as many of these as possible.
-            "--bind", state.cache_dir / "stage3/etc", "/etc",
-            "--bind", state.cache_dir / "stage3/var", "/var",
+            "--bind", context.cache_dir / "stage3/etc", "/etc",
+            "--bind", context.cache_dir / "stage3/var", "/var",
             "--ro-bind", "/etc/resolv.conf", "/etc/resolv.conf",
-            "--bind", state.cache_dir / "repos", "/var/db/repos",
+            "--bind", context.cache_dir / "repos", "/var/db/repos",
         ],
         env=dict(
-            PKGDIR=str(state.cache_dir / "binpkgs"),
-            DISTDIR=str(state.cache_dir / "distfiles"),
-        ) | ({"USE": "build"} if not apivfs else {}) | state.config.environment,
+            PKGDIR=str(context.cache_dir / "binpkgs"),
+            DISTDIR=str(context.cache_dir / "distfiles"),
+        ) | ({"USE": "build"} if not apivfs else {}) | context.config.environment,
     )
 
 
@@ -86,14 +86,14 @@ class Installer(DistributionInstaller):
         return Distribution.gentoo
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
+    def setup(cls, context: Context) -> None:
         pass
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
-        arch = state.config.distribution.architecture(state.config.architecture)
+    def install(cls, context: Context) -> None:
+        arch = context.config.distribution.architecture(context.config.architecture)
 
-        mirror = state.config.mirror or "https://distfiles.gentoo.org"
+        mirror = context.config.mirror or "https://distfiles.gentoo.org"
         # http://distfiles.gentoo.org/releases/amd64/autobuilds/latest-stage3.txt
         stage3tsf_path_url = join_mirror(
             mirror.partition(" ")[0],
@@ -112,8 +112,8 @@ class Installer(DistributionInstaller):
                 die("profile names changed upstream?")
 
         stage3_url = join_mirror(mirror, f"releases/{arch}/autobuilds/{stage3_latest}")
-        stage3_tar = state.cache_dir / "stage3.tar"
-        stage3 = state.cache_dir / "stage3"
+        stage3_tar = context.cache_dir / "stage3.tar"
+        stage3 = context.cache_dir / "stage3"
 
         with complete_step("Fetching latest stage3 snapshot"):
             old = stage3_tar.stat().st_mtime if stage3_tar.exists() else 0
@@ -131,12 +131,12 @@ class Installer(DistributionInstaller):
 
         if not any(stage3.iterdir()):
             with complete_step(f"Extracting {stage3_tar.name} to {stage3}"):
-                extract_tar(state, stage3_tar, stage3)
+                extract_tar(context, stage3_tar, stage3)
 
         for d in ("binpkgs", "distfiles", "repos/gentoo"):
-            (state.cache_dir / d).mkdir(parents=True, exist_ok=True)
+            (context.cache_dir / d).mkdir(parents=True, exist_ok=True)
 
-        copy_tree(state.pkgmngr, stage3, preserve_owner=False, use_subvolumes=state.config.use_subvolumes)
+        copy_tree(context.pkgmngr, stage3, preserve_owner=False, use_subvolumes=context.config.use_subvolumes)
 
         features = " ".join([
             # Disable sandboxing in emerge because we already do it in mkosi.
@@ -150,7 +150,7 @@ class Installer(DistributionInstaller):
             "-usersync",
             "-ebuild-locks",
             "parallel-install",
-            *(["noman", "nodoc", "noinfo"] if state.config.with_docs else []),
+            *(["noman", "nodoc", "noinfo"] if context.config.with_docs else []),
         ])
 
         # Setting FEATURES via the environment variable does not seem to apply to ebuilds in portage, so we
@@ -160,25 +160,25 @@ class Installer(DistributionInstaller):
 
         chroot = chroot_cmd(
             stage3,
-            options=["--bind", state.cache_dir / "repos", "/var/db/repos"],
+            options=["--bind", context.cache_dir / "repos", "/var/db/repos"],
         )
 
-        bwrap(state, cmd=chroot + ["emerge-webrsync"], network=True)
+        bwrap(context, cmd=chroot + ["emerge-webrsync"], network=True)
 
-        invoke_emerge(state, packages=["sys-apps/baselayout"], apivfs=False)
+        invoke_emerge(context, packages=["sys-apps/baselayout"], apivfs=False)
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
-        invoke_emerge(state, packages=packages, apivfs=apivfs)
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
+        invoke_emerge(context, packages=packages, apivfs=apivfs)
 
-        for d in state.root.glob("usr/src/linux-*"):
+        for d in context.root.glob("usr/src/linux-*"):
             kver = d.name.removeprefix("linux-")
             kimg = d / {
                 Architecture.x86_64: "arch/x86/boot/bzImage",
                 Architecture.arm64: "arch/arm64/boot/Image.gz",
                 Architecture.arm: "arch/arm/boot/zImage",
-            }[state.config.architecture]
-            vmlinuz = state.root / "usr/lib/modules" / kver / "vmlinuz"
+            }[context.config.architecture]
+            vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz"
             if not vmlinuz.exists() and not vmlinuz.is_symlink():
                 vmlinuz.symlink_to(os.path.relpath(kimg, start=vmlinuz.parent))
 
index 39b5ac6236df5918e08d2ada67f943cdd7eda675..56324e063f4692b1e6efb31e552fb38e40d2e163 100644 (file)
@@ -4,6 +4,7 @@ import shutil
 from collections.abc import Sequence
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import (
     Distribution,
     DistributionInstaller,
@@ -13,7 +14,6 @@ from mkosi.distributions import (
 from mkosi.installer.dnf import invoke_dnf, setup_dnf
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
 from mkosi.log import die
-from mkosi.state import MkosiState
 
 
 class Installer(DistributionInstaller):
@@ -38,10 +38,10 @@ class Installer(DistributionInstaller):
         return Distribution.mageia
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
+    def setup(cls, context: Context) -> None:
         gpgurls = (
             find_rpm_gpgkey(
-                state,
+                context,
                 "RPM-GPG-KEY-Mageia",
                 "https://mirrors.kernel.org/mageia/distrib/$releasever/$basearch/media/core/release/media_info/pubkey",
             ),
@@ -49,10 +49,10 @@ class Installer(DistributionInstaller):
 
         repos = []
 
-        if state.config.local_mirror:
-            repos += [RpmRepository("core-release", f"baseurl={state.config.local_mirror}", gpgurls)]
-        elif state.config.mirror:
-            url = f"baseurl={join_mirror(state.config.mirror, 'distrib/$releasever/$basearch/media/core/')}"
+        if context.config.local_mirror:
+            repos += [RpmRepository("core-release", f"baseurl={context.config.local_mirror}", gpgurls)]
+        elif context.config.mirror:
+            url = f"baseurl={join_mirror(context.config.mirror, 'distrib/$releasever/$basearch/media/core/')}"
             repos += [
                 RpmRepository("core-release", f"{url}/release", gpgurls),
                 RpmRepository("core-updates", f"{url}/updates/", gpgurls)
@@ -64,25 +64,25 @@ class Installer(DistributionInstaller):
                 RpmRepository("core-updates", f"{url}&repo=updates", gpgurls)
             ]
 
-        setup_dnf(state, repos)
+        setup_dnf(context, repos)
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
-        cls.install_packages(state, ["filesystem"], apivfs=False)
+    def install(cls, context: Context) -> None:
+        cls.install_packages(context, ["filesystem"], apivfs=False)
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
-        invoke_dnf(state, "install", packages, apivfs=apivfs)
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
+        invoke_dnf(context, "install", packages, apivfs=apivfs)
 
-        for d in state.root.glob("boot/vmlinuz-*"):
+        for d in context.root.glob("boot/vmlinuz-*"):
             kver = d.name.removeprefix("vmlinuz-")
-            vmlinuz = state.root / "usr/lib/modules" / kver / "vmlinuz"
+            vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz"
             if not vmlinuz.exists():
                 shutil.copy2(d, vmlinuz)
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
-        invoke_dnf(state, "remove", packages)
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
+        invoke_dnf(context, "remove", packages)
 
     @classmethod
     def architecture(cls, arch: Architecture) -> str:
index d47db247e6f19359de0948064cb457b8e47dd931..935cf79586499262f67186dbb8cb73041e952334 100644 (file)
@@ -4,6 +4,7 @@ import shutil
 from collections.abc import Sequence
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import (
     Distribution,
     DistributionInstaller,
@@ -13,7 +14,6 @@ from mkosi.distributions import (
 from mkosi.installer.dnf import invoke_dnf, setup_dnf
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
 from mkosi.log import die
-from mkosi.state import MkosiState
 
 
 class Installer(DistributionInstaller):
@@ -38,12 +38,12 @@ class Installer(DistributionInstaller):
         return Distribution.openmandriva
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
-        mirror = state.config.mirror or "http://mirror.openmandriva.org"
+    def setup(cls, context: Context) -> None:
+        mirror = context.config.mirror or "http://mirror.openmandriva.org"
 
         gpgurls = (
             find_rpm_gpgkey(
-                state,
+                context,
                 "RPM-GPG-KEY-OpenMandriva",
                 "https://raw.githubusercontent.com/OpenMandrivaAssociation/openmandriva-repos/master/RPM-GPG-KEY-OpenMandriva",
             ),
@@ -51,8 +51,8 @@ class Installer(DistributionInstaller):
 
         repos = []
 
-        if state.config.local_mirror:
-            repos += [RpmRepository("main-release", f"baseurl={state.config.local_mirror}", gpgurls)]
+        if context.config.local_mirror:
+            repos += [RpmRepository("main-release", f"baseurl={context.config.local_mirror}", gpgurls)]
         else:
             url = f"baseurl={join_mirror(mirror, '$releasever/repository/$basearch/main')}"
             repos += [
@@ -60,19 +60,19 @@ class Installer(DistributionInstaller):
                 RpmRepository("main-updates", f"{url}/updates", gpgurls),
             ]
 
-        setup_dnf(state, repos)
+        setup_dnf(context, repos)
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
-        cls.install_packages(state, ["filesystem"], apivfs=False)
+    def install(cls, context: Context) -> None:
+        cls.install_packages(context, ["filesystem"], apivfs=False)
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
-        invoke_dnf(state, "install", packages, apivfs=apivfs)
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
+        invoke_dnf(context, "install", packages, apivfs=apivfs)
 
-        for d in state.root.glob("boot/vmlinuz-*"):
+        for d in context.root.glob("boot/vmlinuz-*"):
             kver = d.name.removeprefix("vmlinuz-")
-            vmlinuz = state.root / "usr/lib/modules" / kver / "vmlinuz"
+            vmlinuz = context.root / "usr/lib/modules" / kver / "vmlinuz"
             # Openmandriva symlinks /usr/lib/modules/<kver>/vmlinuz to /boot/vmlinuz-<kver>, so get rid of the symlink
             # and put the actual vmlinuz in /usr/lib/modules/<kver>.
             if vmlinuz.is_symlink():
@@ -81,8 +81,8 @@ class Installer(DistributionInstaller):
                 shutil.copy2(d, vmlinuz)
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
-        invoke_dnf(state, "remove", packages)
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
+        invoke_dnf(context, "remove", packages)
 
     @classmethod
     def architecture(cls, arch: Architecture) -> str:
index bae8fbea0a2a28fe1166a1d29672dbb952098408..17b6300934d60c57e8b8fa04fdae24d61507de24 100644 (file)
@@ -7,13 +7,13 @@ from collections.abc import Sequence
 from pathlib import Path
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import Distribution, DistributionInstaller, PackageType
 from mkosi.installer.dnf import invoke_dnf, setup_dnf
 from mkosi.installer.rpm import RpmRepository
 from mkosi.installer.zypper import invoke_zypper, setup_zypper
 from mkosi.log import die
 from mkosi.run import run
-from mkosi.state import MkosiState
 
 
 class Installer(DistributionInstaller):
@@ -38,17 +38,17 @@ class Installer(DistributionInstaller):
         return Distribution.opensuse
 
     @classmethod
-    def setup(cls, state: MkosiState) -> None:
-        release = state.config.release
+    def setup(cls, context: Context) -> None:
+        release = context.config.release
         if release == "leap":
             release = "stable"
 
-        mirror = state.config.mirror or "https://download.opensuse.org"
+        mirror = context.config.mirror or "https://download.opensuse.org"
 
         # If the release looks like a timestamp, it's Tumbleweed. 13.x is legacy
         # (14.x won't ever appear). For anything else, let's default to Leap.
-        if state.config.local_mirror:
-            release_url = f"{state.config.local_mirror}"
+        if context.config.local_mirror:
+            release_url = f"{context.config.local_mirror}"
             updates_url = None
         if release.isdigit() or release == "tumbleweed":
             release_url = f"{mirror}/tumbleweed/repo/oss/"
@@ -64,8 +64,8 @@ class Installer(DistributionInstaller):
 
         # If we need to use a local mirror, create a temporary repository definition
         # that doesn't get in the image, as it is valid only at image build time.
-        if state.config.local_mirror:
-            repos = [RpmRepository("local-mirror", f"baseurl={state.config.local_mirror}", ())]
+        if context.config.local_mirror:
+            repos = [RpmRepository("local-mirror", f"baseurl={context.config.local_mirror}", ())]
         else:
             repos = [
                 RpmRepository("repo-oss", f"baseurl={release_url}", fetch_gpgurls(release_url) if not zypper else ()),
@@ -80,31 +80,31 @@ class Installer(DistributionInstaller):
                 ]
 
         if zypper:
-            setup_zypper(state, repos)
+            setup_zypper(context, repos)
         else:
-            setup_dnf(state, repos)
+            setup_dnf(context, repos)
 
     @classmethod
-    def install(cls, state: MkosiState) -> None:
-        cls.install_packages(state, ["filesystem", "distribution-release"], apivfs=False)
+    def install(cls, context: Context) -> None:
+        cls.install_packages(context, ["filesystem", "distribution-release"], apivfs=False)
 
     @classmethod
-    def install_packages(cls, state: MkosiState, packages: Sequence[str], apivfs: bool = True) -> None:
+    def install_packages(cls, context: Context, packages: Sequence[str], apivfs: bool = True) -> None:
         if shutil.which("zypper"):
             options = [
                 "--download", "in-advance",
-                "--recommends" if state.config.with_recommends else "--no-recommends",
+                "--recommends" if context.config.with_recommends else "--no-recommends",
             ]
-            invoke_zypper(state, "install", packages, options, apivfs=apivfs)
+            invoke_zypper(context, "install", packages, options, apivfs=apivfs)
         else:
-            invoke_dnf(state, "install", packages, apivfs=apivfs)
+            invoke_dnf(context, "install", packages, apivfs=apivfs)
 
     @classmethod
-    def remove_packages(cls, state: MkosiState, packages: Sequence[str]) -> None:
+    def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
         if shutil.which("zypper"):
-            invoke_zypper(state, "remove", packages, ["--clean-deps"])
+            invoke_zypper(context, "remove", packages, ["--clean-deps"])
         else:
-            invoke_dnf(state, "remove", packages)
+            invoke_dnf(context, "remove", packages)
 
     @classmethod
     def architecture(cls, arch: Architecture) -> str:
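Note: the openSUSE setup() logic itself is untouched by the rename. For orientation, the checks visible above pick repositories roughly as follows (mirror left at its default; the Leap-style URLs fall outside this hunk):

    LocalMirror= set                       -> used verbatim as the only repository
    Release= all digits or "tumbleweed"    -> https://download.opensuse.org/tumbleweed/repo/oss/
    Release=leap                           -> rewritten to "stable" before the URL is built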
index 290a9ac6b69a6601fc9284917e4a074fe52c7d8a..e673383ebf4cee2d07d5db3f7884e8d9834ad468 100644 (file)
@@ -4,10 +4,10 @@ from collections.abc import Iterable
 from pathlib import Path
 from typing import Any, Optional
 
+from mkosi.context import Context
 from mkosi.distributions import centos, join_mirror
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
 from mkosi.log import die
-from mkosi.state import MkosiState
 
 
 class Installer(centos.Installer):
@@ -16,25 +16,25 @@ class Installer(centos.Installer):
         return "RHEL"
 
     @staticmethod
-    def gpgurls(state: MkosiState) -> tuple[str, ...]:
-        major = int(float(state.config.release))
+    def gpgurls(context: Context) -> tuple[str, ...]:
+        major = int(float(context.config.release))
 
         return (
             find_rpm_gpgkey(
-                state,
+                context,
                 f"RPM-GPG-KEY-redhat{major}-release",
                 "https://access.redhat.com/security/data/fd431d51.txt",
             ),
         )
 
     @staticmethod
-    def sslcacert(state: MkosiState) -> Optional[Path]:
-        if state.config.mirror:
+    def sslcacert(context: Context) -> Optional[Path]:
+        if context.config.mirror:
             return None
 
         p = Path("etc/rhsm/ca/redhat-uep.pem")
-        if (state.pkgmngr / p).exists():
-            p = state.pkgmngr / p
+        if (context.pkgmngr / p).exists():
+            p = context.pkgmngr / p
         elif (Path("/") / p).exists():
             p = Path("/") / p
         else:
@@ -43,13 +43,13 @@ class Installer(centos.Installer):
         return p
 
     @staticmethod
-    def sslclientkey(state: MkosiState) -> Optional[Path]:
-        if state.config.mirror:
+    def sslclientkey(context: Context) -> Optional[Path]:
+        if context.config.mirror:
             return None
 
         pattern = "etc/pki/entitlement/*-key.pem"
 
-        p = next((p for p in sorted(state.pkgmngr.glob(pattern))), None)
+        p = next((p for p in sorted(context.pkgmngr.glob(pattern))), None)
         if not p:
             p = next((p for p in Path("/").glob(pattern)), None)
         if not p:
@@ -58,13 +58,13 @@ class Installer(centos.Installer):
         return p
 
     @staticmethod
-    def sslclientcert(state: MkosiState) -> Optional[Path]:
-        if state.config.mirror:
+    def sslclientcert(context: Context) -> Optional[Path]:
+        if context.config.mirror:
             return None
 
         pattern = "etc/pki/entitlement/*.pem"
 
-        p = next((p for p in sorted(state.pkgmngr.glob(pattern)) if "key" not in p.name), None)
+        p = next((p for p in sorted(context.pkgmngr.glob(pattern)) if "key" not in p.name), None)
         if not p:
             p = next((p for p in sorted(Path("/").glob(pattern)) if "key" not in p.name), None)
         if not p:
@@ -73,20 +73,20 @@ class Installer(centos.Installer):
         return p
 
     @classmethod
-    def repository_variants(cls, state: MkosiState, repo: str) -> Iterable[RpmRepository]:
-        if state.config.local_mirror:
-            yield RpmRepository(repo, f"baseurl={state.config.local_mirror}", cls.gpgurls(state))
+    def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]:
+        if context.config.local_mirror:
+            yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context))
         else:
-            mirror = state.config.mirror or "https://cdn.redhat.com/content/dist/"
+            mirror = context.config.mirror or "https://cdn.redhat.com/content/dist/"
 
             common: dict[str, Any] = dict(
-                gpgurls=cls.gpgurls(state),
-                sslcacert=cls.sslcacert(state),
-                sslclientcert=cls.sslclientcert(state),
-                sslclientkey=cls.sslclientkey(state),
+                gpgurls=cls.gpgurls(context),
+                sslcacert=cls.sslcacert(context),
+                sslclientcert=cls.sslclientcert(context),
+                sslclientkey=cls.sslclientkey(context),
             )
 
-            v = state.config.release
+            v = context.config.release
             major = int(float(v))
             yield RpmRepository(
                 f"rhel-{v}-{repo}-rpms",
@@ -108,8 +108,8 @@ class Installer(centos.Installer):
             )
 
     @classmethod
-    def repositories(cls, state: MkosiState) -> Iterable[RpmRepository]:
-        yield from cls.repository_variants(state, "baseos")
-        yield from cls.repository_variants(state, "appstream")
-        yield from cls.repository_variants(state, "codeready-builder")
-        yield from cls.epel_repositories(state)
+    def repositories(cls, context: Context) -> Iterable[RpmRepository]:
+        yield from cls.repository_variants(context, "baseos")
+        yield from cls.repository_variants(context, "appstream")
+        yield from cls.repository_variants(context, "codeready-builder")
+        yield from cls.epel_repositories(context)
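Note: repository_variants() and gpgurls() only change their parameter type here. The key name derivation is easy to miss, so a small worked example (the release value is hypothetical):

    release = "9.2"                              # e.g. Release=9.2
    major = int(float(release))                  # -> 9
    key = f"RPM-GPG-KEY-redhat{major}-release"   # -> "RPM-GPG-KEY-redhat9-release"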
index f789cf006abf61edab9d6fe0143f9d6f31a23f6a..baff86b486cc4760a5e3319d730d02155b973017 100644 (file)
@@ -2,9 +2,9 @@
 
 from collections.abc import Iterable
 
+from mkosi.context import Context
 from mkosi.distributions import centos, join_mirror
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
-from mkosi.state import MkosiState
 
 
 class Installer(centos.Installer):
@@ -13,46 +13,46 @@ class Installer(centos.Installer):
         return "RHEL UBI"
 
     @staticmethod
-    def gpgurls(state: MkosiState) -> tuple[str, ...]:
-        major = int(float(state.config.release))
+    def gpgurls(context: Context) -> tuple[str, ...]:
+        major = int(float(context.config.release))
 
         return (
             find_rpm_gpgkey(
-                state,
+                context,
                 f"RPM-GPG-KEY-redhat{major}-release",
                 "https://access.redhat.com/security/data/fd431d51.txt",
             ),
         )
 
     @classmethod
-    def repository_variants(cls, state: MkosiState, repo: str) -> Iterable[RpmRepository]:
-        if state.config.local_mirror:
-            yield RpmRepository(repo, f"baseurl={state.config.local_mirror}", cls.gpgurls(state))
+    def repository_variants(cls, context: Context, repo: str) -> Iterable[RpmRepository]:
+        if context.config.local_mirror:
+            yield RpmRepository(repo, f"baseurl={context.config.local_mirror}", cls.gpgurls(context))
         else:
-            mirror = state.config.mirror or "https://cdn-ubi.redhat.com/content/public/ubi/dist/"
+            mirror = context.config.mirror or "https://cdn-ubi.redhat.com/content/public/ubi/dist/"
 
-            v = state.config.release
+            v = context.config.release
             yield RpmRepository(
                 f"ubi-{v}-{repo}-rpms",
                 f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/os')}",
-                cls.gpgurls(state),
+                cls.gpgurls(context),
             )
             yield RpmRepository(
                 f"ubi-{v}-{repo}-debug-rpms",
                 f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/debug')}",
-                cls.gpgurls(state),
+                cls.gpgurls(context),
                 enabled=False,
             )
             yield RpmRepository(
                 f"ubi-{v}-{repo}-source",
                 f"baseurl={join_mirror(mirror, f'ubi{v}/{v}/$basearch/{repo}/source')}",
-                cls.gpgurls(state),
+                cls.gpgurls(context),
                 enabled=False,
             )
 
     @classmethod
-    def repositories(cls, state: MkosiState) -> Iterable[RpmRepository]:
-        yield from cls.repository_variants(state, "baseos")
-        yield from cls.repository_variants(state, "appstream")
-        yield from cls.repository_variants(state, "codeready-builder")
-        yield from cls.epel_repositories(state)
+    def repositories(cls, context: Context) -> Iterable[RpmRepository]:
+        yield from cls.repository_variants(context, "baseos")
+        yield from cls.repository_variants(context, "appstream")
+        yield from cls.repository_variants(context, "codeready-builder")
+        yield from cls.epel_repositories(context)
index f35607e338762c721c41ce4ce63fea66154a7aaa..1da604e55a8c7ac90dc7ad9b4a01f3ddc40739b7 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: LGPL-2.1+
 
+from mkosi.context import Context
 from mkosi.distributions import centos, join_mirror
 from mkosi.installer.rpm import RpmRepository, find_rpm_gpgkey
-from mkosi.state import MkosiState
 
 
 class Installer(centos.Installer):
@@ -11,24 +11,24 @@ class Installer(centos.Installer):
         return "Rocky Linux"
 
     @staticmethod
-    def gpgurls(state: MkosiState) -> tuple[str, ...]:
+    def gpgurls(context: Context) -> tuple[str, ...]:
         return (
             find_rpm_gpgkey(
-                state,
-                f"RPM-GPG-KEY-Rocky-{state.config.release}",
-                f"https://download.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-{state.config.release}",
+                context,
+                f"RPM-GPG-KEY-Rocky-{context.config.release}",
+                f"https://download.rockylinux.org/pub/rocky/RPM-GPG-KEY-Rocky-{context.config.release}",
             ),
         )
 
     @classmethod
-    def repository_variants(cls, state: MkosiState, repo: str) -> list[RpmRepository]:
-        if state.config.mirror:
-            url = f"baseurl={join_mirror(state.config.mirror, f'rocky/$releasever/{repo}/$basearch/os')}"
+    def repository_variants(cls, context: Context, repo: str) -> list[RpmRepository]:
+        if context.config.mirror:
+            url = f"baseurl={join_mirror(context.config.mirror, f'rocky/$releasever/{repo}/$basearch/os')}"
         else:
             url = f"mirrorlist=https://mirrors.rockylinux.org/mirrorlist?arch=$basearch&repo={repo}-$releasever"
 
-        return [RpmRepository(repo, url, cls.gpgurls(state))]
+        return [RpmRepository(repo, url, cls.gpgurls(context))]
 
     @classmethod
-    def sig_repositories(cls, state: MkosiState) -> list[RpmRepository]:
+    def sig_repositories(cls, context: Context) -> list[RpmRepository]:
         return []
index e3141cfcef6eb7b7d584d61251b5ac7205fbbb28..3b01c05c0d956cd1ca9c394fdc50ac02789ac705 100644 (file)
@@ -1,8 +1,8 @@
 # SPDX-License-Identifier: LGPL-2.1+
 
 from mkosi.config import Architecture
+from mkosi.context import Context
 from mkosi.distributions import debian
-from mkosi.state import MkosiState
 
 
 class Installer(debian.Installer):
@@ -15,42 +15,42 @@ class Installer(debian.Installer):
         return "lunar"
 
     @staticmethod
-    def repositories(state: MkosiState, local: bool = True) -> list[str]:
-        if state.config.local_mirror and local:
-            return [f"deb [trusted=yes] {state.config.local_mirror} {state.config.release} main"]
+    def repositories(context: Context, local: bool = True) -> list[str]:
+        if context.config.local_mirror and local:
+            return [f"deb [trusted=yes] {context.config.local_mirror} {context.config.release} main"]
 
         archives = ("deb", "deb-src")
 
-        if state.config.architecture in (Architecture.x86, Architecture.x86_64):
-            mirror = state.config.mirror or "http://archive.ubuntu.com/ubuntu"
+        if context.config.architecture in (Architecture.x86, Architecture.x86_64):
+            mirror = context.config.mirror or "http://archive.ubuntu.com/ubuntu"
         else:
-            mirror = state.config.mirror or "http://ports.ubuntu.com"
+            mirror = context.config.mirror or "http://ports.ubuntu.com"
 
         signedby = "[signed-by=/usr/share/keyrings/ubuntu-archive-keyring.gpg]"
 
         # From kinetic onwards, the usr-is-merged package is available in universe and is required by
         # mkosi to set up a proper usr-merged system so we add the universe repository unconditionally.
-        components = ["main"] + (["universe"] if state.config.release not in ("focal", "jammy") else [])
-        components = ' '.join((*components, *state.config.repositories))
+        components = ["main"] + (["universe"] if context.config.release not in ("focal", "jammy") else [])
+        components = ' '.join((*components, *context.config.repositories))
 
         repos = [
-            f"{archive} {signedby} {mirror} {state.config.release} {components}"
+            f"{archive} {signedby} {mirror} {context.config.release} {components}"
             for archive in archives
         ]
 
         repos += [
-            f"{archive} {signedby} {mirror} {state.config.release}-updates {components}"
+            f"{archive} {signedby} {mirror} {context.config.release}-updates {components}"
             for archive in archives
         ]
 
         # Security updates repos are never mirrored. But !x86 are on the ports server.
-        if state.config.architecture in [Architecture.x86, Architecture.x86_64]:
+        if context.config.architecture in [Architecture.x86, Architecture.x86_64]:
             mirror = "http://security.ubuntu.com/ubuntu/"
         else:
             mirror = "http://ports.ubuntu.com/"
 
         repos += [
-            f"{archive} {signedby} {mirror} {state.config.release}-security {components}"
+            f"{archive} {signedby} {mirror} {context.config.release}-security {components}"
             for archive in archives
         ]
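Note: with the default x86-64 mirror, the "lunar" release shown above and no extra Repositories= configured, the entries produced by repositories() come out roughly as below (each line also gets a deb-src twin):

    deb [signed-by=/usr/share/keyrings/ubuntu-archive-keyring.gpg] http://archive.ubuntu.com/ubuntu lunar main universe
    deb [signed-by=/usr/share/keyrings/ubuntu-archive-keyring.gpg] http://archive.ubuntu.com/ubuntu lunar-updates main universe
    deb [signed-by=/usr/share/keyrings/ubuntu-archive-keyring.gpg] http://security.ubuntu.com/ubuntu/ lunar-security main universe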
 
index 5ab05503c595ab2dcd3b6a950c5339eafe668552..f3f2f62176c18852d5a8f9a3d43dd4f9c0a47458 100644 (file)
@@ -4,17 +4,17 @@ import os
 
 from mkosi.bubblewrap import apivfs_cmd
 from mkosi.config import ConfigFeature
+from mkosi.context import Context
 from mkosi.installer.apt import apt_cmd
 from mkosi.installer.dnf import dnf_cmd
 from mkosi.installer.pacman import pacman_cmd
 from mkosi.installer.rpm import rpm_cmd
 from mkosi.installer.zypper import zypper_cmd
-from mkosi.state import MkosiState
 from mkosi.tree import rmtree
 from mkosi.types import PathString
 
 
-def clean_package_manager_metadata(state: MkosiState) -> None:
+def clean_package_manager_metadata(context: Context) -> None:
     """
     Remove package manager metadata
 
@@ -22,31 +22,31 @@ def clean_package_manager_metadata(state: MkosiState) -> None:
     the package manager is not present in the image.
     """
 
-    if state.config.clean_package_metadata == ConfigFeature.disabled:
+    if context.config.clean_package_metadata == ConfigFeature.disabled:
         return
 
-    always = state.config.clean_package_metadata == ConfigFeature.enabled
+    always = context.config.clean_package_metadata == ConfigFeature.enabled
 
     for tool, paths in (("rpm",    ["var/lib/rpm", "usr/lib/sysimage/rpm"]),
                         ("dnf5",   ["usr/lib/sysimage/libdnf5"]),
                         ("dpkg",   ["var/lib/dpkg"]),
                         ("pacman", ["var/lib/pacman"])):
         for bin in ("bin", "sbin"):
-            if not always and os.access(state.root / "usr" / bin / tool, mode=os.F_OK, follow_symlinks=False):
+            if not always and os.access(context.root / "usr" / bin / tool, mode=os.F_OK, follow_symlinks=False):
                 break
         else:
             for p in paths:
-                rmtree(state.root / p)
+                rmtree(context.root / p)
 
 
-def package_manager_scripts(state: MkosiState) -> dict[str, list[PathString]]:
+def package_manager_scripts(context: Context) -> dict[str, list[PathString]]:
     return {
-        "pacman": apivfs_cmd(state.root) + pacman_cmd(state),
-        "zypper": apivfs_cmd(state.root) + zypper_cmd(state),
-        "dnf"   : apivfs_cmd(state.root) + dnf_cmd(state),
-        "rpm"   : apivfs_cmd(state.root) + rpm_cmd(state),
+        "pacman": apivfs_cmd(context.root) + pacman_cmd(context),
+        "zypper": apivfs_cmd(context.root) + zypper_cmd(context),
+        "dnf"   : apivfs_cmd(context.root) + dnf_cmd(context),
+        "rpm"   : apivfs_cmd(context.root) + rpm_cmd(context),
     } | {
-        command: apivfs_cmd(state.root) + apt_cmd(state, command) for command in (
+        command: apivfs_cmd(context.root) + apt_cmd(context, command) for command in (
             "apt",
             "apt-cache",
             "apt-cdrom",
index 59574bf8814894f00249d6839327f1df5bed1416..796ffda0d5e1d00e5e474ac7b3863b4e07f41f6d 100644 (file)
@@ -4,30 +4,30 @@ import textwrap
 from collections.abc import Sequence
 
 from mkosi.bubblewrap import apivfs_cmd, bwrap
-from mkosi.state import MkosiState
+from mkosi.context import Context
 from mkosi.types import PathString
 from mkosi.util import sort_packages, umask
 
 
-def setup_apt(state: MkosiState, repos: Sequence[str]) -> None:
-    (state.pkgmngr / "etc/apt").mkdir(exist_ok=True, parents=True)
-    (state.pkgmngr / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True)
-    (state.pkgmngr / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True)
-    (state.pkgmngr / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True)
+def setup_apt(context: Context, repos: Sequence[str]) -> None:
+    (context.pkgmngr / "etc/apt").mkdir(exist_ok=True, parents=True)
+    (context.pkgmngr / "etc/apt/apt.conf.d").mkdir(exist_ok=True, parents=True)
+    (context.pkgmngr / "etc/apt/preferences.d").mkdir(exist_ok=True, parents=True)
+    (context.pkgmngr / "etc/apt/sources.list.d").mkdir(exist_ok=True, parents=True)
 
     # TODO: Drop once apt 2.5.4 is widely available.
     with umask(~0o755):
-        (state.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True)
-        (state.root / "var/lib/dpkg/status").touch()
+        (context.root / "var/lib/dpkg").mkdir(parents=True, exist_ok=True)
+        (context.root / "var/lib/dpkg/status").touch()
 
-    (state.cache_dir / "lib/apt").mkdir(exist_ok=True, parents=True)
-    (state.cache_dir / "cache/apt").mkdir(exist_ok=True, parents=True)
+    (context.cache_dir / "lib/apt").mkdir(exist_ok=True, parents=True)
+    (context.cache_dir / "cache/apt").mkdir(exist_ok=True, parents=True)
 
     # We have a special apt.conf outside of pkgmngr dir that only configures "Dir::Etc" that we pass to APT_CONFIG to
     # tell apt it should read config files from /etc/apt in case this is overridden by distributions. This is required
     # because apt parses CLI configuration options after parsing its configuration files and as such we can't use CLI
     # options to tell apt where to look for configuration files.
-    config = state.workspace / "apt.conf"
+    config = context.workspace / "apt.conf"
     if not config.exists():
         config.write_text(
             textwrap.dedent(
@@ -37,39 +37,39 @@ def setup_apt(state: MkosiState, repos: Sequence[str]) -> None:
             )
         )
 
-    sources = state.pkgmngr / "etc/apt/sources.list"
+    sources = context.pkgmngr / "etc/apt/sources.list"
     if not sources.exists():
         with sources.open("w") as f:
             for repo in repos:
                 f.write(f"{repo}\n")
 
 
-def apt_cmd(state: MkosiState, command: str) -> list[PathString]:
-    debarch = state.config.distribution.architecture(state.config.architecture)
+def apt_cmd(context: Context, command: str) -> list[PathString]:
+    debarch = context.config.distribution.architecture(context.config.architecture)
 
     cmdline: list[PathString] = [
         "env",
-        f"APT_CONFIG={state.workspace / 'apt.conf'}",
+        f"APT_CONFIG={context.workspace / 'apt.conf'}",
         "DEBIAN_FRONTEND=noninteractive",
         "DEBCONF_INTERACTIVE_SEEN=true",
         "INITRD=No",
         command,
         "-o", f"APT::Architecture={debarch}",
         "-o", f"APT::Architectures={debarch}",
-        "-o", f"APT::Install-Recommends={str(state.config.with_recommends).lower()}",
+        "-o", f"APT::Install-Recommends={str(context.config.with_recommends).lower()}",
         "-o", "APT::Immediate-Configure=off",
         "-o", "APT::Get::Assume-Yes=true",
         "-o", "APT::Get::AutomaticRemove=true",
         "-o", "APT::Get::Allow-Change-Held-Packages=true",
         "-o", "APT::Get::Allow-Remove-Essential=true",
         "-o", "APT::Sandbox::User=root",
-        "-o", f"Dir::Cache={state.cache_dir / 'cache/apt'}",
-        "-o", f"Dir::State={state.cache_dir / 'lib/apt'}",
-        "-o", f"Dir::State::Status={state.root / 'var/lib/dpkg/status'}",
-        "-o", f"Dir::Log={state.workspace}",
+        "-o", f"Dir::Cache={context.cache_dir / 'cache/apt'}",
+        "-o", f"Dir::State={context.cache_dir / 'lib/apt'}",
+        "-o", f"Dir::State::Status={context.root / 'var/lib/dpkg/status'}",
+        "-o", f"Dir::Log={context.workspace}",
         "-o", f"Dir::Bin::DPkg={shutil.which('dpkg')}",
         "-o", "Debug::NoLocking=true",
-        "-o", f"DPkg::Options::=--root={state.root}",
+        "-o", f"DPkg::Options::=--root={context.root}",
         "-o", "DPkg::Options::=--force-unsafe-io",
         "-o", "DPkg::Options::=--force-architecture",
         "-o", "DPkg::Options::=--force-depends",
@@ -79,7 +79,7 @@ def apt_cmd(state: MkosiState, command: str) -> list[PathString]:
         "-o", "pkgCacheGen::ForceEssential=,",
     ]
 
-    if not state.config.with_docs:
+    if not context.config.with_docs:
         cmdline += [
             "-o", "DPkg::Options::=--path-exclude=/usr/share/doc/*",
             "-o", "DPkg::Options::=--path-include=/usr/share/doc/*/copyright",
@@ -92,12 +92,12 @@ def apt_cmd(state: MkosiState, command: str) -> list[PathString]:
 
 
 def invoke_apt(
-    state: MkosiState,
+    context: Context,
     command: str,
     operation: str,
     packages: Sequence[str] = (),
     apivfs: bool = True,
 ) -> None:
-    cmd = apivfs_cmd(state.root) if apivfs else []
-    bwrap(state, cmd + apt_cmd(state, command) + [operation, *sort_packages(packages)],
-          network=True, env=state.config.environment)
+    cmd = apivfs_cmd(context.root) if apivfs else []
+    bwrap(context, cmd + apt_cmd(context, command) + [operation, *sort_packages(packages)],
+          network=True, env=context.config.environment)
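Note: invoke_apt() keeps its signature apart from the first parameter. A hypothetical call site, not part of this commit, just to show how the renamed helper is meant to be called:

    # command, then operation, then the package list; apivfs=True wraps the
    # invocation with apivfs_cmd(context.root) as above.
    invoke_apt(context, "apt-get", "install", packages=["systemd"])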
index 58d609f0266c704c0fe50d86901503b8107835e1..25ae26d36dc637f454b2ac7394f849c7a26d0838 100644 (file)
@@ -4,34 +4,34 @@ import textwrap
 from collections.abc import Iterable
 
 from mkosi.bubblewrap import apivfs_cmd, bwrap
+from mkosi.context import Context
 from mkosi.installer.rpm import RpmRepository, fixup_rpmdb_location, setup_rpm
-from mkosi.state import MkosiState
 from mkosi.types import PathString
 from mkosi.util import sort_packages
 
 
-def dnf_executable(state: MkosiState) -> str:
+def dnf_executable(context: Context) -> str:
     # Allow the user to override autodetection with an environment variable
-    dnf = state.config.environment.get("MKOSI_DNF")
+    dnf = context.config.environment.get("MKOSI_DNF")
 
     return dnf or shutil.which("dnf5") or shutil.which("dnf") or "yum"
 
 
-def setup_dnf(state: MkosiState, repositories: Iterable[RpmRepository], filelists: bool = True) -> None:
-    (state.pkgmngr / "etc/dnf/vars").mkdir(exist_ok=True, parents=True)
-    (state.pkgmngr / "etc/yum.repos.d").mkdir(exist_ok=True, parents=True)
+def setup_dnf(context: Context, repositories: Iterable[RpmRepository], filelists: bool = True) -> None:
+    (context.pkgmngr / "etc/dnf/vars").mkdir(exist_ok=True, parents=True)
+    (context.pkgmngr / "etc/yum.repos.d").mkdir(exist_ok=True, parents=True)
 
-    config = state.pkgmngr / "etc/dnf/dnf.conf"
+    config = context.pkgmngr / "etc/dnf/dnf.conf"
 
     if not config.exists():
         config.parent.mkdir(exist_ok=True, parents=True)
         with config.open("w") as f:
             # Make sure we download filelists so all dependencies can be resolved.
             # See https://bugzilla.redhat.com/show_bug.cgi?id=2180842
-            if dnf_executable(state).endswith("dnf5") and filelists:
+            if dnf_executable(context).endswith("dnf5") and filelists:
                 f.write("[main]\noptional_metadata_types=filelists\n")
 
-    repofile = state.pkgmngr / "etc/yum.repos.d/mkosi.repo"
+    repofile = context.pkgmngr / "etc/yum.repos.d/mkosi.repo"
     if not repofile.exists():
         repofile.parent.mkdir(exist_ok=True, parents=True)
         with repofile.open("w") as f:
@@ -61,11 +61,11 @@ def setup_dnf(state: MkosiState, repositories: Iterable[RpmRepository], filelist
 
                 f.write("\n")
 
-    setup_rpm(state)
+    setup_rpm(context)
 
 
-def dnf_cmd(state: MkosiState) -> list[PathString]:
-    dnf = dnf_executable(state)
+def dnf_cmd(context: Context) -> list[PathString]:
+    dnf = dnf_executable(context)
 
     cmdline: list[PathString] = [
         "env",
@@ -73,32 +73,32 @@ def dnf_cmd(state: MkosiState) -> list[PathString]:
         dnf,
         "--assumeyes",
         "--best",
-        f"--releasever={state.config.release}",
-        f"--installroot={state.root}",
+        f"--releasever={context.config.release}",
+        f"--installroot={context.root}",
         "--setopt=keepcache=1",
-        f"--setopt=cachedir={state.cache_dir / 'cache' / ('libdnf5' if dnf.endswith('dnf5') else 'dnf')}",
-        f"--setopt=persistdir={state.cache_dir / 'lib' / ('libdnf5' if dnf.endswith('dnf5') else 'dnf')}",
-        f"--setopt=install_weak_deps={int(state.config.with_recommends)}",
+        f"--setopt=cachedir={context.cache_dir / 'cache' / ('libdnf5' if dnf.endswith('dnf5') else 'dnf')}",
+        f"--setopt=persistdir={context.cache_dir / 'lib' / ('libdnf5' if dnf.endswith('dnf5') else 'dnf')}",
+        f"--setopt=install_weak_deps={int(context.config.with_recommends)}",
         "--setopt=check_config_file_age=0",
         "--disable-plugin=*" if dnf.endswith("dnf5") else "--disableplugin=*",
         "--enable-plugin=builddep" if dnf.endswith("dnf5") else "--enableplugin=builddep",
     ]
 
-    if not state.config.repository_key_check:
+    if not context.config.repository_key_check:
         cmdline += ["--nogpgcheck"]
 
-    if state.config.repositories:
+    if context.config.repositories:
         opt = "--enable-repo" if dnf.endswith("dnf5") else "--enablerepo"
-        cmdline += [f"{opt}={repo}" for repo in state.config.repositories]
+        cmdline += [f"{opt}={repo}" for repo in context.config.repositories]
 
     # TODO: this breaks with a local, offline repository created with 'createrepo'
-    if state.config.cache_only and not state.config.local_mirror:
+    if context.config.cache_only and not context.config.local_mirror:
         cmdline += ["--cacheonly"]
 
-    if not state.config.architecture.is_native():
-        cmdline += [f"--forcearch={state.config.distribution.architecture(state.config.architecture)}"]
+    if not context.config.architecture.is_native():
+        cmdline += [f"--forcearch={context.config.distribution.architecture(context.config.architecture)}"]
 
-    if not state.config.with_docs:
+    if not context.config.with_docs:
         cmdline += ["--no-docs" if dnf.endswith("dnf5") else "--nodocs"]
 
     if dnf.endswith("dnf5"):
@@ -113,15 +113,15 @@ def dnf_cmd(state: MkosiState) -> list[PathString]:
     return cmdline
 
 
-def invoke_dnf(state: MkosiState, command: str, packages: Iterable[str], apivfs: bool = True) -> None:
-    cmd = apivfs_cmd(state.root) if apivfs else []
-    bwrap(state, cmd + dnf_cmd(state) + [command, *sort_packages(packages)],
-          network=True, env=state.config.environment)
+def invoke_dnf(context: Context, command: str, packages: Iterable[str], apivfs: bool = True) -> None:
+    cmd = apivfs_cmd(context.root) if apivfs else []
+    bwrap(context, cmd + dnf_cmd(context) + [command, *sort_packages(packages)],
+          network=True, env=context.config.environment)
 
-    fixup_rpmdb_location(state.root)
+    fixup_rpmdb_location(context.root)
 
     # The log directory is always interpreted relative to the install root so there's nothing we can do but
     # to remove the log files from the install root afterwards.
-    for p in (state.root / "var/log").iterdir():
+    for p in (context.root / "var/log").iterdir():
         if any(p.name.startswith(prefix) for prefix in ("dnf", "hawkey", "yum")):
             p.unlink()
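Note: dnf_cmd() picks its flag spellings from the resolved executable name, and that choice can be forced through the build environment, e.g. MKOSI_DNF=dnf5 (assuming the usual Environment= setting is what feeds config.environment), since dnf_executable() consults context.config.environment before $PATH:

    dnf5 -> --disable-plugin=*   --enable-plugin=builddep   --enable-repo=...   --no-docs
    dnf  -> --disableplugin=*    --enableplugin=builddep    --enablerepo=...    --nodocs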
index f0bbe30d6757289f5ea27008a6e55ad14f480948..e65d3cda476bf8faea81c2600b7780cd6f26dc52 100644 (file)
@@ -4,7 +4,7 @@ from collections.abc import Iterable, Sequence
 from typing import NamedTuple
 
 from mkosi.bubblewrap import apivfs_cmd, bwrap
-from mkosi.state import MkosiState
+from mkosi.context import Context
 from mkosi.types import PathString
 from mkosi.util import sort_packages, umask
 
@@ -14,8 +14,8 @@ class PacmanRepository(NamedTuple):
     url: str
 
 
-def setup_pacman(state: MkosiState, repositories: Iterable[PacmanRepository]) -> None:
-    if state.config.repository_key_check:
+def setup_pacman(context: Context, repositories: Iterable[PacmanRepository]) -> None:
+    if context.config.repository_key_check:
         sig_level = "Required DatabaseOptional"
     else:
         # If we are using a single local mirror built on the fly there
@@ -24,11 +24,11 @@ def setup_pacman(state: MkosiState, repositories: Iterable[PacmanRepository]) ->
 
     # Create base layout for pacman and pacman-key
     with umask(~0o755):
-        (state.root / "var/lib/pacman").mkdir(exist_ok=True, parents=True)
+        (context.root / "var/lib/pacman").mkdir(exist_ok=True, parents=True)
 
-    (state.cache_dir / "cache/pacman/pkg").mkdir(parents=True, exist_ok=True)
+    (context.cache_dir / "cache/pacman/pkg").mkdir(parents=True, exist_ok=True)
 
-    config = state.pkgmngr / "etc/pacman.conf"
+    config = context.pkgmngr / "etc/pacman.conf"
     if config.exists():
         return
 
@@ -57,7 +57,7 @@ def setup_pacman(state: MkosiState, repositories: Iterable[PacmanRepository]) ->
                 )
             )
 
-        if any((state.pkgmngr / "etc/pacman.d/").glob("*.conf")):
+        if any((context.pkgmngr / "etc/pacman.d/").glob("*.conf")):
             f.write(
                 textwrap.dedent(
                     """\
@@ -68,26 +68,26 @@ def setup_pacman(state: MkosiState, repositories: Iterable[PacmanRepository]) ->
             )
 
 
-def pacman_cmd(state: MkosiState) -> list[PathString]:
+def pacman_cmd(context: Context) -> list[PathString]:
     return [
         "pacman",
-        "--root", state.root,
+        "--root", context.root,
         "--logfile=/dev/null",
-        "--cachedir", state.cache_dir / "cache/pacman/pkg",
-        "--hookdir", state.root / "etc/pacman.d/hooks",
-        "--arch", state.config.distribution.architecture(state.config.architecture),
+        "--cachedir", context.cache_dir / "cache/pacman/pkg",
+        "--hookdir", context.root / "etc/pacman.d/hooks",
+        "--arch", context.config.distribution.architecture(context.config.architecture),
         "--color", "auto",
         "--noconfirm",
     ]
 
 
 def invoke_pacman(
-    state: MkosiState,
+    context: Context,
     operation: str,
     options: Sequence[str] = (),
     packages: Sequence[str] = (),
     apivfs: bool = True,
 ) -> None:
-    cmd = apivfs_cmd(state.root) if apivfs else []
-    bwrap(state, cmd + pacman_cmd(state) + [operation, *options, *sort_packages(packages)],
-          network=True, env=state.config.environment)
+    cmd = apivfs_cmd(context.root) if apivfs else []
+    bwrap(context, cmd + pacman_cmd(context) + [operation, *options, *sort_packages(packages)],
+          network=True, env=context.config.environment)
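Note: as with apt above, only the first parameter of invoke_pacman() changes. A hypothetical call site for illustration (operation, options and packages are all made up here):

    invoke_pacman(context, "-S", options=["--needed"], packages=["base"])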
index 05416b782ccb5ea27bd703616cf0cc5507a899df..ab46b97f3228fd0afd99cfefbe2c3a8e991e6ee2 100644 (file)
@@ -7,7 +7,7 @@ from pathlib import Path
 from typing import NamedTuple, Optional
 
 from mkosi.bubblewrap import bwrap
-from mkosi.state import MkosiState
+from mkosi.context import Context
 from mkosi.tree import rmtree
 from mkosi.types import PathString
 
@@ -22,25 +22,25 @@ class RpmRepository(NamedTuple):
     sslclientcert: Optional[Path] = None
 
 
-def find_rpm_gpgkey(state: MkosiState, key: str, url: str) -> str:
+def find_rpm_gpgkey(context: Context, key: str, url: str) -> str:
     gpgpath = next(Path("/usr/share/distribution-gpg-keys").rglob(key), None)
     if gpgpath:
         return f"file://{gpgpath}"
 
-    gpgpath = next(Path(state.pkgmngr / "etc/pki/rpm-gpg").rglob(key), None)
+    gpgpath = next(Path(context.pkgmngr / "etc/pki/rpm-gpg").rglob(key), None)
     if gpgpath:
-        return f"file://{Path('/') / gpgpath.relative_to(state.pkgmngr)}"
+        return f"file://{Path('/') / gpgpath.relative_to(context.pkgmngr)}"
 
     return url
 
 
-def setup_rpm(state: MkosiState) -> None:
-    confdir = state.pkgmngr / "etc/rpm"
+def setup_rpm(context: Context) -> None:
+    confdir = context.pkgmngr / "etc/rpm"
     confdir.mkdir(parents=True, exist_ok=True)
-    if not (confdir / "macros.lang").exists() and state.config.locale:
-        (confdir / "macros.lang").write_text(f"%_install_langs {state.config.locale}")
+    if not (confdir / "macros.lang").exists() and context.config.locale:
+        (confdir / "macros.lang").write_text(f"%_install_langs {context.config.locale}")
 
-    plugindir = Path(bwrap(state, ["rpm", "--eval", "%{__plugindir}"], stdout=subprocess.PIPE).stdout.strip())
+    plugindir = Path(bwrap(context, ["rpm", "--eval", "%{__plugindir}"], stdout=subprocess.PIPE).stdout.strip())
     if plugindir.exists():
         with (confdir / "macros.disable-plugins").open("w") as f:
             for plugin in plugindir.iterdir():
@@ -64,5 +64,5 @@ def fixup_rpmdb_location(root: Path) -> None:
     rpmdb_home.symlink_to(os.path.relpath(rpmdb, start=rpmdb_home.parent))
 
 
-def rpm_cmd(state: MkosiState) -> list[PathString]:
-    return ["env", "HOME=/", "rpm", "--root", state.root]
+def rpm_cmd(context: Context) -> list[PathString]:
+    return ["env", "HOME=/", "rpm", "--root", context.root]
index 7e2626bd4fa9c63b952a01b5597f42572d0e9350..16cd1b5df4cb87362489ef76d4d49b61f6af7d8e 100644 (file)
@@ -4,14 +4,14 @@ from collections.abc import Sequence
 
 from mkosi.bubblewrap import apivfs_cmd, bwrap
 from mkosi.config import yes_no
+from mkosi.context import Context
 from mkosi.installer.rpm import RpmRepository, fixup_rpmdb_location, setup_rpm
-from mkosi.state import MkosiState
 from mkosi.types import PathString
 from mkosi.util import sort_packages
 
 
-def setup_zypper(state: MkosiState, repos: Sequence[RpmRepository]) -> None:
-    config = state.pkgmngr / "etc/zypp/zypp.conf"
+def setup_zypper(context: Context, repos: Sequence[RpmRepository]) -> None:
+    config = context.pkgmngr / "etc/zypp/zypp.conf"
     config.parent.mkdir(exist_ok=True, parents=True)
 
     # rpm.install.excludedocs can only be configured in zypp.conf so we append
@@ -22,13 +22,13 @@ def setup_zypper(state: MkosiState, repos: Sequence[RpmRepository]) -> None:
             textwrap.dedent(
                 f"""
                 [main]
-                rpm.install.excludedocs = {yes_no(not state.config.with_docs)}
+                rpm.install.excludedocs = {yes_no(not context.config.with_docs)}
                 repo.refresh.delay = {48 * 60}
                 """
             )
         )
 
-    repofile = state.pkgmngr / "etc/zypp/repos.d/mkosi.repo"
+    repofile = context.pkgmngr / "etc/zypp/repos.d/mkosi.repo"
     if not repofile.exists():
         repofile.parent.mkdir(exist_ok=True, parents=True)
         with repofile.open("w") as f:
@@ -51,31 +51,31 @@ def setup_zypper(state: MkosiState, repos: Sequence[RpmRepository]) -> None:
                     f.write("gpgkey=" if i == 0 else len("gpgkey=") * " ")
                     f.write(f"{url}\n")
 
-    setup_rpm(state)
+    setup_rpm(context)
 
 
-def zypper_cmd(state: MkosiState) -> list[PathString]:
+def zypper_cmd(context: Context) -> list[PathString]:
     return [
         "env",
         "ZYPP_CONF=/etc/zypp/zypp.conf",
         "HOME=/",
         "zypper",
-        f"--installroot={state.root}",
-        f"--cache-dir={state.cache_dir / 'cache/zypp'}",
-        "--gpg-auto-import-keys" if state.config.repository_key_check else "--no-gpg-checks",
+        f"--installroot={context.root}",
+        f"--cache-dir={context.cache_dir / 'cache/zypp'}",
+        "--gpg-auto-import-keys" if context.config.repository_key_check else "--no-gpg-checks",
         "--non-interactive",
     ]
 
 
 def invoke_zypper(
-    state: MkosiState,
+    context: Context,
     verb: str,
     packages: Sequence[str],
     options: Sequence[str] = (),
     apivfs: bool = True,
 ) -> None:
-    cmd = apivfs_cmd(state.root) if apivfs else []
-    bwrap(state, cmd + zypper_cmd(state) + [verb, *options, *sort_packages(packages)],
-          network=True, env=state.config.environment)
+    cmd = apivfs_cmd(context.root) if apivfs else []
+    bwrap(context, cmd + zypper_cmd(context) + [verb, *options, *sort_packages(packages)],
+          network=True, env=context.config.environment)
 
-    fixup_rpmdb_location(state.root)
+    fixup_rpmdb_location(context.root)
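A hedged sketch of how a distribution backend might call the helper above; the verb, package list, and extra options are illustrative assumptions, not taken from this commit.

    # Install a couple of packages into context.root through zypper,
    # running inside the API VFS wrapper by default.
    invoke_zypper(
        context,
        "install",
        packages=["filesystem", "zypper"],
        options=["--download", "in-advance"],
    )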
index 73738ab60052e98e114c39c27f4578f640cd3c18..458d43511d796e77455968e408d9b7cb0ae4ec42 100644 (file)
@@ -66,7 +66,7 @@ def complete_step(text: str, text2: Optional[str] = None) -> Iterator[list[Any]]
         log_step(text2.format(*args))
 
 
-class MkosiFormatter(logging.Formatter):
+class Formatter(logging.Formatter):
     def __init__(self, fmt: Optional[str] = None, *args: Any, **kwargs: Any) -> None:
         fmt = fmt or "%(message)s"
 
@@ -86,7 +86,7 @@ class MkosiFormatter(logging.Formatter):
 
 def log_setup() -> None:
     handler = logging.StreamHandler(stream=sys.stderr)
-    handler.setFormatter(MkosiFormatter())
+    handler.setFormatter(Formatter())
 
     logging.getLogger().addHandler(handler)
     logging.getLogger().setLevel(logging.getLevelName(os.getenv("SYSTEMD_LOG_LEVEL", "info").upper()))
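A small usage sketch of the logging setup above (the environment value and message are illustrative): log_setup() attaches the renamed Formatter to the root logger and takes the level from SYSTEMD_LOG_LEVEL, defaulting to info.

    import logging
    import os

    os.environ.setdefault("SYSTEMD_LOG_LEVEL", "debug")
    log_setup()
    logging.debug("reusing cached tree")  # emitted because the level is debug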
index 22d7761116492df9e1f054dff37e29f33b56fb45..270316fb6f2965ce0d0269bb5e787bf396e72671 100644 (file)
@@ -9,7 +9,7 @@ import textwrap
 from pathlib import Path
 from typing import IO, Any, Optional
 
-from mkosi.config import ManifestFormat, MkosiConfig
+from mkosi.config import Config, ManifestFormat
 from mkosi.distributions import Distribution, PackageType
 from mkosi.run import run
 
@@ -80,7 +80,7 @@ def parse_pkg_desc(f: Path) -> tuple[str, str, str, str]:
 
 @dataclasses.dataclass
 class Manifest:
-    config: MkosiConfig
+    config: Config
     packages: list[PackageManifest] = dataclasses.field(default_factory=list)
     source_packages: dict[str, SourcePackageManifest] = dataclasses.field(default_factory=dict)
 
index 18131f6e24a1d08f250b7f1ff84e389ae72f102a..9595481dcad631140ad5413069f5849f8d04f249 100644 (file)
@@ -23,9 +23,9 @@ from typing import Optional
 
 from mkosi.config import (
     Architecture,
+    Args,
+    Config,
     ConfigFeature,
-    MkosiArgs,
-    MkosiConfig,
     OutputFormat,
     QemuFirmware,
     QemuVsockCID,
@@ -34,14 +34,7 @@ from mkosi.config import (
 from mkosi.log import die
 from mkosi.mounts import mount_passwd
 from mkosi.partition import finalize_root, find_partitions
-from mkosi.run import (
-    MkosiAsyncioThread,
-    become_root,
-    find_binary,
-    fork_and_wait,
-    run,
-    spawn,
-)
+from mkosi.run import AsyncioThread, become_root, find_binary, fork_and_wait, run, spawn
 from mkosi.tree import copy_tree, rmtree
 from mkosi.types import PathString
 from mkosi.util import INVOKING_USER, StrEnum
@@ -64,7 +57,7 @@ class QemuDeviceNode(StrEnum):
             QemuDeviceNode.vhost_vsock: "a VSock device",
         }[self]
 
-    def feature(self, config: MkosiConfig) -> ConfigFeature:
+    def feature(self, config: Config) -> ConfigFeature:
         return {
             QemuDeviceNode.kvm: config.qemu_kvm,
             QemuDeviceNode.vhost_vsock: config.qemu_vsock,
@@ -95,7 +88,7 @@ class QemuDeviceNode(StrEnum):
         return True
 
 
-def hash_output(config: MkosiConfig) -> "hashlib._Hash":
+def hash_output(config: Config) -> "hashlib._Hash":
     p = os.fspath(config.output_dir_or_cwd() / config.output_with_compression)
     return hashlib.sha256(p.encode())
 
@@ -118,7 +111,7 @@ def vsock_cid_in_use(vfd: int, cid: int) -> bool:
     return False
 
 
-def find_unused_vsock_cid(config: MkosiConfig, vfd: int) -> int:
+def find_unused_vsock_cid(config: Config, vfd: int) -> int:
     hash = hash_output(config)
 
     for i in range(64):
@@ -154,7 +147,7 @@ class KernelType(StrEnum):
             return KernelType.unknown
 
 
-def find_qemu_binary(config: MkosiConfig) -> str:
+def find_qemu_binary(config: Config) -> str:
     binaries = ["qemu", "qemu-kvm"] if config.architecture.is_native() else []
     binaries += [f"qemu-system-{config.architecture.to_qemu()}"]
     for binary in binaries:
@@ -164,7 +157,7 @@ def find_qemu_binary(config: MkosiConfig) -> str:
     die("Couldn't find QEMU/KVM binary")
 
 
-def find_ovmf_firmware(config: MkosiConfig) -> tuple[Path, bool]:
+def find_ovmf_firmware(config: Config) -> tuple[Path, bool]:
     FIRMWARE_LOCATIONS = {
         Architecture.x86_64: [
             "/usr/share/ovmf/x64/OVMF_CODE.secboot.fd",
@@ -230,7 +223,7 @@ def find_ovmf_firmware(config: MkosiConfig) -> tuple[Path, bool]:
     die("Couldn't find OVMF UEFI firmware blob.")
 
 
-def find_ovmf_vars(config: MkosiConfig) -> Path:
+def find_ovmf_vars(config: Config) -> Path:
     OVMF_VARS_LOCATIONS = []
 
     if config.architecture == Architecture.x86_64:
@@ -340,15 +333,15 @@ def start_virtiofsd(directory: Path, *, uidmap: bool) -> Iterator[Path]:
     # We create the socket ourselves and pass the fd to virtiofsd to avoid race conditions where we start qemu
     # before virtiofsd has had the chance to create the socket (or where we try to chown it first).
     with (
-        tempfile.TemporaryDirectory(prefix="mkosi-virtiofsd") as state,
+        tempfile.TemporaryDirectory(prefix="mkosi-virtiofsd") as context,
         socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as sock,
     ):
         # Make sure qemu can access the virtiofsd socket in this directory.
-        os.chown(state, INVOKING_USER.uid, INVOKING_USER.gid)
+        os.chown(context, INVOKING_USER.uid, INVOKING_USER.gid)
 
         # Make sure we can use the socket name as a unique identifier for the fs as well but make sure it's not too
         # long as virtiofs tag names are limited to 36 bytes.
-        path = Path(state) / f"sock-{uuid.uuid4().hex}"[:35]
+        path = Path(context) / f"sock-{uuid.uuid4().hex}"[:35]
         sock.bind(os.fspath(path))
         sock.listen()
 
@@ -414,7 +407,7 @@ def vsock_notify_handler() -> Iterator[tuple[str, dict[str, str]]]:
                     k, _, v = msg.partition("=")
                     messages[k] = v
 
-        with MkosiAsyncioThread(notify()):
+        with AsyncioThread(notify()):
             yield f"vsock-stream:{socket.VMADDR_CID_HOST}:{vsock.getsockname()[1]}", messages
 
         logging.debug(f"Received {num_messages} notify messages totalling {format_bytes(num_bytes)} bytes")
@@ -423,7 +416,7 @@ def vsock_notify_handler() -> Iterator[tuple[str, dict[str, str]]]:
 
 
 @contextlib.contextmanager
-def copy_ephemeral(config: MkosiConfig, src: Path) -> Iterator[Path]:
+def copy_ephemeral(config: Config, src: Path) -> Iterator[Path]:
     src = src.resolve()
     # tempfile doesn't provide an API to get a random filename in an arbitrary directory so we do this
     # instead.
@@ -452,11 +445,11 @@ def copy_ephemeral(config: MkosiConfig, src: Path) -> Iterator[Path]:
         fork_and_wait(rm)
 
 
-def qemu_version(config: MkosiConfig) -> GenericVersion:
+def qemu_version(config: Config) -> GenericVersion:
     return GenericVersion(run([find_qemu_binary(config), "--version"], stdout=subprocess.PIPE).stdout.split()[3])
 
 
-def run_qemu(args: MkosiArgs, config: MkosiConfig, qemu_device_fds: Mapping[QemuDeviceNode, int]) -> None:
+def run_qemu(args: Args, config: Config, qemu_device_fds: Mapping[QemuDeviceNode, int]) -> None:
     if config.output_format not in (
         OutputFormat.disk,
         OutputFormat.cpio,
@@ -800,7 +793,7 @@ def run_qemu(args: MkosiArgs, config: MkosiConfig, qemu_device_fds: Mapping[Qemu
         raise subprocess.CalledProcessError(status, cmdline)
 
 
-def run_ssh(args: MkosiArgs, config: MkosiConfig) -> None:
+def run_ssh(args: Args, config: Config) -> None:
     if config.qemu_vsock_cid == QemuVsockCID.auto:
         die("Can't use ssh verb with QemuVSockCID=auto")
 
index 92901d9cae048daa1f2ce2a0b60d8fb8b33077f7..5bcc8e333fd280ccf7a667874c98cba44d9f55c8 100644 (file)
@@ -383,7 +383,7 @@ def find_binary(*names: PathString, root: Optional[Path] = None) -> Optional[Pat
     return None
 
 
-class MkosiAsyncioThread(threading.Thread):
+class AsyncioThread(threading.Thread):
     """
     The default threading.Thread() is not interruptable, so we make our own version by using the concurrency
     feature in python that is interruptable, namely asyncio.
@@ -416,7 +416,7 @@ class MkosiAsyncioThread(threading.Thread):
         for task in asyncio.tasks.all_tasks(loop):
             loop.call_soon_threadsafe(task.cancel)
 
-    def __enter__(self) -> "MkosiAsyncioThread":
+    def __enter__(self) -> "AsyncioThread":
         self.start()
         return self
 
index 4a4ab925f72cf01afb7f333a984317fbaed2b5d5..7d8e6007ba64cc262284f29edf7a89c601f0226b 100644 (file)
@@ -13,9 +13,9 @@ import pytest
 from mkosi.config import (
     Architecture,
     Compression,
+    Config,
     ConfigFeature,
     ConfigTree,
-    MkosiConfig,
     OutputFormat,
     Verb,
     config_parse_bytes,
@@ -801,7 +801,7 @@ def test_output_id_version(tmp_path: Path) -> None:
 
 
 def test_deterministic() -> None:
-    assert MkosiConfig.default() == MkosiConfig.default()
+    assert Config.default() == Config.default()
 
 
 def test_environment(tmp_path: Path) -> None:
index 253519e3a7bb7aa087e290434dfd41bf445da297..1302f3bba28f798b546182358b234be619e2498e 100644 (file)
@@ -10,15 +10,15 @@ import pytest
 
 from mkosi.config import (
     Architecture,
+    Args,
     BiosBootloader,
     Bootloader,
     Compression,
+    Config,
     ConfigFeature,
     ConfigTree,
     DocFormat,
     ManifestFormat,
-    MkosiArgs,
-    MkosiConfig,
     OutputFormat,
     QemuDrive,
     QemuFirmware,
@@ -56,7 +56,7 @@ def test_args(path: Optional[Path]) -> None:
         """
     )
 
-    args = MkosiArgs(
+    args = Args(
         auto_bump = False,
         cmdline = ["foo", "bar"],
         debug = False,
@@ -73,7 +73,7 @@ def test_args(path: Optional[Path]) -> None:
     )
 
     assert args.to_json(indent=4, sort_keys=True) == dump.rstrip()
-    assert MkosiArgs.from_json(dump) == args
+    assert Args.from_json(dump) == args
 
 
 def test_config() -> None:
@@ -290,7 +290,7 @@ def test_config() -> None:
         """
     )
 
-    args = MkosiConfig(
+    args = Config(
         acl =  True,
         architecture = Architecture.ia64,
         autologin = False,
@@ -414,4 +414,4 @@ def test_config() -> None:
     )
 
     assert args.to_json() == dump.rstrip()
-    assert MkosiConfig.from_json(dump) == args
+    assert Config.from_json(dump) == args