From 099f1a6f8c6fd897d0daafaddc55313bad4292b7 Mon Sep 17 00:00:00 2001 From: Daan De Meyer Date: Mon, 17 Jul 2023 23:21:38 +0200 Subject: [PATCH] Rework user/mount namespace handling and tools tree The biggest change is that instead of making bwrap() responsible for mounting the tools tree, we do it ourselves before we build/boot each image. We do the same for remounting the top level directories read-only: instead of leaving it to bwrap(), we do it once at the start of run_verb(). Because we now mess with the host system mounts ourselves again, we also go back to unconditionally unsharing a mount namespace, even when running as root. With the above out of the way, there's no real reason left to run regular executables with bwrap(), so those are moved back to being executed with run(). The above changes also remove the need for bwrap_cmd(), so it is merged back into bwrap(). One nasty caveat of overmounting /usr ourselves at the start of execution is that some python modules are loaded dynamically, so we have to make sure they are imported before we start overmounting /usr. Finally, this commit also gets rid of running the image build in a subprocess. Instead, after doing the build and mounting the final tools tree for the image we're going to boot/qemu/ssh into, we change uid/gid to the invoking user if we're going to do an unprivileged operation. This is more or less the same as running these operations unprivileged outside of the user namespace. The boot/shell verbs only run privileged, so we check beforehand that we're running as root; that doesn't change after become_root(), so we're root all the time and there's no need to run the image build in a subprocess. To keep ssh working, we have to trick it into recognizing our user in the user namespace by overmounting /etc/passwd with a file containing an entry for the mapped uid. We also unify more of the uid/gid handling in run_verb(). --- README.md | 4 +- mkosi/__init__.py | 406 ++++++++++++++++---------------- mkosi/__main__.py | 4 +- mkosi/btrfs.py | 26 +- mkosi/config.py | 4 +- mkosi/distributions/arch.py | 3 +- mkosi/distributions/debian.py | 18 +- mkosi/distributions/fedora.py | 3 +- mkosi/distributions/gentoo.py | 25 +- mkosi/distributions/opensuse.py | 3 +- mkosi/install.py | 7 +- mkosi/manifest.py | 47 ++-- mkosi/mounts.py | 49 +++- mkosi/qemu.py | 33 ++- mkosi/run.py | 169 +++---------- mkosi/state.py | 4 +- mkosi/util.py | 35 +-- 17 files changed, 372 insertions(+), 468 deletions(-) diff --git a/README.md b/README.md index daaa348ac..90cdc84df 100644 --- a/README.md +++ b/README.md @@ -44,7 +44,9 @@ when you are in the repository top level. To use your local mkosi checkout without being in the top level of the repository you can either call the shim `bin/mkosi` or make an editable install -into a virtual environment. +into a virtual environment. The `MKOSI_INTERPRETER` environment variable can be +set when using the `bin/mkosi` shim to configure the python interpreter used to +execute mkosi. The shim can be symlinked somewhere into your `PATH`.
To make an editable install add `--editable` to either of the above examples using pip or pipx and diff --git a/mkosi/__init__.py b/mkosi/__init__.py index 7ff1650b8..df11535ff 100644 --- a/mkosi/__init__.py +++ b/mkosi/__init__.py @@ -33,19 +33,11 @@ from mkosi.config import ( from mkosi.install import add_dropin_config_from_resource, copy_path, flock from mkosi.log import Style, color_error, complete_step, die, log_step from mkosi.manifest import Manifest -from mkosi.mounts import mount_overlay, scandir_recursive +from mkosi.mounts import mount_overlay, mount_passwd, mount_tools, scandir_recursive from mkosi.pager import page from mkosi.qemu import copy_ephemeral, machine_cid, run_qemu from mkosi.remove import unlink_try_hard -from mkosi.run import ( - become_root, - bwrap, - bwrap_cmd, - chroot_cmd, - fork_and_wait, - run, - spawn, -) +from mkosi.run import become_root, bwrap, chroot_cmd, init_mount_namespace, run, spawn from mkosi.state import MkosiState from mkosi.types import PathString from mkosi.util import ( @@ -59,7 +51,8 @@ from mkosi.util import ( format_rlimit, is_apt_distribution, is_portage_distribution, - prepend_to_environ_path, + tmp_dir, + try_import, ) MKOSI_COMMANDS_NEED_BUILD = (Verb.build, Verb.shell, Verb.boot, Verb.qemu, Verb.serve) @@ -84,7 +77,6 @@ def mount_image(state: MkosiState) -> Iterator[None]: shutil.unpack_archive(path, d) bases += [d] elif path.suffix == ".raw": - # We want to use bwrap() here but it doesn't propagate mounts so we use run() instead. run(["systemd-dissect", "-M", path, d]) stack.callback(lambda: run(["systemd-dissect", "-U", d])) bases += [d] @@ -322,7 +314,6 @@ def run_prepare_script(state: MkosiState, build: bool) -> None: with complete_step("Running prepare script in build overlay…"), mount_build_overlay(state): bwrap( ["chroot", "/work/prepare", "build"], - tools=state.config.tools_tree, apivfs=state.root, scripts=dict(chroot=chroot_cmd(state.root, options=options, network=True)), env=dict(SRCDIR="/work/src") | state.environment, @@ -332,7 +323,6 @@ def run_prepare_script(state: MkosiState, build: bool) -> None: with complete_step("Running prepare script…"): bwrap( ["chroot", "/work/prepare", "final"], - tools=state.config.tools_tree, apivfs=state.root, scripts=dict(chroot=chroot_cmd(state.root, options=options, network=True)), env=dict(SRCDIR="/work/src") | state.environment, @@ -347,7 +337,6 @@ def run_postinst_script(state: MkosiState) -> None: with complete_step("Running postinstall script…"): bwrap( ["chroot", "/work/postinst", "final"], - tools=state.config.tools_tree, apivfs=state.root, scripts=dict( chroot=chroot_cmd( @@ -367,20 +356,19 @@ def run_finalize_script(state: MkosiState) -> None: return with complete_step("Running finalize script…"): - bwrap([state.config.finalize_script], - root=state.config.tools_tree, - env={**state.environment, "BUILDROOT": str(state.root), "OUTPUTDIR": str(state.staging)}) + run([state.config.finalize_script], + env={**state.environment, "BUILDROOT": str(state.root), "OUTPUTDIR": str(state.staging)}) def certificate_common_name(state: MkosiState, certificate: Path) -> str: - output = bwrap([ + output = run([ "openssl", "x509", "-noout", "-subject", "-nameopt", "multiline", "-in", certificate, - ], root=state.config.tools_tree, stdout=subprocess.PIPE).stdout + ], stdout=subprocess.PIPE).stdout for line in output.splitlines(): if not line.strip().startswith("commonName"): @@ -407,25 +395,23 @@ def pesign_prepare(state: MkosiState) -> None: # pesign takes a certificate directory and a certificate 
common name as input arguments, so we have # to transform our input key and cert into that format. Adapted from # https://www.mankier.com/1/pesign#Examples-Signing_with_the_certificate_and_private_key_in_individual_files - bwrap(["openssl", - "pkcs12", - "-export", - # Arcane incantation to create a pkcs12 certificate without a password. - "-keypbe", "NONE", - "-certpbe", "NONE", - "-nomaciter", - "-passout", "pass:", - "-out", state.workspace / "secure-boot.p12", - "-inkey", state.config.secure_boot_key, - "-in", state.config.secure_boot_certificate], - root=state.config.tools_tree) - - bwrap(["pk12util", - "-K", "", - "-W", "", - "-i", state.workspace / "secure-boot.p12", - "-d", state.workspace / "pesign"], - root=state.config.tools_tree) + run(["openssl", + "pkcs12", + "-export", + # Arcane incantation to create a pkcs12 certificate without a password. + "-keypbe", "NONE", + "-certpbe", "NONE", + "-nomaciter", + "-passout", "pass:", + "-out", state.workspace / "secure-boot.p12", + "-inkey", state.config.secure_boot_key, + "-in", state.config.secure_boot_certificate]) + + run(["pk12util", + "-K", "", + "-W", "", + "-i", state.workspace / "secure-boot.p12", + "-d", state.workspace / "pesign"]) def install_boot_loader(state: MkosiState) -> None: @@ -461,30 +447,28 @@ def install_boot_loader(state: MkosiState) -> None: if (state.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or state.config.secure_boot_sign_tool == SecureBootSignTool.auto and shutil.which("sbsign") is not None): - bwrap(["sbsign", - "--key", state.config.secure_boot_key, - "--cert", state.config.secure_boot_certificate, - "--output", output, - input], - root=state.config.tools_tree) + run(["sbsign", + "--key", state.config.secure_boot_key, + "--cert", state.config.secure_boot_certificate, + "--output", output, + input]) elif (state.config.secure_boot_sign_tool == SecureBootSignTool.pesign or state.config.secure_boot_sign_tool == SecureBootSignTool.auto and shutil.which("pesign") is not None): pesign_prepare(state) - bwrap(["pesign", - "--certdir", state.workspace / "pesign", - "--certificate", certificate_common_name(state, state.config.secure_boot_certificate), - "--sign", - "--force", - "--in", input, - "--out", output], - root=state.config.tools_tree) + run(["pesign", + "--certdir", state.workspace / "pesign", + "--certificate", certificate_common_name(state, state.config.secure_boot_certificate), + "--sign", + "--force", + "--in", input, + "--out", output]) else: die("One of sbsign or pesign is required to use SecureBoot=") with complete_step("Installing boot loader…"): - bwrap(["bootctl", "install", "--root", state.root, "--all-architectures"], - env={"SYSTEMD_ESP_PATH": "/efi"}, root=state.config.tools_tree) + run(["bootctl", "install", "--root", state.root, "--all-architectures"], + env={"SYSTEMD_ESP_PATH": "/efi"}) if state.config.secure_boot: assert state.config.secure_boot_key @@ -495,30 +479,27 @@ def install_boot_loader(state: MkosiState) -> None: keys.mkdir(parents=True, exist_ok=True) # sbsiglist expects a DER certificate. 
- bwrap(["openssl", - "x509", - "-outform", "DER", - "-in", state.config.secure_boot_certificate, - "-out", state.workspace / "mkosi.der"], - root=state.config.tools_tree) - bwrap(["sbsiglist", - "--owner", str(uuid.uuid4()), - "--type", "x509", - "--output", state.workspace / "mkosi.esl", - state.workspace / "mkosi.der"], - root=state.config.tools_tree) + run(["openssl", + "x509", + "-outform", "DER", + "-in", state.config.secure_boot_certificate, + "-out", state.workspace / "mkosi.der"]) + run(["sbsiglist", + "--owner", str(uuid.uuid4()), + "--type", "x509", + "--output", state.workspace / "mkosi.esl", + state.workspace / "mkosi.der"]) # We reuse the key for all secure boot databases to keep things simple. for db in ["PK", "KEK", "db"]: - bwrap(["sbvarsign", - "--attr", - "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS", - "--key", state.config.secure_boot_key, - "--cert", state.config.secure_boot_certificate, - "--output", keys / f"{db}.auth", - db, - state.workspace / "mkosi.esl"], - root=state.config.tools_tree) + run(["sbvarsign", + "--attr", + "NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS", + "--key", state.config.secure_boot_key, + "--cert", state.config.secure_boot_certificate, + "--output", keys / f"{db}.auth", + db, + state.workspace / "mkosi.esl"]) def install_base_trees(state: MkosiState) -> None: @@ -532,8 +513,7 @@ def install_base_trees(state: MkosiState) -> None: elif path.suffix == ".tar": shutil.unpack_archive(path, state.root) elif path.suffix == ".raw": - bwrap(["systemd-dissect", "--copy-from", path, "/", state.root], - root=state.config.tools_tree) + run(["systemd-dissect", "--copy-from", path, "/", state.root]) else: die(f"Unsupported base tree source {path}") @@ -551,7 +531,7 @@ def install_skeleton_trees(state: MkosiState) -> None: t.parent.mkdir(mode=0o755, parents=True, exist_ok=True) if source.is_dir() or target: - copy_path(source, t, preserve_owner=False, root=state.config.tools_tree) + copy_path(source, t, preserve_owner=False) else: shutil.unpack_archive(source, t) @@ -569,7 +549,7 @@ def install_package_manager_trees(state: MkosiState) -> None: t.parent.mkdir(mode=0o755, parents=True, exist_ok=True) if source.is_dir() or target: - copy_path(source, t, preserve_owner=False, root=state.config.tools_tree) + copy_path(source, t, preserve_owner=False) else: shutil.unpack_archive(source, t) @@ -587,7 +567,7 @@ def install_extra_trees(state: MkosiState) -> None: t.parent.mkdir(mode=0o755, parents=True, exist_ok=True) if source.is_dir() or target: - copy_path(source, t, preserve_owner=False, root=state.config.tools_tree) + copy_path(source, t, preserve_owner=False) else: shutil.unpack_archive(source, t) @@ -597,7 +577,7 @@ def install_build_dest(state: MkosiState) -> None: return with complete_step("Copying in build tree…"): - copy_path(state.install_dir, state.root, root=state.config.tools_tree) + copy_path(state.install_dir, state.root) def gzip_binary() -> str: @@ -630,7 +610,7 @@ def make_tar(state: MkosiState) -> None: ] with complete_step("Creating archive…"): - bwrap(cmd, root=state.config.tools_tree) + run(cmd) def find_files(dir: Path, root: Path) -> Iterator[Path]: @@ -647,9 +627,8 @@ def make_initrd(state: MkosiState) -> None: def make_cpio(state: MkosiState, files: Iterator[Path], output: Path) -> None: - with complete_step(f"Creating cpio {output}…"), bwrap_cmd(root=state.config.tools_tree) as bwrap: + with complete_step(f"Creating cpio {output}…"): cmd: list[PathString] = [ 
- *bwrap, "cpio", "-o", "--reproducible", @@ -738,8 +717,8 @@ def resolve_module_dependencies(state: MkosiState, kver: str, modules: Sequence[ # We could run modinfo once for each module but that's slow. Luckily we can pass multiple modules to # modinfo and it'll process them all in a single go. We get the modinfo for all modules to build two maps # that map the path of the module to its module dependencies and its firmware dependencies respectively. - info = bwrap(["modinfo", "--basedir", state.root, "--set-version", kver, "--null", *nametofile.keys(), *builtin], - stdout=subprocess.PIPE, root=state.config.tools_tree).stdout + info = run(["modinfo", "--basedir", state.root, "--set-version", kver, "--null", *nametofile.keys(), *builtin], + stdout=subprocess.PIPE).stdout moddep = {} firmwaredep = {} @@ -908,7 +887,7 @@ def install_unified_kernel(state: MkosiState, roothash: Optional[str]) -> None: config = presets[0] unlink_output(args, config) - build_image(args, config, state.uid, state.gid) + build_image(args, config) initrds = [config.output_dir / config.output] @@ -1002,7 +981,7 @@ def install_unified_kernel(state: MkosiState, roothash: Optional[str]) -> None: if state.config.kernel_modules_initrd: cmd += [gen_kernel_modules_initrd(state, kver)] - bwrap(cmd, root=state.config.tools_tree) + run(cmd) if not state.staging.joinpath(state.config.output_split_uki).exists(): shutil.copy(boot_binary, state.staging / state.config.output_split_uki) @@ -1040,8 +1019,7 @@ def maybe_compress(state: MkosiState, compression: Compression, src: Path, dst: src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file. with dst.open("wb") as o: - bwrap(compressor_command(compression), stdin=i, stdout=o, root=state.config.tools_tree) - os.chown(dst, uid=state.uid, gid=state.gid) + run(compressor_command(compression), stdin=i, stdout=o) def copy_nspawn_settings(state: MkosiState) -> None: @@ -1094,7 +1072,7 @@ def calculate_signature(state: MkosiState) -> None: state.staging / state.config.output_checksum, ] - bwrap( + run( cmdline, # Do not output warnings about keyring permissions stderr=subprocess.DEVNULL, @@ -1108,7 +1086,6 @@ def calculate_signature(state: MkosiState) -> None: Path(os.environ['HOME']).joinpath('.gnupg') ) }, - root=state.config.tools_tree, ) @@ -1518,23 +1495,22 @@ def run_depmod(state: MkosiState) -> None: process_kernel_modules(state, kver) with complete_step(f"Running depmod for {kver}"): - bwrap(["depmod", "--all", "--basedir", state.root, kver], root=state.config.tools_tree) + run(["depmod", "--all", "--basedir", state.root, kver]) def run_sysusers(state: MkosiState) -> None: with complete_step("Generating system users"): - bwrap(["systemd-sysusers", "--root", state.root], root=state.config.tools_tree) + run(["systemd-sysusers", "--root", state.root]) def run_preset(state: MkosiState) -> None: with complete_step("Applying presets…"): - bwrap(["systemctl", "--root", state.root, "preset-all"], root=state.config.tools_tree) + run(["systemctl", "--root", state.root, "preset-all"]) def run_hwdb(state: MkosiState) -> None: with complete_step("Generating hardware database"): - bwrap(["systemd-hwdb", "--root", state.root, "--usr", "--strict", "update"], - root=state.config.tools_tree) + run(["systemd-hwdb", "--root", state.root, "--usr", "--strict", "update"]) def run_firstboot(state: MkosiState) -> None: @@ -1568,8 +1544,7 @@ def run_firstboot(state: MkosiState) -> None: return with complete_step("Applying first boot settings"): - 
bwrap(["systemd-firstboot", "--root", state.root, "--force", *options], - root=state.config.tools_tree) + run(["systemd-firstboot", "--root", state.root, "--force", *options]) # Initrds generally don't ship with only /usr so there's not much point in putting the credentials in # /usr/lib/credstore. @@ -1588,8 +1563,7 @@ def run_selinux_relabel(state: MkosiState) -> None: if not selinux.exists(): return - policy = bwrap(["sh", "-c", f". {selinux} && echo $SELINUXTYPE"], - stdout=subprocess.PIPE, root=state.config.tools_tree).stdout.strip() + policy = run(["sh", "-c", f". {selinux} && echo $SELINUXTYPE"], stdout=subprocess.PIPE).stdout.strip() if not policy: return @@ -1602,7 +1576,6 @@ def run_selinux_relabel(state: MkosiState) -> None: with complete_step(f"Relabeling files using {policy} policy"): bwrap( cmd=["chroot", "sh", "-c", cmd], - tools=state.config.tools_tree, apivfs=state.root, scripts=dict(chroot=chroot_cmd(state.root)), env=state.environment, @@ -1743,8 +1716,7 @@ def make_image(state: MkosiState, skip: Sequence[str] = [], split: bool = False) env[option] = value with complete_step("Generating disk image"): - output = json.loads(bwrap(cmdline, stdout=subprocess.PIPE, env=env, - root=state.config.tools_tree).stdout) + output = json.loads(run(cmdline, stdout=subprocess.PIPE, env=env).stdout) roothash = usrhash = None for p in output: @@ -1767,19 +1739,16 @@ def make_image(state: MkosiState, skip: Sequence[str] = [], split: bool = False) def finalize_staging(state: MkosiState) -> None: for f in state.staging.iterdir(): - if not f.is_dir(): - os.chown(f, state.uid, state.gid) - shutil.move(f, state.config.output_dir) -def build_image(args: MkosiArgs, config: MkosiConfig, uid: int, gid: int) -> None: - state = MkosiState(args, config, uid, gid) +def build_image(args: MkosiArgs, config: MkosiConfig) -> None: + state = MkosiState(args, config) manifest = Manifest(config) # Make sure tmpfiles' aging doesn't interfere with our workspace # while we are working on it. - with flock(state.workspace), acl_toggle_build(state): + with flock(state.workspace): install_package_manager_trees(state) with mount_image(state): @@ -1852,7 +1821,6 @@ def build_image(args: MkosiArgs, config: MkosiConfig, uid: int, gid: int) -> Non if not output_base.exists() or output_base.is_symlink(): output_base.unlink(missing_ok=True) output_base.symlink_to(state.config.output_with_compression) - os.chown(output_base, uid, gid, follow_symlinks=False) print_output_size(config.output_dir / config.output) @@ -1901,28 +1869,23 @@ def run_build_script(state: MkosiState) -> None: options += ["--bind", state.config.build_dir, "/work/build"] env |= dict(BUILDDIR="/work/build") - # build-script output goes to stdout so we can run language servers from within mkosi - # build-scripts. See https://github.com/systemd/mkosi/pull/566 for more information. 
bwrap( ["chroot", "/work/build-script"], - tools=state.config.tools_tree, apivfs=state.root, scripts=dict(chroot=chroot_cmd(state.root, options=options, network=state.config.with_network)), env=env | state.environment, - stdout=sys.stdout, ) -def setfacl(config: MkosiConfig, root: Path, uid: int, allow: bool) -> None: - bwrap(["setfacl", - "--physical", - "--modify" if allow else "--remove", - f"user:{uid}:rwx" if allow else f"user:{uid}", - "-"], - root=config.tools_tree, - # Supply files via stdin so we don't clutter --debug run output too much - input="\n".join([str(root), - *(e.path for e in cast(Iterator[os.DirEntry[str]], scandir_recursive(root)) if e.is_dir())]) +def setfacl(root: Path, uid: int, allow: bool) -> None: + run(["setfacl", + "--physical", + "--modify" if allow else "--remove", + f"user:{uid}:rwx" if allow else f"user:{uid}", + "-"], + # Supply files via stdin so we don't clutter --debug run output too much + input="\n".join([str(root), + *(e.path for e in cast(Iterator[os.DirEntry[str]], scandir_recursive(root)) if e.is_dir())]) ) @@ -1934,11 +1897,8 @@ def acl_maybe_toggle(config: MkosiConfig, root: Path, uid: int, *, always: bool) # getfacl complains about absolute paths so make sure we pass a relative one. if root.exists(): - has_acl = f"user:{uid}:rwx" in bwrap([ - "getfacl", "-n", root.relative_to(Path.cwd())], - stdout=subprocess.PIPE, - root=config.tools_tree, - ).stdout + has_acl = f"user:{uid}:rwx" in run(["getfacl", "-n", root.relative_to(Path.cwd())], + stdout=subprocess.PIPE).stdout if not has_acl and not always: yield @@ -1949,37 +1909,35 @@ def acl_maybe_toggle(config: MkosiConfig, root: Path, uid: int, *, always: bool) try: if has_acl: with complete_step(f"Removing ACLs from {root}"): - setfacl(config, root, uid, allow=False) + setfacl(root, uid, allow=False) yield finally: if has_acl or always: with complete_step(f"Adding ACLs to {root}"): - setfacl(config, root, uid, allow=True) + setfacl(root, uid, allow=True) @contextlib.contextmanager -def acl_toggle_build(state: MkosiState) -> Iterator[None]: - if not state.config.acl: +def acl_toggle_build(config: MkosiConfig, uid: int) -> Iterator[None]: + if not config.acl: yield return - extras = [e[0] for e in state.config.extra_trees] - skeletons = [s[0] for s in state.config.skeleton_trees] + extras = [e[0] for e in config.extra_trees] + skeletons = [s[0] for s in config.skeleton_trees] with contextlib.ExitStack() as stack: - for p in (*state.config.base_trees, *extras, *skeletons): + for p in (*config.base_trees, *extras, *skeletons): if p and p.is_dir(): - stack.enter_context(acl_maybe_toggle(state.config, p, state.uid, always=False)) + stack.enter_context(acl_maybe_toggle(config, p, uid, always=False)) - for p in (state.config.cache_dir, state.config.build_dir): + for p in (config.cache_dir, config.build_dir): if p: - stack.enter_context(acl_maybe_toggle(state.config, p, state.uid, always=True)) + stack.enter_context(acl_maybe_toggle(config, p, uid, always=True)) - if state.config.output_format == OutputFormat.directory: - stack.enter_context(acl_maybe_toggle(state.config, - state.config.output_dir / state.config.output, - state.uid, always=True)) + if config.output_format == OutputFormat.directory: + stack.enter_context(acl_maybe_toggle(config, config.output_dir / config.output, uid, always=True)) yield @@ -1990,12 +1948,12 @@ def check_root() -> None: @contextlib.contextmanager -def acl_toggle_boot(config: MkosiConfig) -> Iterator[None]: +def acl_toggle_boot(config: MkosiConfig, uid: int) -> 
Iterator[None]: if not config.acl or config.output_format != OutputFormat.directory: yield return - with acl_maybe_toggle(config, config.output_dir / config.output, InvokingUser.uid(), always=False): + with acl_maybe_toggle(config, config.output_dir / config.output, uid, always=False): yield @@ -2026,14 +1984,13 @@ def run_shell(args: MkosiArgs, config: MkosiConfig) -> None: fname = config.output_dir / config.output if config.output_format == OutputFormat.disk and args.verb == Verb.boot: - bwrap(["systemd-repart", - "--image", fname, - "--size", "8G", - "--no-pager", - "--dry-run=no", - "--offline=no", - fname], - root=config.tools_tree) + run(["systemd-repart", + "--image", fname, + "--size", "8G", + "--no-pager", + "--dry-run=no", + "--offline=no", + fname]) if config.output_format == OutputFormat.directory: cmdline += ["--directory", fname] @@ -2055,14 +2012,7 @@ def run_shell(args: MkosiArgs, config: MkosiConfig) -> None: cmdline += ["--"] cmdline += args.cmdline - stack.enter_context(acl_toggle_boot(config)) - - bwrap(cmdline, - stdin=sys.stdin, - stdout=sys.stdout, - env=os.environ, - log=False, - root=config.tools_tree) + run(cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False) def run_ssh(args: MkosiArgs, config: MkosiConfig) -> None: @@ -2078,7 +2028,7 @@ def run_ssh(args: MkosiArgs, config: MkosiConfig) -> None: cmd += args.cmdline - bwrap(cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False, root=config.tools_tree) + run(cmd, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False) def run_serve(config: MkosiConfig) -> None: @@ -2130,8 +2080,9 @@ def generate_key_cert_pair(args: MkosiArgs) -> None: run(cmd) -def bump_image_version() -> None: +def bump_image_version(uid: Optional[int] = None, gid: Optional[int] = None) -> None: """Write current image version plus one to mkosi.version""" + assert bool(uid) == bool(gid) version = Path("mkosi.version").read_text().strip() v = version.split(".") @@ -2148,6 +2099,8 @@ def bump_image_version() -> None: logging.info(f"Increasing last component of version by one, bumping '{version}' → '{new_version}'.") Path("mkosi.version").write_text(f"{new_version}\n") + if uid and gid: + os.chown("mkosi.version", uid, gid) def expand_specifier(s: str) -> str: @@ -2158,6 +2111,28 @@ def needs_build(args: MkosiArgs, config: MkosiConfig) -> bool: return args.verb in MKOSI_COMMANDS_NEED_BUILD and (args.force > 0 or not config.output_dir.joinpath(config.output_with_compression).exists()) +@contextlib.contextmanager +def prepend_to_environ_path(config: MkosiConfig) -> Iterator[None]: + if config.tools_tree or not config.extra_search_paths: + yield + return + + with tempfile.TemporaryDirectory(prefix="mkosi.path", dir=tmp_dir()) as d: + + for path in config.extra_search_paths: + if not path.is_dir(): + Path(d).joinpath(path.name).symlink_to(path.absolute()) + + news = [os.fspath(path) for path in [Path(d), *config.extra_search_paths] if path.is_dir()] + olds = os.getenv("PATH", "").split(":") + os.environ["PATH"] = ":".join(news + olds) + + try: + yield + finally: + os.environ["PATH"] = ":".join(olds) + + def run_verb(args: MkosiArgs, presets: Sequence[MkosiConfig]) -> None: if args.verb in MKOSI_COMMANDS_SUDO: check_root() @@ -2196,6 +2171,25 @@ def run_verb(args: MkosiArgs, presets: Sequence[MkosiConfig]) -> None: if args.verb == Verb.build and not args.force: check_outputs(config) + # Because we overmount /usr when using a tools tree, we need to make sure we load all python modules we + # might end up using before 
overmounting /usr. Any modules that might be dynamically loaded during + # execution are forcibly loaded early here. + try_import("importlib.readers") + try_import("importlib.resources.readers") + for config in presets: + try_import(f"mkosi.distributions.{config.distribution}") + + name = InvokingUser.name() + + # Get the user UID/GID either on the host or in the user namespace running the build + uid, gid = become_root() + init_mount_namespace() + + # For extra safety when running as root, remount a bunch of stuff read-only. + for d in ("/usr", "/etc", "/opt", "/srv", "/boot", "/efi"): + if Path(d).exists(): + run(["mount", "--rbind", d, d, "--options", "ro"]) + # First, process all directory removals because otherwise if different presets share directories a later # preset could end up output generated by an earlier preset. @@ -2203,11 +2197,7 @@ def run_verb(args: MkosiArgs, presets: Sequence[MkosiConfig]) -> None: if not needs_build(args, config) and args.verb != Verb.clean: continue - def target() -> None: - become_root() - unlink_output(args, config) - - fork_and_wait(target) + unlink_output(args, config) if args.verb == Verb.clean: return @@ -2220,42 +2210,54 @@ def run_verb(args: MkosiArgs, presets: Sequence[MkosiConfig]) -> None: if not needs_build(args, config): continue - with prepend_to_environ_path(config.extra_search_paths): - def target() -> None: - # Create these before changing user to make sure they're owned by the user running mkosi. - for d in ( - config.output_dir, - config.cache_dir, - config.build_dir, - config.workspace_dir, - ): - if d: - d.mkdir(parents=True, exist_ok=True) - - # Get the user UID/GID either on the host or in the user namespace running the build - uid, gid = become_root() - build_image(args, config, uid, gid) - - # We only want to run the build in a user namespace but not the following steps. Since we - # can't rejoin the parent user namespace after unsharing from it, let's run the build in a - # fork so that the main process does not leave its user namespace. - with complete_step(f"Building {config.preset or 'default'} image"): - fork_and_wait(target) + with complete_step(f"Building {config.preset or 'default'} image"),\ + mount_tools(config),\ + prepend_to_environ_path(config): + + # Create these as the invoking user to make sure they're owned by the user running mkosi. + for p in ( + config.output_dir, + config.cache_dir, + config.build_dir, + config.workspace_dir, + ): + if p: + run(["mkdir", "--parents", p], user=uid, group=gid) + + with acl_toggle_build(config, uid): + build_image(args, config) + + # Make sure all build outputs that are not directories are owned by the user running mkosi. + for p in config.output_dir.iterdir(): + if not p.is_dir(): + os.chown(p, uid, gid, follow_symlinks=False) build = True - if build and args.auto_bump: - bump_image_version() + # We want to drop privileges after mounting the last tools tree, but to unmount it we still need + # privileges. To avoid a permission error, let's not unmount the final tools tree, since we'll exit + # right after (and we're in a mount namespace so the /usr mount disappears when we exit) + with mount_tools(last, umount=False), mount_passwd(name, uid, gid, umount=False): + + # After mounting the last tools tree, if we're not going to execute systemd-nspawn, we don't need to + # be (fake) root anymore, so switch user to the invoking user. 
+ if args.verb not in (Verb.shell, Verb.boot): + os.setresgid(gid, gid, gid) + os.setresuid(uid, uid, uid) + + if build and args.auto_bump: + bump_image_version(uid, gid) - with prepend_to_environ_path(last.extra_search_paths): - if args.verb in (Verb.shell, Verb.boot): - run_shell(args, last) + with prepend_to_environ_path(last): + if args.verb in (Verb.shell, Verb.boot): + with acl_toggle_boot(last, uid): + run_shell(args, last) - if args.verb == Verb.qemu: - run_qemu(args, last) + if args.verb == Verb.qemu: + run_qemu(args, last) - if args.verb == Verb.ssh: - run_ssh(args, last) + if args.verb == Verb.ssh: + run_ssh(args, last) - if args.verb == Verb.serve: - run_serve(last) + if args.verb == Verb.serve: + run_serve(last) diff --git a/mkosi/__main__.py b/mkosi/__main__.py index 719ae78f5..db091f63a 100644 --- a/mkosi/__main__.py +++ b/mkosi/__main__.py @@ -10,13 +10,11 @@ from collections.abc import Iterator from mkosi import run_verb from mkosi.config import MkosiConfigParser from mkosi.log import ARG_DEBUG, log_setup -from mkosi.run import ensure_exc_info, excepthook +from mkosi.run import ensure_exc_info @contextlib.contextmanager def propagate_failed_return() -> Iterator[None]: - sys.excepthook = excepthook - try: yield except SystemExit as e: diff --git a/mkosi/btrfs.py b/mkosi/btrfs.py index 552a638cf..3b38c048c 100644 --- a/mkosi/btrfs.py +++ b/mkosi/btrfs.py @@ -8,19 +8,19 @@ from typing import cast from mkosi.config import ConfigFeature, MkosiConfig from mkosi.install import copy_path from mkosi.log import die -from mkosi.run import bwrap +from mkosi.run import run -def statfs(config: MkosiConfig, path: Path) -> str: - return cast(str, bwrap(["stat", "--file-system", "--format", "%T", path.parent], - root=config.tools_tree, stdout=subprocess.PIPE).stdout.strip()) +def statfs(path: Path) -> str: + return cast(str, run(["stat", "--file-system", "--format", "%T", path.parent], + stdout=subprocess.PIPE).stdout.strip()) def btrfs_maybe_make_subvolume(config: MkosiConfig, path: Path, mode: int) -> None: if config.use_subvolumes == ConfigFeature.enabled and not shutil.which("btrfs"): die("Subvolumes requested but the btrfs command was not found") - if statfs(config, path.parent) != "btrfs": + if statfs(path.parent) != "btrfs": if config.use_subvolumes == ConfigFeature.enabled: die(f"Subvolumes requested but {path} is not located on a btrfs filesystem") @@ -28,9 +28,8 @@ def btrfs_maybe_make_subvolume(config: MkosiConfig, path: Path, mode: int) -> No return if config.use_subvolumes != ConfigFeature.disabled and shutil.which("btrfs") is not None: - result = bwrap(["btrfs", "subvolume", "create", path], - check=config.use_subvolumes == ConfigFeature.enabled, - root=config.tools_tree).returncode + result = run(["btrfs", "subvolume", "create", path], + check=config.use_subvolumes == ConfigFeature.enabled).returncode else: result = 1 @@ -48,19 +47,18 @@ def btrfs_maybe_snapshot_subvolume(config: MkosiConfig, src: Path, dst: Path) -> die("Subvolumes requested but the btrfs command was not found") # Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume. 
- if not subvolume or statfs(config, src) != "btrfs" or src.stat().st_ino != 256 or (dst.exists() and any(dst.iterdir())): - return copy_path(src, dst, root=config.tools_tree) + if not subvolume or statfs(src) != "btrfs" or src.stat().st_ino != 256 or (dst.exists() and any(dst.iterdir())): + return copy_path(src, dst) # btrfs can't snapshot to an existing directory so make sure the destination does not exist. if dst.exists(): dst.rmdir() if shutil.which("btrfs"): - result = bwrap(["btrfs", "subvolume", "snapshot", src, dst], - check=config.use_subvolumes == ConfigFeature.enabled, - root=config.tools_tree).returncode + result = run(["btrfs", "subvolume", "snapshot", src, dst], + check=config.use_subvolumes == ConfigFeature.enabled).returncode else: result = 1 if result != 0: - copy_path(src, dst, root=config.tools_tree) + copy_path(src, dst) diff --git a/mkosi/config.py b/mkosi/config.py index a587e46e9..048970880 100644 --- a/mkosi/config.py +++ b/mkosi/config.py @@ -1974,7 +1974,7 @@ def load_credentials(args: argparse.Namespace) -> dict[str, str]: if args.directory != "" and d.is_dir(): for e in d.iterdir(): if os.access(e, os.X_OK): - creds[e.name] = run([e], text=True, stdout=subprocess.PIPE, env=os.environ).stdout + creds[e.name] = run([e], stdout=subprocess.PIPE, env=os.environ).stdout else: creds[e.name] = e.read_text() @@ -1985,7 +1985,6 @@ def load_credentials(args: argparse.Namespace) -> dict[str, str]: if "firstboot.timezone" not in creds: tz = run( ["timedatectl", "show", "-p", "Timezone", "--value"], - text=True, stdout=subprocess.PIPE, check=False, ).stdout.strip() @@ -1998,7 +1997,6 @@ def load_credentials(args: argparse.Namespace) -> dict[str, str]: if args.ssh and "ssh.authorized_keys.root" not in creds and "SSH_AUTH_SOCK" in os.environ: key = run( ["ssh-add", "-L"], - text=True, stdout=subprocess.PIPE, env=os.environ, check=False, diff --git a/mkosi/distributions/arch.py b/mkosi/distributions/arch.py index e582d2f84..6bad590cd 100644 --- a/mkosi/distributions/arch.py +++ b/mkosi/distributions/arch.py @@ -135,5 +135,4 @@ def invoke_pacman(state: MkosiState, packages: Sequence[str], apivfs: bool = Tru bwrap(cmdline, apivfs=state.root if apivfs else None, - env=dict(KERNEL_INSTALL_BYPASS="1") | state.environment, - root=state.config.tools_tree) + env=dict(KERNEL_INSTALL_BYPASS="1") | state.environment) diff --git a/mkosi/distributions/debian.py b/mkosi/distributions/debian.py index b5ce0f025..febaf5781 100644 --- a/mkosi/distributions/debian.py +++ b/mkosi/distributions/debian.py @@ -9,9 +9,9 @@ from textwrap import dedent from mkosi.architecture import Architecture from mkosi.distributions import DistributionInstaller from mkosi.log import die -from mkosi.run import bwrap +from mkosi.run import bwrap, run from mkosi.state import MkosiState -from mkosi.types import CompletedProcess, PathString +from mkosi.types import PathString class DebianInstaller(DistributionInstaller): @@ -93,9 +93,8 @@ class DebianInstaller(DistributionInstaller): for deb in essential: with tempfile.NamedTemporaryFile(dir=state.workspace) as f: - bwrap(["dpkg-deb", "--fsys-tarfile", deb], stdout=f, root=state.config.tools_tree) - bwrap(["tar", "-C", state.root, "--keep-directory-symlink", "--extract", "--file", f.name], - root=state.config.tools_tree) + run(["dpkg-deb", "--fsys-tarfile", deb], stdout=f) + run(["tar", "-C", state.root, "--keep-directory-symlink", "--extract", "--file", f.name]) # Finally, run apt to properly install packages in the chroot without having to worry that maintainer # 
scripts won't find basic tools that they depend on. @@ -201,7 +200,7 @@ def invoke_apt( operation: str, packages: Sequence[str] = (), apivfs: bool = True, -) -> CompletedProcess: +) -> None: env: dict[str, PathString] = dict( APT_CONFIG=state.workspace / "apt.conf", DEBIAN_FRONTEND="noninteractive", @@ -244,10 +243,9 @@ def invoke_apt( "-o", "pkgCacheGen::ForceEssential=,", ] - return bwrap(["apt-get", *options, operation, *packages], - apivfs=state.root if apivfs else None, - env=env | state.environment, - root=state.config.tools_tree) + bwrap(["apt-get", *options, operation, *packages], + apivfs=state.root if apivfs else None, + env=env | state.environment) def install_apt_sources(state: MkosiState, repos: Sequence[str]) -> None: diff --git a/mkosi/distributions/fedora.py b/mkosi/distributions/fedora.py index f69ef195f..1d466dd3f 100644 --- a/mkosi/distributions/fedora.py +++ b/mkosi/distributions/fedora.py @@ -220,8 +220,7 @@ def invoke_dnf( bwrap(cmdline, apivfs=state.root if apivfs else None, - env=dict(KERNEL_INSTALL_BYPASS="1") | env | state.environment, - root=state.config.tools_tree) + env=dict(KERNEL_INSTALL_BYPASS="1") | env | state.environment) fixup_rpmdb_location(state.root) diff --git a/mkosi/distributions/gentoo.py b/mkosi/distributions/gentoo.py index 77595a011..433e416eb 100644 --- a/mkosi/distributions/gentoo.py +++ b/mkosi/distributions/gentoo.py @@ -12,7 +12,7 @@ from mkosi.distributions import DistributionInstaller from mkosi.install import copy_path from mkosi.log import ARG_DEBUG, complete_step, die from mkosi.remove import unlink_try_hard -from mkosi.run import bwrap, chroot_cmd +from mkosi.run import bwrap, chroot_cmd, run from mkosi.state import MkosiState from mkosi.types import PathString @@ -47,7 +47,6 @@ def invoke_emerge( *(["--verbose", "--quiet=n", "--quiet-fail=n"] if ARG_DEBUG.get() else ["--quiet-build", "--quiet"]), *options, ], - tools=state.config.tools_tree, apivfs=state.cache_dir / "stage3", scripts=dict( chroot=chroot_cmd( @@ -124,7 +123,7 @@ class GentooInstaller(DistributionInstaller): if stage3_tar.exists(): cmd += ["--time-cond", stage3_tar] - bwrap(cmd, root=state.config.tools_tree) + run(cmd) if stage3_tar.stat().st_mtime > old: unlink_try_hard(stage3) @@ -133,24 +132,22 @@ class GentooInstaller(DistributionInstaller): if not any(stage3.iterdir()): with complete_step(f"Extracting {stage3_tar.name} to {stage3}"): - bwrap(["tar", - "--numeric-owner", - "-C", stage3, - "--extract", - "--file", stage3_tar, - "--exclude", "./dev/*", - "--exclude", "./proc/*", - "--exclude", "./sys/*"], - root=state.config.tools_tree) + run(["tar", + "--numeric-owner", + "-C", stage3, + "--extract", + "--file", stage3_tar, + "--exclude", "./dev/*", + "--exclude", "./proc/*", + "--exclude", "./sys/*"]) for d in ("binpkgs", "distfiles", "repos/gentoo"): (state.cache_dir / d).mkdir(parents=True, exist_ok=True) - copy_path(state.pkgmngr, stage3, preserve_owner=False, root=state.config.tools_tree) + copy_path(state.pkgmngr, stage3, preserve_owner=False) bwrap( cmd=["chroot", "emerge-webrsync"], - tools=state.config.tools_tree, apivfs=stage3, scripts=dict( chroot=chroot_cmd( diff --git a/mkosi/distributions/opensuse.py b/mkosi/distributions/opensuse.py index 6e9c17499..ebfa1faa5 100644 --- a/mkosi/distributions/opensuse.py +++ b/mkosi/distributions/opensuse.py @@ -141,8 +141,7 @@ def invoke_zypper( bwrap(cmdline, apivfs=state.root if apivfs else None, - env=dict(ZYPP_CONF=str(state.pkgmngr / "etc/zypp/zypp.conf"), KERNEL_INSTALL_BYPASS="1") | state.environment, - 
root=state.config.tools_tree) + env=dict(ZYPP_CONF=str(state.pkgmngr / "etc/zypp/zypp.conf"), KERNEL_INSTALL_BYPASS="1") | state.environment) fixup_rpmdb_location(state.root) diff --git a/mkosi/install.py b/mkosi/install.py index 26c5b734c..4f355580e 100644 --- a/mkosi/install.py +++ b/mkosi/install.py @@ -8,7 +8,7 @@ from collections.abc import Iterator from pathlib import Path from typing import Optional -from mkosi.run import bwrap +from mkosi.run import run from mkosi.util import make_executable @@ -48,9 +48,8 @@ def copy_path( *, dereference: bool = False, preserve_owner: bool = True, - root: Optional[Path] = None, ) -> None: - bwrap([ + run([ "cp", "--recursive", f"--{'' if dereference else 'no-'}dereference", @@ -58,4 +57,4 @@ def copy_path( "--no-target-directory", "--reflink=auto", src, dst, - ], root=root) + ]) diff --git a/mkosi/manifest.py b/mkosi/manifest.py index a0a5de155..29578888d 100644 --- a/mkosi/manifest.py +++ b/mkosi/manifest.py @@ -9,7 +9,7 @@ from textwrap import dedent from typing import IO, Any, Optional from mkosi.config import MkosiConfig -from mkosi.run import bwrap +from mkosi.run import run from mkosi.util import Distribution, ManifestFormat, PackageType @@ -105,13 +105,12 @@ class Manifest: if not (root / dbpath).exists(): dbpath = "/var/lib/rpm" - c = bwrap(["rpm", - f"--root={root}", - f"--dbpath={dbpath}", - "-qa", - "--qf", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{LONGSIZE}\t%{INSTALLTIME}\n"], - stdout=PIPE, - root=self.config.tools_tree) + c = run(["rpm", + f"--root={root}", + f"--dbpath={dbpath}", + "-qa", + "--qf", r"%{NEVRA}\t%{SOURCERPM}\t%{NAME}\t%{ARCH}\t%{LONGSIZE}\t%{INSTALLTIME}\n"], + stdout=PIPE) packages = sorted(c.stdout.splitlines()) @@ -146,15 +145,14 @@ class Manifest: source = self.source_packages.get(srpm) if source is None: - c = bwrap(["rpm", - f"--root={root}", - f"--dbpath={dbpath}", - "-q", - "--changelog", - nevra], - stdout=PIPE, - stderr=DEVNULL, - root=self.config.tools_tree) + c = run(["rpm", + f"--root={root}", + f"--dbpath={dbpath}", + "-q", + "--changelog", + nevra], + stdout=PIPE, + stderr=DEVNULL) changelog = c.stdout.strip() source = SourcePackageManifest(srpm, changelog) self.source_packages[srpm] = source @@ -162,13 +160,12 @@ class Manifest: source.add(package) def record_deb_packages(self, root: Path) -> None: - c = bwrap(["dpkg-query", - f"--admindir={root}/var/lib/dpkg", - "--show", - "--showformat", - r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n'], - stdout=PIPE, - root=self.config.tools_tree) + c = run(["dpkg-query", + f"--admindir={root}/var/lib/dpkg", + "--show", + "--showformat", + r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n'], + stdout=PIPE) packages = sorted(c.stdout.splitlines()) @@ -227,7 +224,7 @@ class Manifest: # We have to run from the root, because if we use the RootDir option to make # apt from the host look at the repositories in the image, it will also pick # the 'methods' executables from there, but the ABI might not be compatible. 
- result = bwrap(cmd, stdout=PIPE, root=self.config.tools_tree) + result = run(cmd, stdout=PIPE) source_package = SourcePackageManifest(source, result.stdout.strip()) self.source_packages[source] = source_package diff --git a/mkosi/mounts.py b/mkosi/mounts.py index 30e36bb7e..3e893c077 100644 --- a/mkosi/mounts.py +++ b/mkosi/mounts.py @@ -10,7 +10,7 @@ from collections.abc import Iterator, Sequence from pathlib import Path from typing import Callable, Deque, Optional, TypeVar, Union, cast -from mkosi.config import GenericVersion +from mkosi.config import GenericVersion, MkosiConfig from mkosi.log import complete_step from mkosi.run import run from mkosi.types import PathString @@ -59,8 +59,10 @@ def mount( options: Sequence[str] = (), type: Optional[str] = None, read_only: bool = False, + umount: bool = True, ) -> Iterator[Path]: - os.makedirs(where, 0o755, True) + if not where.exists(): + where.mkdir(mode=0o755, parents=True) if read_only: options = ["ro", *options] @@ -78,14 +80,15 @@ def mount( if options: cmd += ["--options", ",".join(options)] - # Ideally we'd run these with bwrap() but bubblewrap disables all mount propagation to the root so any - # mounts we do within bubblewrap aren't propagated to the overarching mount namespace. - try: run(cmd) yield where finally: - run(["umount", "--no-mtab", "--recursive", where]) + if umount: + # If we mounted over /usr, trying to use umount will fail with "target is busy", because umount + # is being called from /usr, which we're trying to unmount. To work around this issue, we do a + # lazy unmount. + run(["umount", "--no-mtab", "--lazy", where]) @contextlib.contextmanager @@ -110,3 +113,37 @@ def mount_overlay( delete_whiteout_files(upperdir) +@contextlib.contextmanager +def mount_tools(config: MkosiConfig, umount: bool = True) -> Iterator[None]: + if not config.tools_tree: + yield + return + + # If a tools tree is specified, we should ignore any local modifications made to PATH as any of those + # binaries might not work anymore when /usr is replaced wholesale. We also make sure that both /usr/bin + # and /usr/sbin/ are searched so that e.g. if the host is Arch and the root is Debian we don't ignore the + # binaries from /usr/sbin in the Debian root. + old = os.environ["PATH"] + os.environ["PATH"] = "/usr/bin:/usr/sbin" + + try: + with mount(what=config.tools_tree / "usr", where=Path("/usr"), operation="--bind", read_only=True, umount=umount): + yield + finally: + os.environ["PATH"] = old + + +@contextlib.contextmanager +def mount_passwd(name: str, uid: int, gid: int, umount: bool = True) -> Iterator[None]: + """ + ssh looks up the running user in /etc/passwd and fails if it can't find the running user. To trick it, we + mount over /etc/passwd with our own file containing our user in the user namespace. 
+ """ + with tempfile.NamedTemporaryFile(mode="w") as passwd: + passwd.write(f"{name}:x:{uid}:{gid}:{name}:/bin/sh\n") + passwd.flush() + + os.chown(passwd.name, uid, gid) + + with mount(passwd.name, Path("/etc/passwd"), operation="--bind", umount=umount): + yield diff --git a/mkosi/qemu.py b/mkosi/qemu.py index a4dd0bb9a..a2204ab10 100644 --- a/mkosi/qemu.py +++ b/mkosi/qemu.py @@ -20,7 +20,7 @@ from mkosi.btrfs import btrfs_maybe_snapshot_subvolume from mkosi.config import ConfigFeature, MkosiArgs, MkosiConfig from mkosi.log import die from mkosi.remove import unlink_try_hard -from mkosi.run import MkosiAsyncioThread, bwrap, bwrap_cmd, spawn +from mkosi.run import MkosiAsyncioThread, run, spawn from mkosi.types import PathString from mkosi.util import ( Distribution, @@ -137,10 +137,16 @@ def find_ovmf_vars(config: MkosiConfig) -> Path: @contextlib.contextmanager -def start_swtpm(config: MkosiConfig) -> Iterator[Optional[Path]]: - with tempfile.TemporaryDirectory() as state, bwrap_cmd(root=config.tools_tree) as bwrap: +def start_swtpm() -> Iterator[Optional[Path]]: + with tempfile.TemporaryDirectory() as state: sock = Path(state) / Path("sock") - proc = spawn([*bwrap, "swtpm", "socket", "--tpm2", "--tpmstate", f"dir={state}", "--ctrl", f"type=unixio,path={sock}"]) + proc = spawn([ + "swtpm", + "socket", + "--tpm2", + "--tpmstate", f"dir={state}", + "--ctrl", f"type=unixio,path={sock}" + ]) try: yield sock @@ -274,7 +280,15 @@ def run_qemu(args: MkosiArgs, config: MkosiConfig) -> None: fname = config.output_dir / config.output if config.output_format == OutputFormat.disk: - bwrap(["systemd-repart", "--definitions", "", "--no-pager", "--size", "8G", "--pretty", "no", fname]) + run([ + "systemd-repart", + "--definitions", "", + "--no-pager", + "--size", "8G", + "--pretty", "no", + "--offline", "yes", + fname, + ]) # Debian images fail to boot with virtio-scsi, see: https://github.com/systemd/mkosi/issues/725 if config.output_format == OutputFormat.cpio: @@ -292,7 +306,7 @@ def run_qemu(args: MkosiArgs, config: MkosiConfig) -> None: "-device", "scsi-hd,drive=hd,bootindex=1"] if config.qemu_swtpm != ConfigFeature.disabled and shutil.which("swtpm") is not None: - sock = stack.enter_context(start_swtpm(config)) + sock = stack.enter_context(start_swtpm()) cmdline += ["-chardev", f"socket,id=chrtpm,path={sock}", "-tpmdev", "emulator,id=tpm0,chardev=chrtpm"] @@ -308,12 +322,7 @@ def run_qemu(args: MkosiArgs, config: MkosiConfig) -> None: cmdline += config.qemu_args cmdline += args.cmdline - bwrap(cmdline, - stdin=sys.stdin, - stdout=sys.stdout, - env=os.environ, - log=False, - root=config.tools_tree) + run(cmdline, stdin=sys.stdin, stdout=sys.stdout, env=os.environ, log=False) if status := int(notifications.get("EXIT_STATUS", 0)): raise subprocess.CalledProcessError(status, cmdline) diff --git a/mkosi/run.py b/mkosi/run.py index 5176a9b29..240bf0d2f 100644 --- a/mkosi/run.py +++ b/mkosi/run.py @@ -2,7 +2,6 @@ import asyncio import asyncio.tasks -import contextlib import ctypes import ctypes.util import logging @@ -17,21 +16,9 @@ import sys import tempfile import textwrap import threading -import traceback from pathlib import Path from types import TracebackType -from typing import ( - Any, - Awaitable, - Callable, - Iterator, - Mapping, - Optional, - Sequence, - Tuple, - Type, - TypeVar, -) +from typing import Any, Awaitable, Mapping, Optional, Sequence, Tuple, Type, TypeVar from mkosi.log import ARG_DEBUG, ARG_DEBUG_SHELL, die from mkosi.types import _FILE, CompletedProcess, PathString, Popen @@ 
-125,7 +112,7 @@ def become_root() -> tuple[int, int]: os._exit(0) - unshare(CLONE_NEWUSER|CLONE_NEWNS) + unshare(CLONE_NEWUSER) event.set() os.waitpid(child, 0) @@ -138,6 +125,11 @@ def become_root() -> tuple[int, int]: return SUBRANGE - 100, SUBRANGE - 100 +def init_mount_namespace() -> None: + unshare(CLONE_NEWNS) + run(["mount", "--make-rslave", "/"]) + + def foreground(*, new_process_group: bool = True) -> None: """ If we're connected to a terminal, put the process in a new process group and make that the foreground @@ -152,20 +144,6 @@ def foreground(*, new_process_group: bool = True) -> None: signal.signal(signal.SIGTTOU, old) -class RemoteException(Exception): - """ - Stores the exception from a subprocess along with its traceback. We have to do this explicitly because - the original traceback object cannot be pickled. When stringified, produces the subprocess stacktrace - plus the exception message. - """ - def __init__(self, e: BaseException, tb: traceback.StackSummary): - self.exception = e - self.tb = tb - - def __str__(self) -> str: - return f"Traceback (most recent call last):\n{''.join(self.tb.format()).strip()}\n{type(self.exception).__name__}: {self.exception}" - - def ensure_exc_info() -> Tuple[Type[BaseException], BaseException, TracebackType]: exctype, exc, tb = sys.exc_info() assert exctype @@ -174,51 +152,6 @@ def ensure_exc_info() -> Tuple[Type[BaseException], BaseException, TracebackType return (exctype, exc, tb) -def excepthook(exctype: Type[BaseException], exc: BaseException, tb: Optional[TracebackType]) -> None: - """Attach to sys.excepthook to automatically format exceptions with a RemoteException attached correctly.""" - if isinstance(exc.__cause__, RemoteException): - print(exc.__cause__, file=sys.stderr) - else: - sys.__excepthook__(exctype, exc, tb) - - -def fork_and_wait(target: Callable[[], T]) -> T: - """Run the target function in the foreground in a child process and collect its backtrace if there is one.""" - pout, pin = multiprocessing.Pipe(duplex=False) - - pid = os.fork() - if pid == 0: - foreground() - - try: - result = target() - except BaseException as e: - # Just getting the stacktrace from the traceback doesn't get us the parent frames for some reason - # so we have to attach those manually. - tb = traceback.StackSummary.from_list(traceback.extract_stack()[:-1] + traceback.extract_tb(e.__traceback__)) - pin.send(RemoteException(e, tb)) - else: - pin.send(result) - finally: - pin.close() - - sys.stdout.flush() - sys.stderr.flush() - - os._exit(0) - - try: - os.waitpid(pid, 0) - finally: - foreground(new_process_group=False) - - result = pout.recv() - if isinstance(result, RemoteException): - # Reraise the original exception and attach the remote exception with full traceback as the cause. 
- raise result.exception from result - - return result - def run( cmdline: Sequence[PathString], check: bool = True, @@ -226,7 +159,8 @@ def run( stdout: _FILE = None, stderr: _FILE = None, input: Optional[str] = None, - text: bool = True, + user: Optional[int] = None, + group: Optional[int] = None, env: Mapping[str, PathString] = {}, log: bool = True, ) -> CompletedProcess: @@ -264,6 +198,8 @@ def run( stderr=stderr, input=input, text=True, + user=user, + group=group, env=env, preexec_fn=foreground, ) @@ -283,6 +219,8 @@ def spawn( stdout: _FILE = None, stderr: _FILE = None, text: bool = True, + user: Optional[int] = None, + group: Optional[int] = None, ) -> Popen: if ARG_DEBUG.get(): logging.info(f"+ {' '.join(str(s) for s in cmdline)}") @@ -300,6 +238,8 @@ def spawn( stdout=stdout, stderr=stderr, text=text, + user=user, + group=group, preexec_fn=foreground, ) except FileNotFoundError: @@ -309,25 +249,21 @@ def spawn( raise e -@contextlib.contextmanager -def bwrap_cmd( +def bwrap( + cmd: Sequence[PathString], *, - root: Optional[Path] = None, apivfs: Optional[Path] = None, + log: bool = True, scripts: Mapping[str, Sequence[PathString]] = {}, -) -> Iterator[list[PathString]]: + env: Mapping[str, PathString] = {}, +) -> CompletedProcess: cmdline: list[PathString] = [ "bwrap", "--dev-bind", "/", "/", "--chdir", Path.cwd(), "--die-with-parent", - "--ro-bind", (root or Path("/")) / "usr", "/usr", ] - for d in ("/etc", "/opt", "/srv", "/boot", "/efi"): - if Path(d).exists(): - cmdline += ["--ro-bind", d, d] - if apivfs: if not (apivfs / "etc/machine-id").exists(): # Uninitialized means we want it to get initialized on first boot. @@ -361,7 +297,7 @@ def bwrap_cmd( with tempfile.TemporaryDirectory(dir="/var/tmp", prefix="mkosi-var-tmp") as var_tmp,\ tempfile.TemporaryDirectory(dir="/tmp", prefix="mkosi-scripts") as d: - for name, cmd in scripts.items(): + for name, script in scripts.items(): # Make sure we don't end up in a recursive loop when we name a script after the binary it execs # by removing the scripts directory from the PATH when we execute a script. (Path(d) / name).write_text( @@ -370,23 +306,14 @@ def bwrap_cmd( #!/bin/sh PATH="$(echo $PATH | tr ':' '\n' | grep -v {Path(d)} | tr '\n' ':')" export PATH - exec {shlex.join(str(s) for s in cmd)} "$@" + exec {shlex.join(str(s) for s in script)} "$@" """ ) ) make_executable(Path(d) / name) - # We modify the PATH via --setenv so that bwrap itself is looked up in PATH before we change it. - if root: - # If a tools tree is specified, we should ignore any local modifications made to PATH as any of - # those binaries might not work anymore when /usr is replaced wholesale. We also make sure that - # both /usr/bin and /usr/sbin/ are searched so that e.g. if the host is Arch and the root is - # Debian we don't ignore the binaries from /usr/sbin in the Debian root. We also keep the scripts - # directory in PATH as all of them are interpreted and can't be messed up by replacing /usr. 
- cmdline += ["--setenv", "PATH", f"{d}:/usr/bin:/usr/sbin"] - else: - cmdline += ["--setenv", "PATH", f"{d}:{os.environ['PATH']}"] + cmdline += ["--setenv", "PATH", f"{d}:{os.environ['PATH']}"] if apivfs: cmdline += [ @@ -398,7 +325,13 @@ def bwrap_cmd( cmdline += ["sh", "-c", f"{chmod} && exec $0 \"$@\" || exit $?"] try: - yield cmdline + result = run([*cmdline, *cmd], env=env, log=False) + except subprocess.CalledProcessError as e: + if log: + logging.error(f"\"{' '.join(str(s) for s in cmd)}\" returned non-zero exit code {e.returncode}.") + if ARG_DEBUG_SHELL.get(): + run([*cmdline, "sh"], stdin=sys.stdin, check=False, env=env, log=False) + raise e finally: # Clean up some stuff that might get written by package manager post install scripts. if apivfs: @@ -408,48 +341,6 @@ def bwrap_cmd( if (apivfs / f).exists(): (apivfs / f).unlink() - -def bwrap( - cmd: Sequence[PathString], - *, - root: Optional[Path] = None, - apivfs: Optional[Path] = None, - log: bool = True, - scripts: Mapping[str, Sequence[PathString]] = {}, - # The following arguments are passed directly to run(). - stdin: _FILE = None, - stdout: _FILE = None, - stderr: _FILE = None, - input: Optional[str] = None, - check: bool = True, - env: Mapping[str, PathString] = {}, -) -> CompletedProcess: - with bwrap_cmd(root=root, apivfs=apivfs, scripts=scripts) as bwrap: - try: - result = run( - [*bwrap, *cmd], - text=True, - env=env, - log=False, - stdin=stdin, - stdout=stdout, - stderr=stderr, - input=input, - check=check, - ) - except subprocess.CalledProcessError as e: - if log: - logging.error(f"\"{' '.join(str(s) for s in cmd)}\" returned non-zero exit code {e.returncode}.") - if ARG_DEBUG_SHELL.get(): - run( - [*bwrap, "sh"], - stdin=sys.stdin, - check=False, - env=env, - log=False, - ) - raise e - return result diff --git a/mkosi/state.py b/mkosi/state.py index e29175e22..79323ea5d 100644 --- a/mkosi/state.py +++ b/mkosi/state.py @@ -14,11 +14,9 @@ from mkosi.log import die class MkosiState: """State related properties.""" - def __init__(self, args: MkosiArgs, config: MkosiConfig, uid: int, gid: int) -> None: + def __init__(self, args: MkosiArgs, config: MkosiConfig) -> None: self.args = args self.config = config - self.uid = uid - self.gid = gid self._workspace = tempfile.TemporaryDirectory(dir=config.workspace_dir or Path.cwd(), prefix=".mkosi.tmp") diff --git a/mkosi/util.py b/mkosi/util.py index 55274f939..5b6b08169 100644 --- a/mkosi/util.py +++ b/mkosi/util.py @@ -5,6 +5,7 @@ import contextlib import enum import errno import functools +import importlib import itertools import logging import os @@ -13,8 +14,7 @@ import re import resource import stat import sys -import tempfile -from collections.abc import Iterable, Iterator, Sequence +from collections.abc import Iterable, Iterator from pathlib import Path from typing import Any, Callable, Optional, TypeVar @@ -258,30 +258,6 @@ def chdir(directory: Path) -> Iterator[None]: os.chdir(old) -@contextlib.contextmanager -def prepend_to_environ_path(paths: Sequence[Path]) -> Iterator[None]: - if not paths: - yield - return - - with tempfile.TemporaryDirectory(prefix="mkosi.path", dir=tmp_dir()) as d: - - for path in paths: - if not path.is_dir(): - Path(d).joinpath(path.name).symlink_to(path.absolute()) - - paths = [Path(d), *paths] - - news = [os.fspath(path) for path in paths if path.is_dir()] - olds = os.getenv("PATH", "").split(":") - os.environ["PATH"] = ":".join(news + olds) - - try: - yield - finally: - os.environ["PATH"] = ":".join(olds) - - def qemu_check_kvm_support() 
-> bool: kvm = Path("/dev/kvm") if not kvm.is_char_device(): @@ -326,3 +302,10 @@ def format_bytes(num_bytes: int) -> str: def make_executable(path: Path) -> None: st = path.stat() os.chmod(path, st.st_mode | stat.S_IEXEC) + + +def try_import(module: str) -> None: + try: + importlib.import_module(module) + except ModuleNotFoundError: + pass -- 2.47.2
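
For readers following the run_verb() changes above, the mount namespace and privilege handling reduces to a short sequence: unshare a mount namespace, make mount propagation towards the host a slave, remount the top level directories read-only, overmount /usr with the tools tree, and drop back to the invoking uid/gid for unprivileged verbs. Below is a minimal sketch of that sequence, assuming the process is already (fake) root inside a user namespace as set up by become_root(); the function names and the direct use of subprocess instead of mkosi's run() wrapper are illustrative, not part of the patch.

    import ctypes
    import ctypes.util
    import os
    import subprocess
    from pathlib import Path

    CLONE_NEWNS = 0x00020000

    def unshare(flags: int) -> None:
        # Thin ctypes wrapper around unshare(2), along the lines of mkosi/run.py.
        libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
        if libc.unshare(ctypes.c_int(flags)) != 0:
            errno = ctypes.get_errno()
            raise OSError(errno, os.strerror(errno))

    def init_mount_namespace() -> None:
        # New mount namespace; make / a recursive slave so nothing mounted
        # below propagates back to the host.
        unshare(CLONE_NEWNS)
        subprocess.run(["mount", "--make-rslave", "/"], check=True)

    def setup_mounts_and_user(tools_tree: Path, uid: int, gid: int, *, privileged: bool) -> None:
        init_mount_namespace()

        # Remount the host's top level directories read-only for extra safety.
        for d in ("/usr", "/etc", "/opt", "/srv", "/boot", "/efi"):
            if Path(d).exists():
                subprocess.run(["mount", "--rbind", d, d, "--options", "ro"], check=True)

        # Overmount /usr with the tools tree so all tools are taken from it.
        subprocess.run(["mount", "--bind", "--options", "ro",
                        os.fspath(tools_tree / "usr"), "/usr"], check=True)

        # Once the mounts are in place, unprivileged verbs no longer need to be
        # (fake) root; the mounts disappear with the namespace when we exit.
        if not privileged:
            os.setresgid(gid, gid, gid)
            os.setresuid(uid, uid, uid)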
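
The caveat about dynamically loaded python modules corresponds to the new try_import() helper in mkosi/util.py: anything python might import lazily has to be pulled in while the host's /usr is still visible. A small sketch of how run_verb() uses it; the hard-coded distribution names here are only illustrative, the patch iterates over the configured presets instead.

    import importlib

    def try_import(module: str) -> None:
        # Import a module if it is available and silently ignore it otherwise.
        try:
            importlib.import_module(module)
        except ModuleNotFoundError:
            pass

    # Force-load modules that would otherwise be imported lazily after /usr has
    # been replaced by the tools tree.
    try_import("importlib.readers")
    try_import("importlib.resources.readers")
    for distribution in ("fedora", "debian", "arch", "opensuse", "gentoo"):
        try_import(f"mkosi.distributions.{distribution}")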