log_setup()
parser = argparse.ArgumentParser(
- description='kernel-install plugin to build initrds or Unified Kernel Images using mkosi',
+ description="kernel-install plugin to build initrds or Unified Kernel Images using mkosi",
allow_abbrev=False,
- usage='50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…',
+ usage="50-mkosi.install COMMAND KERNEL_VERSION ENTRY_DIR KERNEL_IMAGE INITRD…",
)
parser.add_argument(
"--format", str(format),
"--output", output,
"--output-dir", context.staging_area,
- ]
+ ] # fmt: skip
if context.verbose:
cmdline += ["--debug"]
(context.staging_area / f"{output}.initrd").unlink()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
elif path.suffix == ".raw":
run(
["systemd-dissect", "--mount", "--mkdir", path, d],
- env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"))
+ env=dict(SYSTEMD_DISSECT_VERITY_EMBEDDED="no", SYSTEMD_DISSECT_VERITY_SIDECAR="no"),
+ )
stack.callback(lambda: run(["systemd-dissect", "--umount", "--rmdir", d]))
bases += [d]
else:
else:
if context.config.overlay or context.config.output_format in (OutputFormat.sysext, OutputFormat.confext):
if context.config.packages:
- die("Cannot install packages in extension images without a base tree",
- hint="Configure a base tree with the BaseTrees= setting")
+ die(
+ "Cannot install packages in extension images without a base tree",
+ hint="Configure a base tree with the BaseTrees= setting",
+ )
return
with complete_step(f"Installing {str(context.config.distribution).capitalize()}"):
if context.config.packages:
context.config.distribution.install_packages(context, context.config.packages)
- for f in ("var/lib/systemd/random-seed",
- "var/lib/systemd/credential.secret",
- "etc/machine-info",
- "var/lib/dbus/machine-id"):
+ for f in (
+ "var/lib/systemd/random-seed",
+ "var/lib/systemd/credential.secret",
+ "etc/machine-info",
+ "var/lib/dbus/machine-id",
+ ):
# Using missing_ok=True still raises an OSError if the mount is read-only, even if
# the file doesn't exist, so do an explicit exists() check first.
if (context.root / f).exists():
hint=(
"The root must be populated by the distribution, or from base trees, "
"skeleton trees, and prepare scripts."
- )
+ ),
)
return
with complete_step("Setting up autologin…"):
- configure_autologin_service(context, "console-getty.service",
- "--noclear --keep-baud console 115200,38400,9600")
- configure_autologin_service(context, "getty@tty1.service",
- "--noclear -")
- configure_autologin_service(context,
- "serial-getty@hvc0.service",
- "--keep-baud 115200,57600,38400,9600 -")
+ configure_autologin_service(
+ context, "console-getty.service", "--noclear --keep-baud console 115200,38400,9600"
+ )
+ configure_autologin_service(context, "getty@tty1.service", "--noclear -")
+ configure_autologin_service(context, "serial-getty@hvc0.service", "--keep-baud 115200,57600,38400,9600 -")
@contextlib.contextmanager
),
input=config.to_json(indent=None),
stdout=subprocess.PIPE,
- )
+ ) # fmt: skip
config = Config.from_json(result.stdout)
"--dir", "/work/src",
"--chdir", "/work/src",
*sources,
- ]
+ ] # fmt: skip
if (p := INVOKING_USER.home()).exists() and p != Path("/"):
# We use a writable mount here to keep git worktrees working which encode absolute
],
"mkosi-as-caller": mkosi_as_caller(),
**context.config.distribution.package_manager(context.config).scripts(context),
- }
+ } # fmt: skip
with finalize_host_scripts(context, helpers) as hd:
if script.suffix != ".chroot":
*context.config.distribution.package_manager(context.config).mounts(context),
],
scripts=hd,
- ) as sandbox:
+ ) as sandbox: # fmt: skip
yield sandbox
else:
if suppress_chown:
else []
),
*sources,
- ]
+ ] # fmt: skip
run(
["/work/prepare", arg],
else []
),
*sources,
- ]
+ ] # fmt: skip
run(
["/work/build-script", *cmdline],
else []
),
*sources,
- ]
+ ] # fmt: skip
run(
["/work/postinst", "final"],
else []
),
*sources,
- ]
+ ] # fmt: skip
run(
["/work/finalize"],
script=script,
options=options,
network=context.config.with_network,
- )
+ ),
)
"--dir", "/work/out",
"--become-root",
*sources,
- ]
+ ],
),
stdin=sys.stdin,
- )
+ ) # fmt: skip
def install_tree(
def copy() -> None:
copy_tree(
- src, t,
+ src,
+ t,
preserve=preserve,
use_subvolumes=config.use_subvolumes,
sandbox=config.sandbox,
"--bind", t.parent, workdir(t.parent),
],
),
- )
+ ) # fmt: skip
else:
# If we get an unknown file without a target, we just copy it into /.
copy()
if (p := config.tools() / "etc/crypto-policies").exists():
copy_tree(
- p, dst / "etc/crypto-policies",
+ p,
+ dst / "etc/crypto-policies",
preserve=False,
dereference=True,
sandbox=config.sandbox,
- )
+ ) # fmt: skip
if not config.sandbox_trees:
return
with complete_step("Copying in build tree…"):
copy_tree(
- context.install_dir, context.root,
+ context.install_dir,
+ context.root,
use_subvolumes=context.config.use_subvolumes,
sandbox=context.sandbox,
)
"--selinux-relabel", str(relabel),
*(["-f"] * args.force),
"--include=mkosi-initrd",
- ]
+ ] # fmt: skip
_, [config] = parse_config(cmdline + ["build"], resources=resources)
return kmods
make_cpio(
- context.root, kmods,
+ context.root,
+ kmods,
files=gen_required_kernel_modules(
- context.root, kver,
+ context.root,
+ kver,
include=finalize_kernel_modules_include(
context,
include=context.config.kernel_modules_initrd_include,
sandbox=context.sandbox,
)
-
if context.config.distribution.is_apt_distribution():
# Ubuntu Focal's kernel does not support zstd-compressed initrds so use xz instead.
if context.config.distribution == Distribution.ubuntu and context.config.release == "focal":
for p in initrds:
initrd = p.read_bytes()
n = len(initrd)
- padding = b'\0' * (round_up(n, 4) - n) # pad to 32 bit alignment
+ padding = b"\0" * (round_up(n, 4) - n) # pad to 32 bit alignment
seq.write(initrd)
seq.write(padding)
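# A quick sanity check of the padding math, assuming round_up(n, k) rounds n up to
# the next multiple of k: for n = 10, round_up(10, 4) == 12, so two NUL bytes are
# appended and the next concatenated initrd starts on a 4-byte (32-bit) boundary.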
def want_signed_pcrs(config: Config) -> bool:
- return (
- config.sign_expected_pcr == ConfigFeature.enabled or
- (
- config.sign_expected_pcr == ConfigFeature.auto and
- config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None
- )
+ return config.sign_expected_pcr == ConfigFeature.enabled or (
+ config.sign_expected_pcr == ConfigFeature.auto
+ and config.find_binary("systemd-measure", "/usr/lib/systemd/systemd-measure") is not None
)
"--output", workdir(output),
"--efi-arch", arch,
"--uname", kver,
- ]
+ ] # fmt: skip
options: list[PathString] = [
"--bind", output.parent, workdir(output.parent),
"--ro-bind", context.workspace / "cmdline", context.workspace / "cmdline",
"--ro-bind", context.root / "usr/lib/os-release", context.root / "usr/lib/os-release",
"--ro-bind", stub, stub,
- ]
+ ] # fmt: skip
if context.config.secure_boot:
assert context.config.secure_boot_key
if context.config.secure_boot_sign_tool != SecureBootSignTool.pesign:
cmd += [
"--signtool", "sbsign",
- "--secureboot-private-key",
- context.config.secure_boot_key,
- "--secureboot-certificate",
- context.config.secure_boot_certificate,
- ]
+ "--secureboot-private-key", context.config.secure_boot_key,
+ "--secureboot-certificate", context.config.secure_boot_certificate,
+ ] # fmt: skip
options += [
"--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate,
- ]
+ ] # fmt: skip
if context.config.secure_boot_key_source.type == KeySourceType.engine:
cmd += ["--signing-engine", context.config.secure_boot_key_source.source]
if context.config.secure_boot_key.exists():
context.workspace / "pesign",
"--secureboot-certificate-name",
certificate_common_name(context, context.config.secure_boot_certificate),
- ]
+ ] # fmt: skip
options += ["--ro-bind", context.workspace / "pesign", context.workspace / "pesign"]
if want_signed_pcrs(context.config):
# SHA1 might be disabled in OpenSSL depending on the distro so we opt to not sign
# for SHA1 to avoid having to manage a bunch of configuration to re-enable SHA1.
"--pcr-banks", "sha256",
- ]
+ ] # fmt: skip
if context.config.secure_boot_key.exists():
options += ["--bind", context.config.secure_boot_key, context.config.secure_boot_key]
if context.config.secure_boot_key_source.type == KeySourceType.engine:
cmd += [
"--signing-engine", context.config.secure_boot_key_source.source,
"--pcr-public-key", context.config.secure_boot_certificate,
- ]
+ ] # fmt: skip
options += [
"--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate,
- ]
+ ] # fmt: skip
cmd += ["build", "--linux", kimg]
options += ["--ro-bind", kimg, kimg]
python_binary(context.config, binary=ukify),
ukify,
sandbox=context.sandbox,
- ) >= "256" and
- (version := systemd_stub_version(context, stub)) and
- version >= "256"
+ )
+ >= "256"
+ and (version := systemd_stub_version(context, stub))
+ and version >= "256"
):
for microcode in microcodes:
cmd += ["--microcode", microcode]
def want_uki(context: Context) -> bool:
return want_efi(context.config) and (
- context.config.bootloader == Bootloader.uki or
- context.config.unified_kernel_images == ConfigFeature.enabled or (
- context.config.unified_kernel_images == ConfigFeature.auto and
- systemd_stub_binary(context).exists() and
- context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None
- )
+ context.config.bootloader == Bootloader.uki
+ or context.config.unified_kernel_images == ConfigFeature.enabled
+ or (
+ context.config.unified_kernel_images == ConfigFeature.auto
+ and systemd_stub_binary(context).exists()
+ and context.config.find_binary("ukify", "/usr/lib/systemd/ukify") is not None
+ )
)
def find_entry_token(context: Context) -> str:
if (
- not context.config.find_binary("kernel-install") or
- "--version" not in run(["kernel-install", "--help"],
- stdout=subprocess.PIPE, sandbox=context.sandbox(binary="kernel-install")).stdout or
- systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1"
+ not context.config.find_binary("kernel-install")
+ or (
+ "--version"
+ not in run(
+ ["kernel-install", "--help"], stdout=subprocess.PIPE, sandbox=context.sandbox(binary="kernel-install")
+ ).stdout
+ )
+ or systemd_tool_version("kernel-install", sandbox=context.sandbox) < "255.1"
):
return context.config.image_id or context.config.distribution.name
with umask(~0o600):
if (
- want_efi(context.config) and
- context.config.secure_boot and
- context.config.shim_bootloader != ShimBootloader.signed and
- KernelType.identify(context.config, kimg) == KernelType.pe
+ want_efi(context.config)
+ and context.config.secure_boot
+ and context.config.shim_bootloader != ShimBootloader.signed
+ and KernelType.identify(context.config, kimg) == KernelType.pe
):
kimg = sign_efi_binary(context, kimg, dst / "vmlinuz")
else:
assert config
if (
- not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line) and
- not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line) and
- (root := finalize_root(partitions))
+ not any(c.startswith("root=PARTUUID=") for c in context.config.kernel_command_line)
+ and not any(c.startswith("mount.usr=PARTUUID=") for c in context.config.kernel_command_line)
+ and (root := finalize_root(partitions))
):
cmdline = [root] + cmdline
"e": token,
"k": kver,
"h": roothash,
- "c": boot_count
+ "c": boot_count,
}
def replacer(match: re.Match[str]) -> str:
return
if context.config.bootable == ConfigFeature.auto and (
- context.config.output_format == OutputFormat.cpio or
- context.config.output_format.is_extension_image() or
- context.config.overlay
+ context.config.output_format == OutputFormat.cpio
+ or context.config.output_format.is_extension_image()
+ or context.config.overlay
):
return
if not compression or src.is_dir():
if dst:
move_tree(
- src, dst,
+ src,
+ dst,
use_subvolumes=context.config.use_subvolumes,
sandbox=context.sandbox,
)
with complete_step(f"Compressing {src} with {compression}"):
with src.open("rb") as i:
- src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file.
+ src.unlink() # if src == dst, make sure dst doesn't truncate the src file but creates a new file.
with dst.open("wb") as o:
run(cmd, stdin=i, stdout=o, sandbox=context.sandbox(binary=cmd[0]))
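# Why the unlink above works: POSIX keeps an unlinked file's data alive while a file
# descriptor (here the handle behind `i`) remains open, so when src == dst the
# dst.open("wb") above created a fresh inode instead of truncating the bytes still
# being read.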
reverse=True,
)
- if (
- (uki := context.root / efi_boot_binary(context)).exists() and
+ if (uki := context.root / efi_boot_binary(context)).exists() and (
KernelType.identify(context.config, uki) == KernelType.uki
):
pass
- elif (
- (uki := context.root / shim_second_stage_binary(context)).exists() and
+ elif (uki := context.root / shim_second_stage_binary(context)).exists() and (
KernelType.identify(context.config, uki) == KernelType.uki
):
pass
cmdline += ["--default-key", context.config.key]
cmdline += [
- "--output", workdir(context.staging / context.config.output_signature),
+ "--output",
+ workdir(context.staging / context.config.output_signature),
workdir(context.staging / context.config.output_checksum),
]
"--bind", home, home,
"--bind", context.staging, workdir(context.staging),
"--bind", "/run", "/run",
- ]
+ ] # fmt: skip
- with (complete_step("Signing SHA256SUMS…")):
+ with complete_step("Signing SHA256SUMS…"):
run(
cmdline,
env=env,
sandbox=context.sandbox(
binary="gpg",
options=options,
- )
+ ),
)
if manifest.has_data():
if ManifestFormat.json in context.config.manifest_format:
with complete_step(f"Saving manifest {context.config.output_manifest}"):
- with open(context.staging / context.config.output_manifest, 'w') as f:
+ with open(context.staging / context.config.output_manifest, "w") as f:
manifest.write_json(f)
if ManifestFormat.changelog in context.config.manifest_format:
with complete_step(f"Saving report {context.config.output_changelog}"):
- with open(context.staging / context.config.output_changelog, 'w') as f:
+ with open(context.staging / context.config.output_changelog, "w") as f:
manifest.write_package_report(f)
if config.image:
fragments += [config.image]
- key = '~'.join(str(s) for s in fragments)
+ key = "~".join(str(s) for s in fragments)
assert config.cache_dir
return (
die(f"{script} is not executable")
if config.secure_boot and not config.secure_boot_key:
- die("SecureBoot= is enabled but no secure boot key is configured",
- hint="Run mkosi genkey to generate a secure boot key/certificate pair")
+ die(
+ "SecureBoot= is enabled but no secure boot key is configured",
+ hint="Run mkosi genkey to generate a secure boot key/certificate pair",
+ )
if config.secure_boot and not config.secure_boot_certificate:
- die("SecureBoot= is enabled but no secure boot key is configured",
- hint="Run mkosi genkey to generate a secure boot key/certificate pair")
+ die(
+ "SecureBoot= is enabled but no secure boot key is configured",
+ hint="Run mkosi genkey to generate a secure boot key/certificate pair",
+ )
def check_tool(config: Config, *tools: PathString, reason: str, hint: Optional[str] = None) -> Path:
v = systemd_tool_version(tool, sandbox=config.sandbox)
if v < version:
- die(f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.",
- hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.")
+ die(
+ f"Found '{tool}' with version {v} but version {version} or newer is required to {reason}.",
+ hint=f"Use ToolsTree=default to get a newer version of '{tools[0]}'.",
+ )
def check_ukify(
v = systemd_tool_version(python_binary(config, binary=ukify), ukify, sandbox=config.sandbox)
if v < version:
- die(f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.",
- hint="Use ToolsTree=default to get a newer version of 'ukify'.")
+ die(
+ f"Found '{ukify}' with version {v} but version {version} or newer is required to {reason}.",
+ hint="Use ToolsTree=default to get a newer version of 'ukify'.",
+ )
def check_tools(config: Config, verb: Verb) -> None:
version="254",
reason="build bootable images",
hint="Use ToolsTree=default to download most required tools including ukify automatically or use "
- "Bootable=no to create a non-bootable image which doesn't require ukify",
+ "Bootable=no to create a non-bootable image which doesn't require ukify",
)
if config.output_format in (OutputFormat.disk, OutputFormat.esp):
return
if (
- not (context.root / "init").exists() and
- not (context.root / "init").is_symlink() and
- (context.root / "usr/lib/systemd/systemd").exists()
+ not (context.root / "init").exists()
+ and not (context.root / "init").is_symlink()
+ and (context.root / "usr/lib/systemd/systemd").exists()
):
(context.root / "init").symlink_to("/usr/lib/systemd/systemd")
for kver, _ in gen_kernel_images(context):
modulesd = context.root / "usr/lib/modules" / kver
- if (
- not cache and
- not context.config.kernel_modules_exclude and
- all((modulesd / o).exists() for o in outputs)
- ):
+ if not cache and not context.config.kernel_modules_exclude and all((modulesd / o).exists() for o in outputs):
mtime = (modulesd / "modules.dep").stat().st_mtime
if all(m.stat().st_mtime <= mtime for m in modulesd.rglob("*.ko*")):
continue
if not cache:
process_kernel_modules(
- context.root, kver,
+ context.root,
+ kver,
include=finalize_kernel_modules_include(
context,
include=context.config.kernel_modules_include,
return
with complete_step("Generating system users"):
- run(["systemd-sysusers", "--root=/buildroot"],
- sandbox=context.sandbox(binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"]))
+ run(
+ ["systemd-sysusers", "--root=/buildroot"],
+ sandbox=context.sandbox(binary="systemd-sysusers", options=["--bind", context.root, "/buildroot"]),
+ )
def run_tmpfiles(context: Context) -> None:
"--become-root",
],
),
- )
+ ) # fmt: skip
def run_preset(context: Context) -> None:
return
with complete_step("Applying presets…"):
- run(["systemctl", "--root=/buildroot", "preset-all"],
- sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]))
- run(["systemctl", "--root=/buildroot", "--global", "preset-all"],
- sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]))
+ run(
+ ["systemctl", "--root=/buildroot", "preset-all"],
+ sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]),
+ )
+ run(
+ ["systemctl", "--root=/buildroot", "--global", "preset-all"],
+ sandbox=context.sandbox(binary="systemctl", options=["--bind", context.root, "/buildroot"]),
+ )
def run_hwdb(context: Context) -> None:
return
with complete_step("Generating hardware database"):
- run(["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"],
- sandbox=context.sandbox(binary="systemd-hwdb", options=["--bind", context.root, "/buildroot"]))
+ run(
+ ["systemd-hwdb", "--root=/buildroot", "--usr", "--strict", "update"],
+ sandbox=context.sandbox(binary="systemd-hwdb", options=["--bind", context.root, "/buildroot"]),
+ )
# Remove any existing hwdb in /etc in favor of the one we just put in /usr.
(context.root / "etc/udev/hwdb.bin").unlink(missing_ok=True)
("--hostname", None, context.config.hostname),
("--root-password-hashed", "passwd.hashed-password.root", password),
("--root-shell", "passwd.shell.root", context.config.root_shell),
- )
+ ) # fmt: skip
options = []
creds = []
return
with complete_step("Applying first boot settings"):
- run(["systemd-firstboot", "--root=/buildroot", "--force", *options],
- sandbox=context.sandbox(binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"]))
+ run(
+ ["systemd-firstboot", "--root=/buildroot", "--force", *options],
+ sandbox=context.sandbox(binary="systemd-firstboot", options=["--bind", context.root, "/buildroot"]),
+ )
# Initrds generally don't ship with only /usr so there's not much point in putting the
# credentials in /usr/lib/credstore.
binpolicy = Path("/buildroot") / binpolicy.relative_to(context.root)
with complete_step(f"Relabeling files using {policy} policy"):
- run([setfiles, "-mFr", "/buildroot", "-c", binpolicy, fc, "/buildroot"],
+ run(
+ [setfiles, "-mFr", "/buildroot", "-c", binpolicy, fc, "/buildroot"],
sandbox=context.sandbox(binary=setfiles, options=["--bind", context.root, "/buildroot"]),
- check=context.config.selinux_relabel == ConfigFeature.enabled)
+ check=context.config.selinux_relabel == ConfigFeature.enabled,
+ )
def need_build_overlay(config: Config) -> bool:
rmtree(final, sandbox=context.sandbox)
move_tree(
- context.root, final,
+ context.root,
+ final,
use_subvolumes=context.config.use_subvolumes,
sandbox=context.sandbox,
)
if need_build_overlay(context.config) and (context.workspace / "build-overlay").exists():
rmtree(build, sandbox=context.sandbox)
move_tree(
- context.workspace / "build-overlay", build,
+ context.workspace / "build-overlay",
+ build,
use_subvolumes=context.config.use_subvolumes,
sandbox=context.sandbox,
)
if prev != json.loads(new):
logging.info("Cache manifest mismatch, not reusing cached images")
if ARG_DEBUG.get():
- run(["diff", manifest, "-"], input=new, check=False,
- sandbox=config.sandbox(binary="diff", options=["--bind", manifest, manifest]))
+ run(
+ ["diff", manifest, "-"],
+ input=new,
+ check=False,
+ sandbox=config.sandbox(binary="diff", options=["--bind", manifest, manifest]),
+ )
return False
else:
with complete_step("Copying cached trees"):
copy_tree(
- final, context.root,
+ final,
+ context.root,
use_subvolumes=context.config.use_subvolumes,
sandbox=context.sandbox,
)
f"--offline={yes_no(context.config.repart_offline)}",
"--seed", str(context.config.seed),
workdir(context.staging / context.config.output_with_format),
- ]
+ ] # fmt: skip
options: list[PathString] = [
# Make sure we're root so that the mkfs tools invoked by systemd-repart think the files
# that go into the disk image are owned by root.
"--become-root",
"--bind", context.staging, workdir(context.staging),
- ]
+ ] # fmt: skip
if root:
cmdline += ["--root=/buildroot"]
sandbox=context.sandbox(
binary="systemd-repart",
devices=(
- not context.config.repart_offline or
- context.config.verity_key_source.type != KeySourceType.file
+ not context.config.repart_offline
+ or context.config.verity_key_source.type != KeySourceType.file
),
options=options,
),
else:
bootloader = None
- esp = (
- context.config.bootable == ConfigFeature.enabled or
- (context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists())
+ esp = context.config.bootable == ConfigFeature.enabled or (
+ context.config.bootable == ConfigFeature.auto and bootloader and bootloader.exists()
)
- bios = (context.config.bootable != ConfigFeature.disabled and want_grub_bios(context))
+ bios = context.config.bootable != ConfigFeature.disabled and want_grub_bios(context)
if esp or bios:
# Even if we're doing BIOS, let's still use the ESP to store the kernels, initrds
],
"annotations": {
"io.systemd.mkosi.version": __version__,
- **({
- "org.opencontainers.image.version": context.config.image_version,
- } if context.config.image_version else {}),
- }
+ **(
+ {
+ "org.opencontainers.image.version": context.config.image_version,
+ }
+ if context.config.image_version
+ else {}
+ ),
+ },
}
oci_manifest_blob = json.dumps(oci_manifest)
oci_manifest_digest = hashlib.sha256(oci_manifest_blob.encode()).hexdigest()
"--size=auto",
"--definitions", r,
workdir(output),
- ]
+ ] # fmt: skip
options: list[PathString] = [
# Make sure we're root so that the mkfs tools invoked by systemd-repart think the files
# that go into the disk image are owned by root.
"--bind", output.parent, workdir(output.parent),
"--ro-bind", context.root, "/buildroot",
"--ro-bind", r, r,
- ]
+ ] # fmt: skip
if not context.config.architecture.is_native():
cmdline += ["--architecture", str(context.config.architecture)]
sandbox=context.sandbox(
binary="systemd-repart",
devices=(
- not context.config.repart_offline or
- context.config.verity_key_source.type != KeySourceType.file
+ not context.config.repart_offline
+ or context.config.verity_key_source.type != KeySourceType.file
),
options=options,
),
os.chmod(f, context.config.output_mode)
move_tree(
- f, context.config.output_dir_or_cwd(),
+ f,
+ context.config.output_dir_or_cwd(),
use_subvolumes=context.config.use_subvolumes,
sandbox=context.sandbox,
)
st = os.stat(path, follow_symlinks=False)
orig = (st.st_atime_ns, st.st_mtime_ns)
updated = (min(orig[0], mtime * 1_000_000_000),
- min(orig[1], mtime * 1_000_000_000))
+ min(orig[1], mtime * 1_000_000_000)) # fmt: skip
if orig != updated:
os.utime(path, ns=updated, follow_symlinks=False)
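# The clamp in numbers, assuming mtime is an epoch timestamp in seconds: with
# mtime = 1_600_000_000 and an original atime of 1_700_000_000_000_000_000 ns,
# min() yields 1_600_000_000_000_000_000 ns; timestamps only ever move backwards,
# which keeps repeated builds reproducible.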
with contextlib.ExitStack() as stack:
workspace = Path(tempfile.mkdtemp(dir=config.workspace_dir_or_default(), prefix="mkosi-workspace-"))
# Discard setuid/setgid bits as these are inherited and can leak into the image.
- workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID|stat.S_ISUID))
+ workspace.chmod(stat.S_IMODE(workspace.stat().st_mode) & ~(stat.S_ISGID | stat.S_ISUID))
stack.callback(lambda: rmtree(workspace, sandbox=config.sandbox))
(workspace / "tmp").mkdir(mode=0o1777)
- with scopedenv({"TMPDIR" : os.fspath(workspace / "tmp")}):
+ with scopedenv({"TMPDIR": os.fspath(workspace / "tmp")}):
try:
yield Path(workspace)
except BaseException:
exclude: list[PathString]
if d == "cache":
exclude = flatten(
- ("--ro-bind", tmp, p)
- for p in config.distribution.package_manager(config).cache_subdirs(src)
+ ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).cache_subdirs(src)
)
else:
exclude = flatten(
- ("--ro-bind", tmp, p)
- for p in config.distribution.package_manager(config).state_subdirs(src)
+ ("--ro-bind", tmp, p) for p in config.distribution.package_manager(config).state_subdirs(src)
)
subdst = dst / d / subdir
wantrepo = (
(
not cached
- and (
- context.config.packages
- or context.config.build_packages
- or context.config.prepare_scripts
- )
+ and (context.config.packages or context.config.build_packages or context.config.prepare_scripts)
)
or context.config.volatile_packages
or context.config.postinst_scripts
context.root.rename(context.staging / context.config.output_with_format)
if context.config.output_format not in (OutputFormat.uki, OutputFormat.esp):
- maybe_compress(context, context.config.compress_output,
- context.staging / context.config.output_with_format,
- context.staging / context.config.output_with_compression)
+ maybe_compress(
+ context,
+ context.config.compress_output,
+ context.staging / context.config.output_with_format,
+ context.staging / context.config.output_with_compression,
+ )
calculate_sha256sum(context)
calculate_signature(context)
# copy to avoid ending up with files not owned by the directory image owner in the
# directory image.
if config.ephemeral or (
- config.output_format == OutputFormat.directory and
- args.verb == Verb.boot and
- (config.output_dir_or_cwd() / config.output).stat().st_uid != 0
+ config.output_format == OutputFormat.directory
+ and args.verb == Verb.boot
+ and (config.output_dir_or_cwd() / config.output).stat().st_uid != 0
):
fname = stack.enter_context(copy_ephemeral(config, config.output_dir_or_cwd() / config.output))
else:
devices=True,
options=["--bind", fname, fname],
),
- )
+ ) # fmt: skip
if config.output_format == OutputFormat.directory:
cmdline += ["--directory", fname]
cmdline += ["--bind", f"{path}:/root:norbind,{uidmap}"]
if config.runtime_scratch == ConfigFeature.enabled or (
- config.runtime_scratch == ConfigFeature.auto and
- config.output_format == OutputFormat.disk
+ config.runtime_scratch == ConfigFeature.auto and config.output_format == OutputFormat.disk
):
scratch = stack.enter_context(tempfile.TemporaryDirectory(dir="/var/tmp"))
os.chmod(scratch, 0o1777)
cmdline += [
"--bind", f"{addr}:/run/host/journal/socket",
"--set-credential=journal.forward_to_socket:/run/host/journal/socket",
- ]
+ ] # fmt: skip
for p in config.unit_properties:
cmdline += ["--property", p]
run(
[python_binary(config, binary=None), "-m", "http.server", "8081"],
- stdin=sys.stdin, stdout=sys.stdout,
+ stdin=sys.stdin,
+ stdout=sys.stdout,
sandbox=config.sandbox(
binary=python_binary(config, binary=None),
network=True,
for f in ("mkosi.key", "mkosi.crt"):
if Path(f).exists() and not args.force:
- die(f"{f} already exists",
- hint=("To generate new keys, first remove mkosi.key and mkosi.crt"))
+ die(f"{f} already exists", hint=("To generate new keys, first remove mkosi.key and mkosi.crt"))
log_step(f"Generating keys rsa:{keylength} for CN {cn!r}.")
logging.info(
"-nodes"
],
env=dict(OPENSSL_CONF="/dev/null"),
- )
+ ) # fmt: skip
def bump_image_version() -> None:
return
with tempfile.TemporaryDirectory(prefix="mkosi.path-") as d:
-
for path in config.extra_search_paths:
if not path.is_dir():
(Path(d) / path.name).symlink_to(path.absolute())
def finalize_default_tools(args: Args, config: Config, *, resources: Path) -> Config:
if not config.tools_tree_distribution:
- die(f"{config.distribution} does not have a default tools tree distribution",
- hint="use ToolsTreeDistribution= to set one explicitly")
+ die(
+ f"{config.distribution} does not have a default tools tree distribution",
+ hint="use ToolsTreeDistribution= to set one explicitly",
+ )
cmdline = [
"--directory", "",
*(["--proxy-client-certificate", str(p)] if (p := config.proxy_client_certificate) else []),
*(["--proxy-client-key", str(p)] if (p := config.proxy_client_key) else []),
*(["-f"] * args.force),
- ]
+ ] # fmt: skip
_, [tools] = parse_config(
cmdline + ["--include=mkosi-tools", "build"],
for tree in config.build_sources:
if wd.is_relative_to(tree.source):
- die(f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})",
+ die(
+ f"The workspace directory ({wd}) cannot be a subdirectory of any source directory ({tree.source})",
hint="Set BuildSources= to the empty string or use WorkspaceDirectory= to configure a different "
- "workspace directory")
+ "workspace directory",
+ )
def run_clean_scripts(config: Config) -> None:
"--ro-bind", json, "/work/config.json",
*(["--bind", str(o), "/work/out"] if (o := config.output_dir_or_cwd()).exists() else []),
*sources,
- ]
+ ],
),
stdin=sys.stdin,
- )
+ ) # fmt: skip
def needs_build(args: Args, config: Config, force: int = 1) -> bool:
return (
- args.force >= force or
- not (config.output_dir_or_cwd() / config.output_with_compression).exists() or
+ args.force >= force
+ or not (config.output_dir_or_cwd() / config.output_with_compression).exists()
+ or
# When the output is a directory, its name is the same as the symlink we create that points
# to the actual output when not building a directory. So if the full output path exists, we
# have to check that it's not a symlink as well.
complete_step(f"Removing output files of {config.name()} image…"),
flock_or_die(config.output_dir_or_cwd() / config.output)
if (config.output_dir_or_cwd() / config.output).exists()
- else contextlib.nullcontext()
+ else contextlib.nullcontext(),
):
rmtree(*outputs, sandbox=sandbox)
lock_repository_metadata(config),
):
rmtree(
- *(
- config.package_cache_dir_or_default() / d / subdir
- for d in ("cache", "lib")
- ),
+ *(config.package_cache_dir_or_default() / d / subdir for d in ("cache", "lib")),
sandbox=sandbox,
)
st = config.build_dir.stat()
# Discard setuid/setgid bits if set as these are inherited and can leak into the image.
- if stat.S_IMODE(st.st_mode) & (stat.S_ISGID|stat.S_ISUID):
- config.build_dir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID|stat.S_ISUID))
+ if stat.S_IMODE(st.st_mode) & (stat.S_ISGID | stat.S_ISUID):
+ config.build_dir.chmod(stat.S_IMODE(st.st_mode) & ~(stat.S_ISGID | stat.S_ISUID))
def metadata_cache(config: Config) -> Path:
# If we have a metadata cache, at least one image is cached, and using cached metadata is not explicitly
# disabled, reuse the metadata cache.
if (
- last.incremental and
- metadata_cache(last).exists() and
- last.cacheonly != Cacheonly.never and
- any(have_cache(config) for config in images)
+ last.incremental
+ and metadata_cache(last).exists()
+ and last.cacheonly != Cacheonly.never
+ and any(have_cache(config) for config in images)
):
with complete_step("Copying cached package manager metadata"):
copy_tree(metadata_cache(last), dst, use_subvolumes=last.use_subvolumes, sandbox=last.sandbox)
unshare(CLONE_NEWNS)
if os.getuid() == 0:
- mount("", "/", "", MS_SLAVE|MS_REC, "")
+ mount("", "/", "", MS_SLAVE | MS_REC, "")
# For extra safety when running as root, remount a bunch of stuff read-only.
# Because some build systems use output directories in /usr, we only remount
attrs = MOUNT_ATTR_RDONLY
if d not in ("/usr", "/opt"):
- attrs |= MOUNT_ATTR_NOSUID|MOUNT_ATTR_NODEV|MOUNT_ATTR_NOEXEC
+ attrs |= MOUNT_ATTR_NOSUID | MOUNT_ATTR_NODEV | MOUNT_ATTR_NOEXEC
mount_rbind(d, d, attrs)
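# Propagation sketch: MS_SLAVE | MS_REC on "/" turns every existing mount into a
# recursive slave, so mounts created in this namespace can no longer propagate back
# to the host, while host-side mount changes still flow in; the read-only remounts
# above then only affect this namespace.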
return
if all(config == Config.default() for config in images):
- die("No configuration found",
- hint="Make sure mkosi is run from a directory with configuration files")
+ die("No configuration found", hint="Make sure mkosi is run from a directory with configuration files")
if args.verb == Verb.summary:
if args.json:
text = json.dumps(
- {"Images": [config.to_dict() for config in images]},
- cls=JsonEncoder,
- indent=4,
- sort_keys=True
+ {"Images": [config.to_dict() for config in images]}, cls=JsonEncoder, indent=4, sort_keys=True
)
else:
text = "\n".join(summary(config) for config in images)
assert args.verb.needs_build()
if (
- tools and
- not (tools.output_dir_or_cwd() / tools.output).exists() and
- args.verb != Verb.build and
- not args.force
+ tools
+ and not (tools.output_dir_or_cwd() / tools.output).exists()
+ and args.verb != Verb.build
+ and not args.force
):
- die(f"Default tools tree requested for image '{last.name()}' but it has not been built yet",
- hint="Make sure to build the image first with 'mkosi build' or use '--force'")
+ die(
+ f"Default tools tree requested for image '{last.name()}' but it has not been built yet",
+ hint="Make sure to build the image first with 'mkosi build' or use '--force'",
+ )
if not last.repart_offline and os.getuid() != 0:
die(f"Must be root to build {last.name()} image configured with RepartOffline=no")
return
if args.verb != Verb.build and not args.force and not output.exists():
- die(f"Image '{last.name()}' has not been built yet",
- hint="Make sure to build the image first with 'mkosi build' or use '--force'")
+ die(
+ f"Image '{last.name()}' has not been built yet",
+ hint="Make sure to build the image first with 'mkosi build' or use '--force'",
+ )
check_workspace_directory(last)
# If we're doing an incremental build and the cache is not out of date, don't clean up the
# tools tree so that we can reuse the previous one.
- if (
- tools and
- (
- not tools.incremental or
- ((args.verb == Verb.build or args.force > 0) and not have_cache(tools)) or
- needs_build(args, tools, force=2)
- )
+ if tools and (
+ not tools.incremental
+ or ((args.verb == Verb.build or args.force > 0) and not have_cache(tools))
+ or needs_build(args, tools, force=2)
):
run_clean(args, tools, resources=resources)
tools.output_dir_or_cwd() / tools.output
if tools and config.tools_tree == Path("default")
else config.tools_tree
- )
+ ),
)
with prepend_to_environ_path(config):
return
if (
- last.output_format == OutputFormat.directory and
- (last.output_dir_or_cwd() / last.output).stat().st_uid == 0 and
- os.getuid() != 0
+ last.output_format == OutputFormat.directory
+ and (last.output_dir_or_cwd() / last.output).stat().st_uid == 0
+ and os.getuid() != 0
):
- die("Cannot operate on directory images built as root when running unprivileged",
- hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image")
+ die(
+ "Cannot operate on directory images built as root when running unprivileged",
+ hint="Clean the root owned image by running mkosi -ff clean as root and then rebuild the image",
+ )
with prepend_to_environ_path(last):
run_vm = {
"--exclude", "./tmp/*",
"--exclude", "./run/*",
"--exclude", "./var/tmp/*",
- ]
+ ] # fmt: skip
def make_tar(src: Path, dst: Path, *, sandbox: SandboxProtocol = nosandbox) -> None:
stdout=f,
# Make sure tar uses user/group information from the root directory instead of the host.
sandbox=sandbox(binary="tar", options=["--ro-bind", src, src, *finalize_passwd_mounts(src)]),
- )
+ ) # fmt: skip
def can_extract_tar(src: Path) -> bool:
sandbox=sandbox(
binary="tar",
# Make sure tar uses user/group information from the root directory instead of the host.
- options=["--ro-bind", src, src, "--bind", dst, dst, *finalize_passwd_mounts(dst)]
+ options=["--ro-bind", src, src, "--bind", dst, dst, *finalize_passwd_mounts(dst)],
),
- )
+ ) # fmt: skip
def make_cpio(
input="\0".join(os.fspath(f) for f in files),
stdout=f,
sandbox=sandbox(binary="cpio", options=["--ro-bind", src, src, *finalize_passwd_mounts(src)]),
- )
+ ) # fmt: skip
@contextlib.contextmanager
def _tempfile(
reader,
- suffix='',
+ suffix="",
# gh-93353: Keep a reference to call os.remove() in late Python
# finalization.
*,
except FileNotFoundError:
pass
+
@no_type_check
def _temp_file(path):
return _tempfile(path.read_bytes, suffix=path.name)
+
@no_type_check
def _is_present_dir(path) -> bool:
"""
return path.is_dir()
return False
+
@no_type_check
@functools.singledispatch
def as_file(path):
"""
return _temp_dir(path) if _is_present_dir(path) else _temp_file(path)
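# In short, this vendored as_file() (see the gh-93353 note above) yields a real
# filesystem path for a resource, materializing it into a temporary file or
# directory only when the resource is not already present on disk.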
+
@no_type_check
@contextlib.contextmanager
def _temp_path(dir: tempfile.TemporaryDirectory):
with dir as result:
yield Path(result)
+
@no_type_check
@contextlib.contextmanager
def _temp_dir(path):
with _temp_path(tempfile.TemporaryDirectory()) as temp_dir:
yield _write_contents(temp_dir, path)
+
@no_type_check
def _write_contents(target, source):
child = target.joinpath(source.name)
return False
if (
- (config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay)
- and config.bootable == ConfigFeature.auto
- ):
+ config.output_format == OutputFormat.cpio or config.output_format.is_extension_image() or config.overlay
+ ) and config.bootable == ConfigFeature.auto:
return False
if config.architecture.to_efi() is None:
with (
complete_step(f"Generating grub image for {target}"),
- tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig
+ tempfile.NamedTemporaryFile("w", prefix="grub-early-config") as earlyconfig,
):
earlyconfig.write(
textwrap.dedent(
*(["--ro-bind", str(sbat), str(sbat)] if sbat else []),
],
),
- )
+ ) # fmt: skip
def find_signed_grub_image(context: Context) -> Optional[Path]:
arch = context.config.architecture.to_efi()
patterns = [
- f"usr/lib/grub/*-signed/grub{arch}.efi.signed", # Debian/Ubuntu
- f"boot/efi/EFI/*/grub{arch}.efi", # Fedora/CentOS
- "usr/share/efi/*/grub.efi", # OpenSUSE
+ f"usr/lib/grub/*-signed/grub{arch}.efi.signed", # Debian/Ubuntu
+ f"boot/efi/EFI/*/grub{arch}.efi", # Fedora/CentOS
+ "usr/share/efi/*/grub.efi", # OpenSUSE
]
for p in flatten(context.root.glob(pattern) for pattern in patterns):
def python_binary(config: Config, *, binary: Optional[PathString]) -> PathString:
tools = (
- not binary or
- not (path := config.find_binary(binary)) or
- not any(path.is_relative_to(d) for d in config.extra_search_paths)
+ not binary
+ or not (path := config.find_binary(binary))
+ or not any(path.is_relative_to(d) for d in config.extra_search_paths)
)
# If there's no tools tree, prefer the interpreter from MKOSI_INTERPRETER. If there is a tools
"--bind", mountinfo.name, "/proc/self/mountinfo",
],
),
- )
+ ) # fmt: skip
def efi_boot_binary(context: Context) -> Path:
],
stdout=subprocess.PIPE,
sandbox=context.sandbox(binary="openssl", options=["--ro-bind", certificate, certificate]),
- ).stdout
+ ).stdout # fmt: skip
for line in output.splitlines():
if not line.strip().startswith("commonName"):
die(f"Certificate {certificate} is missing Common Name")
-
def pesign_prepare(context: Context) -> None:
assert context.config.secure_boot_key
assert context.config.secure_boot_certificate
"--ro-bind", context.config.secure_boot_certificate, context.config.secure_boot_certificate,
],
),
- )
+ ) # fmt: skip
(context.workspace / "pesign").mkdir(exist_ok=True)
"--ro-bind", context.workspace / "pesign", context.workspace / "pesign",
],
),
- )
+ ) # fmt: skip
def sign_efi_binary(context: Context, input: Path, output: Path) -> Path:
assert context.config.secure_boot_certificate
if (
- context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign or
- context.config.secure_boot_sign_tool == SecureBootSignTool.auto and
- context.config.find_binary("sbsign") is not None
+ context.config.secure_boot_sign_tool == SecureBootSignTool.sbsign
+ or context.config.secure_boot_sign_tool == SecureBootSignTool.auto
+ and context.config.find_binary("sbsign") is not None
):
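# Precedence note: "A or B and C" parses as "A or (B and C)", so an explicit
# SecureBootSignTool=sbsign always takes this branch, and auto only does when
# sbsign is actually found.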
cmd: list[PathString] = [
"sbsign",
"--cert", workdir(context.config.secure_boot_certificate),
"--output", workdir(output),
- ]
+ ] # fmt: skip
options: list[PathString] = [
"--ro-bind", context.config.secure_boot_certificate, workdir(context.config.secure_boot_certificate),
"--ro-bind", input, workdir(input),
"--bind", output.parent, workdir(output.parent),
- ]
+ ] # fmt: skip
if context.config.secure_boot_key_source.type == KeySourceType.engine:
cmd += ["--engine", context.config.secure_boot_key_source.source]
if context.config.secure_boot_key.exists():
binary="sbsign",
options=options,
devices=context.config.secure_boot_key_source.type != KeySourceType.file,
- )
+ ),
)
elif (
- context.config.secure_boot_sign_tool == SecureBootSignTool.pesign or
- context.config.secure_boot_sign_tool == SecureBootSignTool.auto and
- context.config.find_binary("pesign") is not None
+ context.config.secure_boot_sign_tool == SecureBootSignTool.pesign
+ or context.config.secure_boot_sign_tool == SecureBootSignTool.auto
+ and context.config.find_binary("pesign") is not None
):
pesign_prepare(context)
run(
"--bind", output.parent, workdir(output),
]
),
- )
+ ) # fmt: skip
else:
die("One of sbsign or pesign is required to use SecureBoot=")
for kver in sorted(
(k for k in (context.root / "usr/lib/modules").iterdir() if k.is_dir()),
key=lambda k: GenericVersion(k.name),
- reverse=True
+ reverse=True,
):
# Make sure we look for anything that remotely resembles vmlinuz, as
# the arch specific install scripts in the kernel source tree sometimes
signed = context.config.shim_bootloader == ShimBootloader.signed
if not directory.glob("*.efi.signed" if signed else "*.efi"):
if context.config.bootable == ConfigFeature.enabled:
- die(f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}"
- f"systemd-boot binary was not found at {directory.relative_to(context.root)}")
+ die(
+ f"An EFI bootable image with systemd-boot was requested but a {'signed ' if signed else ''}"
+ f"systemd-boot binary was not found at {directory.relative_to(context.root)}"
+ )
return
if context.config.secure_boot and not signed:
with complete_step("Signing systemd-boot binaries…"):
- for input in itertools.chain(directory.glob('*.efi'), directory.glob('*.EFI')):
+ for input in itertools.chain(directory.glob("*.efi"), directory.glob("*.EFI")):
output = directory / f"{input}.signed"
sign_efi_binary(context, input, output)
"--bind", context.workspace, workdir(context.workspace),
],
),
- )
+ ) # fmt: skip
with umask(~0o600):
run(
"--ro-bind", context.workspace / "mkosi.der", workdir(context.workspace / "mkosi.der"),
]
),
- )
+ ) # fmt: skip
# We reuse the key for all secure boot databases to keep things simple.
for db in ["PK", "KEK", "db"]:
"NON_VOLATILE,BOOTSERVICE_ACCESS,RUNTIME_ACCESS,TIME_BASED_AUTHENTICATED_WRITE_ACCESS",
"--cert", workdir(context.config.secure_boot_certificate),
"--output", workdir(keys / f"{db}.auth"),
- ]
+ ] # fmt: skip
options: list[PathString] = [
"--ro-bind",
context.config.secure_boot_certificate,
workdir(context.config.secure_boot_certificate),
"--ro-bind", context.workspace / "mkosi.esl", workdir(context.workspace / "mkosi.esl"),
"--bind", keys, workdir(keys),
- ]
+ ] # fmt: skip
if context.config.secure_boot_key_source.type == KeySourceType.engine:
cmd += ["--engine", context.config.secure_boot_key_source.source]
if context.config.secure_boot_key.exists():
- cmd += ["--key", workdir(context.config.secure_boot_key),]
+ cmd += ["--key", workdir(context.config.secure_boot_key)]
options += [
"--ro-bind", context.config.secure_boot_key, workdir(context.config.secure_boot_key),
- ]
+ ] # fmt: skip
else:
cmd += ["--key", context.config.secure_boot_key]
cmd += [db, workdir(context.workspace / "mkosi.esl")]
arch = context.config.architecture.to_efi()
signed = [
- f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu
- f"usr/lib/shim/shim{arch}.efi.signed", # Debian
- f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS
- "usr/share/efi/*/shim.efi", # OpenSUSE
+ f"usr/lib/shim/shim{arch}.efi.signed.latest", # Ubuntu
+ f"usr/lib/shim/shim{arch}.efi.signed", # Debian
+ f"boot/efi/EFI/*/shim{arch}.efi", # Fedora/CentOS
+ "usr/share/efi/*/shim.efi", # OpenSUSE
]
unsigned = [
- f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu
- f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS
- f"usr/share/shim/shim{arch}.efi", # Arch
+ f"usr/lib/shim/shim{arch}.efi", # Debian/Ubuntu
+ f"usr/share/shim/*/*/shim{arch}.efi", # Fedora/CentOS
+ f"usr/share/shim/shim{arch}.efi", # Arch
]
find_and_install_shim_binary(context, "shim", signed, unsigned, dst)
signed = [
- f"usr/lib/shim/mm{arch}.efi.signed", # Debian
- f"usr/lib/shim/mm{arch}.efi", # Ubuntu
- f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS
- "usr/share/efi/*/MokManager.efi", # OpenSUSE
+ f"usr/lib/shim/mm{arch}.efi.signed", # Debian
+ f"usr/lib/shim/mm{arch}.efi", # Ubuntu
+ f"boot/efi/EFI/*/mm{arch}.efi", # Fedora/CentOS
+ "usr/share/efi/*/MokManager.efi", # OpenSUSE
]
unsigned = [
- f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu
- f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS
- f"usr/share/shim/mm{arch}.efi", # Arch
+ f"usr/lib/shim/mm{arch}.efi", # Debian/Ubuntu
+ f"usr/share/shim/*/*/mm{arch}.efi", # Fedora/CentOS
+ f"usr/share/shim/mm{arch}.efi", # Arch
]
find_and_install_shim_binary(context, "mok", signed, unsigned, dst.parent)
class CompGen(StrEnum):
default = enum.auto()
- files = enum.auto()
- dirs = enum.auto()
+ files = enum.auto()
+ dirs = enum.auto()
@staticmethod
def from_action(action: argparse.Action) -> "CompGen":
compgen=CompGen.from_action(action),
)
for action in parser._actions
- if (action.option_strings and
- action.help != argparse.SUPPRESS and
- action.dest not in config.SETTINGS_LOOKUP_BY_DEST)
+ if (
+ action.option_strings
+ and action.help != argparse.SUPPRESS
+ and action.dest not in config.SETTINGS_LOOKUP_BY_DEST
+ )
]
options += [
def to_bash_hasharray(name: str, entries: Mapping[str, Union[str, int]]) -> str:
return (
- f"{name.replace('-', '_')}=(" +
- " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items()) + ")"
+ f"{name.replace('-', '_')}=("
+ + " ".join(f"[{shlex.quote(str(k))}]={shlex.quote(str(v))}" for k, v in entries.items())
+ + ")"
)
completion = resources / "completion.bash"
c.write("complete -c mkosi -n '__fish_is_first_token' -a \"")
c.write(" ".join(str(v) for v in config.Verb))
- c.write("\"\n")
+ c.write('"\n')
for option in options:
if not option.short and not option.long:
if isinstance(option.nargs, int) and option.nargs > 0:
c.write("-r ")
if option.choices:
- c.write("-a \"")
+ c.write('-a "')
c.write(" ".join(option.choices))
- c.write("\" ")
+ c.write('" ')
if option.help is not None:
help = option.help.replace("'", "\\'")
- c.write(f"-d \"{help}\" ")
+ c.write(f'-d "{help}" ')
c.write(option.compgen.to_fish())
c.write("\n")
if not args.cmdline:
die(
"No shell to generate completion script for specified",
- hint="Please specify either one of: bash, fish, zsh"
+ hint="Please specify either one of: bash, fish, zsh",
)
shell = args.cmdline[0]
func = finalize_completion_zsh
else:
die(
- f"{shell!r} is not supported for completion scripts.",
- hint="Please specify either one of: bash, fish, zsh"
+ f"{shell!r} is not supported for completion scripts.", hint="Please specify either one of: bash, fish, zsh"
)
completion_args = collect_completion_arguments()
class Verb(StrEnum):
- build = enum.auto()
- clean = enum.auto()
- summary = enum.auto()
- cat_config = enum.auto()
- shell = enum.auto()
- boot = enum.auto()
- qemu = enum.auto()
- ssh = enum.auto()
- serve = enum.auto()
- bump = enum.auto()
- help = enum.auto()
- genkey = enum.auto()
+ build = enum.auto()
+ clean = enum.auto()
+ summary = enum.auto()
+ cat_config = enum.auto()
+ shell = enum.auto()
+ boot = enum.auto()
+ qemu = enum.auto()
+ ssh = enum.auto()
+ serve = enum.auto()
+ bump = enum.auto()
+ help = enum.auto()
+ genkey = enum.auto()
documentation = enum.auto()
- journalctl = enum.auto()
- coredumpctl = enum.auto()
- burn = enum.auto()
- dependencies = enum.auto()
- completion = enum.auto()
- sysupdate = enum.auto()
+ journalctl = enum.auto()
+ coredumpctl = enum.auto()
+ burn = enum.auto()
+ dependencies = enum.auto()
+ completion = enum.auto()
+ sysupdate = enum.auto()
def supports_cmdline(self) -> bool:
return self in (
class ConfigFeature(StrEnum):
- auto = enum.auto()
- enabled = enum.auto()
+ auto = enum.auto()
+ enabled = enum.auto()
disabled = enum.auto()
def to_tristate(self) -> str:
class SecureBootSignTool(StrEnum):
- auto = enum.auto()
+ auto = enum.auto()
sbsign = enum.auto()
pesign = enum.auto()
class OutputFormat(StrEnum):
- confext = enum.auto()
- cpio = enum.auto()
+ confext = enum.auto()
+ cpio = enum.auto()
directory = enum.auto()
- disk = enum.auto()
- esp = enum.auto()
- none = enum.auto()
- portable = enum.auto()
- sysext = enum.auto()
- tar = enum.auto()
- uki = enum.auto()
- oci = enum.auto()
+ disk = enum.auto()
+ esp = enum.auto()
+ none = enum.auto()
+ portable = enum.auto()
+ sysext = enum.auto()
+ tar = enum.auto()
+ uki = enum.auto()
+ oci = enum.auto()
def extension(self) -> str:
return {
OutputFormat.sysext: ".raw",
OutputFormat.tar: ".tar",
OutputFormat.uki: ".efi",
- }.get(self, "")
+ }.get(self, "") # fmt: skip
def use_outer_compression(self) -> bool:
return self in (OutputFormat.tar, OutputFormat.cpio, OutputFormat.disk) or self.is_extension_image()
class ManifestFormat(StrEnum):
- json = enum.auto() # the standard manifest in json format
+ json = enum.auto() # the standard manifest in json format
changelog = enum.auto() # human-readable text file with package changelogs
class Compression(StrEnum):
+ # fmt: off
none = enum.auto()
zstd = enum.auto()
zst = zstd
gzip = gz
lz4 = enum.auto()
lzma = enum.auto()
+ # fmt: on
def __bool__(self) -> bool:
return self != Compression.none
def extension(self) -> str:
- return {
- Compression.zstd: ".zst"
- }.get(self, f".{self}")
+ return {Compression.zstd: ".zst"}.get(self, f".{self}")
def oci_media_type_suffix(self) -> str:
suffix = {
Compression.none: "",
Compression.gz: "+gzip",
Compression.zstd: "+zstd",
- }.get(self)
+ }.get(self) # fmt: skip
if not suffix:
die(f"Compression {self} not supported for OCI layers")
class DocFormat(StrEnum):
- auto = enum.auto()
+ auto = enum.auto()
markdown = enum.auto()
- man = enum.auto()
- pandoc = enum.auto()
- system = enum.auto()
+ man = enum.auto()
+ pandoc = enum.auto()
+ system = enum.auto()
@classmethod
def all(cls) -> list["DocFormat"]:
class Bootloader(StrEnum):
- none = enum.auto()
- uki = enum.auto()
+ none = enum.auto()
+ uki = enum.auto()
systemd_boot = enum.auto()
- grub = enum.auto()
+ grub = enum.auto()
class BiosBootloader(StrEnum):
class ShimBootloader(StrEnum):
- none = enum.auto()
- signed = enum.auto()
+ none = enum.auto()
+ signed = enum.auto()
unsigned = enum.auto()
class Cacheonly(StrEnum):
- always = enum.auto()
- auto = enum.auto()
- none = auto
+ always = enum.auto()
+ auto = enum.auto()
+ none = auto
metadata = enum.auto()
- never = enum.auto()
+ never = enum.auto()
class QemuFirmware(StrEnum):
- auto = enum.auto()
- linux = enum.auto()
- uefi = enum.auto()
+ auto = enum.auto()
+ linux = enum.auto()
+ uefi = enum.auto()
uefi_secure_boot = enum.auto()
- bios = enum.auto()
+ bios = enum.auto()
def is_uefi(self) -> bool:
return self in (QemuFirmware.uefi, QemuFirmware.uefi_secure_boot)
class Network(StrEnum):
interface = enum.auto()
- user = enum.auto()
- none = enum.auto()
+ user = enum.auto()
+ none = enum.auto()
class Vmm(StrEnum):
- qemu = enum.auto()
+ qemu = enum.auto()
vmspawn = enum.auto()
class Architecture(StrEnum):
- alpha = enum.auto()
- arc = enum.auto()
- arm = enum.auto()
- arm64 = enum.auto()
- ia64 = enum.auto()
+ alpha = enum.auto()
+ arc = enum.auto()
+ arm = enum.auto()
+ arm64 = enum.auto()
+ ia64 = enum.auto()
loongarch64 = enum.auto()
- mips_le = enum.auto()
- mips64_le = enum.auto()
- parisc = enum.auto()
- ppc = enum.auto()
- ppc64 = enum.auto()
- ppc64_le = enum.auto()
- riscv32 = enum.auto()
- riscv64 = enum.auto()
- s390 = enum.auto()
- s390x = enum.auto()
- tilegx = enum.auto()
- x86 = enum.auto()
- x86_64 = enum.auto()
+ mips_le = enum.auto()
+ mips64_le = enum.auto()
+ parisc = enum.auto()
+ ppc = enum.auto()
+ ppc64 = enum.auto()
+ ppc64_le = enum.auto()
+ riscv32 = enum.auto()
+ riscv64 = enum.auto()
+ s390 = enum.auto()
+ s390x = enum.auto()
+ tilegx = enum.auto()
+ x86 = enum.auto()
+ x86_64 = enum.auto()
@staticmethod
def from_uname(s: str) -> "Architecture":
a = {
- "aarch64" : Architecture.arm64,
- "aarch64_be" : Architecture.arm64,
- "armv8l" : Architecture.arm,
- "armv8b" : Architecture.arm,
- "armv7ml" : Architecture.arm,
- "armv7mb" : Architecture.arm,
- "armv7l" : Architecture.arm,
- "armv7b" : Architecture.arm,
- "armv6l" : Architecture.arm,
- "armv6b" : Architecture.arm,
- "armv5tl" : Architecture.arm,
- "armv5tel" : Architecture.arm,
- "armv5tejl" : Architecture.arm,
- "armv5tejb" : Architecture.arm,
- "armv5teb" : Architecture.arm,
- "armv5tb" : Architecture.arm,
- "armv4tl" : Architecture.arm,
- "armv4tb" : Architecture.arm,
- "armv4l" : Architecture.arm,
- "armv4b" : Architecture.arm,
- "alpha" : Architecture.alpha,
- "arc" : Architecture.arc,
- "arceb" : Architecture.arc,
- "x86_64" : Architecture.x86_64,
- "i686" : Architecture.x86,
- "i586" : Architecture.x86,
- "i486" : Architecture.x86,
- "i386" : Architecture.x86,
- "ia64" : Architecture.ia64,
- "parisc64" : Architecture.parisc,
- "parisc" : Architecture.parisc,
- "loongarch64" : Architecture.loongarch64,
- "mips64" : Architecture.mips64_le,
- "mips" : Architecture.mips_le,
- "ppc64le" : Architecture.ppc64_le,
- "ppc64" : Architecture.ppc64,
- "ppc" : Architecture.ppc,
- "riscv64" : Architecture.riscv64,
- "riscv32" : Architecture.riscv32,
- "riscv" : Architecture.riscv64,
- "s390x" : Architecture.s390x,
- "s390" : Architecture.s390,
- "tilegx" : Architecture.tilegx,
- }.get(s)
+ "aarch64": Architecture.arm64,
+ "aarch64_be": Architecture.arm64,
+ "armv8l": Architecture.arm,
+ "armv8b": Architecture.arm,
+ "armv7ml": Architecture.arm,
+ "armv7mb": Architecture.arm,
+ "armv7l": Architecture.arm,
+ "armv7b": Architecture.arm,
+ "armv6l": Architecture.arm,
+ "armv6b": Architecture.arm,
+ "armv5tl": Architecture.arm,
+ "armv5tel": Architecture.arm,
+ "armv5tejl": Architecture.arm,
+ "armv5tejb": Architecture.arm,
+ "armv5teb": Architecture.arm,
+ "armv5tb": Architecture.arm,
+ "armv4tl": Architecture.arm,
+ "armv4tb": Architecture.arm,
+ "armv4l": Architecture.arm,
+ "armv4b": Architecture.arm,
+ "alpha": Architecture.alpha,
+ "arc": Architecture.arc,
+ "arceb": Architecture.arc,
+ "x86_64": Architecture.x86_64,
+ "i686": Architecture.x86,
+ "i586": Architecture.x86,
+ "i486": Architecture.x86,
+ "i386": Architecture.x86,
+ "ia64": Architecture.ia64,
+ "parisc64": Architecture.parisc,
+ "parisc": Architecture.parisc,
+ "loongarch64": Architecture.loongarch64,
+ "mips64": Architecture.mips64_le,
+ "mips": Architecture.mips_le,
+ "ppc64le": Architecture.ppc64_le,
+ "ppc64": Architecture.ppc64,
+ "ppc": Architecture.ppc,
+ "riscv64": Architecture.riscv64,
+ "riscv32": Architecture.riscv32,
+ "riscv": Architecture.riscv64,
+ "s390x": Architecture.s390x,
+ "s390": Architecture.s390,
+ "tilegx": Architecture.tilegx,
+ }.get(s) # fmt: skip
if not a:
die(f"Architecture {s} is not supported")
def to_efi(self) -> Optional[str]:
return {
- Architecture.x86_64 : "x64",
- Architecture.x86 : "ia32",
- Architecture.arm64 : "aa64",
- Architecture.arm : "arm",
- Architecture.riscv64 : "riscv64",
- Architecture.loongarch64 : "loongarch64",
- }.get(self)
+ Architecture.x86_64: "x64",
+ Architecture.x86: "ia32",
+ Architecture.arm64: "aa64",
+ Architecture.arm: "arm",
+ Architecture.riscv64: "riscv64",
+ Architecture.loongarch64: "loongarch64",
+ }.get(self) # fmt: skip
def to_qemu(self) -> str:
a = {
- Architecture.alpha : "alpha",
- Architecture.arm : "arm",
- Architecture.arm64 : "aarch64",
- Architecture.loongarch64 : "loongarch64",
- Architecture.mips64_le : "mips",
- Architecture.mips_le : "mips",
- Architecture.parisc : "hppa",
- Architecture.ppc : "ppc",
- Architecture.ppc64 : "ppc64",
- Architecture.ppc64_le : "ppc64",
- Architecture.riscv32 : "riscv32",
- Architecture.riscv64 : "riscv64",
- Architecture.s390x : "s390x",
- Architecture.x86 : "i386",
- Architecture.x86_64 : "x86_64",
- }.get(self)
+ Architecture.alpha: "alpha",
+ Architecture.arm: "arm",
+ Architecture.arm64: "aarch64",
+ Architecture.loongarch64: "loongarch64",
+ Architecture.mips64_le: "mips",
+ Architecture.mips_le: "mips",
+ Architecture.parisc: "hppa",
+ Architecture.ppc: "ppc",
+ Architecture.ppc64: "ppc64",
+ Architecture.ppc64_le: "ppc64",
+ Architecture.riscv32: "riscv32",
+ Architecture.riscv64: "riscv64",
+ Architecture.s390x: "s390x",
+ Architecture.x86: "i386",
+ Architecture.x86_64: "x86_64",
+ }.get(self) # fmt: skip
if not a:
die(f"Architecture {self} not supported by QEMU")
def to_oci(self) -> str:
a = {
- Architecture.arm : "arm",
- Architecture.arm64 : "arm64",
- Architecture.loongarch64 : "loong64",
- Architecture.mips64_le : "mips64le",
- Architecture.mips_le : "mipsle",
- Architecture.ppc : "ppc",
- Architecture.ppc64 : "ppc64",
- Architecture.ppc64_le : "ppc64le",
- Architecture.riscv32 : "riscv",
- Architecture.riscv64 : "riscv64",
- Architecture.s390x : "s390x",
- Architecture.x86 : "386",
- Architecture.x86_64 : "amd64",
- }.get(self)
+ Architecture.arm: "arm",
+ Architecture.arm64: "arm64",
+ Architecture.loongarch64: "loong64",
+ Architecture.mips64_le: "mips64le",
+ Architecture.mips_le: "mipsle",
+ Architecture.ppc: "ppc",
+ Architecture.ppc64: "ppc64",
+ Architecture.ppc64_le: "ppc64le",
+ Architecture.riscv32: "riscv",
+ Architecture.riscv64: "riscv64",
+ Architecture.s390x: "s390x",
+ Architecture.x86: "386",
+ Architecture.x86_64: "amd64",
+ }.get(self) # fmt: skip
if not a:
die(f"Architecture {self} not supported by OCI")
return self.is_x86_variant()
def can_kvm(self) -> bool:
- return (
- self == Architecture.native() or
- (Architecture.native() == Architecture.x86_64 and self == Architecture.x86)
+ return self == Architecture.native() or (
+ Architecture.native() == Architecture.x86_64 and self == Architecture.x86
)
def default_qemu_machine(self) -> str:
m = {
- Architecture.x86 : "q35",
- Architecture.x86_64 : "q35",
- Architecture.arm : "virt",
- Architecture.arm64 : "virt",
- Architecture.s390 : "s390-ccw-virtio",
- Architecture.s390x : "s390-ccw-virtio",
- Architecture.ppc : "pseries",
- Architecture.ppc64 : "pseries",
- Architecture.ppc64_le : "pseries",
- }
+ Architecture.x86: "q35",
+ Architecture.x86_64: "q35",
+ Architecture.arm: "virt",
+ Architecture.arm64: "virt",
+ Architecture.s390: "s390-ccw-virtio",
+ Architecture.s390x: "s390-ccw-virtio",
+ Architecture.ppc: "pseries",
+ Architecture.ppc64: "pseries",
+ Architecture.ppc64_le: "pseries",
+ } # fmt: skip
if self not in m:
die(f"No qemu machine defined for architecture {self}")
def default_qemu_nic_model(self) -> str:
return {
- Architecture.s390 : "virtio",
- Architecture.s390x : "virtio",
- }.get(self, "virtio-net-pci")
+ Architecture.s390: "virtio",
+ Architecture.s390x: "virtio",
+ }.get(self, "virtio-net-pci") # fmt: skip
def is_native(self) -> bool:
return self == self.native()
die(f"Invalid boolean literal: {s!r}")
-def parse_path(value: str,
- *,
- required: bool = True,
- resolve: bool = True,
- expanduser: bool = True,
- expandvars: bool = True,
- secret: bool = False,
- absolute: bool = False,
- constants: Sequence[str] = ()) -> Path:
+def parse_path(
+ value: str,
+ *,
+ required: bool = True,
+ resolve: bool = True,
+ expanduser: bool = True,
+ expandvars: bool = True,
+ secret: bool = False,
+ absolute: bool = False,
+ constants: Sequence[str] = (),
+) -> Path:
if value in constants:
return Path(value)
if secret and path.exists():
mode = path.stat().st_mode & 0o777
if mode & 0o007:
- die(textwrap.dedent(f"""\
+ die(
+ textwrap.dedent(f"""\
Permissions of '{path}' of '{mode:04o}' are too open.
When creating secret files use an access mode that restricts access to the owner only.
- """))
+ """)
+ )
return path
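# A minimal standalone sketch (not part of this patch) of the permission
# check above: masking the mode with 0o007 isolates the "other" bits, so an
# owner-only 0o600 passes (0o600 & 0o007 == 0) while a world-readable 0o644
# fails (0o644 & 0o007 == 0o4).
from pathlib import Path

def secret_is_private(path: Path) -> bool:
    # Strip the file-type bits and keep only the rwx permission bits.
    mode = path.stat().st_mode & 0o777
    # Any bit set in the "other" triplet means the file is too open.
    return mode & 0o007 == 0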
def make_tree_parser(absolute: bool = True, required: bool = False) -> Callable[[str], ConfigTree]:
def parse_tree(value: str) -> ConfigTree:
- src, sep, tgt = value.partition(':')
+ src, sep, tgt = value.partition(":")
return ConfigTree(
source=parse_path(src, required=required),
resolve=False,
expanduser=False,
absolute=absolute,
- ) if sep else None,
+ )
+ if sep
+ else None,
)
return parse_tree
return cast(
bool,
- (namespace.tools_tree_distribution == Distribution.ubuntu and namespace.distribution.is_rpm_distribution()) or
- namespace.tools_tree_distribution.is_rpm_distribution()
+ (namespace.tools_tree_distribution == Distribution.ubuntu and namespace.distribution.is_rpm_distribution())
+ or namespace.tools_tree_distribution.is_rpm_distribution(),
)
return config_match_enum
-def config_make_list_parser(delimiter: str,
- *,
- parse: Callable[[str], Any] = str,
- unescape: bool = False,
- reset: bool = True) -> ConfigParseCallback:
+def config_make_list_parser(
+ delimiter: str, *, parse: Callable[[str], Any] = str, unescape: bool = False, reset: bool = True
+) -> ConfigParseCallback:
def config_parse_list(value: Optional[str], old: Optional[list[Any]]) -> Optional[list[Any]]:
new = old.copy() if old else []
return True
-def config_make_dict_parser(delimiter: str,
- *,
- parse: Callable[[str], tuple[str, Any]],
- unescape: bool = False,
- allow_paths: bool = False,
- reset: bool = True) -> ConfigParseCallback:
+def config_make_dict_parser(
+ delimiter: str,
+ *,
+ parse: Callable[[str], tuple[str, Any]],
+ unescape: bool = False,
+ allow_paths: bool = False,
+ reset: bool = True,
+) -> ConfigParseCallback:
def config_parse_dict(value: Optional[str], old: Optional[dict[str, Any]]) -> Optional[dict[str, Any]]:
new = old.copy() if old else {}
return (key, value)
-def make_path_parser(*,
- required: bool = True,
- resolve: bool = True,
- expanduser: bool = True,
- expandvars: bool = True,
- secret: bool = False,
- constants: Sequence[str] = ()) -> Callable[[str], Path]:
+def make_path_parser(
+ *,
+ required: bool = True,
+ resolve: bool = True,
+ expanduser: bool = True,
+ expandvars: bool = True,
+ secret: bool = False,
+ constants: Sequence[str] = (),
+) -> Callable[[str], Path]:
return functools.partial(
parse_path,
required=required,
)
-def config_make_path_parser(*,
- required: bool = True,
- resolve: bool = True,
- expanduser: bool = True,
- expandvars: bool = True,
- secret: bool = False,
- constants: Sequence[str] = ()) -> ConfigParseCallback:
+def config_make_path_parser(
+ *,
+ required: bool = True,
+ resolve: bool = True,
+ expanduser: bool = True,
+ expandvars: bool = True,
+ secret: bool = False,
+ constants: Sequence[str] = (),
+) -> ConfigParseCallback:
def config_parse_path(value: Optional[str], old: Optional[Path]) -> Optional[Path]:
if not value:
return None
return None
if not is_valid_filename(value):
- die(f"{value!r} is not a valid profile",
- hint="Profile= or --profile= requires a name with no path components.")
+ die(
+ f"{value!r} is not a valid profile", hint="Profile= or --profile= requires a name with no path components."
+ )
return value
content = file.read_text()
if content.startswith("#!/"):
- die(f"{file} starts with a shebang ({content.splitlines()[0]})",
- hint="This file should be executable")
+ die(f"{file} starts with a shebang ({content.splitlines()[0]})", hint="This file should be executable")
return content
class KeySourceType(StrEnum):
- file = enum.auto()
+ file = enum.auto()
engine = enum.auto()
def __post_init__(self) -> None:
if not self.name:
- object.__setattr__(self, 'name', ''.join(x.capitalize() for x in self.dest.split('_') if x))
+ object.__setattr__(self, "name", "".join(x.capitalize() for x in self.dest.split("_") if x))
if not self.long:
object.__setattr__(self, "long", f"--{self.dest.replace('_', '-')}")
Otherwise, the text is wrapped without indentation.
"""
lines = text.splitlines()
- subindent = ' ' if lines[0].endswith(':') else ''
- return flatten(textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False,
- subsequent_indent=subindent) for line in lines)
+ subindent = " " if lines[0].endswith(":") else ""
+ return flatten(
+ textwrap.wrap(line, width, break_long_words=False, break_on_hyphens=False, subsequent_indent=subindent)
+ for line in lines
+ )
def parse_chdir(path: str) -> Optional[Path]:
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
- option_string: Optional[str] = None
+ option_string: Optional[str] = None,
) -> None:
logging.warning(f"{option_string} is no longer supported")
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None] = None,
- option_string: Optional[str] = None
+ option_string: Optional[str] = None,
) -> None:
page(parser.format_help(), namespace.pager)
parser.exit()
@classmethod
def from_namespace(cls, ns: argparse.Namespace) -> "Args":
- return cls(**{
- k: v for k, v in vars(ns).items()
- if k in inspect.signature(cls).parameters
- })
+ return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters})
def to_dict(self) -> dict[str, Any]:
return dataclasses.asdict(self, dict_factory=dict_with_capitalised_keys_factory)
k = key_transformer(k)
if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v):
- die(f"Serialized JSON has unknown field {k} with value {v}",
- hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON")
+ die(
+ f"Serialized JSON has unknown field {k} with value {v}",
+ hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON",
+ )
value_transformer = json_type_transformer(cls)
j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()}
- return dataclasses.replace(cls.default(), **{
- k: v for k, v in j.items()
- if k in inspect.signature(cls).parameters
- })
+ return dataclasses.replace(
+ cls.default(), **{k: v for k, v in j.items() if k in inspect.signature(cls).parameters}
+ )
PACKAGE_GLOBS = (
@classmethod
def from_namespace(cls, ns: argparse.Namespace) -> "Config":
- return cls(**{
- k: v for k, v in vars(ns).items()
- if k in inspect.signature(cls).parameters
- })
+ return cls(**{k: v for k, v in vars(ns).items() if k in inspect.signature(cls).parameters})
@property
def output_with_format(self) -> str:
"repositories": sorted(self.repositories),
"overlay": self.overlay,
"prepare_scripts": sorted(
- base64.b64encode(script.read_bytes()).decode()
- for script in self.prepare_scripts
+ base64.b64encode(script.read_bytes()).decode() for script in self.prepare_scripts
),
# We don't use the full path here since tests will often use temporary directories for the output directory
# which would trigger a rebuild every time.
k = key_transformer(k)
if k not in inspect.signature(cls).parameters and (not isinstance(v, (dict, list, set)) or v):
- die(f"Serialized JSON has unknown field {k} with value {v}",
- hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON")
+ die(
+ f"Serialized JSON has unknown field {k} with value {v}",
+ hint="Re-running mkosi once with -f should solve the issue by re-generating the JSON",
+ )
value_transformer = json_type_transformer(cls)
j = {(tk := key_transformer(k)): value_transformer(tk, v) for k, v in j.items()}
- return dataclasses.replace(cls.default(), **{
- k: v for k, v in j.items()
- if k in inspect.signature(cls).parameters
- })
+ return dataclasses.replace(
+ cls.default(), **{k: v for k, v in j.items() if k in inspect.signature(cls).parameters}
+ )
def find_binary(self, *names: PathString, tools: bool = True) -> Optional[Path]:
return find_binary(*names, root=self.tools() if tools else Path("/"), extra=self.extra_search_paths)
]
if (
- binary and
- (path := self.find_binary(binary, tools=tools)) and
- any(path.is_relative_to(d) for d in self.extra_search_paths)
+ binary
+ and (path := self.find_binary(binary, tools=tools))
+ and any(path.is_relative_to(d) for d in self.extra_search_paths)
):
tools = False
opt += flatten(("--ro-bind", d, d) for d in self.extra_search_paths if not relaxed)
line = line.strip()
- if line[0] == '[':
- if line[-1] != ']':
+ if line[0] == "[":
+ if line[-1] != "]":
die(f"{line} is not a valid section")
# Yield the section name with an empty key and value to indicate we've finished the current section.
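# A minimal sketch of the yield protocol described above (a hypothetical
# helper, not mkosi's actual parse_ini): (section, key, value) tuples for
# assignments, plus a (section, "", "") marker once a section is finished.
from collections.abc import Iterator

def parse_ini_sketch(lines: list[str]) -> Iterator[tuple[str, str, str]]:
    section = ""
    for line in lines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        if line.startswith("[") and line.endswith("]"):
            if section:
                # Empty key and value: the previous section is complete.
                yield section, "", ""
            section = line[1:-1]
            continue
        key, _, value = line.partition("=")
        yield section, key.strip(), value.strip()
    if section:
        yield section, "", ""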
help="Repositories to use",
scope=SettingScope.universal,
),
-
ConfigSetting(
dest="output_format",
short="-t",
paths=("mkosi.clean",),
help="Clean script to run after cleanup",
),
-
ConfigSetting(
dest="packages",
short="-p",
),
ConfigSetting(
dest="base_trees",
- long='--base-tree',
- metavar='PATH',
+ long="--base-tree",
+ metavar="PATH",
section="Content",
parse=config_make_list_parser(delimiter=",", parse=make_path_parser(required=False)),
- help='Use the given tree as base tree (e.g. lower sysext layer)',
+ help="Use the given tree as base tree (e.g. lower sysext layer)",
),
ConfigSetting(
dest="skeleton_trees",
# The default value is set in `__init__.py` in `install_uki`.
# `None` is used to determine if the roothash and boot count format
# should be appended to the filename if they are found.
- #default=
+ # default=
help="Specify the format used for the UKI filename",
),
ConfigSetting(
parse=config_parse_feature,
help="Specify whether to relabel all files with setfiles",
),
-
ConfigSetting(
dest="secure_boot",
metavar="BOOL",
section="Validation",
help="GPG key to use for signing",
),
-
ConfigSetting(
dest="tools_tree",
metavar="PATH",
section="Build",
parse=config_make_list_parser(delimiter=",", parse=make_tree_parser(required=True)),
help="Use a sandbox tree to configure the various tools that mkosi executes",
- paths=("mkosi.sandbox", "mkosi.sandbox.tar", "mkosi.pkgmngr", "mkosi.pkgmngr.tar",),
+ paths=(
+ "mkosi.sandbox",
+ "mkosi.sandbox.tar",
+ "mkosi.pkgmngr",
+ "mkosi.pkgmngr.tar",
+ ),
scope=SettingScope.universal,
),
ConfigSetting(
parse=config_parse_boolean,
help="Whether mkosi can store information about previous builds",
),
-
ConfigSetting(
dest="proxy_url",
section="Host",
metavar="BOOL",
section="Host",
parse=config_parse_boolean,
- help=('If specified, the container/VM is run with a temporary snapshot of the output '
- 'image that is removed immediately when the container/VM terminates'),
+ help=(
+ "If specified, the container/VM is run with a temporary snapshot of the output "
+ "image that is removed immediately when the container/VM terminates"
+ ),
nargs="?",
),
ConfigSetting(
prog="mkosi",
description="Build Bespoke OS Images",
# the synopsis below is supposed to be indented by two spaces
- usage="\n " + textwrap.dedent("""\
+ usage="\n "
+ + textwrap.dedent("""\
mkosi [options…] {b}summary{e}
mkosi [options…] {b}cat-config{e}
mkosi [options…] {b}build{e} [command line…]
help=argparse.SUPPRESS,
)
parser.add_argument(
- "-f", "--force",
+ "-f",
+ "--force",
action="count",
dest="force",
default=0,
help="Remove existing image file before operation",
)
parser.add_argument(
- "-C", "--directory",
+ "-C",
+ "--directory",
type=parse_chdir if chdir else str,
default=Path.cwd(),
help="Change to specified directory before doing anything",
default="mkosi of %u",
)
parser.add_argument(
- "-B", "--auto-bump",
+ "-B",
+ "--auto-bump",
help="Automatically bump image version after building",
action="store_true",
default=False,
default=False,
)
parser.add_argument(
- "-w", "--wipe-build-dir",
+ "-w",
+ "--wipe-build-dir",
help="Remove the build directory before building the image",
action="store_true",
default=False,
help=argparse.SUPPRESS,
)
parser.add_argument(
- "-h", "--help",
+ "-h",
+ "--help",
action=PagerHelpAction,
help=argparse.SUPPRESS,
)
for long in [s.long, *s.compat_longs]:
opts = [s.short, long] if s.short and long == s.long else [long]
- group.add_argument( # type: ignore
+ group.add_argument( # type: ignore
*opts,
dest=s.dest,
choices=s.choices,
metavar=s.metavar,
- nargs=s.nargs, # type: ignore
+ nargs=s.nargs, # type: ignore
const=s.const,
help=s.help if long == s.long else argparse.SUPPRESS,
action=ConfigAction,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: Union[str, Sequence[Any], None],
- option_string: Optional[str] = None
+ option_string: Optional[str] = None,
) -> None:
assert option_string is not None
# specified in configuration files.
self.cli = argparse.Namespace()
self.config = argparse.Namespace(
- files = [],
+ files=[],
)
self.defaults = argparse.Namespace()
# Compare inodes instead of paths so we can't get tricked by bind mounts and such.
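# One way to do such an inode comparison (a sketch, not this patch's code):
# compare (st_dev, st_ino) pairs, which is what os.path.samefile() also does
# internally, so two bind-mounted aliases of one file compare equal while
# identical-looking paths to different files do not.
import os

def same_inode(a: str, b: str) -> bool:
    sa, sb = os.stat(a), os.stat(b)
    return (sa.st_dev, sa.st_ino) == (sb.st_dev, sb.st_ino)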
# If a value was specified on the CLI, it always takes priority. If the setting is a collection of values, we
# merge the value from the CLI with the value from the configuration, making sure that the value from the CLI
# always takes priority.
- if (
- hasattr(self.cli, setting.dest) and
- (v := getattr(self.cli, setting.dest)) is not None
- ):
+ if hasattr(self.cli, setting.dest) and (v := getattr(self.cli, setting.dest)) is not None:
if isinstance(v, list):
return (getattr(self.config, setting.dest, None) or []) + v
elif isinstance(v, dict):
# value either if the setting is set to the empty string on the command line.
if (
- not hasattr(self.cli, setting.dest) and
- hasattr(self.config, setting.dest) and
- (v := getattr(self.config, setting.dest)) is not None
+ not hasattr(self.cli, setting.dest)
+ and hasattr(self.config, setting.dest)
+ and (v := getattr(self.config, setting.dest)) is not None
):
return v
- if (
- (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and
- isinstance(setting.parse(None, None), (dict, list, set))
+ if (hasattr(self.cli, setting.dest) or hasattr(self.config, setting.dest)) and isinstance(
+ setting.parse(None, None), (dict, list, set)
):
default = setting.parse(None, None)
elif hasattr(self.defaults, setting.dest):
return match_triggered is not False
def parse_config_one(self, path: Path, profiles: bool = False, local: bool = False) -> bool:
- s: Optional[ConfigSetting] # Make mypy happy
+ s: Optional[ConfigSetting] # Make mypy happy
extras = path.is_dir()
if path.is_dir():
delattr(self.config, s.dest)
for s in SETTINGS:
- if (
- s.scope == SettingScope.universal and
- (image := getattr(self.config, "image", None)) is not None
- ):
+ if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None:
continue
if self.only_sections and s.section not in self.only_sections:
s.dest,
s.parse(
file_run_or_read(extra).rstrip("\n") if s.path_read_text else f,
- getattr(self.config, s.dest, None)
+ getattr(self.config, s.dest, None),
),
)
if path.exists():
abs_path = Path.cwd() / path
logging.debug(f"Loading configuration file {abs_path}")
- files = getattr(self.config, 'files')
+ files = getattr(self.config, "files")
files += [abs_path]
for section, k, v in parse_ini(path, only_sections=self.only_sections or {s.section for s in SETTINGS}):
if not (s := SETTINGS_LOOKUP_BY_NAME.get(name)):
die(f"Unknown setting {name}")
- if (
- s.scope == SettingScope.universal and
- (image := getattr(self.config, "image", None)) is not None
- ):
+ if s.scope == SettingScope.universal and (image := getattr(self.config, "image", None)) is not None:
die(f"Setting {name} cannot be configured in subimage {image}")
if name in self.immutable:
die(f"Setting {name} cannot be modified anymore at this point")
return args, ()
if (
- args.verb.needs_build() and
- args.verb != Verb.build and
- not args.force and
- Path(".mkosi-private/history/latest.json").exists()
+ args.verb.needs_build()
+ and args.verb != Verb.build
+ and not args.force
+ and Path(".mkosi-private/history/latest.json").exists()
):
prev = Config.from_json(Path(".mkosi-private/history/latest.json").read_text())
if hasattr(context.config, s.dest):
delattr(context.config, s.dest)
- context.only_sections = ("Include", "Host",)
+ context.only_sections = ("Include", "Host")
else:
prev = None
# we check here to see if dependencies were explicitly provided and if not we gather
# the list of default dependencies while we parse the subimages.
dependencies: Optional[list[str]] = (
- None
- if hasattr(context.cli, "dependencies") or hasattr(context.config, "dependencies")
- else []
+ None if hasattr(context.cli, "dependencies") or hasattr(context.config, "dependencies") else []
)
if args.directory is not None and Path("mkosi.images").exists():
name: getattr(config, "environment")[name]
for name in getattr(config, "pass_environment", {})
if name in getattr(config, "environment", {})
- }
+ },
)
for p in sorted(Path("mkosi.images").iterdir()):
if "ssh.authorized_keys.root" not in creds:
if args.ssh_certificate:
- pubkey = run(["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"],
- stdout=subprocess.PIPE, env=dict(OPENSSL_CONF="/dev/null")).stdout.strip()
- sshpubkey = run(["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"],
- input=pubkey, stdout=subprocess.PIPE).stdout.strip()
+ pubkey = run(
+ ["openssl", "x509", "-in", args.ssh_certificate, "-pubkey", "-noout"],
+ stdout=subprocess.PIPE,
+ env=dict(OPENSSL_CONF="/dev/null"),
+ ).stdout.strip()
+ sshpubkey = run(
+ ["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"], input=pubkey, stdout=subprocess.PIPE
+ ).stdout.strip()
creds["ssh.authorized_keys.root"] = sshpubkey
elif args.ssh:
- die("Ssh= is enabled but no SSH certificate was found",
- hint="Run 'mkosi genkey' to automatically create one")
+ die(
+ "Ssh= is enabled but no SSH certificate was found",
+ hint="Run 'mkosi genkey' to automatically create one",
+ )
return creds
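# The certificate-to-authorized_keys conversion above as a standalone sketch
# (assuming an X.509 certificate at the hypothetical path cert.pem): openssl
# extracts the PEM public key, and ssh-keygen converts that PKCS8 key into a
# single authorized_keys-style line.
import subprocess

pubkey = subprocess.run(
    ["openssl", "x509", "-in", "cert.pem", "-pubkey", "-noout"],
    stdout=subprocess.PIPE, check=True,
).stdout
authorized_key = subprocess.run(
    ["ssh-keygen", "-f", "/dev/stdin", "-i", "-m", "PKCS8"],
    input=pubkey, stdout=subprocess.PIPE, check=True,
).stdout.strip()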
if gnupghome := os.getenv("GNUPGHOME"):
env["GNUPGHOME"] = gnupghome
- env |= dict(
- parse_environment(line)
- for f in args.environment_files
- for line in f.read_text().strip().splitlines()
- )
+ env |= dict(parse_environment(line) for f in args.environment_files for line in f.read_text().strip().splitlines())
env |= args.environment
return env
# For unprivileged builds we need the userxattr OverlayFS mount option, which is only available
# in Linux v5.11 and later.
if (
- (config.build_scripts or config.base_trees) and
- GenericVersion(platform.release()) < GenericVersion("5.11") and
- os.geteuid() != 0
+ (config.build_scripts or config.base_trees)
+ and GenericVersion(platform.release()) < GenericVersion("5.11")
+ and os.geteuid() != 0
):
die("This unprivileged build configuration requires at least Linux v5.11")
# Display the paths as relative to ., if underneath.
if path.is_relative_to(Path.cwd()):
path = path.relative_to(Path.cwd())
- print(f'{Style.blue}# {path}{Style.reset}', file=c)
+ print(f"{Style.blue}# {path}{Style.reset}", file=c)
print(path.read_text(), file=c)
return c.getvalue()
die("SELinux relabel is requested but could not find selinux config at /etc/selinux/config")
return None
- policy = run(["sh", "-c", f". {selinux} && echo $SELINUXTYPE"],
- sandbox=config.sandbox(binary="sh", options=["--ro-bind", selinux, selinux]),
- stdout=subprocess.PIPE).stdout.strip()
+ policy = run(
+ ["sh", "-c", f". {selinux} && echo $SELINUXTYPE"],
+ sandbox=config.sandbox(binary="sh", options=["--ro-bind", selinux, selinux]),
+ stdout=subprocess.PIPE,
+ ).stdout.strip()
if not policy:
if fatal and config.selinux_relabel == ConfigFeature.enabled:
die("SELinux relabel is requested but no selinux policy is configured in /etc/selinux/config")
[*tool, "--version"],
stdout=subprocess.PIPE,
sandbox=sandbox(binary=tool[-1]),
- ).stdout.split()[2].strip("()").removeprefix("v")
+ )
+ .stdout.split()[2]
+ .strip("()")
+ .removeprefix("v")
)
network=True,
options=["--bind", output_dir, output_dir, *finalize_crypto_mounts(config)],
),
- )
+ ) # fmt: skip
class PackageType(StrEnum):
- none = enum.auto()
- rpm = enum.auto()
- deb = enum.auto()
- pkg = enum.auto()
+ none = enum.auto()
+ rpm = enum.auto()
+ deb = enum.auto()
+ pkg = enum.auto()
class DistributionInstaller:
class Distribution(StrEnum):
# Please consult docs/distribution-policy.md and contact one
# of the mkosi maintainers before implementing a new distribution.
- fedora = enum.auto()
- debian = enum.auto()
- kali = enum.auto()
- ubuntu = enum.auto()
- arch = enum.auto()
- opensuse = enum.auto()
- mageia = enum.auto()
- centos = enum.auto()
- rhel = enum.auto()
- rhel_ubi = enum.auto()
+ fedora = enum.auto()
+ debian = enum.auto()
+ kali = enum.auto()
+ ubuntu = enum.auto()
+ arch = enum.auto()
+ opensuse = enum.auto()
+ mageia = enum.auto()
+ centos = enum.auto()
+ rhel = enum.auto()
+ rhel_ubi = enum.auto()
openmandriva = enum.auto()
- rocky = enum.auto()
- alma = enum.auto()
- azure = enum.auto()
- custom = enum.auto()
+ rocky = enum.auto()
+ alma = enum.auto()
+ azure = enum.auto()
+ custom = enum.auto()
def is_centos_variant(self) -> bool:
return self in (
return self.installer().package_manager(context.config).createrepo(context)
def installer(self) -> type[DistributionInstaller]:
- modname = str(self).replace('-', '_')
+ modname = str(self).replace("-", "_")
mod = importlib.import_module(f"mkosi.distributions.{modname}")
installer = getattr(mod, "Installer")
assert issubclass(installer, DistributionInstaller)
# Testing repositories have to go before regular ones to take precedence.
repos = [
- repo for repo in (
+ repo
+ for repo in (
"core-testing",
"core-testing-debug",
"extra-testing",
"extra-debug",
"multilib-testing",
"multilib",
- ) if repo in context.config.repositories
+ )
+ if repo in context.config.repositories
] + ["core", "extra"]
if context.config.architecture.is_arm_variant():
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.x86_64 : "x86_64",
- Architecture.arm64 : "aarch64",
- Architecture.arm : "armv7h",
- }.get(arch)
+ Architecture.x86_64: "x86_64",
+ Architecture.arm64: "aarch64",
+ Architecture.arm: "armv7h",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {a} is not supported by Arch Linux")
return a
-
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.arm64 : "aarch64",
- Architecture.x86_64 : "x86_64",
- }.get(arch)
+ Architecture.arm64: "aarch64",
+ Architecture.x86_64: "x86_64",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {a} is not supported by {cls.pretty_name()}")
# The Hyperscale SIG uses /usr/lib/sysimage/rpm in its rebuild of rpm for C9S that's shipped in the
# hyperscale-packages-experimental repository.
if (
- GenericVersion(context.config.release) > 9 or
- "hyperscale-packages-experimental" in context.config.repositories
+ GenericVersion(context.config.release) > 9
+ or "hyperscale-packages-experimental" in context.config.repositories
):
return "/usr/lib/sysimage/rpm"
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.x86_64 : "x86_64",
- Architecture.ppc64_le : "ppc64le",
- Architecture.s390x : "s390x",
- Architecture.arm64 : "aarch64",
- }.get(arch)
+ Architecture.x86_64: "x86_64",
+ Architecture.ppc64_le: "ppc64le",
+ Architecture.s390x: "s390x",
+ Architecture.arm64: "aarch64",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {a} is not supported by {cls.pretty_name()}")
("epel", "epel"),
("epel-next", "epel/next"),
("epel-testing", "epel/testing"),
- ("epel-next-testing", "epel/testing/next")
+ ("epel-next-testing", "epel/testing/next"),
):
# For EPEL we make the assumption that epel is mirrored in the parent directory of the mirror URL and
# path we were given. Since this doesn't work for all scenarios, we also allow overriding the mirror
for repo in ("epel", "epel-next"):
yield RpmRepository(repo, f"{url}&repo={repo}-$releasever", gpgurls, enabled=False)
yield RpmRepository(
- f"{repo}-debuginfo",
- f"{url}&repo={repo}-debug-$releasever",
- gpgurls,
- enabled=False
- )
- yield RpmRepository(
- f"{repo}-source",
- f"{url}&repo={repo}-source-$releasever",
- gpgurls,
- enabled=False
+ f"{repo}-debuginfo", f"{url}&repo={repo}-debug-$releasever", gpgurls, enabled=False
)
+ yield RpmRepository(f"{repo}-source", f"{url}&repo={repo}-source-$releasever", gpgurls, enabled=False)
+ yield RpmRepository("epel-testing", f"{url}&repo=testing-epel$releasever", gpgurls, enabled=False)
yield RpmRepository(
- "epel-testing",
- f"{url}&repo=testing-epel$releasever",
- gpgurls,
- enabled=False
- )
- yield RpmRepository(
- "epel-testing-debuginfo",
- f"{url}&repo=testing-debug-epel$releasever",
- gpgurls,
- enabled=False
+ "epel-testing-debuginfo", f"{url}&repo=testing-debug-epel$releasever", gpgurls, enabled=False
)
yield RpmRepository(
- "epel-testing-source",
- f"{url}&repo=testing-source-epel$releasever",
- gpgurls,
- enabled=False
+ "epel-testing-source", f"{url}&repo=testing-source-epel$releasever", gpgurls, enabled=False
)
yield RpmRepository(
- "epel-next-testing",
- f"{url}&repo=epel-testing-next-$releasever",
- gpgurls,
- enabled=False
+ "epel-next-testing", f"{url}&repo=epel-testing-next-$releasever", gpgurls, enabled=False
)
yield RpmRepository(
"epel-next-testing-debuginfo",
"sparc" : ["lib64"],
"sparc64" : ["lib32", "lib64"],
"x32" : ["lib32", "lib64", "libx32"],
- }.get(context.config.distribution.architecture(context.config.architecture), [])
+ }.get(context.config.distribution.architecture(context.config.architecture), []) # fmt: skip
with umask(~0o755):
for d in subdirs:
if not context.config.with_docs
else []
),
- sandbox=context.sandbox
+ sandbox=context.sandbox,
)
# Finally, run apt to properly install packages in the chroot without having to worry that maintainer
# Let's make sure it is enabled by default in our images.
(context.root / "etc/systemd/system-generators/systemd-gpt-auto-generator").unlink(missing_ok=True)
-
@classmethod
def remove_packages(cls, context: Context, packages: Sequence[str]) -> None:
Apt.invoke(context, "purge", packages, apivfs=True)
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.arm64 : "arm64",
- Architecture.arm : "armhf",
- Architecture.alpha : "alpha",
- Architecture.x86_64 : "amd64",
- Architecture.x86 : "i386",
- Architecture.ia64 : "ia64",
- Architecture.loongarch64 : "loongarch64",
- Architecture.mips64_le : "mips64el",
- Architecture.mips_le : "mipsel",
- Architecture.parisc : "hppa",
- Architecture.ppc64_le : "ppc64el",
- Architecture.ppc64 : "ppc64",
- Architecture.riscv64 : "riscv64",
- Architecture.s390x : "s390x",
- Architecture.s390 : "s390",
- }.get(arch)
+ Architecture.arm64: "arm64",
+ Architecture.arm: "armhf",
+ Architecture.alpha: "alpha",
+ Architecture.x86_64: "amd64",
+ Architecture.x86: "i386",
+ Architecture.ia64: "ia64",
+ Architecture.loongarch64: "loongarch64",
+ Architecture.mips64_le: "mips64el",
+ Architecture.mips_le: "mipsel",
+ Architecture.parisc: "hppa",
+ Architecture.ppc64_le: "ppc64el",
+ Architecture.ppc64: "ppc64",
+ Architecture.riscv64: "riscv64",
+ Architecture.s390x: "s390x",
+ Architecture.s390: "s390",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {arch} is not supported by Debian")
with osrelease.open("r") as old, newosrelease.open("w") as new:
for line in old.readlines():
if line.startswith("VERSION_CODENAME="):
- new.write('VERSION_CODENAME=sid\n')
+ new.write("VERSION_CODENAME=sid\n")
else:
new.write(line)
# precedence over /usr/lib/os-release, and ignore the latter and assume that if a usr-only
# image is built then the package manager will not run on it.
if candidate == "etc/os-release":
- run([
- "dpkg-divert",
- "--quiet",
- "--root=/buildroot",
- "--local",
- "--add",
- "--rename",
- "--divert",
- f"/{candidate}.dpkg",
- f"/{candidate}",
- ], sandbox=context.sandbox(binary="dpkg-divert", options=["--bind", context.root, "/buildroot"]))
+ run(
+ [
+ "dpkg-divert",
+ "--quiet",
+ "--root=/buildroot",
+ "--local",
+ "--add",
+ "--rename",
+ "--divert",
+ f"/{candidate}.dpkg",
+ f"/{candidate}",
+ ],
+ sandbox=context.sandbox(binary="dpkg-divert", options=["--bind", context.root, "/buildroot"]),
+ )
newosrelease.rename(osrelease)
if not key1 and not key2:
if not context.config.repository_key_fetch:
- die("Fedora GPG keys not found in /usr/share/distribution-gpg-keys",
- hint="Make sure the distribution-gpg-keys package is installed")
+ die(
+ "Fedora GPG keys not found in /usr/share/distribution-gpg-keys",
+ hint="Make sure the distribution-gpg-keys package is installed",
+ )
if context.config.release == "rawhide":
# https://fedoraproject.org/fedora.gpg is always outdated when the rawhide key changes. Instead, let's
return
if context.config.release == "eln":
- mirror = context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose"
+ mirror = (
+ context.config.mirror or "https://odcs.fedoraproject.org/composes/production/latest-Fedora-ELN/compose"
+ )
for repo in ("Appstream", "BaseOS", "Extras", "CRB"):
url = f"baseurl={join_mirror(mirror, repo)}"
yield RpmRepository(repo.lower(), f"{url}/$basearch/os", gpgurls)
yield RpmRepository(repo.lower(), f"{url}/$basearch/debug/tree", gpgurls, enabled=False)
yield RpmRepository(repo.lower(), f"{url}/source/tree", gpgurls, enabled=False)
- elif (m := context.config.mirror):
+ elif m := context.config.mirror:
directory = "development" if context.config.release == "rawhide" else "releases"
url = f"baseurl={join_mirror(m, f'linux/{directory}/$releasever/Everything')}"
yield RpmRepository("fedora", f"{url}/$basearch/os", gpgurls)
enabled=False,
)
yield RpmRepository(
- "updates-source",
- f"{url}&repo=updates-released-source-f$releasever",
- gpgurls,
- enabled=False
+ "updates-source", f"{url}&repo=updates-released-source-f$releasever", gpgurls, enabled=False
)
yield RpmRepository(
- "updates-testing",
- f"{url}&repo=updates-testing-f$releasever",
- gpgurls,
- enabled=False
+ "updates-testing", f"{url}&repo=updates-testing-f$releasever", gpgurls, enabled=False
)
yield RpmRepository(
"updates-testing-debuginfo",
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.arm64 : "aarch64",
- Architecture.mips64_le : "mips64el",
- Architecture.mips_le : "mipsel",
- Architecture.ppc64_le : "ppc64le",
- Architecture.riscv64 : "riscv64",
- Architecture.s390x : "s390x",
- Architecture.x86_64 : "x86_64",
- }.get(arch)
+ Architecture.arm64: "aarch64",
+ Architecture.mips64_le: "mips64el",
+ Architecture.mips_le: "mipsel",
+ Architecture.ppc64_le: "ppc64le",
+ Architecture.riscv64: "riscv64",
+ Architecture.s390x: "s390x",
+ Architecture.x86_64: "x86_64",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {a} is not supported by Fedora")
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.x86_64 : "x86_64",
- Architecture.arm64 : "aarch64",
- }.get(arch)
+ Architecture.x86_64: "x86_64",
+ Architecture.arm64: "aarch64",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {a} is not supported by Mageia")
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.x86_64 : "x86_64",
- Architecture.arm64 : "aarch64",
- Architecture.riscv64 : "riscv64",
- }.get(arch)
+ Architecture.x86_64: "x86_64",
+ Architecture.arm64: "aarch64",
+ Architecture.riscv64: "riscv64",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {a} is not supported by OpenMandriva")
"--recommends" if context.config.with_recommends else "--no-recommends",
*sort_packages(packages),
],
- apivfs=apivfs)
+ apivfs=apivfs,
+ ) # fmt: skip
else:
Dnf.invoke(context, "install", sort_packages(packages), apivfs=apivfs)
)
if not gpgkeys and not context.config.repository_key_fetch:
- die("OpenSUSE GPG keys not found in /usr/share/distribution-gpg-keys",
- hint="Make sure the distribution-gpg-keys package is installed")
+ die(
+ "OpenSUSE GPG keys not found in /usr/share/distribution-gpg-keys",
+ hint="Make sure the distribution-gpg-keys package is installed",
+ )
if zypper and gpgkeys:
run(
"--bind", context.root, "/buildroot",
*finalize_crypto_mounts(context.config),
],
- )
- )
+ ),
+ ) # fmt: skip
if context.config.release == "tumbleweed":
if context.config.architecture == Architecture.x86_64:
)
else:
if (
- context.config.release in ("current", "stable", "leap") and
- context.config.architecture != Architecture.x86_64
+ context.config.release in ("current", "stable", "leap")
+ and context.config.architecture != Architecture.x86_64
):
- die(f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture",
- hint="Specify either tumbleweed or a specific leap release such as 15.6")
+ die(
+ f"{cls.pretty_name()} only supports current and stable releases for the x86-64 architecture",
+ hint="Specify either tumbleweed or a specific leap release such as 15.6",
+ )
if context.config.release in ("current", "stable", "leap"):
release = "openSUSE-current"
@classmethod
def architecture(cls, arch: Architecture) -> str:
a = {
- Architecture.x86_64 : "x86_64",
- Architecture.arm64 : "aarch64",
- }.get(arch)
+ Architecture.x86_64: "x86_64",
+ Architecture.arm64: "aarch64",
+ }.get(arch) # fmt: skip
if not a:
die(f"Architecture {a} is not supported by OpenSUSE")
components=components,
signedby=signedby,
)
-
default=platform.uname().release,
)
parser.add_argument(
- "-t", "--format",
+ "-t",
+ "--format",
choices=[str(OutputFormat.cpio), str(OutputFormat.uki), str(OutputFormat.directory)],
help="Output format (CPIO archive, UKI or local directory)",
default="cpio",
)
parser.add_argument(
- "-o", "--output",
+ "-o",
+ "--output",
metavar="NAME",
help="Output name",
default="initrd",
)
parser.add_argument(
- "-O", "--output-dir",
+ "-O",
+ "--output-dir",
metavar="DIR",
help="Output directory",
default="",
default=False,
)
parser.add_argument(
- "-D", "--show-documentation",
+ "-D",
+ "--show-documentation",
help="Show the man page",
action="store_true",
default=False,
"--kernel-modules-include=host",
"--build-sources", "",
"--include=mkosi-initrd",
- ]
+ ] # fmt: skip
if args.debug:
cmdline += ["--debug"]
if (Path("/etc") / p).resolve().is_file():
shutil.copy2(Path("/etc") / p, Path(d) / "etc" / p)
else:
- shutil.copytree(Path("/etc") / p, Path(d) / "etc" / p,
- ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True)
+ shutil.copytree(
+ Path("/etc") / p, Path(d) / "etc" / p, ignore=shutil.ignore_patterns("gnupg"), dirs_exist_ok=True
+ )
cmdline += ["--sandbox-tree", d]
cmdline,
stdin=sys.stdin,
stdout=sys.stdout,
- env={"MKOSI_DNF": dnf.name} if (dnf := find_binary("dnf", "dnf5")) else {}
+ env={"MKOSI_DNF": dnf.name} if (dnf := find_binary("dnf", "dnf5")) else {},
)
@classmethod
def state_subdirs(cls, state: Path) -> list[Path]:
- return []
+ return []
@classmethod
def scripts(cls, context: Context) -> dict[str, list[PathString]]:
@classmethod
def finalize_environment(cls, context: Context) -> dict[str, str]:
env = {
- "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc.
+ "HOME": "/", # Make sure rpm doesn't pick up ~/.rpmmacros and ~/.rpmrc.
# systemd's chroot detection doesn't work when unprivileged so tell it explicitly.
"SYSTEMD_IN_CHROOT": "1",
}
env["SYSTEMD_HWDB_UPDATE_BYPASS"] = "1"
if (
- "KERNEL_INSTALL_BYPASS" not in context.config.environment and
- context.config.bootable != ConfigFeature.disabled
+ "KERNEL_INSTALL_BYPASS" not in context.config.environment
+ and context.config.bootable != ConfigFeature.disabled
):
env["KERNEL_INSTALL_BYPASS"] = "1"
else:
mounts = [
*finalize_crypto_mounts(context.config),
"--bind", context.repository, "/repository",
- ]
+ ] # fmt: skip
if context.config.local_mirror and (mirror := startswith(context.config.local_mirror, "file://")):
mounts += ["--ro-bind", mirror, mirror]
# original root won't be available anymore. If we're not in the sandbox yet, we want to pick up the passwd
# files from the original root.
*finalize_passwd_mounts(root),
- ]
+ ] # fmt: skip
@classmethod
def apivfs_script_cmd(cls, context: Context) -> list[PathString]:
*apivfs_options(),
*cls.options(root="/buildroot"),
"--",
- ]
+ ] # fmt: skip
@classmethod
def sandbox(
*cls.options(root=context.root, apivfs=apivfs),
*options,
],
- )
+ ) # fmt: skip
@classmethod
def sync(cls, context: Context, force: bool) -> None:
if context.config.clean_package_metadata == ConfigFeature.disabled:
return
- if (
- context.config.clean_package_metadata == ConfigFeature.auto and
- context.config.output_format in (OutputFormat.directory, OutputFormat.tar)
+ if context.config.clean_package_metadata == ConfigFeature.auto and context.config.output_format in (
+ OutputFormat.directory,
+ OutputFormat.tar,
):
return
executable = context.config.distribution.package_manager(context.config).executable(context.config)
remove = []
- for tool, paths in (("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]),
- ("dnf5", ["usr/lib/sysimage/libdnf5"]),
- ("dpkg", ["var/lib/dpkg"]),
- (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"])):
+ for tool, paths in (
+ ("rpm", ["var/lib/rpm", "usr/lib/sysimage/rpm"]),
+ ("dnf5", ["usr/lib/sysimage/libdnf5"]),
+ ("dpkg", ["var/lib/dpkg"]),
+ (executable, [f"var/lib/{subdir}", f"var/cache/{subdir}"]),
+ ): # fmt: skip
if context.config.clean_package_metadata == ConfigFeature.enabled or not find_binary(tool, root=context.root):
remove += [context.root / p for p in paths if (context.root / p).exists()]
return {
**{
- command: cmd + cls.env_cmd(context) + cls.cmd(context, command) for command in (
+ command: cmd + cls.env_cmd(context) + cls.cmd(context, command)
+ for command in (
"apt",
"apt-cache",
"apt-cdrom",
)
},
**{
- command: cmd + cls.dpkg_cmd(command) for command in(
+ command: cmd + cls.dpkg_cmd(command)
+ for command in (
"dpkg",
"dpkg-query",
)
},
- "mkosi-install" : ["apt-get", "install"],
- "mkosi-upgrade" : ["apt-get", "upgrade"],
- "mkosi-remove" : ["apt-get", "purge"],
+ "mkosi-install": ["apt-get", "install"],
+ "mkosi-upgrade": ["apt-get", "upgrade"],
+ "mkosi-remove": ["apt-get", "purge"],
"mkosi-reinstall": ["apt-get", "install", "--reinstall"],
- }
+ } # fmt: skip
@classmethod
def setup(cls, context: Context, repositories: Sequence[AptRepository]) -> None:
die(
f"Keyring for repo {repo.url} not found at {repo.signedby}",
hint="Make sure the right keyring package (e.g. debian-archive-keyring, kali-archive-keyring "
- "or ubuntu-keyring) is installed",
+ "or ubuntu-keyring) is installed",
)
with sources.open("w") as f:
def finalize_environment(cls, context: Context) -> dict[str, str]:
env = {
"APT_CONFIG": "/etc/apt.conf",
- "DEBIAN_FRONTEND" : "noninteractive",
+ "DEBIAN_FRONTEND": "noninteractive",
"DEBCONF_INTERACTIVE_SEEN": "true",
}
"-o", "DPkg::Use-Pty=false",
"-o", "DPkg::Install::Recursive::Minimum=1000",
"-o", "pkgCacheGen::ForceEssential=,",
- ]
+ ] # fmt: skip
if not context.config.repository_key_check:
cmdline += [
"-o", "Acquire::AllowInsecureRepositories=true",
"-o", "Acquire::AllowDowngradeToInsecureRepositories=true",
"-o", "APT::Get::AllowUnauthenticated=true",
- ]
+ ] # fmt: skip
if not context.config.with_docs:
cmdline += [f"--option=DPkg::Options::=--path-exclude=/{glob}" for glob in cls.documentation_exclude_globs]
cmdline += [
"-o", f"Acquire::http::Proxy={context.config.proxy_url}",
"-o", f"Acquire::https::Proxy={context.config.proxy_url}",
- ]
+ ] # fmt: skip
return cmdline
"-o", "Dir::Etc::sourceparts=-",
"-o", "APT::Get::List-Cleanup=0",
],
- )
+ ) # fmt: skip
@classmethod
def cache_subdirs(cls, cache: Path) -> list[Path]:
- return [
- p / "packages"
- for p in cache.iterdir()
- if p.is_dir() and "-" in p.name and "mkosi" not in p.name
- ]
+ return [p / "packages" for p in cache.iterdir() if p.is_dir() and "-" in p.name and "mkosi" not in p.name]
@classmethod
def scripts(cls, context: Context) -> dict[str, list[PathString]]:
return {
"dnf": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context),
"rpm": cls.apivfs_script_cmd(context) + rpm_cmd(),
- "mkosi-install" : ["dnf", "install"],
- "mkosi-upgrade" : ["dnf", "upgrade"],
- "mkosi-remove" : ["dnf", "remove"],
+ "mkosi-install": ["dnf", "install"],
+ "mkosi-upgrade": ["dnf", "upgrade"],
+ "mkosi-remove": ["dnf", "remove"],
"mkosi-reinstall": ["dnf", "reinstall"],
- }
+ } # fmt: skip
@classmethod
def setup(cls, context: Context, repositories: Sequence[RpmRepository], filelists: bool = True) -> None:
@classmethod
def cmd(
- cls,
- context: Context,
- cached_metadata: bool = True,
+ cls,
+ context: Context,
+ cached_metadata: bool = True,
) -> list[PathString]:
dnf = cls.executable(context.config)
f"--setopt=install_weak_deps={int(context.config.with_recommends)}",
"--setopt=check_config_file_age=0",
"--disable-plugin=*" if dnf.endswith("dnf5") else "--disableplugin=*",
- ]
+ ] # fmt: skip
for plugin in ("builddep", "versionlock"):
cmdline += ["--enable-plugin", plugin] if dnf.endswith("dnf5") else ["--enableplugin", plugin]
@classmethod
def createrepo(cls, context: Context) -> None:
- run(["createrepo_c", context.repository],
- sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]))
+ run(
+ ["createrepo_c", context.repository],
+ sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]),
+ )
(context.sandbox_tree / "etc/yum.repos.d/mkosi-local.repo").write_text(
textwrap.dedent(
def scripts(cls, context: Context) -> dict[str, list[PathString]]:
return {
"pacman": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context),
- "mkosi-install" : ["pacman", "--sync", "--needed"],
- "mkosi-upgrade" : ["pacman", "--sync", "--sysupgrade", "--needed"],
- "mkosi-remove" : ["pacman", "--remove", "--recursive", "--nosave"],
+ "mkosi-install": ["pacman", "--sync", "--needed"],
+ "mkosi-upgrade": ["pacman", "--sync", "--sysupgrade", "--needed"],
+ "mkosi-remove": ["pacman", "--remove", "--recursive", "--nosave"],
"mkosi-reinstall": ["pacman", "--sync"],
- }
+ } # fmt: skip
@classmethod
def mounts(cls, context: Context) -> list[PathString]:
# pacman writes downloaded packages to the first writable cache directory. We don't want it to write to our
# local repository directory so we expose it as a read-only directory to pacman.
"--ro-bind", context.repository, "/var/cache/pacman/mkosi",
- ]
+ ] # fmt: skip
if (context.root / "var/lib/pacman/local").exists():
# pacman reuses the same directory for the sync databases and the local database containing the list of
"--arch", context.config.distribution.architecture(context.config.architecture),
"--color", "auto",
"--noconfirm",
- ]
+ ] # fmt: skip
@classmethod
def invoke(
"repo-add",
"--quiet",
context.repository / "mkosi.db.tar",
- *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name))
+ *sorted(context.repository.glob("*.pkg.tar*"), key=lambda p: GenericVersion(Path(p).name)),
],
sandbox=context.sandbox(binary="repo-add", options=["--bind", context.repository, context.repository]),
)
)
# pacman can't sync a single repository, so we go behind its back and do it ourselves.
- shutil.move(
- context.repository / "mkosi.db.tar",
- context.metadata_dir / "lib/pacman/sync/mkosi.db"
- )
+ shutil.move(context.repository / "mkosi.db.tar", context.metadata_dir / "lib/pacman/sync/mkosi.db")
@overload
def find_rpm_gpgkey(
- context: Context,
- key: str,
- fallback: Optional[str] = None,
- *,
- required: Literal[False]
+ context: Context, key: str, fallback: Optional[str] = None, *, required: Literal[False]
) -> Optional[str]: ...
def find_rpm_gpgkey(
- context: Context,
- key: str,
- fallback: Optional[str] = None,
- *,
- required: bool = True
+ context: Context, key: str, fallback: Optional[str] = None, *, required: bool = True
) -> Optional[str]:
root = context.config.tools() if context.config.tools_tree_certificates else Path("/")
return fallback
if required:
- die(f"{key} GPG key not found in /usr/share/distribution-gpg-keys",
- hint="Make sure the distribution-gpg-keys package is installed")
+ die(
+ f"{key} GPG key not found in /usr/share/distribution-gpg-keys",
+ hint="Make sure the distribution-gpg-keys package is installed",
+ )
return None
if not (confdir / "macros.dbpath").exists():
(confdir / "macros.dbpath").write_text(f"%_dbpath {dbpath}")
- plugindir = Path(run(["rpm", "--eval", "%{__plugindir}"],
- sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE).stdout.strip())
+ plugindir = Path(
+ run(
+ ["rpm", "--eval", "%{__plugindir}"], sandbox=context.sandbox(binary="rpm"), stdout=subprocess.PIPE
+ ).stdout.strip()
+ )
if (plugindir := context.config.tools() / plugindir.relative_to("/")).exists():
with (confdir / "macros.disable-plugins").open("w") as f:
for plugin in plugindir.iterdir():
"install",
"--download", "in-advance",
"--recommends" if context.config.with_recommends else "--no-recommends",
- ]
+ ] # fmt: skip
return {
"zypper": cls.apivfs_script_cmd(context) + cls.env_cmd(context) + cls.cmd(context),
- "rpm" : cls.apivfs_script_cmd(context) + rpm_cmd(),
- "mkosi-install" : install,
- "mkosi-upgrade" : ["zypper", "update"],
- "mkosi-remove" : ["zypper", "remove", "--clean-deps"],
+ "rpm": cls.apivfs_script_cmd(context) + rpm_cmd(),
+ "mkosi-install": install,
+ "mkosi-upgrade": ["zypper", "update"],
+ "mkosi-remove": ["zypper", "remove", "--clean-deps"],
"mkosi-reinstall": install + ["--force"],
- }
+ } # fmt: skip
@classmethod
def setup(cls, context: Context, repositories: Sequence[RpmRepository]) -> None:
@classmethod
def createrepo(cls, context: Context) -> None:
- run(["createrepo_c", context.repository],
- sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]))
+ run(
+ ["createrepo_c", context.repository],
+ sandbox=context.sandbox(binary="createrepo_c", options=["--bind", context.repository, context.repository]),
+ )
(context.sandbox_tree / "etc/zypp/repos.d/mkosi-local.repo").write_text(
textwrap.dedent(
def loaded_modules() -> list[str]:
# Loaded modules are listed with underscores but the filenames might use dashes instead.
- return [fr"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines()]
+ return [rf"/{line.split()[0].replace('_', '[_-]')}\.ko" for line in Path("/proc/modules").read_text().splitlines()]
def filter_kernel_modules(root: Path, kver: str, *, include: Iterable[str], exclude: Iterable[str]) -> list[Path]:
# modules than the max number of accepted CLI arguments, we split the modules list up into chunks.
info = ""
for i in range(0, len(nametofile.keys()), 8500):
- chunk = list(nametofile.keys())[i:i+8500]
+ chunk = list(nametofile.keys())[i : i + 8500]
info += run(
["modinfo", "--set-version", kver, "--null", *chunk],
stdout=subprocess.PIPE,
firmwared = Path("usr/lib/firmware")
with complete_step("Applying kernel module filters"):
- required = set(
- gen_required_kernel_modules(root, kver, include=include, exclude=exclude))
+ required = set(gen_required_kernel_modules(root, kver, include=include, exclude=exclude))
with chdir(root):
modules = sorted(modulesd.rglob("*.ko*"), reverse=True)
class Style:
- bold: Final[str] = "\033[0;1;39m" if not terminal_is_dumb() else ""
- blue: Final[str] = "\033[0;1;34m" if not terminal_is_dumb() else ""
- gray: Final[str] = "\033[0;38;5;245m" if not terminal_is_dumb() else ""
- red: Final[str] = "\033[31;1m" if not terminal_is_dumb() else ""
- yellow: Final[str] = "\033[33;1m" if not terminal_is_dumb() else ""
- reset: Final[str] = "\033[0m" if not terminal_is_dumb() else ""
+ # fmt: off
+ bold: Final[str] = "\033[0;1;39m" if not terminal_is_dumb() else ""
+ blue: Final[str] = "\033[0;1;34m" if not terminal_is_dumb() else ""
+ gray: Final[str] = "\033[0;38;5;245m" if not terminal_is_dumb() else ""
+ red: Final[str] = "\033[31;1m" if not terminal_is_dumb() else ""
+ yellow: Final[str] = "\033[33;1m" if not terminal_is_dumb() else ""
+ reset: Final[str] = "\033[0m" if not terminal_is_dumb() else ""
+ # fmt: on
-def die(message: str,
- *,
- hint: Optional[str] = None) -> NoReturn:
+def die(message: str, *, hint: Optional[str] = None) -> NoReturn:
logging.error(f"{message}")
if hint:
logging.info(f"({hint})")
logging.WARNING: logging.Formatter(f"‣ {Style.yellow}{fmt}{Style.reset}"),
logging.ERROR: logging.Formatter(f"‣ {Style.red}{fmt}{Style.reset}"),
logging.CRITICAL: logging.Formatter(f"‣ {Style.red}{Style.bold}{fmt}{Style.reset}"),
- }
+ } # fmt: skip
super().__init__(fmt, *args, **kwargs)
],
stdout=subprocess.PIPE,
sandbox=self.context.sandbox(binary="rpm", options=["--ro-bind", self.context.root, "/buildroot"]),
- )
+ ) # fmt: skip
packages = sorted(c.stdout.splitlines())
# packages that were installed in this execution of mkosi. We assume that the
# upper layer is put together in one go, which currently is always true.
if (
- self.context.config.base_trees and
- datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp
+ self.context.config.base_trees
+ and datetime.datetime.fromtimestamp(int(installtime)) < self._init_timestamp
):
continue
"dpkg-query",
"--admindir=/buildroot/var/lib/dpkg",
"--show",
- "--showformat",
- r'${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n',
+ "--showformat", r"${Package}\t${source:Package}\t${Version}\t${Architecture}\t${Installed-Size}\t${db-fsys:Last-Modified}\n",
],
stdout=subprocess.PIPE,
sandbox=self.context.sandbox(
binary="dpkg-query",
options=["--ro-bind", self.context.root, "/buildroot"],
),
- )
+ ) # fmt: skip
packages = sorted(c.stdout.splitlines())
# packages that were installed in this execution of mkosi. We assume that the
# upper layer is put together in one go, which currently is always true.
if (
- self.context.config.base_trees and
- datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) < self._init_timestamp
+ self.context.config.base_trees
+ and datetime.datetime.fromtimestamp(int(installtime) if installtime else 0) < self._init_timestamp
):
continue
"--overlay-upperdir", upperdir,
"--overlay-workdir", workdir,
"--overlay", dst,
- ]
+ ] # fmt: skip
else:
options += ["--bind", src, dst]
return flatten(
("--symlink", src.readlink(), target) if src.is_symlink() else ("--ro-bind", src, target)
- for src, target
- in sorted(set(mounts), key=lambda s: s[1])
+ for src, target in sorted(set(mounts), key=lambda s: s[1])
)
from mkosi.versioncomp import GenericVersion
QEMU_KVM_DEVICE_VERSION = GenericVersion("9.0")
-VHOST_VSOCK_SET_GUEST_CID = 0x4008af60
+VHOST_VSOCK_SET_GUEST_CID = 0x4008AF60
class QemuDeviceNode(StrEnum):
}[self]
def open(self) -> int:
- return os.open(self.device(), os.O_RDWR|os.O_CLOEXEC|os.O_NONBLOCK)
+ return os.open(self.device(), os.O_RDWR | os.O_CLOEXEC | os.O_NONBLOCK)
def available(self, log: bool = False) -> bool:
try:
def hash_to_vsock_cid(hash: "hashlib._Hash") -> int:
- cid = int.from_bytes(hash.digest()[:4], byteorder='little')
+ cid = int.from_bytes(hash.digest()[:4], byteorder="little")
# Make sure we don't return any of the well-known CIDs.
return max(3, min(cid, 0xFFFFFFFF - 1))
if not vsock_cid_in_use(vfd, cid):
return cid
- hash.update(i.to_bytes(length=4, byteorder='little'))
+ hash.update(i.to_bytes(length=4, byteorder="little"))
for i in range(64):
cid = random.randint(0, 0xFFFFFFFF - 1)
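# A worked example of the CID derivation and clamping above (sketch only):
# CIDs 0-2 are reserved for the hypervisor, loopback and the host, and
# 0xFFFFFFFF is VMADDR_CID_ANY, so derived CIDs are pinned into the usable
# range [3, 0xFFFFFFFE].
import hashlib

h = hashlib.sha256(b"mkosi-example")
cid = int.from_bytes(h.digest()[:4], byteorder="little")
cid = max(3, min(cid, 0xFFFFFFFF - 1))
assert 3 <= cid <= 0xFFFFFFFE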
class KernelType(StrEnum):
- pe = enum.auto()
- uki = enum.auto()
+ pe = enum.auto()
+ uki = enum.auto()
unknown = enum.auto()
@classmethod
"--no-announce-submounts",
"--sandbox=chroot",
f"--inode-file-handles={'prefer' if os.getuid() == 0 and not uidmap else 'never'}",
- ]
+ ] # fmt: skip
if selinux:
cmdline += ["--security-label"]
],
setup=scope + become_root_in_subuid_range_cmd() if scope and not uidmap else [],
),
- ) as proc:
+ ) as proc: # fmt: skip
yield path
proc.terminate()
with s:
data = []
try:
- while (buf := await loop.sock_recv(s, 4096)):
+ while buf := await loop.sock_recv(s, 4096):
data.append(buf)
except ConnectionResetError:
logging.debug("vsock notify listener connection reset by peer")
user=user if not scope else None,
group=group if not scope else None,
foreground=False,
- ) as proc:
+ ) as proc: # fmt: skip
yield
proc.terminate()
-
@contextlib.contextmanager
def start_journal_remote_vsock(config: Config) -> Iterator[str]:
with socket.socket(socket.AF_VSOCK, socket.SOCK_STREAM) as sock:
tmp = src.parent / f"{src.name}-{uuid.uuid4().hex[:16]}"
try:
+
def copy() -> None:
if config.output_format == OutputFormat.directory:
become_root_in_subuid_range()
)
copy_tree(
- src, tmp,
+ src,
+ tmp,
# Make sure the ownership is changed to the (fake) root user if the directory was not built as root.
preserve=config.output_format == OutputFormat.directory and src.stat().st_uid == 0,
use_subvolumes=config.use_subvolumes,
fork_and_wait(copy)
yield tmp
finally:
+
def rm() -> None:
if config.output_format == OutputFormat.directory:
become_root_in_subuid_range()
def want_scratch(config: Config) -> bool:
return config.runtime_scratch == ConfigFeature.enabled or (
- config.runtime_scratch == ConfigFeature.auto and
- config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None
+ config.runtime_scratch == ConfigFeature.auto
+ and config.find_binary(f"mkfs.{config.distribution.filesystem()}") is not None
)
run(
[f"mkfs.{fs}", "-L", "scratch", *extra.split(), scratch.name],
stdout=subprocess.DEVNULL,
- sandbox=config.sandbox(binary= f"mkfs.{fs}", options=["--bind", scratch.name, scratch.name]),
+ sandbox=config.sandbox(binary=f"mkfs.{fs}", options=["--bind", scratch.name, scratch.name]),
)
yield Path(scratch.name)
else QemuFirmware.linux
)
elif (
- config.output_format in (OutputFormat.cpio, OutputFormat.directory) or
- config.architecture.to_efi() is None
+ config.output_format in (OutputFormat.cpio, OutputFormat.directory) or config.architecture.to_efi() is None
):
return QemuFirmware.linux
else:
"--ro-bind", config.secure_boot_certificate, config.secure_boot_certificate,
],
),
- )
+ ) # fmt: skip
else:
tools = Path("/") if any(qemu.is_relative_to(d) for d in config.extra_search_paths) else config.tools()
vars = (
image,
],
sandbox=config.sandbox(binary="systemd-repart", options=["--bind", image, image]),
- )
+ ) # fmt: skip
@contextlib.contextmanager
with flock(INVOKING_USER.runtime_dir() / "machine"):
if (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists():
- die(f"Another virtual machine named {config.machine_or_name()} is already running",
- hint="Use --machine to specify a different virtual machine name")
+ die(
+ f"Another virtual machine named {config.machine_or_name()} is already running",
+ hint="Use --machine to specify a different virtual machine name",
+ )
p.write_text(
json.dumps(
if os.getuid() != 0 and "DBUS_SESSION_BUS_ADDRESS" in os.environ and "XDG_RUNTIME_DIR" in os.environ:
env = {
"DBUS_SESSION_BUS_ADDRESS": os.environ["DBUS_SESSION_BUS_ADDRESS"],
- "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"]
+ "XDG_RUNTIME_DIR": os.environ["XDG_RUNTIME_DIR"],
}
elif os.getuid() == 0:
if "DBUS_SYSTEM_ADDRESS" in os.environ:
*(["--uid", str(user)] if user is not None else []),
*(["--gid", str(group)] if group is not None else []),
*([f"--property={p}" for p in properties]),
- ]
+ ] # fmt: skip
def register_machine(config: Config, pid: int, fname: Path) -> None:
- if (
- os.getuid() != 0 or
- ("DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").exists())
+ if os.getuid() != 0 or (
+ "DBUS_SYSTEM_ADDRESS" not in os.environ and not Path("/run/dbus/system_bus_socket").exists()
):
return
"vm",
str(pid),
fname if fname.is_dir() else "",
- ],
+ ], # fmt: skip
foreground=False,
env=os.environ | config.environment,
sandbox=config.sandbox(binary="busctl", relaxed=True),
die(f"{config.output_format} images cannot be booted in qemu")
if (
- config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp) and
- config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux) and
- not config.qemu_firmware.is_uefi()
+ config.output_format in (OutputFormat.cpio, OutputFormat.uki, OutputFormat.esp)
+ and config.qemu_firmware not in (QemuFirmware.auto, QemuFirmware.linux)
+ and not config.qemu_firmware.is_uefi()
):
die(f"{config.output_format} images cannot be booted with the '{config.qemu_firmware}' firmware")
# after unsharing the user namespace. To get around this, we open all those device nodes early and pass them as file
# descriptors to qemu later. Note that we can't pass the kvm file descriptor to qemu until version 9.0.
qemu_device_fds = {
- d: d.open()
- for d in QemuDeviceNode
- if d.feature(config) != ConfigFeature.disabled and d.available(log=True)
+ d: d.open() for d in QemuDeviceNode if d.feature(config) != ConfigFeature.disabled and d.available(log=True)
}
if not (qemu := config.find_binary(f"qemu-system-{config.architecture.to_qemu()}")):
die("qemu not found.", hint=f"Is qemu-system-{config.architecture.to_qemu()} installed on the host system?")
- have_kvm = ((qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or
- (qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds))
+ have_kvm = (qemu_version(config, qemu) < QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm.available()) or (
+ qemu_version(config, qemu) >= QEMU_KVM_DEVICE_VERSION and QemuDeviceNode.kvm in qemu_device_fds
+ )
if config.qemu_kvm == ConfigFeature.enabled and not have_kvm:
die("KVM acceleration requested but cannot access /dev/kvm")
firmware = finalize_qemu_firmware(config, kernel)
- if (
- not kernel and
- (
- firmware == QemuFirmware.linux or
- config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki)
- )
+ if not kernel and (
+ firmware == QemuFirmware.linux
+ or config.output_format in (OutputFormat.cpio, OutputFormat.directory, OutputFormat.uki)
):
if firmware.is_uefi():
name = config.output if config.output_format == OutputFormat.uki else config.output_split_uki
# A shared memory backend might increase RAM usage, so only add one if it's actually necessary for virtiofsd.
shm = []
if (
- config.runtime_trees or
- config.runtime_build_sources or
- config.runtime_home or
- config.output_format == OutputFormat.directory
+ config.runtime_trees
+ or config.runtime_build_sources
+ or config.runtime_home
+ or config.output_format == OutputFormat.directory
):
shm = ["-object", f"memory-backend-memfd,id=mem,size={config.qemu_mem // 1024**2}M,share=on"]
"-device", "virtio-balloon,free-page-reporting=on",
"-no-user-config",
*shm,
- ]
+ ] # fmt: skip
if config.runtime_network == Network.user:
cmdline += ["-nic", f"user,model={config.architecture.default_qemu_nic_model()}"]
cid = config.qemu_vsock_cid
if vsock_cid_in_use(qemu_device_fds[QemuDeviceNode.vhost_vsock], cid):
- die(f"VSock connection ID {cid} is already in use by another virtual machine",
- hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID")
+ die(
+ f"VSock connection ID {cid} is already in use by another virtual machine",
+ hint="Use QemuVsockConnectionId=auto to have mkosi automatically find a free vsock connection ID",
+ )
index = list(qemu_device_fds.keys()).index(QemuDeviceNode.vhost_vsock)
- cmdline += [
- "-device",
- f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}"
- ]
+ cmdline += ["-device", f"vhost-vsock-pci,guest-cid={cid},vhostfd={SD_LISTEN_FDS_START + index}"]
cmdline += ["-cpu", "max"]
"-device", "virtio-serial-pci,id=mkosi-virtio-serial-pci",
"-device", "virtconsole,chardev=console",
"-mon", "console",
- ]
+ ] # fmt: skip
# QEMU has built-in logic to look for the BIOS firmware so we don't need to do anything special for that.
if firmware.is_uefi():
cmdline += [
"-global", "ICH9-LPC.disable_s3=1",
"-global", "driver=cfi.pflash01,property=secure,value=on",
- ]
+ ] # fmt: skip
if config.qemu_cdrom and config.output_format in (OutputFormat.disk, OutputFormat.esp):
# CD-ROM devices have sector size 2048 so we transform disk images into ones with sector size 2048.
"--sector-size=2048",
"--copy-from", workdir(src),
workdir(fname),
- ],
+ ], # fmt: skip
sandbox=config.sandbox(
binary="systemd-repart",
options=[
"--ro-bind", src, workdir(src),
],
),
- )
+ ) # fmt: skip
stack.callback(lambda: fname.unlink())
else:
fname = stack.enter_context(
apply_runtime_size(config, fname)
- if (
- kernel and
- (
- KernelType.identify(config, kernel) != KernelType.uki or
- not config.architecture.supports_smbios(firmware)
- )
+ if kernel and (
+ KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware)
):
kcl = config.kernel_command_line + config.kernel_command_line_extra
else:
fname,
name=config.machine_or_name(),
uidmap=False,
- selinux=bool(want_selinux_relabel(config, fname, fatal=False))),
+ selinux=bool(want_selinux_relabel(config, fname, fatal=False)),
+ ),
)
cmdline += [
"-chardev", f"socket,id={sock.name},path={sock}",
"-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag=root",
- ]
+ ] # fmt: skip
kcl += ["root=root", "rootfstype=virtiofs"]
credentials = dict(config.credentials)
def add_virtiofs_mount(
- sock: Path,
- dst: PathString,
- cmdline: list[PathString],
- credentials: dict[str, str],
- *, tag: str
+ sock: Path, dst: PathString, cmdline: list[PathString], credentials: dict[str, str], *, tag: str
) -> None:
cmdline += [
"-chardev", f"socket,id={sock.name},path={sock}",
"-device", f"vhost-user-fs-pci,queue-size=1024,chardev={sock.name},tag={tag}",
- ]
+ ] # fmt: skip
if "fstab.extra" not in credentials:
credentials["fstab.extra"] = ""
cmdline += [
"-drive", f"if=none,id=scratch,file={scratch},format=raw,discard=on,{cache}",
"-device", "scsi-hd,drive=scratch",
- ]
+ ] # fmt: skip
kcl += [f"systemd.mount-extra=LABEL=scratch:/var/tmp:{config.distribution.filesystem()}"]
if config.output_format == OutputFormat.cpio:
cmdline += ["-initrd", fname]
elif (
- kernel and KernelType.identify(config, kernel) != KernelType.uki and
- "-initrd" not in args.cmdline and
- (config.output_dir_or_cwd() / config.output_split_initrd).exists()
+ kernel
+ and KernelType.identify(config, kernel) != KernelType.uki
+ and "-initrd" not in args.cmdline
+ and (config.output_dir_or_cwd() / config.output_split_initrd).exists()
):
cmdline += ["-initrd", config.output_dir_or_cwd() / config.output_split_initrd]
direct = fname.stat().st_size % resource.getpagesize() == 0
ephemeral = config.ephemeral
cache = f"cache.writeback=on,cache.direct={yes_no(direct)},cache.no-flush={yes_no(ephemeral)},aio=io_uring"
- cmdline += ["-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}",
- "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1"]
-
- if (
- config.qemu_swtpm == ConfigFeature.enabled or
- (
- config.qemu_swtpm == ConfigFeature.auto and
- firmware.is_uefi() and
- config.find_binary("swtpm") is not None
- )
+ cmdline += [
+ "-drive", f"if=none,id=mkosi,file={fname},format=raw,discard=on,{cache}",
+ "-device", f"scsi-{'cd' if config.qemu_cdrom else 'hd'},drive=mkosi,bootindex=1",
+ ] # fmt: skip
+
+ if config.qemu_swtpm == ConfigFeature.enabled or (
+ config.qemu_swtpm == ConfigFeature.auto and firmware.is_uefi() and config.find_binary("swtpm") is not None
):
sock = stack.enter_context(start_swtpm(config))
- cmdline += ["-chardev", f"socket,id=chrtpm,path={sock}",
- "-tpmdev", "emulator,id=tpm0,chardev=chrtpm"]
+ cmdline += [
+ "-chardev", f"socket,id=chrtpm,path={sock}",
+ "-tpmdev", "emulator,id=tpm0,chardev=chrtpm",
+ ] # fmt: skip
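+ # Hedged note: swtpm emulates a TPM behind a UNIX socket; qemu's "emulator"
+ # tpmdev talks to it through that chardev, while the front-end device added
+ # below depends on the guest architecture.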
if config.architecture.is_x86_variant():
cmdline += ["-device", "tpm-tis,tpmdev=tpm0"]
elif kernel:
kcl += [f"systemd.set_credential_binary={k}:{payload}"]
- if (
- kernel and
- (
- KernelType.identify(config, kernel) != KernelType.uki or
- not config.architecture.supports_smbios(firmware)
- )
+ if kernel and (
+ KernelType.identify(config, kernel) != KernelType.uki or not config.architecture.supports_smbios(firmware)
):
cmdline += ["-append", " ".join(kcl)]
elif config.architecture.supports_smbios(firmware):
def run_ssh(args: Args, config: Config) -> None:
with flock(INVOKING_USER.runtime_dir() / "machine"):
if not (p := INVOKING_USER.runtime_dir() / "machine" / f"{config.machine_or_name()}.json").exists():
- die(f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}",
- hint="Is the machine running and was it built with Ssh=yes and QemuVsock=yes?")
+ die(
+ f"{p} not found, cannot SSH into virtual machine {config.machine_or_name()}",
+ hint="Is the machine running and was it built with Ssh=yes and QemuVsock=yes?",
+ )
state = json.loads(p.read_text())
if not state["SshKey"]:
- die("An SSH key must be configured when booting the image to use 'mkosi ssh'",
- hint="Use 'mkosi genkey' to generate a new SSH key and certificate")
+ die(
+ "An SSH key must be configured when booting the image to use 'mkosi ssh'",
+ hint="Use 'mkosi genkey' to generate a new SSH key and certificate",
+ )
cmd: list[PathString] = [
"ssh",
"-o", "LogLevel=ERROR",
"-o", f"ProxyCommand={state['ProxyCommand']}",
"root@mkosi",
- ]
+ ] # fmt: skip
cmd += args.cmdline
# Failures from self come from the forks we spawn to build images in a user namespace. We've already done all
# the logging for those failures so we don't log stacktraces for those either.
if (
- ARG_DEBUG.get() and
- e.cmd and
- str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn") and
- "qemu-system" not in str(e.cmd[0])
+ ARG_DEBUG.get()
+ and e.cmd
+ and str(e.cmd[0]) not in ("self", "ssh", "systemd-nspawn")
+ and "qemu-system" not in str(e.cmd[0])
):
sys.excepthook(*ensure_exc_info())
except BaseException:
logging.error(f"{cmdline[0]} not found.")
else:
logging.error(
- f"\"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}\" returned non-zero exit code "
+ f'"{shlex.join([*sandbox, *cmdline] if ARG_DEBUG.get() else cmdline)}" returned non-zero exit code '
f"{returncode}."
)
directory instead of from the host.
"""
return flatten(
- ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}")
- for f in ("passwd", "group", "shadow", "gshadow")
+ ("--ro-bind-try", Path(root) / "etc" / f, f"/etc/{f}") for f in ("passwd", "group", "shadow", "gshadow")
)
return [
"--setenv", "SYSTEMD_OFFLINE", one_zero(network),
*(["--unshare-net"] if not network else []),
- ]
+ ] # fmt: skip
@contextlib.contextmanager
(d / "work").rmdir()
except OSError as e:
if e.errno == errno.ENOTEMPTY:
+
def remove() -> None:
acquire_privileges()
shutil.rmtree(d)
# apivfs_script_cmd() and chroot_script_cmd() are executed from within the sandbox, but they still use
# sandbox.py, so we make sure it is available inside the sandbox so it can be executed there as well.
"--ro-bind", Path(mkosi.sandbox.__file__), "/sandbox.py",
- ]
+ ] # fmt: skip
if overlay and (overlay / "usr").exists():
cmdline += [
"--overlay-lowerdir", tools / "usr"
"--overlay-lowerdir", overlay / "usr",
"--overlay", "/usr",
- ]
+ ] # fmt: skip
else:
cmdline += ["--ro-bind", tools / "usr", "/usr"]
"--dir", "/var/log",
"--unshare-ipc",
"--symlink", "../proc/self/mounts", "/etc/mtab",
- ]
+ ] # fmt: skip
if devices:
cmdline += ["--bind", "/sys", "/sys", "--bind", "/dev", "/dev"]
"--overlay-upperdir", tmp or "tmpfs",
*(["--overlay-workdir", str(work)] if work else []),
"--overlay", Path("/") / d,
- ]
+ ] # fmt: skip
elif not relaxed:
if tmp:
cmdline += ["--bind", tmp, Path("/") / d]
# Make sure anything running in the root directory thinks it's in a container. $container can't always
# be accessed, so we write /run/host/container-manager as well, which is always accessible.
"--write", "mkosi", root / "run/host/container-manager",
- ]
+ ] # fmt: skip
def chroot_options() -> list[PathString]:
"--setenv", "HOME", "/",
"--setenv", "PATH", "/usr/bin:/usr/sbin",
"--setenv", "BUILDROOT", "/",
- ]
+ ] # fmt: skip
@contextlib.contextmanager
*network_options(network=network),
*apivfs_options(root=Path("/")),
*chroot_options(),
- ]
+ ] # fmt: skip
if network and Path("/etc/resolv.conf").exists():
cmdline += ["--ro-bind", "/etc/resolv.conf", "/etc/resolv.conf"]
SCMP_ACT_ALLOW = 0x7FFF0000
SCMP_ACT_ERRNO = 0x00050000
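+# Sketch of the libseccomp action encoding these constants mirror: the errno
+# to return lives in the low 16 bits, e.g. SCMP_ACT_ERRNO | errno.EPERM makes
+# filtered syscalls fail with EPERM rather than being allowed.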
+
class mount_attr(ctypes.Structure):
_fields_ = [
("attr_set", ctypes.c_uint64),
try:
libc.mount_setattr.argtypes = (
- ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t,
+ ctypes.c_int,
+ ctypes.c_char_p,
+ ctypes.c_uint,
+ ctypes.c_void_p,
+ ctypes.c_size_t,
)
r = libc.mount_setattr(fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0)
except AttributeError:
libc.syscall.argtypes = (
- ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint, ctypes.c_void_p, ctypes.c_size_t,
+ ctypes.c_long,
+ ctypes.c_int,
+ ctypes.c_char_p,
+ ctypes.c_uint,
+ ctypes.c_void_p,
+ ctypes.c_size_t,
)
r = libc.syscall(NR_mount_setattr, fd, b"", flags, ctypes.addressof(attr), MOUNT_ATTR_SIZE_VER0)
r = libc.move_mount(fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH)
except AttributeError:
libc.syscall.argtypes = (
- ctypes.c_long, ctypes.c_int, ctypes.c_char_p, ctypes.c_int, ctypes.c_char_p, ctypes.c_uint,
+ ctypes.c_long,
+ ctypes.c_int,
+ ctypes.c_char_p,
+ ctypes.c_int,
+ ctypes.c_char_p,
+ ctypes.c_uint,
)
r = libc.syscall(NR_move_mount, fd, b"", AT_FDCWD, dst.encode(), MOVE_MOUNT_F_EMPTY_PATH)
# Drop all bind mounts that are mounted from beneath another bind mount to the same
# location within the new rootfs.
optimized = [
- m for m in binds
+ m
+ for m in binds
if not any(
- m != n and
- m.readonly == n.readonly and
- m.required == n.required and
- is_relative_to(m.src, n.src) and
- is_relative_to(m.dst, n.dst) and
- os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst)
+ m != n
+ and m.readonly == n.readonly
+ and m.required == n.required
+ and is_relative_to(m.src, n.src)
+ and is_relative_to(m.dst, n.dst)
+ and os.path.relpath(m.src, n.src) == os.path.relpath(m.dst, n.dst)
for n in binds
)
]
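+ # Worked example of the filter above: given binds ("/usr" -> "/usr") and
+ # ("/usr/bin" -> "/usr/bin"), the second is dropped since it is reachable
+ # beneath the first and maps to the same relative location.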
mount("overlayfs", dst, "overlay", 0, ",".join(options))
-ANSI_HIGHLIGHT = "\x1B[0;1;39m" if os.isatty(2) else ""
-ANSI_NORMAL = "\x1B[0m" if os.isatty(2) else ""
+ANSI_HIGHLIGHT = "\x1b[0;1;39m" if os.isatty(2) else ""
+ANSI_NORMAL = "\x1b[0m" if os.isatty(2) else ""
HELP = f"""\
mkosi-sandbox [OPTIONS...] COMMAND [ARGUMENTS...]
See the mkosi-sandbox(1) man page for details.\
"""
+
def main() -> None:
# We don't use argparse as it takes +- 10ms to import and since this is purely for internal
# use, it's not necessary to have good UX for this CLI interface so it's trivial to write
os.chdir("/tmp")
with umask(~0o755):
- os.mkdir("newroot") # This is where we set up the sandbox rootfs
- os.mkdir("oldroot") # This is the old rootfs which is used as the source for mounts in the new rootfs.
+ os.mkdir("newroot") # This is where we set up the sandbox rootfs
+ os.mkdir("oldroot") # This is the old rootfs which is used as the source for mounts in the new rootfs.
# Make sure that newroot is a mountpoint.
mount("newroot", "newroot", "", MS_BIND | MS_REC, "")
die("SplitArtifacts= must be enabled to be able to use mkosi sysupdate")
if not config.sysupdate_dir:
- die("No sysupdate definitions directory specified",
- hint="Specify a directory containing systemd-sysupdate transfer definitions with SysupdateDirectory=")
+ die(
+ "No sysupdate definitions directory specified",
+ hint="Specify a directory containing systemd-sysupdate transfer definitions with SysupdateDirectory=",
+ )
if not (sysupdate := config.find_binary("systemd-sysupdate", "/usr/lib/systemd/systemd-sysupdate")):
die("Could not find systemd-sysupdate")
"--definitions", config.sysupdate_dir,
"--transfer-source", config.output_dir_or_cwd(),
*args.cmdline,
- ]
+ ] # fmt: skip
run(
cmd,
options=[
*(["--bind", "/boot", "/boot"] if Path("/boot").exists() else []),
*(["--bind", "/efi", "/efi"] if Path("/efi").exists() else []),
- ]
+ ],
),
)
["cp", "--version"],
sandbox=sandbox(binary="cp"),
stdout=subprocess.PIPE,
- ).stdout.splitlines()[0].split()[3]
+ )
+ .stdout.splitlines()[0]
+ .split()[3]
)
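+ # Worked example of the parsing above, assuming the GNU coreutils banner:
+ # "cp (GNU coreutils) 9.4".splitlines()[0].split()[3] -> "9.4".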
result = run(
["btrfs", "subvolume", "create", workdir(path, sandbox)],
sandbox=sandbox(binary="btrfs", options=["--bind", path.parent, workdir(path.parent, sandbox)]),
- check=use_subvolumes == ConfigFeature.enabled
+ check=use_subvolumes == ConfigFeature.enabled,
).returncode
else:
result = 1
options: list[PathString] = [
"--ro-bind", src, workdir(src, sandbox),
"--bind", dst.parent, workdir(dst.parent, sandbox),
- ]
+ ] # fmt: skip
def copy() -> None:
cmdline: list[PathString] = [
f"--preserve=mode,links{',timestamps,ownership,xattr' if preserve else ''}",
"--reflink=auto",
"--copy-contents",
- workdir(src, sandbox), workdir(dst, sandbox),
+ workdir(src, sandbox),
+ workdir(dst, sandbox),
]
if dst.exists() and dst.is_dir() and any(dst.iterdir()) and cp_version(sandbox=sandbox) >= "9.5":
# Subvolumes always have inode 256 so we can use that to check if a directory is a subvolume.
if (
- use_subvolumes == ConfigFeature.disabled or
- not preserve or
- not is_subvolume(src) or
- (dst.exists() and (not dst.is_dir() or any(dst.iterdir())))
+ use_subvolumes == ConfigFeature.disabled
+ or not preserve
+ or not is_subvolume(src)
+ or (dst.exists() and (not dst.is_dir() or any(dst.iterdir())))
):
- with (
- preserve_target_directories_stat(src, dst)
- if not preserve
- else contextlib.nullcontext()
- ):
+ with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext():
copy()
return dst
).returncode
if result != 0:
- with (
- preserve_target_directories_stat(src, dst)
- if not preserve
- else contextlib.nullcontext()
- ):
+ with preserve_target_directories_stat(src, dst) if not preserve else contextlib.nullcontext():
copy()
return dst
if subvolumes := sorted({p for p in paths if p.exists() and is_subvolume(p)}):
# Silence and ignore failures since when not running as root, this will fail with a permission error unless the
# btrfs filesystem is mounted with user_subvol_rm_allowed.
- run(["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)],
+ run(
+ ["btrfs", "subvolume", "delete", *(workdir(p, sandbox) for p in subvolumes)],
check=False,
sandbox=sandbox(
- binary="btrfs",
- options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes)
+ binary="btrfs", options=flatten(("--bind", p.parent, workdir(p.parent, sandbox)) for p in subvolumes)
),
stdout=subprocess.DEVNULL if not ARG_DEBUG.get() else None,
- stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None)
+ stderr=subprocess.DEVNULL if not ARG_DEBUG.get() else None,
+ )
filtered = sorted({p for p in paths if p.exists() or p.is_symlink()})
if filtered:
dst: Path,
*,
use_subvolumes: ConfigFeature = ConfigFeature.disabled,
- sandbox: SandboxProtocol = nosandbox
+ sandbox: SandboxProtocol = nosandbox,
) -> Path:
src = src.absolute()
dst = dst.absolute()
# https://github.com/python/typeshed/blob/ec52bf1adde1d3183d0595d2ba982589df48dff1/stdlib/_typeshed/__init__.pyi#L224
_T_co = TypeVar("_T_co", covariant=True)
+
class SupportsRead(Protocol[_T_co]):
def read(self, __length: int = ...) -> _T_co: ...
# If we created a file/directory in a parent directory owned by a regular user, make sure the path and any
# parent directories are owned by the invoking user as well.
- if (q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None)):
+ if q := next((parent for parent in path.parents if cls.is_regular_user(parent.stat().st_uid)), None):
st = q.stat()
os.chown(path, st.st_uid, st.st_gid)
0, subuid, SUBRANGE - 100,
SUBRANGE - 100, os.getuid(), 1,
SUBRANGE - 100 + 1, subuid + SUBRANGE - 100 + 1, 99
- ]
+ ] # fmt: skip
newgidmap = [
"flock", "--exclusive", "--close", lock, "newgidmap", pid,
0, subgid, SUBRANGE - 100,
SUBRANGE - 100, os.getgid(), 1,
SUBRANGE - 100 + 1, subgid + SUBRANGE - 100 + 1, 99
- ]
+ ] # fmt: skip
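+ # The triplets above follow newuidmap(1)'s "uid loweruid count" layout, where
+ # uid is the id inside the namespace: 0..SUBRANGE-101 map to the subordinate
+ # range, the invoking user lands at SUBRANGE-100, and 99 more ids fill the tail.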
# newuidmap and newgidmap have to run from outside the user namespace to be able to assign a uid mapping to the
# process in the user namespace. The mapping can only be assigned after the user namespace has been unshared.
"--map-groups", f"{SUBRANGE - 100}:{os.getgid()}:1",
"--map-groups", f"{SUBRANGE - 100 + 1}:{subgid + SUBRANGE - 100 + 1}:99",
"--keep-caps",
- ]
+ ] # fmt: skip
return [str(x) for x in cmd]
@contextlib.contextmanager
def flock(path: Path, flags: int = fcntl.LOCK_EX) -> Iterator[int]:
- fd = os.open(path, os.O_CLOEXEC|os.O_RDONLY)
+ fd = os.open(path, os.O_CLOEXEC | os.O_RDONLY)
try:
fcntl.fcntl(fd, fcntl.FD_CLOEXEC)
logging.debug(f"Acquiring lock on {path}")
@contextlib.contextmanager
def flock_or_die(path: Path) -> Iterator[Path]:
try:
- with flock(path, fcntl.LOCK_EX|fcntl.LOCK_NB):
+ with flock(path, fcntl.LOCK_EX | fcntl.LOCK_NB):
yield path
except OSError as e:
if e.errno != errno.EWOULDBLOCK:
raise e
- die(f"Cannot lock {path} as it is locked by another process",
+ die(
+ f"Cannot lock {path} as it is locked by another process",
hint="Maybe another mkosi process is still using it? Use Ephemeral=yes to enable booting multiple "
- "instances of the same image")
+ "instances of the same image",
+ )
@contextlib.contextmanager
def parents_below(path: Path, below: Path) -> list[Path]:
parents = list(path.parents)
- return parents[:parents.index(below)]
+ return parents[: parents.index(below)]
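+ # Worked example: for path /a/b/c/d and below=/a, path.parents is
+ # (/a/b/c, /a/b, /a, /), so this returns [/a/b/c, /a/b].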
@contextlib.contextmanager
with as_file(t) as p:
# Make sure any temporary directory that the resources are unpacked in is accessible to the invoking user so
# that any commands executed as the invoking user can access files within it.
- if (
- p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and
- stat.S_IMODE(p.parent.stat().st_mode) == 0o700
- ):
+ if p.parent.parent == Path(os.getenv("TMPDIR", "/tmp")) and stat.S_IMODE(p.parent.stat().st_mode) == 0o700:
p.parent.chmod(0o755)
yield p
def hash_file(path: Path) -> str:
# TODO Replace with hashlib.file_digest after dropping support for Python 3.10.
h = hashlib.sha256()
- b = bytearray(16 * 1024**2)
+ b = bytearray(16 * 1024**2)
mv = memoryview(b)
with path.open("rb", buffering=0) as f:
@classmethod
def compare_versions(cls, v1: str, v2: str) -> int:
"""Implements comparison according to UAPI Group Version Format Specification"""
+
def rstrip_invalid_version_chars(s: str) -> str:
valid_version_chars = {*string.ascii_letters, *string.digits, "~", "-", "^", "."}
for i, c in enumerate(s):
v2 = v2.removeprefix("^")
elif v1.startswith("^"):
# TODO: bug?
- return cls._LEFT_SMALLER #cls._RIGHT_SMALLER
+ return cls._LEFT_SMALLER # cls._RIGHT_SMALLER
elif v2.startswith("^"):
- return cls._RIGHT_SMALLER #cls._LEFT_SMALLER
+ return cls._RIGHT_SMALLER # cls._LEFT_SMALLER
# If the remaining part of one of the strings starts with ".": if the other remaining part
# does not start with ., the string with . compares lower. Otherwise, both dot
if not kernel.exists():
die(
f"Kernel or UKI not found at {kernel}",
- hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn"
+ hint="Please install a kernel in the image or provide a --qemu-kernel argument to mkosi vmspawn",
)
cmdline: list[PathString] = [
"--vsock", config.qemu_vsock.to_tristate(),
"--tpm", config.qemu_swtpm.to_tristate(),
"--secure-boot", yes_no(config.secure_boot),
- ]
+ ] # fmt: skip
if config.runtime_network == Network.user:
cmdline += ["--network-user-mode"]
user=user,
group=group,
env=os.environ,
- )
+ ) # fmt: skip
def build(self, options: Sequence[PathString] = (), args: Sequence[str] = ()) -> CompletedProcess:
kcl = [
"--output-dir", self.output_dir,
*(["--debug-shell"] if self.config.debug_shell else []),
*options,
- ]
+ ] # fmt: skip
self.mkosi("summary", options, user=self.uid, group=self.uid)
def have_vmspawn() -> bool:
return (
find_binary("systemd-vmspawn") is not None
- and GenericVersion(run(["systemd-vmspawn", "--version"],
- stdout=subprocess.PIPE).stdout.strip()) >= 256
+ and GenericVersion(run(["systemd-vmspawn", "--version"], stdout=subprocess.PIPE).stdout.strip()) >= 256
)
def test_compression_enum_str() -> None:
assert str(Compression.none) == "none"
assert str(Compression.zstd) == "zstd"
- assert str(Compression.zst) == "zstd"
- assert str(Compression.xz) == "xz"
- assert str(Compression.bz2) == "bz2"
- assert str(Compression.gz) == "gz"
- assert str(Compression.lz4) == "lz4"
+ assert str(Compression.zst) == "zstd"
+ assert str(Compression.xz) == "xz"
+ assert str(Compression.bz2) == "bz2"
+ assert str(Compression.gz) == "gz"
+ assert str(Compression.lz4) == "lz4"
assert str(Compression.lzma) == "lzma"
"--credential", "my.cred=cli.value",
"--repositories", "universe",
]
- )
+ ) # fmt: skip
# Values from the CLI should take priority.
assert config.distribution == Distribution.fedora
"--credential", "",
"--repositories", "",
]
- )
+ ) # fmt: skip
# Empty values on the CLI reset non-collection-based settings to their defaults and collection-based
# settings to empty collections.
assert "testpkg3" in conf.packages
-@pytest.mark.parametrize(
- "release1,release2", itertools.combinations_with_replacement([36, 37, 38], 2)
-)
+@pytest.mark.parametrize("release1,release2", itertools.combinations_with_replacement([36, 37, 38], 2))
def test_match_release(tmp_path: Path, release1: int, release2: int) -> None:
with chdir(tmp_path):
parent = Path("mkosi.conf")
@pytest.mark.parametrize(
- "image1,image2", itertools.combinations_with_replacement(
- ["image_a", "image_b", "image_c"], 2
- )
+ "image1,image2", itertools.combinations_with_replacement(["image_a", "image_b", "image_c"], 2)
)
def test_match_imageid(tmp_path: Path, image1: str, image2: str) -> None:
with chdir(tmp_path):
@pytest.mark.parametrize(
- "op,version", itertools.product(
+ "op,version",
+ itertools.product(
["", "==", "<", ">", "<=", ">="],
[122, 123, 124],
- )
+ ),
)
def test_match_imageversion(tmp_path: Path, op: str, version: str) -> None:
opfunc = {
"<=": operator.le,
">": operator.gt,
">=": operator.ge,
- }.get(op, operator.eq,)
+ }.get(op, operator.eq)
with chdir(tmp_path):
parent = Path("mkosi.conf")
def test_kernel_specifiers(tmp_path: Path) -> None:
- kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638
+ kver = "13.0.8-5.10.0-1057-oem" # taken from reporter of #1638
token = "MySystemImage"
roothash = "67e893261799236dcf20529115ba9fae4fd7c2269e1e658d42269503e5760d38"
boot_count = "3"
_, [sub, config] = parse_config()
expected = {
- "TestValue1": "100", # from other.env
- "TestValue2": "300", # from mkosi.conf
- "TestValue3": "400", # from mkosi.conf
- "TestValue4": "99", # from mkosi.env
+ "TestValue1": "100", # from other.env
+ "TestValue2": "300", # from mkosi.conf
+ "TestValue3": "400", # from mkosi.conf
+ "TestValue4": "99", # from mkosi.env
}
# Only check values for keys from expected, as config.environment contains other items as well
lvm.rename(Path(image.output_dir) / "image.raw")
- image.qemu([
- "--qemu-firmware=linux",
- # LVM confuses systemd-repart so we mask it for this test.
- "--kernel-command-line-extra=systemd.mask=systemd-repart.service",
- "--kernel-command-line-extra=root=LABEL=root",
- ])
+ image.qemu(
+ [
+ "--qemu-firmware=linux",
+ # LVM confuses systemd-repart so we mask it for this test.
+ "--kernel-command-line-extra=systemd.mask=systemd-repart.service",
+ "--kernel-command-line-extra=root=LABEL=root",
+ ]
+ )
def test_initrd_luks(config: ImageConfig, passphrase: Path) -> None:
"luksFormat",
f"{lodev}p1",
]
- )
+ ) # fmt: skip
run(["cryptsetup", "--key-file", passphrase, "luksOpen", f"{lodev}p1", "lvm_root"])
stack.callback(lambda: run(["cryptsetup", "close", "lvm_root"]))
luks_uuid = run(["cryptsetup", "luksUUID", f"{lodev}p1"], stdout=subprocess.PIPE).stdout.strip()
lvm.rename(Path(image.output_dir) / "image.raw")
- image.qemu([
- "--format=disk",
- "--credential=cryptsetup.passphrase=mkosi",
- "--qemu-firmware=linux",
- "--kernel-command-line-extra=root=LABEL=root",
- f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}",
- ])
+ image.qemu(
+ [
+ "--format=disk",
+ "--credential=cryptsetup.passphrase=mkosi",
+ "--qemu-firmware=linux",
+ "--kernel-command-line-extra=root=LABEL=root",
+ f"--kernel-command-line-extra=rd.luks.uuid={luks_uuid}",
+ ]
+ )
def test_initrd_size(config: ImageConfig) -> None:
build_sources_ephemeral=True,
cache_dir=Path("/is/this/the/cachedir"),
cacheonly=Cacheonly.always,
- checksum= False,
+ checksum=False,
clean_package_metadata=ConfigFeature.auto,
clean_scripts=[Path("/clean")],
compress_level=3,
compress_output=Compression.bz2,
configure_scripts=[Path("/configure")],
- credentials= {"credkey": "credval"},
+ credentials={"credkey": "credval"},
dependencies=["dep1"],
distribution=Distribution.fedora,
environment={"foo": "foo", "BAR": "BAR", "Qux": "Qux"},
with_docs=True,
with_network=False,
with_recommends=True,
- with_tests= True,
+ with_tests=True,
workspace_dir=Path("/cwd"),
)
image.build(["--clean-package-metadata=no", "--format=directory"])
with Image(image.config) as sysext:
- sysext.build([
- "--directory", "",
- "--incremental=no",
- "--base-tree", Path(image.output_dir) / "image",
- "--overlay",
- "--package=dnsmasq",
- "--format=disk",
- ])
-
+ sysext.build(
+ [
+ "--directory",
+ "",
+ "--incremental=no",
+ "--base-tree",
+ Path(image.output_dir) / "image",
+ "--overlay",
+ "--package=dnsmasq",
+ "--format=disk",
+ ]
+ )
GenericVersion("124-1"),
],
),
- 2
- )
+ 2,
+ ),
)
def test_generic_version_strverscmp_improved_doc(
s1: tuple[int, GenericVersion],
i1, v1 = s1
i2, v2 = s2
assert (v1 == v2) == (i1 == i2)
- assert (v1 < v2) == (i1 < i2)
+ assert (v1 < v2) == (i1 < i2)
assert (v1 <= v2) == (i1 <= i2)
- assert (v1 > v2) == (i1 > i2)
+ assert (v1 > v2) == (i1 > i2)
assert (v1 >= v2) == (i1 >= i2)
assert (v1 != v2) == (i1 != i2)
RPMVERCMP("5.5p1", "5.5p10", -1)
RPMVERCMP("5.5p10", "5.5p1", 1)
- RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */
- RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */
+ RPMVERCMP("10xyz", "10.1xyz", 1) # Note: this is reversed from rpm's vercmp */
+ RPMVERCMP("10.1xyz", "10xyz", -1) # Note: this is reversed from rpm's vercmp */
RPMVERCMP("xyz10", "xyz10", 0)
RPMVERCMP("xyz10", "xyz10.1", -1)
RPMVERCMP("20101122", "20101121", 1)
RPMVERCMP("2_0", "2_0", 0)
- RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal
- RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal
+ RPMVERCMP("2.0", "2_0", -1) # Note: in rpm those compare equal
+ RPMVERCMP("2_0", "2.0", 1) # Note: in rpm those compare equal
# RhBug:178798 case */
RPMVERCMP("a", "a", 0)
print("/* RPM version comparison oddities */")
# RhBug:811992 case
RPMVERCMP("1b.fc17", "1b.fc17", 0)
- RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */
+ RPMVERCMP("1b.fc17", "1.fc17", 1) # Note: this is reversed from rpm's vercmp, WAT! */
RPMVERCMP("1.fc17", "1b.fc17", -1)
RPMVERCMP("1g.fc17", "1g.fc17", 0)
RPMVERCMP("1g.fc17", "1.fc17", 1)