if (minversion := last.minimum_version) and minversion > __version__:
die(f"mkosi {minversion} or newer is required by this configuration (found {__version__})")
- if args.verb in (Verb.journalctl, Verb.coredumpctl, Verb.ssh):
- # We don't use a tools tree for verbs that don't need an image build.
- last = dataclasses.replace(images[-1], tools_tree=None)
- return {
- Verb.ssh: run_ssh,
- Verb.journalctl: run_journalctl,
- Verb.coredumpctl: run_coredumpctl,
- }[args.verb](args, last)
-
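# ToolsTree=default means mkosi builds and manages its own tools tree; resolve its configuration here.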
if last.tools_tree and last.tools_tree == Path("default"):
tools = finalize_default_tools(last, resources=resources)
else:
tools = None
- assert args.verb == Verb.sandbox or args.verb.needs_build()
-
if (
tools
and (
hint="Make sure to (re)build the image first with 'mkosi build' or use '--force'",
)
- if not last.repart_offline and os.getuid() != 0:
- die(f"Must be root to build {last.name()} image configured with RepartOffline=no")
-
output = last.output_dir_or_cwd() / last.output_with_compression
if (
logging.info(f"Output path {output} exists already. (Use --force to rebuild.)")
return
- if args.verb not in (Verb.build, Verb.sandbox) and not args.force and not output.exists():
- die(
- f"Image '{last.name()}' has not been built yet",
- hint="Make sure to build the image first with 'mkosi build' or use '--force'",
- )
-
- check_workspace_directory(last)
-
- if args.verb != Verb.sandbox and last.incremental == Incremental.strict:
- if args.force > 1:
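+ # The sanity checks below only matter for verbs that build or consume a built image.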
+ if args.verb.needs_build():
+ if args.verb != Verb.build and not args.force and not output.exists():
die(
- "Cannot remove incremental caches when building with Incremental=strict",
- hint="Build once with -i yes to update the image cache",
+ f"Image '{last.name()}' has not been built yet",
+ hint="Make sure to build the image first with 'mkosi build' or use '--force'",
)
- for config in images:
- if have_cache(config):
- continue
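+ # With RepartOffline=no, systemd-repart operates on loop devices and therefore needs root.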
+ if not last.repart_offline and os.getuid() != 0:
+ die(f"Must be root to build {last.name()} image configured with RepartOffline=no")
- die(
- f"Strict incremental mode is enabled but the cache for image {config.name()} is out-of-date",
- hint="Build once with -i yes to update the image cache",
- )
+ check_workspace_directory(last)
+
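+ # Incremental=strict turns cache problems into hard errors: caches may not be wiped and must be up-to-date.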
+ if last.incremental == Incremental.strict:
+ if args.force > 1:
+ die(
+ "Cannot remove incremental caches when building with Incremental=strict",
+ hint="Build once with -i yes to update the image cache",
+ )
+
+ for config in images:
+ if have_cache(config):
+ continue
+
+ die(
+ f"Strict incremental mode is enabled and cache for image {config.name()} is out-of-date",
+ hint="Build once with -i yes to update the image cache",
+ )
# If we're doing an incremental build and the cache is not out of date, don't clean up the
# tools tree so that we can reuse the previous one.
# First, process all directory removals because otherwise if different images share directories
# a later image build could end up deleting the output generated by an earlier image build.
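+ # Note the distinction: args.verb.needs_build() asks whether the verb requires an image build at all,
+ # while needs_build(args, last) asks whether the image actually has to be (re)built.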
- if args.verb != Verb.sandbox and (needs_build(args, last) or args.wipe_build_dir):
+ if args.verb.needs_build() and (needs_build(args, last) or args.wipe_build_dir):
for config in images:
run_clean(args, config, resources=resources)
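# The default tools tree is itself built via run_build: its repository metadata is synced first, then the build runs in a forked child process.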
sync_repository_metadata(args, [tools], resources=resources, dst=Path(metadata_dir))
fork_and_wait(run_build, args, tools, resources=resources, metadata_dir=Path(metadata_dir))
- if args.verb == Verb.sandbox:
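+ # Verbs that don't need an image build (ssh, journalctl, coredumpctl, sandbox) are dispatched directly to their handlers.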
+ if not args.verb.needs_build():
with prepend_to_environ_path(last):
- return run_sandbox(args, last)
+ return {
+ Verb.ssh: run_ssh,
+ Verb.journalctl: run_journalctl,
+ Verb.coredumpctl: run_coredumpctl,
+ Verb.sandbox: run_sandbox,
+ }[args.verb](args, last)
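# Verbs that do need a build continue below, processing each image in turn.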
for i, config in enumerate(images):
with prepend_to_environ_path(config):