    return s.replace("%u", INVOKING_USER.name())
-def needs_build(args: Args, config: Config) -> bool:
-    return (
-        args.verb.needs_build() and
-        (
-            args.force > 0 or
-            not (config.output_dir_or_cwd() / config.output_with_compression).exists() or
-            # When the output is a directory, its name is the same as the symlink we create that points to the actual
-            # output when not building a directory. So if the full output path exists, we have to check that it's not
-            # a symlink as well.
-            (config.output_dir_or_cwd() / config.output_with_compression).is_symlink()
-        )
-    )
-
-
@contextlib.contextmanager
def prepend_to_environ_path(config: Config) -> Iterator[None]:
    if config.tools_tree or not config.extra_search_paths:
hint="Use WorkspaceDirectory= to configure a different workspace directory")
+def needs_clean(args: Args, config: Config) -> bool:
+    return (
+        args.force > 0 or
+        not (config.output_dir_or_cwd() / config.output_with_compression).exists() or
+        # When the output is a directory, its name is the same as the symlink we create that points to the actual
+        # output when not building a directory. So if the full output path exists, we have to check that it's not
+        # a symlink as well.
+        (config.output_dir_or_cwd() / config.output_with_compression).is_symlink()
+    )
+
+
def run_clean(args: Args, config: Config) -> None:
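+    # Nothing to clean up unless --force was passed or no usable output exists for this image yet.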
+    if not needs_clean(args, config):
+        return
+
    become_root()
    # We remove any cached images if either the user passed --force twice or invoked "clean" with --force
    # given once; the downloaded package cache is removed as well if one additional "--force" is specified.
    remove_build_cache = args.force > 1
    remove_package_cache = args.force > 2
- with complete_step("Removing output files…"):
- if config.output_dir_or_cwd().exists():
- rmtree(*(p for p in config.output_dir_or_cwd().iterdir() if p.name.startswith(config.output)))
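+    # Remove every path in the output directory whose name starts with this image's output name.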
+    if (outputs := list(config.output_dir_or_cwd().glob(f"{config.output}*"))):
+        with complete_step(f"Removing output files of {config.name()} image…"):
+            rmtree(*outputs)
    if remove_build_cache:
        if config.cache_dir:
-            with complete_step("Removing cache entries…"):
+            with complete_step(f"Removing cache entries of {config.name()} image…"):
                rmtree(*(p for p in cache_tree_paths(config) if p.exists()))
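+        # Clear the build directory's contents but keep the directory itself in place.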
        if config.build_dir and config.build_dir.exists() and any(config.build_dir.iterdir()):
-            with complete_step("Clearing out build directory…"):
+            with complete_step(f"Clearing out build directory of {config.name()} image…"):
                rmtree(*config.build_dir.iterdir())
-    if remove_package_cache:
-        if config.cache_dir and config.cache_dir.exists() and any(config.cache_dir.iterdir()):
-            with complete_step("Clearing out package cache…"):
-                rmtree(
-                    *(
-                        config.cache_dir / p / d
-                        for p in ("cache", "lib")
-                        for d in ("apt", "dnf", "libdnf5", "pacman", "zypp")
-                    ),
-                )
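+    # The package cache is only cleared when enough --force was given and the cache directory is non-empty.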
+    if remove_package_cache and config.cache_dir and config.cache_dir.exists() and any(config.cache_dir.iterdir()):
+        with complete_step(f"Clearing out package cache of {config.name()} image…"):
+            rmtree(
+                *(
+                    config.cache_dir / p / d
+                    for p in ("cache", "lib")
+                    for d in ("apt", "dnf", "libdnf5", "pacman", "zypp")
+                ),
+            )
def run_build(args: Args, config: Config) -> None:
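+    # Validate the inputs up front, before we become root and unshare the mount namespace.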
+    check_inputs(config)
+
    if (uid := os.getuid()) != 0:
        become_root()
    unshare(CLONE_NEWNS)
    # image build could end up deleting the output generated by an earlier image build.
    for config in images:
-        if not needs_build(args, config) and args.verb != Verb.clean:
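+        # Only verbs that build images (or the explicit "clean" verb) get their outputs cleaned up.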
+        if not args.verb.needs_build() and args.verb != Verb.clean:
            continue
if config.tools_tree and config.tools_tree.name == "default":
build = False
    for i, config in enumerate(images):
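+        # Verbs that do not build an image have nothing to do in this loop.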
+        if not args.verb.needs_build():
+            continue
+
        with (
            finalize_default_tools(args, config)
            if config.tools_tree and config.tools_tree.name == "default"
                tools_tree=tools.output_dir_or_cwd() / tools.output if tools else config.tools_tree,
            )
-            if tools and needs_build(args, tools):
-                check_inputs(tools)
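+            # Build the default tools tree only if its output does not exist yet; input checking now
+            # happens inside run_build() itself.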
+            if tools and not (tools.output_dir_or_cwd() / tools.output_with_compression).exists():
                fork_and_wait(lambda: run_build(args, tools))  # pyright: ignore
-            if not needs_build(args, config):
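+            # An existing output means this image does not need to be rebuilt (run_clean() would have
+            # removed it if a rebuild had been forced).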
+            if (config.output_dir_or_cwd() / config.output_with_compression).exists():
                continue
-            check_inputs(config)
            fork_and_wait(lambda: run_build(args, config))
            build = True