From: Simon Glass
Date: Tue, 6 Jan 2026 16:27:36 +0000 (-0700)
Subject: scripts/make_fit: Compress dtbs in parallel
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c7c88b20cd4226af0c9fbdb365fb6f221501c7da;p=thirdparty%2Fkernel%2Flinux.git

scripts/make_fit: Compress dtbs in parallel

When there are 1500 device tree files it takes quite a while to
compress them. Do it in parallel.

Signed-off-by: Simon Glass
Link: https://patch.msgid.link/20260106162738.2605574-7-sjg@chromium.org
Signed-off-by: Nathan Chancellor
---

diff --git a/scripts/make_fit.py b/scripts/make_fit.py
index defb87f223d76..e923cc8b05b7a 100755
--- a/scripts/make_fit.py
+++ b/scripts/make_fit.py
@@ -37,6 +37,7 @@ as U-Boot, Linuxboot, Tianocore, etc.
 
 import argparse
 import collections
+import multiprocessing
 import os
 import subprocess
 import sys
@@ -225,15 +226,31 @@ def compress_data(inf, compress):
     return comp_data
 
 
-def output_dtb(fsw, seq, fname, arch, compress):
+def compress_dtb(fname, compress):
+    """Compress a single DTB file
+
+    Args:
+        fname (str): Filename containing the DTB
+        compress (str): Compression algorithm, e.g. 'gzip'
+
+    Returns:
+        tuple: (str: fname, bytes: compressed_data)
+    """
+    with open(fname, 'rb') as inf:
+        compressed = compress_data(inf, compress)
+    return fname, compressed
+
+
+def output_dtb(fsw, seq, fname, arch, compress, data=None):
     """Write out a single devicetree to the FIT
 
     Args:
         fsw (libfdt.FdtSw): Object to use for writing
         seq (int): Sequence number (1 for first)
         fname (str): Filename containing the DTB
-        arch: FIT architecture, e.g. 'arm64'
+        arch (str): FIT architecture, e.g. 'arm64'
         compress (str): Compressed algorithm, e.g. 'gzip'
+        data (bytes): Pre-compressed data (optional)
     """
     with fsw.add_node(f'fdt-{seq}'):
         fsw.property_string('description', os.path.basename(fname))
@@ -241,9 +258,10 @@
         fsw.property_string('arch', arch)
         fsw.property_string('compression', compress)
 
-    with open(fname, 'rb') as inf:
-        compressed = compress_data(inf, compress)
-    fsw.property('data', compressed)
+    if data is None:
+        with open(fname, 'rb') as inf:
+            data = compress_data(inf, compress)
+    fsw.property('data', data)
 
 
 def process_dtb(fname, args):
@@ -300,6 +318,11 @@
     """
     seq = 0
     size = 0
+
+    # First figure out the unique DTB files that need compression
+    todo = []
+    file_info = []  # List of (fname, model, compat, files) tuples
+
     for fname in args.dtbs:
         # Ignore non-DTB (*.dtb) files
         if os.path.splitext(fname)[1] != '.dtb':
@@ -311,11 +334,32 @@
             sys.stderr.write(f'Error processing {fname}:\n')
             raise e
 
+        file_info.append((fname, model, compat, files))
+        for fn in files:
+            if fn not in fdts and fn not in todo:
+                todo.append(fn)
+
+    # Compress all DTBs in parallel
+    cache = {}
+    if todo and args.compress != 'none':
+        if args.verbose:
+            print(f'Compressing {len(todo)} DTBs...')
+
+        with multiprocessing.Pool() as pool:
+            compress_args = [(fn, args.compress) for fn in todo]
+            # unpacks each tuple, calls compress_dtb(fn, compress) in parallel
+            results = pool.starmap(compress_dtb, compress_args)
+
+        cache = dict(results)
+
+    # Now write all DTBs to the FIT using pre-compressed data
+    for fname, model, compat, files in file_info:
         for fn in files:
             if fn not in fdts:
                 seq += 1
                 size += os.path.getsize(fn)
-                output_dtb(fsw, seq, fn, args.arch, args.compress)
+                output_dtb(fsw, seq, fn, args.arch, args.compress,
+                           cache.get(fn))
                 fdts[fn] = seq
         files_seq = [fdts[fn] for fn in files]
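
For reference, the fan-out/fan-in pattern the patch uses can be sketched in
isolation. The sketch below is illustrative only, not part of the patch: it
assumes gzip compression via Python's built-in gzip module in place of the
script's compress_data() helper (which runs the compressor as a subprocess),
and the .dtb filenames are hypothetical.

#!/usr/bin/env python3
# Minimal sketch of the parallel-compression pattern, under the
# assumptions stated above (gzip module stand-in, hypothetical files).

import gzip
import multiprocessing


def compress_dtb(fname, compress):
    """Mirror of the patch's helper: return (fname, compressed bytes)."""
    with open(fname, 'rb') as inf:
        data = inf.read()
    if compress == 'gzip':
        return fname, gzip.compress(data)
    return fname, data  # 'none': pass the data through unchanged


if __name__ == '__main__':
    # Hypothetical worklist; make_fit.py builds this from args.dtbs
    todo = ['a.dtb', 'b.dtb', 'c.dtb']

    with multiprocessing.Pool() as pool:
        # starmap() unpacks each (fname, compress) tuple and spreads the
        # calls across worker processes; results come back in input order
        results = pool.starmap(compress_dtb,
                               [(fn, 'gzip') for fn in todo])

    # Each result carries its own filename, so the cache is correct no
    # matter which worker finished first
    cache = dict(results)
    for fn, blob in cache.items():
        print(f'{fn}: {len(blob)} bytes compressed')

Having each worker return a (fname, data) tuple rather than bare data is what
makes dict(results) a safe filename-to-bytes cache: the mapping does not
depend on scheduling order, and the later serial pass over file_info can
simply hand cache.get(fn) to output_dtb().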