import argparse
import collections
+import multiprocessing
import os
import subprocess
import sys
    return comp_data
-def output_dtb(fsw, seq, fname, arch, compress):
+def compress_dtb(fname, compress):
+    """Compress a single DTB file
+
+    Args:
+        fname (str): Filename containing the DTB
+        compress (str): Compression algorithm, e.g. 'gzip'
+
+    Returns:
+        tuple of (str, bytes): the filename and its compressed data
+    """
+    with open(fname, 'rb') as inf:
+        compressed = compress_data(inf, compress)
+    return fname, compressed
+
+
+def output_dtb(fsw, seq, fname, arch, compress, data=None):
"""Write out a single devicetree to the FIT
Args:
fsw (libfdt.FdtSw): Object to use for writing
seq (int): Sequence number (1 for first)
fname (str): Filename containing the DTB
- arch: FIT architecture, e.g. 'arm64'
+ arch (str): FIT architecture, e.g. 'arm64'
compress (str): Compressed algorithm, e.g. 'gzip'
+ data (bytes): Pre-compressed data (optional)
"""
    with fsw.add_node(f'fdt-{seq}'):
        fsw.property_string('description', os.path.basename(fname))
        fsw.property_string('arch', arch)
        fsw.property_string('compression', compress)
-        with open(fname, 'rb') as inf:
-            compressed = compress_data(inf, compress)
-        fsw.property('data', compressed)
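+        # Compress inline when no pre-compressed data was supplied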
+        if data is None:
+            with open(fname, 'rb') as inf:
+                data = compress_data(inf, compress)
+        fsw.property('data', data)
def process_dtb(fname, args):
"""
    seq = 0
    size = 0
+
+    # First figure out the unique DTB files that need compression
+    todo = []  # unique DTB filenames that still need compressing
+    file_info = []  # list of (fname, model, compat, files) tuples
+
    for fname in args.dtbs:
        # Ignore non-DTB (*.dtb) files
        if os.path.splitext(fname)[1] != '.dtb':
            sys.stderr.write(f'Error processing {fname}:\n')
            raise e
+        file_info.append((fname, model, compat, files))
+        for fn in files:
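+            # Queue each unique DTB only once, even if several models share it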
+            if fn not in fdts and fn not in todo:
+                todo.append(fn)
+
+    # Compress all DTBs in parallel; skip the pool when not compressing
+    cache = {}
+    if todo and args.compress != 'none':
+        if args.verbose:
+            print(f'Compressing {len(todo)} DTBs...')
+
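+        # Pool() defaults to one worker process per CPU (os.cpu_count())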
+        with multiprocessing.Pool() as pool:
+            compress_args = [(fn, args.compress) for fn in todo]
+            # starmap() unpacks each tuple into a compress_dtb(fn, compress) call
+            results = pool.starmap(compress_dtb, compress_args)
+
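+        # Map each filename to its compressed bytes for the writing pass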
+        cache = dict(results)
+
+    # Now write all DTBs to the FIT using the pre-compressed data
+    for fname, model, compat, files in file_info:
        for fn in files:
            if fn not in fdts:
                seq += 1
                size += os.path.getsize(fn)
-                output_dtb(fsw, seq, fn, args.arch, args.compress)
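+                # cache.get() returns None when fn was not pre-compressed,
+                # making output_dtb() read and compress the file inline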
+                output_dtb(fsw, seq, fn, args.arch, args.compress,
+                           cache.get(fn))
                fdts[fn] = seq
        files_seq = [fdts[fn] for fn in files]