From: Lennart Poettering
Date: Tue, 8 Jun 2021 21:17:53 +0000 (+0200)
Subject: journal: don't try to reuse already calculated hash between files with keyed hash...
X-Git-Tag: v249-rc1~53^2
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=refs%2Fpull%2F19854%2Fhead;p=thirdparty%2Fsystemd.git

journal: don't try to reuse already calculated hash between files with keyed hash feature

When suppressing duplicate fields between files we so far tried to reuse
the already known hash value of a data field from one file when looking
it up in another. This was fine as long as we used the same hash
function everywhere. However, since the addition of the keyed hash
feature for journal files this no longer works, since the hashes will be
different for different files.

Fixes: #19172
---

diff --git a/src/libsystemd/sd-journal/sd-journal.c b/src/libsystemd/sd-journal/sd-journal.c
index 0a79d8c98d5..5728c537bc6 100644
--- a/src/libsystemd/sd-journal/sd-journal.c
+++ b/src/libsystemd/sd-journal/sd-journal.c
@@ -3004,7 +3004,13 @@ _public_ int sd_journal_enumerate_unique(
                         if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) &&
                             le64toh(of->header->n_fields) <= 0)
                                 continue;
-                        r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                        /* We can reuse the hash from our current file only on old-style journal files
+                         * without keyed hashes. On new-style files we have to calculate the hash anew, to
+                         * take the per-file hash seed into consideration. */
+                        if (!JOURNAL_HEADER_KEYED_HASH(j->unique_file->header) && !JOURNAL_HEADER_KEYED_HASH(of->header))
+                                r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                        else
+                                r = journal_file_find_data_object(of, odata, ol, NULL, NULL);
                         if (r < 0)
                                 return r;
                         if (r > 0) {
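
For illustration only, not part of the commit: a minimal, self-contained C sketch of
the underlying problem. The toy_keyed_hash() helper, the seed values and the example
field below are made up and are not the journal's real hash function; the point is
only that the same data hashed under two different per-file seeds yields two different
values, so a hash computed for one file cannot be used to look the data up in another
file and has to be recalculated there.

/* Illustrative sketch (hypothetical toy hash, not systemd code): shows why a hash
 * value computed with one file's seed is useless for lookups in a file that uses a
 * different seed. */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Toy keyed hash: an FNV-1a style loop with a per-file seed mixed into the
 * initial state. Stand-in only, chosen for brevity. */
static uint64_t toy_keyed_hash(uint64_t seed, const void *data, size_t size) {
        const uint8_t *p = data;
        uint64_t h = seed ^ 0xcbf29ce484222325ULL;

        for (size_t i = 0; i < size; i++) {
                h ^= p[i];
                h *= 0x100000001b3ULL;
        }
        return h;
}

int main(void) {
        const char *field = "_SYSTEMD_UNIT=foo.service";   /* example data field */
        uint64_t seed_a = 0x1111111111111111ULL;            /* pretend seed of file A */
        uint64_t seed_b = 0x2222222222222222ULL;            /* pretend seed of file B */

        uint64_t hash_in_a = toy_keyed_hash(seed_a, field, strlen(field));
        uint64_t hash_in_b = toy_keyed_hash(seed_b, field, strlen(field));

        /* Same data, different per-file seeds: the values (almost certainly) differ,
         * so reusing hash_in_a to search file B's hash table would miss the entry. */
        printf("hash in A: %" PRIx64 "\n"
               "hash in B: %" PRIx64 "\n"
               "equal: %s\n",
               hash_in_a, hash_in_b, hash_in_a == hash_in_b ? "yes" : "no");
        return 0;
}

Built with any C99 compiler, this prints the two hash values and whether they match;
with distinct seeds they will practically always differ, which is exactly why the
patch recomputes the hash per file whenever either file uses keyed hashes.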