git.ipfire.org Git - thirdparty/systemd.git/commitdiff
journal: don't try to reuse already calculated hash between files with keyed hash... 19854/head
authorLennart Poettering <lennart@poettering.net>
Tue, 8 Jun 2021 21:17:53 +0000 (23:17 +0200)
committerLennart Poettering <lennart@poettering.net>
Wed, 9 Jun 2021 07:34:55 +0000 (09:34 +0200)
When suppressing duplicate fields between files, we have so far tried to reuse
the already-known hash value of the data fields across files. This was
fine as long as we used the same hash function everywhere. However,
since the addition of the keyed hash feature for journal files this no longer
works, because the hashes will be different for different files.

Fixes: #19172
src/libsystemd/sd-journal/sd-journal.c

index 0a79d8c98d5087a05e0b1643a6cac3f143f0133e..5728c537bc603059fe65a7aa5bfe7e902ec38b00 100644 (file)
@@ -3004,7 +3004,13 @@ _public_ int sd_journal_enumerate_unique(
                         if (JOURNAL_HEADER_CONTAINS(of->header, n_fields) && le64toh(of->header->n_fields) <= 0)
                                 continue;
 
-                        r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                        /* We can reuse the hash from our current file only on old-style journal files
+                         * without keyed hashes. On new-style files we have to calculate the hash anew, to
+                         * take the per-file hash seed into consideration. */
+                        if (!JOURNAL_HEADER_KEYED_HASH(j->unique_file->header) && !JOURNAL_HEADER_KEYED_HASH(of->header))
+                                r = journal_file_find_data_object_with_hash(of, odata, ol, le64toh(o->data.hash), NULL, NULL);
+                        else
+                                r = journal_file_find_data_object(of, odata, ol, NULL, NULL);
                         if (r < 0)
                                 return r;
                         if (r > 0) {