]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/libsystemd/sd-journal/journal-file.c
mmap-cache: introduce enum MMapCacheCategory
[thirdparty/systemd.git] / src / libsystemd / sd-journal / journal-file.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <linux/fs.h>
6 #include <linux/magic.h>
7 #include <pthread.h>
8 #include <stddef.h>
9 #include <sys/mman.h>
10 #include <sys/statvfs.h>
11 #include <sys/uio.h>
12 #include <unistd.h>
13
14 #include "sd-event.h"
15
16 #include "alloc-util.h"
17 #include "chattr-util.h"
18 #include "compress.h"
19 #include "env-util.h"
20 #include "fd-util.h"
21 #include "format-util.h"
22 #include "fs-util.h"
23 #include "id128-util.h"
24 #include "journal-authenticate.h"
25 #include "journal-def.h"
26 #include "journal-file.h"
27 #include "journal-internal.h"
28 #include "lookup3.h"
29 #include "memory-util.h"
30 #include "missing_threads.h"
31 #include "path-util.h"
32 #include "prioq.h"
33 #include "random-util.h"
34 #include "set.h"
35 #include "sort-util.h"
36 #include "stat-util.h"
37 #include "string-table.h"
38 #include "string-util.h"
39 #include "strv.h"
40 #include "sync-util.h"
41 #include "user-util.h"
42 #include "xattr-util.h"
43
44 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
45 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
46
47 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
48 #define MIN_COMPRESS_THRESHOLD (8ULL)
49
50 #define U64_KB UINT64_C(1024)
51 #define U64_MB (UINT64_C(1024) * U64_KB)
52 #define U64_GB (UINT64_C(1024) * U64_MB)
53
54 /* This is the minimum journal file size */
55 #define JOURNAL_FILE_SIZE_MIN (512 * U64_KB) /* 512 KiB */
56 #define JOURNAL_COMPACT_SIZE_MAX ((uint64_t) UINT32_MAX) /* 4 GiB */
57
58 /* These are the lower and upper bounds if we deduce the max_use value from the file system size */
59 #define MAX_USE_LOWER (1 * U64_MB) /* 1 MiB */
60 #define MAX_USE_UPPER (4 * U64_GB) /* 4 GiB */
61
62 /* Those are the lower and upper bounds for the minimal use limit,
63 * i.e. how much we'll use even if keep_free suggests otherwise. */
64 #define MIN_USE_LOW (1 * U64_MB) /* 1 MiB */
65 #define MIN_USE_HIGH (16 * U64_MB) /* 16 MiB */
66
67 /* This is the upper bound if we deduce max_size from max_use */
68 #define MAX_SIZE_UPPER (128 * U64_MB) /* 128 MiB */
69
70 /* This is the upper bound if we deduce the keep_free value from the file system size */
71 #define KEEP_FREE_UPPER (4 * U64_GB) /* 4 GiB */
72
73 /* This is the keep_free value when we can't determine the system size */
74 #define DEFAULT_KEEP_FREE (1 * U64_MB) /* 1 MB */
75
76 /* This is the default maximum number of journal files to keep around. */
77 #define DEFAULT_N_MAX_FILES 100
78
79 /* n_data was the first entry we added after the initial file format design */
80 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
81
82 /* How many entries to keep in the entry array chain cache at max */
83 #define CHAIN_CACHE_MAX 20
84
85 /* How much to increase the journal file size at once each time we allocate something new. */
86 #define FILE_SIZE_INCREASE (8 * U64_MB) /* 8MB */
87
88 /* Reread fstat() of the file for detecting deletions at least this often */
89 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
90
91 /* Longest hash chain to rotate after */
92 #define HASH_CHAIN_DEPTH_MAX 100
93
94 #ifdef __clang__
95 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
96 #endif
97
/* Translate an open(2) access mode into the matching mmap(2) protection bits. */
static int mmap_prot_from_open_flags(int flags) {
        int accmode = flags & O_ACCMODE;

        if (accmode == O_RDONLY)
                return PROT_READ;
        if (accmode == O_WRONLY)
                return PROT_WRITE;
        if (accmode == O_RDWR)
                return PROT_READ|PROT_WRITE;

        assert_not_reached();
}
110
111 int journal_file_tail_end_by_pread(JournalFile *f, uint64_t *ret_offset) {
112 uint64_t p;
113 int r;
114
115 assert(f);
116 assert(f->header);
117 assert(ret_offset);
118
119 /* Same as journal_file_tail_end_by_mmap() below, but operates with pread() to avoid the mmap cache
120 * (and thus is thread safe) */
121
122 p = le64toh(f->header->tail_object_offset);
123 if (p == 0)
124 p = le64toh(f->header->header_size);
125 else {
126 Object tail;
127 uint64_t sz;
128
129 r = journal_file_read_object_header(f, OBJECT_UNUSED, p, &tail);
130 if (r < 0)
131 return r;
132
133 sz = le64toh(tail.object.size);
134 if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
135 return -EBADMSG;
136
137 sz = ALIGN64(sz);
138 if (p > UINT64_MAX - sz)
139 return -EBADMSG;
140
141 p += sz;
142 }
143
144 *ret_offset = p;
145
146 return 0;
147 }
148
/* Determine the offset right after the last object in the file, going through the
 * usual mmap cache. Not thread safe (see journal_file_tail_end_by_pread() above for
 * the thread-safe variant). Returns 0 on success, negative errno-style error on failure. */
int journal_file_tail_end_by_mmap(JournalFile *f, uint64_t *ret_offset) {
        uint64_t p;
        int r;

        assert(f);
        assert(f->header);
        assert(ret_offset);

        /* Same as journal_file_tail_end_by_pread() above, but operates with the usual mmap logic */

        p = le64toh(f->header->tail_object_offset);
        if (p == 0)
                /* No objects yet: the tail is directly after the header. */
                p = le64toh(f->header->header_size);
        else {
                Object *tail;
                uint64_t sz;

                r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
                if (r < 0)
                        return r;

                /* READ_NOW() forces a single read of the mmap'd size field, so the
                 * overflow check below and the use of sz cannot see different values. */
                sz = le64toh(READ_NOW(tail->object.size));
                if (sz > UINT64_MAX - sizeof(uint64_t) + 1) /* ALIGN64() below would overflow */
                        return -EBADMSG;

                sz = ALIGN64(sz);
                if (p > UINT64_MAX - sz) /* offset + size would overflow */
                        return -EBADMSG;

                p += sz;
        }

        *ret_offset = p;

        return 0;
}
185
186 int journal_file_set_offline_thread_join(JournalFile *f) {
187 int r;
188
189 assert(f);
190
191 if (f->offline_state == OFFLINE_JOINED)
192 return 0;
193
194 r = pthread_join(f->offline_thread, NULL);
195 if (r)
196 return -r;
197
198 f->offline_state = OFFLINE_JOINED;
199
200 if (mmap_cache_fd_got_sigbus(f->cache_fd))
201 return -EIO;
202
203 return 0;
204 }
205
/* Transition a writable journal file into STATE_ONLINE, first cancelling or joining any
 * asynchronous offlining thread that may be in flight. Returns 0 on success, -EPERM if the
 * file is not writable, -EINVAL on missing fd/header or an unexpected header state, and
 * -EIO if the mmap cache caught a SIGBUS for this file. */
static int journal_file_set_online(JournalFile *f) {
        bool wait = true;

        assert(f);

        if (!journal_file_writable(f))
                return -EPERM;

        if (f->fd < 0 || !f->header)
                return -EINVAL;

        /* The offline thread advances f->offline_state concurrently; depending on where it is we
         * can either cancel it with an atomic compare-and-exchange, or we must wait for it. */
        while (wait) {
                switch (f->offline_state) {
                case OFFLINE_JOINED:
                        /* No offline thread, no need to wait. */
                        wait = false;
                        break;

                case OFFLINE_SYNCING: {
                        OfflineState tmp_state = OFFLINE_SYNCING;
                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                continue; /* state changed under us, re-evaluate */
                }
                        /* Canceled syncing prior to offlining, no need to wait. */
                        wait = false;
                        break;

                case OFFLINE_AGAIN_FROM_SYNCING: {
                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_SYNCING;
                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                continue; /* state changed under us, re-evaluate */
                }
                        /* Canceled restart from syncing, no need to wait. */
                        wait = false;
                        break;

                case OFFLINE_AGAIN_FROM_OFFLINING: {
                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_OFFLINING;
                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                continue; /* state changed under us, re-evaluate */
                }
                        /* Canceled restart from offlining, must wait for offlining to complete however. */
                        _fallthrough_;
                default: {
                        int r;

                        r = journal_file_set_offline_thread_join(f);
                        if (r < 0)
                                return r;

                        wait = false;
                        break;
                }
                }
        }

        if (mmap_cache_fd_got_sigbus(f->cache_fd))
                return -EIO;

        switch (f->header->state) {
        case STATE_ONLINE:
                /* Already online, nothing to do. */
                return 0;

        case STATE_OFFLINE:
                f->header->state = STATE_ONLINE;
                (void) fsync(f->fd); /* best-effort: persist the state change */
                return 0;

        default:
                /* STATE_ARCHIVED or unknown — refuse to bring online. */
                return -EINVAL;
        }
}
281
/* Close a journal file and release all resources associated with it. NULL-safe.
 * Always returns NULL so callers can write f = journal_file_close(f). */
JournalFile* journal_file_close(JournalFile *f) {
        if (!f)
                return NULL;

        /* The caller must already have removed the file from the newest-boot-id prioq. */
        assert(f->newest_boot_id_prioq_idx == PRIOQ_IDX_NULL);

        if (f->cache_fd)
                mmap_cache_fd_free(f->cache_fd);

        /* Only close the fd if we own it (f->close_fd); it may be borrowed from the caller. */
        if (f->close_fd)
                safe_close(f->fd);
        free(f->path);

        ordered_hashmap_free_free(f->chain_cache);

#if HAVE_COMPRESSION
        free(f->compress_buffer);
#endif

#if HAVE_GCRYPT
        /* fss_file is an mmap'd region when set; otherwise fsprg_state is heap-allocated. */
        if (f->fss_file) {
                size_t sz = PAGE_ALIGN(f->fss_file_size);
                assert(sz < SIZE_MAX);
                munmap(f->fss_file, sz);
        } else
                free(f->fsprg_state);

        free(f->fsprg_seed);

        if (f->hmac)
                gcry_md_close(f->hmac);
#endif

        return mfree(f);
}
317
318 static bool keyed_hash_requested(void) {
319 static thread_local int cached = -1;
320 int r;
321
322 if (cached < 0) {
323 r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
324 if (r < 0) {
325 if (r != -ENXIO)
326 log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
327 cached = true;
328 } else
329 cached = r;
330 }
331
332 return cached;
333 }
334
335 static bool compact_mode_requested(void) {
336 static thread_local int cached = -1;
337 int r;
338
339 if (cached < 0) {
340 r = getenv_bool("SYSTEMD_JOURNAL_COMPACT");
341 if (r < 0) {
342 if (r != -ENXIO)
343 log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_COMPACT environment variable, ignoring: %m");
344 cached = true;
345 } else
346 cached = r;
347 }
348
349 return cached;
350 }
351
#if HAVE_COMPRESSION
/* Parse $SYSTEMD_JOURNAL_COMPRESS: it may be a boolean ("yes"/"no") or the name of a
 * specific compression algorithm. Falls back to DEFAULT_COMPRESSION on unset, unparsable
 * or unsupported values. */
static Compression getenv_compression(void) {
        int r;

        const char *e = getenv("SYSTEMD_JOURNAL_COMPRESS");
        if (!e)
                return DEFAULT_COMPRESSION;

        /* Booleans first: "true" selects the default algorithm, "false" disables compression. */
        r = parse_boolean(e);
        if (r >= 0)
                return r ? DEFAULT_COMPRESSION : COMPRESSION_NONE;

        Compression c = compression_from_string(e);
        if (c < 0) {
                log_debug_errno(c, "Failed to parse SYSTEMD_JOURNAL_COMPRESS value, ignoring: %s", e);
                return DEFAULT_COMPRESSION;
        }

        if (!compression_supported(c)) {
                log_debug("Unsupported compression algorithm specified, ignoring: %s", e);
                return DEFAULT_COMPRESSION;
        }

        return c;
}
#endif
380
/* Which compression algorithm to use for new journal files. Resolved once per thread
 * from the environment; always COMPRESSION_NONE when built without compression support. */
static Compression compression_requested(void) {
#if HAVE_COMPRESSION
        /* _COMPRESSION_INVALID (< 0) marks "not determined yet". */
        static thread_local Compression cached = _COMPRESSION_INVALID;

        if (cached < 0)
                cached = getenv_compression();

        return cached;
#else
        return COMPRESSION_NONE;
#endif
}
393
/* Write a fresh journal file header to the (empty) file referred to by f->fd.
 * If 'template' is given, the sequence number ID and tail sequence number are inherited
 * from it so numbering continues seamlessly across rotation; otherwise the seqnum ID is
 * set to the new file's own file ID. Returns 0 on success, negative errno on failure. */
static int journal_file_init_header(
                JournalFile *f,
                JournalFileFlags file_flags,
                JournalFile *template) {

        bool seal = false;
        ssize_t k;
        int r;

        assert(f);

#if HAVE_GCRYPT
        /* Try to load the FSPRG state, and if we can't, then just don't do sealing */
        seal = FLAGS_SET(file_flags, JOURNAL_SEAL) && journal_file_fss_load(f) >= 0;
#endif

        /* Note the bool * flag multiplications below: each feature flag contributes its bit
         * only when the corresponding feature was requested. */
        Header h = {
                .header_size = htole64(ALIGN64(sizeof(h))),
                .incompatible_flags = htole32(
                                FLAGS_SET(file_flags, JOURNAL_COMPRESS) * COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(compression_requested()) |
                                keyed_hash_requested() * HEADER_INCOMPATIBLE_KEYED_HASH |
                                compact_mode_requested() * HEADER_INCOMPATIBLE_COMPACT),
                .compatible_flags = htole32(
                                (seal * HEADER_COMPATIBLE_SEALED) |
                                HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID),
        };

        assert_cc(sizeof(h.signature) == sizeof(HEADER_SIGNATURE));
        memcpy(h.signature, HEADER_SIGNATURE, sizeof(HEADER_SIGNATURE));

        r = sd_id128_randomize(&h.file_id);
        if (r < 0)
                return r;

        r = sd_id128_get_machine(&h.machine_id);
        if (r < 0 && !ERRNO_IS_MACHINE_ID_UNSET(r))
                return r; /* If we have no valid machine ID (test environment?), let's simply leave the
                           * machine ID field all zeroes. */

        if (template) {
                h.seqnum_id = template->header->seqnum_id;
                h.tail_entry_seqnum = template->header->tail_entry_seqnum;
        } else
                h.seqnum_id = h.file_id;

        /* Write the header at offset 0; a short write is treated as an I/O error. */
        k = pwrite(f->fd, &h, sizeof(h), 0);
        if (k < 0)
                return -errno;
        if (k != sizeof(h))
                return -EIO;

        return 0;
}
447
448 static int journal_file_refresh_header(JournalFile *f) {
449 int r;
450
451 assert(f);
452 assert(f->header);
453
454 /* We used to update the header's boot ID field here, but we don't do that anymore, as per
455 * HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID */
456
457 r = journal_file_set_online(f);
458
459 /* Sync the online state to disk; likely just created a new file, also sync the directory this file
460 * is located in. */
461 (void) fsync_full(f->fd);
462
463 return r;
464 }
465
/* Check the file's compatible or incompatible header flags (selected by 'compatible')
 * against what this build supports. Logs a debug message naming the offending features,
 * and returns true if any unsupported flag is set (i.e. the caller should refuse the file). */
static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
        const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
                supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
        const char *type = compatible ? "compatible" : "incompatible";
        uint32_t flags;

        assert(f);
        assert(f->header);

        flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);

        if (flags & ~supported) {
                /* Flags outside 'any' are entirely unknown to this version of the code. */
                if (flags & ~any)
                        log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
                                  f->path, type, flags & ~any);
                /* Narrow down to flags we know about but that were disabled at build time. */
                flags = (flags & any) & ~supported;
                if (flags) {
                        /* At most 5 feature names + NULL terminator fit here. */
                        const char* strv[6];
                        size_t n = 0;
                        _cleanup_free_ char *t = NULL;

                        if (compatible) {
                                if (flags & HEADER_COMPATIBLE_SEALED)
                                        strv[n++] = "sealed";
                        } else {
                                if (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ)
                                        strv[n++] = "xz-compressed";
                                if (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4)
                                        strv[n++] = "lz4-compressed";
                                if (flags & HEADER_INCOMPATIBLE_COMPRESSED_ZSTD)
                                        strv[n++] = "zstd-compressed";
                                if (flags & HEADER_INCOMPATIBLE_KEYED_HASH)
                                        strv[n++] = "keyed-hash";
                                if (flags & HEADER_INCOMPATIBLE_COMPACT)
                                        strv[n++] = "compact";
                        }
                        strv[n] = NULL;
                        assert(n < ELEMENTSOF(strv));

                        t = strv_join((char**) strv, ", ");
                        log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
                                  f->path, type, n > 1 ? "flags" : "flag", strnull(t));
                }
                return true;
        }

        return false;
}
514
/* An object offset is valid when it is zero ("not set"), or when it is 64-bit aligned
 * and lies within the object area, i.e. between the end of the header and the tail object. */
static bool offset_is_valid(uint64_t offset, uint64_t header_size, uint64_t tail_object_offset) {
        return offset == 0 ||
                (VALID64(offset) &&
                 offset >= header_size &&
                 offset <= tail_object_offset);
}
526
527 static bool hash_table_is_valid(uint64_t offset, uint64_t size, uint64_t header_size, uint64_t arena_size, uint64_t tail_object_offset) {
528 if ((offset == 0) != (size == 0))
529 return false;
530 if (offset == 0)
531 return true;
532 if (offset <= offsetof(Object, hash_table.items))
533 return false;
534 offset -= offsetof(Object, hash_table.items);
535 if (!offset_is_valid(offset, header_size, tail_object_offset))
536 return false;
537 assert(offset <= header_size + arena_size);
538 if (size > header_size + arena_size - offset)
539 return false;
540 return true;
541 }
542
543 static int journal_file_verify_header(JournalFile *f) {
544 uint64_t arena_size, header_size;
545
546 assert(f);
547 assert(f->header);
548
549 if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
550 return -EBADMSG;
551
552 /* In both read and write mode we refuse to open files with incompatible
553 * flags we don't know. */
554 if (warn_wrong_flags(f, false))
555 return -EPROTONOSUPPORT;
556
557 /* When open for writing we refuse to open files with compatible flags, too. */
558 if (journal_file_writable(f) && warn_wrong_flags(f, true))
559 return -EPROTONOSUPPORT;
560
561 if (f->header->state >= _STATE_MAX)
562 return -EBADMSG;
563
564 header_size = le64toh(READ_NOW(f->header->header_size));
565
566 /* The first addition was n_data, so check that we are at least this large */
567 if (header_size < HEADER_SIZE_MIN)
568 return -EBADMSG;
569
570 /* When open for writing we refuse to open files with a mismatch of the header size, i.e. writing to
571 * files implementing older or new header structures. */
572 if (journal_file_writable(f) && header_size != sizeof(Header))
573 return -EPROTONOSUPPORT;
574
575 /* Don't write to journal files without the new boot ID update behavior guarantee. */
576 if (journal_file_writable(f) && !JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header))
577 return -EPROTONOSUPPORT;
578
579 if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
580 return -EBADMSG;
581
582 arena_size = le64toh(READ_NOW(f->header->arena_size));
583
584 if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
585 return -ENODATA;
586
587 uint64_t tail_object_offset = le64toh(f->header->tail_object_offset);
588 if (!offset_is_valid(tail_object_offset, header_size, UINT64_MAX))
589 return -ENODATA;
590 if (header_size + arena_size < tail_object_offset)
591 return -ENODATA;
592 if (header_size + arena_size - tail_object_offset < sizeof(ObjectHeader))
593 return -ENODATA;
594
595 if (!hash_table_is_valid(le64toh(f->header->data_hash_table_offset),
596 le64toh(f->header->data_hash_table_size),
597 header_size, arena_size, tail_object_offset))
598 return -ENODATA;
599
600 if (!hash_table_is_valid(le64toh(f->header->field_hash_table_offset),
601 le64toh(f->header->field_hash_table_size),
602 header_size, arena_size, tail_object_offset))
603 return -ENODATA;
604
605 uint64_t entry_array_offset = le64toh(f->header->entry_array_offset);
606 if (!offset_is_valid(entry_array_offset, header_size, tail_object_offset))
607 return -ENODATA;
608
609 if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_offset)) {
610 uint32_t offset = le32toh(f->header->tail_entry_array_offset);
611 uint32_t n = le32toh(f->header->tail_entry_array_n_entries);
612
613 if (!offset_is_valid(offset, header_size, tail_object_offset))
614 return -ENODATA;
615 if (entry_array_offset > offset)
616 return -ENODATA;
617 if (entry_array_offset == 0 && offset != 0)
618 return -ENODATA;
619 if ((offset == 0) != (n == 0))
620 return -ENODATA;
621 assert(offset <= header_size + arena_size);
622 if ((uint64_t) n * journal_file_entry_array_item_size(f) > header_size + arena_size - offset)
623 return -ENODATA;
624 }
625
626 if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_offset)) {
627 uint64_t offset = le64toh(f->header->tail_entry_offset);
628
629 if (!offset_is_valid(offset, header_size, tail_object_offset))
630 return -ENODATA;
631
632 if (offset > 0) {
633 /* When there is an entry object, then these fields must be filled. */
634 if (sd_id128_is_null(f->header->tail_entry_boot_id))
635 return -ENODATA;
636 if (!VALID_REALTIME(le64toh(f->header->head_entry_realtime)))
637 return -ENODATA;
638 if (!VALID_REALTIME(le64toh(f->header->tail_entry_realtime)))
639 return -ENODATA;
640 if (!VALID_MONOTONIC(le64toh(f->header->tail_entry_realtime)))
641 return -ENODATA;
642 } else {
643 /* Otherwise, the fields must be zero. */
644 if (JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header) &&
645 !sd_id128_is_null(f->header->tail_entry_boot_id))
646 return -ENODATA;
647 if (f->header->head_entry_realtime != 0)
648 return -ENODATA;
649 if (f->header->tail_entry_realtime != 0)
650 return -ENODATA;
651 if (f->header->tail_entry_realtime != 0)
652 return -ENODATA;
653 }
654 }
655
656 /* Verify number of objects */
657 uint64_t n_objects = le64toh(f->header->n_objects);
658 if (n_objects > arena_size / sizeof(ObjectHeader))
659 return -ENODATA;
660
661 uint64_t n_entries = le64toh(f->header->n_entries);
662 if (n_entries > n_objects)
663 return -ENODATA;
664
665 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
666 le64toh(f->header->n_data) > n_objects)
667 return -ENODATA;
668
669 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
670 le64toh(f->header->n_fields) > n_objects)
671 return -ENODATA;
672
673 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags) &&
674 le64toh(f->header->n_tags) > n_objects)
675 return -ENODATA;
676
677 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays) &&
678 le64toh(f->header->n_entry_arrays) > n_objects)
679 return -ENODATA;
680
681 if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_n_entries) &&
682 le32toh(f->header->tail_entry_array_n_entries) > n_entries)
683 return -ENODATA;
684
685 if (journal_file_writable(f)) {
686 sd_id128_t machine_id;
687 uint8_t state;
688 int r;
689
690 r = sd_id128_get_machine(&machine_id);
691 if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r)) /* Gracefully handle the machine ID not being initialized yet */
692 machine_id = SD_ID128_NULL;
693 else if (r < 0)
694 return r;
695
696 if (!sd_id128_equal(machine_id, f->header->machine_id))
697 return log_debug_errno(SYNTHETIC_ERRNO(EHOSTDOWN),
698 "Trying to open journal file from different host for writing, refusing.");
699
700 state = f->header->state;
701
702 if (state == STATE_ARCHIVED)
703 return -ESHUTDOWN; /* Already archived */
704 if (state == STATE_ONLINE)
705 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
706 "Journal file %s is already online. Assuming unclean closing.",
707 f->path);
708 if (state != STATE_OFFLINE)
709 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
710 "Journal file %s has unknown state %i.",
711 f->path, state);
712
713 if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
714 return -EBADMSG;
715 }
716
717 return 0;
718 }
719
720 int journal_file_fstat(JournalFile *f) {
721 int r;
722
723 assert(f);
724 assert(f->fd >= 0);
725
726 if (fstat(f->fd, &f->last_stat) < 0)
727 return -errno;
728
729 f->last_stat_usec = now(CLOCK_MONOTONIC);
730
731 /* Refuse dealing with files that aren't regular */
732 r = stat_verify_regular(&f->last_stat);
733 if (r < 0)
734 return r;
735
736 /* Refuse appending to files that are already deleted */
737 if (f->last_stat.st_nlink <= 0)
738 return -EIDRM;
739
740 return 0;
741 }
742
/* Ensure the file backing store covers [offset, offset + size). Grows the file (in
 * FILE_SIZE_INCREASE steps) via posix_fallocate() when needed, honoring max_size,
 * compact-mode 4G limit and keep_free disk-space constraints, and updates the header's
 * arena_size accordingly. Returns 0 on success, negative errno-style error on failure. */
static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
        uint64_t old_size, new_size, old_header_size, old_arena_size;
        int r;

        assert(f);
        assert(f->header);

        /* We assume that this file is not sparse, and we know that for sure, since we always call
         * posix_fallocate() ourselves */

        /* Reject requests where page-aligning offset + size would overflow. */
        if (size > PAGE_ALIGN_DOWN_U64(UINT64_MAX) - offset)
                return -EINVAL;

        if (mmap_cache_fd_got_sigbus(f->cache_fd))
                return -EIO;

        old_header_size = le64toh(READ_NOW(f->header->header_size));
        old_arena_size = le64toh(READ_NOW(f->header->arena_size));
        if (old_arena_size > PAGE_ALIGN_DOWN_U64(UINT64_MAX) - old_header_size)
                return -EBADMSG;

        old_size = old_header_size + old_arena_size;

        /* Never shrink below the header, even for tiny requests. */
        new_size = MAX(PAGE_ALIGN_U64(offset + size), old_header_size);

        if (new_size <= old_size) {

                /* We already pre-allocated enough space, but before
                 * we write to it, let's check with fstat() if the
                 * file got deleted, in order make sure we don't throw
                 * away the data immediately. Don't check fstat() for
                 * all writes though, but only once ever 10s. */

                if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
                        return 0;

                return journal_file_fstat(f);
        }

        /* Allocate more space. */

        if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
                return -E2BIG;

        /* Refuse to go over 4G in compact mode so offsets can be stored in 32-bit. */
        if (JOURNAL_HEADER_COMPACT(f->header) && new_size > UINT32_MAX)
                return -E2BIG;

        /* Respect the keep_free disk-space reservation, but only once we're past min_size. */
        if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
                struct statvfs svfs;

                if (fstatvfs(f->fd, &svfs) >= 0) {
                        uint64_t available;

                        available = LESS_BY(u64_multiply_safe(svfs.f_bfree, svfs.f_bsize), f->metrics.keep_free);

                        if (new_size - old_size > available)
                                return -E2BIG;
                }
        }

        /* Increase by larger blocks at once */
        new_size = ROUND_UP(new_size, FILE_SIZE_INCREASE);
        if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
                new_size = f->metrics.max_size;

        /* Note that the glibc fallocate() fallback is very
           inefficient, hence we try to minimize the allocation area
           as we can. */
        r = posix_fallocate_loop(f->fd, old_size, new_size - old_size);
        if (r < 0)
                return r;

        f->header->arena_size = htole64(new_size - old_header_size);

        /* Refresh last_stat so the new size is visible to subsequent range checks. */
        return journal_file_fstat(f);
}
820
/* Map the file range [offset, offset + size) via the mmap cache and return a pointer to
 * it in *ret. 'type' selects the cache category; 'keep_always' pins the window in the
 * cache. Validates the range against the file size (refreshing fstat() data once if the
 * range initially appears out of bounds). Returns 0 on success, negative errno-style
 * error on failure (-EADDRNOTAVAIL if the range lies beyond the end of the file). */
static int journal_file_move_to(
                JournalFile *f,
                ObjectType type,
                bool keep_always,
                uint64_t offset,
                uint64_t size,
                void **ret) {

        int r;

        assert(f);
        assert(ret);

        /* This function may clear, overwrite, or alter previously cached entries with the same type. After
         * this function has been called, all previously read objects with the same type may be invalidated,
         * hence must be re-read before use. */

        if (size <= 0)
                return -EINVAL;

        /* offset + size must not overflow. */
        if (size > UINT64_MAX - offset)
                return -EBADMSG;

        /* Avoid SIGBUS on invalid accesses */
        if (offset + size > (uint64_t) f->last_stat.st_size) {
                /* Hmm, out of range? Let's refresh the fstat() data
                 * first, before we trust that check. */

                r = journal_file_fstat(f);
                if (r < 0)
                        return r;

                if (offset + size > (uint64_t) f->last_stat.st_size)
                        return -EADDRNOTAVAIL;
        }

        return mmap_cache_fd_get(f->cache_fd, type_to_category(type), keep_always, offset, size, &f->last_stat, ret);
}
859
860 static uint64_t minimum_header_size(JournalFile *f, Object *o) {
861
862 static const uint64_t table[] = {
863 [OBJECT_DATA] = sizeof(DataObject),
864 [OBJECT_FIELD] = sizeof(FieldObject),
865 [OBJECT_ENTRY] = sizeof(EntryObject),
866 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
867 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
868 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
869 [OBJECT_TAG] = sizeof(TagObject),
870 };
871
872 assert(f);
873 assert(o);
874
875 if (o->object.type == OBJECT_DATA)
876 return journal_file_data_payload_offset(f);
877
878 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
879 return sizeof(ObjectHeader);
880
881 return table[o->object.type];
882 }
883
/* Validate the generic header of the object at 'offset': nonzero size, at least
 * ObjectHeader-sized, a known object type, matching the requested 'type' (unless
 * OBJECT_UNUSED, which accepts any), and at least the type-specific minimum size.
 * Returns 0 if OK, -EBADMSG (with a debug log) otherwise. */
static int check_object_header(JournalFile *f, Object *o, ObjectType type, uint64_t offset) {
        uint64_t s;

        assert(f);
        assert(o);

        /* READ_NOW() pins a single read of the mmap'd size so all checks below see one value. */
        s = le64toh(READ_NOW(o->object.size));
        if (s == 0)
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to move to uninitialized object: %" PRIu64,
                                       offset);

        if (s < sizeof(ObjectHeader))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to move to overly short object with size %"PRIu64": %" PRIu64,
                                       s, offset);

        if (o->object.type <= OBJECT_UNUSED || o->object.type >= _OBJECT_TYPE_MAX)
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to move to object with invalid type (%u): %" PRIu64,
                                       o->object.type, offset);

        /* type > OBJECT_UNUSED means the caller insists on a specific object type. */
        if (type > OBJECT_UNUSED && o->object.type != type)
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Found %s object while expecting %s object: %" PRIu64,
                                       journal_object_type_to_string(o->object.type),
                                       journal_object_type_to_string(type),
                                       offset);

        if (s < minimum_header_size(f, o))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Size of %s object (%"PRIu64") is smaller than the minimum object size (%"PRIu64"): %" PRIu64,
                                       journal_object_type_to_string(o->object.type),
                                       s,
                                       minimum_header_size(f, o),
                                       offset);

        return 0;
}
923
/* Lightweight object checks. We want this to be fast, so that we won't
 * slowdown every journal_file_move_to_object() call too much. Performs per-type sanity
 * checks on sizes, offsets and timestamps; returns 0 if plausible, -EBADMSG (with a
 * debug log) on the first inconsistency found. Types without a case here pass trivially. */
static int check_object(JournalFile *f, Object *o, uint64_t offset) {
        assert(f);
        assert(o);

        switch (o->object.type) {

        case OBJECT_DATA:
                /* entry_offset and n_entries must be zero or nonzero together. */
                if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad data n_entries: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->data.n_entries),
                                               offset);

                if (le64toh(o->object.size) <= journal_file_data_payload_offset(f))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad data size (<= %zu): %" PRIu64 ": %" PRIu64,
                                               journal_file_data_payload_offset(f),
                                               le64toh(o->object.size),
                                               offset);

                /* All embedded offsets must be 64-bit aligned. */
                if (!VALID64(le64toh(o->data.next_hash_offset)) ||
                    !VALID64(le64toh(o->data.next_field_offset)) ||
                    !VALID64(le64toh(o->data.entry_offset)) ||
                    !VALID64(le64toh(o->data.entry_array_offset)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid offset, next_hash_offset=" OFSfmt ", next_field_offset=" OFSfmt ", entry_offset=" OFSfmt ", entry_array_offset=" OFSfmt ": %" PRIu64,
                                               le64toh(o->data.next_hash_offset),
                                               le64toh(o->data.next_field_offset),
                                               le64toh(o->data.entry_offset),
                                               le64toh(o->data.entry_array_offset),
                                               offset);

                break;

        case OBJECT_FIELD:
                if (le64toh(o->object.size) <= offsetof(Object, field.payload))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64,
                                               offsetof(Object, field.payload),
                                               le64toh(o->object.size),
                                               offset);

                if (!VALID64(le64toh(o->field.next_hash_offset)) ||
                    !VALID64(le64toh(o->field.head_data_offset)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid offset, next_hash_offset=" OFSfmt ", head_data_offset=" OFSfmt ": %" PRIu64,
                                               le64toh(o->field.next_hash_offset),
                                               le64toh(o->field.head_data_offset),
                                               offset);
                break;

        case OBJECT_ENTRY: {
                uint64_t sz;

                /* The item area must be a nonzero, whole multiple of the per-file item size. */
                sz = le64toh(READ_NOW(o->object.size));
                if (sz < offsetof(Object, entry.items) ||
                    (sz - offsetof(Object, entry.items)) % journal_file_entry_item_size(f) != 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
                                               offsetof(Object, entry.items),
                                               sz,
                                               offset);

                if ((sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid number items in entry: %" PRIu64 ": %" PRIu64,
                                               (sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f),
                                               offset);

                if (le64toh(o->entry.seqnum) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid entry seqnum: %" PRIx64 ": %" PRIu64,
                                               le64toh(o->entry.seqnum),
                                               offset);

                if (!VALID_REALTIME(le64toh(o->entry.realtime)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid entry realtime timestamp: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->entry.realtime),
                                               offset);

                if (!VALID_MONOTONIC(le64toh(o->entry.monotonic)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid entry monotonic timestamp: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->entry.monotonic),
                                               offset);

                if (sd_id128_is_null(o->entry.boot_id))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object entry with an empty boot ID: %" PRIu64,
                                               offset);

                break;
        }

        case OBJECT_DATA_HASH_TABLE:
        case OBJECT_FIELD_HASH_TABLE: {
                uint64_t sz;

                /* The item area must be a nonzero, whole multiple of sizeof(HashItem). */
                sz = le64toh(READ_NOW(o->object.size));
                if (sz < offsetof(Object, hash_table.items) ||
                    (sz - offsetof(Object, hash_table.items)) % sizeof(HashItem) != 0 ||
                    (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid %s hash table size: %" PRIu64 ": %" PRIu64,
                                               journal_object_type_to_string(o->object.type),
                                               sz,
                                               offset);

                break;
        }

        case OBJECT_ENTRY_ARRAY: {
                uint64_t sz, next;

                sz = le64toh(READ_NOW(o->object.size));
                if (sz < offsetof(Object, entry_array.items) ||
                    (sz - offsetof(Object, entry_array.items)) % journal_file_entry_array_item_size(f) != 0 ||
                    (sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object entry array size: %" PRIu64 ": %" PRIu64,
                                               sz,
                                               offset);
                /* Here, we request that the offset of each entry array object is in strictly increasing order. */
                next = le64toh(o->entry_array.next_entry_array_offset);
                if (!VALID64(next) || (next > 0 && next <= offset))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object entry array next_entry_array_offset: %" PRIu64 ": %" PRIu64,
                                               next,
                                               offset);

                break;
        }

        case OBJECT_TAG:
                /* Tag objects have a fixed size. */
                if (le64toh(o->object.size) != sizeof(TagObject))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object tag size: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->object.size),
                                               offset);

                if (!VALID_EPOCH(le64toh(o->tag.epoch)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object tag epoch: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->tag.epoch), offset);

                break;
        }

        return 0;
}
1077
int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
        int r;
        Object *o;

        assert(f);

        /* Maps the object stored at the given file offset into memory, validates it, and optionally returns
         * a pointer to it via *ret. */

        /* Even if this function fails, it may clear, overwrite, or alter previously cached entries with the
         * same type. After this function has been called, all previously read objects with the same type may
         * be invalidated, hence must be re-read before use. */

        /* Objects may only be located at multiple of 64 bit */
        if (!VALID64(offset))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to move to %s object at non-64-bit boundary: %" PRIu64,
                                       journal_object_type_to_string(type),
                                       offset);

        /* Object may not be located in the file header */
        if (offset < le64toh(f->header->header_size))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to move to %s object located in file header: %" PRIu64,
                                       journal_object_type_to_string(type),
                                       offset);

        /* First map only the common object header, so that we can learn the object's full size ... */
        r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), (void**) &o);
        if (r < 0)
                return r;

        r = check_object_header(f, o, type, offset);
        if (r < 0)
                return r;

        /* ... then re-map with the full size. This may relocate the mmap window, hence 'o' is refreshed. */
        r = journal_file_move_to(f, type, false, offset, le64toh(READ_NOW(o->object.size)), (void**) &o);
        if (r < 0)
                return r;

        /* Re-validate the header after the remap, since the window contents may have been re-read */
        r = check_object_header(f, o, type, offset);
        if (r < 0)
                return r;

        /* Type-specific payload validation, now that the complete object is mapped */
        r = check_object(f, o, offset);
        if (r < 0)
                return r;

        if (ret)
                *ret = o;

        return 0;
}
1127
int journal_file_read_object_header(JournalFile *f, ObjectType type, uint64_t offset, Object *ret) {
        ssize_t n;
        Object o;
        int r;

        assert(f);

        /* Reads and validates the header of the object at 'offset' via pread(), i.e. bypassing the mmap
         * cache, and copies it by value into *ret. Only the header portion is returned, not the payload. */

        /* Objects may only be located at multiple of 64 bit */
        if (!VALID64(offset))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to read %s object at non-64-bit boundary: %" PRIu64,
                                       journal_object_type_to_string(type), offset);

        /* Object may not be located in the file header */
        if (offset < le64toh(f->header->header_size))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to read %s object located in file header: %" PRIu64,
                                       journal_object_type_to_string(type), offset);

        /* This will likely read too much data but it avoids having to call pread() twice. */
        n = pread(f->fd, &o, sizeof(o), offset);
        if (n < 0)
                return log_debug_errno(errno, "Failed to read journal %s object at offset: %" PRIu64,
                                       journal_object_type_to_string(type), offset);

        /* At the very least the common object header must have been read fully */
        if ((size_t) n < sizeof(o.object))
                return log_debug_errno(SYNTHETIC_ERRNO(EIO),
                                       "Failed to read short %s object at offset: %" PRIu64,
                                       journal_object_type_to_string(type), offset);

        r = check_object_header(f, &o, type, offset);
        if (r < 0)
                return r;

        /* ... and the full type-specific header too, whose size we only know after the check above */
        if ((size_t) n < minimum_header_size(f, &o))
                return log_debug_errno(SYNTHETIC_ERRNO(EIO),
                                       "Short read while reading %s object: %" PRIu64,
                                       journal_object_type_to_string(type), offset);

        r = check_object(f, &o, offset);
        if (r < 0)
                return r;

        if (ret)
                *ret = o;

        return 0;
}
1176
static uint64_t inc_seqnum(uint64_t seqnum) {
        /* Returns the successor of a sequence number, wrapping around to 1 when we run out, so that the
         * reserved values UINT64_MAX and 0 are never handed out. */
        return seqnum < UINT64_MAX - 1 ? seqnum + 1 : 1;
}
1183
static uint64_t journal_file_entry_seqnum(
                JournalFile *f,
                uint64_t *seqnum) {

        uint64_t next_seqnum;

        assert(f);
        assert(f->header);

        /* Picks a new sequence number for the entry we are about to add and returns it. */

        next_seqnum = inc_seqnum(le64toh(f->header->tail_entry_seqnum));

        /* If an external seqnum counter was passed, we update both the local and the external one, and set
         * it to the maximum of both */
        if (seqnum)
                *seqnum = next_seqnum = MAX(inc_seqnum(*seqnum), next_seqnum);

        f->header->tail_entry_seqnum = htole64(next_seqnum);

        /* If this is the file's very first entry, also record the seqnum as the head seqnum */
        if (f->header->head_entry_seqnum == 0)
                f->header->head_entry_seqnum = htole64(next_seqnum);

        return next_seqnum;
}
1209
int journal_file_append_object(
                JournalFile *f,
                ObjectType type,
                uint64_t size,
                Object **ret_object,
                uint64_t *ret_offset) {

        int r;
        uint64_t p;
        Object *o;

        assert(f);
        assert(f->header);
        assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
        assert(size >= sizeof(ObjectHeader));

        /* Appends a new object of the given type and size at the end of the file, initializes its common
         * header, and returns a pointer to it and/or its offset. The type-specific payload is left for the
         * caller to fill in. */

        r = journal_file_set_online(f);
        if (r < 0)
                return r;

        /* Determine the current end of the file, i.e. where the new object will be placed */
        r = journal_file_tail_end_by_mmap(f, &p);
        if (r < 0)
                return r;

        /* Make sure there's room on disk for the new object */
        r = journal_file_allocate(f, p, size);
        if (r < 0)
                return r;

        r = journal_file_move_to(f, type, false, p, size, (void**) &o);
        if (r < 0)
                return r;

        o->object = (ObjectHeader) {
                .type = type,
                .size = htole64(size),
        };

        /* Account for the new object in the file header */
        f->header->tail_object_offset = htole64(p);
        f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);

        if (ret_object)
                *ret_object = o;

        if (ret_offset)
                *ret_offset = p;

        return 0;
}
1258
static int journal_file_setup_data_hash_table(JournalFile *f) {
        uint64_t s, p;
        Object *o;
        int r;

        assert(f);
        assert(f->header);

        /* Allocates the data hash table object and records its location in the file header. */

        /* We estimate that we need 1 hash table entry per 768 bytes
           of journal file and we want to make sure we never get
           beyond 75% fill level. Calculate the hash table size for
           the maximum file size based on these metrics. */

        s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
        if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
                s = DEFAULT_DATA_HASH_TABLE_SIZE;

        log_debug("Reserving %"PRIu64" entries in data hash table.", s / sizeof(HashItem));

        r = journal_file_append_object(f,
                                       OBJECT_DATA_HASH_TABLE,
                                       offsetof(Object, hash_table.items) + s,
                                       &o, &p);
        if (r < 0)
                return r;

        /* Start out with an empty table */
        memzero(o->hash_table.items, s);

        /* Note: the recorded offset points at the items array, not at the object header */
        f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
        f->header->data_hash_table_size = htole64(s);

        return 0;
}
1292
1293 static int journal_file_setup_field_hash_table(JournalFile *f) {
1294 uint64_t s, p;
1295 Object *o;
1296 int r;
1297
1298 assert(f);
1299 assert(f->header);
1300
1301 /* We use a fixed size hash table for the fields as this
1302 * number should grow very slowly only */
1303
1304 s = DEFAULT_FIELD_HASH_TABLE_SIZE;
1305 log_debug("Reserving %"PRIu64" entries in field hash table.", s / sizeof(HashItem));
1306
1307 r = journal_file_append_object(f,
1308 OBJECT_FIELD_HASH_TABLE,
1309 offsetof(Object, hash_table.items) + s,
1310 &o, &p);
1311 if (r < 0)
1312 return r;
1313
1314 memzero(o->hash_table.items, s);
1315
1316 f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1317 f->header->field_hash_table_size = htole64(s);
1318
1319 return 0;
1320 }
1321
1322 int journal_file_map_data_hash_table(JournalFile *f) {
1323 uint64_t s, p;
1324 void *t;
1325 int r;
1326
1327 assert(f);
1328 assert(f->header);
1329
1330 if (f->data_hash_table)
1331 return 0;
1332
1333 p = le64toh(f->header->data_hash_table_offset);
1334 s = le64toh(f->header->data_hash_table_size);
1335
1336 r = journal_file_move_to(f,
1337 OBJECT_DATA_HASH_TABLE,
1338 true,
1339 p, s,
1340 &t);
1341 if (r < 0)
1342 return r;
1343
1344 f->data_hash_table = t;
1345 return 0;
1346 }
1347
1348 int journal_file_map_field_hash_table(JournalFile *f) {
1349 uint64_t s, p;
1350 void *t;
1351 int r;
1352
1353 assert(f);
1354 assert(f->header);
1355
1356 if (f->field_hash_table)
1357 return 0;
1358
1359 p = le64toh(f->header->field_hash_table_offset);
1360 s = le64toh(f->header->field_hash_table_size);
1361
1362 r = journal_file_move_to(f,
1363 OBJECT_FIELD_HASH_TABLE,
1364 true,
1365 p, s,
1366 &t);
1367 if (r < 0)
1368 return r;
1369
1370 f->field_hash_table = t;
1371 return 0;
1372 }
1373
static int journal_file_link_field(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                uint64_t hash) {

        uint64_t p, h, m;
        int r;

        assert(f);
        assert(f->header);
        assert(f->field_hash_table);
        assert(o);
        assert(offset > 0);

        /* Inserts the field object at 'offset' into the field hash table, appending it at the tail of its
         * hash chain. */

        if (o->object.type != OBJECT_FIELD)
                return -EINVAL;

        m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* This might alter the window we are looking at */
        o->field.next_hash_offset = o->field.head_data_offset = 0;

        h = hash % m;
        p = le64toh(f->field_hash_table[h].tail_hash_offset);
        if (p == 0)
                /* Empty chain: this object becomes the head */
                f->field_hash_table[h].head_hash_offset = htole64(offset);
        else {
                /* Non-empty chain: patch the previous tail to point at us. Note that this remaps the mmap
                 * window, so 'o' must be considered invalidated from here on. */
                r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
                if (r < 0)
                        return r;

                o->field.next_hash_offset = htole64(offset);
        }

        f->field_hash_table[h].tail_hash_offset = htole64(offset);

        /* Keep the field counter current, if the header is recent enough to carry one */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);

        return 0;
}
1418
static int journal_file_link_data(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                uint64_t hash) {

        uint64_t p, h, m;
        int r;

        assert(f);
        assert(f->header);
        assert(f->data_hash_table);
        assert(o);
        assert(offset > 0);

        /* Inserts the data object at 'offset' into the data hash table, appending it at the tail of its
         * hash chain. */

        if (o->object.type != OBJECT_DATA)
                return -EINVAL;

        m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* This might alter the window we are looking at */
        o->data.next_hash_offset = o->data.next_field_offset = 0;
        o->data.entry_offset = o->data.entry_array_offset = 0;
        o->data.n_entries = 0;

        h = hash % m;
        p = le64toh(f->data_hash_table[h].tail_hash_offset);
        if (p == 0)
                /* Only entry in the hash table is easy */
                f->data_hash_table[h].head_hash_offset = htole64(offset);
        else {
                /* Move back to the previous data object, to patch in
                 * pointer */

                /* Note: this remaps the mmap window, so 'o' must be considered invalidated from here on */
                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
                if (r < 0)
                        return r;

                o->data.next_hash_offset = htole64(offset);
        }

        f->data_hash_table[h].tail_hash_offset = htole64(offset);

        /* Keep the data object counter current, if the header is recent enough to carry one */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                f->header->n_data = htole64(le64toh(f->header->n_data) + 1);

        return 0;
}
1469
1470 static int get_next_hash_offset(
1471 JournalFile *f,
1472 uint64_t *p,
1473 le64_t *next_hash_offset,
1474 uint64_t *depth,
1475 le64_t *header_max_depth) {
1476
1477 uint64_t nextp;
1478
1479 assert(f);
1480 assert(p);
1481 assert(next_hash_offset);
1482 assert(depth);
1483
1484 nextp = le64toh(READ_NOW(*next_hash_offset));
1485 if (nextp > 0) {
1486 if (nextp <= *p) /* Refuse going in loops */
1487 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
1488 "Detected hash item loop in %s, refusing.", f->path);
1489
1490 (*depth)++;
1491
1492 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1493 if (header_max_depth && journal_file_writable(f))
1494 *header_max_depth = htole64(MAX(*depth, le64toh(*header_max_depth)));
1495 }
1496
1497 *p = nextp;
1498 return 0;
1499 }
1500
int journal_file_find_field_object_with_hash(
                JournalFile *f,
                const void *field,
                uint64_t size,
                uint64_t hash,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t p, osize, h, m, depth = 0;
        int r;

        assert(f);
        assert(f->header);
        assert(field);
        assert(size > 0);

        /* Looks up a field object by its name and precomputed hash. Returns 1 if found (filling in
         * *ret_object/*ret_offset), 0 if there is no such field, negative on error. */

        /* If the field hash table is empty, we can't find anything */
        if (le64toh(f->header->field_hash_table_size) <= 0)
                return 0;

        /* Map the field hash table, if it isn't mapped yet. */
        r = journal_file_map_field_hash_table(f);
        if (r < 0)
                return r;

        /* The on-disk size a field object with this name would have */
        osize = offsetof(Object, field.payload) + size;

        m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* Walk the hash chain of the bucket this hash falls into */
        h = hash % m;
        p = le64toh(f->field_hash_table[h].head_hash_offset);
        while (p > 0) {
                Object *o;

                r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
                if (r < 0)
                        return r;

                /* Compare hash first (cheap), then object size, then the payload itself */
                if (le64toh(o->field.hash) == hash &&
                    le64toh(o->object.size) == osize &&
                    memcmp(o->field.payload, field, size) == 0) {

                        if (ret_object)
                                *ret_object = o;
                        if (ret_offset)
                                *ret_offset = p;

                        return 1;
                }

                /* Advance along the chain; this also detects loops and records the maximum chain depth */
                r = get_next_hash_offset(
                                f,
                                &p,
                                &o->field.next_hash_offset,
                                &depth,
                                JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) ? &f->header->field_hash_chain_depth : NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}
1565
1566 uint64_t journal_file_hash_data(
1567 JournalFile *f,
1568 const void *data,
1569 size_t sz) {
1570
1571 assert(f);
1572 assert(f->header);
1573 assert(data || sz == 0);
1574
1575 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1576 * function use siphash. Old journal files use the Jenkins hash. */
1577
1578 if (JOURNAL_HEADER_KEYED_HASH(f->header))
1579 return siphash24(data, sz, f->header->file_id.bytes);
1580
1581 return jenkins_hash64(data, sz);
1582 }
1583
1584 int journal_file_find_field_object(
1585 JournalFile *f,
1586 const void *field,
1587 uint64_t size,
1588 Object **ret_object,
1589 uint64_t *ret_offset) {
1590
1591 assert(f);
1592 assert(field);
1593 assert(size > 0);
1594
1595 return journal_file_find_field_object_with_hash(
1596 f,
1597 field, size,
1598 journal_file_hash_data(f, field, size),
1599 ret_object, ret_offset);
1600 }
1601
int journal_file_find_data_object_with_hash(
                JournalFile *f,
                const void *data,
                uint64_t size,
                uint64_t hash,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t p, h, m, depth = 0;
        int r;

        assert(f);
        assert(f->header);
        assert(data || size == 0);

        /* Looks up a data object with the specified payload and precomputed hash. Returns 1 if found
         * (filling in *ret_object/*ret_offset), 0 if there is no such object, negative on error. */

        /* If there's no data hash table, then there's no entry. */
        if (le64toh(f->header->data_hash_table_size) <= 0)
                return 0;

        /* Map the data hash table, if it isn't mapped yet. */
        r = journal_file_map_data_hash_table(f);
        if (r < 0)
                return r;

        m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* Walk the hash chain of the bucket this hash falls into */
        h = hash % m;
        p = le64toh(f->data_hash_table[h].head_hash_offset);

        while (p > 0) {
                Object *o;
                void *d;
                size_t rsize;

                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
                if (r < 0)
                        return r;

                /* Compare hashes first, as that's cheap; the payload (which may require decompression) is
                 * only compared on a hash match */
                if (le64toh(o->data.hash) != hash)
                        goto next;

                r = journal_file_data_payload(f, o, p, NULL, 0, 0, &d, &rsize);
                if (r < 0)
                        return r;
                assert(r > 0); /* journal_file_data_payload() always returns > 0 if no field is provided. */

                if (memcmp_nn(data, size, d, rsize) == 0) {
                        if (ret_object)
                                *ret_object = o;

                        if (ret_offset)
                                *ret_offset = p;

                        return 1;
                }

        next:
                /* Advance along the chain; this also detects loops and records the maximum chain depth */
                r = get_next_hash_offset(
                                f,
                                &p,
                                &o->data.next_hash_offset,
                                &depth,
                                JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) ? &f->header->data_hash_chain_depth : NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}
1673
1674 int journal_file_find_data_object(
1675 JournalFile *f,
1676 const void *data,
1677 uint64_t size,
1678 Object **ret_object,
1679 uint64_t *ret_offset) {
1680
1681 assert(f);
1682 assert(data || size == 0);
1683
1684 return journal_file_find_data_object_with_hash(
1685 f,
1686 data, size,
1687 journal_file_hash_data(f, data, size),
1688 ret_object, ret_offset);
1689 }
1690
bool journal_field_valid(const char *p, size_t l, bool allow_protected) {
        /* We kinda enforce POSIX syntax recommendations for
           environment variables here, but make a couple of additional
           requirements.

           http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html */

        assert(p);

        if (l == SIZE_MAX)
                l = strlen(p);

        /* Reject empty names and names longer than 64 characters */
        if (l == 0 || l > 64)
                return false;

        /* Variables starting with an underscore are protected */
        if (p[0] == '_' && !allow_protected)
                return false;

        /* Don't allow digits as first character */
        if (ascii_isdigit(p[0]))
                return false;

        /* Only allow A-Z0-9 and '_' */
        for (size_t i = 0; i < l; i++) {
                char c = p[i];

                if (c == '_' || ascii_isdigit(c) || (c >= 'A' && c <= 'Z'))
                        continue;

                return false;
        }

        return true;
}
1728
static int journal_file_append_field(
                JournalFile *f,
                const void *field,
                uint64_t size,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t hash, p;
        uint64_t osize;
        Object *o;
        int r;

        assert(f);
        assert(field);
        assert(size > 0);

        /* Adds a field object for the specified field name, deduplicating: if one exists already, it is
         * returned instead of appending a second copy. */

        if (!journal_field_valid(field, size, true))
                return -EBADMSG;

        hash = journal_file_hash_data(f, field, size);

        /* Reuse an existing field object for this name, if we have one */
        r = journal_file_find_field_object_with_hash(f, field, size, hash, ret_object, ret_offset);
        if (r < 0)
                return r;
        if (r > 0)
                return 0;

        osize = offsetof(Object, field.payload) + size;
        r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
        if (r < 0)
                return r;

        o->field.hash = htole64(hash);
        memcpy(o->field.payload, field, size);

        /* Insert the new object into the field hash table */
        r = journal_file_link_field(f, o, p, hash);
        if (r < 0)
                return r;

        /* The linking might have altered the window, so let's only pass the offset to hmac which will
         * move to the object again if needed. */

#if HAVE_GCRYPT
        r = journal_file_hmac_put_object(f, OBJECT_FIELD, NULL, p);
        if (r < 0)
                return r;
#endif

        /* Re-map the object if the caller wants a pointer, since 'o' may be stale by now */
        if (ret_object) {
                r = journal_file_move_to_object(f, OBJECT_FIELD, p, ret_object);
                if (r < 0)
                        return r;
        }

        if (ret_offset)
                *ret_offset = p;

        return 0;
}
1788
static int maybe_compress_payload(JournalFile *f, uint8_t *dst, const uint8_t *src, uint64_t size, size_t *rsize) {
        assert(f);
        assert(f->header);

        /* Compresses 'src' into 'dst' if compression is enabled and the payload is large enough to be worth
         * it. Returns 1 if compressed (with *rsize set to the compressed size), 0 or negative otherwise, in
         * which case the caller stores the payload uncompressed. */

#if HAVE_COMPRESSION
        Compression c;
        int r;

        c = JOURNAL_FILE_COMPRESSION(f);
        if (c == COMPRESSION_NONE || size < f->compress_threshold_bytes)
                return 0;

        /* Cap the output at size - 1, i.e. only accept the result if compression actually saves space */
        r = compress_blob(c, src, size, dst, size - 1, rsize);
        if (r < 0)
                return log_debug_errno(r, "Failed to compress data object using %s, ignoring: %m", compression_to_string(c));

        log_debug("Compressed data object %"PRIu64" -> %zu using %s", size, *rsize, compression_to_string(c));

        return 1; /* compressed */
#else
        return 0;
#endif
}
1812
1813 static int journal_file_append_data(
1814 JournalFile *f,
1815 const void *data,
1816 uint64_t size,
1817 Object **ret_object,
1818 uint64_t *ret_offset) {
1819
1820 uint64_t hash, p, osize;
1821 Object *o, *fo;
1822 size_t rsize = 0;
1823 const void *eq;
1824 int r;
1825
1826 assert(f);
1827
1828 if (!data || size == 0)
1829 return -EINVAL;
1830
1831 hash = journal_file_hash_data(f, data, size);
1832
1833 r = journal_file_find_data_object_with_hash(f, data, size, hash, ret_object, ret_offset);
1834 if (r < 0)
1835 return r;
1836 if (r > 0)
1837 return 0;
1838
1839 eq = memchr(data, '=', size);
1840 if (!eq)
1841 return -EINVAL;
1842
1843 osize = journal_file_data_payload_offset(f) + size;
1844 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1845 if (r < 0)
1846 return r;
1847
1848 o->data.hash = htole64(hash);
1849
1850 r = maybe_compress_payload(f, journal_file_data_payload_field(f, o), data, size, &rsize);
1851 if (r <= 0)
1852 /* We don't really care failures, let's continue without compression */
1853 memcpy_safe(journal_file_data_payload_field(f, o), data, size);
1854 else {
1855 Compression c = JOURNAL_FILE_COMPRESSION(f);
1856
1857 assert(c >= 0 && c < _COMPRESSION_MAX && c != COMPRESSION_NONE);
1858
1859 o->object.size = htole64(journal_file_data_payload_offset(f) + rsize);
1860 o->object.flags |= COMPRESSION_TO_OBJECT_FLAG(c);
1861 }
1862
1863 r = journal_file_link_data(f, o, p, hash);
1864 if (r < 0)
1865 return r;
1866
1867 /* The linking might have altered the window, so let's refresh our pointer. */
1868 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1869 if (r < 0)
1870 return r;
1871
1872 #if HAVE_GCRYPT
1873 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1874 if (r < 0)
1875 return r;
1876 #endif
1877
1878 /* Create field object ... */
1879 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, NULL);
1880 if (r < 0)
1881 return r;
1882
1883 /* ... and link it in. */
1884 o->data.next_field_offset = fo->field.head_data_offset;
1885 fo->field.head_data_offset = le64toh(p);
1886
1887 if (ret_object)
1888 *ret_object = o;
1889
1890 if (ret_offset)
1891 *ret_offset = p;
1892
1893 return 0;
1894 }
1895
static int maybe_decompress_payload(
                JournalFile *f,
                uint8_t *payload,
                uint64_t size,
                Compression compression,
                const char *field,
                size_t field_length,
                size_t data_threshold,
                void **ret_data,
                size_t *ret_size) {

        assert(f);

        /* Returns a data object's payload, decompressing it if necessary. If 'field' is specified, the
         * payload is only returned when it begins with "FIELD="; otherwise 0 is returned with
         * *ret_data/*ret_size cleared. Returns 1 when data was returned.
         *
         * NOTE(review): 'data_threshold' is not referenced in this function — presumably any payload size
         * capping happens in the callers; confirm before relying on it here. */

        /* We can't read objects larger than 4G on a 32-bit machine */
        if ((uint64_t) (size_t) size != size)
                return -E2BIG;

        if (compression != COMPRESSION_NONE) {
#if HAVE_COMPRESSION
                size_t rsize;
                int r;

                if (field) {
                        /* Check the "FIELD=" prefix without necessarily decompressing the full blob */
                        r = decompress_startswith(compression, payload, size, &f->compress_buffer, field,
                                                  field_length, '=');
                        if (r < 0)
                                return log_debug_errno(r,
                                                       "Cannot decompress %s object of length %" PRIu64 ": %m",
                                                       compression_to_string(compression),
                                                       size);
                        if (r == 0) {
                                /* Prefix mismatch: this is not the requested field */
                                if (ret_data)
                                        *ret_data = NULL;
                                if (ret_size)
                                        *ret_size = 0;
                                return 0;
                        }
                }

                /* Decompress into the file's scratch buffer; the returned pointer is only valid until that
                 * buffer is reused */
                r = decompress_blob(compression, payload, size, &f->compress_buffer, &rsize, 0);
                if (r < 0)
                        return r;

                if (ret_data)
                        *ret_data = f->compress_buffer;
                if (ret_size)
                        *ret_size = rsize;
#else
                return -EPROTONOSUPPORT;
#endif
        } else {
                /* Uncompressed: apply the same "FIELD=" prefix filter directly on the mapped payload */
                if (field && (size < field_length + 1 || memcmp(payload, field, field_length) != 0 || payload[field_length] != '=')) {
                        if (ret_data)
                                *ret_data = NULL;
                        if (ret_size)
                                *ret_size = 0;
                        return 0;
                }

                if (ret_data)
                        *ret_data = payload;
                if (ret_size)
                        *ret_size = (size_t) size;
        }

        return 1;
}
1963
int journal_file_data_payload(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                const char *field,
                size_t field_length,
                size_t data_threshold,
                void **ret_data,
                size_t *ret_size) {

        uint64_t size;
        Compression c;
        int r;

        assert(f);
        assert(!field == (field_length == 0)); /* These must be specified together. */

        /* Returns the (decompressed) payload of the data object at 'offset'. 'o' may be passed in if the
         * object is already mapped, otherwise it is looked up via 'offset'. If 'field' is specified, only
         * payloads with a matching "FIELD=" prefix are returned (see maybe_decompress_payload()). */

        if (!o) {
                r = journal_file_move_to_object(f, OBJECT_DATA, offset, &o);
                if (r < 0)
                        return r;
        }

        /* The payload follows the (format-dependent) data object header */
        size = le64toh(READ_NOW(o->object.size));
        if (size < journal_file_data_payload_offset(f))
                return -EBADMSG;

        size -= journal_file_data_payload_offset(f);

        c = COMPRESSION_FROM_OBJECT(o);
        if (c < 0)
                return -EPROTONOSUPPORT;

        return maybe_decompress_payload(f, journal_file_data_payload_field(f, o), size, c, field,
                                        field_length, data_threshold, ret_data, ret_size);
}
2000
2001 uint64_t journal_file_entry_n_items(JournalFile *f, Object *o) {
2002 uint64_t sz;
2003
2004 assert(f);
2005 assert(o);
2006
2007 if (o->object.type != OBJECT_ENTRY)
2008 return 0;
2009
2010 sz = le64toh(READ_NOW(o->object.size));
2011 if (sz < offsetof(Object, entry.items))
2012 return 0;
2013
2014 return (sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f);
2015 }
2016
2017 uint64_t journal_file_entry_array_n_items(JournalFile *f, Object *o) {
2018 uint64_t sz;
2019
2020 assert(f);
2021 assert(o);
2022
2023 if (o->object.type != OBJECT_ENTRY_ARRAY)
2024 return 0;
2025
2026 sz = le64toh(READ_NOW(o->object.size));
2027 if (sz < offsetof(Object, entry_array.items))
2028 return 0;
2029
2030 return (sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f);
2031 }
2032
2033 uint64_t journal_file_hash_table_n_items(Object *o) {
2034 uint64_t sz;
2035
2036 assert(o);
2037
2038 if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
2039 return 0;
2040
2041 sz = le64toh(READ_NOW(o->object.size));
2042 if (sz < offsetof(Object, hash_table.items))
2043 return 0;
2044
2045 return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem);
2046 }
2047
2048 static void write_entry_array_item(JournalFile *f, Object *o, uint64_t i, uint64_t p) {
2049 assert(f);
2050 assert(o);
2051
2052 if (JOURNAL_HEADER_COMPACT(f->header)) {
2053 assert(p <= UINT32_MAX);
2054 o->entry_array.items.compact[i] = htole32(p);
2055 } else
2056 o->entry_array.items.regular[i] = htole64(p);
2057 }
2058
static int link_entry_into_array(
                JournalFile *f,
                le64_t *first,
                le64_t *idx,
                le32_t *tail,
                le32_t *tidx,
                uint64_t p) {

        uint64_t n = 0, ap = 0, q, i, a, hidx;
        Object *o;
        int r;

        assert(f);
        assert(f->header);
        assert(first);
        assert(idx);
        assert(p > 0);

        /* Appends the entry offset 'p' to a chain of entry array objects: 'first' points at the head of the
         * chain, 'idx' holds the total number of items linked so far. 'tail'/'tidx' optionally cache the
         * chain's tail array and the item count within it, allowing us to skip walking the whole chain. A
         * new, larger array object is appended when the chain is full. */

        /* If a tail cache is available start at the tail, otherwise at the head of the chain */
        a = tail ? le32toh(*tail) : le64toh(*first);
        hidx = le64toh(READ_NOW(*idx));
        i = tidx ? le32toh(READ_NOW(*tidx)) : hidx;

        /* Walk the chain looking for an array with a free slot */
        while (a > 0) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (r < 0)
                        return r;

                n = journal_file_entry_array_n_items(f, o);
                if (i < n) {
                        /* Found a free slot: store the offset and bump the counters */
                        write_entry_array_item(f, o, i, p);
                        *idx = htole64(hidx + 1);
                        if (tidx)
                                *tidx = htole32(le32toh(*tidx) + 1);
                        return 0;
                }

                i -= n;
                ap = a;
                a = le64toh(o->entry_array.next_entry_array_offset);
        }

        /* All arrays are full (or the chain is empty): append a new array, doubling the size of the
         * previous one, with a minimum of 4 items */
        if (hidx > n)
                n = (hidx+1) * 2;
        else
                n = n * 2;

        if (n < 4)
                n = 4;

        r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
                                       offsetof(Object, entry_array.items) + n * journal_file_entry_array_item_size(f),
                                       &o, &q);
        if (r < 0)
                return r;

#if HAVE_GCRYPT
        r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
        if (r < 0)
                return r;
#endif

        write_entry_array_item(f, o, i, p);

        /* Hook the new array into the chain: as the head if the chain was empty, otherwise after the
         * previous tail. Note that the move below invalidates 'o'. */
        if (ap == 0)
                *first = htole64(q);
        else {
                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
                if (r < 0)
                        return r;

                o->entry_array.next_entry_array_offset = htole64(q);
        }

        if (tail)
                *tail = htole32(q);

        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
                f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);

        /* The freshly appended tail array now holds exactly one item */
        *idx = htole64(hidx + 1);
        if (tidx)
                *tidx = htole32(1);

        return 0;
}
2144
static int link_entry_into_array_plus_one(
                JournalFile *f,
                le64_t *extra,
                le64_t *first,
                le64_t *idx,
                le32_t *tail,
                le32_t *tidx,
                uint64_t p) {

        uint64_t hidx;
        int r;

        assert(f);
        assert(extra);
        assert(first);
        assert(idx);
        assert(p > 0);

        /* Like link_entry_into_array(), but the very first item is stored inline in 'extra' rather than in
         * an array object, so that objects referenced exactly once need no entry array at all. */

        hidx = le64toh(READ_NOW(*idx));
        if (hidx == UINT64_MAX)
                return -EBADMSG; /* the counter would overflow */
        if (hidx == 0)
                /* First item: store it in the inline 'extra' slot */
                *extra = htole64(p);
        else {
                le64_t i;

                /* Subsequent items go into the array chain; the index is shifted down by one to account for
                 * the inline slot */
                i = htole64(hidx - 1);
                r = link_entry_into_array(f, first, &i, tail, tidx, p);
                if (r < 0)
                        return r;
        }

        *idx = htole64(hidx + 1);
        return 0;
}
2180
static int journal_file_link_entry_item(JournalFile *f, uint64_t offset, uint64_t p) {
        Object *o;
        int r;

        assert(f);
        assert(offset > 0);

        /* Registers the entry at 'offset' in the entry list of the data object at 'p'. */

        r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
        if (r < 0)
                return r;

        /* The first referencing entry lands in data.entry_offset, further ones in the entry array chain;
         * the tail cache fields only exist in compact-mode files */
        return link_entry_into_array_plus_one(f,
                                              &o->data.entry_offset,
                                              &o->data.entry_array_offset,
                                              &o->data.n_entries,
                                              JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_offset : NULL,
                                              JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_n_entries : NULL,
                                              offset);
}
2200
static int journal_file_link_entry(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                const EntryItem items[],
                size_t n_items) {

        int r;

        assert(f);
        assert(f->header);
        assert(o);
        assert(offset > 0);

        /* Links a freshly written entry object into the global entry list, updates the header's tail
         * timestamp bookkeeping, and registers the entry with each of its data objects. */

        if (o->object.type != OBJECT_ENTRY)
                return -EINVAL;

        /* Full memory barrier before the entry becomes reachable — NOTE(review): presumably so that
         * concurrent readers never observe the links before the entry payload itself is visible; confirm
         * against the reader side. */
        __atomic_thread_fence(__ATOMIC_SEQ_CST);

        /* Link up the entry itself */
        r = link_entry_into_array(f,
                                  &f->header->entry_array_offset,
                                  &f->header->n_entries,
                                  JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_offset) ? &f->header->tail_entry_array_offset : NULL,
                                  JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_n_entries) ? &f->header->tail_entry_array_n_entries : NULL,
                                  offset);
        if (r < 0)
                return r;

        /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */

        if (f->header->head_entry_realtime == 0)
                f->header->head_entry_realtime = o->entry.realtime;

        f->header->tail_entry_realtime = o->entry.realtime;
        f->header->tail_entry_monotonic = o->entry.monotonic;
        if (JOURNAL_HEADER_CONTAINS(f->header, tail_entry_offset))
                f->header->tail_entry_offset = htole64(offset);
        f->newest_mtime = 0; /* we have a new tail entry now, explicitly invalidate newest boot id/timestamp info */

        /* Link up the items */
        for (uint64_t i = 0; i < n_items; i++) {
                int k;

                /* If we fail to link an entry item because we can't allocate a new entry array, don't fail
                 * immediately but try to link the other entry items since it might still be possible to link
                 * those if they don't require a new entry array to be allocated. */

                k = journal_file_link_entry_item(f, offset, items[i].object_offset);
                if (k == -E2BIG)
                        r = k;
                else if (k < 0)
                        return k;
        }

        /* Returns -E2BIG if any item failed that way, 0 otherwise */
        return r;
}
2258
2259 static void write_entry_item(JournalFile *f, Object *o, uint64_t i, const EntryItem *item) {
2260 assert(f);
2261 assert(o);
2262 assert(item);
2263
2264 if (JOURNAL_HEADER_COMPACT(f->header)) {
2265 assert(item->object_offset <= UINT32_MAX);
2266 o->entry.items.compact[i].object_offset = htole32(item->object_offset);
2267 } else {
2268 o->entry.items.regular[i].object_offset = htole64(item->object_offset);
2269 o->entry.items.regular[i].hash = htole64(item->hash);
2270 }
2271 }
2272
/* Core of entry appending: writes a new OBJECT_ENTRY referencing the (already written) data
 * objects listed in 'items', links it into the entry arrays, and updates the header's seqnum and
 * tail timestamp bookkeeping.
 *
 * 'ts' and 'boot_id' must be valid/non-null (asserted); 'machine_id' may be NULL. On success,
 * optionally returns the new object and its offset. May return a negative error from
 * journal_file_link_entry() (e.g. -E2BIG, see its implementation) even after the entry object
 * itself was written. */
static int journal_file_append_entry_internal(
                JournalFile *f,
                const dual_timestamp *ts,
                const sd_id128_t *boot_id,
                const sd_id128_t *machine_id,
                uint64_t xor_hash,
                const EntryItem items[],
                size_t n_items,
                uint64_t *seqnum,
                sd_id128_t *seqnum_id,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t np;
        uint64_t osize;
        Object *o;
        int r;

        assert(f);
        assert(f->header);
        assert(ts);
        assert(boot_id);
        assert(!sd_id128_is_null(*boot_id));
        assert(items || n_items == 0);

        if (f->strict_order) {
                /* If requested be stricter with ordering in this journal file, to make searching via
                 * bisection fully deterministic. This is an optional feature, so that if desired journal
                 * files can be written where the ordering is not strictly enforced (in which case bisection
                 * will yield *a* result, but not the *only* result, when searching for points in
                 * time). Strict ordering mode is enabled when journald originally writes the files, but
                 * might not necessarily be if other tools (the remoting tools for example) write journal
                 * files from combined sources.
                 *
                 * Typically, if any of the errors generated here are seen journald will just rotate the
                 * journal files and start anew. */

                if (ts->realtime < le64toh(f->header->tail_entry_realtime))
                        return log_debug_errno(SYNTHETIC_ERRNO(EREMCHG),
                                               "Realtime timestamp %" PRIu64 " smaller than previous realtime "
                                               "timestamp %" PRIu64 ", refusing entry.",
                                               ts->realtime, le64toh(f->header->tail_entry_realtime));

                /* Monotonic timestamps are only comparable within the same boot. */
                if (sd_id128_equal(*boot_id, f->header->tail_entry_boot_id) &&
                    ts->monotonic < le64toh(f->header->tail_entry_monotonic))
                        return log_debug_errno(
                                        SYNTHETIC_ERRNO(ENOTNAM),
                                        "Monotonic timestamp %" PRIu64
                                        " smaller than previous monotonic timestamp %" PRIu64
                                        " while having the same boot ID, refusing entry.",
                                        ts->monotonic,
                                        le64toh(f->header->tail_entry_monotonic));
        }

        if (seqnum_id) {
                /* Settle the passed in sequence number ID */

                if (sd_id128_is_null(*seqnum_id))
                        *seqnum_id = f->header->seqnum_id; /* Caller has none assigned, then copy the one from the file */
                else if (!sd_id128_equal(*seqnum_id, f->header->seqnum_id)) {
                        /* Different seqnum IDs? We can't allow entries from multiple IDs end up in the same journal.*/
                        if (le64toh(f->header->n_entries) == 0)
                                f->header->seqnum_id = *seqnum_id; /* Caller has one, and file so far has no entries, then copy the one from the caller */
                        else
                                return log_debug_errno(SYNTHETIC_ERRNO(EILSEQ),
                                                       "Sequence number IDs don't match, refusing entry.");
                }
        }

        if (machine_id && sd_id128_is_null(f->header->machine_id))
                /* Initialize machine ID when not set yet */
                f->header->machine_id = *machine_id;

        /* Size of the entry object: fixed header plus one item per referenced data object. */
        osize = offsetof(Object, entry.items) + (n_items * journal_file_entry_item_size(f));

        r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
        if (r < 0)
                return r;

        /* Fill in the entry object's fields; also record the boot ID as the file's tail boot ID. */
        o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
        o->entry.realtime = htole64(ts->realtime);
        o->entry.monotonic = htole64(ts->monotonic);
        o->entry.xor_hash = htole64(xor_hash);
        o->entry.boot_id = f->header->tail_entry_boot_id = *boot_id;

        for (size_t i = 0; i < n_items; i++)
                write_entry_item(f, o, i, &items[i]);

#if HAVE_GCRYPT
        /* With FSS sealing enabled, include the new object in the running HMAC. */
        r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
        if (r < 0)
                return r;
#endif

        /* Link the entry into the global entry array and the per-data-object entry arrays. */
        r = journal_file_link_entry(f, o, np, items, n_items);
        if (r < 0)
                return r;

        if (ret_object)
                *ret_object = o;

        if (ret_offset)
                *ret_offset = np;

        return r;
}
2379
2380 void journal_file_post_change(JournalFile *f) {
2381 assert(f);
2382
2383 if (f->fd < 0)
2384 return;
2385
2386 /* inotify() does not receive IN_MODIFY events from file
2387 * accesses done via mmap(). After each access we hence
2388 * trigger IN_MODIFY by truncating the journal file to its
2389 * current size which triggers IN_MODIFY. */
2390
2391 __atomic_thread_fence(__ATOMIC_SEQ_CST);
2392
2393 if (ftruncate(f->fd, f->last_stat.st_size) < 0)
2394 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
2395 }
2396
2397 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
2398 assert(userdata);
2399
2400 journal_file_post_change(userdata);
2401
2402 return 1;
2403 }
2404
2405 static void schedule_post_change(JournalFile *f) {
2406 sd_event *e;
2407 int r;
2408
2409 assert(f);
2410 assert(f->post_change_timer);
2411
2412 assert_se(e = sd_event_source_get_event(f->post_change_timer));
2413
2414 /* If we are already going down, post the change immediately. */
2415 if (IN_SET(sd_event_get_state(e), SD_EVENT_EXITING, SD_EVENT_FINISHED))
2416 goto fail;
2417
2418 r = sd_event_source_get_enabled(f->post_change_timer, NULL);
2419 if (r < 0) {
2420 log_debug_errno(r, "Failed to get ftruncate timer state: %m");
2421 goto fail;
2422 }
2423 if (r > 0)
2424 return;
2425
2426 r = sd_event_source_set_time_relative(f->post_change_timer, f->post_change_timer_period);
2427 if (r < 0) {
2428 log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
2429 goto fail;
2430 }
2431
2432 r = sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_ONESHOT);
2433 if (r < 0) {
2434 log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
2435 goto fail;
2436 }
2437
2438 return;
2439
2440 fail:
2441 /* On failure, let's simply post the change immediately. */
2442 journal_file_post_change(f);
2443 }
2444
2445 /* Enable coalesced change posting in a timer on the provided sd_event instance */
2446 int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
2447 _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
2448 int r;
2449
2450 assert(f);
2451 assert_return(!f->post_change_timer, -EINVAL);
2452 assert(e);
2453 assert(t);
2454
2455 r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
2456 if (r < 0)
2457 return r;
2458
2459 r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
2460 if (r < 0)
2461 return r;
2462
2463 f->post_change_timer = TAKE_PTR(timer);
2464 f->post_change_timer_period = t;
2465
2466 return r;
2467 }
2468
2469 static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
2470 return CMP(ASSERT_PTR(a)->object_offset, ASSERT_PTR(b)->object_offset);
2471 }
2472
2473 static size_t remove_duplicate_entry_items(EntryItem items[], size_t n) {
2474 size_t j = 1;
2475
2476 assert(items || n == 0);
2477
2478 if (n <= 1)
2479 return n;
2480
2481 for (size_t i = 1; i < n; i++)
2482 if (items[i].object_offset != items[j - 1].object_offset)
2483 items[j++] = items[i];
2484
2485 return j;
2486 }
2487
2488 int journal_file_append_entry(
2489 JournalFile *f,
2490 const dual_timestamp *ts,
2491 const sd_id128_t *boot_id,
2492 const struct iovec iovec[],
2493 size_t n_iovec,
2494 uint64_t *seqnum,
2495 sd_id128_t *seqnum_id,
2496 Object **ret_object,
2497 uint64_t *ret_offset) {
2498
2499 _cleanup_free_ EntryItem *items_alloc = NULL;
2500 EntryItem *items;
2501 uint64_t xor_hash = 0;
2502 struct dual_timestamp _ts;
2503 sd_id128_t _boot_id, _machine_id, *machine_id;
2504 int r;
2505
2506 assert(f);
2507 assert(f->header);
2508 assert(iovec);
2509 assert(n_iovec > 0);
2510
2511 if (ts) {
2512 if (!VALID_REALTIME(ts->realtime))
2513 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2514 "Invalid realtime timestamp %" PRIu64 ", refusing entry.",
2515 ts->realtime);
2516 if (!VALID_MONOTONIC(ts->monotonic))
2517 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2518 "Invalid monotomic timestamp %" PRIu64 ", refusing entry.",
2519 ts->monotonic);
2520 } else {
2521 dual_timestamp_get(&_ts);
2522 ts = &_ts;
2523 }
2524
2525 if (boot_id) {
2526 if (sd_id128_is_null(*boot_id))
2527 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG), "Empty boot ID, refusing entry.");
2528 } else {
2529 r = sd_id128_get_boot(&_boot_id);
2530 if (r < 0)
2531 return r;
2532
2533 boot_id = &_boot_id;
2534 }
2535
2536 r = sd_id128_get_machine(&_machine_id);
2537 if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r))
2538 /* Gracefully handle the machine ID not being initialized yet */
2539 machine_id = NULL;
2540 else if (r < 0)
2541 return r;
2542 else
2543 machine_id = &_machine_id;
2544
2545 #if HAVE_GCRYPT
2546 r = journal_file_maybe_append_tag(f, ts->realtime);
2547 if (r < 0)
2548 return r;
2549 #endif
2550
2551 if (n_iovec < ALLOCA_MAX / sizeof(EntryItem) / 2)
2552 items = newa(EntryItem, n_iovec);
2553 else {
2554 items_alloc = new(EntryItem, n_iovec);
2555 if (!items_alloc)
2556 return -ENOMEM;
2557
2558 items = items_alloc;
2559 }
2560
2561 for (size_t i = 0; i < n_iovec; i++) {
2562 uint64_t p;
2563 Object *o;
2564
2565 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
2566 if (r < 0)
2567 return r;
2568
2569 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2570 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2571 * specific record, and give records with otherwise identical position (i.e. match in seqno,
2572 * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
2573 * objects in each file is different since they are keyed. Hence let's calculate the Jenkins
2574 * hash here for that. This also has the benefit that cursors for old and new journal files
2575 * are completely identical (they include the XOR hash after all). For classic Jenkins-hash
2576 * files things are easier, we can just take the value from the stored record directly. */
2577
2578 if (JOURNAL_HEADER_KEYED_HASH(f->header))
2579 xor_hash ^= jenkins_hash64(iovec[i].iov_base, iovec[i].iov_len);
2580 else
2581 xor_hash ^= le64toh(o->data.hash);
2582
2583 items[i] = (EntryItem) {
2584 .object_offset = p,
2585 .hash = le64toh(o->data.hash),
2586 };
2587 }
2588
2589 /* Order by the position on disk, in order to improve seek
2590 * times for rotating media. */
2591 typesafe_qsort(items, n_iovec, entry_item_cmp);
2592 n_iovec = remove_duplicate_entry_items(items, n_iovec);
2593
2594 r = journal_file_append_entry_internal(
2595 f,
2596 ts,
2597 boot_id,
2598 machine_id,
2599 xor_hash,
2600 items,
2601 n_iovec,
2602 seqnum,
2603 seqnum_id,
2604 ret_object,
2605 ret_offset);
2606
2607 /* If the memory mapping triggered a SIGBUS then we return an
2608 * IO error and ignore the error code passed down to us, since
2609 * it is very likely just an effect of a nullified replacement
2610 * mapping page */
2611
2612 if (mmap_cache_fd_got_sigbus(f->cache_fd))
2613 r = -EIO;
2614
2615 if (f->post_change_timer)
2616 schedule_post_change(f);
2617 else
2618 journal_file_post_change(f);
2619
2620 return r;
2621 }
2622
/* Cached position within one entry array chain, to speed up repeated sequential or bisecting
 * accesses to the same chain. Stored in f->chain_cache, keyed by 'first'. */
typedef struct ChainCacheItem {
        uint64_t first; /* The offset of the entry array object at the beginning of the chain,
                         * i.e., le64toh(f->header->entry_array_offset), or le64toh(o->data.entry_offset). */
        uint64_t array; /* The offset of the cached entry array object. */
        uint64_t begin; /* The offset of the first item in the cached array. */
        uint64_t total; /* The total number of items in all arrays before the cached one in the chain. */
        uint64_t last_index; /* The last index we looked at in the cached array, to optimize locality when bisecting. */
} ChainCacheItem;
2631
2632 static void chain_cache_put(
2633 OrderedHashmap *h,
2634 ChainCacheItem *ci,
2635 uint64_t first,
2636 uint64_t array,
2637 uint64_t begin,
2638 uint64_t total,
2639 uint64_t last_index) {
2640
2641 assert(h);
2642
2643 if (!ci) {
2644 /* If the chain item to cache for this chain is the
2645 * first one it's not worth caching anything */
2646 if (array == first)
2647 return;
2648
2649 if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
2650 ci = ordered_hashmap_steal_first(h);
2651 assert(ci);
2652 } else {
2653 ci = new(ChainCacheItem, 1);
2654 if (!ci)
2655 return;
2656 }
2657
2658 ci->first = first;
2659
2660 if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
2661 free(ci);
2662 return;
2663 }
2664 } else
2665 assert(ci->first == first);
2666
2667 ci->array = array;
2668 ci->begin = begin;
2669 ci->total = total;
2670 ci->last_index = last_index;
2671 }
2672
2673 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2674 assert(i);
2675
2676 /* Increase or decrease the specified index, in the right direction. */
2677
2678 if (direction == DIRECTION_DOWN) {
2679 if (*i >= n - 1)
2680 return 0;
2681
2682 (*i)++;
2683 } else {
2684 if (*i <= 0)
2685 return 0;
2686
2687 (*i)--;
2688 }
2689
2690 return 1;
2691 }
2692
2693 static int bump_entry_array(
2694 JournalFile *f,
2695 Object *o, /* the current entry array object. */
2696 uint64_t offset, /* the offset of the entry array object. */
2697 uint64_t first, /* The offset of the first entry array object in the chain. */
2698 direction_t direction,
2699 uint64_t *ret) {
2700
2701 int r;
2702
2703 assert(f);
2704 assert(ret);
2705
2706 if (direction == DIRECTION_DOWN) {
2707 assert(o);
2708 assert(o->object.type == OBJECT_ENTRY_ARRAY);
2709
2710 *ret = le64toh(o->entry_array.next_entry_array_offset);
2711 } else {
2712
2713 /* Entry array chains are a singly linked list, so to find the previous array in the chain, we have
2714 * to start iterating from the top. */
2715
2716 assert(offset > 0);
2717
2718 uint64_t p = first, q = 0;
2719 while (p > 0 && p != offset) {
2720 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, p, &o);
2721 if (r < 0)
2722 return r;
2723
2724 q = p;
2725 p = le64toh(o->entry_array.next_entry_array_offset);
2726 }
2727
2728 /* If we can't find the previous entry array in the entry array chain, we're likely dealing with a
2729 * corrupted journal file. */
2730 if (p == 0)
2731 return -EBADMSG;
2732
2733 *ret = q;
2734 }
2735
2736 return *ret > 0;
2737 }
2738
/* Returns the entry at index 'i' (counted across the whole entry array chain starting at 'first'),
 * or — when that entry or its array is corrupted — the closest readable entry in the given
 * direction. Returns 1 on success, 0 if no such entry exists, negative errno-style error
 * otherwise. */
static int generic_array_get(
                JournalFile *f,
                uint64_t first, /* The offset of the first entry array object in the chain. */
                uint64_t i, /* The index of the target object counted from the beginning of the entry array chain. */
                direction_t direction,
                Object **ret_object, /* The found object. */
                uint64_t *ret_offset) { /* The offset of the found object. */

        uint64_t a, t = 0, k;
        ChainCacheItem *ci;
        Object *o = NULL;
        int r;

        assert(f);

        /* FIXME: fix return value assignment on success. */

        a = first;

        /* Try the chain cache first */
        ci = ordered_hashmap_get(f->chain_cache, &first);
        if (ci && i > ci->total) {
                /* The cache lets us skip all arrays before the cached one: continue from there,
                 * with 'i' made relative to the cached array and 't' counting the skipped items. */
                a = ci->array;
                i -= ci->total;
                t = ci->total;
        }

        /* Walk the chain until we reach the array that contains index 'i'. */
        while (a > 0) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (IN_SET(r, -EBADMSG, -EADDRNOTAVAIL)) {
                        /* If there's corruption and we're going downwards, let's pretend we reached the
                         * final entry in the entry array chain. */

                        if (direction == DIRECTION_DOWN)
                                return 0;

                        /* If there's corruption and we're going upwards, move back to the previous entry
                         * array and start iterating entries from there. */

                        i = UINT64_MAX;
                        break;
                }
                if (r < 0)
                        return r;

                k = journal_file_entry_array_n_items(f, o);
                if (k == 0)
                        return 0;

                if (i < k)
                        break;

                /* The index is larger than the number of elements in the array. Let's move to the next array. */
                i -= k;
                t += k;
                a = le64toh(o->entry_array.next_entry_array_offset);
        }

        /* If we've found the right location, now look for the first non-corrupt entry object (in the right
         * direction). */

        while (a > 0) {
                if (i == UINT64_MAX) {
                        /* i == UINT64_MAX is the marker for "step to the neighboring array first". */
                        r = bump_entry_array(f, o, a, first, direction, &a);
                        if (r <= 0)
                                return r;

                        r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                        if (r < 0)
                                return r;

                        k = journal_file_entry_array_n_items(f, o);
                        if (k == 0)
                                break;

                        if (direction == DIRECTION_DOWN)
                                i = 0;
                        else {
                                /* We moved to the previous array. The total must be decreased. */
                                if (t < k)
                                        return -EBADMSG; /* chain cache is broken ? */

                                i = k - 1;
                                t -= k;
                        }
                }

                /* Scan entries of the current array starting at 'i', skipping over broken ones. */
                do {
                        uint64_t p;

                        p = journal_file_entry_array_item(f, o, i);

                        r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret_object);
                        if (r >= 0) {
                                /* Let's cache this item for the next invocation */
                                chain_cache_put(f->chain_cache, ci, first, a, journal_file_entry_array_item(f, o, 0), t, i);

                                if (ret_offset)
                                        *ret_offset = p;

                                return 1;
                        }
                        if (!IN_SET(r, -EADDRNOTAVAIL, -EBADMSG))
                                return r;

                        /* OK, so this entry is borked. Most likely some entry didn't get synced to
                         * disk properly, let's see if the next one might work for us instead. */
                        log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);

                } while (bump_array_index(&i, direction, k) > 0);

                /* All entries tried in the above do-while loop are broken. Let's move to the next (or previous) array. */

                if (direction == DIRECTION_DOWN)
                        /* We are going to the next array, the total must be incremented. */
                        t += k;

                i = UINT64_MAX;
        }

        return 0;
}
2861
/* Return values for the test_object() callbacks used when bisecting entry arrays. */
enum {
        TEST_FOUND, /* The tested object matches the needle. */
        TEST_LEFT,  /* The tested object is left of the needle, i.e. the needle lies later in the array. */
        TEST_RIGHT  /* The tested object is right of the needle, i.e. the needle lies earlier in the array. */
};
2867
/* Test item 'i' of the entry array at offset 'a' against the needle and narrow the bisection
 * window [*left, *right] accordingly. Returns the TEST_* result, -ENOANO when the tested item is
 * unreadable/corrupt (with *right clamped to 'i' so the caller can retry on the smaller range), or
 * another negative errno-style error. */
static int generic_array_bisect_one(
                JournalFile *f,
                uint64_t a, /* offset of entry array object. */
                uint64_t i, /* index of the entry item we will test. */
                uint64_t needle,
                int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
                direction_t direction,
                uint64_t *left,
                uint64_t *right,
                uint64_t *ret_offset) {

        Object *array;
        uint64_t p;
        int r;

        assert(f);
        assert(test_object);
        assert(left);
        assert(right);
        assert(*left <= i);
        assert(i <= *right);

        r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
        if (r < 0)
                return r;

        /* A zero item offset means the slot was never written — treat like corruption. */
        p = journal_file_entry_array_item(f, array, i);
        if (p <= 0)
                r = -EBADMSG;
        else
                r = test_object(f, p, needle);
        if (IN_SET(r, -EBADMSG, -EADDRNOTAVAIL)) {
                log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short.");
                *right = i;
                return -ENOANO; /* recognizable error */
        }
        if (r < 0)
                return r;

        /* An exact match counts as a boundary hit in the search direction. */
        if (r == TEST_FOUND)
                r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

        if (r == TEST_RIGHT)
                *right = i;
        else
                *left = i + 1;

        if (ret_offset)
                *ret_offset = p;

        return r;
}
2920
static int generic_array_bisect(
                JournalFile *f,
                uint64_t first,
                uint64_t n,
                uint64_t needle,
                int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset,
                uint64_t *ret_idx) {

        /* Given an entry array chain, this function finds the object "closest" to the given needle in the
         * chain, taking into account the provided direction. A function can be provided to determine how
         * an object is matched against the given needle.
         *
         * Given a journal file, the offset of an object and the needle, the test_object() function should
         * return TEST_RIGHT if the needle is located earlier in the entry array chain, TEST_LEFT if the
         * needle is located later in the entry array chain and TEST_FOUND if the object matches the needle.
         * If test_object() returns TEST_FOUND for a specific object, that object's information will be used
         * to populate the return values of this function. If test_object() never returns TEST_FOUND, the
         * return values are populated with the details of one of the objects closest to the needle. If the
         * direction is DIRECTION_UP, the earlier object is used. Otherwise, the later object is used.
         */

        uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = UINT64_MAX;
        bool subtract_one = false;
        ChainCacheItem *ci;
        Object *array;
        int r;

        assert(f);
        assert(test_object);

        /* Start with the first array in the chain */
        a = first;

        ci = ordered_hashmap_get(f->chain_cache, &first);
        if (ci && n > ci->total && ci->begin != 0) {
                /* Ah, we have iterated this bisection array chain previously! Let's see if we can skip ahead
                 * in the chain, as far as the last time. But we can't jump backwards in the chain, so let's
                 * check that first. */

                r = test_object(f, ci->begin, needle);
                if (r < 0)
                        return r;

                if (r == TEST_LEFT) {
                        /* OK, what we are looking for is right of the begin of this EntryArray, so let's
                         * jump straight to previously cached array in the chain */

                        a = ci->array;
                        n -= ci->total;
                        t = ci->total;
                        last_index = ci->last_index;
                }
        }

        while (a > 0) {
                uint64_t left = 0, right, k, lp;

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
                if (r < 0)
                        return r;

                /* Only the first min(k, n) items of this array are candidates. */
                k = journal_file_entry_array_n_items(f, array);
                right = MIN(k, n);
                if (right <= 0)
                        return 0;

                /* First test the last candidate, to decide whether the needle falls into this
                 * array at all. */
                right--;
                r = generic_array_bisect_one(f, a, right, needle, test_object, direction, &left, &right, &lp);
                if (r == -ENOANO) {
                        /* Corrupt item: retry the whole array with the clamped range. */
                        n = right;
                        continue;
                }
                if (r < 0)
                        return r;

                if (r == TEST_RIGHT) {
                        /* If we cached the last index we looked at, let's try to not to jump too wildly
                         * around and see if we can limit the range to look at early to the immediate
                         * neighbors of the last index we looked at. */

                        if (last_index > 0 && last_index - 1 < right) {
                                r = generic_array_bisect_one(f, a, last_index - 1, needle, test_object, direction, &left, &right, NULL);
                                if (r < 0 && r != -ENOANO)
                                        return r;
                        }

                        if (last_index < right) {
                                r = generic_array_bisect_one(f, a, last_index + 1, needle, test_object, direction, &left, &right, NULL);
                                if (r < 0 && r != -ENOANO)
                                        return r;
                        }

                        /* Classic binary search over the remaining window. */
                        for (;;) {
                                if (left == right) {
                                        if (direction == DIRECTION_UP)
                                                subtract_one = true;

                                        i = left;
                                        goto found;
                                }

                                assert(left < right);
                                i = (left + right) / 2;

                                r = generic_array_bisect_one(f, a, i, needle, test_object, direction, &left, &right, NULL);
                                if (r < 0 && r != -ENOANO)
                                        return r;
                        }
                }

                /* TEST_LEFT on the last candidate: the needle lies beyond this array. */
                if (k >= n) {
                        if (direction == DIRECTION_UP) {
                                /* No more arrays to look at: the last candidate is the match. */
                                i = n;
                                subtract_one = true;
                                goto found;
                        }

                        return 0;
                }

                /* Remember the last tested offset, in case we need the item just before the
                 * found one (subtract_one with i == 0, i.e. across an array boundary). */
                last_p = lp;

                n -= k;
                t += k;
                last_index = UINT64_MAX;
                a = le64toh(array->entry_array.next_entry_array_offset);
        }

        return 0;

found:
        if (subtract_one && t == 0 && i == 0)
                return 0;

        r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
        if (r < 0)
                return r;

        p = journal_file_entry_array_item(f, array, 0);
        if (p <= 0)
                return -EBADMSG;

        /* Let's cache this item for the next invocation */
        chain_cache_put(f->chain_cache, ci, first, a, p, t, subtract_one ? (i > 0 ? i-1 : UINT64_MAX) : i);

        if (subtract_one && i == 0)
                p = last_p;
        else if (subtract_one)
                p = journal_file_entry_array_item(f, array, i - 1);
        else
                p = journal_file_entry_array_item(f, array, i);

        if (ret_object) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret_object);
                if (r < 0)
                        return r;
        }

        if (ret_offset)
                *ret_offset = p;

        if (ret_idx)
                *ret_idx = t + i + (subtract_one ? -1 : 0);

        return 1;
}
3090
/* Like generic_array_bisect(), but additionally considers the single 'extra' entry that is stored
 * directly in the data object (before its entry array chain). */
static int generic_array_bisect_plus_one(
                JournalFile *f,
                uint64_t extra,
                uint64_t first,
                uint64_t n,
                uint64_t needle,
                int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        int r;

        assert(f);
        assert(test_object);

        if (n <= 0)
                return 0;

        /* This bisects the array in object 'first', but first checks an extra. */
        r = test_object(f, extra, needle);
        if (r < 0)
                return r;

        if (direction == DIRECTION_DOWN) {
                /* If we are going downwards, then we need to return the first object that passes the test.
                 * When there is no object that passes the test, we need to return the first object that
                 * test_object() returns TEST_RIGHT for. */
                if (IN_SET(r,
                           TEST_FOUND,  /* The 'extra' object passes the test. Hence, this is the first
                                         * object that passes the test. */
                           TEST_RIGHT)) /* The 'extra' object is the first object that test_object() returns
                                         * TEST_RIGHT for, and no object exists even in the chained arrays
                                         * that passes the test. */
                        goto use_extra; /* The 'extra' object is exactly the one we are looking for. It is
                                         * not necessary to bisect the chained arrays. */

                /* Otherwise, the 'extra' object is not the one we are looking for. Search in the arrays. */

        } else {
                /* If we are going upwards, then we need to return the last object that passes the test.
                 * When there is no object that passes the test, we need to return the last object that
                 * test_object() returns TEST_LEFT for. */
                if (r == TEST_RIGHT)
                        return 0; /* Not only the 'extra' object, but also all objects in the chained arrays
                                   * will never get TEST_FOUND or TEST_LEFT. The object we are looking for
                                   * does not exist. */

                /* Even if the 'extra' object passes the test, there may be multiple objects in the arrays
                 * that also pass the test. Hence, we need to bisect the arrays for finding the last matching
                 * object. */
        }

        r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret_object, ret_offset, NULL);
        if (r != 0)
                return r; /* When > 0, the found object is the first (or last, when DIRECTION_UP) object.
                           * Hence, return the found object now. */

        /* No matching object found in the chained arrays.
         * DIRECTION_DOWN : the 'extra' object does not match the condition either. There is no matching object.
         * DIRECTION_UP   : the 'extra' object matches the condition. So, return it. */
        if (direction == DIRECTION_DOWN)
                return 0;

use_extra:
        if (ret_object) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
                if (r < 0)
                        return r;
        }

        if (ret_offset)
                *ret_offset = extra;

        return 1;
}
3167
3168 static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
3169 assert(f);
3170 assert(p > 0);
3171
3172 if (p == needle)
3173 return TEST_FOUND;
3174 else if (p < needle)
3175 return TEST_LEFT;
3176 else
3177 return TEST_RIGHT;
3178 }
3179
/* Find the entry at file offset 'p' (or the closest one in 'direction') via bisection of the
 * global entry array. Returns 1 on success, 0 if not found, negative on error. */
int journal_file_move_to_entry_by_offset(
                JournalFile *f,
                uint64_t p,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        assert(f);
        assert(f->header);

        return generic_array_bisect(
                        f,
                        le64toh(f->header->entry_array_offset),
                        le64toh(f->header->n_entries),
                        p,
                        test_object_offset,
                        direction,
                        ret_object, ret_offset, NULL);
}
3199
3200 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
3201 uint64_t sq;
3202 Object *o;
3203 int r;
3204
3205 assert(f);
3206 assert(p > 0);
3207
3208 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
3209 if (r < 0)
3210 return r;
3211
3212 sq = le64toh(READ_NOW(o->entry.seqnum));
3213 if (sq == needle)
3214 return TEST_FOUND;
3215 else if (sq < needle)
3216 return TEST_LEFT;
3217 else
3218 return TEST_RIGHT;
3219 }
3220
/* Find the entry with sequence number 'seqnum' (or the closest one in 'direction') via bisection
 * of the global entry array. Returns 1 on success, 0 if not found, negative on error. */
int journal_file_move_to_entry_by_seqnum(
                JournalFile *f,
                uint64_t seqnum,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        assert(f);
        assert(f->header);

        return generic_array_bisect(
                        f,
                        le64toh(f->header->entry_array_offset),
                        le64toh(f->header->n_entries),
                        seqnum,
                        test_object_seqnum,
                        direction,
                        ret_object, ret_offset, NULL);
}
3240
3241 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
3242 Object *o;
3243 uint64_t rt;
3244 int r;
3245
3246 assert(f);
3247 assert(p > 0);
3248
3249 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
3250 if (r < 0)
3251 return r;
3252
3253 rt = le64toh(READ_NOW(o->entry.realtime));
3254 if (rt == needle)
3255 return TEST_FOUND;
3256 else if (rt < needle)
3257 return TEST_LEFT;
3258 else
3259 return TEST_RIGHT;
3260 }
3261
/* Find the entry with realtime timestamp 'realtime' (or the closest one in 'direction') via
 * bisection of the global entry array. Returns 1 on success, 0 if not found, negative on error. */
int journal_file_move_to_entry_by_realtime(
                JournalFile *f,
                uint64_t realtime,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        assert(f);
        assert(f->header);

        return generic_array_bisect(
                        f,
                        le64toh(f->header->entry_array_offset),
                        le64toh(f->header->n_entries),
                        realtime,
                        test_object_realtime,
                        direction,
                        ret_object, ret_offset, NULL);
}
3281
3282 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
3283 Object *o;
3284 uint64_t m;
3285 int r;
3286
3287 assert(f);
3288 assert(p > 0);
3289
3290 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
3291 if (r < 0)
3292 return r;
3293
3294 m = le64toh(READ_NOW(o->entry.monotonic));
3295 if (m == needle)
3296 return TEST_FOUND;
3297 else if (m < needle)
3298 return TEST_LEFT;
3299 else
3300 return TEST_RIGHT;
3301 }
3302
3303 static int find_data_object_by_boot_id(
3304 JournalFile *f,
3305 sd_id128_t boot_id,
3306 Object **ret_object,
3307 uint64_t *ret_offset) {
3308
3309 char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
3310
3311 assert(f);
3312
3313 sd_id128_to_string(boot_id, t + 9);
3314 return journal_file_find_data_object(f, t, sizeof(t) - 1, ret_object, ret_offset);
3315 }
3316
/* Find the entry with monotonic timestamp 'monotonic' within boot 'boot_id' (or the closest one in
 * 'direction'), by bisecting the entry list of the matching _BOOT_ID= data object. Returns 1 on
 * success, 0 if not found (including when the boot ID never occurs in this file), negative on
 * error. */
int journal_file_move_to_entry_by_monotonic(
                JournalFile *f,
                sd_id128_t boot_id,
                uint64_t monotonic,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        Object *o;
        int r;

        assert(f);

        r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
        if (r <= 0)
                return r;

        /* Bisect the data object's entry list: one 'extra' entry plus the chained arrays. */
        return generic_array_bisect_plus_one(
                        f,
                        le64toh(o->data.entry_offset),
                        le64toh(o->data.entry_array_offset),
                        le64toh(o->data.n_entries),
                        monotonic,
                        test_object_monotonic,
                        direction,
                        ret_object, ret_offset);
}
3344
3345 void journal_file_reset_location(JournalFile *f) {
3346 assert(f);
3347
3348 f->location_type = LOCATION_HEAD;
3349 f->current_offset = 0;
3350 f->current_seqnum = 0;
3351 f->current_realtime = 0;
3352 f->current_monotonic = 0;
3353 zero(f->current_boot_id);
3354 f->current_xor_hash = 0;
3355
3356 /* Also reset the previous reading direction. Otherwise, next_beyond_location() may wrongly handle we
3357 * already hit EOF. See issue #29216. */
3358 f->last_direction = _DIRECTION_INVALID;
3359 }
3360
3361 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
3362 assert(f);
3363 assert(o);
3364
3365 f->location_type = LOCATION_SEEK;
3366 f->current_offset = offset;
3367 f->current_seqnum = le64toh(o->entry.seqnum);
3368 f->current_realtime = le64toh(o->entry.realtime);
3369 f->current_monotonic = le64toh(o->entry.monotonic);
3370 f->current_boot_id = o->entry.boot_id;
3371 f->current_xor_hash = le64toh(o->entry.xor_hash);
3372 }
3373
3374 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
3375
3376 /* Consider it an error if any of the two offsets is uninitialized */
3377 if (old_offset == 0 || new_offset == 0)
3378 return false;
3379
3380 /* If we go down, the new offset must be larger than the old one. */
3381 return direction == DIRECTION_DOWN ?
3382 new_offset > old_offset :
3383 new_offset < old_offset;
3384 }
3385
/* Finds the entry object following (DIRECTION_DOWN) or preceding (DIRECTION_UP) the file offset 'p'.
 * When 'p' is zero, returns the first (or last, on DIRECTION_UP) entry of the file. Returns 1 on
 * success, 0 if no such entry exists, negative errno on failure. */
int journal_file_next_entry(
                JournalFile *f,
                uint64_t p,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t i, n, q;
        Object *o;
        int r;

        assert(f);
        assert(f->header);

        /* FIXME: fix return value assignment. */

        n = le64toh(READ_NOW(f->header->n_entries));
        if (n <= 0)
                return 0;

        /* When the input offset 'p' is zero, return the first (or last on DIRECTION_UP) entry. */
        if (p == 0)
                return generic_array_get(f,
                                         le64toh(f->header->entry_array_offset),
                                         direction == DIRECTION_DOWN ? 0 : n - 1,
                                         direction,
                                         ret_object, ret_offset);

        /* Otherwise, first find the nearest entry object. */
        r = generic_array_bisect(f,
                                 le64toh(f->header->entry_array_offset),
                                 le64toh(f->header->n_entries),
                                 p,
                                 test_object_offset,
                                 direction,
                                 ret_object ? &o : NULL, &q, &i);
        if (r <= 0)
                return r;

        /* The bisection must not overshoot 'p' in the requested direction. */
        assert(direction == DIRECTION_DOWN ? p <= q : q <= p);

        /* If the input offset 'p' points to an entry object, generic_array_bisect() should provide
         * the same offset, and the index needs to be shifted. Otherwise, use the found object as is,
         * as it is the nearest entry object from the input offset 'p'. */

        if (p != q)
                goto found;

        /* 'p' was itself an entry: advance one slot in the requested direction. Returns 0 when we
         * would step off either end of the array. */
        r = bump_array_index(&i, direction, n);
        if (r <= 0)
                return r;

        /* And jump to it */
        r = generic_array_get(f, le64toh(f->header->entry_array_offset), i, direction, ret_object ? &o : NULL, &q);
        if (r <= 0)
                return r;

        /* Ensure our array is properly ordered. */
        if (!check_properly_ordered(q, p, direction))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "%s: entry array not properly ordered at entry index %" PRIu64,
                                       f->path, i);
found:
        if (ret_object)
                *ret_object = o;
        if (ret_offset)
                *ret_offset = q;

        return 1;
}
3456
/* Returns (via 'ret_object'/'ret_offset') the first (DIRECTION_DOWN) or last (DIRECTION_UP) entry
 * linked to the data object 'd', skipping over corrupted entries where possible. Returns 1 on
 * success, 0 if the data object has no (valid) entries, negative errno on critical failure. */
int journal_file_move_to_entry_for_data(
                JournalFile *f,
                Object *d,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t extra, first, n;
        int r = 0;

        assert(f);
        assert(d);
        assert(d->object.type == OBJECT_DATA);
        assert(IN_SET(direction, DIRECTION_DOWN, DIRECTION_UP));

        /* FIXME: fix return value assignment. */

        /* This returns the first (when the direction is down, otherwise the last) entry linked to the
         * specified data object. */

        n = le64toh(d->data.n_entries);
        if (n <= 0)
                return 0;
        n--; /* n_entries is the number of entries linked to the data object, including the 'extra' entry. */

        /* The first linked entry is stored inline in the data object ('extra'); all further links live
         * in a chained entry array starting at 'first'. */
        extra = le64toh(d->data.entry_offset);
        first = le64toh(d->data.entry_array_offset);

        if (direction == DIRECTION_DOWN && extra > 0) {
                /* When we are going downwards, first try to read the extra entry. */
                r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
                if (r >= 0)
                        goto use_extra;
                /* -EADDRNOTAVAIL/-EBADMSG indicate a corrupted link/object; fall through and try the
                 * array instead. Everything else is a real error. */
                if (!IN_SET(r, -EADDRNOTAVAIL, -EBADMSG))
                        return r;
        }

        if (n > 0) {
                /* DIRECTION_DOWN : The extra entry is broken, falling back to the entries in the array.
                 * DIRECTION_UP : Try to find a valid entry in the array from the tail. */
                r = generic_array_get(f,
                                      first,
                                      direction == DIRECTION_DOWN ? 0 : n - 1,
                                      direction,
                                      ret_object, ret_offset);
                if (!IN_SET(r, 0, -EADDRNOTAVAIL, -EBADMSG))
                        return r; /* found or critical error. */
        }

        if (direction == DIRECTION_UP && extra > 0) {
                /* No valid entry exists in the chained array, falling back to the extra entry. */
                r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
                if (r >= 0)
                        goto use_extra;
        }

        /* Nothing worked: propagate the last result (0 or a tolerated corruption error). */
        return r;

use_extra:
        if (ret_offset)
                *ret_offset = extra;

        return 1;
}
3521
3522 int journal_file_move_to_entry_by_offset_for_data(
3523 JournalFile *f,
3524 Object *d,
3525 uint64_t p,
3526 direction_t direction,
3527 Object **ret, uint64_t *ret_offset) {
3528
3529 assert(f);
3530 assert(d);
3531 assert(d->object.type == OBJECT_DATA);
3532
3533 return generic_array_bisect_plus_one(
3534 f,
3535 le64toh(d->data.entry_offset),
3536 le64toh(d->data.entry_array_offset),
3537 le64toh(d->data.n_entries),
3538 p,
3539 test_object_offset,
3540 direction,
3541 ret, ret_offset);
3542 }
3543
/* Seeks to the entry nearest to the given monotonic timestamp of the given boot, restricted to the
 * entries linked to the data object 'd'. This alternates bisections between the boot's entry list and
 * the data object's entry list until an entry present in both is found. Returns 1 on success, 0 if no
 * such entry exists, negative errno on failure. */
int journal_file_move_to_entry_by_monotonic_for_data(
                JournalFile *f,
                Object *d,
                sd_id128_t boot_id,
                uint64_t monotonic,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t z, entry_offset, entry_array_offset, n_entries;
        Object *o, *entry;
        int r;

        assert(f);
        assert(d);
        assert(d->object.type == OBJECT_DATA);

        /* Save all the required data before the data object gets invalidated. */
        entry_offset = le64toh(READ_NOW(d->data.entry_offset));
        entry_array_offset = le64toh(READ_NOW(d->data.entry_array_offset));
        n_entries = le64toh(READ_NOW(d->data.n_entries));

        /* First, seek by time */
        r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
        if (r <= 0)
                return r;

        r = generic_array_bisect_plus_one(f,
                                          le64toh(o->data.entry_offset),
                                          le64toh(o->data.entry_array_offset),
                                          le64toh(o->data.n_entries),
                                          monotonic,
                                          test_object_monotonic,
                                          direction,
                                          NULL, &z);
        if (r <= 0)
                return r;

        /* And now, continue seeking until we find an entry that exists in both bisection arrays. */
        for (;;) {
                uint64_t p;

                /* The journal entry found by the above bisect_plus_one() may not have the specified data,
                 * that is, it may not be linked in the data object. So, we need to check that. */

                r = generic_array_bisect_plus_one(f,
                                                  entry_offset,
                                                  entry_array_offset,
                                                  n_entries,
                                                  z,
                                                  test_object_offset,
                                                  direction,
                                                  ret_object ? &entry : NULL, &p);
                if (r <= 0)
                        return r;
                if (p == z)
                        break; /* The journal entry has the specified data. Yay! */

                /* If the entry does not have the data, then move to the next (or previous, depends on the
                 * 'direction') entry linked to the data object. But, the next entry may be in another boot.
                 * So, we need to check that the entry has the matching boot ID. */

                /* NOTE(review): 'o' is re-read here although the bisections above may have remapped the
                 * window it points into — presumably the mmap cache keeps it valid; verify. */
                r = generic_array_bisect_plus_one(f,
                                                  le64toh(o->data.entry_offset),
                                                  le64toh(o->data.entry_array_offset),
                                                  le64toh(o->data.n_entries),
                                                  p,
                                                  test_object_offset,
                                                  direction,
                                                  ret_object ? &entry : NULL, &z);
                if (r <= 0)
                        return r;
                if (p == z)
                        break; /* The journal entry has the specified boot ID. Yay! */

                /* If not, let's try to the next entry... */
        }

        if (ret_object)
                *ret_object = entry;
        if (ret_offset)
                *ret_offset = z;
        return 1;
}
3628
3629 int journal_file_move_to_entry_by_seqnum_for_data(
3630 JournalFile *f,
3631 Object *d,
3632 uint64_t seqnum,
3633 direction_t direction,
3634 Object **ret_object,
3635 uint64_t *ret_offset) {
3636
3637 assert(f);
3638 assert(d);
3639 assert(d->object.type == OBJECT_DATA);
3640
3641 return generic_array_bisect_plus_one(
3642 f,
3643 le64toh(d->data.entry_offset),
3644 le64toh(d->data.entry_array_offset),
3645 le64toh(d->data.n_entries),
3646 seqnum,
3647 test_object_seqnum,
3648 direction,
3649 ret_object, ret_offset);
3650 }
3651
3652 int journal_file_move_to_entry_by_realtime_for_data(
3653 JournalFile *f,
3654 Object *d,
3655 uint64_t realtime,
3656 direction_t direction,
3657 Object **ret, uint64_t *ret_offset) {
3658
3659 assert(f);
3660 assert(d);
3661 assert(d->object.type == OBJECT_DATA);
3662
3663 return generic_array_bisect_plus_one(
3664 f,
3665 le64toh(d->data.entry_offset),
3666 le64toh(d->data.entry_array_offset),
3667 le64toh(d->data.n_entries),
3668 realtime,
3669 test_object_realtime,
3670 direction,
3671 ret, ret_offset);
3672 }
3673
3674 void journal_file_dump(JournalFile *f) {
3675 Object *o;
3676 uint64_t p;
3677 int r;
3678
3679 assert(f);
3680 assert(f->header);
3681
3682 journal_file_print_header(f);
3683
3684 p = le64toh(READ_NOW(f->header->header_size));
3685 while (p != 0) {
3686 const char *s;
3687 Compression c;
3688
3689 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
3690 if (r < 0)
3691 goto fail;
3692
3693 s = journal_object_type_to_string(o->object.type);
3694
3695 switch (o->object.type) {
3696
3697 case OBJECT_ENTRY:
3698 assert(s);
3699
3700 printf("Type: %s seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
3701 s,
3702 le64toh(o->entry.seqnum),
3703 le64toh(o->entry.monotonic),
3704 le64toh(o->entry.realtime));
3705 break;
3706
3707 case OBJECT_TAG:
3708 assert(s);
3709
3710 printf("Type: %s seqnum=%"PRIu64" epoch=%"PRIu64"\n",
3711 s,
3712 le64toh(o->tag.seqnum),
3713 le64toh(o->tag.epoch));
3714 break;
3715
3716 default:
3717 if (s)
3718 printf("Type: %s \n", s);
3719 else
3720 printf("Type: unknown (%i)", o->object.type);
3721
3722 break;
3723 }
3724
3725 c = COMPRESSION_FROM_OBJECT(o);
3726 if (c > COMPRESSION_NONE)
3727 printf("Flags: %s\n",
3728 compression_to_string(c));
3729
3730 if (p == le64toh(f->header->tail_object_offset))
3731 p = 0;
3732 else
3733 p += ALIGN64(le64toh(o->object.size));
3734 }
3735
3736 return;
3737 fail:
3738 log_error("File corrupt");
3739 }
3740
3741 /* Note: the lifetime of the compound literal is the immediately surrounding block. */
3742 #define FORMAT_TIMESTAMP_SAFE(t) (FORMAT_TIMESTAMP(t) ?: " --- ")
3743
3744 void journal_file_print_header(JournalFile *f) {
3745 struct stat st;
3746
3747 assert(f);
3748 assert(f->header);
3749
3750 printf("File path: %s\n"
3751 "File ID: %s\n"
3752 "Machine ID: %s\n"
3753 "Boot ID: %s\n"
3754 "Sequential number ID: %s\n"
3755 "State: %s\n"
3756 "Compatible flags:%s%s%s\n"
3757 "Incompatible flags:%s%s%s%s%s%s\n"
3758 "Header size: %"PRIu64"\n"
3759 "Arena size: %"PRIu64"\n"
3760 "Data hash table size: %"PRIu64"\n"
3761 "Field hash table size: %"PRIu64"\n"
3762 "Rotate suggested: %s\n"
3763 "Head sequential number: %"PRIu64" (%"PRIx64")\n"
3764 "Tail sequential number: %"PRIu64" (%"PRIx64")\n"
3765 "Head realtime timestamp: %s (%"PRIx64")\n"
3766 "Tail realtime timestamp: %s (%"PRIx64")\n"
3767 "Tail monotonic timestamp: %s (%"PRIx64")\n"
3768 "Objects: %"PRIu64"\n"
3769 "Entry objects: %"PRIu64"\n",
3770 f->path,
3771 SD_ID128_TO_STRING(f->header->file_id),
3772 SD_ID128_TO_STRING(f->header->machine_id),
3773 SD_ID128_TO_STRING(f->header->tail_entry_boot_id),
3774 SD_ID128_TO_STRING(f->header->seqnum_id),
3775 f->header->state == STATE_OFFLINE ? "OFFLINE" :
3776 f->header->state == STATE_ONLINE ? "ONLINE" :
3777 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
3778 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
3779 JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f->header) ? " TAIL_ENTRY_BOOT_ID" : "",
3780 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
3781 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
3782 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
3783 JOURNAL_HEADER_COMPRESSED_ZSTD(f->header) ? " COMPRESSED-ZSTD" : "",
3784 JOURNAL_HEADER_KEYED_HASH(f->header) ? " KEYED-HASH" : "",
3785 JOURNAL_HEADER_COMPACT(f->header) ? " COMPACT" : "",
3786 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
3787 le64toh(f->header->header_size),
3788 le64toh(f->header->arena_size),
3789 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3790 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
3791 yes_no(journal_file_rotate_suggested(f, 0, LOG_DEBUG)),
3792 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
3793 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
3794 FORMAT_TIMESTAMP_SAFE(le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
3795 FORMAT_TIMESTAMP_SAFE(le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
3796 FORMAT_TIMESPAN(le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
3797 le64toh(f->header->n_objects),
3798 le64toh(f->header->n_entries));
3799
3800 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3801 printf("Data objects: %"PRIu64"\n"
3802 "Data hash table fill: %.1f%%\n",
3803 le64toh(f->header->n_data),
3804 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
3805
3806 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3807 printf("Field objects: %"PRIu64"\n"
3808 "Field hash table fill: %.1f%%\n",
3809 le64toh(f->header->n_fields),
3810 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
3811
3812 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
3813 printf("Tag objects: %"PRIu64"\n",
3814 le64toh(f->header->n_tags));
3815 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
3816 printf("Entry array objects: %"PRIu64"\n",
3817 le64toh(f->header->n_entry_arrays));
3818
3819 if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth))
3820 printf("Deepest field hash chain: %" PRIu64"\n",
3821 f->header->field_hash_chain_depth);
3822
3823 if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth))
3824 printf("Deepest data hash chain: %" PRIu64"\n",
3825 f->header->data_hash_chain_depth);
3826
3827 if (fstat(f->fd, &st) >= 0)
3828 printf("Disk usage: %s\n", FORMAT_BYTES((uint64_t) st.st_blocks * 512ULL));
3829 }
3830
3831 static int journal_file_warn_btrfs(JournalFile *f) {
3832 unsigned attrs;
3833 int r;
3834
3835 assert(f);
3836
3837 /* Before we write anything, check if the COW logic is turned
3838 * off on btrfs. Given our write pattern that is quite
3839 * unfriendly to COW file systems this should greatly improve
3840 * performance on COW file systems, such as btrfs, at the
3841 * expense of data integrity features (which shouldn't be too
3842 * bad, given that we do our own checksumming). */
3843
3844 r = fd_is_fs_type(f->fd, BTRFS_SUPER_MAGIC);
3845 if (r < 0)
3846 return log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "Failed to determine if journal is on btrfs: %m");
3847 if (r == 0)
3848 return 0;
3849
3850 r = read_attr_fd(f->fd, &attrs);
3851 if (r < 0)
3852 return log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "Failed to read file attributes: %m");
3853
3854 if (attrs & FS_NOCOW_FL) {
3855 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3856 return 0;
3857 }
3858
3859 log_ratelimit_notice(JOURNAL_LOG_RATELIMIT,
3860 "Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3861 "This is likely to slow down journal access substantially, please consider turning "
3862 "off the copy-on-write file attribute on the journal directory, using chattr +C.",
3863 f->path);
3864
3865 return 1;
3866 }
3867
/* Replaces every metrics field still set to UINT64_MAX ("pick automatically", see
 * journal_reset_metrics()) with a default derived from the backing file system's size, and
 * aligns/clamps explicitly configured values into a mutually consistent state. */
static void journal_default_metrics(JournalMetrics *m, int fd, bool compact) {
        struct statvfs ss;
        uint64_t fs_size = 0;

        assert(m);
        assert(fd >= 0);

        /* Size of the file system the journal lives on, basis for the percentage defaults below.
         * On failure fs_size stays 0 and the fixed fallback constants are used instead. */
        if (fstatvfs(fd, &ss) >= 0)
                fs_size = u64_multiply_safe(ss.f_frsize, ss.f_blocks);
        else
                log_debug_errno(errno, "Failed to determine disk size: %m");

        if (m->max_use == UINT64_MAX) {

                if (fs_size > 0)
                        m->max_use = CLAMP(PAGE_ALIGN_U64(fs_size / 10), /* 10% of file system size */
                                           MAX_USE_LOWER, MAX_USE_UPPER);
                else
                        m->max_use = MAX_USE_LOWER;
        } else {
                m->max_use = PAGE_ALIGN_U64(m->max_use);

                /* A non-zero max_use must leave room for at least two minimally sized journal files. */
                if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
                        m->max_use = JOURNAL_FILE_SIZE_MIN*2;
        }

        if (m->min_use == UINT64_MAX) {
                if (fs_size > 0)
                        m->min_use = CLAMP(PAGE_ALIGN_U64(fs_size / 50), /* 2% of file system size */
                                           MIN_USE_LOW, MIN_USE_HIGH);
                else
                        m->min_use = MIN_USE_LOW;
        }

        if (m->min_use > m->max_use)
                m->min_use = m->max_use;

        if (m->max_size == UINT64_MAX)
                m->max_size = MIN(PAGE_ALIGN_U64(m->max_use / 8), /* 8 chunks */
                                  MAX_SIZE_UPPER);
        else
                m->max_size = PAGE_ALIGN_U64(m->max_size);

        /* Compact-mode files are capped at JOURNAL_COMPACT_SIZE_MAX (UINT32_MAX, i.e. 4 GiB). */
        if (compact && m->max_size > JOURNAL_COMPACT_SIZE_MAX)
                m->max_size = JOURNAL_COMPACT_SIZE_MAX;

        if (m->max_size != 0) {
                if (m->max_size < JOURNAL_FILE_SIZE_MIN)
                        m->max_size = JOURNAL_FILE_SIZE_MIN;

                /* Keep max_use large enough to hold two files of max_size. */
                if (m->max_use != 0 && m->max_size*2 > m->max_use)
                        m->max_use = m->max_size*2;
        }

        if (m->min_size == UINT64_MAX)
                m->min_size = JOURNAL_FILE_SIZE_MIN;
        else
                m->min_size = CLAMP(PAGE_ALIGN_U64(m->min_size),
                                    JOURNAL_FILE_SIZE_MIN,
                                    m->max_size ?: UINT64_MAX);

        if (m->keep_free == UINT64_MAX) {
                if (fs_size > 0)
                        m->keep_free = MIN(PAGE_ALIGN_U64(fs_size / 20), /* 5% of file system size */
                                           KEEP_FREE_UPPER);
                else
                        m->keep_free = DEFAULT_KEEP_FREE;
        }

        if (m->n_max_files == UINT64_MAX)
                m->n_max_files = DEFAULT_N_MAX_FILES;

        log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
                  FORMAT_BYTES(m->min_use),
                  FORMAT_BYTES(m->max_use),
                  FORMAT_BYTES(m->max_size),
                  FORMAT_BYTES(m->min_size),
                  FORMAT_BYTES(m->keep_free),
                  m->n_max_files);
}
3948
3949 int journal_file_open(
3950 int fd,
3951 const char *fname,
3952 int open_flags,
3953 JournalFileFlags file_flags,
3954 mode_t mode,
3955 uint64_t compress_threshold_bytes,
3956 JournalMetrics *metrics,
3957 MMapCache *mmap_cache,
3958 JournalFile *template,
3959 JournalFile **ret) {
3960
3961 bool newly_created = false;
3962 JournalFile *f;
3963 void *h;
3964 int r;
3965
3966 assert(fd >= 0 || fname);
3967 assert(file_flags >= 0);
3968 assert(file_flags <= _JOURNAL_FILE_FLAGS_MAX);
3969 assert(mmap_cache);
3970 assert(ret);
3971
3972 if (!IN_SET((open_flags & O_ACCMODE), O_RDONLY, O_RDWR))
3973 return -EINVAL;
3974
3975 if ((open_flags & O_ACCMODE) == O_RDONLY && FLAGS_SET(open_flags, O_CREAT))
3976 return -EINVAL;
3977
3978 if (fname && (open_flags & O_CREAT) && !endswith(fname, ".journal"))
3979 return -EINVAL;
3980
3981 f = new(JournalFile, 1);
3982 if (!f)
3983 return -ENOMEM;
3984
3985 *f = (JournalFile) {
3986 .fd = fd,
3987 .mode = mode,
3988 .open_flags = open_flags,
3989 .compress_threshold_bytes = compress_threshold_bytes == UINT64_MAX ?
3990 DEFAULT_COMPRESS_THRESHOLD :
3991 MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
3992 .strict_order = FLAGS_SET(file_flags, JOURNAL_STRICT_ORDER),
3993 .newest_boot_id_prioq_idx = PRIOQ_IDX_NULL,
3994 .last_direction = _DIRECTION_INVALID,
3995 };
3996
3997 if (fname) {
3998 f->path = strdup(fname);
3999 if (!f->path) {
4000 r = -ENOMEM;
4001 goto fail;
4002 }
4003 } else {
4004 assert(fd >= 0);
4005
4006 /* If we don't know the path, fill in something explanatory and vaguely useful */
4007 if (asprintf(&f->path, "/proc/self/%i", fd) < 0) {
4008 r = -ENOMEM;
4009 goto fail;
4010 }
4011 }
4012
4013 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
4014 if (!f->chain_cache) {
4015 r = -ENOMEM;
4016 goto fail;
4017 }
4018
4019 if (f->fd < 0) {
4020 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
4021 * or so, we likely fail quickly than block for long. For regular files O_NONBLOCK has no effect, hence
4022 * it doesn't hurt in that case. */
4023
4024 f->fd = openat_report_new(AT_FDCWD, f->path, f->open_flags|O_CLOEXEC|O_NONBLOCK, f->mode, &newly_created);
4025 if (f->fd < 0) {
4026 r = f->fd;
4027 goto fail;
4028 }
4029
4030 /* fds we opened here by us should also be closed by us. */
4031 f->close_fd = true;
4032
4033 r = fd_nonblock(f->fd, false);
4034 if (r < 0)
4035 goto fail;
4036
4037 if (!newly_created) {
4038 r = journal_file_fstat(f);
4039 if (r < 0)
4040 goto fail;
4041 }
4042 } else {
4043 r = journal_file_fstat(f);
4044 if (r < 0)
4045 goto fail;
4046
4047 /* If we just got the fd passed in, we don't really know if we created the file anew */
4048 newly_created = f->last_stat.st_size == 0 && journal_file_writable(f);
4049 }
4050
4051 r = mmap_cache_add_fd(mmap_cache, f->fd, mmap_prot_from_open_flags(open_flags), &f->cache_fd);
4052 if (r < 0)
4053 goto fail;
4054
4055 if (newly_created) {
4056 (void) journal_file_warn_btrfs(f);
4057
4058 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
4059 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
4060 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
4061 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
4062 * solely on mtime/atime/ctime of the file. */
4063 (void) fd_setcrtime(f->fd, 0);
4064
4065 r = journal_file_init_header(f, file_flags, template);
4066 if (r < 0)
4067 goto fail;
4068
4069 r = journal_file_fstat(f);
4070 if (r < 0)
4071 goto fail;
4072 }
4073
4074 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
4075 r = -ENODATA;
4076 goto fail;
4077 }
4078
4079 r = mmap_cache_fd_get(f->cache_fd, MMAP_CACHE_CATEGORY_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h);
4080 if (r == -EINVAL) {
4081 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
4082 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
4083 * code. */
4084 r = -EAFNOSUPPORT;
4085 goto fail;
4086 }
4087 if (r < 0)
4088 goto fail;
4089
4090 f->header = h;
4091
4092 if (!newly_created) {
4093 r = journal_file_verify_header(f);
4094 if (r < 0)
4095 goto fail;
4096 }
4097
4098 #if HAVE_GCRYPT
4099 if (!newly_created && journal_file_writable(f) && JOURNAL_HEADER_SEALED(f->header)) {
4100 r = journal_file_fss_load(f);
4101 if (r < 0)
4102 goto fail;
4103 }
4104 #endif
4105
4106 if (journal_file_writable(f)) {
4107 if (metrics) {
4108 journal_default_metrics(metrics, f->fd, JOURNAL_HEADER_COMPACT(f->header));
4109 f->metrics = *metrics;
4110 } else if (template)
4111 f->metrics = template->metrics;
4112
4113 r = journal_file_refresh_header(f);
4114 if (r < 0)
4115 goto fail;
4116 }
4117
4118 #if HAVE_GCRYPT
4119 r = journal_file_hmac_setup(f);
4120 if (r < 0)
4121 goto fail;
4122 #endif
4123
4124 if (newly_created) {
4125 r = journal_file_setup_field_hash_table(f);
4126 if (r < 0)
4127 goto fail;
4128
4129 r = journal_file_setup_data_hash_table(f);
4130 if (r < 0)
4131 goto fail;
4132
4133 #if HAVE_GCRYPT
4134 r = journal_file_append_first_tag(f);
4135 if (r < 0)
4136 goto fail;
4137 #endif
4138 }
4139
4140 if (mmap_cache_fd_got_sigbus(f->cache_fd)) {
4141 r = -EIO;
4142 goto fail;
4143 }
4144
4145 if (template && template->post_change_timer) {
4146 r = journal_file_enable_post_change_timer(
4147 f,
4148 sd_event_source_get_event(template->post_change_timer),
4149 template->post_change_timer_period);
4150
4151 if (r < 0)
4152 goto fail;
4153 }
4154
4155 /* The file is opened now successfully, thus we take possession of any passed in fd. */
4156 f->close_fd = true;
4157
4158 if (DEBUG_LOGGING) {
4159 static int last_seal = -1, last_keyed_hash = -1;
4160 static Compression last_compression = _COMPRESSION_INVALID;
4161 static uint64_t last_bytes = UINT64_MAX;
4162
4163 if (last_seal != JOURNAL_HEADER_SEALED(f->header) ||
4164 last_keyed_hash != JOURNAL_HEADER_KEYED_HASH(f->header) ||
4165 last_compression != JOURNAL_FILE_COMPRESSION(f) ||
4166 last_bytes != f->compress_threshold_bytes) {
4167
4168 log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
4169 yes_no(JOURNAL_HEADER_SEALED(f->header)), yes_no(JOURNAL_HEADER_KEYED_HASH(f->header)),
4170 compression_to_string(JOURNAL_FILE_COMPRESSION(f)), FORMAT_BYTES(f->compress_threshold_bytes));
4171 last_seal = JOURNAL_HEADER_SEALED(f->header);
4172 last_keyed_hash = JOURNAL_HEADER_KEYED_HASH(f->header);
4173 last_compression = JOURNAL_FILE_COMPRESSION(f);
4174 last_bytes = f->compress_threshold_bytes;
4175 }
4176 }
4177
4178 *ret = f;
4179 return 0;
4180
4181 fail:
4182 if (f->cache_fd && mmap_cache_fd_got_sigbus(f->cache_fd))
4183 r = -EIO;
4184
4185 (void) journal_file_close(f);
4186
4187 if (newly_created && fd < 0)
4188 (void) unlink(fname);
4189
4190 return r;
4191 }
4192
4193 int journal_file_parse_uid_from_filename(const char *path, uid_t *ret_uid) {
4194 _cleanup_free_ char *buf = NULL, *p = NULL;
4195 const char *a, *b, *at;
4196 int r;
4197
4198 /* This helper returns -EREMOTE when the filename doesn't match user online/offline journal
4199 * pattern. Hence it currently doesn't parse archived or disposed user journals. */
4200
4201 assert(path);
4202 assert(ret_uid);
4203
4204 r = path_extract_filename(path, &p);
4205 if (r < 0)
4206 return r;
4207 if (r == O_DIRECTORY)
4208 return -EISDIR;
4209
4210 a = startswith(p, "user-");
4211 if (!a)
4212 return -EREMOTE;
4213 b = endswith(p, ".journal");
4214 if (!b)
4215 return -EREMOTE;
4216
4217 at = strchr(a, '@');
4218 if (at)
4219 return -EREMOTE;
4220
4221 buf = strndup(a, b-a);
4222 if (!buf)
4223 return -ENOMEM;
4224
4225 return parse_uid(buf, ret_uid);
4226 }
4227
/* Renames the journal file to its archived name
 * ("<name>@<seqnum-id>-<head-seqnum>-<head-realtime>.journal") and queues the archive state for the
 * next offlining. If 'ret_previous_path' is non-NULL, ownership of the old path string is handed to
 * the caller; otherwise it is freed. Returns 0 on success, negative errno on failure. */
int journal_file_archive(JournalFile *f, char **ret_previous_path) {
        _cleanup_free_ char *p = NULL;

        assert(f);

        if (!journal_file_writable(f))
                return -EINVAL;

        /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
         * rotation, since we don't know the actual path, and couldn't rename the file hence. */
        if (path_startswith(f->path, "/proc/self/fd"))
                return -EINVAL;

        if (!endswith(f->path, ".journal"))
                return -EINVAL;

        /* "%.*s" with strlen()-8 strips the ".journal" suffix before appending the archive fields. */
        if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
                     (int) strlen(f->path) - 8, f->path,
                     SD_ID128_FORMAT_VAL(f->header->seqnum_id),
                     le64toh(f->header->head_entry_seqnum),
                     le64toh(f->header->head_entry_realtime)) < 0)
                return -ENOMEM;

        /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
         * ignore that case. */
        if (rename(f->path, p) < 0 && errno != ENOENT)
                return -errno;

        /* Sync the rename to disk */
        (void) fsync_directory_of_file(f->fd);

        /* Hand the old path to the caller, or drop it if they don't want it. */
        if (ret_previous_path)
                *ret_previous_path = f->path;
        else
                free(f->path);

        f->path = TAKE_PTR(p);

        /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
         * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
         * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
         * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
         * occurs. */
        f->archive = true;

        return 0;
}
4275
4276 int journal_file_dispose(int dir_fd, const char *fname) {
4277 _cleanup_free_ char *p = NULL;
4278
4279 assert(fname);
4280
4281 /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shutdown. Note that
4282 * this is done without looking into the file or changing any of its contents. The idea is that this is called
4283 * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
4284 * for writing anymore. */
4285
4286 if (!endswith(fname, ".journal"))
4287 return -EINVAL;
4288
4289 if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
4290 (int) strlen(fname) - 8, fname,
4291 now(CLOCK_REALTIME),
4292 random_u64()) < 0)
4293 return -ENOMEM;
4294
4295 if (renameat(dir_fd, fname, dir_fd, p) < 0)
4296 return -errno;
4297
4298 return 0;
4299 }
4300
/* Copies the entry object 'o' (located at offset 'p' in 'from') into the journal file 'to',
 * re-appending all of its data objects there. Data objects that turn out corrupted are skipped with a
 * debug log. 'seqnum'/'seqnum_id' are passed through to the entry append. Returns the result of
 * appending the entry, 0 if nothing (valid) was left to copy, or a negative errno. */
int journal_file_copy_entry(
                JournalFile *from,
                JournalFile *to,
                Object *o,
                uint64_t p,
                uint64_t *seqnum,
                sd_id128_t *seqnum_id) {

        _cleanup_free_ EntryItem *items_alloc = NULL;
        EntryItem *items;
        uint64_t n, m = 0, xor_hash = 0;
        sd_id128_t boot_id;
        dual_timestamp ts;
        int r;

        assert(from);
        assert(to);
        assert(o);
        assert(p > 0);

        if (!journal_file_writable(to))
                return -EPERM;

        /* Preserve the original timestamps and boot ID of the entry. */
        ts = (dual_timestamp) {
                .monotonic = le64toh(o->entry.monotonic),
                .realtime = le64toh(o->entry.realtime),
        };
        boot_id = o->entry.boot_id;

        n = journal_file_entry_n_items(from, o);
        if (n == 0)
                return 0;

        /* Use the stack for small item arrays, the heap for large ones. */
        if (n < ALLOCA_MAX / sizeof(EntryItem) / 2)
                items = newa(EntryItem, n);
        else {
                items_alloc = new(EntryItem, n);
                if (!items_alloc)
                        return -ENOMEM;

                items = items_alloc;
        }

        for (uint64_t i = 0; i < n; i++) {
                uint64_t h, q;
                void *data;
                size_t l;
                Object *u;

                q = journal_file_entry_item_object_offset(from, o, i);
                r = journal_file_data_payload(from, NULL, q, NULL, 0, 0, &data, &l);
                if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
                        /* Tolerate corruption in the source: copy what is still readable. */
                        log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", i);
                        continue;
                }
                if (r < 0)
                        return r;
                assert(r > 0);

                if (l == 0)
                        return -EBADMSG;

                r = journal_file_append_data(to, data, l, &u, &h);
                if (r < 0)
                        return r;

                /* Recompute the entry's XOR hash in the destination's hashing scheme, which may differ
                 * from the source's (keyed vs. plain jenkins hash). */
                if (JOURNAL_HEADER_KEYED_HASH(to->header))
                        xor_hash ^= jenkins_hash64(data, l);
                else
                        xor_hash ^= le64toh(u->data.hash);

                items[m++] = (EntryItem) {
                        .object_offset = h,
                        .hash = le64toh(u->data.hash),
                };
        }

        /* All items were corrupted and skipped, nothing to append. */
        if (m == 0)
                return 0;

        r = journal_file_append_entry_internal(
                        to,
                        &ts,
                        &boot_id,
                        &from->header->machine_id,
                        xor_hash,
                        items,
                        m,
                        seqnum,
                        seqnum_id,
                        /* ret_object= */ NULL,
                        /* ret_offset= */ NULL);

        if (mmap_cache_fd_got_sigbus(to->cache_fd))
                return -EIO;

        return r;
}
4399
/* Resets all metrics to UINT64_MAX, the sentinel journal_default_metrics() interprets as "derive a
 * default automatically". Note the compound literal also zeroes any fields not listed here. */
void journal_reset_metrics(JournalMetrics *m) {
        assert(m);

        /* Set everything to "pick automatic values". */

        *m = (JournalMetrics) {
                .min_use = UINT64_MAX,
                .max_use = UINT64_MAX,
                .min_size = UINT64_MAX,
                .max_size = UINT64_MAX,
                .keep_free = UINT64_MAX,
                .n_max_files = UINT64_MAX,
        };
}
4414
4415 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *ret_from, usec_t *ret_to) {
4416 assert(f);
4417 assert(f->header);
4418 assert(ret_from || ret_to);
4419
4420 if (ret_from) {
4421 if (f->header->head_entry_realtime == 0)
4422 return -ENOENT;
4423
4424 *ret_from = le64toh(f->header->head_entry_realtime);
4425 }
4426
4427 if (ret_to) {
4428 if (f->header->tail_entry_realtime == 0)
4429 return -ENOENT;
4430
4431 *ret_to = le64toh(f->header->tail_entry_realtime);
4432 }
4433
4434 return 1;
4435 }
4436
/* Reports the monotonic timestamps of the first and/or last entry belonging to the given boot, by
 * walking the boot's _BOOT_ID= data object. Returns 1 on success, 0 if the boot is unknown or has no
 * entries, negative errno on failure. */
int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *ret_from, usec_t *ret_to) {
        Object *o;
        uint64_t p;
        int r;

        assert(f);
        assert(ret_from || ret_to);

        /* FIXME: fix return value assignment on success with 0. */

        r = find_data_object_by_boot_id(f, boot_id, &o, &p);
        if (r <= 0)
                return r;

        if (le64toh(o->data.n_entries) <= 0)
                return 0;

        if (ret_from) {
                /* The first linked entry is referenced directly by the data object. Note: 'o' is
                 * repurposed here to point at the entry object. */
                r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
                if (r < 0)
                        return r;

                *ret_from = le64toh(o->entry.monotonic);
        }

        if (ret_to) {
                /* 'o' may no longer point at the data object (see above), hence re-resolve it via the
                 * saved offset 'p' before looking up the last linked entry. */
                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
                if (r < 0)
                        return r;

                r = journal_file_move_to_entry_for_data(f, o, DIRECTION_UP, &o, NULL);
                if (r <= 0)
                        return r;

                *ret_to = le64toh(o->entry.monotonic);
        }

        return 1;
}
4476
4477 bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec, int log_level) {
4478 assert(f);
4479 assert(f->header);
4480
4481 /* If we gained new header fields we gained new features,
4482 * hence suggest a rotation */
4483 if (le64toh(f->header->header_size) < sizeof(Header)) {
4484 log_ratelimit_full(log_level, JOURNAL_LOG_RATELIMIT,
4485 "%s uses an outdated header, suggesting rotation.", f->path);
4486 return true;
4487 }
4488
4489 /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
4490 * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
4491 * need the n_data field, which only exists in newer versions. */
4492
4493 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
4494 if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
4495 log_ratelimit_full(
4496 log_level, JOURNAL_LOG_RATELIMIT,
4497 "Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %"PRIu64" file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
4498 f->path,
4499 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
4500 le64toh(f->header->n_data),
4501 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
4502 (uint64_t) f->last_stat.st_size,
4503 f->last_stat.st_size / le64toh(f->header->n_data));
4504 return true;
4505 }
4506
4507 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
4508 if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
4509 log_ratelimit_full(
4510 log_level, JOURNAL_LOG_RATELIMIT,
4511 "Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
4512 f->path,
4513 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
4514 le64toh(f->header->n_fields),
4515 le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
4516 return true;
4517 }
4518
4519 /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
4520 * longest chain is longer than some threshold, let's suggest rotation. */
4521 if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) &&
4522 le64toh(f->header->data_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
4523 log_ratelimit_full(
4524 log_level, JOURNAL_LOG_RATELIMIT,
4525 "Data hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
4526 f->path, le64toh(f->header->data_hash_chain_depth));
4527 return true;
4528 }
4529
4530 if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) &&
4531 le64toh(f->header->field_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
4532 log_ratelimit_full(
4533 log_level, JOURNAL_LOG_RATELIMIT,
4534 "Field hash table of %s has deepest hash chain of length at %" PRIu64 ", suggesting rotation.",
4535 f->path, le64toh(f->header->field_hash_chain_depth));
4536 return true;
4537 }
4538
4539 /* Are the data objects properly indexed by field objects? */
4540 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
4541 JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
4542 le64toh(f->header->n_data) > 0 &&
4543 le64toh(f->header->n_fields) == 0) {
4544 log_ratelimit_full(
4545 log_level, JOURNAL_LOG_RATELIMIT,
4546 "Data objects of %s are not indexed by field objects, suggesting rotation.",
4547 f->path);
4548 return true;
4549 }
4550
4551 if (max_file_usec > 0) {
4552 usec_t t, h;
4553
4554 h = le64toh(f->header->head_entry_realtime);
4555 t = now(CLOCK_REALTIME);
4556
4557 if (h > 0 && t > h + max_file_usec) {
4558 log_ratelimit_full(
4559 log_level, JOURNAL_LOG_RATELIMIT,
4560 "Oldest entry in %s is older than the configured file retention duration (%s), suggesting rotation.",
4561 f->path, FORMAT_TIMESPAN(max_file_usec, USEC_PER_SEC));
4562 return true;
4563 }
4564 }
4565
4566 return false;
4567 }
4568
/* Human-readable names for each ObjectType value, indexed by the enum constant; consumed by the
 * string-table lookup generated below (used in log and error messages). */
static const char * const journal_object_type_table[] = {
        [OBJECT_UNUSED] = "unused",
        [OBJECT_DATA] = "data",
        [OBJECT_FIELD] = "field",
        [OBJECT_ENTRY] = "entry",
        [OBJECT_DATA_HASH_TABLE] = "data hash table",
        [OBJECT_FIELD_HASH_TABLE] = "field hash table",
        [OBJECT_ENTRY_ARRAY] = "entry array",
        [OBJECT_TAG] = "tag",
};

/* Expands to journal_object_type_to_string(), mapping an ObjectType to the table above. */
DEFINE_STRING_TABLE_LOOKUP_TO_STRING(journal_object_type, ObjectType);