1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
6 #include <linux/magic.h>
10 #include <sys/statvfs.h>
16 #include "alloc-util.h"
17 #include "chattr-util.h"
21 #include "format-util.h"
23 #include "id128-util.h"
24 #include "journal-authenticate.h"
25 #include "journal-def.h"
26 #include "journal-file.h"
27 #include "journal-internal.h"
29 #include "memory-util.h"
30 #include "missing_threads.h"
31 #include "path-util.h"
33 #include "random-util.h"
35 #include "sort-util.h"
36 #include "stat-util.h"
37 #include "string-table.h"
38 #include "string-util.h"
40 #include "sync-util.h"
41 #include "user-util.h"
42 #include "xattr-util.h"
44 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
45 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
47 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
48 #define MIN_COMPRESS_THRESHOLD (8ULL)
50 /* This is the minimum journal file size */
51 #define JOURNAL_FILE_SIZE_MIN (512 * U64_KB) /* 512 KiB */
52 #define JOURNAL_COMPACT_SIZE_MAX ((uint64_t) UINT32_MAX) /* 4 GiB */
54 /* These are the lower and upper bounds if we deduce the max_use value from the file system size */
55 #define MAX_USE_LOWER (1 * U64_MB) /* 1 MiB */
56 #define MAX_USE_UPPER (4 * U64_GB) /* 4 GiB */
58 /* Those are the lower and upper bounds for the minimal use limit,
59 * i.e. how much we'll use even if keep_free suggests otherwise. */
60 #define MIN_USE_LOW (1 * U64_MB) /* 1 MiB */
61 #define MIN_USE_HIGH (16 * U64_MB) /* 16 MiB */
63 /* This is the upper bound if we deduce max_size from max_use */
64 #define MAX_SIZE_UPPER (128 * U64_MB) /* 128 MiB */
66 /* This is the upper bound if we deduce the keep_free value from the file system size */
67 #define KEEP_FREE_UPPER (4 * U64_GB) /* 4 GiB */
69 /* This is the keep_free value when we can't determine the system size */
70 #define DEFAULT_KEEP_FREE (1 * U64_MB) /* 1 MiB */
72 /* This is the default maximum number of journal files to keep around. */
73 #define DEFAULT_N_MAX_FILES 100
75 /* n_data was the first entry we added after the initial file format design */
76 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
78 /* How many entries to keep in the entry array chain cache at max */
79 #define CHAIN_CACHE_MAX 20
81 /* How much to increase the journal file size at once each time we allocate something new. */
82 #define FILE_SIZE_INCREASE (8 * U64_MB) /* 8MB */
84 /* Reread fstat() of the file for detecting deletions at least this often */
85 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
87 /* Longest hash chain to rotate after */
88 #define HASH_CHAIN_DEPTH_MAX 100
91 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
94 static int mmap_prot_from_open_flags(int flags
) {
95 switch (flags
& O_ACCMODE
) {
101 return PROT_READ
|PROT_WRITE
;
103 assert_not_reached();
107 int journal_file_tail_end_by_pread(JournalFile
*f
, uint64_t *ret_offset
) {
115 /* Same as journal_file_tail_end_by_mmap() below, but operates with pread() to avoid the mmap cache
116 * (and thus is thread safe) */
118 p
= le64toh(f
->header
->tail_object_offset
);
120 p
= le64toh(f
->header
->header_size
);
125 r
= journal_file_read_object_header(f
, OBJECT_UNUSED
, p
, &tail
);
129 sz
= le64toh(tail
.object
.size
);
130 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
134 if (p
> UINT64_MAX
- sz
)
145 int journal_file_tail_end_by_mmap(JournalFile
*f
, uint64_t *ret_offset
) {
153 /* Same as journal_file_tail_end_by_pread() above, but operates with the usual mmap logic */
155 p
= le64toh(f
->header
->tail_object_offset
);
157 p
= le64toh(f
->header
->header_size
);
162 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &tail
);
166 sz
= le64toh(READ_NOW(tail
->object
.size
));
167 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
171 if (p
> UINT64_MAX
- sz
)
182 int journal_file_set_offline_thread_join(JournalFile
*f
) {
187 if (f
->offline_state
== OFFLINE_JOINED
)
190 r
= pthread_join(f
->offline_thread
, NULL
);
194 f
->offline_state
= OFFLINE_JOINED
;
196 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
202 static int journal_file_set_online(JournalFile
*f
) {
207 if (!journal_file_writable(f
))
210 if (f
->fd
< 0 || !f
->header
)
214 switch (f
->offline_state
) {
216 /* No offline thread, no need to wait. */
220 case OFFLINE_SYNCING
: {
221 OfflineState tmp_state
= OFFLINE_SYNCING
;
222 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
223 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
226 /* Canceled syncing prior to offlining, no need to wait. */
230 case OFFLINE_AGAIN_FROM_SYNCING
: {
231 OfflineState tmp_state
= OFFLINE_AGAIN_FROM_SYNCING
;
232 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
233 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
236 /* Canceled restart from syncing, no need to wait. */
240 case OFFLINE_AGAIN_FROM_OFFLINING
: {
241 OfflineState tmp_state
= OFFLINE_AGAIN_FROM_OFFLINING
;
242 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
243 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
246 /* Canceled restart from offlining, must wait for offlining to complete however. */
251 r
= journal_file_set_offline_thread_join(f
);
261 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
264 switch (f
->header
->state
) {
269 f
->header
->state
= STATE_ONLINE
;
278 JournalFile
* journal_file_close(JournalFile
*f
) {
282 assert(f
->newest_boot_id_prioq_idx
== PRIOQ_IDX_NULL
);
285 mmap_cache_fd_free(f
->cache_fd
);
291 ordered_hashmap_free_free(f
->chain_cache
);
294 free(f
->compress_buffer
);
299 size_t sz
= PAGE_ALIGN(f
->fss_file_size
);
300 assert(sz
< SIZE_MAX
);
301 munmap(f
->fss_file
, sz
);
303 free(f
->fsprg_state
);
308 gcry_md_close(f
->hmac
);
314 static bool keyed_hash_requested(void) {
315 static thread_local
int cached
= -1;
319 r
= getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
322 log_debug_errno(r
, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
331 static bool compact_mode_requested(void) {
332 static thread_local
int cached
= -1;
336 r
= getenv_bool("SYSTEMD_JOURNAL_COMPACT");
339 log_debug_errno(r
, "Failed to parse $SYSTEMD_JOURNAL_COMPACT environment variable, ignoring: %m");
349 static Compression
getenv_compression(void) {
354 e
= getenv("SYSTEMD_JOURNAL_COMPRESS");
356 return DEFAULT_COMPRESSION
;
358 r
= parse_boolean(e
);
360 return r
? DEFAULT_COMPRESSION
: COMPRESSION_NONE
;
362 c
= compression_from_string(e
);
364 log_debug_errno(c
, "Failed to parse SYSTEMD_JOURNAL_COMPRESS value, ignoring: %s", e
);
365 return DEFAULT_COMPRESSION
;
368 if (!compression_supported(c
)) {
369 log_debug("Unsupported compression algorithm specified, ignoring: %s", e
);
370 return DEFAULT_COMPRESSION
;
377 static Compression
compression_requested(void) {
379 static thread_local Compression cached
= _COMPRESSION_INVALID
;
382 cached
= getenv_compression();
386 return COMPRESSION_NONE
;
390 static int journal_file_init_header(
392 JournalFileFlags file_flags
,
393 JournalFile
*template) {
402 /* Try to load the FSPRG state, and if we can't, then just don't do sealing */
403 seal
= FLAGS_SET(file_flags
, JOURNAL_SEAL
) && journal_file_fss_load(f
) >= 0;
407 .header_size
= htole64(ALIGN64(sizeof(h
))),
408 .incompatible_flags
= htole32(
409 FLAGS_SET(file_flags
, JOURNAL_COMPRESS
) * COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(compression_requested()) |
410 keyed_hash_requested() * HEADER_INCOMPATIBLE_KEYED_HASH
|
411 compact_mode_requested() * HEADER_INCOMPATIBLE_COMPACT
),
412 .compatible_flags
= htole32(
413 (seal
* (HEADER_COMPATIBLE_SEALED
| HEADER_COMPATIBLE_SEALED_CONTINUOUS
) ) |
414 HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID
),
417 assert_cc(sizeof(h
.signature
) == sizeof(HEADER_SIGNATURE
));
418 memcpy(h
.signature
, HEADER_SIGNATURE
, sizeof(HEADER_SIGNATURE
));
420 r
= sd_id128_randomize(&h
.file_id
);
424 r
= sd_id128_get_machine(&h
.machine_id
);
425 if (r
< 0 && !ERRNO_IS_MACHINE_ID_UNSET(r
))
426 return r
; /* If we have no valid machine ID (test environment?), let's simply leave the
427 * machine ID field all zeroes. */
430 h
.seqnum_id
= template->header
->seqnum_id
;
431 h
.tail_entry_seqnum
= template->header
->tail_entry_seqnum
;
433 h
.seqnum_id
= h
.file_id
;
435 k
= pwrite(f
->fd
, &h
, sizeof(h
), 0);
444 static int journal_file_refresh_header(JournalFile
*f
) {
450 /* We used to update the header's boot ID field here, but we don't do that anymore, as per
451 * HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID */
453 r
= journal_file_set_online(f
);
455 /* Sync the online state to disk; likely just created a new file, also sync the directory this file
457 (void) fsync_full(f
->fd
);
462 static bool warn_wrong_flags(const JournalFile
*f
, bool compatible
) {
463 const uint32_t any
= compatible
? HEADER_COMPATIBLE_ANY
: HEADER_INCOMPATIBLE_ANY
,
464 supported
= compatible
? HEADER_COMPATIBLE_SUPPORTED
: HEADER_INCOMPATIBLE_SUPPORTED
;
465 const char *type
= compatible
? "compatible" : "incompatible";
471 flags
= le32toh(compatible
? f
->header
->compatible_flags
: f
->header
->incompatible_flags
);
473 if (flags
& ~supported
) {
475 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32
,
476 f
->path
, type
, flags
& ~any
);
477 flags
= (flags
& any
) & ~supported
;
481 _cleanup_free_
char *t
= NULL
;
484 if (flags
& HEADER_COMPATIBLE_SEALED
)
485 strv
[n
++] = "sealed";
486 if (flags
& HEADER_COMPATIBLE_SEALED_CONTINUOUS
)
487 strv
[n
++] = "sealed-continuous";
489 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_XZ
)
490 strv
[n
++] = "xz-compressed";
491 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_LZ4
)
492 strv
[n
++] = "lz4-compressed";
493 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_ZSTD
)
494 strv
[n
++] = "zstd-compressed";
495 if (flags
& HEADER_INCOMPATIBLE_KEYED_HASH
)
496 strv
[n
++] = "keyed-hash";
497 if (flags
& HEADER_INCOMPATIBLE_COMPACT
)
498 strv
[n
++] = "compact";
501 assert(n
< ELEMENTSOF(strv
));
503 t
= strv_join((char**) strv
, ", ");
504 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
505 f
->path
, type
, n
> 1 ? "flags" : "flag", strnull(t
));
513 static bool offset_is_valid(uint64_t offset
, uint64_t header_size
, uint64_t tail_object_offset
) {
516 if (!VALID64(offset
))
518 if (offset
< header_size
)
520 if (offset
> tail_object_offset
)
525 static bool hash_table_is_valid(uint64_t offset
, uint64_t size
, uint64_t header_size
, uint64_t arena_size
, uint64_t tail_object_offset
) {
526 if ((offset
== 0) != (size
== 0))
530 if (offset
<= offsetof(Object
, hash_table
.items
))
532 offset
-= offsetof(Object
, hash_table
.items
);
533 if (!offset_is_valid(offset
, header_size
, tail_object_offset
))
535 assert(offset
<= header_size
+ arena_size
);
536 if (size
> header_size
+ arena_size
- offset
)
541 static int journal_file_verify_header(JournalFile
*f
) {
542 uint64_t arena_size
, header_size
;
547 if (memcmp(f
->header
->signature
, HEADER_SIGNATURE
, 8))
550 /* In both read and write mode we refuse to open files with incompatible
551 * flags we don't know. */
552 if (warn_wrong_flags(f
, false))
553 return -EPROTONOSUPPORT
;
555 /* When open for writing we refuse to open files with compatible flags, too. */
556 if (journal_file_writable(f
) && warn_wrong_flags(f
, true))
557 return -EPROTONOSUPPORT
;
559 if (f
->header
->state
>= _STATE_MAX
)
562 header_size
= le64toh(READ_NOW(f
->header
->header_size
));
564 /* The first addition was n_data, so check that we are at least this large */
565 if (header_size
< HEADER_SIZE_MIN
)
568 /* When open for writing we refuse to open files with a mismatch of the header size, i.e. writing to
569 * files implementing older or new header structures. */
570 if (journal_file_writable(f
) && header_size
!= sizeof(Header
))
571 return -EPROTONOSUPPORT
;
573 /* Don't write to journal files without the new boot ID update behavior guarantee. */
574 if (journal_file_writable(f
) && !JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f
->header
))
575 return -EPROTONOSUPPORT
;
577 if (JOURNAL_HEADER_SEALED(f
->header
) && !JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
580 arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
582 if (UINT64_MAX
- header_size
< arena_size
|| header_size
+ arena_size
> (uint64_t) f
->last_stat
.st_size
)
585 uint64_t tail_object_offset
= le64toh(f
->header
->tail_object_offset
);
586 if (!offset_is_valid(tail_object_offset
, header_size
, UINT64_MAX
))
588 if (header_size
+ arena_size
< tail_object_offset
)
590 if (header_size
+ arena_size
- tail_object_offset
< sizeof(ObjectHeader
))
593 if (!hash_table_is_valid(le64toh(f
->header
->data_hash_table_offset
),
594 le64toh(f
->header
->data_hash_table_size
),
595 header_size
, arena_size
, tail_object_offset
))
598 if (!hash_table_is_valid(le64toh(f
->header
->field_hash_table_offset
),
599 le64toh(f
->header
->field_hash_table_size
),
600 header_size
, arena_size
, tail_object_offset
))
603 uint64_t entry_array_offset
= le64toh(f
->header
->entry_array_offset
);
604 if (!offset_is_valid(entry_array_offset
, header_size
, tail_object_offset
))
607 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_offset
)) {
608 uint32_t offset
= le32toh(f
->header
->tail_entry_array_offset
);
609 uint32_t n
= le32toh(f
->header
->tail_entry_array_n_entries
);
611 if (!offset_is_valid(offset
, header_size
, tail_object_offset
))
613 if (entry_array_offset
> offset
)
615 if (entry_array_offset
== 0 && offset
!= 0)
617 if ((offset
== 0) != (n
== 0))
619 assert(offset
<= header_size
+ arena_size
);
620 if ((uint64_t) n
* journal_file_entry_array_item_size(f
) > header_size
+ arena_size
- offset
)
624 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_offset
)) {
625 uint64_t offset
= le64toh(f
->header
->tail_entry_offset
);
627 if (!offset_is_valid(offset
, header_size
, tail_object_offset
))
631 /* When there is an entry object, then these fields must be filled. */
632 if (sd_id128_is_null(f
->header
->tail_entry_boot_id
))
634 if (!VALID_REALTIME(le64toh(f
->header
->head_entry_realtime
)))
636 if (!VALID_REALTIME(le64toh(f
->header
->tail_entry_realtime
)))
638 if (!VALID_MONOTONIC(le64toh(f
->header
->tail_entry_monotonic
)))
641 /* Otherwise, the fields must be zero. */
642 if (JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f
->header
) &&
643 !sd_id128_is_null(f
->header
->tail_entry_boot_id
))
645 if (f
->header
->head_entry_realtime
!= 0)
647 if (f
->header
->tail_entry_realtime
!= 0)
649 if (f
->header
->tail_entry_monotonic
!= 0)
654 /* Verify number of objects */
655 uint64_t n_objects
= le64toh(f
->header
->n_objects
);
656 if (n_objects
> arena_size
/ sizeof(ObjectHeader
))
659 uint64_t n_entries
= le64toh(f
->header
->n_entries
);
660 if (n_entries
> n_objects
)
663 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
) &&
664 le64toh(f
->header
->n_data
) > n_objects
)
667 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
) &&
668 le64toh(f
->header
->n_fields
) > n_objects
)
671 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_tags
) &&
672 le64toh(f
->header
->n_tags
) > n_objects
)
675 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
) &&
676 le64toh(f
->header
->n_entry_arrays
) > n_objects
)
679 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_n_entries
) &&
680 le32toh(f
->header
->tail_entry_array_n_entries
) > n_entries
)
683 if (journal_file_writable(f
)) {
684 sd_id128_t machine_id
;
688 r
= sd_id128_get_machine(&machine_id
);
689 if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r
)) /* Gracefully handle the machine ID not being initialized yet */
690 machine_id
= SD_ID128_NULL
;
694 if (!sd_id128_equal(machine_id
, f
->header
->machine_id
))
695 return log_debug_errno(SYNTHETIC_ERRNO(EHOSTDOWN
),
696 "Trying to open journal file from different host for writing, refusing.");
698 state
= f
->header
->state
;
700 if (state
== STATE_ARCHIVED
)
701 return -ESHUTDOWN
; /* Already archived */
702 if (state
== STATE_ONLINE
)
703 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
704 "Journal file %s is already online. Assuming unclean closing.",
706 if (state
!= STATE_OFFLINE
)
707 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
708 "Journal file %s has unknown state %i.",
711 if (f
->header
->field_hash_table_size
== 0 || f
->header
->data_hash_table_size
== 0)
718 int journal_file_fstat(JournalFile
*f
) {
724 if (fstat(f
->fd
, &f
->last_stat
) < 0)
727 f
->last_stat_usec
= now(CLOCK_MONOTONIC
);
729 /* Refuse dealing with files that aren't regular */
730 r
= stat_verify_regular(&f
->last_stat
);
734 /* Refuse appending to files that are already deleted */
735 if (f
->last_stat
.st_nlink
<= 0)
741 static int journal_file_allocate(JournalFile
*f
, uint64_t offset
, uint64_t size
) {
742 uint64_t old_size
, new_size
, old_header_size
, old_arena_size
;
748 /* We assume that this file is not sparse, and we know that for sure, since we always call
749 * posix_fallocate() ourselves */
751 if (size
> PAGE_ALIGN_DOWN_U64(UINT64_MAX
) - offset
)
754 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
757 old_header_size
= le64toh(READ_NOW(f
->header
->header_size
));
758 old_arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
759 if (old_arena_size
> PAGE_ALIGN_DOWN_U64(UINT64_MAX
) - old_header_size
)
762 old_size
= old_header_size
+ old_arena_size
;
764 new_size
= MAX(PAGE_ALIGN_U64(offset
+ size
), old_header_size
);
766 if (new_size
<= old_size
) {
768 /* We already pre-allocated enough space, but before
769 * we write to it, let's check with fstat() if the
770 * file got deleted, in order make sure we don't throw
771 * away the data immediately. Don't check fstat() for
772 * all writes though, but only once ever 10s. */
774 if (f
->last_stat_usec
+ LAST_STAT_REFRESH_USEC
> now(CLOCK_MONOTONIC
))
777 return journal_file_fstat(f
);
780 /* Allocate more space. */
782 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
785 /* Refuse to go over 4G in compact mode so offsets can be stored in 32-bit. */
786 if (JOURNAL_HEADER_COMPACT(f
->header
) && new_size
> UINT32_MAX
)
789 if (new_size
> f
->metrics
.min_size
&& f
->metrics
.keep_free
> 0) {
792 if (fstatvfs(f
->fd
, &svfs
) >= 0) {
795 available
= LESS_BY(u64_multiply_safe(svfs
.f_bfree
, svfs
.f_bsize
), f
->metrics
.keep_free
);
797 if (new_size
- old_size
> available
)
802 /* Increase by larger blocks at once */
803 new_size
= ROUND_UP(new_size
, FILE_SIZE_INCREASE
);
804 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
805 new_size
= f
->metrics
.max_size
;
807 /* Note that the glibc fallocate() fallback is very
808 inefficient, hence we try to minimize the allocation area
810 r
= posix_fallocate_loop(f
->fd
, old_size
, new_size
- old_size
);
814 f
->header
->arena_size
= htole64(new_size
- old_header_size
);
816 return journal_file_fstat(f
);
819 static int journal_file_move_to(
832 /* This function may clear, overwrite, or alter previously cached entries with the same type. After
833 * this function has been called, all previously read objects with the same type may be invalidated,
834 * hence must be re-read before use. */
839 if (size
> UINT64_MAX
- offset
)
842 /* Avoid SIGBUS on invalid accesses */
843 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
) {
844 /* Hmm, out of range? Let's refresh the fstat() data
845 * first, before we trust that check. */
847 r
= journal_file_fstat(f
);
851 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
)
852 return -EADDRNOTAVAIL
;
855 return mmap_cache_fd_get(f
->cache_fd
, type_to_category(type
), keep_always
, offset
, size
, &f
->last_stat
, ret
);
858 static uint64_t minimum_header_size(JournalFile
*f
, Object
*o
) {
860 static const uint64_t table
[] = {
861 [OBJECT_DATA
] = sizeof(DataObject
),
862 [OBJECT_FIELD
] = sizeof(FieldObject
),
863 [OBJECT_ENTRY
] = sizeof(EntryObject
),
864 [OBJECT_DATA_HASH_TABLE
] = sizeof(HashTableObject
),
865 [OBJECT_FIELD_HASH_TABLE
] = sizeof(HashTableObject
),
866 [OBJECT_ENTRY_ARRAY
] = sizeof(EntryArrayObject
),
867 [OBJECT_TAG
] = sizeof(TagObject
),
873 if (o
->object
.type
== OBJECT_DATA
)
874 return journal_file_data_payload_offset(f
);
876 if (o
->object
.type
>= ELEMENTSOF(table
) || table
[o
->object
.type
] <= 0)
877 return sizeof(ObjectHeader
);
879 return table
[o
->object
.type
];
882 static int check_object_header(JournalFile
*f
, Object
*o
, ObjectType type
, uint64_t offset
) {
888 s
= le64toh(READ_NOW(o
->object
.size
));
890 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
891 "Attempt to move to uninitialized object: %" PRIu64
,
894 if (s
< sizeof(ObjectHeader
))
895 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
896 "Attempt to move to overly short object with size %"PRIu64
": %" PRIu64
,
899 if (o
->object
.type
<= OBJECT_UNUSED
|| o
->object
.type
>= _OBJECT_TYPE_MAX
)
900 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
901 "Attempt to move to object with invalid type (%u): %" PRIu64
,
902 o
->object
.type
, offset
);
904 if (type
> OBJECT_UNUSED
&& o
->object
.type
!= type
)
905 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
906 "Found %s object while expecting %s object: %" PRIu64
,
907 journal_object_type_to_string(o
->object
.type
),
908 journal_object_type_to_string(type
),
911 if (s
< minimum_header_size(f
, o
))
912 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
913 "Size of %s object (%"PRIu64
") is smaller than the minimum object size (%"PRIu64
"): %" PRIu64
,
914 journal_object_type_to_string(o
->object
.type
),
916 minimum_header_size(f
, o
),
922 /* Lightweight object checks. We want this to be fast, so that we won't
923 * slowdown every journal_file_move_to_object() call too much. */
924 static int check_object(JournalFile
*f
, Object
*o
, uint64_t offset
) {
928 switch (o
->object
.type
) {
931 if ((le64toh(o
->data
.entry_offset
) == 0) ^ (le64toh(o
->data
.n_entries
) == 0))
932 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
933 "Bad data n_entries: %" PRIu64
": %" PRIu64
,
934 le64toh(o
->data
.n_entries
),
937 if (le64toh(o
->object
.size
) <= journal_file_data_payload_offset(f
))
938 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
939 "Bad data size (<= %zu): %" PRIu64
": %" PRIu64
,
940 journal_file_data_payload_offset(f
),
941 le64toh(o
->object
.size
),
944 if (!VALID64(le64toh(o
->data
.next_hash_offset
)) ||
945 !VALID64(le64toh(o
->data
.next_field_offset
)) ||
946 !VALID64(le64toh(o
->data
.entry_offset
)) ||
947 !VALID64(le64toh(o
->data
.entry_array_offset
)))
948 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
949 "Invalid offset, next_hash_offset=" OFSfmt
", next_field_offset=" OFSfmt
", entry_offset=" OFSfmt
", entry_array_offset=" OFSfmt
": %" PRIu64
,
950 le64toh(o
->data
.next_hash_offset
),
951 le64toh(o
->data
.next_field_offset
),
952 le64toh(o
->data
.entry_offset
),
953 le64toh(o
->data
.entry_array_offset
),
959 if (le64toh(o
->object
.size
) <= offsetof(Object
, field
.payload
))
960 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
961 "Bad field size (<= %zu): %" PRIu64
": %" PRIu64
,
962 offsetof(Object
, field
.payload
),
963 le64toh(o
->object
.size
),
966 if (!VALID64(le64toh(o
->field
.next_hash_offset
)) ||
967 !VALID64(le64toh(o
->field
.head_data_offset
)))
968 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
969 "Invalid offset, next_hash_offset=" OFSfmt
", head_data_offset=" OFSfmt
": %" PRIu64
,
970 le64toh(o
->field
.next_hash_offset
),
971 le64toh(o
->field
.head_data_offset
),
978 sz
= le64toh(READ_NOW(o
->object
.size
));
979 if (sz
< offsetof(Object
, entry
.items
) ||
980 (sz
- offsetof(Object
, entry
.items
)) % journal_file_entry_item_size(f
) != 0)
981 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
982 "Bad entry size (<= %zu): %" PRIu64
": %" PRIu64
,
983 offsetof(Object
, entry
.items
),
987 if ((sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
) <= 0)
988 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
989 "Invalid number items in entry: %" PRIu64
": %" PRIu64
,
990 (sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
),
993 if (le64toh(o
->entry
.seqnum
) <= 0)
994 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
995 "Invalid entry seqnum: %" PRIx64
": %" PRIu64
,
996 le64toh(o
->entry
.seqnum
),
999 if (!VALID_REALTIME(le64toh(o
->entry
.realtime
)))
1000 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1001 "Invalid entry realtime timestamp: %" PRIu64
": %" PRIu64
,
1002 le64toh(o
->entry
.realtime
),
1005 if (!VALID_MONOTONIC(le64toh(o
->entry
.monotonic
)))
1006 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1007 "Invalid entry monotonic timestamp: %" PRIu64
": %" PRIu64
,
1008 le64toh(o
->entry
.monotonic
),
1011 if (sd_id128_is_null(o
->entry
.boot_id
))
1012 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1013 "Invalid object entry with an empty boot ID: %" PRIu64
,
1019 case OBJECT_DATA_HASH_TABLE
:
1020 case OBJECT_FIELD_HASH_TABLE
: {
1023 sz
= le64toh(READ_NOW(o
->object
.size
));
1024 if (sz
< offsetof(Object
, hash_table
.items
) ||
1025 (sz
- offsetof(Object
, hash_table
.items
)) % sizeof(HashItem
) != 0 ||
1026 (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
) <= 0)
1027 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1028 "Invalid %s hash table size: %" PRIu64
": %" PRIu64
,
1029 journal_object_type_to_string(o
->object
.type
),
1036 case OBJECT_ENTRY_ARRAY
: {
1039 sz
= le64toh(READ_NOW(o
->object
.size
));
1040 if (sz
< offsetof(Object
, entry_array
.items
) ||
1041 (sz
- offsetof(Object
, entry_array
.items
)) % journal_file_entry_array_item_size(f
) != 0 ||
1042 (sz
- offsetof(Object
, entry_array
.items
)) / journal_file_entry_array_item_size(f
) <= 0)
1043 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1044 "Invalid object entry array size: %" PRIu64
": %" PRIu64
,
1047 /* Here, we request that the offset of each entry array object is in strictly increasing order. */
1048 next
= le64toh(o
->entry_array
.next_entry_array_offset
);
1049 if (!VALID64(next
) || (next
> 0 && next
<= offset
))
1050 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1051 "Invalid object entry array next_entry_array_offset: %" PRIu64
": %" PRIu64
,
1059 if (le64toh(o
->object
.size
) != sizeof(TagObject
))
1060 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1061 "Invalid object tag size: %" PRIu64
": %" PRIu64
,
1062 le64toh(o
->object
.size
),
1065 if (!VALID_EPOCH(le64toh(o
->tag
.epoch
)))
1066 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1067 "Invalid object tag epoch: %" PRIu64
": %" PRIu64
,
1068 le64toh(o
->tag
.epoch
), offset
);
1076 int journal_file_move_to_object(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
**ret
) {
1082 /* Even if this function fails, it may clear, overwrite, or alter previously cached entries with the
1083 * same type. After this function has been called, all previously read objects with the same type may
1084 * be invalidated, hence must be re-read before use. */
1086 /* Objects may only be located at multiple of 64 bit */
1087 if (!VALID64(offset
))
1088 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1089 "Attempt to move to %s object at non-64-bit boundary: %" PRIu64
,
1090 journal_object_type_to_string(type
),
1093 /* Object may not be located in the file header */
1094 if (offset
< le64toh(f
->header
->header_size
))
1095 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1096 "Attempt to move to %s object located in file header: %" PRIu64
,
1097 journal_object_type_to_string(type
),
1100 r
= journal_file_move_to(f
, type
, false, offset
, sizeof(ObjectHeader
), (void**) &o
);
1104 r
= check_object_header(f
, o
, type
, offset
);
1108 r
= journal_file_move_to(f
, type
, false, offset
, le64toh(READ_NOW(o
->object
.size
)), (void**) &o
);
1112 r
= check_object_header(f
, o
, type
, offset
);
1116 r
= check_object(f
, o
, offset
);
1126 int journal_file_pin_object(JournalFile
*f
, Object
*o
) {
1130 /* This attaches the mmap window that provides the object to the 'pinning' category. So, reading
1131 * another object with the same type will not invalidate the object, until this function is called
1132 * for another object. */
1133 return mmap_cache_fd_pin(f
->cache_fd
, type_to_category(o
->object
.type
), o
, le64toh(o
->object
.size
));
1136 int journal_file_read_object_header(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
*ret
) {
1143 /* Objects may only be located at multiple of 64 bit */
1144 if (!VALID64(offset
))
1145 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1146 "Attempt to read %s object at non-64-bit boundary: %" PRIu64
,
1147 journal_object_type_to_string(type
), offset
);
1149 /* Object may not be located in the file header */
1150 if (offset
< le64toh(f
->header
->header_size
))
1151 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1152 "Attempt to read %s object located in file header: %" PRIu64
,
1153 journal_object_type_to_string(type
), offset
);
1155 /* This will likely read too much data but it avoids having to call pread() twice. */
1156 n
= pread(f
->fd
, &o
, sizeof(o
), offset
);
1158 return log_debug_errno(errno
, "Failed to read journal %s object at offset: %" PRIu64
,
1159 journal_object_type_to_string(type
), offset
);
1161 if ((size_t) n
< sizeof(o
.object
))
1162 return log_debug_errno(SYNTHETIC_ERRNO(EIO
),
1163 "Failed to read short %s object at offset: %" PRIu64
,
1164 journal_object_type_to_string(type
), offset
);
1166 r
= check_object_header(f
, &o
, type
, offset
);
1170 if ((size_t) n
< minimum_header_size(f
, &o
))
1171 return log_debug_errno(SYNTHETIC_ERRNO(EIO
),
1172 "Short read while reading %s object: %" PRIu64
,
1173 journal_object_type_to_string(type
), offset
);
1175 r
= check_object(f
, &o
, offset
);
1185 static uint64_t inc_seqnum(uint64_t seqnum
) {
1186 if (seqnum
< UINT64_MAX
-1)
1189 return 1; /* skip over UINT64_MAX and 0 when we run out of seqnums and start again */
1192 static uint64_t journal_file_entry_seqnum(
1196 uint64_t next_seqnum
;
1201 /* Picks a new sequence number for the entry we are about to add and returns it. */
1203 next_seqnum
= inc_seqnum(le64toh(f
->header
->tail_entry_seqnum
));
1205 /* If an external seqnum counter was passed, we update both the local and the external one, and set
1206 * it to the maximum of both */
1208 *seqnum
= next_seqnum
= MAX(inc_seqnum(*seqnum
), next_seqnum
);
1210 f
->header
->tail_entry_seqnum
= htole64(next_seqnum
);
1212 if (f
->header
->head_entry_seqnum
== 0)
1213 f
->header
->head_entry_seqnum
= htole64(next_seqnum
);
1218 int journal_file_append_object(
1222 Object
**ret_object
,
1223 uint64_t *ret_offset
) {
1231 assert(type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
);
1232 assert(size
>= sizeof(ObjectHeader
));
1234 r
= journal_file_set_online(f
);
1238 r
= journal_file_tail_end_by_mmap(f
, &p
);
1242 r
= journal_file_allocate(f
, p
, size
);
1246 r
= journal_file_move_to(f
, type
, false, p
, size
, (void**) &o
);
1250 o
->object
= (ObjectHeader
) {
1252 .size
= htole64(size
),
1255 f
->header
->tail_object_offset
= htole64(p
);
1256 f
->header
->n_objects
= htole64(le64toh(f
->header
->n_objects
) + 1);
1267 static int journal_file_setup_data_hash_table(JournalFile
*f
) {
1275 /* We estimate that we need 1 hash table entry per 768 bytes
1276 of journal file and we want to make sure we never get
1277 beyond 75% fill level. Calculate the hash table size for
1278 the maximum file size based on these metrics. */
1280 s
= (f
->metrics
.max_size
* 4 / 768 / 3) * sizeof(HashItem
);
1281 if (s
< DEFAULT_DATA_HASH_TABLE_SIZE
)
1282 s
= DEFAULT_DATA_HASH_TABLE_SIZE
;
1284 log_debug("Reserving %"PRIu64
" entries in data hash table.", s
/ sizeof(HashItem
));
1286 r
= journal_file_append_object(f
,
1287 OBJECT_DATA_HASH_TABLE
,
1288 offsetof(Object
, hash_table
.items
) + s
,
1293 memzero(o
->hash_table
.items
, s
);
1295 f
->header
->data_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1296 f
->header
->data_hash_table_size
= htole64(s
);
/* NOTE(review): elided extraction — intermediate original lines (declarations,
 * error checks, return) are missing. Code kept byte-identical; comments only. */
/* Appends the on-disk field hash table (fixed size, unlike the data table) and
 * records its offset/size in the header. */
1301 static int journal_file_setup_field_hash_table(JournalFile
*f
) {
1309 /* We use a fixed size hash table for the fields as this
1310 * number should grow very slowly only */
1312 s
= DEFAULT_FIELD_HASH_TABLE_SIZE
;
1313 log_debug("Reserving %"PRIu64
" entries in field hash table.", s
/ sizeof(HashItem
));
/* Append one OBJECT_FIELD_HASH_TABLE object sized for the item array. */
1315 r
= journal_file_append_object(f
,
1316 OBJECT_FIELD_HASH_TABLE
,
1317 offsetof(Object
, hash_table
.items
) + s
,
/* All-zero items mark every bucket chain as empty. */
1322 memzero(o
->hash_table
.items
, s
);
/* Publish table location + size in the (little-endian) header. */
1324 f
->header
->field_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1325 f
->header
->field_hash_table_size
= htole64(s
);
/* NOTE(review): elided extraction — error checks and the return are missing between
 * fragments. Code kept byte-identical; comments only. */
/* Lazily maps the data hash table into memory, caching the pointer in
 * f->data_hash_table; a non-NULL cache means it was already mapped. */
1330 int journal_file_map_data_hash_table(JournalFile
*f
) {
/* Already mapped — the (elided) branch presumably returns early. */
1338 if (f
->data_hash_table
)
/* Read table location/size from the little-endian header fields. */
1341 p
= le64toh(f
->header
->data_hash_table_offset
);
1342 s
= le64toh(f
->header
->data_hash_table_size
);
1344 r
= journal_file_move_to(f
,
1345 OBJECT_DATA_HASH_TABLE
,
/* Cache the mapped pointer for subsequent lookups. */
1352 f
->data_hash_table
= t
;
/* NOTE(review): elided extraction — error checks and the return are missing.
 * Code kept byte-identical; comments only. */
/* Lazily maps the field hash table; mirrors journal_file_map_data_hash_table()
 * but for f->field_hash_table and the field table header fields. */
1356 int journal_file_map_field_hash_table(JournalFile
*f
) {
/* Already mapped — the (elided) branch presumably returns early. */
1364 if (f
->field_hash_table
)
/* Table location/size come from the little-endian header. */
1367 p
= le64toh(f
->header
->field_hash_table_offset
);
1368 s
= le64toh(f
->header
->field_hash_table_size
);
1370 r
= journal_file_move_to(f
,
1371 OBJECT_FIELD_HASH_TABLE
,
/* Cache the mapped pointer. */
1378 f
->field_hash_table
= t
;
1382 static int journal_file_link_field(
1393 assert(f
->field_hash_table
);
1397 if (o
->object
.type
!= OBJECT_FIELD
)
1400 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1404 /* This might alter the window we are looking at */
1405 o
->field
.next_hash_offset
= o
->field
.head_data_offset
= 0;
1408 p
= le64toh(f
->field_hash_table
[h
].tail_hash_offset
);
1410 f
->field_hash_table
[h
].head_hash_offset
= htole64(offset
);
1412 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1416 o
->field
.next_hash_offset
= htole64(offset
);
1419 f
->field_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1421 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
1422 f
->header
->n_fields
= htole64(le64toh(f
->header
->n_fields
) + 1);
1427 static int journal_file_link_data(
1438 assert(f
->data_hash_table
);
1442 if (o
->object
.type
!= OBJECT_DATA
)
1445 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1449 /* This might alter the window we are looking at */
1450 o
->data
.next_hash_offset
= o
->data
.next_field_offset
= 0;
1451 o
->data
.entry_offset
= o
->data
.entry_array_offset
= 0;
1452 o
->data
.n_entries
= 0;
1455 p
= le64toh(f
->data_hash_table
[h
].tail_hash_offset
);
1457 /* Only entry in the hash table is easy */
1458 f
->data_hash_table
[h
].head_hash_offset
= htole64(offset
);
1460 /* Move back to the previous data object, to patch in
1463 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1467 o
->data
.next_hash_offset
= htole64(offset
);
1470 f
->data_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1472 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
1473 f
->header
->n_data
= htole64(le64toh(f
->header
->n_data
) + 1);
1478 static int get_next_hash_offset(
1481 le64_t
*next_hash_offset
,
1483 le64_t
*header_max_depth
) {
1489 assert(next_hash_offset
);
1492 nextp
= le64toh(READ_NOW(*next_hash_offset
));
1494 if (nextp
<= *p
) /* Refuse going in loops */
1495 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1496 "Detected hash item loop in %s, refusing.", f
->path
);
1500 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1501 if (header_max_depth
&& journal_file_writable(f
))
1502 *header_max_depth
= htole64(MAX(*depth
, le64toh(*header_max_depth
)));
1509 int journal_file_find_field_object_with_hash(
1514 Object
**ret_object
,
1515 uint64_t *ret_offset
) {
1517 uint64_t p
, osize
, h
, m
, depth
= 0;
1525 /* If the field hash table is empty, we can't find anything */
1526 if (le64toh(f
->header
->field_hash_table_size
) <= 0)
1529 /* Map the field hash table, if it isn't mapped yet. */
1530 r
= journal_file_map_field_hash_table(f
);
1534 osize
= offsetof(Object
, field
.payload
) + size
;
1536 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1541 p
= le64toh(f
->field_hash_table
[h
].head_hash_offset
);
1545 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1549 if (le64toh(o
->field
.hash
) == hash
&&
1550 le64toh(o
->object
.size
) == osize
&&
1551 memcmp(o
->field
.payload
, field
, size
) == 0) {
1561 r
= get_next_hash_offset(
1564 &o
->field
.next_hash_offset
,
1566 JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) ? &f
->header
->field_hash_chain_depth
: NULL
);
/* NOTE(review): elided extraction — the parameter list and asserts between the
 * numbered fragments are missing. Code kept byte-identical; comments only. */
/* Hashes a data/field payload using the hash function selected by the file's
 * format flags: keyed siphash24 (keyed with the file ID) for new-style files,
 * unkeyed Jenkins hash for old-style ones. */
1574 uint64_t journal_file_hash_data(
/* NULL data is only acceptable for an empty payload. */
1581 assert(data
|| sz
== 0);
1583 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1584 * function use siphash. Old journal files use the Jenkins hash. */
1586 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
/* Key the hash with the per-file ID so hashes differ between files. */
1587 return siphash24(data
, sz
, f
->header
->file_id
.bytes
);
1589 return jenkins_hash64(data
, sz
);
1592 int journal_file_find_field_object(
1596 Object
**ret_object
,
1597 uint64_t *ret_offset
) {
1603 return journal_file_find_field_object_with_hash(
1606 journal_file_hash_data(f
, field
, size
),
1607 ret_object
, ret_offset
);
1610 int journal_file_find_data_object_with_hash(
1615 Object
**ret_object
,
1616 uint64_t *ret_offset
) {
1618 uint64_t p
, h
, m
, depth
= 0;
1623 assert(data
|| size
== 0);
1625 /* If there's no data hash table, then there's no entry. */
1626 if (le64toh(f
->header
->data_hash_table_size
) <= 0)
1629 /* Map the data hash table, if it isn't mapped yet. */
1630 r
= journal_file_map_data_hash_table(f
);
1634 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1639 p
= le64toh(f
->data_hash_table
[h
].head_hash_offset
);
1646 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1650 if (le64toh(o
->data
.hash
) != hash
)
1653 r
= journal_file_data_payload(f
, o
, p
, NULL
, 0, 0, &d
, &rsize
);
1656 assert(r
> 0); /* journal_file_data_payload() always returns > 0 if no field is provided. */
1658 if (memcmp_nn(data
, size
, d
, rsize
) == 0) {
1669 r
= get_next_hash_offset(
1672 &o
->data
.next_hash_offset
,
1674 JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) ? &f
->header
->data_hash_chain_depth
: NULL
);
1682 int journal_file_find_data_object(
1686 Object
**ret_object
,
1687 uint64_t *ret_offset
) {
1690 assert(data
|| size
== 0);
1692 return journal_file_find_data_object_with_hash(
1695 journal_file_hash_data(f
, data
, size
),
1696 ret_object
, ret_offset
);
1699 bool journal_field_valid(const char *p
, size_t l
, bool allow_protected
) {
1700 /* We kinda enforce POSIX syntax recommendations for
1701 environment variables here, but make a couple of additional
1704 http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html */
1711 /* No empty field names */
1715 /* Don't allow names longer than 64 chars */
1719 /* Variables starting with an underscore are protected */
1720 if (!allow_protected
&& p
[0] == '_')
1723 /* Don't allow digits as first character */
1724 if (ascii_isdigit(p
[0]))
1727 /* Only allow A-Z0-9 and '_' */
1728 for (const char *a
= p
; a
< p
+ l
; a
++)
1729 if ((*a
< 'A' || *a
> 'Z') &&
1730 !ascii_isdigit(*a
) &&
1737 static int journal_file_append_field(
1741 Object
**ret_object
,
1742 uint64_t *ret_offset
) {
1753 if (!journal_field_valid(field
, size
, true))
1756 hash
= journal_file_hash_data(f
, field
, size
);
1758 r
= journal_file_find_field_object_with_hash(f
, field
, size
, hash
, ret_object
, ret_offset
);
1764 osize
= offsetof(Object
, field
.payload
) + size
;
1765 r
= journal_file_append_object(f
, OBJECT_FIELD
, osize
, &o
, &p
);
1769 o
->field
.hash
= htole64(hash
);
1770 memcpy(o
->field
.payload
, field
, size
);
1772 r
= journal_file_link_field(f
, o
, p
, hash
);
1776 /* The linking might have altered the window, so let's only pass the offset to hmac which will
1777 * move to the object again if needed. */
1780 r
= journal_file_hmac_put_object(f
, OBJECT_FIELD
, NULL
, p
);
1786 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, ret_object
);
/* NOTE(review): elided extraction — declarations, the early-return for the
 * uncompressed case, and the #else/#endif of the HAVE_COMPRESSION guard are
 * missing between fragments. Code kept byte-identical; comments only. */
/* Compresses src (size bytes) into dst when compression is enabled and the
 * payload is at least the configured threshold; stores the compressed size in
 * *rsize and returns 1 on success. Compression failure is only logged — the
 * caller falls back to storing the payload uncompressed. */
1797 static int maybe_compress_payload(JournalFile
*f
, uint8_t *dst
, const uint8_t *src
, uint64_t size
, size_t *rsize
) {
1801 #if HAVE_COMPRESSION
1805 c
= JOURNAL_FILE_COMPRESSION(f
);
/* Skip compression when disabled or when the payload is below the threshold
 * (too small to be worth it). */
1806 if (c
== COMPRESSION_NONE
|| size
< f
->compress_threshold_bytes
)
/* "size - 1" caps the output: compression only pays off if it actually
 * shrinks the payload by at least one byte. */
1809 r
= compress_blob(c
, src
, size
, dst
, size
- 1, rsize
);
1811 return log_debug_errno(r
, "Failed to compress data object using %s, ignoring: %m", compression_to_string(c
));
1813 log_debug("Compressed data object %"PRIu64
" -> %zu using %s", size
, *rsize
, compression_to_string(c
));
1815 return 1; /* compressed */
1821 static int journal_file_append_data(
1825 Object
**ret_object
,
1826 uint64_t *ret_offset
) {
1828 uint64_t hash
, p
, osize
;
1836 if (!data
|| size
== 0)
1839 hash
= journal_file_hash_data(f
, data
, size
);
1841 r
= journal_file_find_data_object_with_hash(f
, data
, size
, hash
, ret_object
, ret_offset
);
1847 eq
= memchr(data
, '=', size
);
1851 osize
= journal_file_data_payload_offset(f
) + size
;
1852 r
= journal_file_append_object(f
, OBJECT_DATA
, osize
, &o
, &p
);
1856 o
->data
.hash
= htole64(hash
);
1858 r
= maybe_compress_payload(f
, journal_file_data_payload_field(f
, o
), data
, size
, &rsize
);
1860 /* We don't really care failures, let's continue without compression */
1861 memcpy_safe(journal_file_data_payload_field(f
, o
), data
, size
);
1863 Compression c
= JOURNAL_FILE_COMPRESSION(f
);
1865 assert(c
>= 0 && c
< _COMPRESSION_MAX
&& c
!= COMPRESSION_NONE
);
1867 o
->object
.size
= htole64(journal_file_data_payload_offset(f
) + rsize
);
1868 o
->object
.flags
|= COMPRESSION_TO_OBJECT_FLAG(c
);
1871 r
= journal_file_link_data(f
, o
, p
, hash
);
1875 /* The linking might have altered the window, so let's refresh our pointer. */
1876 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1881 r
= journal_file_hmac_put_object(f
, OBJECT_DATA
, o
, p
);
1886 /* Create field object ... */
1887 r
= journal_file_append_field(f
, data
, (uint8_t*) eq
- (uint8_t*) data
, &fo
, NULL
);
1891 /* ... and link it in. */
1892 o
->data
.next_field_offset
= fo
->field
.head_data_offset
;
1893 fo
->field
.head_data_offset
= le64toh(p
);
1904 static int maybe_decompress_payload(
1908 Compression compression
,
1910 size_t field_length
,
1911 size_t data_threshold
,
1917 /* We can't read objects larger than 4G on a 32-bit machine */
1918 if ((uint64_t) (size_t) size
!= size
)
1921 if (compression
!= COMPRESSION_NONE
) {
1922 #if HAVE_COMPRESSION
1927 r
= decompress_startswith(compression
, payload
, size
, &f
->compress_buffer
, field
,
1930 return log_debug_errno(r
,
1931 "Cannot decompress %s object of length %" PRIu64
": %m",
1932 compression_to_string(compression
),
1943 r
= decompress_blob(compression
, payload
, size
, &f
->compress_buffer
, &rsize
, 0);
1948 *ret_data
= f
->compress_buffer
;
1952 return -EPROTONOSUPPORT
;
1955 if (field
&& (size
< field_length
+ 1 || memcmp(payload
, field
, field_length
) != 0 || payload
[field_length
] != '=')) {
1964 *ret_data
= payload
;
1966 *ret_size
= (size_t) size
;
1972 int journal_file_data_payload(
1977 size_t field_length
,
1978 size_t data_threshold
,
1987 assert(!field
== (field_length
== 0)); /* These must be specified together. */
1990 r
= journal_file_move_to_object(f
, OBJECT_DATA
, offset
, &o
);
1995 size
= le64toh(READ_NOW(o
->object
.size
));
1996 if (size
< journal_file_data_payload_offset(f
))
1999 size
-= journal_file_data_payload_offset(f
);
2001 c
= COMPRESSION_FROM_OBJECT(o
);
2003 return -EPROTONOSUPPORT
;
2005 return maybe_decompress_payload(f
, journal_file_data_payload_field(f
, o
), size
, c
, field
,
2006 field_length
, data_threshold
, ret_data
, ret_size
);
/* NOTE(review): elided extraction — the error-return lines of the two guard
 * branches are missing. Code kept byte-identical; comments only. */
/* Returns the number of entry items in an OBJECT_ENTRY, derived from the
 * object size; item width depends on the file's compact/regular layout. */
2009 uint64_t journal_file_entry_n_items(JournalFile
*f
, Object
*o
) {
/* Guard: only meaningful for entry objects (elided branch presumably bails). */
2015 if (o
->object
.type
!= OBJECT_ENTRY
)
/* READ_NOW() forces a single read of the mmap-backed size field. */
2018 sz
= le64toh(READ_NOW(o
->object
.size
));
/* Guard against a truncated/corrupt object smaller than its own header. */
2019 if (sz
< offsetof(Object
, entry
.items
))
2022 return (sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
);
/* NOTE(review): elided extraction — the error-return lines of the guards are
 * missing. Code kept byte-identical; comments only. */
/* Returns the item capacity of an OBJECT_ENTRY_ARRAY, derived from the object
 * size; mirrors journal_file_entry_n_items() for entry arrays. */
2025 uint64_t journal_file_entry_array_n_items(JournalFile
*f
, Object
*o
) {
2031 if (o
->object
.type
!= OBJECT_ENTRY_ARRAY
)
/* Single read of the mmap-backed size, then sanity-check it. */
2034 sz
= le64toh(READ_NOW(o
->object
.size
));
2035 if (sz
< offsetof(Object
, entry_array
.items
))
2038 return (sz
- offsetof(Object
, entry_array
.items
)) / journal_file_entry_array_item_size(f
);
/* NOTE(review): elided extraction — the error-return lines of the guards are
 * missing. Code kept byte-identical; comments only. */
/* Returns the number of HashItem buckets in a data or field hash table object,
 * derived from the object size. */
2041 uint64_t journal_file_hash_table_n_items(Object
*o
) {
/* Accept either hash table object type. */
2046 if (!IN_SET(o
->object
.type
, OBJECT_DATA_HASH_TABLE
, OBJECT_FIELD_HASH_TABLE
))
2049 sz
= le64toh(READ_NOW(o
->object
.size
));
2050 if (sz
< offsetof(Object
, hash_table
.items
))
2053 return (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
);
/* NOTE(review): elided extraction — asserts, the "} else" line and closing
 * braces are missing between fragments. Code kept byte-identical; comments only. */
/* Stores offset p into slot i of an entry array, using 32-bit items for
 * compact-mode files and 64-bit items otherwise. */
2056 static void write_entry_array_item(JournalFile
*f
, Object
*o
, uint64_t i
, uint64_t p
) {
2060 if (JOURNAL_HEADER_COMPACT(f
->header
)) {
/* Compact items are 32-bit, so the offset must fit. */
2061 assert(p
<= UINT32_MAX
);
2062 o
->entry_array
.items
.compact
[i
] = htole32(p
);
2064 o
->entry_array
.items
.regular
[i
] = htole64(p
);
2067 static int link_entry_into_array(
2075 uint64_t n
= 0, ap
= 0, q
, i
, a
, hidx
;
2085 a
= tail
? le32toh(*tail
) : le64toh(*first
);
2086 hidx
= le64toh(READ_NOW(*idx
));
2087 i
= tidx
? le32toh(READ_NOW(*tidx
)) : hidx
;
2090 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2094 n
= journal_file_entry_array_n_items(f
, o
);
2096 write_entry_array_item(f
, o
, i
, p
);
2097 *idx
= htole64(hidx
+ 1);
2099 *tidx
= htole32(le32toh(*tidx
) + 1);
2105 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
2116 r
= journal_file_append_object(f
, OBJECT_ENTRY_ARRAY
,
2117 offsetof(Object
, entry_array
.items
) + n
* journal_file_entry_array_item_size(f
),
2123 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY_ARRAY
, o
, q
);
2128 write_entry_array_item(f
, o
, i
, p
);
2131 *first
= htole64(q
);
2133 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, ap
, &o
);
2137 o
->entry_array
.next_entry_array_offset
= htole64(q
);
2143 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
2144 f
->header
->n_entry_arrays
= htole64(le64toh(f
->header
->n_entry_arrays
) + 1);
2146 *idx
= htole64(hidx
+ 1);
2153 static int link_entry_into_array_plus_one(
2171 hidx
= le64toh(READ_NOW(*idx
));
2172 if (hidx
== UINT64_MAX
)
2175 *extra
= htole64(p
);
2179 i
= htole64(hidx
- 1);
2180 r
= link_entry_into_array(f
, first
, &i
, tail
, tidx
, p
);
2185 *idx
= htole64(hidx
+ 1);
2189 static int journal_file_link_entry_item(JournalFile
*f
, uint64_t offset
, uint64_t p
) {
2196 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
2200 return link_entry_into_array_plus_one(f
,
2201 &o
->data
.entry_offset
,
2202 &o
->data
.entry_array_offset
,
2204 JOURNAL_HEADER_COMPACT(f
->header
) ? &o
->data
.compact
.tail_entry_array_offset
: NULL
,
2205 JOURNAL_HEADER_COMPACT(f
->header
) ? &o
->data
.compact
.tail_entry_array_n_entries
: NULL
,
2209 static int journal_file_link_entry(
2213 const EntryItem items
[],
2223 if (o
->object
.type
!= OBJECT_ENTRY
)
2226 __atomic_thread_fence(__ATOMIC_SEQ_CST
);
2228 /* Link up the entry itself */
2229 r
= link_entry_into_array(f
,
2230 &f
->header
->entry_array_offset
,
2231 &f
->header
->n_entries
,
2232 JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_offset
) ? &f
->header
->tail_entry_array_offset
: NULL
,
2233 JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_n_entries
) ? &f
->header
->tail_entry_array_n_entries
: NULL
,
2238 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
2240 if (f
->header
->head_entry_realtime
== 0)
2241 f
->header
->head_entry_realtime
= o
->entry
.realtime
;
2243 f
->header
->tail_entry_realtime
= o
->entry
.realtime
;
2244 f
->header
->tail_entry_monotonic
= o
->entry
.monotonic
;
2245 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_offset
))
2246 f
->header
->tail_entry_offset
= htole64(offset
);
2247 f
->newest_mtime
= 0; /* we have a new tail entry now, explicitly invalidate newest boot id/timestamp info */
2249 /* Link up the items */
2250 for (uint64_t i
= 0; i
< n_items
; i
++) {
2253 /* If we fail to link an entry item because we can't allocate a new entry array, don't fail
2254 * immediately but try to link the other entry items since it might still be possible to link
2255 * those if they don't require a new entry array to be allocated. */
2257 k
= journal_file_link_entry_item(f
, offset
, items
[i
].object_offset
);
2267 static void write_entry_item(JournalFile
*f
, Object
*o
, uint64_t i
, const EntryItem
*item
) {
2272 if (JOURNAL_HEADER_COMPACT(f
->header
)) {
2273 assert(item
->object_offset
<= UINT32_MAX
);
2274 o
->entry
.items
.compact
[i
].object_offset
= htole32(item
->object_offset
);
2276 o
->entry
.items
.regular
[i
].object_offset
= htole64(item
->object_offset
);
2277 o
->entry
.items
.regular
[i
].hash
= htole64(item
->hash
);
2281 static int journal_file_append_entry_internal(
2283 const dual_timestamp
*ts
,
2284 const sd_id128_t
*boot_id
,
2285 const sd_id128_t
*machine_id
,
2287 const EntryItem items
[],
2290 sd_id128_t
*seqnum_id
,
2291 Object
**ret_object
,
2292 uint64_t *ret_offset
) {
2303 assert(!sd_id128_is_null(*boot_id
));
2304 assert(items
|| n_items
== 0);
2306 if (f
->strict_order
) {
2307 /* If requested be stricter with ordering in this journal file, to make searching via
2308 * bisection fully deterministic. This is an optional feature, so that if desired journal
2309 * files can be written where the ordering is not strictly enforced (in which case bisection
2310 * will yield *a* result, but not the *only* result, when searching for points in
2311 * time). Strict ordering mode is enabled when journald originally writes the files, but
2312 * might not necessarily be if other tools (the remoting tools for example) write journal
2313 * files from combined sources.
2315 * Typically, if any of the errors generated here are seen journald will just rotate the
2316 * journal files and start anew. */
2318 if (ts
->realtime
< le64toh(f
->header
->tail_entry_realtime
))
2319 return log_debug_errno(SYNTHETIC_ERRNO(EREMCHG
),
2320 "Realtime timestamp %" PRIu64
" smaller than previous realtime "
2321 "timestamp %" PRIu64
", refusing entry.",
2322 ts
->realtime
, le64toh(f
->header
->tail_entry_realtime
));
2324 if (sd_id128_equal(*boot_id
, f
->header
->tail_entry_boot_id
) &&
2325 ts
->monotonic
< le64toh(f
->header
->tail_entry_monotonic
))
2326 return log_debug_errno(
2327 SYNTHETIC_ERRNO(ENOTNAM
),
2328 "Monotonic timestamp %" PRIu64
2329 " smaller than previous monotonic timestamp %" PRIu64
2330 " while having the same boot ID, refusing entry.",
2332 le64toh(f
->header
->tail_entry_monotonic
));
2336 /* Settle the passed in sequence number ID */
2338 if (sd_id128_is_null(*seqnum_id
))
2339 *seqnum_id
= f
->header
->seqnum_id
; /* Caller has none assigned, then copy the one from the file */
2340 else if (!sd_id128_equal(*seqnum_id
, f
->header
->seqnum_id
)) {
2341 /* Different seqnum IDs? We can't allow entries from multiple IDs end up in the same journal.*/
2342 if (le64toh(f
->header
->n_entries
) == 0)
2343 f
->header
->seqnum_id
= *seqnum_id
; /* Caller has one, and file so far has no entries, then copy the one from the caller */
2345 return log_debug_errno(SYNTHETIC_ERRNO(EILSEQ
),
2346 "Sequence number IDs don't match, refusing entry.");
2350 if (machine_id
&& sd_id128_is_null(f
->header
->machine_id
))
2351 /* Initialize machine ID when not set yet */
2352 f
->header
->machine_id
= *machine_id
;
2354 osize
= offsetof(Object
, entry
.items
) + (n_items
* journal_file_entry_item_size(f
));
2356 r
= journal_file_append_object(f
, OBJECT_ENTRY
, osize
, &o
, &np
);
2360 o
->entry
.seqnum
= htole64(journal_file_entry_seqnum(f
, seqnum
));
2361 o
->entry
.realtime
= htole64(ts
->realtime
);
2362 o
->entry
.monotonic
= htole64(ts
->monotonic
);
2363 o
->entry
.xor_hash
= htole64(xor_hash
);
2364 o
->entry
.boot_id
= f
->header
->tail_entry_boot_id
= *boot_id
;
2366 for (size_t i
= 0; i
< n_items
; i
++)
2367 write_entry_item(f
, o
, i
, &items
[i
]);
2370 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY
, o
, np
);
2375 r
= journal_file_link_entry(f
, o
, np
, items
, n_items
);
/* NOTE(review): elided extraction — asserts and the closing brace are missing.
 * Code kept byte-identical; comments only. */
/* Makes mmap-based writes visible to inotify watchers by truncating the file
 * to its current size, which generates an IN_MODIFY event. */
2388 void journal_file_post_change(JournalFile
*f
) {
2394 /* inotify() does not receive IN_MODIFY events from file
2395 * accesses done via mmap(). After each access we hence
2396 * trigger IN_MODIFY by truncating the journal file to its
2397 * current size which triggers IN_MODIFY. */
/* Full barrier so all prior mmap stores are ordered before the ftruncate(). */
2399 __atomic_thread_fence(__ATOMIC_SEQ_CST
);
/* Same-size truncate is a no-op for the data but still emits the event;
 * failure is only logged — this is best-effort notification. */
2401 if (ftruncate(f
->fd
, f
->last_stat
.st_size
) < 0)
2402 log_debug_errno(errno
, "Failed to truncate file to its own size: %m");
2405 static int post_change_thunk(sd_event_source
*timer
, uint64_t usec
, void *userdata
) {
2408 journal_file_post_change(userdata
);
2413 static void schedule_post_change(JournalFile
*f
) {
2418 assert(f
->post_change_timer
);
2420 assert_se(e
= sd_event_source_get_event(f
->post_change_timer
));
2422 /* If we are already going down, post the change immediately. */
2423 if (IN_SET(sd_event_get_state(e
), SD_EVENT_EXITING
, SD_EVENT_FINISHED
))
2426 r
= sd_event_source_get_enabled(f
->post_change_timer
, NULL
);
2428 log_debug_errno(r
, "Failed to get ftruncate timer state: %m");
2434 r
= sd_event_source_set_time_relative(f
->post_change_timer
, f
->post_change_timer_period
);
2436 log_debug_errno(r
, "Failed to set time for scheduling ftruncate: %m");
2440 r
= sd_event_source_set_enabled(f
->post_change_timer
, SD_EVENT_ONESHOT
);
2442 log_debug_errno(r
, "Failed to enable scheduled ftruncate: %m");
2449 /* On failure, let's simply post the change immediately. */
2450 journal_file_post_change(f
);
2453 /* Enable coalesced change posting in a timer on the provided sd_event instance */
2454 int journal_file_enable_post_change_timer(JournalFile
*f
, sd_event
*e
, usec_t t
) {
2455 _cleanup_(sd_event_source_unrefp
) sd_event_source
*timer
= NULL
;
2459 assert_return(!f
->post_change_timer
, -EINVAL
);
2463 r
= sd_event_add_time(e
, &timer
, CLOCK_MONOTONIC
, 0, 0, post_change_thunk
, f
);
2467 r
= sd_event_source_set_enabled(timer
, SD_EVENT_OFF
);
2471 f
->post_change_timer
= TAKE_PTR(timer
);
2472 f
->post_change_timer_period
= t
;
/* Comparator for typesafe_qsort(): orders EntryItems by on-disk object offset
 * (ascending), so entry items are written in disk order. ASSERT_PTR() rejects
 * NULL arguments. (Closing brace elided from this extraction.) */
2477 static int entry_item_cmp(const EntryItem
*a
, const EntryItem
*b
) {
2478 return CMP(ASSERT_PTR(a
)->object_offset
, ASSERT_PTR(b
)->object_offset
);
/* NOTE(review): elided extraction — the initialization of j, the early return
 * for n == 0, and the final "return j" are missing between fragments. Code kept
 * byte-identical; comments only. */
/* Deduplicates consecutive EntryItems that share the same object_offset
 * (expects the array sorted by entry_item_cmp()); compacts in place and
 * presumably returns the new length — TODO confirm against the full source. */
2481 static size_t remove_duplicate_entry_items(EntryItem items
[], size_t n
) {
2484 assert(items
|| n
== 0);
/* Classic in-place unique(): j is the write cursor, i scans ahead. */
2489 for (size_t i
= 1; i
< n
; i
++)
2490 if (items
[i
].object_offset
!= items
[j
- 1].object_offset
)
2491 items
[j
++] = items
[i
];
2496 int journal_file_append_entry(
2498 const dual_timestamp
*ts
,
2499 const sd_id128_t
*boot_id
,
2500 const struct iovec iovec
[],
2503 sd_id128_t
*seqnum_id
,
2504 Object
**ret_object
,
2505 uint64_t *ret_offset
) {
2507 _cleanup_free_ EntryItem
*items_alloc
= NULL
;
2509 uint64_t xor_hash
= 0;
2510 struct dual_timestamp _ts
;
2511 sd_id128_t _boot_id
, _machine_id
, *machine_id
;
2517 assert(n_iovec
> 0);
2520 if (!VALID_REALTIME(ts
->realtime
))
2521 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2522 "Invalid realtime timestamp %" PRIu64
", refusing entry.",
2524 if (!VALID_MONOTONIC(ts
->monotonic
))
2525 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2526 "Invalid monotomic timestamp %" PRIu64
", refusing entry.",
2529 dual_timestamp_now(&_ts
);
2534 if (sd_id128_is_null(*boot_id
))
2535 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
), "Empty boot ID, refusing entry.");
2537 r
= sd_id128_get_boot(&_boot_id
);
2541 boot_id
= &_boot_id
;
2544 r
= sd_id128_get_machine(&_machine_id
);
2545 if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r
))
2546 /* Gracefully handle the machine ID not being initialized yet */
2551 machine_id
= &_machine_id
;
2554 r
= journal_file_maybe_append_tag(f
, ts
->realtime
);
2559 if (n_iovec
< ALLOCA_MAX
/ sizeof(EntryItem
) / 2)
2560 items
= newa(EntryItem
, n_iovec
);
2562 items_alloc
= new(EntryItem
, n_iovec
);
2566 items
= items_alloc
;
2569 for (size_t i
= 0; i
< n_iovec
; i
++) {
2573 r
= journal_file_append_data(f
, iovec
[i
].iov_base
, iovec
[i
].iov_len
, &o
, &p
);
2577 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2578 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2579 * specific record, and give records with otherwise identical position (i.e. match in seqno,
2580 * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
2581 * objects in each file is different since they are keyed. Hence let's calculate the Jenkins
2582 * hash here for that. This also has the benefit that cursors for old and new journal files
2583 * are completely identical (they include the XOR hash after all). For classic Jenkins-hash
2584 * files things are easier, we can just take the value from the stored record directly. */
2586 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
2587 xor_hash
^= jenkins_hash64(iovec
[i
].iov_base
, iovec
[i
].iov_len
);
2589 xor_hash
^= le64toh(o
->data
.hash
);
2591 items
[i
] = (EntryItem
) {
2593 .hash
= le64toh(o
->data
.hash
),
2597 /* Order by the position on disk, in order to improve seek
2598 * times for rotating media. */
2599 typesafe_qsort(items
, n_iovec
, entry_item_cmp
);
2600 n_iovec
= remove_duplicate_entry_items(items
, n_iovec
);
2602 r
= journal_file_append_entry_internal(
2615 /* If the memory mapping triggered a SIGBUS then we return an
2616 * IO error and ignore the error code passed down to us, since
2617 * it is very likely just an effect of a nullified replacement
2620 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
2623 if (f
->post_change_timer
)
2624 schedule_post_change(f
);
2626 journal_file_post_change(f
);
2631 typedef struct ChainCacheItem
{
2632 uint64_t first
; /* The offset of the entry array object at the beginning of the chain,
2633 * i.e., le64toh(f->header->entry_array_offset), or le64toh(o->data.entry_offset). */
2634 uint64_t array
; /* The offset of the cached entry array object. */
2635 uint64_t begin
; /* The offset of the first item in the cached array. */
2636 uint64_t total
; /* The total number of items in all arrays before the cached one in the chain. */
2637 uint64_t last_index
; /* The last index we looked at in the cached array, to optimize locality when bisecting. */
2640 static void chain_cache_put(
2647 uint64_t last_index
) {
2652 /* If the chain item to cache for this chain is the
2653 * first one it's not worth caching anything */
2657 if (ordered_hashmap_size(h
) >= CHAIN_CACHE_MAX
) {
2658 ci
= ordered_hashmap_steal_first(h
);
2661 ci
= new(ChainCacheItem
, 1);
2668 if (ordered_hashmap_put(h
, &ci
->first
, ci
) < 0) {
2673 assert(ci
->first
== first
);
2678 ci
->last_index
= last_index
;
2681 static int bump_array_index(uint64_t *i
, direction_t direction
, uint64_t n
) {
2684 /* Increase or decrease the specified index, in the right direction. */
2686 if (direction
== DIRECTION_DOWN
) {
2701 static int bump_entry_array(
2703 Object
*o
, /* the current entry array object. */
2704 uint64_t offset
, /* the offset of the entry array object. */
2705 uint64_t first
, /* The offset of the first entry array object in the chain. */
2706 direction_t direction
,
2714 if (direction
== DIRECTION_DOWN
) {
2716 assert(o
->object
.type
== OBJECT_ENTRY_ARRAY
);
2718 *ret
= le64toh(o
->entry_array
.next_entry_array_offset
);
2721 /* Entry array chains are a singly linked list, so to find the previous array in the chain, we have
2722 * to start iterating from the top. */
2726 uint64_t p
= first
, q
= 0;
2727 while (p
> 0 && p
!= offset
) {
2728 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, p
, &o
);
2733 p
= le64toh(o
->entry_array
.next_entry_array_offset
);
2736 /* If we can't find the previous entry array in the entry array chain, we're likely dealing with a
2737 * corrupted journal file. */
2747 static int generic_array_get(
2749 uint64_t first
, /* The offset of the first entry array object in the chain. */
2750 uint64_t i
, /* The index of the target object counted from the beginning of the entry array chain. */
2751 direction_t direction
,
2752 Object
**ret_object
, /* The found object. */
2753 uint64_t *ret_offset
) { /* The offset of the found object. */
2755 uint64_t a
, t
= 0, k
;
2762 /* FIXME: fix return value assignment on success. */
2766 /* Try the chain cache first */
2767 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2768 if (ci
&& i
> ci
->total
) {
2775 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2776 if (IN_SET(r
, -EBADMSG
, -EADDRNOTAVAIL
)) {
2777 /* If there's corruption and we're going downwards, let's pretend we reached the
2778 * final entry in the entry array chain. */
2780 if (direction
== DIRECTION_DOWN
)
2783 /* If there's corruption and we're going upwards, move back to the previous entry
2784 * array and start iterating entries from there. */
2792 k
= journal_file_entry_array_n_items(f
, o
);
2799 /* The index is larger than the number of elements in the array. Let's move to the next array. */
2802 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
2805 /* If we've found the right location, now look for the first non-corrupt entry object (in the right
2809 if (i
== UINT64_MAX
) {
2810 r
= bump_entry_array(f
, o
, a
, first
, direction
, &a
);
2814 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2818 k
= journal_file_entry_array_n_items(f
, o
);
2822 if (direction
== DIRECTION_DOWN
)
2825 /* We moved to the previous array. The total must be decreased. */
2827 return -EBADMSG
; /* chain cache is broken ? */
2837 p
= journal_file_entry_array_item(f
, o
, i
);
2839 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, ret_object
);
2841 /* Let's cache this item for the next invocation */
2842 chain_cache_put(f
->chain_cache
, ci
, first
, a
, journal_file_entry_array_item(f
, o
, 0), t
, i
);
2849 if (!IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
))
2852 /* OK, so this entry is borked. Most likely some entry didn't get synced to
2853 * disk properly, let's see if the next one might work for us instead. */
2854 log_debug_errno(r
, "Entry item %" PRIu64
" is bad, skipping over it.", i
);
2856 } while (bump_array_index(&i
, direction
, k
) > 0);
2858 /* All entries tried in the above do-while loop are broken. Let's move to the next (or previous) array. */
2860 if (direction
== DIRECTION_DOWN
)
2861 /* We are going to the next array, the total must be incremented. */
2871 TEST_FOUND
, /* The current object passes the test. */
2872 TEST_LEFT
, /* The current object is in an earlier position, and the object we are looking
2873 * for should exist in a later position. */
2874 TEST_RIGHT
, /* The current object is in a later position, and the object we are looking for
2875 * should exist in an earlier position. */
2876 TEST_GOTO_NEXT
, /* No matching object exists in this array and earlier arrays, go to the next array. */
2877 TEST_GOTO_PREVIOUS
, /* No matching object exists in this array and later arrays, go to the previous array. */
2880 static int generic_array_bisect_step(
2882 Object
*array
, /* entry array object */
2883 uint64_t i
, /* index of the entry item in the array we will test. */
2885 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2886 direction_t direction
,
2887 uint64_t *m
, /* The maximum number of the entries we will check in the array. */
2888 uint64_t *left
, /* The index of the left boundary in the array. */
2889 uint64_t *right
) { /* The index of the right boundary in the array. */
2896 assert(test_object
);
2901 assert(i
<= *right
);
2902 assert(*right
< *m
);
2904 p
= journal_file_entry_array_item(f
, array
, i
);
2908 r
= test_object(f
, p
, needle
);
2909 if (IN_SET(r
, -EBADMSG
, -EADDRNOTAVAIL
)) {
2910 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short.");
2913 /* This happens on two situations:
2915 * a) i == 0 (hence, *left == 0):
2916 * The first entry in the array is corrupted, let's go back to the previous array.
2918 * b) *right == *left or *left + 1, and we are going to downwards:
2919 * In that case, the (i-1)-th object has been already tested in the previous call,
2920 * which returned TEST_LEFT. See below. So, there is no matching entry in this
2921 * array nor in the whole entry array chain. */
2922 assert(i
== 0 || (*right
- *left
<= 1 && direction
== DIRECTION_DOWN
));
2923 return TEST_GOTO_PREVIOUS
;
2926 /* Otherwise, cutting the array short. So, here we limit the number of elements we will see
2927 * in this array, and set the right boundary to the last possibly non-corrupted object. */
2935 if (r
== TEST_FOUND
)
2936 /* There may be multiple entries that match with the needle. When the direction is down, we
2937 * need to find the first matching entry, hence the right boundary can be moved, but the left
2938 * one cannot. Similarly, when the direction is up, we need to find the last matching entry,
2939 * hence the left boundary can be moved, but the right one cannot. */
2940 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2942 if (r
== TEST_RIGHT
) {
2943 /* Currently, left --- needle --- i --- right, hence we can move the right boundary to i. */
2944 if (direction
== DIRECTION_DOWN
)
2948 return TEST_GOTO_PREVIOUS
;
2952 /* Currently, left --- i --- needle --- right, hence we can move the left boundary to i. */
2953 if (direction
== DIRECTION_DOWN
) {
2954 /* Note, here *m is always positive, as by the assertions at the beginning, we have
2955 * 0 <= *left <= i <= *right < m */
2957 return TEST_GOTO_NEXT
;
2967 static int generic_array_bisect(
2969 uint64_t first
, /* The offset of the first entry array object in the chain. */
2970 uint64_t n
, /* The total number of elements in the chain of the entry array. */
2971 uint64_t needle
, /* The target value (e.g. seqnum, monotonic, realtime, ...). */
2972 int (*test_object
)(JournalFile
*f
,
2973 uint64_t p
, /* the offset of the (data or entry) object that will be tested. */
2975 direction_t direction
,
2976 Object
**ret_object
, /* The found object. */
2977 uint64_t *ret_offset
, /* The offset of the found object. */
2978 uint64_t *ret_idx
) { /* The index of the found object counted from the beginning of the entry array chain. */
2980 /* Given an entry array chain, this function finds the object "closest" to the given needle in the
2981 * chain, taking into account the provided direction. A function can be provided to determine how
2982 * an object is matched against the given needle.
2984 * Given a journal file, the offset of an object and the needle, the test_object() function should
2985 * return TEST_RIGHT if the needle is located earlier in the entry array chain, TEST_LEFT if the
2986 * needle is located later in the entry array chain, and TEST_FOUND if the object matches the needle.
2987 * If test_object() returns TEST_FOUND for a specific object, that object's information will be used
2988 * to populate the return values of this function. If test_object() never returns TEST_FOUND, the
2989 * return values are populated with the details of one of the objects closest to the needle. If the
2990 * direction is DIRECTION_UP, the earlier object is used. Otherwise, the later object is used.
2991 * If there are multiple objects that test_object() return TEST_FOUND for, then the first matching
2992 * object returned when direction is DIRECTION_DOWN. Otherwise the last object is returned. */
2994 uint64_t a
, p
, t
= 0, i
, last_index
= UINT64_MAX
;
3000 assert(test_object
);
3005 /* Start with the first array in the chain */
3008 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
3009 if (ci
&& n
> ci
->total
&& ci
->begin
!= 0) {
3010 /* Ah, we have iterated this bisection array chain previously! Let's see if we can skip ahead
3011 * in the chain, as far as the last time. But we can't jump backwards in the chain, so let's
3012 * check that first. */
3014 r
= test_object(f
, ci
->begin
, needle
);
3015 if (IN_SET(r
, -EBADMSG
, -EADDRNOTAVAIL
))
3016 log_debug_errno(r
, "Cached entry is corrupted, ignoring: %m");
3019 else if (r
== TEST_LEFT
) {
3020 /* OK, what we are looking for is right of the begin of this EntryArray, so let's
3021 * jump straight to previously cached array in the chain */
3026 last_index
= ci
->last_index
;
3031 uint64_t left
, right
, k
, m
, m_original
;
3033 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
3037 k
= journal_file_entry_array_n_items(f
, array
);
3038 m
= m_original
= MIN(k
, n
);
3045 if (direction
== DIRECTION_UP
) {
3046 /* If we're going upwards, the last entry of the previous array may pass the test,
3047 * and the first entry of the current array may not pass. In that case, the last
3048 * entry of the previous array must be returned. Hence, we need to test the first
3049 * entry of the current array. */
3050 r
= generic_array_bisect_step(f
, array
, 0, needle
, test_object
, direction
, &m
, &left
, &right
);
3053 if (r
== TEST_GOTO_PREVIOUS
)
3057 /* Test the last entry of this array, to determine if we should go to the next array. */
3058 r
= generic_array_bisect_step(f
, array
, right
, needle
, test_object
, direction
, &m
, &left
, &right
);
3061 if (r
== TEST_GOTO_PREVIOUS
)
3064 /* The expected entry should be in this array, (or the last entry of the previous array). */
3065 if (r
== TEST_RIGHT
) {
3067 /* If we cached the last index we looked at, let's try to not to jump too wildly
3068 * around and see if we can limit the range to look at early to the immediate
3069 * neighbors of the last index we looked at. */
3071 if (last_index
> 0 && left
< last_index
- 1 && last_index
- 1 < right
) {
3072 r
= generic_array_bisect_step(f
, array
, last_index
- 1, needle
, test_object
, direction
, &m
, &left
, &right
);
3075 if (r
== TEST_GOTO_PREVIOUS
)
3079 if (last_index
< UINT64_MAX
&& left
< last_index
+ 1 && last_index
+ 1 < right
) {
3080 r
= generic_array_bisect_step(f
, array
, last_index
+ 1, needle
, test_object
, direction
, &m
, &left
, &right
);
3083 if (r
== TEST_GOTO_PREVIOUS
)
3088 if (left
== right
) {
3089 /* We found one or more corrupted entries in generic_array_bisect_step().
3090 * In that case, the entry pointed by 'right' may not be tested.
3092 * When we are going to downwards, the entry object pointed by 'left'
3093 * has not been tested yet, Hence, even if left == right, we still
3094 * have to check the final entry to see if it actually matches.
3096 * On the other hand, when we are going to upwards, the entry pointed
3097 * by 'left' is always tested, So, it is not necessary to test the
3098 * final entry again. */
3099 if (m
!= m_original
&& direction
== DIRECTION_DOWN
) {
3100 r
= generic_array_bisect_step(f
, array
, left
, needle
, test_object
, direction
, &m
, &left
, &right
);
3103 if (IN_SET(r
, TEST_GOTO_PREVIOUS
, TEST_GOTO_NEXT
))
3104 return 0; /* The entry does not pass the test, or is corrupted */
3107 assert(left
== right
);
3114 assert(left
< right
);
3115 i
= (left
+ right
+ (direction
== DIRECTION_UP
)) / 2;
3117 r
= generic_array_bisect_step(f
, array
, i
, needle
, test_object
, direction
, &m
, &left
, &right
);
3120 if (r
== TEST_GOTO_PREVIOUS
)
3122 if (r
== TEST_GOTO_NEXT
)
3123 return 0; /* Found a corrupt entry, and the array was cut short. */
3127 /* Not found in this array (or the last entry of this array should be returned), go to the next array. */
3128 assert(r
== (direction
== DIRECTION_DOWN
? TEST_GOTO_NEXT
: TEST_LEFT
));
3131 if (direction
== DIRECTION_UP
) {
3142 last_index
= UINT64_MAX
;
3143 a
= le64toh(array
->entry_array
.next_entry_array_offset
);
3149 /* Not found in the current array, return the last entry of the previous array. */
3150 assert(r
== TEST_GOTO_PREVIOUS
);
3152 /* The current array is the first in the chain. no previous array. */
3156 /* When we are going downwards, there is no matching entries in the previous array. */
3157 if (direction
== DIRECTION_DOWN
)
3160 /* Indicate to go to the previous array later. Note, do not move to the previous array here,
3161 * as that may invalidate the current array object in the mmap cache and
3162 * journal_file_entry_array_item() below may read invalid address. */
3166 p
= journal_file_entry_array_item(f
, array
, 0);
3170 /* Let's cache this item for the next invocation */
3171 chain_cache_put(f
->chain_cache
, ci
, first
, a
, p
, t
, i
);
3173 if (i
== UINT64_MAX
) {
3176 /* Get the last entry of the previous array. */
3178 r
= bump_entry_array(f
, NULL
, a
, first
, DIRECTION_UP
, &a
);
3182 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
3186 m
= journal_file_entry_array_n_items(f
, array
);
3187 if (m
== 0 || t
< m
)
3194 p
= journal_file_entry_array_item(f
, array
, i
);
3199 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, ret_object
);
3213 static int generic_array_bisect_for_data(
3217 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
3218 direction_t direction
,
3219 Object
**ret_object
,
3220 uint64_t *ret_offset
) {
3222 uint64_t extra
, first
, n
;
3227 assert(d
->object
.type
== OBJECT_DATA
);
3228 assert(test_object
);
3230 n
= le64toh(d
->data
.n_entries
);
3233 n
--; /* n_entries is the number of entries linked to the data object, including the 'extra' entry. */
3235 extra
= le64toh(d
->data
.entry_offset
);
3236 first
= le64toh(d
->data
.entry_array_offset
);
3238 /* This bisects the array in object 'first', but first checks an extra. */
3239 r
= test_object(f
, extra
, needle
);
3243 if (direction
== DIRECTION_DOWN
) {
3244 /* If we are going downwards, then we need to return the first object that passes the test.
3245 * When there is no object that passes the test, we need to return the first object that
3246 * test_object() returns TEST_RIGHT for. */
3248 TEST_FOUND
, /* The 'extra' object passes the test. Hence, this is the first
3249 * object that passes the test. */
3250 TEST_RIGHT
)) /* The 'extra' object is the first object that test_object() returns
3251 * TEST_RIGHT for, and no object exists even in the chained arrays
3252 * that passes the test. */
3253 goto use_extra
; /* The 'extra' object is exactly the one we are looking for. It is
3254 * not necessary to bisect the chained arrays. */
3256 /* Otherwise, the 'extra' object is not the one we are looking for. Search in the arrays. */
3259 /* If we are going upwards, then we need to return the last object that passes the test.
3260 * When there is no object that passes the test, we need to return the the last object that
3261 * test_object() returns TEST_LEFT for. */
3262 if (r
== TEST_RIGHT
)
3263 return 0; /* Not only the 'extra' object, but also all objects in the chained arrays
3264 * will never get TEST_FOUND or TEST_LEFT. The object we are looking for
3265 * does not exist. */
3267 /* Even if the 'extra' object passes the test, there may be multiple objects in the arrays
3268 * that also pass the test. Hence, we need to bisect the arrays for finding the last matching
3272 r
= generic_array_bisect(f
, first
, n
, needle
, test_object
, direction
, ret_object
, ret_offset
, NULL
);
3274 return r
; /* When > 0, the found object is the first (or last, when DIRECTION_UP) object.
3275 * Hence, return the found object now. */
3277 /* No matching object found in the chained arrays.
3278 * DIRECTION_DOWN : the 'extra' object neither matches the condition. There is no matching object.
3279 * DIRECTION_UP : the 'extra' object matches the condition. So, return it. */
3280 if (direction
== DIRECTION_DOWN
)
3285 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, ret_object
);
3291 *ret_offset
= extra
;
3296 static int test_object_offset(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3302 else if (p
< needle
)
3308 int journal_file_move_to_entry_by_offset(
3311 direction_t direction
,
3312 Object
**ret_object
,
3313 uint64_t *ret_offset
) {
3318 return generic_array_bisect(
3320 le64toh(f
->header
->entry_array_offset
),
3321 le64toh(f
->header
->n_entries
),
3325 ret_object
, ret_offset
, NULL
);
3328 static int test_object_seqnum(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3336 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
3340 sq
= le64toh(READ_NOW(o
->entry
.seqnum
));
3343 else if (sq
< needle
)
3349 int journal_file_move_to_entry_by_seqnum(
3352 direction_t direction
,
3353 Object
**ret_object
,
3354 uint64_t *ret_offset
) {
3359 return generic_array_bisect(
3361 le64toh(f
->header
->entry_array_offset
),
3362 le64toh(f
->header
->n_entries
),
3366 ret_object
, ret_offset
, NULL
);
3369 static int test_object_realtime(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3377 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
3381 rt
= le64toh(READ_NOW(o
->entry
.realtime
));
3384 else if (rt
< needle
)
3390 int journal_file_move_to_entry_by_realtime(
3393 direction_t direction
,
3394 Object
**ret_object
,
3395 uint64_t *ret_offset
) {
3400 return generic_array_bisect(
3402 le64toh(f
->header
->entry_array_offset
),
3403 le64toh(f
->header
->n_entries
),
3405 test_object_realtime
,
3407 ret_object
, ret_offset
, NULL
);
3410 static int test_object_monotonic(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3418 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
3422 m
= le64toh(READ_NOW(o
->entry
.monotonic
));
3425 else if (m
< needle
)
3431 static int find_data_object_by_boot_id(
3434 Object
**ret_object
,
3435 uint64_t *ret_offset
) {
3437 char t
[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
3441 sd_id128_to_string(boot_id
, t
+ 9);
3442 return journal_file_find_data_object(f
, t
, sizeof(t
) - 1, ret_object
, ret_offset
);
3445 int journal_file_move_to_entry_by_monotonic(
3449 direction_t direction
,
3450 Object
**ret_object
,
3451 uint64_t *ret_offset
) {
3458 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, NULL
);
3462 return generic_array_bisect_for_data(
3466 test_object_monotonic
,
3468 ret_object
, ret_offset
);
3471 void journal_file_reset_location(JournalFile
*f
) {
3474 f
->location_type
= LOCATION_HEAD
;
3475 f
->current_offset
= 0;
3476 f
->current_seqnum
= 0;
3477 f
->current_realtime
= 0;
3478 f
->current_monotonic
= 0;
3479 zero(f
->current_boot_id
);
3480 f
->current_xor_hash
= 0;
3482 /* Also reset the previous reading direction. Otherwise, next_beyond_location() may wrongly handle we
3483 * already hit EOF. See issue #29216. */
3484 f
->last_direction
= _DIRECTION_INVALID
;
3487 void journal_file_save_location(JournalFile
*f
, Object
*o
, uint64_t offset
) {
3491 f
->location_type
= LOCATION_SEEK
;
3492 f
->current_offset
= offset
;
3493 f
->current_seqnum
= le64toh(o
->entry
.seqnum
);
3494 f
->current_realtime
= le64toh(o
->entry
.realtime
);
3495 f
->current_monotonic
= le64toh(o
->entry
.monotonic
);
3496 f
->current_boot_id
= o
->entry
.boot_id
;
3497 f
->current_xor_hash
= le64toh(o
->entry
.xor_hash
);
3500 static bool check_properly_ordered(uint64_t new_offset
, uint64_t old_offset
, direction_t direction
) {
3502 /* Consider it an error if any of the two offsets is uninitialized */
3503 if (old_offset
== 0 || new_offset
== 0)
3506 /* If we go down, the new offset must be larger than the old one. */
3507 return direction
== DIRECTION_DOWN
?
3508 new_offset
> old_offset
:
3509 new_offset
< old_offset
;
3512 int journal_file_next_entry(
3515 direction_t direction
,
3516 Object
**ret_object
,
3517 uint64_t *ret_offset
) {
3526 /* FIXME: fix return value assignment. */
3528 n
= le64toh(READ_NOW(f
->header
->n_entries
));
3532 /* When the input offset 'p' is zero, return the first (or last on DIRECTION_UP) entry. */
3534 return generic_array_get(f
,
3535 le64toh(f
->header
->entry_array_offset
),
3536 direction
== DIRECTION_DOWN
? 0 : n
- 1,
3538 ret_object
, ret_offset
);
3540 /* Otherwise, first find the nearest entry object. */
3541 r
= generic_array_bisect(f
,
3542 le64toh(f
->header
->entry_array_offset
),
3543 le64toh(f
->header
->n_entries
),
3547 ret_object
? &o
: NULL
, &q
, &i
);
3551 assert(direction
== DIRECTION_DOWN
? p
<= q
: q
<= p
);
3553 /* If the input offset 'p' points to an entry object, generic_array_bisect() should provides
3554 * the same offset, and the index needs to be shifted. Otherwise, use the found object as is,
3555 * as it is the nearest entry object from the input offset 'p'. */
3560 r
= bump_array_index(&i
, direction
, n
);
3564 /* And jump to it */
3565 r
= generic_array_get(f
, le64toh(f
->header
->entry_array_offset
), i
, direction
, ret_object
? &o
: NULL
, &q
);
3569 /* Ensure our array is properly ordered. */
3570 if (!check_properly_ordered(q
, p
, direction
))
3571 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
3572 "%s: entry array not properly ordered at entry index %" PRIu64
,
3583 int journal_file_move_to_entry_for_data(
3586 direction_t direction
,
3587 Object
**ret_object
,
3588 uint64_t *ret_offset
) {
3590 uint64_t extra
, first
, n
;
3595 assert(d
->object
.type
== OBJECT_DATA
);
3596 assert(IN_SET(direction
, DIRECTION_DOWN
, DIRECTION_UP
));
3598 /* FIXME: fix return value assignment. */
3600 /* This returns the first (when the direction is down, otherwise the last) entry linked to the
3601 * specified data object. */
3603 n
= le64toh(d
->data
.n_entries
);
3606 n
--; /* n_entries is the number of entries linked to the data object, including the 'extra' entry. */
3608 extra
= le64toh(d
->data
.entry_offset
);
3609 first
= le64toh(d
->data
.entry_array_offset
);
3611 if (direction
== DIRECTION_DOWN
&& extra
> 0) {
3612 /* When we are going downwards, first try to read the extra entry. */
3613 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, ret_object
);
3616 if (!IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
))
3621 /* DIRECTION_DOWN : The extra entry is broken, falling back to the entries in the array.
3622 * DIRECTION_UP : Try to find a valid entry in the array from the tail. */
3623 r
= generic_array_get(f
,
3625 direction
== DIRECTION_DOWN
? 0 : n
- 1,
3627 ret_object
, ret_offset
);
3628 if (!IN_SET(r
, 0, -EADDRNOTAVAIL
, -EBADMSG
))
3629 return r
; /* found or critical error. */
3632 if (direction
== DIRECTION_UP
&& extra
> 0) {
3633 /* No valid entry exists in the chained array, falling back to the extra entry. */
3634 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, ret_object
);
3643 *ret_offset
= extra
;
3648 int journal_file_move_to_entry_by_offset_for_data(
3652 direction_t direction
,
3653 Object
**ret
, uint64_t *ret_offset
) {
3657 assert(d
->object
.type
== OBJECT_DATA
);
3659 return generic_array_bisect_for_data(
3668 int journal_file_move_to_entry_by_monotonic_for_data(
3673 direction_t direction
,
3674 Object
**ret_object
,
3675 uint64_t *ret_offset
) {
3683 assert(d
->object
.type
== OBJECT_DATA
);
3685 /* First, pin the given data object, before reading the _BOOT_ID= data object below. */
3686 r
= journal_file_pin_object(f
, d
);
3690 /* Then, read a data object for _BOOT_ID= and seek by time. */
3691 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, NULL
);
3695 r
= generic_array_bisect_for_data(f
,
3698 test_object_monotonic
,
3704 /* And now, continue seeking until we find an entry that exists in both bisection arrays. */
3708 /* The journal entry found by the above bisect_plus_one() may not have the specified data,
3709 * that is, it may not be linked in the data object. So, we need to check that. */
3711 r
= journal_file_move_to_entry_by_offset_for_data(
3712 f
, d
, z
, direction
, ret_object
? &entry
: NULL
, &p
);
3716 break; /* The journal entry has the specified data. Yay! */
3718 /* If the entry does not have the data, then move to the next (or previous, depends on the
3719 * 'direction') entry linked to the data object. But, the next entry may be in another boot.
3720 * So, we need to check that the entry has the matching boot ID. */
3722 r
= journal_file_move_to_entry_by_offset_for_data(
3723 f
, o
, p
, direction
, ret_object
? &entry
: NULL
, &z
);
3727 break; /* The journal entry has the specified boot ID. Yay! */
3729 /* If not, let's try to the next entry... */
3733 *ret_object
= entry
;
3739 int journal_file_move_to_entry_by_seqnum_for_data(
3743 direction_t direction
,
3744 Object
**ret_object
,
3745 uint64_t *ret_offset
) {
3749 assert(d
->object
.type
== OBJECT_DATA
);
3751 return generic_array_bisect_for_data(
3757 ret_object
, ret_offset
);
3760 int journal_file_move_to_entry_by_realtime_for_data(
3764 direction_t direction
,
3765 Object
**ret
, uint64_t *ret_offset
) {
3769 assert(d
->object
.type
== OBJECT_DATA
);
3771 return generic_array_bisect_for_data(
3775 test_object_realtime
,
3780 void journal_file_dump(JournalFile
*f
) {
3788 journal_file_print_header(f
);
3790 p
= le64toh(READ_NOW(f
->header
->header_size
));
3795 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &o
);
3799 s
= journal_object_type_to_string(o
->object
.type
);
3801 switch (o
->object
.type
) {
3806 printf("Type: %s seqnum=%"PRIu64
" monotonic=%"PRIu64
" realtime=%"PRIu64
"\n",
3808 le64toh(o
->entry
.seqnum
),
3809 le64toh(o
->entry
.monotonic
),
3810 le64toh(o
->entry
.realtime
));
3816 printf("Type: %s seqnum=%"PRIu64
" epoch=%"PRIu64
"\n",
3818 le64toh(o
->tag
.seqnum
),
3819 le64toh(o
->tag
.epoch
));
3824 printf("Type: %s \n", s
);
3826 printf("Type: unknown (%i)", o
->object
.type
);
3831 c
= COMPRESSION_FROM_OBJECT(o
);
3832 if (c
> COMPRESSION_NONE
)
3833 printf("Flags: %s\n",
3834 compression_to_string(c
));
3836 if (p
== le64toh(f
->header
->tail_object_offset
))
3839 p
+= ALIGN64(le64toh(o
->object
.size
));
3844 log_error("File corrupt");
3847 /* Note: the lifetime of the compound literal is the immediately surrounding block. */
3848 #define FORMAT_TIMESTAMP_SAFE(t) (FORMAT_TIMESTAMP(t) ?: " --- ")
3850 void journal_file_print_header(JournalFile
*f
) {
3856 printf("File path: %s\n"
3860 "Sequential number ID: %s\n"
3862 "Compatible flags:%s%s%s%s\n"
3863 "Incompatible flags:%s%s%s%s%s%s\n"
3864 "Header size: %"PRIu64
"\n"
3865 "Arena size: %"PRIu64
"\n"
3866 "Data hash table size: %"PRIu64
"\n"
3867 "Field hash table size: %"PRIu64
"\n"
3868 "Rotate suggested: %s\n"
3869 "Head sequential number: %"PRIu64
" (%"PRIx64
")\n"
3870 "Tail sequential number: %"PRIu64
" (%"PRIx64
")\n"
3871 "Head realtime timestamp: %s (%"PRIx64
")\n"
3872 "Tail realtime timestamp: %s (%"PRIx64
")\n"
3873 "Tail monotonic timestamp: %s (%"PRIx64
")\n"
3874 "Objects: %"PRIu64
"\n"
3875 "Entry objects: %"PRIu64
"\n",
3877 SD_ID128_TO_STRING(f
->header
->file_id
),
3878 SD_ID128_TO_STRING(f
->header
->machine_id
),
3879 SD_ID128_TO_STRING(f
->header
->tail_entry_boot_id
),
3880 SD_ID128_TO_STRING(f
->header
->seqnum_id
),
3881 f
->header
->state
== STATE_OFFLINE
? "OFFLINE" :
3882 f
->header
->state
== STATE_ONLINE
? "ONLINE" :
3883 f
->header
->state
== STATE_ARCHIVED
? "ARCHIVED" : "UNKNOWN",
3884 JOURNAL_HEADER_SEALED(f
->header
) ? " SEALED" : "",
3885 JOURNAL_HEADER_SEALED_CONTINUOUS(f
->header
) ? " SEALED_CONTINUOUS" : "",
3886 JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f
->header
) ? " TAIL_ENTRY_BOOT_ID" : "",
3887 (le32toh(f
->header
->compatible_flags
) & ~HEADER_COMPATIBLE_ANY
) ? " ???" : "",
3888 JOURNAL_HEADER_COMPRESSED_XZ(f
->header
) ? " COMPRESSED-XZ" : "",
3889 JOURNAL_HEADER_COMPRESSED_LZ4(f
->header
) ? " COMPRESSED-LZ4" : "",
3890 JOURNAL_HEADER_COMPRESSED_ZSTD(f
->header
) ? " COMPRESSED-ZSTD" : "",
3891 JOURNAL_HEADER_KEYED_HASH(f
->header
) ? " KEYED-HASH" : "",
3892 JOURNAL_HEADER_COMPACT(f
->header
) ? " COMPACT" : "",
3893 (le32toh(f
->header
->incompatible_flags
) & ~HEADER_INCOMPATIBLE_ANY
) ? " ???" : "",
3894 le64toh(f
->header
->header_size
),
3895 le64toh(f
->header
->arena_size
),
3896 le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
),
3897 le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
),
3898 yes_no(journal_file_rotate_suggested(f
, 0, LOG_DEBUG
)),
3899 le64toh(f
->header
->head_entry_seqnum
), le64toh(f
->header
->head_entry_seqnum
),
3900 le64toh(f
->header
->tail_entry_seqnum
), le64toh(f
->header
->tail_entry_seqnum
),
3901 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->head_entry_realtime
)), le64toh(f
->header
->head_entry_realtime
),
3902 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->tail_entry_realtime
)), le64toh(f
->header
->tail_entry_realtime
),
3903 FORMAT_TIMESPAN(le64toh(f
->header
->tail_entry_monotonic
), USEC_PER_MSEC
), le64toh(f
->header
->tail_entry_monotonic
),
3904 le64toh(f
->header
->n_objects
),
3905 le64toh(f
->header
->n_entries
));
3907 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
3908 printf("Data objects: %"PRIu64
"\n"
3909 "Data hash table fill: %.1f%%\n",
3910 le64toh(f
->header
->n_data
),
3911 100.0 * (double) le64toh(f
->header
->n_data
) / ((double) (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
))));
3913 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
3914 printf("Field objects: %"PRIu64
"\n"
3915 "Field hash table fill: %.1f%%\n",
3916 le64toh(f
->header
->n_fields
),
3917 100.0 * (double) le64toh(f
->header
->n_fields
) / ((double) (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
))));
3919 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_tags
))
3920 printf("Tag objects: %"PRIu64
"\n",
3921 le64toh(f
->header
->n_tags
));
3922 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
3923 printf("Entry array objects: %"PRIu64
"\n",
3924 le64toh(f
->header
->n_entry_arrays
));
3926 if (JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
))
3927 printf("Deepest field hash chain: %" PRIu64
"\n",
3928 f
->header
->field_hash_chain_depth
);
3930 if (JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
))
3931 printf("Deepest data hash chain: %" PRIu64
"\n",
3932 f
->header
->data_hash_chain_depth
);
3934 if (fstat(f
->fd
, &st
) >= 0)
3935 printf("Disk usage: %s\n", FORMAT_BYTES((uint64_t) st
.st_blocks
* 512ULL));
3938 static int journal_file_warn_btrfs(JournalFile
*f
) {
3944 /* Before we write anything, check if the COW logic is turned
3945 * off on btrfs. Given our write pattern that is quite
3946 * unfriendly to COW file systems this should greatly improve
3947 * performance on COW file systems, such as btrfs, at the
3948 * expense of data integrity features (which shouldn't be too
3949 * bad, given that we do our own checksumming). */
3951 r
= fd_is_fs_type(f
->fd
, BTRFS_SUPER_MAGIC
);
3953 return log_ratelimit_warning_errno(r
, JOURNAL_LOG_RATELIMIT
, "Failed to determine if journal is on btrfs: %m");
3957 r
= read_attr_fd(f
->fd
, &attrs
);
3959 return log_ratelimit_warning_errno(r
, JOURNAL_LOG_RATELIMIT
, "Failed to read file attributes: %m");
3961 if (attrs
& FS_NOCOW_FL
) {
3962 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3966 log_ratelimit_notice(JOURNAL_LOG_RATELIMIT
,
3967 "Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3968 "This is likely to slow down journal access substantially, please consider turning "
3969 "off the copy-on-write file attribute on the journal directory, using chattr +C.",
3975 static void journal_default_metrics(JournalMetrics
*m
, int fd
, bool compact
) {
3977 uint64_t fs_size
= 0;
3982 if (fstatvfs(fd
, &ss
) >= 0)
3983 fs_size
= u64_multiply_safe(ss
.f_frsize
, ss
.f_blocks
);
3985 log_debug_errno(errno
, "Failed to determine disk size: %m");
3987 if (m
->max_use
== UINT64_MAX
) {
3990 m
->max_use
= CLAMP(PAGE_ALIGN_U64(fs_size
/ 10), /* 10% of file system size */
3991 MAX_USE_LOWER
, MAX_USE_UPPER
);
3993 m
->max_use
= MAX_USE_LOWER
;
3995 m
->max_use
= PAGE_ALIGN_U64(m
->max_use
);
3997 if (m
->max_use
!= 0 && m
->max_use
< JOURNAL_FILE_SIZE_MIN
*2)
3998 m
->max_use
= JOURNAL_FILE_SIZE_MIN
*2;
4001 if (m
->min_use
== UINT64_MAX
) {
4003 m
->min_use
= CLAMP(PAGE_ALIGN_U64(fs_size
/ 50), /* 2% of file system size */
4004 MIN_USE_LOW
, MIN_USE_HIGH
);
4006 m
->min_use
= MIN_USE_LOW
;
4009 if (m
->min_use
> m
->max_use
)
4010 m
->min_use
= m
->max_use
;
4012 if (m
->max_size
== UINT64_MAX
)
4013 m
->max_size
= MIN(PAGE_ALIGN_U64(m
->max_use
/ 8), /* 8 chunks */
4016 m
->max_size
= PAGE_ALIGN_U64(m
->max_size
);
4018 if (compact
&& m
->max_size
> JOURNAL_COMPACT_SIZE_MAX
)
4019 m
->max_size
= JOURNAL_COMPACT_SIZE_MAX
;
4021 if (m
->max_size
!= 0) {
4022 if (m
->max_size
< JOURNAL_FILE_SIZE_MIN
)
4023 m
->max_size
= JOURNAL_FILE_SIZE_MIN
;
4025 if (m
->max_use
!= 0 && m
->max_size
*2 > m
->max_use
)
4026 m
->max_use
= m
->max_size
*2;
4029 if (m
->min_size
== UINT64_MAX
)
4030 m
->min_size
= JOURNAL_FILE_SIZE_MIN
;
4032 m
->min_size
= CLAMP(PAGE_ALIGN_U64(m
->min_size
),
4033 JOURNAL_FILE_SIZE_MIN
,
4034 m
->max_size
?: UINT64_MAX
);
4036 if (m
->keep_free
== UINT64_MAX
) {
4038 m
->keep_free
= MIN(PAGE_ALIGN_U64(fs_size
/ 20), /* 5% of file system size */
4041 m
->keep_free
= DEFAULT_KEEP_FREE
;
4044 if (m
->n_max_files
== UINT64_MAX
)
4045 m
->n_max_files
= DEFAULT_N_MAX_FILES
;
4047 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64
,
4048 FORMAT_BYTES(m
->min_use
),
4049 FORMAT_BYTES(m
->max_use
),
4050 FORMAT_BYTES(m
->max_size
),
4051 FORMAT_BYTES(m
->min_size
),
4052 FORMAT_BYTES(m
->keep_free
),
4056 int journal_file_open(
4060 JournalFileFlags file_flags
,
4062 uint64_t compress_threshold_bytes
,
4063 JournalMetrics
*metrics
,
4064 MMapCache
*mmap_cache
,
4065 JournalFile
*template,
4066 JournalFile
**ret
) {
4068 bool newly_created
= false;
4073 assert(fd
>= 0 || fname
);
4074 assert(file_flags
>= 0);
4075 assert(file_flags
<= _JOURNAL_FILE_FLAGS_MAX
);
4079 if (!IN_SET((open_flags
& O_ACCMODE
), O_RDONLY
, O_RDWR
))
4082 if ((open_flags
& O_ACCMODE
) == O_RDONLY
&& FLAGS_SET(open_flags
, O_CREAT
))
4085 if (fname
&& (open_flags
& O_CREAT
) && !endswith(fname
, ".journal"))
4088 f
= new(JournalFile
, 1);
4092 *f
= (JournalFile
) {
4095 .open_flags
= open_flags
,
4096 .compress_threshold_bytes
= compress_threshold_bytes
== UINT64_MAX
?
4097 DEFAULT_COMPRESS_THRESHOLD
:
4098 MAX(MIN_COMPRESS_THRESHOLD
, compress_threshold_bytes
),
4099 .strict_order
= FLAGS_SET(file_flags
, JOURNAL_STRICT_ORDER
),
4100 .newest_boot_id_prioq_idx
= PRIOQ_IDX_NULL
,
4101 .last_direction
= _DIRECTION_INVALID
,
4105 f
->path
= strdup(fname
);
4113 /* If we don't know the path, fill in something explanatory and vaguely useful */
4114 if (asprintf(&f
->path
, "/proc/self/%i", fd
) < 0) {
4120 f
->chain_cache
= ordered_hashmap_new(&uint64_hash_ops
);
4121 if (!f
->chain_cache
) {
4127 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
4128 * or so, we likely fail quickly than block for long. For regular files O_NONBLOCK has no effect, hence
4129 * it doesn't hurt in that case. */
4131 f
->fd
= openat_report_new(AT_FDCWD
, f
->path
, f
->open_flags
|O_CLOEXEC
|O_NONBLOCK
, f
->mode
, &newly_created
);
4137 /* fds we opened here by us should also be closed by us. */
4140 r
= fd_nonblock(f
->fd
, false);
4144 if (!newly_created
) {
4145 r
= journal_file_fstat(f
);
4150 r
= journal_file_fstat(f
);
4154 /* If we just got the fd passed in, we don't really know if we created the file anew */
4155 newly_created
= f
->last_stat
.st_size
== 0 && journal_file_writable(f
);
4158 r
= mmap_cache_add_fd(mmap_cache
, f
->fd
, mmap_prot_from_open_flags(open_flags
), &f
->cache_fd
);
4162 if (newly_created
) {
4163 (void) journal_file_warn_btrfs(f
);
4165 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
4166 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
4167 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
4168 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
4169 * solely on mtime/atime/ctime of the file. */
4170 (void) fd_setcrtime(f
->fd
, 0);
4172 r
= journal_file_init_header(f
, file_flags
, template);
4176 r
= journal_file_fstat(f
);
4181 if (f
->last_stat
.st_size
< (off_t
) HEADER_SIZE_MIN
) {
4186 r
= mmap_cache_fd_get(f
->cache_fd
, MMAP_CACHE_CATEGORY_HEADER
, true, 0, PAGE_ALIGN(sizeof(Header
)), &f
->last_stat
, &h
);
4188 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
4189 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
4199 if (!newly_created
) {
4200 r
= journal_file_verify_header(f
);
4206 if (!newly_created
&& journal_file_writable(f
) && JOURNAL_HEADER_SEALED(f
->header
)) {
4207 r
= journal_file_fss_load(f
);
4213 if (journal_file_writable(f
)) {
4215 journal_default_metrics(metrics
, f
->fd
, JOURNAL_HEADER_COMPACT(f
->header
));
4216 f
->metrics
= *metrics
;
4217 } else if (template)
4218 f
->metrics
= template->metrics
;
4220 r
= journal_file_refresh_header(f
);
4226 r
= journal_file_hmac_setup(f
);
4231 if (newly_created
) {
4232 r
= journal_file_setup_field_hash_table(f
);
4236 r
= journal_file_setup_data_hash_table(f
);
4241 r
= journal_file_append_first_tag(f
);
4247 if (mmap_cache_fd_got_sigbus(f
->cache_fd
)) {
4252 if (template && template->post_change_timer
) {
4253 r
= journal_file_enable_post_change_timer(
4255 sd_event_source_get_event(template->post_change_timer
),
4256 template->post_change_timer_period
);
4262 /* The file is opened now successfully, thus we take possession of any passed in fd. */
4265 if (DEBUG_LOGGING
) {
4266 static int last_seal
= -1, last_keyed_hash
= -1;
4267 static Compression last_compression
= _COMPRESSION_INVALID
;
4268 static uint64_t last_bytes
= UINT64_MAX
;
4270 if (last_seal
!= JOURNAL_HEADER_SEALED(f
->header
) ||
4271 last_keyed_hash
!= JOURNAL_HEADER_KEYED_HASH(f
->header
) ||
4272 last_compression
!= JOURNAL_FILE_COMPRESSION(f
) ||
4273 last_bytes
!= f
->compress_threshold_bytes
) {
4275 log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
4276 yes_no(JOURNAL_HEADER_SEALED(f
->header
)), yes_no(JOURNAL_HEADER_KEYED_HASH(f
->header
)),
4277 compression_to_string(JOURNAL_FILE_COMPRESSION(f
)), FORMAT_BYTES(f
->compress_threshold_bytes
));
4278 last_seal
= JOURNAL_HEADER_SEALED(f
->header
);
4279 last_keyed_hash
= JOURNAL_HEADER_KEYED_HASH(f
->header
);
4280 last_compression
= JOURNAL_FILE_COMPRESSION(f
);
4281 last_bytes
= f
->compress_threshold_bytes
;
4289 if (f
->cache_fd
&& mmap_cache_fd_got_sigbus(f
->cache_fd
))
4292 (void) journal_file_close(f
);
4294 if (newly_created
&& fd
< 0)
4295 (void) unlink(fname
);
4300 int journal_file_parse_uid_from_filename(const char *path
, uid_t
*ret_uid
) {
4301 _cleanup_free_
char *buf
= NULL
, *p
= NULL
;
4302 const char *a
, *b
, *at
;
4305 /* This helper returns -EREMOTE when the filename doesn't match user online/offline journal
4306 * pattern. Hence it currently doesn't parse archived or disposed user journals. */
4311 r
= path_extract_filename(path
, &p
);
4314 if (r
== O_DIRECTORY
)
4317 a
= startswith(p
, "user-");
4320 b
= endswith(p
, ".journal");
4324 at
= strchr(a
, '@');
4328 buf
= strndup(a
, b
-a
);
4332 return parse_uid(buf
, ret_uid
);
4335 int journal_file_archive(JournalFile
*f
, char **ret_previous_path
) {
4336 _cleanup_free_
char *p
= NULL
;
4340 if (!journal_file_writable(f
))
4343 /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
4344 * rotation, since we don't know the actual path, and couldn't rename the file hence. */
4345 if (path_startswith(f
->path
, "/proc/self/fd"))
4348 if (!endswith(f
->path
, ".journal"))
4351 if (asprintf(&p
, "%.*s@" SD_ID128_FORMAT_STR
"-%016"PRIx64
"-%016"PRIx64
".journal",
4352 (int) strlen(f
->path
) - 8, f
->path
,
4353 SD_ID128_FORMAT_VAL(f
->header
->seqnum_id
),
4354 le64toh(f
->header
->head_entry_seqnum
),
4355 le64toh(f
->header
->head_entry_realtime
)) < 0)
4358 /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
4359 * ignore that case. */
4360 if (rename(f
->path
, p
) < 0 && errno
!= ENOENT
)
4363 /* Sync the rename to disk */
4364 (void) fsync_directory_of_file(f
->fd
);
4366 if (ret_previous_path
)
4367 *ret_previous_path
= f
->path
;
4371 f
->path
= TAKE_PTR(p
);
4373 /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
4374 * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
4375 * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
4376 * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
4383 int journal_file_dispose(int dir_fd
, const char *fname
) {
4384 _cleanup_free_
char *p
= NULL
;
4388 /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shutdown. Note that
4389 * this is done without looking into the file or changing any of its contents. The idea is that this is called
4390 * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
4391 * for writing anymore. */
4393 if (!endswith(fname
, ".journal"))
4396 if (asprintf(&p
, "%.*s@%016" PRIx64
"-%016" PRIx64
".journal~",
4397 (int) strlen(fname
) - 8, fname
,
4398 now(CLOCK_REALTIME
),
4402 if (renameat(dir_fd
, fname
, dir_fd
, p
) < 0)
4408 int journal_file_copy_entry(
4414 sd_id128_t
*seqnum_id
) {
4416 _cleanup_free_ EntryItem
*items_alloc
= NULL
;
4418 uint64_t n
, m
= 0, xor_hash
= 0;
4428 if (!journal_file_writable(to
))
4431 ts
= (dual_timestamp
) {
4432 .monotonic
= le64toh(o
->entry
.monotonic
),
4433 .realtime
= le64toh(o
->entry
.realtime
),
4435 boot_id
= o
->entry
.boot_id
;
4437 n
= journal_file_entry_n_items(from
, o
);
4441 if (n
< ALLOCA_MAX
/ sizeof(EntryItem
) / 2)
4442 items
= newa(EntryItem
, n
);
4444 items_alloc
= new(EntryItem
, n
);
4448 items
= items_alloc
;
4451 for (uint64_t i
= 0; i
< n
; i
++) {
4457 q
= journal_file_entry_item_object_offset(from
, o
, i
);
4458 r
= journal_file_data_payload(from
, NULL
, q
, NULL
, 0, 0, &data
, &l
);
4459 if (IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
)) {
4460 log_debug_errno(r
, "Entry item %"PRIu64
" data object is bad, skipping over it: %m", i
);
4470 r
= journal_file_append_data(to
, data
, l
, &u
, &h
);
4474 if (JOURNAL_HEADER_KEYED_HASH(to
->header
))
4475 xor_hash
^= jenkins_hash64(data
, l
);
4477 xor_hash
^= le64toh(u
->data
.hash
);
4479 items
[m
++] = (EntryItem
) {
4481 .hash
= le64toh(u
->data
.hash
),
4488 r
= journal_file_append_entry_internal(
4492 &from
->header
->machine_id
,
4498 /* ret_object= */ NULL
,
4499 /* ret_offset= */ NULL
);
4501 if (mmap_cache_fd_got_sigbus(to
->cache_fd
))
4507 void journal_reset_metrics(JournalMetrics
*m
) {
4510 /* Set everything to "pick automatic values". */
4512 *m
= (JournalMetrics
) {
4513 .min_use
= UINT64_MAX
,
4514 .max_use
= UINT64_MAX
,
4515 .min_size
= UINT64_MAX
,
4516 .max_size
= UINT64_MAX
,
4517 .keep_free
= UINT64_MAX
,
4518 .n_max_files
= UINT64_MAX
,
4522 int journal_file_get_cutoff_realtime_usec(JournalFile
*f
, usec_t
*ret_from
, usec_t
*ret_to
) {
4525 assert(ret_from
|| ret_to
);
4528 if (f
->header
->head_entry_realtime
== 0)
4531 *ret_from
= le64toh(f
->header
->head_entry_realtime
);
4535 if (f
->header
->tail_entry_realtime
== 0)
4538 *ret_to
= le64toh(f
->header
->tail_entry_realtime
);
4544 int journal_file_get_cutoff_monotonic_usec(JournalFile
*f
, sd_id128_t boot_id
, usec_t
*ret_from
, usec_t
*ret_to
) {
4550 assert(ret_from
|| ret_to
);
4552 /* FIXME: fix return value assignment on success with 0. */
4554 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, &p
);
4558 if (le64toh(o
->data
.n_entries
) <= 0)
4562 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, le64toh(o
->data
.entry_offset
), &o
);
4566 *ret_from
= le64toh(o
->entry
.monotonic
);
4570 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
4574 r
= journal_file_move_to_entry_for_data(f
, o
, DIRECTION_UP
, &o
, NULL
);
4578 *ret_to
= le64toh(o
->entry
.monotonic
);
4584 bool journal_file_rotate_suggested(JournalFile
*f
, usec_t max_file_usec
, int log_level
) {
4588 /* If we gained new header fields we gained new features,
4589 * hence suggest a rotation */
4590 if (le64toh(f
->header
->header_size
) < sizeof(Header
)) {
4591 log_ratelimit_full(log_level
, JOURNAL_LOG_RATELIMIT
,
4592 "%s uses an outdated header, suggesting rotation.", f
->path
);
4596 /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
4597 * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
4598 * need the n_data field, which only exists in newer versions. */
4600 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
4601 if (le64toh(f
->header
->n_data
) * 4ULL > (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
)) * 3ULL) {
4603 log_level
, JOURNAL_LOG_RATELIMIT
,
4604 "Data hash table of %s has a fill level at %.1f (%"PRIu64
" of %"PRIu64
" items, %"PRIu64
" file size, %"PRIu64
" bytes per hash table item), suggesting rotation.",
4606 100.0 * (double) le64toh(f
->header
->n_data
) / ((double) (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
))),
4607 le64toh(f
->header
->n_data
),
4608 le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
),
4609 (uint64_t) f
->last_stat
.st_size
,
4610 f
->last_stat
.st_size
/ le64toh(f
->header
->n_data
));
4614 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
4615 if (le64toh(f
->header
->n_fields
) * 4ULL > (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
)) * 3ULL) {
4617 log_level
, JOURNAL_LOG_RATELIMIT
,
4618 "Field hash table of %s has a fill level at %.1f (%"PRIu64
" of %"PRIu64
" items), suggesting rotation.",
4620 100.0 * (double) le64toh(f
->header
->n_fields
) / ((double) (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
))),
4621 le64toh(f
->header
->n_fields
),
4622 le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
));
4626 /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
4627 * longest chain is longer than some threshold, let's suggest rotation. */
4628 if (JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) &&
4629 le64toh(f
->header
->data_hash_chain_depth
) > HASH_CHAIN_DEPTH_MAX
) {
4631 log_level
, JOURNAL_LOG_RATELIMIT
,
4632 "Data hash table of %s has deepest hash chain of length %" PRIu64
", suggesting rotation.",
4633 f
->path
, le64toh(f
->header
->data_hash_chain_depth
));
4637 if (JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) &&
4638 le64toh(f
->header
->field_hash_chain_depth
) > HASH_CHAIN_DEPTH_MAX
) {
4640 log_level
, JOURNAL_LOG_RATELIMIT
,
4641 "Field hash table of %s has deepest hash chain of length at %" PRIu64
", suggesting rotation.",
4642 f
->path
, le64toh(f
->header
->field_hash_chain_depth
));
4646 /* Are the data objects properly indexed by field objects? */
4647 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
) &&
4648 JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
) &&
4649 le64toh(f
->header
->n_data
) > 0 &&
4650 le64toh(f
->header
->n_fields
) == 0) {
4652 log_level
, JOURNAL_LOG_RATELIMIT
,
4653 "Data objects of %s are not indexed by field objects, suggesting rotation.",
4658 if (max_file_usec
> 0) {
4661 h
= le64toh(f
->header
->head_entry_realtime
);
4662 t
= now(CLOCK_REALTIME
);
4664 if (h
> 0 && t
> h
+ max_file_usec
) {
4666 log_level
, JOURNAL_LOG_RATELIMIT
,
4667 "Oldest entry in %s is older than the configured file retention duration (%s), suggesting rotation.",
4668 f
->path
, FORMAT_TIMESPAN(max_file_usec
, USEC_PER_SEC
));
4676 static const char * const journal_object_type_table
[] = {
4677 [OBJECT_UNUSED
] = "unused",
4678 [OBJECT_DATA
] = "data",
4679 [OBJECT_FIELD
] = "field",
4680 [OBJECT_ENTRY
] = "entry",
4681 [OBJECT_DATA_HASH_TABLE
] = "data hash table",
4682 [OBJECT_FIELD_HASH_TABLE
] = "field hash table",
4683 [OBJECT_ENTRY_ARRAY
] = "entry array",
4684 [OBJECT_TAG
] = "tag",
4687 DEFINE_STRING_TABLE_LOOKUP_TO_STRING(journal_object_type
, ObjectType
);