1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
6 #include <linux/magic.h>
10 #include <sys/statvfs.h>
16 #include "alloc-util.h"
17 #include "chattr-util.h"
21 #include "format-util.h"
23 #include "id128-util.h"
24 #include "journal-authenticate.h"
25 #include "journal-def.h"
26 #include "journal-file.h"
27 #include "journal-internal.h"
29 #include "memory-util.h"
30 #include "missing_threads.h"
31 #include "path-util.h"
33 #include "random-util.h"
35 #include "sort-util.h"
36 #include "stat-util.h"
37 #include "string-table.h"
38 #include "string-util.h"
40 #include "sync-util.h"
41 #include "user-util.h"
42 #include "xattr-util.h"
44 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
45 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
47 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
48 #define MIN_COMPRESS_THRESHOLD (8ULL)
50 /* This is the minimum journal file size */
51 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
52 #define JOURNAL_COMPACT_SIZE_MAX UINT32_MAX /* 4 GiB */
54 /* These are the lower and upper bounds if we deduce the max_use value
55 * from the file system size */
56 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
57 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
59 /* Those are the lower and upper bounds for the minimal use limit,
60 * i.e. how much we'll use even if keep_free suggests otherwise. */
61 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
62 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
64 /* This is the upper bound if we deduce max_size from max_use */
65 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
67 /* This is the upper bound if we deduce the keep_free value from the
68  * file system size */
69 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
71 /* This is the keep_free value when we can't determine the system
72  * size */
73 #define DEFAULT_KEEP_FREE (1024 * 1024ULL) /* 1 MB */
75 /* This is the default maximum number of journal files to keep around. */
76 #define DEFAULT_N_MAX_FILES 100
78 /* n_data was the first entry we added after the initial file format design */
79 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
81 /* How many entries to keep in the entry array chain cache at max */
82 #define CHAIN_CACHE_MAX 20
84 /* How much to increase the journal file size at once each time we allocate something new. */
85 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL) /* 8MB */
87 /* Reread fstat() of the file for detecting deletions at least this often */
88 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
90 /* The mmap context to use for the header we pick as one above the last defined typed */
91 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
93 /* Longest hash chain to rotate after */
94 #define HASH_CHAIN_DEPTH_MAX 100
97 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
/* Translate the access mode bits of open(2) flags into the matching mmap(2) protection bits. */
static int mmap_prot_from_open_flags(int flags) {
        switch (flags & O_ACCMODE) {
        case O_RDONLY:
                return PROT_READ;
        case O_WRONLY:
                return PROT_WRITE;
        case O_RDWR:
                return PROT_READ|PROT_WRITE;
        default:
                assert_not_reached();
        }
}
113 int journal_file_tail_end_by_pread(JournalFile
*f
, uint64_t *ret_offset
) {
121 /* Same as journal_file_tail_end_by_mmap() below, but operates with pread() to avoid the mmap cache
122 * (and thus is thread safe) */
124 p
= le64toh(f
->header
->tail_object_offset
);
126 p
= le64toh(f
->header
->header_size
);
131 r
= journal_file_read_object_header(f
, OBJECT_UNUSED
, p
, &tail
);
135 sz
= le64toh(tail
.object
.size
);
136 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
140 if (p
> UINT64_MAX
- sz
)
151 int journal_file_tail_end_by_mmap(JournalFile
*f
, uint64_t *ret_offset
) {
159 /* Same as journal_file_tail_end_by_pread() above, but operates with the usual mmap logic */
161 p
= le64toh(f
->header
->tail_object_offset
);
163 p
= le64toh(f
->header
->header_size
);
168 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &tail
);
172 sz
= le64toh(READ_NOW(tail
->object
.size
));
173 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
177 if (p
> UINT64_MAX
- sz
)
188 int journal_file_set_offline_thread_join(JournalFile
*f
) {
193 if (f
->offline_state
== OFFLINE_JOINED
)
196 r
= pthread_join(f
->offline_thread
, NULL
);
200 f
->offline_state
= OFFLINE_JOINED
;
202 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
208 static int journal_file_set_online(JournalFile
*f
) {
213 if (!journal_file_writable(f
))
216 if (f
->fd
< 0 || !f
->header
)
220 switch (f
->offline_state
) {
222 /* No offline thread, no need to wait. */
226 case OFFLINE_SYNCING
: {
227 OfflineState tmp_state
= OFFLINE_SYNCING
;
228 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
229 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
232 /* Canceled syncing prior to offlining, no need to wait. */
236 case OFFLINE_AGAIN_FROM_SYNCING
: {
237 OfflineState tmp_state
= OFFLINE_AGAIN_FROM_SYNCING
;
238 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
239 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
242 /* Canceled restart from syncing, no need to wait. */
246 case OFFLINE_AGAIN_FROM_OFFLINING
: {
247 OfflineState tmp_state
= OFFLINE_AGAIN_FROM_OFFLINING
;
248 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
249 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
252 /* Canceled restart from offlining, must wait for offlining to complete however. */
257 r
= journal_file_set_offline_thread_join(f
);
267 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
270 switch (f
->header
->state
) {
275 f
->header
->state
= STATE_ONLINE
;
284 JournalFile
* journal_file_close(JournalFile
*f
) {
288 assert(f
->newest_boot_id_prioq_idx
== PRIOQ_IDX_NULL
);
291 mmap_cache_fd_free(f
->cache_fd
);
297 ordered_hashmap_free_free(f
->chain_cache
);
300 free(f
->compress_buffer
);
305 munmap(f
->fss_file
, PAGE_ALIGN(f
->fss_file_size
));
307 free(f
->fsprg_state
);
312 gcry_md_close(f
->hmac
);
318 static bool keyed_hash_requested(void) {
319 static thread_local
int cached
= -1;
323 r
= getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
326 log_debug_errno(r
, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
335 static bool compact_mode_requested(void) {
336 static thread_local
int cached
= -1;
340 r
= getenv_bool("SYSTEMD_JOURNAL_COMPACT");
343 log_debug_errno(r
, "Failed to parse $SYSTEMD_JOURNAL_COMPACT environment variable, ignoring: %m");
353 static Compression
getenv_compression(void) {
358 e
= getenv("SYSTEMD_JOURNAL_COMPRESS");
360 return DEFAULT_COMPRESSION
;
362 r
= parse_boolean(e
);
364 return r
? DEFAULT_COMPRESSION
: COMPRESSION_NONE
;
366 c
= compression_from_string(e
);
368 log_debug_errno(c
, "Failed to parse SYSTEMD_JOURNAL_COMPRESS value, ignoring: %s", e
);
369 return DEFAULT_COMPRESSION
;
372 if (!compression_supported(c
)) {
373 log_debug("Unsupported compression algorithm specified, ignoring: %s", e
);
374 return DEFAULT_COMPRESSION
;
381 static Compression
compression_requested(void) {
383 static thread_local Compression cached
= _COMPRESSION_INVALID
;
386 cached
= getenv_compression();
390 return COMPRESSION_NONE
;
394 static int journal_file_init_header(
396 JournalFileFlags file_flags
,
397 JournalFile
*template) {
406 /* Try to load the FSPRG state, and if we can't, then just don't do sealing */
407 seal
= FLAGS_SET(file_flags
, JOURNAL_SEAL
) && journal_file_fss_load(f
) >= 0;
411 .header_size
= htole64(ALIGN64(sizeof(h
))),
412 .incompatible_flags
= htole32(
413 FLAGS_SET(file_flags
, JOURNAL_COMPRESS
) * COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(compression_requested()) |
414 keyed_hash_requested() * HEADER_INCOMPATIBLE_KEYED_HASH
|
415 compact_mode_requested() * HEADER_INCOMPATIBLE_COMPACT
),
416 .compatible_flags
= htole32(
417 (seal
* HEADER_COMPATIBLE_SEALED
) |
418 HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID
),
421 assert_cc(sizeof(h
.signature
) == sizeof(HEADER_SIGNATURE
));
422 memcpy(h
.signature
, HEADER_SIGNATURE
, sizeof(HEADER_SIGNATURE
));
424 r
= sd_id128_randomize(&h
.file_id
);
428 r
= sd_id128_get_machine(&h
.machine_id
);
429 if (r
< 0 && !ERRNO_IS_MACHINE_ID_UNSET(r
))
430 return r
; /* If we have no valid machine ID (test environment?), let's simply leave the
431 * machine ID field all zeroes. */
434 h
.seqnum_id
= template->header
->seqnum_id
;
435 h
.tail_entry_seqnum
= template->header
->tail_entry_seqnum
;
437 h
.seqnum_id
= h
.file_id
;
439 k
= pwrite(f
->fd
, &h
, sizeof(h
), 0);
448 static int journal_file_refresh_header(JournalFile
*f
) {
454 /* We used to update the header's boot ID field here, but we don't do that anymore, as per
455 * HEADER_COMPATIBLE_TAIL_ENTRY_BOOT_ID */
457 r
= journal_file_set_online(f
);
459 /* Sync the online state to disk; likely just created a new file, also sync the directory this file
461 (void) fsync_full(f
->fd
);
466 static bool warn_wrong_flags(const JournalFile
*f
, bool compatible
) {
467 const uint32_t any
= compatible
? HEADER_COMPATIBLE_ANY
: HEADER_INCOMPATIBLE_ANY
,
468 supported
= compatible
? HEADER_COMPATIBLE_SUPPORTED
: HEADER_INCOMPATIBLE_SUPPORTED
;
469 const char *type
= compatible
? "compatible" : "incompatible";
475 flags
= le32toh(compatible
? f
->header
->compatible_flags
: f
->header
->incompatible_flags
);
477 if (flags
& ~supported
) {
479 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32
,
480 f
->path
, type
, flags
& ~any
);
481 flags
= (flags
& any
) & ~supported
;
485 _cleanup_free_
char *t
= NULL
;
488 if (flags
& HEADER_COMPATIBLE_SEALED
)
489 strv
[n
++] = "sealed";
491 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_XZ
)
492 strv
[n
++] = "xz-compressed";
493 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_LZ4
)
494 strv
[n
++] = "lz4-compressed";
495 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_ZSTD
)
496 strv
[n
++] = "zstd-compressed";
497 if (flags
& HEADER_INCOMPATIBLE_KEYED_HASH
)
498 strv
[n
++] = "keyed-hash";
499 if (flags
& HEADER_INCOMPATIBLE_COMPACT
)
500 strv
[n
++] = "compact";
503 assert(n
< ELEMENTSOF(strv
));
505 t
= strv_join((char**) strv
, ", ");
506 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
507 f
->path
, type
, n
> 1 ? "flags" : "flag", strnull(t
));
/* Validate a file offset from an (untrusted) header field: zero is allowed ("not set"); otherwise it
 * must be 64-bit aligned, past the header, and not beyond the tail object. */
static bool offset_is_valid(uint64_t offset, uint64_t header_size, uint64_t tail_object_offset) {
        if (offset == 0)
                return true;
        if (!VALID64(offset))
                return false;
        if (offset < header_size)
                return false;
        if (offset > tail_object_offset)
                return false;
        return true;
}
527 static bool hash_table_is_valid(uint64_t offset
, uint64_t size
, uint64_t header_size
, uint64_t arena_size
, uint64_t tail_object_offset
) {
528 if ((offset
== 0) != (size
== 0))
532 if (offset
<= offsetof(Object
, hash_table
.items
))
534 offset
-= offsetof(Object
, hash_table
.items
);
535 if (!offset_is_valid(offset
, header_size
, tail_object_offset
))
537 assert(offset
<= header_size
+ arena_size
);
538 if (size
> header_size
+ arena_size
- offset
)
543 static int journal_file_verify_header(JournalFile
*f
) {
544 uint64_t arena_size
, header_size
;
549 if (memcmp(f
->header
->signature
, HEADER_SIGNATURE
, 8))
552 /* In both read and write mode we refuse to open files with incompatible
553 * flags we don't know. */
554 if (warn_wrong_flags(f
, false))
555 return -EPROTONOSUPPORT
;
557 /* When open for writing we refuse to open files with compatible flags, too. */
558 if (journal_file_writable(f
) && warn_wrong_flags(f
, true))
559 return -EPROTONOSUPPORT
;
561 if (f
->header
->state
>= _STATE_MAX
)
564 header_size
= le64toh(READ_NOW(f
->header
->header_size
));
566 /* The first addition was n_data, so check that we are at least this large */
567 if (header_size
< HEADER_SIZE_MIN
)
570 /* When open for writing we refuse to open files with a mismatch of the header size, i.e. writing to
571 * files implementing older or new header structures. */
572 if (journal_file_writable(f
) && header_size
!= sizeof(Header
))
573 return -EPROTONOSUPPORT
;
575 /* Don't write to journal files without the new boot ID update behavior guarantee. */
576 if (journal_file_writable(f
) && !JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f
->header
))
577 return -EPROTONOSUPPORT
;
579 if (JOURNAL_HEADER_SEALED(f
->header
) && !JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
582 arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
584 if (UINT64_MAX
- header_size
< arena_size
|| header_size
+ arena_size
> (uint64_t) f
->last_stat
.st_size
)
587 uint64_t tail_object_offset
= le64toh(f
->header
->tail_object_offset
);
588 if (!offset_is_valid(tail_object_offset
, header_size
, UINT64_MAX
))
590 if (header_size
+ arena_size
< tail_object_offset
)
592 if (header_size
+ arena_size
- tail_object_offset
< sizeof(ObjectHeader
))
595 if (!hash_table_is_valid(le64toh(f
->header
->data_hash_table_offset
),
596 le64toh(f
->header
->data_hash_table_size
),
597 header_size
, arena_size
, tail_object_offset
))
600 if (!hash_table_is_valid(le64toh(f
->header
->field_hash_table_offset
),
601 le64toh(f
->header
->field_hash_table_size
),
602 header_size
, arena_size
, tail_object_offset
))
605 uint64_t entry_array_offset
= le64toh(f
->header
->entry_array_offset
);
606 if (!offset_is_valid(entry_array_offset
, header_size
, tail_object_offset
))
609 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_offset
)) {
610 uint32_t offset
= le32toh(f
->header
->tail_entry_array_offset
);
611 uint32_t n
= le32toh(f
->header
->tail_entry_array_n_entries
);
613 if (!offset_is_valid(offset
, header_size
, tail_object_offset
))
615 if (entry_array_offset
> offset
)
617 if (entry_array_offset
== 0 && offset
!= 0)
619 if ((offset
== 0) != (n
== 0))
621 assert(offset
<= header_size
+ arena_size
);
622 if ((uint64_t) n
* journal_file_entry_array_item_size(f
) > header_size
+ arena_size
- offset
)
626 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_offset
)) {
627 uint64_t offset
= le64toh(f
->header
->tail_entry_offset
);
629 if (!offset_is_valid(offset
, header_size
, tail_object_offset
))
633 /* When there is an entry object, then these fields must be filled. */
634 if (sd_id128_is_null(f
->header
->tail_entry_boot_id
))
636 if (!VALID_REALTIME(le64toh(f
->header
->head_entry_realtime
)))
638 if (!VALID_REALTIME(le64toh(f
->header
->tail_entry_realtime
)))
640 if (!VALID_MONOTONIC(le64toh(f
->header
->tail_entry_realtime
)))
643 /* Otherwise, the fields must be zero. */
644 if (JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f
->header
) &&
645 !sd_id128_is_null(f
->header
->tail_entry_boot_id
))
647 if (f
->header
->head_entry_realtime
!= 0)
649 if (f
->header
->tail_entry_realtime
!= 0)
651 if (f
->header
->tail_entry_realtime
!= 0)
656 /* Verify number of objects */
657 uint64_t n_objects
= le64toh(f
->header
->n_objects
);
658 if (n_objects
> arena_size
/ sizeof(ObjectHeader
))
661 uint64_t n_entries
= le64toh(f
->header
->n_entries
);
662 if (n_entries
> n_objects
)
665 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
) &&
666 le64toh(f
->header
->n_data
) > n_objects
)
669 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
) &&
670 le64toh(f
->header
->n_fields
) > n_objects
)
673 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_tags
) &&
674 le64toh(f
->header
->n_tags
) > n_objects
)
677 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
) &&
678 le64toh(f
->header
->n_entry_arrays
) > n_objects
)
681 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_n_entries
) &&
682 le32toh(f
->header
->tail_entry_array_n_entries
) > n_entries
)
685 if (journal_file_writable(f
)) {
686 sd_id128_t machine_id
;
690 r
= sd_id128_get_machine(&machine_id
);
691 if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r
)) /* Gracefully handle the machine ID not being initialized yet */
692 machine_id
= SD_ID128_NULL
;
696 if (!sd_id128_equal(machine_id
, f
->header
->machine_id
))
697 return log_debug_errno(SYNTHETIC_ERRNO(EHOSTDOWN
),
698 "Trying to open journal file from different host for writing, refusing.");
700 state
= f
->header
->state
;
702 if (state
== STATE_ARCHIVED
)
703 return -ESHUTDOWN
; /* Already archived */
704 if (state
== STATE_ONLINE
)
705 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
706 "Journal file %s is already online. Assuming unclean closing.",
708 if (state
!= STATE_OFFLINE
)
709 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
710 "Journal file %s has unknown state %i.",
713 if (f
->header
->field_hash_table_size
== 0 || f
->header
->data_hash_table_size
== 0)
720 int journal_file_fstat(JournalFile
*f
) {
726 if (fstat(f
->fd
, &f
->last_stat
) < 0)
729 f
->last_stat_usec
= now(CLOCK_MONOTONIC
);
731 /* Refuse dealing with files that aren't regular */
732 r
= stat_verify_regular(&f
->last_stat
);
736 /* Refuse appending to files that are already deleted */
737 if (f
->last_stat
.st_nlink
<= 0)
743 static int journal_file_allocate(JournalFile
*f
, uint64_t offset
, uint64_t size
) {
744 uint64_t old_size
, new_size
, old_header_size
, old_arena_size
;
750 /* We assume that this file is not sparse, and we know that for sure, since we always call
751 * posix_fallocate() ourselves */
753 if (size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - offset
)
756 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
759 old_header_size
= le64toh(READ_NOW(f
->header
->header_size
));
760 old_arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
761 if (old_arena_size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - old_header_size
)
764 old_size
= old_header_size
+ old_arena_size
;
766 new_size
= MAX(PAGE_ALIGN(offset
+ size
), old_header_size
);
768 if (new_size
<= old_size
) {
770 /* We already pre-allocated enough space, but before
771 * we write to it, let's check with fstat() if the
772 * file got deleted, in order make sure we don't throw
773 * away the data immediately. Don't check fstat() for
774 * all writes though, but only once ever 10s. */
776 if (f
->last_stat_usec
+ LAST_STAT_REFRESH_USEC
> now(CLOCK_MONOTONIC
))
779 return journal_file_fstat(f
);
782 /* Allocate more space. */
784 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
787 /* Refuse to go over 4G in compact mode so offsets can be stored in 32-bit. */
788 if (JOURNAL_HEADER_COMPACT(f
->header
) && new_size
> UINT32_MAX
)
791 if (new_size
> f
->metrics
.min_size
&& f
->metrics
.keep_free
> 0) {
794 if (fstatvfs(f
->fd
, &svfs
) >= 0) {
797 available
= LESS_BY((uint64_t) svfs
.f_bfree
* (uint64_t) svfs
.f_bsize
, f
->metrics
.keep_free
);
799 if (new_size
- old_size
> available
)
804 /* Increase by larger blocks at once */
805 new_size
= ROUND_UP(new_size
, FILE_SIZE_INCREASE
);
806 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
807 new_size
= f
->metrics
.max_size
;
809 /* Note that the glibc fallocate() fallback is very
810 inefficient, hence we try to minimize the allocation area
812 r
= posix_fallocate_loop(f
->fd
, old_size
, new_size
- old_size
);
816 f
->header
->arena_size
= htole64(new_size
- old_header_size
);
818 return journal_file_fstat(f
);
821 static unsigned type_to_context(ObjectType type
) {
822 /* One context for each type, plus one catch-all for the rest */
823 assert_cc(_OBJECT_TYPE_MAX
<= MMAP_CACHE_MAX_CONTEXTS
);
824 assert_cc(CONTEXT_HEADER
< MMAP_CACHE_MAX_CONTEXTS
);
825 return type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
? type
: 0;
828 static int journal_file_move_to(
841 /* This function may clear, overwrite, or alter previously cached entries. After this function has
842 * been called, all objects except for one obtained by this function are invalidated and must be
843 * re-read before use. */
848 if (size
> UINT64_MAX
- offset
)
851 /* Avoid SIGBUS on invalid accesses */
852 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
) {
853 /* Hmm, out of range? Let's refresh the fstat() data
854 * first, before we trust that check. */
856 r
= journal_file_fstat(f
);
860 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
)
861 return -EADDRNOTAVAIL
;
864 return mmap_cache_fd_get(f
->cache_fd
, type_to_context(type
), keep_always
, offset
, size
, &f
->last_stat
, ret
);
867 static uint64_t minimum_header_size(JournalFile
*f
, Object
*o
) {
869 static const uint64_t table
[] = {
870 [OBJECT_DATA
] = sizeof(DataObject
),
871 [OBJECT_FIELD
] = sizeof(FieldObject
),
872 [OBJECT_ENTRY
] = sizeof(EntryObject
),
873 [OBJECT_DATA_HASH_TABLE
] = sizeof(HashTableObject
),
874 [OBJECT_FIELD_HASH_TABLE
] = sizeof(HashTableObject
),
875 [OBJECT_ENTRY_ARRAY
] = sizeof(EntryArrayObject
),
876 [OBJECT_TAG
] = sizeof(TagObject
),
882 if (o
->object
.type
== OBJECT_DATA
)
883 return journal_file_data_payload_offset(f
);
885 if (o
->object
.type
>= ELEMENTSOF(table
) || table
[o
->object
.type
] <= 0)
886 return sizeof(ObjectHeader
);
888 return table
[o
->object
.type
];
891 static int check_object_header(JournalFile
*f
, Object
*o
, ObjectType type
, uint64_t offset
) {
897 s
= le64toh(READ_NOW(o
->object
.size
));
899 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
900 "Attempt to move to uninitialized object: %" PRIu64
,
903 if (s
< sizeof(ObjectHeader
))
904 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
905 "Attempt to move to overly short object with size %"PRIu64
": %" PRIu64
,
908 if (o
->object
.type
<= OBJECT_UNUSED
|| o
->object
.type
>= _OBJECT_TYPE_MAX
)
909 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
910 "Attempt to move to object with invalid type (%u): %" PRIu64
,
911 o
->object
.type
, offset
);
913 if (type
> OBJECT_UNUSED
&& o
->object
.type
!= type
)
914 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
915 "Found %s object while expecting %s object: %" PRIu64
,
916 journal_object_type_to_string(o
->object
.type
),
917 journal_object_type_to_string(type
),
920 if (s
< minimum_header_size(f
, o
))
921 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
922 "Size of %s object (%"PRIu64
") is smaller than the minimum object size (%"PRIu64
"): %" PRIu64
,
923 journal_object_type_to_string(o
->object
.type
),
925 minimum_header_size(f
, o
),
931 /* Lightweight object checks. We want this to be fast, so that we won't
932 * slowdown every journal_file_move_to_object() call too much. */
933 static int check_object(JournalFile
*f
, Object
*o
, uint64_t offset
) {
937 switch (o
->object
.type
) {
940 if ((le64toh(o
->data
.entry_offset
) == 0) ^ (le64toh(o
->data
.n_entries
) == 0))
941 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
942 "Bad data n_entries: %" PRIu64
": %" PRIu64
,
943 le64toh(o
->data
.n_entries
),
946 if (le64toh(o
->object
.size
) <= journal_file_data_payload_offset(f
))
947 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
948 "Bad data size (<= %zu): %" PRIu64
": %" PRIu64
,
949 journal_file_data_payload_offset(f
),
950 le64toh(o
->object
.size
),
953 if (!VALID64(le64toh(o
->data
.next_hash_offset
)) ||
954 !VALID64(le64toh(o
->data
.next_field_offset
)) ||
955 !VALID64(le64toh(o
->data
.entry_offset
)) ||
956 !VALID64(le64toh(o
->data
.entry_array_offset
)))
957 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
958 "Invalid offset, next_hash_offset=" OFSfmt
", next_field_offset=" OFSfmt
", entry_offset=" OFSfmt
", entry_array_offset=" OFSfmt
": %" PRIu64
,
959 le64toh(o
->data
.next_hash_offset
),
960 le64toh(o
->data
.next_field_offset
),
961 le64toh(o
->data
.entry_offset
),
962 le64toh(o
->data
.entry_array_offset
),
968 if (le64toh(o
->object
.size
) <= offsetof(Object
, field
.payload
))
969 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
970 "Bad field size (<= %zu): %" PRIu64
": %" PRIu64
,
971 offsetof(Object
, field
.payload
),
972 le64toh(o
->object
.size
),
975 if (!VALID64(le64toh(o
->field
.next_hash_offset
)) ||
976 !VALID64(le64toh(o
->field
.head_data_offset
)))
977 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
978 "Invalid offset, next_hash_offset=" OFSfmt
", head_data_offset=" OFSfmt
": %" PRIu64
,
979 le64toh(o
->field
.next_hash_offset
),
980 le64toh(o
->field
.head_data_offset
),
987 sz
= le64toh(READ_NOW(o
->object
.size
));
988 if (sz
< offsetof(Object
, entry
.items
) ||
989 (sz
- offsetof(Object
, entry
.items
)) % journal_file_entry_item_size(f
) != 0)
990 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
991 "Bad entry size (<= %zu): %" PRIu64
": %" PRIu64
,
992 offsetof(Object
, entry
.items
),
996 if ((sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
) <= 0)
997 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
998 "Invalid number items in entry: %" PRIu64
": %" PRIu64
,
999 (sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
),
1002 if (le64toh(o
->entry
.seqnum
) <= 0)
1003 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1004 "Invalid entry seqnum: %" PRIx64
": %" PRIu64
,
1005 le64toh(o
->entry
.seqnum
),
1008 if (!VALID_REALTIME(le64toh(o
->entry
.realtime
)))
1009 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1010 "Invalid entry realtime timestamp: %" PRIu64
": %" PRIu64
,
1011 le64toh(o
->entry
.realtime
),
1014 if (!VALID_MONOTONIC(le64toh(o
->entry
.monotonic
)))
1015 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1016 "Invalid entry monotonic timestamp: %" PRIu64
": %" PRIu64
,
1017 le64toh(o
->entry
.monotonic
),
1020 if (sd_id128_is_null(o
->entry
.boot_id
))
1021 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1022 "Invalid object entry with an empty boot ID: %" PRIu64
,
1028 case OBJECT_DATA_HASH_TABLE
:
1029 case OBJECT_FIELD_HASH_TABLE
: {
1032 sz
= le64toh(READ_NOW(o
->object
.size
));
1033 if (sz
< offsetof(Object
, hash_table
.items
) ||
1034 (sz
- offsetof(Object
, hash_table
.items
)) % sizeof(HashItem
) != 0 ||
1035 (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
) <= 0)
1036 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1037 "Invalid %s hash table size: %" PRIu64
": %" PRIu64
,
1038 journal_object_type_to_string(o
->object
.type
),
1045 case OBJECT_ENTRY_ARRAY
: {
1048 sz
= le64toh(READ_NOW(o
->object
.size
));
1049 if (sz
< offsetof(Object
, entry_array
.items
) ||
1050 (sz
- offsetof(Object
, entry_array
.items
)) % journal_file_entry_array_item_size(f
) != 0 ||
1051 (sz
- offsetof(Object
, entry_array
.items
)) / journal_file_entry_array_item_size(f
) <= 0)
1052 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1053 "Invalid object entry array size: %" PRIu64
": %" PRIu64
,
1056 /* Here, we request that the offset of each entry array object is in strictly increasing order. */
1057 next
= le64toh(o
->entry_array
.next_entry_array_offset
);
1058 if (!VALID64(next
) || (next
> 0 && next
<= offset
))
1059 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1060 "Invalid object entry array next_entry_array_offset: %" PRIu64
": %" PRIu64
,
1068 if (le64toh(o
->object
.size
) != sizeof(TagObject
))
1069 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1070 "Invalid object tag size: %" PRIu64
": %" PRIu64
,
1071 le64toh(o
->object
.size
),
1074 if (!VALID_EPOCH(le64toh(o
->tag
.epoch
)))
1075 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1076 "Invalid object tag epoch: %" PRIu64
": %" PRIu64
,
1077 le64toh(o
->tag
.epoch
), offset
);
1085 int journal_file_move_to_object(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
**ret
) {
1091 /* Even if this function fails, it may clear, overwrite, or alter previously cached entries. After
1092 * this function has been called, all objects except for one obtained by this function are
1093 * invalidated and must be re-read before use.. */
1095 /* Objects may only be located at multiple of 64 bit */
1096 if (!VALID64(offset
))
1097 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1098 "Attempt to move to %s object at non-64-bit boundary: %" PRIu64
,
1099 journal_object_type_to_string(type
),
1102 /* Object may not be located in the file header */
1103 if (offset
< le64toh(f
->header
->header_size
))
1104 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1105 "Attempt to move to %s object located in file header: %" PRIu64
,
1106 journal_object_type_to_string(type
),
1109 r
= journal_file_move_to(f
, type
, false, offset
, sizeof(ObjectHeader
), (void**) &o
);
1113 r
= check_object_header(f
, o
, type
, offset
);
1117 r
= journal_file_move_to(f
, type
, false, offset
, le64toh(READ_NOW(o
->object
.size
)), (void**) &o
);
1121 r
= check_object_header(f
, o
, type
, offset
);
1125 r
= check_object(f
, o
, offset
);
1135 int journal_file_read_object_header(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
*ret
) {
1142 /* Objects may only be located at multiple of 64 bit */
1143 if (!VALID64(offset
))
1144 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1145 "Attempt to read %s object at non-64-bit boundary: %" PRIu64
,
1146 journal_object_type_to_string(type
), offset
);
1148 /* Object may not be located in the file header */
1149 if (offset
< le64toh(f
->header
->header_size
))
1150 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1151 "Attempt to read %s object located in file header: %" PRIu64
,
1152 journal_object_type_to_string(type
), offset
);
1154 /* This will likely read too much data but it avoids having to call pread() twice. */
1155 n
= pread(f
->fd
, &o
, sizeof(o
), offset
);
1157 return log_debug_errno(errno
, "Failed to read journal %s object at offset: %" PRIu64
,
1158 journal_object_type_to_string(type
), offset
);
1160 if ((size_t) n
< sizeof(o
.object
))
1161 return log_debug_errno(SYNTHETIC_ERRNO(EIO
),
1162 "Failed to read short %s object at offset: %" PRIu64
,
1163 journal_object_type_to_string(type
), offset
);
1165 r
= check_object_header(f
, &o
, type
, offset
);
1169 if ((size_t) n
< minimum_header_size(f
, &o
))
1170 return log_debug_errno(SYNTHETIC_ERRNO(EIO
),
1171 "Short read while reading %s object: %" PRIu64
,
1172 journal_object_type_to_string(type
), offset
);
1174 r
= check_object(f
, &o
, offset
);
/* Increment a sequence number, wrapping back to 1 so that neither UINT64_MAX nor 0 (both used as
 * sentinels) is ever handed out. */
static uint64_t inc_seqnum(uint64_t seqnum) {
        if (seqnum < UINT64_MAX-1)
                return seqnum + 1;

        return 1; /* skip over UINT64_MAX and 0 when we run out of seqnums and start again */
}
1191 static uint64_t journal_file_entry_seqnum(
1195 uint64_t next_seqnum
;
1200 /* Picks a new sequence number for the entry we are about to add and returns it. */
1202 next_seqnum
= inc_seqnum(le64toh(f
->header
->tail_entry_seqnum
));
1204 /* If an external seqnum counter was passed, we update both the local and the external one, and set
1205 * it to the maximum of both */
1207 *seqnum
= next_seqnum
= MAX(inc_seqnum(*seqnum
), next_seqnum
);
1209 f
->header
->tail_entry_seqnum
= htole64(next_seqnum
);
1211 if (f
->header
->head_entry_seqnum
== 0)
1212 f
->header
->head_entry_seqnum
= htole64(next_seqnum
);
1217 int journal_file_append_object(
1221 Object
**ret_object
,
1222 uint64_t *ret_offset
) {
1230 assert(type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
);
1231 assert(size
>= sizeof(ObjectHeader
));
1233 r
= journal_file_set_online(f
);
1237 r
= journal_file_tail_end_by_mmap(f
, &p
);
1241 r
= journal_file_allocate(f
, p
, size
);
1245 r
= journal_file_move_to(f
, type
, false, p
, size
, (void**) &o
);
1249 o
->object
= (ObjectHeader
) {
1251 .size
= htole64(size
),
1254 f
->header
->tail_object_offset
= htole64(p
);
1255 f
->header
->n_objects
= htole64(le64toh(f
->header
->n_objects
) + 1);
1266 static int journal_file_setup_data_hash_table(JournalFile
*f
) {
1274 /* We estimate that we need 1 hash table entry per 768 bytes
1275 of journal file and we want to make sure we never get
1276 beyond 75% fill level. Calculate the hash table size for
1277 the maximum file size based on these metrics. */
1279 s
= (f
->metrics
.max_size
* 4 / 768 / 3) * sizeof(HashItem
);
1280 if (s
< DEFAULT_DATA_HASH_TABLE_SIZE
)
1281 s
= DEFAULT_DATA_HASH_TABLE_SIZE
;
1283 log_debug("Reserving %"PRIu64
" entries in data hash table.", s
/ sizeof(HashItem
));
1285 r
= journal_file_append_object(f
,
1286 OBJECT_DATA_HASH_TABLE
,
1287 offsetof(Object
, hash_table
.items
) + s
,
1292 memzero(o
->hash_table
.items
, s
);
1294 f
->header
->data_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1295 f
->header
->data_hash_table_size
= htole64(s
);
1300 static int journal_file_setup_field_hash_table(JournalFile
*f
) {
1308 /* We use a fixed size hash table for the fields as this
1309 * number should grow very slowly only */
1311 s
= DEFAULT_FIELD_HASH_TABLE_SIZE
;
1312 log_debug("Reserving %"PRIu64
" entries in field hash table.", s
/ sizeof(HashItem
));
1314 r
= journal_file_append_object(f
,
1315 OBJECT_FIELD_HASH_TABLE
,
1316 offsetof(Object
, hash_table
.items
) + s
,
1321 memzero(o
->hash_table
.items
, s
);
1323 f
->header
->field_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1324 f
->header
->field_hash_table_size
= htole64(s
);
1329 int journal_file_map_data_hash_table(JournalFile
*f
) {
1337 if (f
->data_hash_table
)
1340 p
= le64toh(f
->header
->data_hash_table_offset
);
1341 s
= le64toh(f
->header
->data_hash_table_size
);
1343 r
= journal_file_move_to(f
,
1344 OBJECT_DATA_HASH_TABLE
,
1351 f
->data_hash_table
= t
;
1355 int journal_file_map_field_hash_table(JournalFile
*f
) {
1363 if (f
->field_hash_table
)
1366 p
= le64toh(f
->header
->field_hash_table_offset
);
1367 s
= le64toh(f
->header
->field_hash_table_size
);
1369 r
= journal_file_move_to(f
,
1370 OBJECT_FIELD_HASH_TABLE
,
1377 f
->field_hash_table
= t
;
1381 static int journal_file_link_field(
1392 assert(f
->field_hash_table
);
1396 if (o
->object
.type
!= OBJECT_FIELD
)
1399 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1403 /* This might alter the window we are looking at */
1404 o
->field
.next_hash_offset
= o
->field
.head_data_offset
= 0;
1407 p
= le64toh(f
->field_hash_table
[h
].tail_hash_offset
);
1409 f
->field_hash_table
[h
].head_hash_offset
= htole64(offset
);
1411 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1415 o
->field
.next_hash_offset
= htole64(offset
);
1418 f
->field_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1420 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
1421 f
->header
->n_fields
= htole64(le64toh(f
->header
->n_fields
) + 1);
1426 static int journal_file_link_data(
1437 assert(f
->data_hash_table
);
1441 if (o
->object
.type
!= OBJECT_DATA
)
1444 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1448 /* This might alter the window we are looking at */
1449 o
->data
.next_hash_offset
= o
->data
.next_field_offset
= 0;
1450 o
->data
.entry_offset
= o
->data
.entry_array_offset
= 0;
1451 o
->data
.n_entries
= 0;
1454 p
= le64toh(f
->data_hash_table
[h
].tail_hash_offset
);
1456 /* Only entry in the hash table is easy */
1457 f
->data_hash_table
[h
].head_hash_offset
= htole64(offset
);
1459 /* Move back to the previous data object, to patch in
1462 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1466 o
->data
.next_hash_offset
= htole64(offset
);
1469 f
->data_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1471 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
1472 f
->header
->n_data
= htole64(le64toh(f
->header
->n_data
) + 1);
1477 static int get_next_hash_offset(
1480 le64_t
*next_hash_offset
,
1482 le64_t
*header_max_depth
) {
1488 assert(next_hash_offset
);
1491 nextp
= le64toh(READ_NOW(*next_hash_offset
));
1493 if (nextp
<= *p
) /* Refuse going in loops */
1494 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1495 "Detected hash item loop in %s, refusing.", f
->path
);
1499 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1500 if (header_max_depth
&& journal_file_writable(f
))
1501 *header_max_depth
= htole64(MAX(*depth
, le64toh(*header_max_depth
)));
1508 int journal_file_find_field_object_with_hash(
1513 Object
**ret_object
,
1514 uint64_t *ret_offset
) {
1516 uint64_t p
, osize
, h
, m
, depth
= 0;
1524 /* If the field hash table is empty, we can't find anything */
1525 if (le64toh(f
->header
->field_hash_table_size
) <= 0)
1528 /* Map the field hash table, if it isn't mapped yet. */
1529 r
= journal_file_map_field_hash_table(f
);
1533 osize
= offsetof(Object
, field
.payload
) + size
;
1535 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1540 p
= le64toh(f
->field_hash_table
[h
].head_hash_offset
);
1544 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1548 if (le64toh(o
->field
.hash
) == hash
&&
1549 le64toh(o
->object
.size
) == osize
&&
1550 memcmp(o
->field
.payload
, field
, size
) == 0) {
1560 r
= get_next_hash_offset(
1563 &o
->field
.next_hash_offset
,
1565 JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) ? &f
->header
->field_hash_chain_depth
: NULL
);
1573 uint64_t journal_file_hash_data(
1580 assert(data
|| sz
== 0);
1582 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1583 * function use siphash. Old journal files use the Jenkins hash. */
1585 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
1586 return siphash24(data
, sz
, f
->header
->file_id
.bytes
);
1588 return jenkins_hash64(data
, sz
);
1591 int journal_file_find_field_object(
1595 Object
**ret_object
,
1596 uint64_t *ret_offset
) {
1602 return journal_file_find_field_object_with_hash(
1605 journal_file_hash_data(f
, field
, size
),
1606 ret_object
, ret_offset
);
1609 int journal_file_find_data_object_with_hash(
1614 Object
**ret_object
,
1615 uint64_t *ret_offset
) {
1617 uint64_t p
, h
, m
, depth
= 0;
1622 assert(data
|| size
== 0);
1624 /* If there's no data hash table, then there's no entry. */
1625 if (le64toh(f
->header
->data_hash_table_size
) <= 0)
1628 /* Map the data hash table, if it isn't mapped yet. */
1629 r
= journal_file_map_data_hash_table(f
);
1633 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1638 p
= le64toh(f
->data_hash_table
[h
].head_hash_offset
);
1645 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1649 if (le64toh(o
->data
.hash
) != hash
)
1652 r
= journal_file_data_payload(f
, o
, p
, NULL
, 0, 0, &d
, &rsize
);
1655 assert(r
> 0); /* journal_file_data_payload() always returns > 0 if no field is provided. */
1657 if (memcmp_nn(data
, size
, d
, rsize
) == 0) {
1668 r
= get_next_hash_offset(
1671 &o
->data
.next_hash_offset
,
1673 JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) ? &f
->header
->data_hash_chain_depth
: NULL
);
1681 int journal_file_find_data_object(
1685 Object
**ret_object
,
1686 uint64_t *ret_offset
) {
1689 assert(data
|| size
== 0);
1691 return journal_file_find_data_object_with_hash(
1694 journal_file_hash_data(f
, data
, size
),
1695 ret_object
, ret_offset
);
1698 bool journal_field_valid(const char *p
, size_t l
, bool allow_protected
) {
1699 /* We kinda enforce POSIX syntax recommendations for
1700 environment variables here, but make a couple of additional
1703 http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html */
1710 /* No empty field names */
1714 /* Don't allow names longer than 64 chars */
1718 /* Variables starting with an underscore are protected */
1719 if (!allow_protected
&& p
[0] == '_')
1722 /* Don't allow digits as first character */
1723 if (ascii_isdigit(p
[0]))
1726 /* Only allow A-Z0-9 and '_' */
1727 for (const char *a
= p
; a
< p
+ l
; a
++)
1728 if ((*a
< 'A' || *a
> 'Z') &&
1729 !ascii_isdigit(*a
) &&
1736 static int journal_file_append_field(
1740 Object
**ret_object
,
1741 uint64_t *ret_offset
) {
1752 if (!journal_field_valid(field
, size
, true))
1755 hash
= journal_file_hash_data(f
, field
, size
);
1757 r
= journal_file_find_field_object_with_hash(f
, field
, size
, hash
, ret_object
, ret_offset
);
1763 osize
= offsetof(Object
, field
.payload
) + size
;
1764 r
= journal_file_append_object(f
, OBJECT_FIELD
, osize
, &o
, &p
);
1768 o
->field
.hash
= htole64(hash
);
1769 memcpy(o
->field
.payload
, field
, size
);
1771 r
= journal_file_link_field(f
, o
, p
, hash
);
1775 /* The linking might have altered the window, so let's only pass the offset to hmac which will
1776 * move to the object again if needed. */
1779 r
= journal_file_hmac_put_object(f
, OBJECT_FIELD
, NULL
, p
);
1785 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, ret_object
);
1796 static int maybe_compress_payload(JournalFile
*f
, uint8_t *dst
, const uint8_t *src
, uint64_t size
, size_t *rsize
) {
1800 #if HAVE_COMPRESSION
1804 c
= JOURNAL_FILE_COMPRESSION(f
);
1805 if (c
== COMPRESSION_NONE
|| size
< f
->compress_threshold_bytes
)
1808 r
= compress_blob(c
, src
, size
, dst
, size
- 1, rsize
);
1810 return log_debug_errno(r
, "Failed to compress data object using %s, ignoring: %m", compression_to_string(c
));
1812 log_debug("Compressed data object %"PRIu64
" -> %zu using %s", size
, *rsize
, compression_to_string(c
));
1814 return 1; /* compressed */
1820 static int journal_file_append_data(
1824 Object
**ret_object
,
1825 uint64_t *ret_offset
) {
1827 uint64_t hash
, p
, osize
;
1835 if (!data
|| size
== 0)
1838 hash
= journal_file_hash_data(f
, data
, size
);
1840 r
= journal_file_find_data_object_with_hash(f
, data
, size
, hash
, ret_object
, ret_offset
);
1846 eq
= memchr(data
, '=', size
);
1850 osize
= journal_file_data_payload_offset(f
) + size
;
1851 r
= journal_file_append_object(f
, OBJECT_DATA
, osize
, &o
, &p
);
1855 o
->data
.hash
= htole64(hash
);
1857 r
= maybe_compress_payload(f
, journal_file_data_payload_field(f
, o
), data
, size
, &rsize
);
1859 /* We don't really care failures, let's continue without compression */
1860 memcpy_safe(journal_file_data_payload_field(f
, o
), data
, size
);
1862 Compression c
= JOURNAL_FILE_COMPRESSION(f
);
1864 assert(c
>= 0 && c
< _COMPRESSION_MAX
&& c
!= COMPRESSION_NONE
);
1866 o
->object
.size
= htole64(journal_file_data_payload_offset(f
) + rsize
);
1867 o
->object
.flags
|= COMPRESSION_TO_OBJECT_FLAG(c
);
1870 r
= journal_file_link_data(f
, o
, p
, hash
);
1874 /* The linking might have altered the window, so let's refresh our pointer. */
1875 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1880 r
= journal_file_hmac_put_object(f
, OBJECT_DATA
, o
, p
);
1885 /* Create field object ... */
1886 r
= journal_file_append_field(f
, data
, (uint8_t*) eq
- (uint8_t*) data
, &fo
, NULL
);
1890 /* ... and link it in. */
1891 o
->data
.next_field_offset
= fo
->field
.head_data_offset
;
1892 fo
->field
.head_data_offset
= le64toh(p
);
1903 static int maybe_decompress_payload(
1907 Compression compression
,
1909 size_t field_length
,
1910 size_t data_threshold
,
1916 /* We can't read objects larger than 4G on a 32-bit machine */
1917 if ((uint64_t) (size_t) size
!= size
)
1920 if (compression
!= COMPRESSION_NONE
) {
1921 #if HAVE_COMPRESSION
1926 r
= decompress_startswith(compression
, payload
, size
, &f
->compress_buffer
, field
,
1929 return log_debug_errno(r
,
1930 "Cannot decompress %s object of length %" PRIu64
": %m",
1931 compression_to_string(compression
),
1942 r
= decompress_blob(compression
, payload
, size
, &f
->compress_buffer
, &rsize
, 0);
1947 *ret_data
= f
->compress_buffer
;
1951 return -EPROTONOSUPPORT
;
1954 if (field
&& (size
< field_length
+ 1 || memcmp(payload
, field
, field_length
) != 0 || payload
[field_length
] != '=')) {
1963 *ret_data
= payload
;
1965 *ret_size
= (size_t) size
;
1971 int journal_file_data_payload(
1976 size_t field_length
,
1977 size_t data_threshold
,
1986 assert(!field
== (field_length
== 0)); /* These must be specified together. */
1989 r
= journal_file_move_to_object(f
, OBJECT_DATA
, offset
, &o
);
1994 size
= le64toh(READ_NOW(o
->object
.size
));
1995 if (size
< journal_file_data_payload_offset(f
))
1998 size
-= journal_file_data_payload_offset(f
);
2000 c
= COMPRESSION_FROM_OBJECT(o
);
2002 return -EPROTONOSUPPORT
;
2004 return maybe_decompress_payload(f
, journal_file_data_payload_field(f
, o
), size
, c
, field
,
2005 field_length
, data_threshold
, ret_data
, ret_size
);
2008 uint64_t journal_file_entry_n_items(JournalFile
*f
, Object
*o
) {
2014 if (o
->object
.type
!= OBJECT_ENTRY
)
2017 sz
= le64toh(READ_NOW(o
->object
.size
));
2018 if (sz
< offsetof(Object
, entry
.items
))
2021 return (sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
);
2024 uint64_t journal_file_entry_array_n_items(JournalFile
*f
, Object
*o
) {
2030 if (o
->object
.type
!= OBJECT_ENTRY_ARRAY
)
2033 sz
= le64toh(READ_NOW(o
->object
.size
));
2034 if (sz
< offsetof(Object
, entry_array
.items
))
2037 return (sz
- offsetof(Object
, entry_array
.items
)) / journal_file_entry_array_item_size(f
);
2040 uint64_t journal_file_hash_table_n_items(Object
*o
) {
2045 if (!IN_SET(o
->object
.type
, OBJECT_DATA_HASH_TABLE
, OBJECT_FIELD_HASH_TABLE
))
2048 sz
= le64toh(READ_NOW(o
->object
.size
));
2049 if (sz
< offsetof(Object
, hash_table
.items
))
2052 return (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
);
2055 static void write_entry_array_item(JournalFile
*f
, Object
*o
, uint64_t i
, uint64_t p
) {
2059 if (JOURNAL_HEADER_COMPACT(f
->header
)) {
2060 assert(p
<= UINT32_MAX
);
2061 o
->entry_array
.items
.compact
[i
] = htole32(p
);
2063 o
->entry_array
.items
.regular
[i
] = htole64(p
);
2066 static int link_entry_into_array(
2074 uint64_t n
= 0, ap
= 0, q
, i
, a
, hidx
;
2084 a
= tail
? le32toh(*tail
) : le64toh(*first
);
2085 hidx
= le64toh(READ_NOW(*idx
));
2086 i
= tidx
? le32toh(READ_NOW(*tidx
)) : hidx
;
2089 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2093 n
= journal_file_entry_array_n_items(f
, o
);
2095 write_entry_array_item(f
, o
, i
, p
);
2096 *idx
= htole64(hidx
+ 1);
2098 *tidx
= htole32(le32toh(*tidx
) + 1);
2104 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
2115 r
= journal_file_append_object(f
, OBJECT_ENTRY_ARRAY
,
2116 offsetof(Object
, entry_array
.items
) + n
* journal_file_entry_array_item_size(f
),
2122 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY_ARRAY
, o
, q
);
2127 write_entry_array_item(f
, o
, i
, p
);
2130 *first
= htole64(q
);
2132 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, ap
, &o
);
2136 o
->entry_array
.next_entry_array_offset
= htole64(q
);
2142 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
2143 f
->header
->n_entry_arrays
= htole64(le64toh(f
->header
->n_entry_arrays
) + 1);
2145 *idx
= htole64(hidx
+ 1);
2152 static int link_entry_into_array_plus_one(
2170 hidx
= le64toh(READ_NOW(*idx
));
2171 if (hidx
== UINT64_MAX
)
2174 *extra
= htole64(p
);
2178 i
= htole64(hidx
- 1);
2179 r
= link_entry_into_array(f
, first
, &i
, tail
, tidx
, p
);
2184 *idx
= htole64(hidx
+ 1);
2188 static int journal_file_link_entry_item(JournalFile
*f
, uint64_t offset
, uint64_t p
) {
2195 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
2199 return link_entry_into_array_plus_one(f
,
2200 &o
->data
.entry_offset
,
2201 &o
->data
.entry_array_offset
,
2203 JOURNAL_HEADER_COMPACT(f
->header
) ? &o
->data
.compact
.tail_entry_array_offset
: NULL
,
2204 JOURNAL_HEADER_COMPACT(f
->header
) ? &o
->data
.compact
.tail_entry_array_n_entries
: NULL
,
2208 static int journal_file_link_entry(
2212 const EntryItem items
[],
2222 if (o
->object
.type
!= OBJECT_ENTRY
)
2225 __atomic_thread_fence(__ATOMIC_SEQ_CST
);
2227 /* Link up the entry itself */
2228 r
= link_entry_into_array(f
,
2229 &f
->header
->entry_array_offset
,
2230 &f
->header
->n_entries
,
2231 JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_offset
) ? &f
->header
->tail_entry_array_offset
: NULL
,
2232 JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_n_entries
) ? &f
->header
->tail_entry_array_n_entries
: NULL
,
2237 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
2239 if (f
->header
->head_entry_realtime
== 0)
2240 f
->header
->head_entry_realtime
= o
->entry
.realtime
;
2242 f
->header
->tail_entry_realtime
= o
->entry
.realtime
;
2243 f
->header
->tail_entry_monotonic
= o
->entry
.monotonic
;
2244 if (JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_offset
))
2245 f
->header
->tail_entry_offset
= htole64(offset
);
2246 f
->newest_mtime
= 0; /* we have a new tail entry now, explicitly invalidate newest boot id/timestamp info */
2248 /* Link up the items */
2249 for (uint64_t i
= 0; i
< n_items
; i
++) {
2252 /* If we fail to link an entry item because we can't allocate a new entry array, don't fail
2253 * immediately but try to link the other entry items since it might still be possible to link
2254 * those if they don't require a new entry array to be allocated. */
2256 k
= journal_file_link_entry_item(f
, offset
, items
[i
].object_offset
);
2266 static void write_entry_item(JournalFile
*f
, Object
*o
, uint64_t i
, const EntryItem
*item
) {
2271 if (JOURNAL_HEADER_COMPACT(f
->header
)) {
2272 assert(item
->object_offset
<= UINT32_MAX
);
2273 o
->entry
.items
.compact
[i
].object_offset
= htole32(item
->object_offset
);
2275 o
->entry
.items
.regular
[i
].object_offset
= htole64(item
->object_offset
);
2276 o
->entry
.items
.regular
[i
].hash
= htole64(item
->hash
);
2280 static int journal_file_append_entry_internal(
2282 const dual_timestamp
*ts
,
2283 const sd_id128_t
*boot_id
,
2284 const sd_id128_t
*machine_id
,
2286 const EntryItem items
[],
2289 sd_id128_t
*seqnum_id
,
2290 Object
**ret_object
,
2291 uint64_t *ret_offset
) {
2302 assert(!sd_id128_is_null(*boot_id
));
2303 assert(items
|| n_items
== 0);
2305 if (f
->strict_order
) {
2306 /* If requested be stricter with ordering in this journal file, to make searching via
2307 * bisection fully deterministic. This is an optional feature, so that if desired journal
2308 * files can be written where the ordering is not strictly enforced (in which case bisection
2309 * will yield *a* result, but not the *only* result, when searching for points in
2310 * time). Strict ordering mode is enabled when journald originally writes the files, but
2311 * might not necessarily be if other tools (the remoting tools for example) write journal
2312 * files from combined sources.
2314 * Typically, if any of the errors generated here are seen journald will just rotate the
2315 * journal files and start anew. */
2317 if (ts
->realtime
< le64toh(f
->header
->tail_entry_realtime
))
2318 return log_debug_errno(SYNTHETIC_ERRNO(EREMCHG
),
2319 "Realtime timestamp %" PRIu64
" smaller than previous realtime "
2320 "timestamp %" PRIu64
", refusing entry.",
2321 ts
->realtime
, le64toh(f
->header
->tail_entry_realtime
));
2323 if (sd_id128_equal(*boot_id
, f
->header
->tail_entry_boot_id
) &&
2324 ts
->monotonic
< le64toh(f
->header
->tail_entry_monotonic
))
2325 return log_debug_errno(
2326 SYNTHETIC_ERRNO(ENOTNAM
),
2327 "Monotonic timestamp %" PRIu64
2328 " smaller than previous monotonic timestamp %" PRIu64
2329 " while having the same boot ID, refusing entry.",
2331 le64toh(f
->header
->tail_entry_monotonic
));
2335 /* Settle the passed in sequence number ID */
2337 if (sd_id128_is_null(*seqnum_id
))
2338 *seqnum_id
= f
->header
->seqnum_id
; /* Caller has none assigned, then copy the one from the file */
2339 else if (!sd_id128_equal(*seqnum_id
, f
->header
->seqnum_id
)) {
2340 /* Different seqnum IDs? We can't allow entries from multiple IDs end up in the same journal.*/
2341 if (le64toh(f
->header
->n_entries
) == 0)
2342 f
->header
->seqnum_id
= *seqnum_id
; /* Caller has one, and file so far has no entries, then copy the one from the caller */
2344 return log_debug_errno(SYNTHETIC_ERRNO(EILSEQ
),
2345 "Sequence number IDs don't match, refusing entry.");
2349 if (machine_id
&& sd_id128_is_null(f
->header
->machine_id
))
2350 /* Initialize machine ID when not set yet */
2351 f
->header
->machine_id
= *machine_id
;
2353 osize
= offsetof(Object
, entry
.items
) + (n_items
* journal_file_entry_item_size(f
));
2355 r
= journal_file_append_object(f
, OBJECT_ENTRY
, osize
, &o
, &np
);
2359 o
->entry
.seqnum
= htole64(journal_file_entry_seqnum(f
, seqnum
));
2360 o
->entry
.realtime
= htole64(ts
->realtime
);
2361 o
->entry
.monotonic
= htole64(ts
->monotonic
);
2362 o
->entry
.xor_hash
= htole64(xor_hash
);
2363 o
->entry
.boot_id
= f
->header
->tail_entry_boot_id
= *boot_id
;
2365 for (size_t i
= 0; i
< n_items
; i
++)
2366 write_entry_item(f
, o
, i
, &items
[i
]);
2369 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY
, o
, np
);
2374 r
= journal_file_link_entry(f
, o
, np
, items
, n_items
);
2387 void journal_file_post_change(JournalFile
*f
) {
2393 /* inotify() does not receive IN_MODIFY events from file
2394 * accesses done via mmap(). After each access we hence
2395 * trigger IN_MODIFY by truncating the journal file to its
2396 * current size which triggers IN_MODIFY. */
2398 __atomic_thread_fence(__ATOMIC_SEQ_CST
);
2400 if (ftruncate(f
->fd
, f
->last_stat
.st_size
) < 0)
2401 log_debug_errno(errno
, "Failed to truncate file to its own size: %m");
2404 static int post_change_thunk(sd_event_source
*timer
, uint64_t usec
, void *userdata
) {
2407 journal_file_post_change(userdata
);
2412 static void schedule_post_change(JournalFile
*f
) {
2417 assert(f
->post_change_timer
);
2419 assert_se(e
= sd_event_source_get_event(f
->post_change_timer
));
2421 /* If we are already going down, post the change immediately. */
2422 if (IN_SET(sd_event_get_state(e
), SD_EVENT_EXITING
, SD_EVENT_FINISHED
))
2425 r
= sd_event_source_get_enabled(f
->post_change_timer
, NULL
);
2427 log_debug_errno(r
, "Failed to get ftruncate timer state: %m");
2433 r
= sd_event_source_set_time_relative(f
->post_change_timer
, f
->post_change_timer_period
);
2435 log_debug_errno(r
, "Failed to set time for scheduling ftruncate: %m");
2439 r
= sd_event_source_set_enabled(f
->post_change_timer
, SD_EVENT_ONESHOT
);
2441 log_debug_errno(r
, "Failed to enable scheduled ftruncate: %m");
2448 /* On failure, let's simply post the change immediately. */
2449 journal_file_post_change(f
);
2452 /* Enable coalesced change posting in a timer on the provided sd_event instance */
2453 int journal_file_enable_post_change_timer(JournalFile
*f
, sd_event
*e
, usec_t t
) {
2454 _cleanup_(sd_event_source_unrefp
) sd_event_source
*timer
= NULL
;
2458 assert_return(!f
->post_change_timer
, -EINVAL
);
2462 r
= sd_event_add_time(e
, &timer
, CLOCK_MONOTONIC
, 0, 0, post_change_thunk
, f
);
2466 r
= sd_event_source_set_enabled(timer
, SD_EVENT_OFF
);
2470 f
->post_change_timer
= TAKE_PTR(timer
);
2471 f
->post_change_timer_period
= t
;
2476 static int entry_item_cmp(const EntryItem
*a
, const EntryItem
*b
) {
2477 return CMP(ASSERT_PTR(a
)->object_offset
, ASSERT_PTR(b
)->object_offset
);
2480 static size_t remove_duplicate_entry_items(EntryItem items
[], size_t n
) {
2483 assert(items
|| n
== 0);
2488 for (size_t i
= 1; i
< n
; i
++)
2489 if (items
[i
].object_offset
!= items
[j
- 1].object_offset
)
2490 items
[j
++] = items
[i
];
2495 int journal_file_append_entry(
2497 const dual_timestamp
*ts
,
2498 const sd_id128_t
*boot_id
,
2499 const struct iovec iovec
[],
2502 sd_id128_t
*seqnum_id
,
2503 Object
**ret_object
,
2504 uint64_t *ret_offset
) {
2506 _cleanup_free_ EntryItem
*items_alloc
= NULL
;
2508 uint64_t xor_hash
= 0;
2509 struct dual_timestamp _ts
;
2510 sd_id128_t _boot_id
, _machine_id
, *machine_id
;
2516 assert(n_iovec
> 0);
2519 if (!VALID_REALTIME(ts
->realtime
))
2520 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2521 "Invalid realtime timestamp %" PRIu64
", refusing entry.",
2523 if (!VALID_MONOTONIC(ts
->monotonic
))
2524 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2525 "Invalid monotomic timestamp %" PRIu64
", refusing entry.",
2528 dual_timestamp_get(&_ts
);
2533 if (sd_id128_is_null(*boot_id
))
2534 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
), "Empty boot ID, refusing entry.");
2536 r
= sd_id128_get_boot(&_boot_id
);
2540 boot_id
= &_boot_id
;
2543 r
= sd_id128_get_machine(&_machine_id
);
2544 if (ERRNO_IS_NEG_MACHINE_ID_UNSET(r
))
2545 /* Gracefully handle the machine ID not being initialized yet */
2550 machine_id
= &_machine_id
;
2553 r
= journal_file_maybe_append_tag(f
, ts
->realtime
);
2558 if (n_iovec
< ALLOCA_MAX
/ sizeof(EntryItem
) / 2)
2559 items
= newa(EntryItem
, n_iovec
);
2561 items_alloc
= new(EntryItem
, n_iovec
);
2565 items
= items_alloc
;
2568 for (size_t i
= 0; i
< n_iovec
; i
++) {
2572 r
= journal_file_append_data(f
, iovec
[i
].iov_base
, iovec
[i
].iov_len
, &o
, &p
);
2576 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2577 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2578 * specific record, and give records with otherwise identical position (i.e. match in seqno,
2579 * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
2580 * objects in each file is different since they are keyed. Hence let's calculate the Jenkins
2581 * hash here for that. This also has the benefit that cursors for old and new journal files
2582 * are completely identical (they include the XOR hash after all). For classic Jenkins-hash
2583 * files things are easier, we can just take the value from the stored record directly. */
2585 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
2586 xor_hash
^= jenkins_hash64(iovec
[i
].iov_base
, iovec
[i
].iov_len
);
2588 xor_hash
^= le64toh(o
->data
.hash
);
2590 items
[i
] = (EntryItem
) {
2592 .hash
= le64toh(o
->data
.hash
),
2596 /* Order by the position on disk, in order to improve seek
2597 * times for rotating media. */
2598 typesafe_qsort(items
, n_iovec
, entry_item_cmp
);
2599 n_iovec
= remove_duplicate_entry_items(items
, n_iovec
);
2601 r
= journal_file_append_entry_internal(
2614 /* If the memory mapping triggered a SIGBUS then we return an
2615 * IO error and ignore the error code passed down to us, since
2616 * it is very likely just an effect of a nullified replacement
2619 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
2622 if (f
->post_change_timer
)
2623 schedule_post_change(f
);
2625 journal_file_post_change(f
);
2630 typedef struct ChainCacheItem
{
2631 uint64_t first
; /* the array at the beginning of the chain */
2632 uint64_t array
; /* the cached array */
2633 uint64_t begin
; /* the first item in the cached array */
2634 uint64_t total
; /* the total number of items in all arrays before this one in the chain */
2635 uint64_t last_index
; /* the last index we looked at, to optimize locality when bisecting */
2638 static void chain_cache_put(
2645 uint64_t last_index
) {
2650 /* If the chain item to cache for this chain is the
2651 * first one it's not worth caching anything */
2655 if (ordered_hashmap_size(h
) >= CHAIN_CACHE_MAX
) {
2656 ci
= ordered_hashmap_steal_first(h
);
2659 ci
= new(ChainCacheItem
, 1);
2666 if (ordered_hashmap_put(h
, &ci
->first
, ci
) < 0) {
2671 assert(ci
->first
== first
);
2676 ci
->last_index
= last_index
;
2679 static int bump_array_index(uint64_t *i
, direction_t direction
, uint64_t n
) {
2682 /* Increase or decrease the specified index, in the right direction. */
2684 if (direction
== DIRECTION_DOWN
) {
2699 static int bump_entry_array(
2701 Object
*o
, /* the current entry array object. */
2702 uint64_t offset
, /* the offset of the entry array object. */
2703 uint64_t first
, /* The offset of the first entry array object in the chain. */
2704 direction_t direction
,
2712 if (direction
== DIRECTION_DOWN
) {
2714 assert(o
->object
.type
== OBJECT_ENTRY_ARRAY
);
2716 *ret
= le64toh(o
->entry_array
.next_entry_array_offset
);
2719 /* Entry array chains are a singly linked list, so to find the previous array in the chain, we have
2720 * to start iterating from the top. */
2724 uint64_t p
= first
, q
= 0;
2725 while (p
> 0 && p
!= offset
) {
2726 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, p
, &o
);
2731 p
= le64toh(o
->entry_array
.next_entry_array_offset
);
2734 /* If we can't find the previous entry array in the entry array chain, we're likely dealing with a
2735 * corrupted journal file. */
2745 static int generic_array_get(
2749 direction_t direction
,
2750 Object
**ret_object
,
2751 uint64_t *ret_offset
) {
2753 uint64_t a
, t
= 0, k
;
2760 /* FIXME: fix return value assignment on success. */
2764 /* Try the chain cache first */
2765 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2766 if (ci
&& i
> ci
->total
) {
2773 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2774 if (IN_SET(r
, -EBADMSG
, -EADDRNOTAVAIL
)) {
2775 /* If there's corruption and we're going downwards, let's pretend we reached the
2776 * final entry in the entry array chain. */
2778 if (direction
== DIRECTION_DOWN
)
2781 /* If there's corruption and we're going upwards, move back to the previous entry
2782 * array and start iterating entries from there. */
2790 k
= journal_file_entry_array_n_items(f
, o
);
2796 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
2799 /* If we've found the right location, now look for the first non-corrupt entry object (in the right
2803 /* In the first iteration of the while loop, we reuse i, k and o from the previous while
2805 if (i
== UINT64_MAX
) {
2806 r
= bump_entry_array(f
, o
, a
, first
, direction
, &a
);
2810 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2814 k
= journal_file_entry_array_n_items(f
, o
);
2818 if (direction
== DIRECTION_DOWN
)
2821 /* We moved to the previous array. The total must be decreased. */
2823 return -EBADMSG
; /* chain cache is broken ? */
2833 p
= journal_file_entry_array_item(f
, o
, i
);
2835 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, ret_object
);
2837 /* Let's cache this item for the next invocation */
2838 chain_cache_put(f
->chain_cache
, ci
, first
, a
, journal_file_entry_array_item(f
, o
, 0), t
, i
);
2845 if (!IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
))
2848 /* OK, so this entry is borked. Most likely some entry didn't get synced to
2849 * disk properly, let's see if the next one might work for us instead. */
2850 log_debug_errno(r
, "Entry item %" PRIu64
" is bad, skipping over it.", i
);
2852 } while (bump_array_index(&i
, direction
, k
) > 0);
2854 if (direction
== DIRECTION_DOWN
)
2855 /* We are going to the next array, the total must be incremented. */
2864 static int generic_array_get_plus_one(
2869 direction_t direction
,
2870 Object
**ret_object
,
2871 uint64_t *ret_offset
) {
2877 /* FIXME: fix return value assignment on success. */
2880 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, ret_object
);
2881 if (IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
))
2882 return generic_array_get(f
, first
, 0, direction
, ret_object
, ret_offset
);
2887 *ret_offset
= extra
;
2892 return generic_array_get(f
, first
, i
- 1, direction
, ret_object
, ret_offset
);
2901 static int generic_array_bisect_one(
2903 uint64_t a
, /* offset of entry array object. */
2904 uint64_t i
, /* index of the entry item we will test. */
2906 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2907 direction_t direction
,
2910 uint64_t *ret_offset
) {
2917 assert(test_object
);
2921 assert(i
<= *right
);
2923 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
2927 p
= journal_file_entry_array_item(f
, array
, i
);
2931 r
= test_object(f
, p
, needle
);
2932 if (IN_SET(r
, -EBADMSG
, -EADDRNOTAVAIL
)) {
2933 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short.");
2935 return -ENOANO
; /* recognizable error */
2940 if (r
== TEST_FOUND
)
2941 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2943 if (r
== TEST_RIGHT
)
2954 static int generic_array_bisect(
2959 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2960 direction_t direction
,
2961 Object
**ret_object
,
2962 uint64_t *ret_offset
,
2963 uint64_t *ret_idx
) {
2965 /* Given an entry array chain, this function finds the object "closest" to the given needle in the
2966 * chain, taking into account the provided direction. A function can be provided to determine how
2967 * an object is matched against the given needle.
2969 * Given a journal file, the offset of an object and the needle, the test_object() function should
2970 * return TEST_LEFT if the needle is located earlier in the entry array chain, TEST_LEFT if the
2971 * needle is located later in the entry array chain and TEST_FOUND if the object matches the needle.
2972 * If test_object() returns TEST_FOUND for a specific object, that object's information will be used
2973 * to populate the return values of this function. If test_object() never returns TEST_FOUND, the
2974 * return values are populated with the details of one of the objects closest to the needle. If the
2975 * direction is DIRECTION_UP, the earlier object is used. Otherwise, the later object is used.
2978 uint64_t a
, p
, t
= 0, i
= 0, last_p
= 0, last_index
= UINT64_MAX
;
2979 bool subtract_one
= false;
2985 assert(test_object
);
2987 /* Start with the first array in the chain */
2990 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2991 if (ci
&& n
> ci
->total
&& ci
->begin
!= 0) {
2992 /* Ah, we have iterated this bisection array chain previously! Let's see if we can skip ahead
2993 * in the chain, as far as the last time. But we can't jump backwards in the chain, so let's
2994 * check that first. */
2996 r
= test_object(f
, ci
->begin
, needle
);
3000 if (r
== TEST_LEFT
) {
3001 /* OK, what we are looking for is right of the begin of this EntryArray, so let's
3002 * jump straight to previously cached array in the chain */
3007 last_index
= ci
->last_index
;
3012 uint64_t left
= 0, right
, k
, lp
;
3014 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
3018 k
= journal_file_entry_array_n_items(f
, array
);
3024 r
= generic_array_bisect_one(f
, a
, right
, needle
, test_object
, direction
, &left
, &right
, &lp
);
3032 if (r
== TEST_RIGHT
) {
3033 /* If we cached the last index we looked at, let's try to not to jump too wildly
3034 * around and see if we can limit the range to look at early to the immediate
3035 * neighbors of the last index we looked at. */
3037 if (last_index
> 0 && last_index
- 1 < right
) {
3038 r
= generic_array_bisect_one(f
, a
, last_index
- 1, needle
, test_object
, direction
, &left
, &right
, NULL
);
3039 if (r
< 0 && r
!= -ENOANO
)
3043 if (last_index
< right
) {
3044 r
= generic_array_bisect_one(f
, a
, last_index
+ 1, needle
, test_object
, direction
, &left
, &right
, NULL
);
3045 if (r
< 0 && r
!= -ENOANO
)
3050 if (left
== right
) {
3051 if (direction
== DIRECTION_UP
)
3052 subtract_one
= true;
3058 assert(left
< right
);
3059 i
= (left
+ right
) / 2;
3061 r
= generic_array_bisect_one(f
, a
, i
, needle
, test_object
, direction
, &left
, &right
, NULL
);
3062 if (r
< 0 && r
!= -ENOANO
)
3068 if (direction
== DIRECTION_UP
) {
3070 subtract_one
= true;
3081 last_index
= UINT64_MAX
;
3082 a
= le64toh(array
->entry_array
.next_entry_array_offset
);
3088 if (subtract_one
&& t
== 0 && i
== 0)
3091 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
3095 p
= journal_file_entry_array_item(f
, array
, 0);
3099 /* Let's cache this item for the next invocation */
3100 chain_cache_put(f
->chain_cache
, ci
, first
, a
, p
, t
, subtract_one
? (i
> 0 ? i
-1 : UINT64_MAX
) : i
);
3102 if (subtract_one
&& i
== 0)
3104 else if (subtract_one
)
3105 p
= journal_file_entry_array_item(f
, array
, i
- 1);
3107 p
= journal_file_entry_array_item(f
, array
, i
);
3110 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, ret_object
);
3119 *ret_idx
= t
+ i
+ (subtract_one
? -1 : 0);
3124 static int generic_array_bisect_plus_one(
3130 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
3131 direction_t direction
,
3132 Object
**ret_object
,
3133 uint64_t *ret_offset
,
3134 uint64_t *ret_idx
) {
3137 bool step_back
= false;
3140 assert(test_object
);
3145 /* This bisects the array in object 'first', but first checks
3147 r
= test_object(f
, extra
, needle
);
3151 if (r
== TEST_FOUND
)
3152 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
3154 /* if we are looking with DIRECTION_UP then we need to first
3155 see if in the actual array there is a matching entry, and
3156 return the last one of that. But if there isn't any we need
3157 to return this one. Hence remember this, and return it
3160 step_back
= direction
== DIRECTION_UP
;
3162 if (r
== TEST_RIGHT
) {
3163 if (direction
== DIRECTION_DOWN
)
3169 r
= generic_array_bisect(f
, first
, n
-1, needle
, test_object
, direction
, ret_object
, ret_offset
, ret_idx
);
3171 if (r
== 0 && step_back
)
3174 if (r
> 0 && ret_idx
)
3181 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, ret_object
);
3187 *ret_offset
= extra
;
3195 static int test_object_offset(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3201 else if (p
< needle
)
3207 int journal_file_move_to_entry_by_offset(
3210 direction_t direction
,
3211 Object
**ret_object
,
3212 uint64_t *ret_offset
) {
3217 return generic_array_bisect(
3219 le64toh(f
->header
->entry_array_offset
),
3220 le64toh(f
->header
->n_entries
),
3224 ret_object
, ret_offset
, NULL
);
3227 static int test_object_seqnum(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3235 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
3239 sq
= le64toh(READ_NOW(o
->entry
.seqnum
));
3242 else if (sq
< needle
)
3248 int journal_file_move_to_entry_by_seqnum(
3251 direction_t direction
,
3252 Object
**ret_object
,
3253 uint64_t *ret_offset
) {
3258 return generic_array_bisect(
3260 le64toh(f
->header
->entry_array_offset
),
3261 le64toh(f
->header
->n_entries
),
3265 ret_object
, ret_offset
, NULL
);
3268 static int test_object_realtime(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3276 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
3280 rt
= le64toh(READ_NOW(o
->entry
.realtime
));
3283 else if (rt
< needle
)
3289 int journal_file_move_to_entry_by_realtime(
3292 direction_t direction
,
3293 Object
**ret_object
,
3294 uint64_t *ret_offset
) {
3299 return generic_array_bisect(
3301 le64toh(f
->header
->entry_array_offset
),
3302 le64toh(f
->header
->n_entries
),
3304 test_object_realtime
,
3306 ret_object
, ret_offset
, NULL
);
3309 static int test_object_monotonic(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3317 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
3321 m
= le64toh(READ_NOW(o
->entry
.monotonic
));
3324 else if (m
< needle
)
3330 static int find_data_object_by_boot_id(
3333 Object
**ret_object
,
3334 uint64_t *ret_offset
) {
3336 char t
[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
3340 sd_id128_to_string(boot_id
, t
+ 9);
3341 return journal_file_find_data_object(f
, t
, sizeof(t
) - 1, ret_object
, ret_offset
);
3344 int journal_file_move_to_entry_by_monotonic(
3348 direction_t direction
,
3349 Object
**ret_object
,
3350 uint64_t *ret_offset
) {
3357 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, NULL
);
3361 return generic_array_bisect_plus_one(
3363 le64toh(o
->data
.entry_offset
),
3364 le64toh(o
->data
.entry_array_offset
),
3365 le64toh(o
->data
.n_entries
),
3367 test_object_monotonic
,
3369 ret_object
, ret_offset
, NULL
);
3372 void journal_file_reset_location(JournalFile
*f
) {
3375 f
->location_type
= LOCATION_HEAD
;
3376 f
->current_offset
= 0;
3377 f
->current_seqnum
= 0;
3378 f
->current_realtime
= 0;
3379 f
->current_monotonic
= 0;
3380 zero(f
->current_boot_id
);
3381 f
->current_xor_hash
= 0;
3383 /* Also reset the previous reading direction. Otherwise, next_beyond_location() may wrongly handle we
3384 * already hit EOF. See issue #29216. */
3385 f
->last_direction
= _DIRECTION_INVALID
;
3388 void journal_file_save_location(JournalFile
*f
, Object
*o
, uint64_t offset
) {
3392 f
->location_type
= LOCATION_SEEK
;
3393 f
->current_offset
= offset
;
3394 f
->current_seqnum
= le64toh(o
->entry
.seqnum
);
3395 f
->current_realtime
= le64toh(o
->entry
.realtime
);
3396 f
->current_monotonic
= le64toh(o
->entry
.monotonic
);
3397 f
->current_boot_id
= o
->entry
.boot_id
;
3398 f
->current_xor_hash
= le64toh(o
->entry
.xor_hash
);
3401 static bool check_properly_ordered(uint64_t new_offset
, uint64_t old_offset
, direction_t direction
) {
3403 /* Consider it an error if any of the two offsets is uninitialized */
3404 if (old_offset
== 0 || new_offset
== 0)
3407 /* If we go down, the new offset must be larger than the old one. */
3408 return direction
== DIRECTION_DOWN
?
3409 new_offset
> old_offset
:
3410 new_offset
< old_offset
;
3413 int journal_file_next_entry(
3416 direction_t direction
,
3417 Object
**ret_object
,
3418 uint64_t *ret_offset
) {
3426 /* FIXME: fix return value assignment. */
3428 n
= le64toh(READ_NOW(f
->header
->n_entries
));
3433 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
3435 r
= generic_array_bisect(f
,
3436 le64toh(f
->header
->entry_array_offset
),
3437 le64toh(f
->header
->n_entries
),
3446 r
= bump_array_index(&i
, direction
, n
);
3451 /* And jump to it */
3452 r
= generic_array_get(f
, le64toh(f
->header
->entry_array_offset
), i
, direction
, ret_object
, &ofs
);
3456 /* Ensure our array is properly ordered. */
3457 if (p
> 0 && !check_properly_ordered(ofs
, p
, direction
))
3458 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
3459 "%s: entry array not properly ordered at entry %" PRIu64
,
3468 int journal_file_next_entry_for_data(
3471 direction_t direction
,
3472 Object
**ret_object
,
3473 uint64_t *ret_offset
) {
3480 assert(d
->object
.type
== OBJECT_DATA
);
3482 /* FIXME: fix return value assignment. */
3484 n
= le64toh(READ_NOW(d
->data
.n_entries
));
3488 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
3490 r
= generic_array_get_plus_one(f
,
3491 le64toh(d
->data
.entry_offset
),
3492 le64toh(d
->data
.entry_array_offset
),
3505 int journal_file_move_to_entry_by_offset_for_data(
3509 direction_t direction
,
3510 Object
**ret
, uint64_t *ret_offset
) {
3514 assert(d
->object
.type
== OBJECT_DATA
);
3516 return generic_array_bisect_plus_one(
3518 le64toh(d
->data
.entry_offset
),
3519 le64toh(d
->data
.entry_array_offset
),
3520 le64toh(d
->data
.n_entries
),
3524 ret
, ret_offset
, NULL
);
3527 int journal_file_move_to_entry_by_monotonic_for_data(
3532 direction_t direction
,
3533 Object
**ret_object
,
3534 uint64_t *ret_offset
) {
3536 uint64_t b
, z
, entry_offset
, entry_array_offset
, n_entries
;
3542 assert(d
->object
.type
== OBJECT_DATA
);
3544 /* Save all the required data before the data object gets invalidated. */
3545 entry_offset
= le64toh(READ_NOW(d
->data
.entry_offset
));
3546 entry_array_offset
= le64toh(READ_NOW(d
->data
.entry_array_offset
));
3547 n_entries
= le64toh(READ_NOW(d
->data
.n_entries
));
3549 /* First, seek by time */
3550 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, &b
);
3554 r
= generic_array_bisect_plus_one(f
,
3555 le64toh(o
->data
.entry_offset
),
3556 le64toh(o
->data
.entry_array_offset
),
3557 le64toh(o
->data
.n_entries
),
3559 test_object_monotonic
,
3565 /* And now, continue seeking until we find an entry that
3566 * exists in both bisection arrays */
3568 r
= journal_file_move_to_object(f
, OBJECT_DATA
, b
, &o
);
3575 r
= generic_array_bisect_plus_one(f
,
3586 r
= generic_array_bisect_plus_one(f
,
3587 le64toh(o
->data
.entry_offset
),
3588 le64toh(o
->data
.entry_array_offset
),
3589 le64toh(o
->data
.n_entries
),
3600 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, q
, ret_object
);
3615 int journal_file_move_to_entry_by_seqnum_for_data(
3619 direction_t direction
,
3620 Object
**ret_object
,
3621 uint64_t *ret_offset
) {
3625 assert(d
->object
.type
== OBJECT_DATA
);
3627 return generic_array_bisect_plus_one(
3629 le64toh(d
->data
.entry_offset
),
3630 le64toh(d
->data
.entry_array_offset
),
3631 le64toh(d
->data
.n_entries
),
3635 ret_object
, ret_offset
, NULL
);
3638 int journal_file_move_to_entry_by_realtime_for_data(
3642 direction_t direction
,
3643 Object
**ret
, uint64_t *ret_offset
) {
3647 assert(d
->object
.type
== OBJECT_DATA
);
3649 return generic_array_bisect_plus_one(
3651 le64toh(d
->data
.entry_offset
),
3652 le64toh(d
->data
.entry_array_offset
),
3653 le64toh(d
->data
.n_entries
),
3655 test_object_realtime
,
3657 ret
, ret_offset
, NULL
);
3660 void journal_file_dump(JournalFile
*f
) {
3668 journal_file_print_header(f
);
3670 p
= le64toh(READ_NOW(f
->header
->header_size
));
3675 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &o
);
3679 s
= journal_object_type_to_string(o
->object
.type
);
3681 switch (o
->object
.type
) {
3686 printf("Type: %s seqnum=%"PRIu64
" monotonic=%"PRIu64
" realtime=%"PRIu64
"\n",
3688 le64toh(o
->entry
.seqnum
),
3689 le64toh(o
->entry
.monotonic
),
3690 le64toh(o
->entry
.realtime
));
3696 printf("Type: %s seqnum=%"PRIu64
" epoch=%"PRIu64
"\n",
3698 le64toh(o
->tag
.seqnum
),
3699 le64toh(o
->tag
.epoch
));
3704 printf("Type: %s \n", s
);
3706 printf("Type: unknown (%i)", o
->object
.type
);
3711 c
= COMPRESSION_FROM_OBJECT(o
);
3712 if (c
> COMPRESSION_NONE
)
3713 printf("Flags: %s\n",
3714 compression_to_string(c
));
3716 if (p
== le64toh(f
->header
->tail_object_offset
))
3719 p
+= ALIGN64(le64toh(o
->object
.size
));
3724 log_error("File corrupt");
3727 /* Note: the lifetime of the compound literal is the immediately surrounding block. */
3728 #define FORMAT_TIMESTAMP_SAFE(t) (FORMAT_TIMESTAMP(t) ?: " --- ")
3730 void journal_file_print_header(JournalFile
*f
) {
3736 printf("File path: %s\n"
3740 "Sequential number ID: %s\n"
3742 "Compatible flags:%s%s%s\n"
3743 "Incompatible flags:%s%s%s%s%s%s\n"
3744 "Header size: %"PRIu64
"\n"
3745 "Arena size: %"PRIu64
"\n"
3746 "Data hash table size: %"PRIu64
"\n"
3747 "Field hash table size: %"PRIu64
"\n"
3748 "Rotate suggested: %s\n"
3749 "Head sequential number: %"PRIu64
" (%"PRIx64
")\n"
3750 "Tail sequential number: %"PRIu64
" (%"PRIx64
")\n"
3751 "Head realtime timestamp: %s (%"PRIx64
")\n"
3752 "Tail realtime timestamp: %s (%"PRIx64
")\n"
3753 "Tail monotonic timestamp: %s (%"PRIx64
")\n"
3754 "Objects: %"PRIu64
"\n"
3755 "Entry objects: %"PRIu64
"\n",
3757 SD_ID128_TO_STRING(f
->header
->file_id
),
3758 SD_ID128_TO_STRING(f
->header
->machine_id
),
3759 SD_ID128_TO_STRING(f
->header
->tail_entry_boot_id
),
3760 SD_ID128_TO_STRING(f
->header
->seqnum_id
),
3761 f
->header
->state
== STATE_OFFLINE
? "OFFLINE" :
3762 f
->header
->state
== STATE_ONLINE
? "ONLINE" :
3763 f
->header
->state
== STATE_ARCHIVED
? "ARCHIVED" : "UNKNOWN",
3764 JOURNAL_HEADER_SEALED(f
->header
) ? " SEALED" : "",
3765 JOURNAL_HEADER_TAIL_ENTRY_BOOT_ID(f
->header
) ? " TAIL_ENTRY_BOOT_ID" : "",
3766 (le32toh(f
->header
->compatible_flags
) & ~HEADER_COMPATIBLE_ANY
) ? " ???" : "",
3767 JOURNAL_HEADER_COMPRESSED_XZ(f
->header
) ? " COMPRESSED-XZ" : "",
3768 JOURNAL_HEADER_COMPRESSED_LZ4(f
->header
) ? " COMPRESSED-LZ4" : "",
3769 JOURNAL_HEADER_COMPRESSED_ZSTD(f
->header
) ? " COMPRESSED-ZSTD" : "",
3770 JOURNAL_HEADER_KEYED_HASH(f
->header
) ? " KEYED-HASH" : "",
3771 JOURNAL_HEADER_COMPACT(f
->header
) ? " COMPACT" : "",
3772 (le32toh(f
->header
->incompatible_flags
) & ~HEADER_INCOMPATIBLE_ANY
) ? " ???" : "",
3773 le64toh(f
->header
->header_size
),
3774 le64toh(f
->header
->arena_size
),
3775 le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
),
3776 le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
),
3777 yes_no(journal_file_rotate_suggested(f
, 0, LOG_DEBUG
)),
3778 le64toh(f
->header
->head_entry_seqnum
), le64toh(f
->header
->head_entry_seqnum
),
3779 le64toh(f
->header
->tail_entry_seqnum
), le64toh(f
->header
->tail_entry_seqnum
),
3780 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->head_entry_realtime
)), le64toh(f
->header
->head_entry_realtime
),
3781 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->tail_entry_realtime
)), le64toh(f
->header
->tail_entry_realtime
),
3782 FORMAT_TIMESPAN(le64toh(f
->header
->tail_entry_monotonic
), USEC_PER_MSEC
), le64toh(f
->header
->tail_entry_monotonic
),
3783 le64toh(f
->header
->n_objects
),
3784 le64toh(f
->header
->n_entries
));
3786 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
3787 printf("Data objects: %"PRIu64
"\n"
3788 "Data hash table fill: %.1f%%\n",
3789 le64toh(f
->header
->n_data
),
3790 100.0 * (double) le64toh(f
->header
->n_data
) / ((double) (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
))));
3792 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
3793 printf("Field objects: %"PRIu64
"\n"
3794 "Field hash table fill: %.1f%%\n",
3795 le64toh(f
->header
->n_fields
),
3796 100.0 * (double) le64toh(f
->header
->n_fields
) / ((double) (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
))));
3798 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_tags
))
3799 printf("Tag objects: %"PRIu64
"\n",
3800 le64toh(f
->header
->n_tags
));
3801 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
3802 printf("Entry array objects: %"PRIu64
"\n",
3803 le64toh(f
->header
->n_entry_arrays
));
3805 if (JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
))
3806 printf("Deepest field hash chain: %" PRIu64
"\n",
3807 f
->header
->field_hash_chain_depth
);
3809 if (JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
))
3810 printf("Deepest data hash chain: %" PRIu64
"\n",
3811 f
->header
->data_hash_chain_depth
);
3813 if (fstat(f
->fd
, &st
) >= 0)
3814 printf("Disk usage: %s\n", FORMAT_BYTES((uint64_t) st
.st_blocks
* 512ULL));
3817 static int journal_file_warn_btrfs(JournalFile
*f
) {
3823 /* Before we write anything, check if the COW logic is turned
3824 * off on btrfs. Given our write pattern that is quite
3825 * unfriendly to COW file systems this should greatly improve
3826 * performance on COW file systems, such as btrfs, at the
3827 * expense of data integrity features (which shouldn't be too
3828 * bad, given that we do our own checksumming). */
3830 r
= fd_is_fs_type(f
->fd
, BTRFS_SUPER_MAGIC
);
3832 return log_ratelimit_warning_errno(r
, JOURNAL_LOG_RATELIMIT
, "Failed to determine if journal is on btrfs: %m");
3836 r
= read_attr_fd(f
->fd
, &attrs
);
3838 return log_ratelimit_warning_errno(r
, JOURNAL_LOG_RATELIMIT
, "Failed to read file attributes: %m");
3840 if (attrs
& FS_NOCOW_FL
) {
3841 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3845 log_ratelimit_notice(JOURNAL_LOG_RATELIMIT
,
3846 "Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3847 "This is likely to slow down journal access substantially, please consider turning "
3848 "off the copy-on-write file attribute on the journal directory, using chattr +C.",
3854 static void journal_default_metrics(JournalMetrics
*m
, int fd
, bool compact
) {
3856 uint64_t fs_size
= 0;
3861 if (fstatvfs(fd
, &ss
) >= 0)
3862 fs_size
= ss
.f_frsize
* ss
.f_blocks
;
3864 log_debug_errno(errno
, "Failed to determine disk size: %m");
3866 if (m
->max_use
== UINT64_MAX
) {
3869 m
->max_use
= CLAMP(PAGE_ALIGN(fs_size
/ 10), /* 10% of file system size */
3870 MAX_USE_LOWER
, MAX_USE_UPPER
);
3872 m
->max_use
= MAX_USE_LOWER
;
3874 m
->max_use
= PAGE_ALIGN(m
->max_use
);
3876 if (m
->max_use
!= 0 && m
->max_use
< JOURNAL_FILE_SIZE_MIN
*2)
3877 m
->max_use
= JOURNAL_FILE_SIZE_MIN
*2;
3880 if (m
->min_use
== UINT64_MAX
) {
3882 m
->min_use
= CLAMP(PAGE_ALIGN(fs_size
/ 50), /* 2% of file system size */
3883 MIN_USE_LOW
, MIN_USE_HIGH
);
3885 m
->min_use
= MIN_USE_LOW
;
3888 if (m
->min_use
> m
->max_use
)
3889 m
->min_use
= m
->max_use
;
3891 if (m
->max_size
== UINT64_MAX
)
3892 m
->max_size
= MIN(PAGE_ALIGN(m
->max_use
/ 8), /* 8 chunks */
3895 m
->max_size
= PAGE_ALIGN(m
->max_size
);
3897 if (compact
&& m
->max_size
> JOURNAL_COMPACT_SIZE_MAX
)
3898 m
->max_size
= JOURNAL_COMPACT_SIZE_MAX
;
3900 if (m
->max_size
!= 0) {
3901 if (m
->max_size
< JOURNAL_FILE_SIZE_MIN
)
3902 m
->max_size
= JOURNAL_FILE_SIZE_MIN
;
3904 if (m
->max_use
!= 0 && m
->max_size
*2 > m
->max_use
)
3905 m
->max_use
= m
->max_size
*2;
3908 if (m
->min_size
== UINT64_MAX
)
3909 m
->min_size
= JOURNAL_FILE_SIZE_MIN
;
3911 m
->min_size
= CLAMP(PAGE_ALIGN(m
->min_size
),
3912 JOURNAL_FILE_SIZE_MIN
,
3913 m
->max_size
?: UINT64_MAX
);
3915 if (m
->keep_free
== UINT64_MAX
) {
3917 m
->keep_free
= MIN(PAGE_ALIGN(fs_size
/ 20), /* 5% of file system size */
3920 m
->keep_free
= DEFAULT_KEEP_FREE
;
3923 if (m
->n_max_files
== UINT64_MAX
)
3924 m
->n_max_files
= DEFAULT_N_MAX_FILES
;
3926 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64
,
3927 FORMAT_BYTES(m
->min_use
),
3928 FORMAT_BYTES(m
->max_use
),
3929 FORMAT_BYTES(m
->max_size
),
3930 FORMAT_BYTES(m
->min_size
),
3931 FORMAT_BYTES(m
->keep_free
),
3935 int journal_file_open(
3939 JournalFileFlags file_flags
,
3941 uint64_t compress_threshold_bytes
,
3942 JournalMetrics
*metrics
,
3943 MMapCache
*mmap_cache
,
3944 JournalFile
*template,
3945 JournalFile
**ret
) {
3947 bool newly_created
= false;
3952 assert(fd
>= 0 || fname
);
3953 assert(file_flags
>= 0);
3954 assert(file_flags
<= _JOURNAL_FILE_FLAGS_MAX
);
3958 if (!IN_SET((open_flags
& O_ACCMODE
), O_RDONLY
, O_RDWR
))
3961 if ((open_flags
& O_ACCMODE
) == O_RDONLY
&& FLAGS_SET(open_flags
, O_CREAT
))
3964 if (fname
&& (open_flags
& O_CREAT
) && !endswith(fname
, ".journal"))
3967 f
= new(JournalFile
, 1);
3971 *f
= (JournalFile
) {
3974 .open_flags
= open_flags
,
3975 .compress_threshold_bytes
= compress_threshold_bytes
== UINT64_MAX
?
3976 DEFAULT_COMPRESS_THRESHOLD
:
3977 MAX(MIN_COMPRESS_THRESHOLD
, compress_threshold_bytes
),
3978 .strict_order
= FLAGS_SET(file_flags
, JOURNAL_STRICT_ORDER
),
3979 .newest_boot_id_prioq_idx
= PRIOQ_IDX_NULL
,
3980 .last_direction
= _DIRECTION_INVALID
,
3984 f
->path
= strdup(fname
);
3992 /* If we don't know the path, fill in something explanatory and vaguely useful */
3993 if (asprintf(&f
->path
, "/proc/self/%i", fd
) < 0) {
3999 f
->chain_cache
= ordered_hashmap_new(&uint64_hash_ops
);
4000 if (!f
->chain_cache
) {
4006 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
4007 * or so, we likely fail quickly than block for long. For regular files O_NONBLOCK has no effect, hence
4008 * it doesn't hurt in that case. */
4010 f
->fd
= openat_report_new(AT_FDCWD
, f
->path
, f
->open_flags
|O_CLOEXEC
|O_NONBLOCK
, f
->mode
, &newly_created
);
4016 /* fds we opened here by us should also be closed by us. */
4019 r
= fd_nonblock(f
->fd
, false);
4023 if (!newly_created
) {
4024 r
= journal_file_fstat(f
);
4029 r
= journal_file_fstat(f
);
4033 /* If we just got the fd passed in, we don't really know if we created the file anew */
4034 newly_created
= f
->last_stat
.st_size
== 0 && journal_file_writable(f
);
4037 f
->cache_fd
= mmap_cache_add_fd(mmap_cache
, f
->fd
, mmap_prot_from_open_flags(open_flags
));
4043 if (newly_created
) {
4044 (void) journal_file_warn_btrfs(f
);
4046 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
4047 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
4048 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
4049 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
4050 * solely on mtime/atime/ctime of the file. */
4051 (void) fd_setcrtime(f
->fd
, 0);
4053 r
= journal_file_init_header(f
, file_flags
, template);
4057 r
= journal_file_fstat(f
);
4062 if (f
->last_stat
.st_size
< (off_t
) HEADER_SIZE_MIN
) {
4067 r
= mmap_cache_fd_get(f
->cache_fd
, CONTEXT_HEADER
, true, 0, PAGE_ALIGN(sizeof(Header
)), &f
->last_stat
, &h
);
4069 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
4070 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
4080 if (!newly_created
) {
4081 r
= journal_file_verify_header(f
);
4087 if (!newly_created
&& journal_file_writable(f
) && JOURNAL_HEADER_SEALED(f
->header
)) {
4088 r
= journal_file_fss_load(f
);
4094 if (journal_file_writable(f
)) {
4096 journal_default_metrics(metrics
, f
->fd
, JOURNAL_HEADER_COMPACT(f
->header
));
4097 f
->metrics
= *metrics
;
4098 } else if (template)
4099 f
->metrics
= template->metrics
;
4101 r
= journal_file_refresh_header(f
);
4107 r
= journal_file_hmac_setup(f
);
4112 if (newly_created
) {
4113 r
= journal_file_setup_field_hash_table(f
);
4117 r
= journal_file_setup_data_hash_table(f
);
4122 r
= journal_file_append_first_tag(f
);
4128 if (mmap_cache_fd_got_sigbus(f
->cache_fd
)) {
4133 if (template && template->post_change_timer
) {
4134 r
= journal_file_enable_post_change_timer(
4136 sd_event_source_get_event(template->post_change_timer
),
4137 template->post_change_timer_period
);
4143 /* The file is opened now successfully, thus we take possession of any passed in fd. */
4146 if (DEBUG_LOGGING
) {
4147 static int last_seal
= -1, last_keyed_hash
= -1;
4148 static Compression last_compression
= _COMPRESSION_INVALID
;
4149 static uint64_t last_bytes
= UINT64_MAX
;
4151 if (last_seal
!= JOURNAL_HEADER_SEALED(f
->header
) ||
4152 last_keyed_hash
!= JOURNAL_HEADER_KEYED_HASH(f
->header
) ||
4153 last_compression
!= JOURNAL_FILE_COMPRESSION(f
) ||
4154 last_bytes
!= f
->compress_threshold_bytes
) {
4156 log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
4157 yes_no(JOURNAL_HEADER_SEALED(f
->header
)), yes_no(JOURNAL_HEADER_KEYED_HASH(f
->header
)),
4158 compression_to_string(JOURNAL_FILE_COMPRESSION(f
)), FORMAT_BYTES(f
->compress_threshold_bytes
));
4159 last_seal
= JOURNAL_HEADER_SEALED(f
->header
);
4160 last_keyed_hash
= JOURNAL_HEADER_KEYED_HASH(f
->header
);
4161 last_compression
= JOURNAL_FILE_COMPRESSION(f
);
4162 last_bytes
= f
->compress_threshold_bytes
;
4170 if (f
->cache_fd
&& mmap_cache_fd_got_sigbus(f
->cache_fd
))
4173 (void) journal_file_close(f
);
4175 if (newly_created
&& fd
< 0)
4176 (void) unlink(fname
);
4181 int journal_file_parse_uid_from_filename(const char *path
, uid_t
*ret_uid
) {
4182 _cleanup_free_
char *buf
= NULL
, *p
= NULL
;
4183 const char *a
, *b
, *at
;
4186 /* This helper returns -EREMOTE when the filename doesn't match user online/offline journal
4187 * pattern. Hence it currently doesn't parse archived or disposed user journals. */
4192 r
= path_extract_filename(path
, &p
);
4195 if (r
== O_DIRECTORY
)
4198 a
= startswith(p
, "user-");
4201 b
= endswith(p
, ".journal");
4205 at
= strchr(a
, '@');
4209 buf
= strndup(a
, b
-a
);
4213 return parse_uid(buf
, ret_uid
);
4216 int journal_file_archive(JournalFile
*f
, char **ret_previous_path
) {
4217 _cleanup_free_
char *p
= NULL
;
4221 if (!journal_file_writable(f
))
4224 /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
4225 * rotation, since we don't know the actual path, and couldn't rename the file hence. */
4226 if (path_startswith(f
->path
, "/proc/self/fd"))
4229 if (!endswith(f
->path
, ".journal"))
4232 if (asprintf(&p
, "%.*s@" SD_ID128_FORMAT_STR
"-%016"PRIx64
"-%016"PRIx64
".journal",
4233 (int) strlen(f
->path
) - 8, f
->path
,
4234 SD_ID128_FORMAT_VAL(f
->header
->seqnum_id
),
4235 le64toh(f
->header
->head_entry_seqnum
),
4236 le64toh(f
->header
->head_entry_realtime
)) < 0)
4239 /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
4240 * ignore that case. */
4241 if (rename(f
->path
, p
) < 0 && errno
!= ENOENT
)
4244 /* Sync the rename to disk */
4245 (void) fsync_directory_of_file(f
->fd
);
4247 if (ret_previous_path
)
4248 *ret_previous_path
= f
->path
;
4252 f
->path
= TAKE_PTR(p
);
4254 /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
4255 * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
4256 * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
4257 * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
int journal_file_dispose(int dir_fd, const char *fname) {
        _cleanup_free_ char *p = NULL;

        /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shutdown. Note that
         * this is done without looking into the file or changing any of its contents. The idea is that this is called
         * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
         * for writing anymore. */

        if (!endswith(fname, ".journal"))
                /* (error return elided in this excerpt) */

        /* Build "<name minus .journal>@<realtime>-<...>.journal~"; the trailing '~' marks the
         * file as disposed. NOTE(review): part of the asprintf() argument list and its error
         * check are elided in this excerpt. */
        if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
                     (int) strlen(fname) - 8, fname,
                     now(CLOCK_REALTIME),

        /* Rename relative to dir_fd, so this works independently of the current directory. */
        if (renameat(dir_fd, fname, dir_fd, p) < 0)
                /* (error return elided in this excerpt) */
int journal_file_copy_entry(
                sd_id128_t *seqnum_id) {

        /* Copies a single entry object (and the data objects it references) from one journal file
         * into another, recomputing the entry's XOR hash according to the destination's hashing
         * mode. NOTE(review): most of the parameter list, several local declarations, and the
         * error checks between the calls below are elided in this excerpt; 'from', 'to', 'o',
         * 'p', 'items', 'data', 'l', 'u', 'h', 'q', 'r', 'ts' and 'boot_id' come from the elided
         * portions. */

        _cleanup_free_ EntryItem *items_alloc = NULL;
        uint64_t n, m = 0, xor_hash = 0;

        if (!journal_file_writable(to))
                /* (error return elided in this excerpt) */

        /* Carry the source entry's timestamps and boot ID over unchanged. */
        ts = (dual_timestamp) {
                .monotonic = le64toh(o->entry.monotonic),
                .realtime = le64toh(o->entry.realtime),
        boot_id = o->entry.boot_id;

        n = journal_file_entry_n_items(from, o);

        /* Small item arrays go on the stack; large ones on the heap (bounded via ALLOCA_MAX). */
        if (n < ALLOCA_MAX / sizeof(EntryItem) / 2)
                items = newa(EntryItem, n);
                items_alloc = new(EntryItem, n);
                /* (OOM check elided in this excerpt) */

        items = items_alloc;

        /* Copy each referenced data object into the destination file, collecting the new
         * offsets/hashes as entry items. */
        for (uint64_t i = 0; i < n; i++) {

                q = journal_file_entry_item_object_offset(from, o, i);
                r = journal_file_data_payload(from, NULL, q, NULL, 0, 0, &data, &l);
                if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
                        /* Tolerate broken data objects in the source: log and skip over them. */
                        log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", i);

                r = journal_file_append_data(to, data, l, &u, &h);

                /* Keyed-hash destinations XOR the per-payload jenkins hash; legacy destinations
                 * XOR the data object's stored hash. */
                if (JOURNAL_HEADER_KEYED_HASH(to->header))
                        xor_hash ^= jenkins_hash64(data, l);
                        xor_hash ^= le64toh(u->data.hash);

                items[m++] = (EntryItem) {
                        .hash = le64toh(u->data.hash),

                /* The above journal_file_data_payload() may clear or overwrite cached object. Hence, we need
                 * to re-read the object from the cache. */
                r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);

        /* Append the reconstructed entry to the destination file. NOTE(review): the intermediate
         * arguments of this call are elided in this excerpt. */
        r = journal_file_append_entry_internal(
                        &from->header->machine_id,
                        /* ret_object= */ NULL,
                        /* ret_offset= */ NULL);

        /* Check whether the destination mapping took a SIGBUS (e.g. truncated file) meanwhile. */
        if (mmap_cache_fd_got_sigbus(to->cache_fd))
                /* (error handling elided in this excerpt) */
void journal_reset_metrics(JournalMetrics *m) {

        /* Set everything to "pick automatic values". UINT64_MAX serves as the sentinel meaning
         * "unset, derive automatically later".
         * NOTE(review): the closing braces of this initializer/function are elided in this
         * excerpt. */
        *m = (JournalMetrics) {
                .min_use = UINT64_MAX,
                .max_use = UINT64_MAX,
                .min_size = UINT64_MAX,
                .max_size = UINT64_MAX,
                .keep_free = UINT64_MAX,
                .n_max_files = UINT64_MAX,
int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *ret_from, usec_t *ret_to) {

        /* Reports the realtime timestamps of the first and last entries in the file, converting
         * them from the on-disk little-endian representation. Either output pointer may be NULL,
         * but not both. NOTE(review): the guards and return statements between the statements
         * below are elided in this excerpt. */

        assert(ret_from || ret_to);

        /* A zero head timestamp means the file contains no entries (yet). */
        if (f->header->head_entry_realtime == 0)

        *ret_from = le64toh(f->header->head_entry_realtime);

        /* Same for the tail timestamp. */
        if (f->header->tail_entry_realtime == 0)

        *ret_to = le64toh(f->header->tail_entry_realtime);
int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *ret_from, usec_t *ret_to) {

        /* Reports the monotonic timestamps of the first and last entries recorded for the given
         * boot ID, by looking up the boot's data object and walking its entry list.
         * NOTE(review): the error checks/returns between the calls below are elided in this
         * excerpt. */

        assert(ret_from || ret_to);

        /* FIXME: fix return value assignment on success with 0. */

        /* Find the data object that indexes all entries of this boot. */
        r = find_data_object_by_boot_id(f, boot_id, &o, &p);

        if (le64toh(o->data.n_entries) <= 0)
                /* (early return elided in this excerpt) */

        /* The boot's first entry yields the lower cutoff. */
        r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);

        *ret_from = le64toh(o->entry.monotonic);

        /* Re-fetch the data object at offset p — presumably the move above invalidated the
         * cached mapping (TODO confirm against journal_file_move_to_object() semantics). */
        r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

        /* The boot's last entry (index n_entries - 1) yields the upper cutoff. NOTE(review):
         * trailing arguments of this call are elided in this excerpt. */
        r = generic_array_get_plus_one(f,
                                       le64toh(o->data.entry_offset),
                                       le64toh(o->data.entry_array_offset),
                                       le64toh(o->data.n_entries) - 1,

        *ret_to = le64toh(o->entry.monotonic);
bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec, int log_level) {

        /* Heuristically decides whether this journal file should be rotated, logging the reason
         * at the given (rate-limited) log level. Checks, in order: outdated header, hash table
         * fill level, hash chain depth, missing field indexing, and file age.
         * NOTE(review): the "return true/false" statements and some log_ratelimit_full() call
         * heads are elided in this excerpt. */

        /* If we gained new header fields we gained new features,
         * hence suggest a rotation */
        if (le64toh(f->header->header_size) < sizeof(Header)) {
                log_ratelimit_full(log_level, JOURNAL_LOG_RATELIMIT,
                                   "%s uses an outdated header, suggesting rotation.", f->path);

        /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
         * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
         * need the n_data field, which only exists in newer versions. */

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                /* n_data * 4 > slots * 3  <=>  fill level above 75%. */
                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                                log_level, JOURNAL_LOG_RATELIMIT,
                                "Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %"PRIu64" file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
                                100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
                                le64toh(f->header->n_data),
                                le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
                                (uint64_t) f->last_stat.st_size,
                                f->last_stat.st_size / le64toh(f->header->n_data));

        /* Same 75% fill-level check for the field hash table. */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                                log_level, JOURNAL_LOG_RATELIMIT,
                                "Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
                                100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
                                le64toh(f->header->n_fields),
                                le64toh(f->header->field_hash_table_size) / sizeof(HashItem));

        /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
         * longest chain is longer than some threshold, let's suggest rotation. */
        if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) &&
            le64toh(f->header->data_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
                        log_level, JOURNAL_LOG_RATELIMIT,
                        "Data hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
                        f->path, le64toh(f->header->data_hash_chain_depth));

        /* Same chain-depth check for the field hash table. */
        if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) &&
            le64toh(f->header->field_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
                        log_level, JOURNAL_LOG_RATELIMIT,
                        "Field hash table of %s has deepest hash chain of length at %" PRIu64 ", suggesting rotation.",
                        f->path, le64toh(f->header->field_hash_chain_depth));

        /* Are the data objects properly indexed by field objects? */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
            JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
            le64toh(f->header->n_data) > 0 &&
            le64toh(f->header->n_fields) == 0) {
                        log_level, JOURNAL_LOG_RATELIMIT,
                        "Data objects of %s are not indexed by field objects, suggesting rotation.",

        /* Age-based rotation: compare the oldest entry's realtime stamp against the configured
         * retention duration. */
        if (max_file_usec > 0) {

                h = le64toh(f->header->head_entry_realtime);
                t = now(CLOCK_REALTIME);

                /* h == 0 means "no entries"; skip the age check then. */
                if (h > 0 && t > h + max_file_usec) {
                                log_level, JOURNAL_LOG_RATELIMIT,
                                "Oldest entry in %s is older than the configured file retention duration (%s), suggesting rotation.",
                                f->path, FORMAT_TIMESPAN(max_file_usec, USEC_PER_SEC));
/* Human-readable names for the on-disk journal object types, indexed by ObjectType.
 * NOTE(review): the closing "};" of this table is elided in this excerpt. */
static const char * const journal_object_type_table[] = {
        [OBJECT_UNUSED] = "unused",
        [OBJECT_DATA] = "data",
        [OBJECT_FIELD] = "field",
        [OBJECT_ENTRY] = "entry",
        [OBJECT_DATA_HASH_TABLE] = "data hash table",
        [OBJECT_FIELD_HASH_TABLE] = "field hash table",
        [OBJECT_ENTRY_ARRAY] = "entry array",
        [OBJECT_TAG] = "tag",

/* Generates journal_object_type_to_string() from the table above. */
DEFINE_STRING_TABLE_LOOKUP_TO_STRING(journal_object_type, ObjectType);