1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
6 #include <linux/magic.h>
10 #include <sys/statvfs.h>
16 #include "alloc-util.h"
17 #include "chattr-util.h"
21 #include "format-util.h"
23 #include "journal-authenticate.h"
24 #include "journal-def.h"
25 #include "journal-file.h"
27 #include "memory-util.h"
28 #include "path-util.h"
29 #include "random-util.h"
31 #include "sort-util.h"
32 #include "stat-util.h"
33 #include "string-table.h"
34 #include "string-util.h"
36 #include "sync-util.h"
37 #include "xattr-util.h"
39 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
40 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
42 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
43 #define MIN_COMPRESS_THRESHOLD (8ULL)
45 /* This is the minimum journal file size */
46 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
47 #define JOURNAL_COMPACT_SIZE_MAX UINT32_MAX /* 4 GiB */
49 /* These are the lower and upper bounds if we deduce the max_use value
50 * from the file system size */
51 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
52 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
54 /* Those are the lower and upper bounds for the minimal use limit,
55 * i.e. how much we'll use even if keep_free suggests otherwise. */
56 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
57 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
59 /* This is the upper bound if we deduce max_size from max_use */
60 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
62 /* This is the upper bound if we deduce the keep_free value from the
 * file system size */
64 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
66 /* This is the keep_free value when we can't determine the system
 * size */
68 #define DEFAULT_KEEP_FREE (1024 * 1024ULL) /* 1 MB */
70 /* This is the default maximum number of journal files to keep around. */
71 #define DEFAULT_N_MAX_FILES 100
73 /* n_data was the first entry we added after the initial file format design */
74 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
76 /* How many entries to keep in the entry array chain cache at max */
77 #define CHAIN_CACHE_MAX 20
79 /* How much to increase the journal file size at once each time we allocate something new. */
80 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL) /* 8MB */
82 /* Reread fstat() of the file for detecting deletions at least this often */
83 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
85 /* The mmap context to use for the header we pick as one above the last defined type */
86 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
88 /* Longest hash chain to rotate after */
89 #define HASH_CHAIN_DEPTH_MAX 100
92 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
95 static int mmap_prot_from_open_flags(int flags
) {
96 switch (flags
& O_ACCMODE
) {
102 return PROT_READ
|PROT_WRITE
;
104 assert_not_reached();
108 int journal_file_tail_end_by_pread(JournalFile
*f
, uint64_t *ret_offset
) {
116 /* Same as journal_file_tail_end_by_mmap() below, but operates with pread() to avoid the mmap cache
117 * (and thus is thread safe) */
119 p
= le64toh(f
->header
->tail_object_offset
);
121 p
= le64toh(f
->header
->header_size
);
126 r
= journal_file_read_object_header(f
, OBJECT_UNUSED
, p
, &tail
);
130 sz
= le64toh(tail
.object
.size
);
131 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
135 if (p
> UINT64_MAX
- sz
)
146 int journal_file_tail_end_by_mmap(JournalFile
*f
, uint64_t *ret_offset
) {
154 /* Same as journal_file_tail_end_by_pread() above, but operates with the usual mmap logic */
156 p
= le64toh(f
->header
->tail_object_offset
);
158 p
= le64toh(f
->header
->header_size
);
163 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &tail
);
167 sz
= le64toh(READ_NOW(tail
->object
.size
));
168 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
172 if (p
> UINT64_MAX
- sz
)
183 int journal_file_set_offline_thread_join(JournalFile
*f
) {
188 if (f
->offline_state
== OFFLINE_JOINED
)
191 r
= pthread_join(f
->offline_thread
, NULL
);
195 f
->offline_state
= OFFLINE_JOINED
;
197 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
203 static int journal_file_set_online(JournalFile
*f
) {
208 if (!journal_file_writable(f
))
211 if (f
->fd
< 0 || !f
->header
)
215 switch (f
->offline_state
) {
217 /* No offline thread, no need to wait. */
221 case OFFLINE_SYNCING
: {
222 OfflineState tmp_state
= OFFLINE_SYNCING
;
223 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
224 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
227 /* Canceled syncing prior to offlining, no need to wait. */
231 case OFFLINE_AGAIN_FROM_SYNCING
: {
232 OfflineState tmp_state
= OFFLINE_AGAIN_FROM_SYNCING
;
233 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
234 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
237 /* Canceled restart from syncing, no need to wait. */
241 case OFFLINE_AGAIN_FROM_OFFLINING
: {
242 OfflineState tmp_state
= OFFLINE_AGAIN_FROM_OFFLINING
;
243 if (!__atomic_compare_exchange_n(&f
->offline_state
, &tmp_state
, OFFLINE_CANCEL
,
244 false, __ATOMIC_SEQ_CST
, __ATOMIC_SEQ_CST
))
247 /* Canceled restart from offlining, must wait for offlining to complete however. */
252 r
= journal_file_set_offline_thread_join(f
);
262 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
265 switch (f
->header
->state
) {
270 f
->header
->state
= STATE_ONLINE
;
279 JournalFile
* journal_file_close(JournalFile
*f
) {
284 mmap_cache_fd_free(f
->cache_fd
);
290 ordered_hashmap_free_free(f
->chain_cache
);
293 free(f
->compress_buffer
);
298 munmap(f
->fss_file
, PAGE_ALIGN(f
->fss_file_size
));
300 free(f
->fsprg_state
);
305 gcry_md_close(f
->hmac
);
311 static bool keyed_hash_requested(void) {
314 r
= getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
318 log_debug_errno(r
, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
323 static bool compact_mode_requested(void) {
326 r
= getenv_bool("SYSTEMD_JOURNAL_COMPACT");
330 log_debug_errno(r
, "Failed to parse $SYSTEMD_JOURNAL_COMPACT environment variable, ignoring: %m");
335 static int journal_file_init_header(JournalFile
*f
, JournalFileFlags file_flags
, JournalFile
*template) {
344 /* Try to load the FSPRG state, and if we can't, then just don't do sealing */
345 seal
= FLAGS_SET(file_flags
, JOURNAL_SEAL
) && journal_file_fss_load(f
) >= 0;
348 memcpy(h
.signature
, HEADER_SIGNATURE
, 8);
349 h
.header_size
= htole64(ALIGN64(sizeof(h
)));
351 h
.incompatible_flags
|= htole32(
352 FLAGS_SET(file_flags
, JOURNAL_COMPRESS
) *
353 COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(DEFAULT_COMPRESSION
) |
354 keyed_hash_requested() * HEADER_INCOMPATIBLE_KEYED_HASH
|
355 compact_mode_requested() * HEADER_INCOMPATIBLE_COMPACT
);
357 h
.compatible_flags
= htole32(seal
* HEADER_COMPATIBLE_SEALED
);
359 r
= sd_id128_randomize(&h
.file_id
);
364 h
.seqnum_id
= template->header
->seqnum_id
;
365 h
.tail_entry_seqnum
= template->header
->tail_entry_seqnum
;
367 h
.seqnum_id
= h
.file_id
;
369 k
= pwrite(f
->fd
, &h
, sizeof(h
), 0);
379 static int journal_file_refresh_header(JournalFile
*f
) {
385 r
= sd_id128_get_machine(&f
->header
->machine_id
);
386 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
387 /* We don't have a machine-id, let's continue without */
388 zero(f
->header
->machine_id
);
392 r
= sd_id128_get_boot(&f
->header
->boot_id
);
396 r
= journal_file_set_online(f
);
398 /* Sync the online state to disk; likely just created a new file, also sync the directory this file
400 (void) fsync_full(f
->fd
);
405 static bool warn_wrong_flags(const JournalFile
*f
, bool compatible
) {
406 const uint32_t any
= compatible
? HEADER_COMPATIBLE_ANY
: HEADER_INCOMPATIBLE_ANY
,
407 supported
= compatible
? HEADER_COMPATIBLE_SUPPORTED
: HEADER_INCOMPATIBLE_SUPPORTED
;
408 const char *type
= compatible
? "compatible" : "incompatible";
414 flags
= le32toh(compatible
? f
->header
->compatible_flags
: f
->header
->incompatible_flags
);
416 if (flags
& ~supported
) {
418 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32
,
419 f
->path
, type
, flags
& ~any
);
420 flags
= (flags
& any
) & ~supported
;
424 _cleanup_free_
char *t
= NULL
;
427 if (flags
& HEADER_COMPATIBLE_SEALED
)
428 strv
[n
++] = "sealed";
430 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_XZ
)
431 strv
[n
++] = "xz-compressed";
432 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_LZ4
)
433 strv
[n
++] = "lz4-compressed";
434 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_ZSTD
)
435 strv
[n
++] = "zstd-compressed";
436 if (flags
& HEADER_INCOMPATIBLE_KEYED_HASH
)
437 strv
[n
++] = "keyed-hash";
438 if (flags
& HEADER_INCOMPATIBLE_COMPACT
)
439 strv
[n
++] = "compact";
442 assert(n
< ELEMENTSOF(strv
));
444 t
= strv_join((char**) strv
, ", ");
445 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
446 f
->path
, type
, n
> 1 ? "flags" : "flag", strnull(t
));
454 static int journal_file_verify_header(JournalFile
*f
) {
455 uint64_t arena_size
, header_size
;
460 if (memcmp(f
->header
->signature
, HEADER_SIGNATURE
, 8))
463 /* In both read and write mode we refuse to open files with incompatible
464 * flags we don't know. */
465 if (warn_wrong_flags(f
, false))
466 return -EPROTONOSUPPORT
;
468 /* When open for writing we refuse to open files with compatible flags, too. */
469 if (journal_file_writable(f
) && warn_wrong_flags(f
, true))
470 return -EPROTONOSUPPORT
;
472 if (f
->header
->state
>= _STATE_MAX
)
475 header_size
= le64toh(READ_NOW(f
->header
->header_size
));
477 /* The first addition was n_data, so check that we are at least this large */
478 if (header_size
< HEADER_SIZE_MIN
)
481 if (JOURNAL_HEADER_SEALED(f
->header
) && !JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
484 arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
486 if (UINT64_MAX
- header_size
< arena_size
|| header_size
+ arena_size
> (uint64_t) f
->last_stat
.st_size
)
489 if (le64toh(f
->header
->tail_object_offset
) > header_size
+ arena_size
)
492 if (!VALID64(le64toh(f
->header
->data_hash_table_offset
)) ||
493 !VALID64(le64toh(f
->header
->field_hash_table_offset
)) ||
494 !VALID64(le64toh(f
->header
->tail_object_offset
)) ||
495 !VALID64(le64toh(f
->header
->entry_array_offset
)))
498 if (journal_file_writable(f
)) {
499 sd_id128_t machine_id
;
503 r
= sd_id128_get_machine(&machine_id
);
507 if (!sd_id128_equal(machine_id
, f
->header
->machine_id
))
510 state
= f
->header
->state
;
512 if (state
== STATE_ARCHIVED
)
513 return -ESHUTDOWN
; /* Already archived */
514 else if (state
== STATE_ONLINE
)
515 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
516 "Journal file %s is already online. Assuming unclean closing.",
518 else if (state
!= STATE_OFFLINE
)
519 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
520 "Journal file %s has unknown state %i.",
523 if (f
->header
->field_hash_table_size
== 0 || f
->header
->data_hash_table_size
== 0)
526 /* Don't permit appending to files from the future. Because otherwise the realtime timestamps wouldn't
527 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
529 if (le64toh(f
->header
->tail_entry_realtime
) > now(CLOCK_REALTIME
))
530 return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY
),
531 "Journal file %s is from the future, refusing to append new data to it that'd be older.",
538 int journal_file_fstat(JournalFile
*f
) {
544 if (fstat(f
->fd
, &f
->last_stat
) < 0)
547 f
->last_stat_usec
= now(CLOCK_MONOTONIC
);
549 /* Refuse dealing with files that aren't regular */
550 r
= stat_verify_regular(&f
->last_stat
);
554 /* Refuse appending to files that are already deleted */
555 if (f
->last_stat
.st_nlink
<= 0)
561 static int journal_file_allocate(JournalFile
*f
, uint64_t offset
, uint64_t size
) {
562 uint64_t old_size
, new_size
, old_header_size
, old_arena_size
;
568 /* We assume that this file is not sparse, and we know that for sure, since we always call
569 * posix_fallocate() ourselves */
571 if (size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - offset
)
574 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
577 old_header_size
= le64toh(READ_NOW(f
->header
->header_size
));
578 old_arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
579 if (old_arena_size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - old_header_size
)
582 old_size
= old_header_size
+ old_arena_size
;
584 new_size
= MAX(PAGE_ALIGN(offset
+ size
), old_header_size
);
586 if (new_size
<= old_size
) {
588 /* We already pre-allocated enough space, but before
589 * we write to it, let's check with fstat() if the
590 * file got deleted, in order make sure we don't throw
591 * away the data immediately. Don't check fstat() for
592 * all writes though, but only once ever 10s. */
594 if (f
->last_stat_usec
+ LAST_STAT_REFRESH_USEC
> now(CLOCK_MONOTONIC
))
597 return journal_file_fstat(f
);
600 /* Allocate more space. */
602 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
605 /* Refuse to go over 4G in compact mode so offsets can be stored in 32-bit. */
606 if (JOURNAL_HEADER_COMPACT(f
->header
) && new_size
> UINT32_MAX
)
609 if (new_size
> f
->metrics
.min_size
&& f
->metrics
.keep_free
> 0) {
612 if (fstatvfs(f
->fd
, &svfs
) >= 0) {
615 available
= LESS_BY((uint64_t) svfs
.f_bfree
* (uint64_t) svfs
.f_bsize
, f
->metrics
.keep_free
);
617 if (new_size
- old_size
> available
)
622 /* Increase by larger blocks at once */
623 new_size
= DIV_ROUND_UP(new_size
, FILE_SIZE_INCREASE
) * FILE_SIZE_INCREASE
;
624 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
625 new_size
= f
->metrics
.max_size
;
627 /* Note that the glibc fallocate() fallback is very
628 inefficient, hence we try to minimize the allocation area
630 r
= posix_fallocate_loop(f
->fd
, old_size
, new_size
- old_size
);
634 f
->header
->arena_size
= htole64(new_size
- old_header_size
);
636 return journal_file_fstat(f
);
639 static unsigned type_to_context(ObjectType type
) {
640 /* One context for each type, plus one catch-all for the rest */
641 assert_cc(_OBJECT_TYPE_MAX
<= MMAP_CACHE_MAX_CONTEXTS
);
642 assert_cc(CONTEXT_HEADER
< MMAP_CACHE_MAX_CONTEXTS
);
643 return type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
? type
: 0;
646 static int journal_file_move_to(
659 /* This function may clear, overwrite, or alter previously cached entries. After this function has
660 * been called, all objects except for one obtained by this function are invalidated and must be
661 * re-read before use. */
666 if (size
> UINT64_MAX
- offset
)
669 /* Avoid SIGBUS on invalid accesses */
670 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
) {
671 /* Hmm, out of range? Let's refresh the fstat() data
672 * first, before we trust that check. */
674 r
= journal_file_fstat(f
);
678 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
)
679 return -EADDRNOTAVAIL
;
682 return mmap_cache_fd_get(f
->cache_fd
, type_to_context(type
), keep_always
, offset
, size
, &f
->last_stat
, ret
);
685 static uint64_t minimum_header_size(JournalFile
*f
, Object
*o
) {
687 static const uint64_t table
[] = {
688 [OBJECT_DATA
] = sizeof(DataObject
),
689 [OBJECT_FIELD
] = sizeof(FieldObject
),
690 [OBJECT_ENTRY
] = sizeof(EntryObject
),
691 [OBJECT_DATA_HASH_TABLE
] = sizeof(HashTableObject
),
692 [OBJECT_FIELD_HASH_TABLE
] = sizeof(HashTableObject
),
693 [OBJECT_ENTRY_ARRAY
] = sizeof(EntryArrayObject
),
694 [OBJECT_TAG
] = sizeof(TagObject
),
700 if (o
->object
.type
== OBJECT_DATA
)
701 return journal_file_data_payload_offset(f
);
703 if (o
->object
.type
>= ELEMENTSOF(table
) || table
[o
->object
.type
] <= 0)
704 return sizeof(ObjectHeader
);
706 return table
[o
->object
.type
];
709 static int check_object_header(JournalFile
*f
, Object
*o
, ObjectType type
, uint64_t offset
) {
715 s
= le64toh(READ_NOW(o
->object
.size
));
717 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
718 "Attempt to move to uninitialized object: %" PRIu64
,
721 if (s
< sizeof(ObjectHeader
))
722 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
723 "Attempt to move to overly short object: %" PRIu64
,
726 if (o
->object
.type
<= OBJECT_UNUSED
)
727 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
728 "Attempt to move to object with invalid type: %" PRIu64
,
731 if (type
> OBJECT_UNUSED
&& o
->object
.type
!= type
)
732 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
733 "Attempt to move to object of unexpected type: %" PRIu64
,
736 if (s
< minimum_header_size(f
, o
))
737 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
738 "Attempt to move to truncated object: %" PRIu64
,
744 /* Lightweight object checks. We want this to be fast, so that we won't
745 * slowdown every journal_file_move_to_object() call too much. */
746 static int check_object(JournalFile
*f
, Object
*o
, uint64_t offset
) {
750 switch (o
->object
.type
) {
753 if ((le64toh(o
->data
.entry_offset
) == 0) ^ (le64toh(o
->data
.n_entries
) == 0))
754 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
755 "Bad n_entries: %" PRIu64
": %" PRIu64
,
756 le64toh(o
->data
.n_entries
),
759 if (le64toh(o
->object
.size
) <= journal_file_data_payload_offset(f
))
760 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
761 "Bad object size (<= %zu): %" PRIu64
": %" PRIu64
,
762 journal_file_data_payload_offset(f
),
763 le64toh(o
->object
.size
),
766 if (!VALID64(le64toh(o
->data
.next_hash_offset
)) ||
767 !VALID64(le64toh(o
->data
.next_field_offset
)) ||
768 !VALID64(le64toh(o
->data
.entry_offset
)) ||
769 !VALID64(le64toh(o
->data
.entry_array_offset
)))
770 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
771 "Invalid offset, next_hash_offset=" OFSfmt
", next_field_offset=" OFSfmt
", entry_offset=" OFSfmt
", entry_array_offset=" OFSfmt
": %" PRIu64
,
772 le64toh(o
->data
.next_hash_offset
),
773 le64toh(o
->data
.next_field_offset
),
774 le64toh(o
->data
.entry_offset
),
775 le64toh(o
->data
.entry_array_offset
),
781 if (le64toh(o
->object
.size
) <= offsetof(Object
, field
.payload
))
782 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
783 "Bad field size (<= %zu): %" PRIu64
": %" PRIu64
,
784 offsetof(Object
, field
.payload
),
785 le64toh(o
->object
.size
),
788 if (!VALID64(le64toh(o
->field
.next_hash_offset
)) ||
789 !VALID64(le64toh(o
->field
.head_data_offset
)))
790 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
791 "Invalid offset, next_hash_offset=" OFSfmt
", head_data_offset=" OFSfmt
": %" PRIu64
,
792 le64toh(o
->field
.next_hash_offset
),
793 le64toh(o
->field
.head_data_offset
),
800 sz
= le64toh(READ_NOW(o
->object
.size
));
801 if (sz
< offsetof(Object
, entry
.items
) ||
802 (sz
- offsetof(Object
, entry
.items
)) % journal_file_entry_item_size(f
) != 0)
803 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
804 "Bad entry size (<= %zu): %" PRIu64
": %" PRIu64
,
805 offsetof(Object
, entry
.items
),
809 if ((sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
) <= 0)
810 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
811 "Invalid number items in entry: %" PRIu64
": %" PRIu64
,
812 (sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
),
815 if (le64toh(o
->entry
.seqnum
) <= 0)
816 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
817 "Invalid entry seqnum: %" PRIx64
": %" PRIu64
,
818 le64toh(o
->entry
.seqnum
),
821 if (!VALID_REALTIME(le64toh(o
->entry
.realtime
)))
822 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
823 "Invalid entry realtime timestamp: %" PRIu64
": %" PRIu64
,
824 le64toh(o
->entry
.realtime
),
827 if (!VALID_MONOTONIC(le64toh(o
->entry
.monotonic
)))
828 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
829 "Invalid entry monotonic timestamp: %" PRIu64
": %" PRIu64
,
830 le64toh(o
->entry
.monotonic
),
836 case OBJECT_DATA_HASH_TABLE
:
837 case OBJECT_FIELD_HASH_TABLE
: {
840 sz
= le64toh(READ_NOW(o
->object
.size
));
841 if (sz
< offsetof(Object
, hash_table
.items
) ||
842 (sz
- offsetof(Object
, hash_table
.items
)) % sizeof(HashItem
) != 0 ||
843 (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
) <= 0)
844 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
845 "Invalid %s hash table size: %" PRIu64
": %" PRIu64
,
846 o
->object
.type
== OBJECT_DATA_HASH_TABLE
? "data" : "field",
853 case OBJECT_ENTRY_ARRAY
: {
856 sz
= le64toh(READ_NOW(o
->object
.size
));
857 if (sz
< offsetof(Object
, entry_array
.items
) ||
858 (sz
- offsetof(Object
, entry_array
.items
)) % journal_file_entry_array_item_size(f
) != 0 ||
859 (sz
- offsetof(Object
, entry_array
.items
)) / journal_file_entry_array_item_size(f
) <= 0)
860 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
861 "Invalid object entry array size: %" PRIu64
": %" PRIu64
,
865 if (!VALID64(le64toh(o
->entry_array
.next_entry_array_offset
)))
866 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
867 "Invalid object entry array next_entry_array_offset: " OFSfmt
": %" PRIu64
,
868 le64toh(o
->entry_array
.next_entry_array_offset
),
875 if (le64toh(o
->object
.size
) != sizeof(TagObject
))
876 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
877 "Invalid object tag size: %" PRIu64
": %" PRIu64
,
878 le64toh(o
->object
.size
),
881 if (!VALID_EPOCH(le64toh(o
->tag
.epoch
)))
882 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
883 "Invalid object tag epoch: %" PRIu64
": %" PRIu64
,
884 le64toh(o
->tag
.epoch
), offset
);
892 int journal_file_move_to_object(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
**ret
) {
898 /* Even if this function fails, it may clear, overwrite, or alter previously cached entries. After
899 * this function has been called, all objects except for one obtained by this function are
900 * invalidated and must be re-read before use.. */
902 /* Objects may only be located at multiple of 64 bit */
903 if (!VALID64(offset
))
904 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
905 "Attempt to move to object at non-64bit boundary: %" PRIu64
,
908 /* Object may not be located in the file header */
909 if (offset
< le64toh(f
->header
->header_size
))
910 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
911 "Attempt to move to object located in file header: %" PRIu64
,
914 r
= journal_file_move_to(f
, type
, false, offset
, sizeof(ObjectHeader
), (void**) &o
);
918 r
= check_object_header(f
, o
, type
, offset
);
922 r
= journal_file_move_to(f
, type
, false, offset
, le64toh(READ_NOW(o
->object
.size
)), (void**) &o
);
926 r
= check_object_header(f
, o
, type
, offset
);
930 r
= check_object(f
, o
, offset
);
940 int journal_file_read_object_header(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
*ret
) {
947 /* Objects may only be located at multiple of 64 bit */
948 if (!VALID64(offset
))
949 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
950 "Attempt to read object at non-64bit boundary: %" PRIu64
,
953 /* Object may not be located in the file header */
954 if (offset
< le64toh(f
->header
->header_size
))
955 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
956 "Attempt to read object located in file header: %" PRIu64
,
959 /* This will likely read too much data but it avoids having to call pread() twice. */
960 n
= pread(f
->fd
, &o
, sizeof(o
), offset
);
962 return log_debug_errno(errno
, "Failed to read journal file at offset: %" PRIu64
,
965 if ((size_t) n
< sizeof(o
.object
))
966 return log_debug_errno(SYNTHETIC_ERRNO(EIO
),
967 "Failed to read short object at offset: %" PRIu64
,
970 r
= check_object_header(f
, &o
, type
, offset
);
974 if ((size_t) n
< minimum_header_size(f
, &o
))
975 return log_debug_errno(SYNTHETIC_ERRNO(EIO
),
976 "Short read while reading object: %" PRIu64
,
979 r
= check_object(f
, &o
, offset
);
989 static uint64_t journal_file_entry_seqnum(
998 /* Picks a new sequence number for the entry we are about to add and returns it. */
1000 ret
= le64toh(f
->header
->tail_entry_seqnum
) + 1;
1003 /* If an external seqnum counter was passed, we update both the local and the external one,
1004 * and set it to the maximum of both */
1006 if (*seqnum
+ 1 > ret
)
1012 f
->header
->tail_entry_seqnum
= htole64(ret
);
1014 if (f
->header
->head_entry_seqnum
== 0)
1015 f
->header
->head_entry_seqnum
= htole64(ret
);
1020 int journal_file_append_object(
1024 Object
**ret_object
,
1025 uint64_t *ret_offset
) {
1033 assert(type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
);
1034 assert(size
>= sizeof(ObjectHeader
));
1036 r
= journal_file_set_online(f
);
1040 r
= journal_file_tail_end_by_mmap(f
, &p
);
1044 r
= journal_file_allocate(f
, p
, size
);
1048 r
= journal_file_move_to(f
, type
, false, p
, size
, (void**) &o
);
1052 o
->object
= (ObjectHeader
) {
1054 .size
= htole64(size
),
1057 f
->header
->tail_object_offset
= htole64(p
);
1058 f
->header
->n_objects
= htole64(le64toh(f
->header
->n_objects
) + 1);
1069 static int journal_file_setup_data_hash_table(JournalFile
*f
) {
1077 /* We estimate that we need 1 hash table entry per 768 bytes
1078 of journal file and we want to make sure we never get
1079 beyond 75% fill level. Calculate the hash table size for
1080 the maximum file size based on these metrics. */
1082 s
= (f
->metrics
.max_size
* 4 / 768 / 3) * sizeof(HashItem
);
1083 if (s
< DEFAULT_DATA_HASH_TABLE_SIZE
)
1084 s
= DEFAULT_DATA_HASH_TABLE_SIZE
;
1086 log_debug("Reserving %"PRIu64
" entries in data hash table.", s
/ sizeof(HashItem
));
1088 r
= journal_file_append_object(f
,
1089 OBJECT_DATA_HASH_TABLE
,
1090 offsetof(Object
, hash_table
.items
) + s
,
1095 memzero(o
->hash_table
.items
, s
);
1097 f
->header
->data_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1098 f
->header
->data_hash_table_size
= htole64(s
);
1103 static int journal_file_setup_field_hash_table(JournalFile
*f
) {
1111 /* We use a fixed size hash table for the fields as this
1112 * number should grow very slowly only */
1114 s
= DEFAULT_FIELD_HASH_TABLE_SIZE
;
1115 log_debug("Reserving %"PRIu64
" entries in field hash table.", s
/ sizeof(HashItem
));
1117 r
= journal_file_append_object(f
,
1118 OBJECT_FIELD_HASH_TABLE
,
1119 offsetof(Object
, hash_table
.items
) + s
,
1124 memzero(o
->hash_table
.items
, s
);
1126 f
->header
->field_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1127 f
->header
->field_hash_table_size
= htole64(s
);
1132 int journal_file_map_data_hash_table(JournalFile
*f
) {
1140 if (f
->data_hash_table
)
1143 p
= le64toh(f
->header
->data_hash_table_offset
);
1144 s
= le64toh(f
->header
->data_hash_table_size
);
1146 r
= journal_file_move_to(f
,
1147 OBJECT_DATA_HASH_TABLE
,
1154 f
->data_hash_table
= t
;
1158 int journal_file_map_field_hash_table(JournalFile
*f
) {
1166 if (f
->field_hash_table
)
1169 p
= le64toh(f
->header
->field_hash_table_offset
);
1170 s
= le64toh(f
->header
->field_hash_table_size
);
1172 r
= journal_file_move_to(f
,
1173 OBJECT_FIELD_HASH_TABLE
,
1180 f
->field_hash_table
= t
;
1184 static int journal_file_link_field(
1195 assert(f
->field_hash_table
);
1199 if (o
->object
.type
!= OBJECT_FIELD
)
1202 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1206 /* This might alter the window we are looking at */
1207 o
->field
.next_hash_offset
= o
->field
.head_data_offset
= 0;
1210 p
= le64toh(f
->field_hash_table
[h
].tail_hash_offset
);
1212 f
->field_hash_table
[h
].head_hash_offset
= htole64(offset
);
1214 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1218 o
->field
.next_hash_offset
= htole64(offset
);
1221 f
->field_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1223 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
1224 f
->header
->n_fields
= htole64(le64toh(f
->header
->n_fields
) + 1);
1229 static int journal_file_link_data(
1240 assert(f
->data_hash_table
);
1244 if (o
->object
.type
!= OBJECT_DATA
)
1247 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1251 /* This might alter the window we are looking at */
1252 o
->data
.next_hash_offset
= o
->data
.next_field_offset
= 0;
1253 o
->data
.entry_offset
= o
->data
.entry_array_offset
= 0;
1254 o
->data
.n_entries
= 0;
1257 p
= le64toh(f
->data_hash_table
[h
].tail_hash_offset
);
1259 /* Only entry in the hash table is easy */
1260 f
->data_hash_table
[h
].head_hash_offset
= htole64(offset
);
1262 /* Move back to the previous data object, to patch in
1265 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1269 o
->data
.next_hash_offset
= htole64(offset
);
1272 f
->data_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1274 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
1275 f
->header
->n_data
= htole64(le64toh(f
->header
->n_data
) + 1);
1280 static int get_next_hash_offset(
1283 le64_t
*next_hash_offset
,
1285 le64_t
*header_max_depth
) {
1291 assert(next_hash_offset
);
1294 nextp
= le64toh(READ_NOW(*next_hash_offset
));
1296 if (nextp
<= *p
) /* Refuse going in loops */
1297 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1298 "Detected hash item loop in %s, refusing.", f
->path
);
1302 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1303 if (header_max_depth
&& journal_file_writable(f
))
1304 *header_max_depth
= htole64(MAX(*depth
, le64toh(*header_max_depth
)));
1311 int journal_file_find_field_object_with_hash(
1316 Object
**ret_object
,
1317 uint64_t *ret_offset
) {
1319 uint64_t p
, osize
, h
, m
, depth
= 0;
1327 /* If the field hash table is empty, we can't find anything */
1328 if (le64toh(f
->header
->field_hash_table_size
) <= 0)
1331 /* Map the field hash table, if it isn't mapped yet. */
1332 r
= journal_file_map_field_hash_table(f
);
1336 osize
= offsetof(Object
, field
.payload
) + size
;
1338 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1343 p
= le64toh(f
->field_hash_table
[h
].head_hash_offset
);
1347 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1351 if (le64toh(o
->field
.hash
) == hash
&&
1352 le64toh(o
->object
.size
) == osize
&&
1353 memcmp(o
->field
.payload
, field
, size
) == 0) {
1363 r
= get_next_hash_offset(
1366 &o
->field
.next_hash_offset
,
1368 JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) ? &f
->header
->field_hash_chain_depth
: NULL
);
1376 uint64_t journal_file_hash_data(
1383 assert(data
|| sz
== 0);
1385 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1386 * function use siphash. Old journal files use the Jenkins hash. */
1388 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
1389 return siphash24(data
, sz
, f
->header
->file_id
.bytes
);
1391 return jenkins_hash64(data
, sz
);
1394 int journal_file_find_field_object(
1398 Object
**ret_object
,
1399 uint64_t *ret_offset
) {
1405 return journal_file_find_field_object_with_hash(
1408 journal_file_hash_data(f
, field
, size
),
1409 ret_object
, ret_offset
);
1412 int journal_file_find_data_object_with_hash(
1417 Object
**ret_object
,
1418 uint64_t *ret_offset
) {
1420 uint64_t p
, h
, m
, depth
= 0;
1425 assert(data
|| size
== 0);
1427 /* If there's no data hash table, then there's no entry. */
1428 if (le64toh(f
->header
->data_hash_table_size
) <= 0)
1431 /* Map the data hash table, if it isn't mapped yet. */
1432 r
= journal_file_map_data_hash_table(f
);
1436 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1441 p
= le64toh(f
->data_hash_table
[h
].head_hash_offset
);
1448 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1452 if (le64toh(o
->data
.hash
) != hash
)
1455 r
= journal_file_data_payload(f
, o
, p
, NULL
, 0, 0, &d
, &rsize
);
1458 assert(r
> 0); /* journal_file_data_payload() always returns > 0 if no field is provided. */
1460 if (memcmp_nn(data
, size
, d
, rsize
) == 0) {
1471 r
= get_next_hash_offset(
1474 &o
->data
.next_hash_offset
,
1476 JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) ? &f
->header
->data_hash_chain_depth
: NULL
);
1484 int journal_file_find_data_object(
1488 Object
**ret_object
,
1489 uint64_t *ret_offset
) {
1492 assert(data
|| size
== 0);
1494 return journal_file_find_data_object_with_hash(
1497 journal_file_hash_data(f
, data
, size
),
1498 ret_object
, ret_offset
);
1501 bool journal_field_valid(const char *p
, size_t l
, bool allow_protected
) {
1502 /* We kinda enforce POSIX syntax recommendations for
1503 environment variables here, but make a couple of additional
1506 http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html */
1513 /* No empty field names */
1517 /* Don't allow names longer than 64 chars */
1521 /* Variables starting with an underscore are protected */
1522 if (!allow_protected
&& p
[0] == '_')
1525 /* Don't allow digits as first character */
1526 if (ascii_isdigit(p
[0]))
1529 /* Only allow A-Z0-9 and '_' */
1530 for (const char *a
= p
; a
< p
+ l
; a
++)
1531 if ((*a
< 'A' || *a
> 'Z') &&
1532 !ascii_isdigit(*a
) &&
1539 static int journal_file_append_field(
1543 Object
**ret_object
,
1544 uint64_t *ret_offset
) {
1555 if (!journal_field_valid(field
, size
, true))
1558 hash
= journal_file_hash_data(f
, field
, size
);
1560 r
= journal_file_find_field_object_with_hash(f
, field
, size
, hash
, ret_object
, ret_offset
);
1566 osize
= offsetof(Object
, field
.payload
) + size
;
1567 r
= journal_file_append_object(f
, OBJECT_FIELD
, osize
, &o
, &p
);
1571 o
->field
.hash
= htole64(hash
);
1572 memcpy(o
->field
.payload
, field
, size
);
1574 r
= journal_file_link_field(f
, o
, p
, hash
);
1578 /* The linking might have altered the window, so let's only pass the offset to hmac which will
1579 * move to the object again if needed. */
1582 r
= journal_file_hmac_put_object(f
, OBJECT_FIELD
, NULL
, p
);
1588 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, ret_object
);
1599 static Compression
maybe_compress_payload(JournalFile
*f
, uint8_t *dst
, const uint8_t *src
, uint64_t size
, size_t *rsize
) {
1600 Compression compression
= COMPRESSION_NONE
;
1605 #if HAVE_COMPRESSION
1606 if (JOURNAL_FILE_COMPRESS(f
) && size
>= f
->compress_threshold_bytes
) {
1607 compression
= compress_blob(src
, size
, dst
, size
- 1, rsize
);
1608 if (compression
> 0)
1609 log_debug("Compressed data object %"PRIu64
" -> %zu using %s",
1610 size
, *rsize
, compression_to_string(compression
));
1612 /* Compression didn't work, we don't really care why, let's continue without compression */
1613 compression
= COMPRESSION_NONE
;
1620 static int journal_file_append_data(
1624 Object
**ret_object
,
1625 uint64_t *ret_offset
) {
1627 uint64_t hash
, p
, osize
;
1636 if (!data
|| size
== 0)
1639 hash
= journal_file_hash_data(f
, data
, size
);
1641 r
= journal_file_find_data_object_with_hash(f
, data
, size
, hash
, ret_object
, ret_offset
);
1647 eq
= memchr(data
, '=', size
);
1651 osize
= journal_file_data_payload_offset(f
) + size
;
1652 r
= journal_file_append_object(f
, OBJECT_DATA
, osize
, &o
, &p
);
1656 o
->data
.hash
= htole64(hash
);
1658 c
= maybe_compress_payload(f
, journal_file_data_payload_field(f
, o
), data
, size
, &rsize
);
1660 if (c
!= COMPRESSION_NONE
) {
1661 o
->object
.size
= htole64(journal_file_data_payload_offset(f
) + rsize
);
1662 o
->object
.flags
|= COMPRESSION_TO_OBJECT_FLAG(c
);
1664 memcpy_safe(journal_file_data_payload_field(f
, o
), data
, size
);
1666 r
= journal_file_link_data(f
, o
, p
, hash
);
1670 /* The linking might have altered the window, so let's refresh our pointer. */
1671 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1676 r
= journal_file_hmac_put_object(f
, OBJECT_DATA
, o
, p
);
1681 /* Create field object ... */
1682 r
= journal_file_append_field(f
, data
, (uint8_t*) eq
- (uint8_t*) data
, &fo
, NULL
);
1686 /* ... and link it in. */
1687 o
->data
.next_field_offset
= fo
->field
.head_data_offset
;
1688 fo
->field
.head_data_offset
= le64toh(p
);
1699 static int maybe_decompress_payload(
1703 Compression compression
,
1705 size_t field_length
,
1706 size_t data_threshold
,
1712 /* We can't read objects larger than 4G on a 32bit machine */
1713 if ((uint64_t) (size_t) size
!= size
)
1716 if (compression
!= COMPRESSION_NONE
) {
1717 #if HAVE_COMPRESSION
1722 r
= decompress_startswith(compression
, payload
, size
, &f
->compress_buffer
, field
,
1725 return log_debug_errno(r
,
1726 "Cannot decompress %s object of length %" PRIu64
": %m",
1727 compression_to_string(compression
),
1738 r
= decompress_blob(compression
, payload
, size
, &f
->compress_buffer
, &rsize
, 0);
1743 *ret_data
= f
->compress_buffer
;
1747 return -EPROTONOSUPPORT
;
1750 if (field
&& (size
< field_length
+ 1 || memcmp(payload
, field
, field_length
) != 0 || payload
[field_length
] != '=')) {
1759 *ret_data
= payload
;
1761 *ret_size
= (size_t) size
;
1767 int journal_file_data_payload(
1772 size_t field_length
,
1773 size_t data_threshold
,
1782 assert(!field
== (field_length
== 0)); /* These must be specified together. */
1785 r
= journal_file_move_to_object(f
, OBJECT_DATA
, offset
, &o
);
1790 size
= le64toh(READ_NOW(o
->object
.size
));
1791 if (size
< journal_file_data_payload_offset(f
))
1794 size
-= journal_file_data_payload_offset(f
);
1796 c
= COMPRESSION_FROM_OBJECT(o
);
1798 return -EPROTONOSUPPORT
;
1800 return maybe_decompress_payload(f
, journal_file_data_payload_field(f
, o
), size
, c
, field
,
1801 field_length
, data_threshold
, ret_data
, ret_size
);
1804 uint64_t journal_file_entry_n_items(JournalFile
*f
, Object
*o
) {
1810 if (o
->object
.type
!= OBJECT_ENTRY
)
1813 sz
= le64toh(READ_NOW(o
->object
.size
));
1814 if (sz
< offsetof(Object
, entry
.items
))
1817 return (sz
- offsetof(Object
, entry
.items
)) / journal_file_entry_item_size(f
);
1820 uint64_t journal_file_entry_array_n_items(JournalFile
*f
, Object
*o
) {
1826 if (o
->object
.type
!= OBJECT_ENTRY_ARRAY
)
1829 sz
= le64toh(READ_NOW(o
->object
.size
));
1830 if (sz
< offsetof(Object
, entry_array
.items
))
1833 return (sz
- offsetof(Object
, entry_array
.items
)) / journal_file_entry_array_item_size(f
);
1836 uint64_t journal_file_hash_table_n_items(Object
*o
) {
1841 if (!IN_SET(o
->object
.type
, OBJECT_DATA_HASH_TABLE
, OBJECT_FIELD_HASH_TABLE
))
1844 sz
= le64toh(READ_NOW(o
->object
.size
));
1845 if (sz
< offsetof(Object
, hash_table
.items
))
1848 return (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
);
1851 static void write_entry_array_item(JournalFile
*f
, Object
*o
, uint64_t i
, uint64_t p
) {
1855 if (JOURNAL_HEADER_COMPACT(f
->header
)) {
1856 assert(p
<= UINT32_MAX
);
1857 o
->entry_array
.items
.compact
[i
] = htole32(p
);
1859 o
->entry_array
.items
.regular
[i
] = htole64(p
);
1862 static int link_entry_into_array(
1870 uint64_t n
= 0, ap
= 0, q
, i
, a
, hidx
;
1880 a
= tail
? le32toh(*tail
) : le64toh(*first
);
1881 hidx
= le64toh(READ_NOW(*idx
));
1882 i
= tidx
? le32toh(READ_NOW(*tidx
)) : hidx
;
1885 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
1889 n
= journal_file_entry_array_n_items(f
, o
);
1891 write_entry_array_item(f
, o
, i
, p
);
1892 *idx
= htole64(hidx
+ 1);
1894 *tidx
= htole32(le32toh(*tidx
) + 1);
1900 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
1911 r
= journal_file_append_object(f
, OBJECT_ENTRY_ARRAY
,
1912 offsetof(Object
, entry_array
.items
) + n
* journal_file_entry_array_item_size(f
),
1918 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY_ARRAY
, o
, q
);
1923 write_entry_array_item(f
, o
, i
, p
);
1926 *first
= htole64(q
);
1928 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, ap
, &o
);
1932 o
->entry_array
.next_entry_array_offset
= htole64(q
);
1938 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
1939 f
->header
->n_entry_arrays
= htole64(le64toh(f
->header
->n_entry_arrays
) + 1);
1941 *idx
= htole64(hidx
+ 1);
1948 static int link_entry_into_array_plus_one(
1966 hidx
= le64toh(READ_NOW(*idx
));
1967 if (hidx
== UINT64_MAX
)
1970 *extra
= htole64(p
);
1974 i
= htole64(hidx
- 1);
1975 r
= link_entry_into_array(f
, first
, &i
, tail
, tidx
, p
);
1980 *idx
= htole64(hidx
+ 1);
1984 static int journal_file_link_entry_item(JournalFile
*f
, uint64_t offset
, uint64_t p
) {
1991 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1995 return link_entry_into_array_plus_one(f
,
1996 &o
->data
.entry_offset
,
1997 &o
->data
.entry_array_offset
,
1999 JOURNAL_HEADER_COMPACT(f
->header
) ? &o
->data
.compact
.tail_entry_array_offset
: NULL
,
2000 JOURNAL_HEADER_COMPACT(f
->header
) ? &o
->data
.compact
.tail_entry_array_n_entries
: NULL
,
2004 static int journal_file_link_entry(
2008 const EntryItem items
[],
2018 if (o
->object
.type
!= OBJECT_ENTRY
)
2021 __atomic_thread_fence(__ATOMIC_SEQ_CST
);
2023 /* Link up the entry itself */
2024 r
= link_entry_into_array(f
,
2025 &f
->header
->entry_array_offset
,
2026 &f
->header
->n_entries
,
2027 JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_offset
) ? &f
->header
->tail_entry_array_offset
: NULL
,
2028 JOURNAL_HEADER_CONTAINS(f
->header
, tail_entry_array_n_entries
) ? &f
->header
->tail_entry_array_n_entries
: NULL
,
2033 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
2035 if (f
->header
->head_entry_realtime
== 0)
2036 f
->header
->head_entry_realtime
= o
->entry
.realtime
;
2038 f
->header
->tail_entry_realtime
= o
->entry
.realtime
;
2039 f
->header
->tail_entry_monotonic
= o
->entry
.monotonic
;
2041 /* Link up the items */
2042 for (uint64_t i
= 0; i
< n_items
; i
++) {
2045 /* If we fail to link an entry item because we can't allocate a new entry array, don't fail
2046 * immediately but try to link the other entry items since it might still be possible to link
2047 * those if they don't require a new entry array to be allocated. */
2049 k
= journal_file_link_entry_item(f
, offset
, items
[i
].object_offset
);
2059 static void write_entry_item(JournalFile
*f
, Object
*o
, uint64_t i
, const EntryItem
*item
) {
2064 if (JOURNAL_HEADER_COMPACT(f
->header
)) {
2065 assert(item
->object_offset
<= UINT32_MAX
);
2066 o
->entry
.items
.compact
[i
].object_offset
= htole32(item
->object_offset
);
2068 o
->entry
.items
.regular
[i
].object_offset
= htole64(item
->object_offset
);
2069 o
->entry
.items
.regular
[i
].hash
= htole64(item
->hash
);
2073 static int journal_file_append_entry_internal(
2075 const dual_timestamp
*ts
,
2076 const sd_id128_t
*boot_id
,
2078 const EntryItem items
[],
2081 Object
**ret_object
,
2082 uint64_t *ret_offset
) {
2092 assert(items
|| n_items
== 0);
2094 osize
= offsetof(Object
, entry
.items
) + (n_items
* journal_file_entry_item_size(f
));
2096 r
= journal_file_append_object(f
, OBJECT_ENTRY
, osize
, &o
, &np
);
2100 o
->entry
.seqnum
= htole64(journal_file_entry_seqnum(f
, seqnum
));
2101 o
->entry
.realtime
= htole64(ts
->realtime
);
2102 o
->entry
.monotonic
= htole64(ts
->monotonic
);
2103 o
->entry
.xor_hash
= htole64(xor_hash
);
2105 f
->header
->boot_id
= *boot_id
;
2106 o
->entry
.boot_id
= f
->header
->boot_id
;
2108 for (size_t i
= 0; i
< n_items
; i
++)
2109 write_entry_item(f
, o
, i
, &items
[i
]);
2112 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY
, o
, np
);
2117 r
= journal_file_link_entry(f
, o
, np
, items
, n_items
);
2130 void journal_file_post_change(JournalFile
*f
) {
2136 /* inotify() does not receive IN_MODIFY events from file
2137 * accesses done via mmap(). After each access we hence
2138 * trigger IN_MODIFY by truncating the journal file to its
2139 * current size which triggers IN_MODIFY. */
2141 __atomic_thread_fence(__ATOMIC_SEQ_CST
);
2143 if (ftruncate(f
->fd
, f
->last_stat
.st_size
) < 0)
2144 log_debug_errno(errno
, "Failed to truncate file to its own size: %m");
2147 static int post_change_thunk(sd_event_source
*timer
, uint64_t usec
, void *userdata
) {
2150 journal_file_post_change(userdata
);
2155 static void schedule_post_change(JournalFile
*f
) {
2160 assert(f
->post_change_timer
);
2162 assert_se(e
= sd_event_source_get_event(f
->post_change_timer
));
2164 /* If we are already going down, post the change immediately. */
2165 if (IN_SET(sd_event_get_state(e
), SD_EVENT_EXITING
, SD_EVENT_FINISHED
))
2168 r
= sd_event_source_get_enabled(f
->post_change_timer
, NULL
);
2170 log_debug_errno(r
, "Failed to get ftruncate timer state: %m");
2176 r
= sd_event_source_set_time_relative(f
->post_change_timer
, f
->post_change_timer_period
);
2178 log_debug_errno(r
, "Failed to set time for scheduling ftruncate: %m");
2182 r
= sd_event_source_set_enabled(f
->post_change_timer
, SD_EVENT_ONESHOT
);
2184 log_debug_errno(r
, "Failed to enable scheduled ftruncate: %m");
2191 /* On failure, let's simply post the change immediately. */
2192 journal_file_post_change(f
);
2195 /* Enable coalesced change posting in a timer on the provided sd_event instance */
2196 int journal_file_enable_post_change_timer(JournalFile
*f
, sd_event
*e
, usec_t t
) {
2197 _cleanup_(sd_event_source_unrefp
) sd_event_source
*timer
= NULL
;
2201 assert_return(!f
->post_change_timer
, -EINVAL
);
2205 r
= sd_event_add_time(e
, &timer
, CLOCK_MONOTONIC
, 0, 0, post_change_thunk
, f
);
2209 r
= sd_event_source_set_enabled(timer
, SD_EVENT_OFF
);
2213 f
->post_change_timer
= TAKE_PTR(timer
);
2214 f
->post_change_timer_period
= t
;
2219 static int entry_item_cmp(const EntryItem
*a
, const EntryItem
*b
) {
2220 return CMP(ASSERT_PTR(a
)->object_offset
, ASSERT_PTR(b
)->object_offset
);
2223 static size_t remove_duplicate_entry_items(EntryItem items
[], size_t n
) {
2226 assert(items
|| n
== 0);
2231 for (size_t i
= 1; i
< n
; i
++)
2232 if (items
[i
].object_offset
!= items
[j
- 1].object_offset
)
2233 items
[j
++] = items
[i
];
2238 int journal_file_append_entry(
2240 const dual_timestamp
*ts
,
2241 const sd_id128_t
*boot_id
,
2242 const struct iovec iovec
[],
2245 Object
**ret_object
,
2246 uint64_t *ret_offset
) {
2248 _cleanup_free_ EntryItem
*items_alloc
= NULL
;
2250 uint64_t xor_hash
= 0;
2251 struct dual_timestamp _ts
;
2257 assert(n_iovec
> 0);
2260 if (!VALID_REALTIME(ts
->realtime
))
2261 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2262 "Invalid realtime timestamp %" PRIu64
", refusing entry.",
2264 if (!VALID_MONOTONIC(ts
->monotonic
))
2265 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2266 "Invalid monotomic timestamp %" PRIu64
", refusing entry.",
2269 dual_timestamp_get(&_ts
);
2274 r
= journal_file_maybe_append_tag(f
, ts
->realtime
);
2279 if (n_iovec
< ALLOCA_MAX
/ sizeof(EntryItem
) / 2)
2280 items
= newa(EntryItem
, n_iovec
);
2282 items_alloc
= new(EntryItem
, n_iovec
);
2286 items
= items_alloc
;
2289 for (size_t i
= 0; i
< n_iovec
; i
++) {
2293 r
= journal_file_append_data(f
, iovec
[i
].iov_base
, iovec
[i
].iov_len
, &o
, &p
);
2297 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2298 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2299 * specific record, and give records with otherwise identical position (i.e. match in seqno,
2300 * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
2301 * objects in each file is different since they are keyed. Hence let's calculate the Jenkins
2302 * hash here for that. This also has the benefit that cursors for old and new journal files
2303 * are completely identical (they include the XOR hash after all). For classic Jenkins-hash
2304 * files things are easier, we can just take the value from the stored record directly. */
2306 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
2307 xor_hash
^= jenkins_hash64(iovec
[i
].iov_base
, iovec
[i
].iov_len
);
2309 xor_hash
^= le64toh(o
->data
.hash
);
2311 items
[i
] = (EntryItem
) {
2313 .hash
= le64toh(o
->data
.hash
),
2317 /* Order by the position on disk, in order to improve seek
2318 * times for rotating media. */
2319 typesafe_qsort(items
, n_iovec
, entry_item_cmp
);
2320 n_iovec
= remove_duplicate_entry_items(items
, n_iovec
);
2322 r
= journal_file_append_entry_internal(f
, ts
, boot_id
, xor_hash
, items
, n_iovec
, seqnum
, ret_object
, ret_offset
);
2324 /* If the memory mapping triggered a SIGBUS then we return an
2325 * IO error and ignore the error code passed down to us, since
2326 * it is very likely just an effect of a nullified replacement
2329 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
2332 if (f
->post_change_timer
)
2333 schedule_post_change(f
);
2335 journal_file_post_change(f
);
2340 typedef struct ChainCacheItem
{
2341 uint64_t first
; /* the array at the beginning of the chain */
2342 uint64_t array
; /* the cached array */
2343 uint64_t begin
; /* the first item in the cached array */
2344 uint64_t total
; /* the total number of items in all arrays before this one in the chain */
2345 uint64_t last_index
; /* the last index we looked at, to optimize locality when bisecting */
2348 static void chain_cache_put(
2355 uint64_t last_index
) {
2360 /* If the chain item to cache for this chain is the
2361 * first one it's not worth caching anything */
2365 if (ordered_hashmap_size(h
) >= CHAIN_CACHE_MAX
) {
2366 ci
= ordered_hashmap_steal_first(h
);
2369 ci
= new(ChainCacheItem
, 1);
2376 if (ordered_hashmap_put(h
, &ci
->first
, ci
) < 0) {
2381 assert(ci
->first
== first
);
2386 ci
->last_index
= last_index
;
2389 static int bump_array_index(uint64_t *i
, direction_t direction
, uint64_t n
) {
2392 /* Increase or decrease the specified index, in the right direction. */
2394 if (direction
== DIRECTION_DOWN
) {
2409 static int bump_entry_array(
2414 direction_t direction
,
2424 if (direction
== DIRECTION_DOWN
) {
2426 return le64toh(o
->entry_array
.next_entry_array_offset
);
2429 /* Entry array chains are a singly linked list, so to find the previous array in the chain, we have
2430 * to start iterating from the top. */
2434 while (p
> 0 && p
!= offset
) {
2435 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, p
, &o
);
2440 p
= le64toh(o
->entry_array
.next_entry_array_offset
);
2443 /* If we can't find the previous entry array in the entry array chain, we're likely dealing with a
2444 * corrupted journal file. */
2453 static int generic_array_get(
2457 direction_t direction
,
2458 Object
**ret_object
,
2459 uint64_t *ret_offset
) {
2461 uint64_t p
= 0, a
, t
= 0, k
;
2468 /* FIXME: fix return value assignment on success. */
2472 /* Try the chain cache first */
2473 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2474 if (ci
&& i
> ci
->total
) {
2481 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2482 if (IN_SET(r
, -EBADMSG
, -EADDRNOTAVAIL
)) {
2483 /* If there's corruption and we're going downwards, let's pretend we reached the
2484 * final entry in the entry array chain. */
2486 if (direction
== DIRECTION_DOWN
)
2489 /* If there's corruption and we're going upwards, move back to the previous entry
2490 * array and start iterating entries from there. */
2492 r
= bump_entry_array(f
, NULL
, a
, first
, DIRECTION_UP
, &a
);
2503 k
= journal_file_entry_array_n_items(f
, o
);
2509 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
2512 /* If we've found the right location, now look for the first non-corrupt entry object (in the right
2516 /* In the first iteration of the while loop, we reuse i, k and o from the previous while
2518 if (i
== UINT64_MAX
) {
2519 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2523 k
= journal_file_entry_array_n_items(f
, o
);
2527 i
= direction
== DIRECTION_DOWN
? 0 : k
- 1;
2531 p
= journal_file_entry_array_item(f
, o
, i
);
2533 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, ret_object
);
2535 /* Let's cache this item for the next invocation */
2536 chain_cache_put(f
->chain_cache
, ci
, first
, a
, journal_file_entry_array_item(f
, o
, 0), t
, i
);
2543 if (!IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
))
2546 /* OK, so this entry is borked. Most likely some entry didn't get synced to
2547 * disk properly, let's see if the next one might work for us instead. */
2548 log_debug_errno(r
, "Entry item %" PRIu64
" is bad, skipping over it.", i
);
2549 } while (bump_array_index(&i
, direction
, k
) > 0);
2551 r
= bump_entry_array(f
, o
, a
, first
, direction
, &a
);
2562 static int generic_array_get_plus_one(
2567 direction_t direction
,
2568 Object
**ret_object
,
2569 uint64_t *ret_offset
) {
2575 /* FIXME: fix return value assignment on success. */
2578 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, ret_object
);
2579 if (IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
))
2580 return generic_array_get(f
, first
, 0, direction
, ret_object
, ret_offset
);
2585 *ret_offset
= extra
;
2590 return generic_array_get(f
, first
, i
- 1, direction
, ret_object
, ret_offset
);
2599 static int generic_array_bisect(
2604 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2605 direction_t direction
,
2606 Object
**ret_object
,
2607 uint64_t *ret_offset
,
2608 uint64_t *ret_idx
) {
2610 /* Given an entry array chain, this function finds the object "closest" to the given needle in the
2611 * chain, taking into account the provided direction. A function can be provided to determine how
2612 * an object is matched against the given needle.
2614 * Given a journal file, the offset of an object and the needle, the test_object() function should
2615 * return TEST_LEFT if the needle is located earlier in the entry array chain, TEST_RIGHT if the
2616 * needle is located later in the entry array chain and TEST_FOUND if the object matches the needle.
2617 * If test_object() returns TEST_FOUND for a specific object, that object's information will be used
2618 * to populate the return values of this function. If test_object() never returns TEST_FOUND, the
2619 * return values are populated with the details of one of the objects closest to the needle. If the
2620 * direction is DIRECTION_UP, the earlier object is used. Otherwise, the later object is used.
2623 uint64_t a
, p
, t
= 0, i
= 0, last_p
= 0, last_index
= UINT64_MAX
;
2624 bool subtract_one
= false;
2625 Object
*array
= NULL
;
2630 assert(test_object
);
2632 /* Start with the first array in the chain */
2635 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2636 if (ci
&& n
> ci
->total
&& ci
->begin
!= 0) {
2637 /* Ah, we have iterated this bisection array chain
2638 * previously! Let's see if we can skip ahead in the
2639 * chain, as far as the last time. But we can't jump
2640 * backwards in the chain, so let's check that
2643 r
= test_object(f
, ci
->begin
, needle
);
2647 if (r
== TEST_LEFT
) {
2648 /* OK, what we are looking for is right of the
2649 * begin of this EntryArray, so let's jump
2650 * straight to previously cached array in the
2656 last_index
= ci
->last_index
;
2661 uint64_t left
, right
, k
, lp
;
2663 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
2667 k
= journal_file_entry_array_n_items(f
, array
);
2673 lp
= p
= journal_file_entry_array_item(f
, array
, i
);
2677 r
= test_object(f
, p
, needle
);
2678 if (r
== -EBADMSG
) {
2679 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2686 if (r
== TEST_FOUND
)
2687 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2689 if (r
== TEST_RIGHT
) {
2693 if (last_index
!= UINT64_MAX
) {
2694 assert(last_index
<= right
);
2696 /* If we cached the last index we
2697 * looked at, let's try to not to jump
2698 * too wildly around and see if we can
2699 * limit the range to look at early to
2700 * the immediate neighbors of the last
2701 * index we looked at. */
2703 if (last_index
> 0) {
2704 uint64_t x
= last_index
- 1;
2706 p
= journal_file_entry_array_item(f
, array
, x
);
2710 r
= test_object(f
, p
, needle
);
2714 if (r
== TEST_FOUND
)
2715 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2717 if (r
== TEST_RIGHT
)
2723 if (last_index
< right
) {
2724 uint64_t y
= last_index
+ 1;
2726 p
= journal_file_entry_array_item(f
, array
, y
);
2730 r
= test_object(f
, p
, needle
);
2734 if (r
== TEST_FOUND
)
2735 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2737 if (r
== TEST_RIGHT
)
2745 if (left
== right
) {
2746 if (direction
== DIRECTION_UP
)
2747 subtract_one
= true;
2753 assert(left
< right
);
2754 i
= (left
+ right
) / 2;
2756 p
= journal_file_entry_array_item(f
, array
, i
);
2760 r
= test_object(f
, p
, needle
);
2761 if (r
== -EBADMSG
) {
2762 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2769 if (r
== TEST_FOUND
)
2770 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2772 if (r
== TEST_RIGHT
)
2780 if (direction
== DIRECTION_UP
) {
2782 subtract_one
= true;
2793 last_index
= UINT64_MAX
;
2794 a
= le64toh(array
->entry_array
.next_entry_array_offset
);
2800 if (subtract_one
&& t
== 0 && i
== 0)
2803 /* Let's cache this item for the next invocation */
2804 chain_cache_put(f
->chain_cache
, ci
, first
, a
, journal_file_entry_array_item(f
, array
, 0), t
, subtract_one
? (i
> 0 ? i
-1 : UINT64_MAX
) : i
);
2806 if (subtract_one
&& i
== 0)
2808 else if (subtract_one
)
2809 p
= journal_file_entry_array_item(f
, array
, i
- 1);
2811 p
= journal_file_entry_array_item(f
, array
, i
);
2814 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, ret_object
);
2823 *ret_idx
= t
+ i
+ (subtract_one
? -1 : 0);
2828 static int generic_array_bisect_plus_one(
2834 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2835 direction_t direction
,
2836 Object
**ret_object
,
2837 uint64_t *ret_offset
,
2838 uint64_t *ret_idx
) {
2841 bool step_back
= false;
2844 assert(test_object
);
2849 /* This bisects the array in object 'first', but first checks
2851 r
= test_object(f
, extra
, needle
);
2855 if (r
== TEST_FOUND
)
2856 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2858 /* if we are looking with DIRECTION_UP then we need to first
2859 see if in the actual array there is a matching entry, and
2860 return the last one of that. But if there isn't any we need
2861 to return this one. Hence remember this, and return it
2864 step_back
= direction
== DIRECTION_UP
;
2866 if (r
== TEST_RIGHT
) {
2867 if (direction
== DIRECTION_DOWN
)
2873 r
= generic_array_bisect(f
, first
, n
-1, needle
, test_object
, direction
, ret_object
, ret_offset
, ret_idx
);
2875 if (r
== 0 && step_back
)
2878 if (r
> 0 && ret_idx
)
2885 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, ret_object
);
2891 *ret_offset
= extra
;
2899 _pure_
static int test_object_offset(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2905 else if (p
< needle
)
2911 int journal_file_move_to_entry_by_offset(
2914 direction_t direction
,
2915 Object
**ret_object
,
2916 uint64_t *ret_offset
) {
2921 return generic_array_bisect(
2923 le64toh(f
->header
->entry_array_offset
),
2924 le64toh(f
->header
->n_entries
),
2928 ret_object
, ret_offset
, NULL
);
2931 static int test_object_seqnum(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2939 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2943 sq
= le64toh(READ_NOW(o
->entry
.seqnum
));
2946 else if (sq
< needle
)
2952 int journal_file_move_to_entry_by_seqnum(
2955 direction_t direction
,
2956 Object
**ret_object
,
2957 uint64_t *ret_offset
) {
2962 return generic_array_bisect(
2964 le64toh(f
->header
->entry_array_offset
),
2965 le64toh(f
->header
->n_entries
),
2969 ret_object
, ret_offset
, NULL
);
2972 static int test_object_realtime(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2980 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2984 rt
= le64toh(READ_NOW(o
->entry
.realtime
));
2987 else if (rt
< needle
)
2993 int journal_file_move_to_entry_by_realtime(
2996 direction_t direction
,
2997 Object
**ret_object
,
2998 uint64_t *ret_offset
) {
3003 return generic_array_bisect(
3005 le64toh(f
->header
->entry_array_offset
),
3006 le64toh(f
->header
->n_entries
),
3008 test_object_realtime
,
3010 ret_object
, ret_offset
, NULL
);
3013 static int test_object_monotonic(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
3021 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
3025 m
= le64toh(READ_NOW(o
->entry
.monotonic
));
3028 else if (m
< needle
)
3034 static int find_data_object_by_boot_id(
3037 Object
**ret_object
,
3038 uint64_t *ret_offset
) {
3040 char t
[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
3044 sd_id128_to_string(boot_id
, t
+ 9);
3045 return journal_file_find_data_object(f
, t
, sizeof(t
) - 1, ret_object
, ret_offset
);
3048 int journal_file_move_to_entry_by_monotonic(
3052 direction_t direction
,
3053 Object
**ret_object
,
3054 uint64_t *ret_offset
) {
3061 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, NULL
);
3067 return generic_array_bisect_plus_one(
3069 le64toh(o
->data
.entry_offset
),
3070 le64toh(o
->data
.entry_array_offset
),
3071 le64toh(o
->data
.n_entries
),
3073 test_object_monotonic
,
3075 ret_object
, ret_offset
, NULL
);
3078 void journal_file_reset_location(JournalFile
*f
) {
3081 f
->location_type
= LOCATION_HEAD
;
3082 f
->current_offset
= 0;
3083 f
->current_seqnum
= 0;
3084 f
->current_realtime
= 0;
3085 f
->current_monotonic
= 0;
3086 zero(f
->current_boot_id
);
3087 f
->current_xor_hash
= 0;
3090 void journal_file_save_location(JournalFile
*f
, Object
*o
, uint64_t offset
) {
3094 f
->location_type
= LOCATION_SEEK
;
3095 f
->current_offset
= offset
;
3096 f
->current_seqnum
= le64toh(o
->entry
.seqnum
);
3097 f
->current_realtime
= le64toh(o
->entry
.realtime
);
3098 f
->current_monotonic
= le64toh(o
->entry
.monotonic
);
3099 f
->current_boot_id
= o
->entry
.boot_id
;
3100 f
->current_xor_hash
= le64toh(o
->entry
.xor_hash
);
3103 int journal_file_compare_locations(JournalFile
*af
, JournalFile
*bf
) {
3110 assert(af
->location_type
== LOCATION_SEEK
);
3111 assert(bf
->location_type
== LOCATION_SEEK
);
3113 /* If contents, timestamps and seqnum match, these entries are
3115 if (sd_id128_equal(af
->current_boot_id
, bf
->current_boot_id
) &&
3116 af
->current_monotonic
== bf
->current_monotonic
&&
3117 af
->current_realtime
== bf
->current_realtime
&&
3118 af
->current_xor_hash
== bf
->current_xor_hash
&&
3119 sd_id128_equal(af
->header
->seqnum_id
, bf
->header
->seqnum_id
) &&
3120 af
->current_seqnum
== bf
->current_seqnum
)
3123 if (sd_id128_equal(af
->header
->seqnum_id
, bf
->header
->seqnum_id
)) {
3125 /* If this is from the same seqnum source, compare
3127 r
= CMP(af
->current_seqnum
, bf
->current_seqnum
);
3131 /* Wow! This is weird, different data but the same
3132 * seqnums? Something is borked, but let's make the
3133 * best of it and compare by time. */
3136 if (sd_id128_equal(af
->current_boot_id
, bf
->current_boot_id
)) {
3138 /* If the boot id matches, compare monotonic time */
3139 r
= CMP(af
->current_monotonic
, bf
->current_monotonic
);
3144 /* Otherwise, compare UTC time */
3145 r
= CMP(af
->current_realtime
, bf
->current_realtime
);
3149 /* Finally, compare by contents */
3150 return CMP(af
->current_xor_hash
, bf
->current_xor_hash
);
3153 static bool check_properly_ordered(uint64_t new_offset
, uint64_t old_offset
, direction_t direction
) {
3155 /* Consider it an error if any of the two offsets is uninitialized */
3156 if (old_offset
== 0 || new_offset
== 0)
3159 /* If we go down, the new offset must be larger than the old one. */
3160 return direction
== DIRECTION_DOWN
?
3161 new_offset
> old_offset
:
3162 new_offset
< old_offset
;
3165 int journal_file_next_entry(
3168 direction_t direction
,
3169 Object
**ret_object
,
3170 uint64_t *ret_offset
) {
3178 /* FIXME: fix return value assignment. */
3180 n
= le64toh(READ_NOW(f
->header
->n_entries
));
3185 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
3187 r
= generic_array_bisect(f
,
3188 le64toh(f
->header
->entry_array_offset
),
3189 le64toh(f
->header
->n_entries
),
3198 r
= bump_array_index(&i
, direction
, n
);
3203 /* And jump to it */
3204 r
= generic_array_get(f
, le64toh(f
->header
->entry_array_offset
), i
, direction
, ret_object
, &ofs
);
3208 /* Ensure our array is properly ordered. */
3209 if (p
> 0 && !check_properly_ordered(ofs
, p
, direction
))
3210 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
3211 "%s: entry array not properly ordered at entry %" PRIu64
,
3220 int journal_file_next_entry_for_data(
3223 direction_t direction
,
3224 Object
**ret_object
,
3225 uint64_t *ret_offset
) {
3232 assert(d
->object
.type
== OBJECT_DATA
);
3234 /* FIXME: fix return value assignment. */
3236 n
= le64toh(READ_NOW(d
->data
.n_entries
));
3240 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
3242 r
= generic_array_get_plus_one(f
,
3243 le64toh(d
->data
.entry_offset
),
3244 le64toh(d
->data
.entry_array_offset
),
3257 int journal_file_move_to_entry_by_offset_for_data(
3261 direction_t direction
,
3262 Object
**ret
, uint64_t *ret_offset
) {
3266 assert(d
->object
.type
== OBJECT_DATA
);
3268 return generic_array_bisect_plus_one(
3270 le64toh(d
->data
.entry_offset
),
3271 le64toh(d
->data
.entry_array_offset
),
3272 le64toh(d
->data
.n_entries
),
3276 ret
, ret_offset
, NULL
);
3279 int journal_file_move_to_entry_by_monotonic_for_data(
3284 direction_t direction
,
3285 Object
**ret_object
,
3286 uint64_t *ret_offset
) {
3288 uint64_t b
, z
, entry_offset
, entry_array_offset
, n_entries
;
3294 assert(d
->object
.type
== OBJECT_DATA
);
3296 /* Save all the required data before the data object gets invalidated. */
3297 entry_offset
= le64toh(READ_NOW(d
->data
.entry_offset
));
3298 entry_array_offset
= le64toh(READ_NOW(d
->data
.entry_array_offset
));
3299 n_entries
= le64toh(READ_NOW(d
->data
.n_entries
));
3301 /* First, seek by time */
3302 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, &b
);
3308 r
= generic_array_bisect_plus_one(f
,
3309 le64toh(o
->data
.entry_offset
),
3310 le64toh(o
->data
.entry_array_offset
),
3311 le64toh(o
->data
.n_entries
),
3313 test_object_monotonic
,
3319 /* And now, continue seeking until we find an entry that
3320 * exists in both bisection arrays */
3322 r
= journal_file_move_to_object(f
, OBJECT_DATA
, b
, &o
);
3329 r
= generic_array_bisect_plus_one(f
,
3340 r
= generic_array_bisect_plus_one(f
,
3341 le64toh(o
->data
.entry_offset
),
3342 le64toh(o
->data
.entry_array_offset
),
3343 le64toh(o
->data
.n_entries
),
3354 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, q
, ret_object
);
3369 int journal_file_move_to_entry_by_seqnum_for_data(
3373 direction_t direction
,
3374 Object
**ret_object
,
3375 uint64_t *ret_offset
) {
3379 assert(d
->object
.type
== OBJECT_DATA
);
3381 return generic_array_bisect_plus_one(
3383 le64toh(d
->data
.entry_offset
),
3384 le64toh(d
->data
.entry_array_offset
),
3385 le64toh(d
->data
.n_entries
),
3389 ret_object
, ret_offset
, NULL
);
3392 int journal_file_move_to_entry_by_realtime_for_data(
3396 direction_t direction
,
3397 Object
**ret
, uint64_t *ret_offset
) {
3401 assert(d
->object
.type
== OBJECT_DATA
);
3403 return generic_array_bisect_plus_one(
3405 le64toh(d
->data
.entry_offset
),
3406 le64toh(d
->data
.entry_array_offset
),
3407 le64toh(d
->data
.n_entries
),
3409 test_object_realtime
,
3411 ret
, ret_offset
, NULL
);
3414 void journal_file_dump(JournalFile
*f
) {
3422 journal_file_print_header(f
);
3424 p
= le64toh(READ_NOW(f
->header
->header_size
));
3429 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &o
);
3433 s
= journal_object_type_to_string(o
->object
.type
);
3435 switch (o
->object
.type
) {
3440 printf("Type: %s seqnum=%"PRIu64
" monotonic=%"PRIu64
" realtime=%"PRIu64
"\n",
3442 le64toh(o
->entry
.seqnum
),
3443 le64toh(o
->entry
.monotonic
),
3444 le64toh(o
->entry
.realtime
));
3450 printf("Type: %s seqnum=%"PRIu64
" epoch=%"PRIu64
"\n",
3452 le64toh(o
->tag
.seqnum
),
3453 le64toh(o
->tag
.epoch
));
3458 printf("Type: %s \n", s
);
3460 printf("Type: unknown (%i)", o
->object
.type
);
3465 c
= COMPRESSION_FROM_OBJECT(o
);
3466 if (c
> COMPRESSION_NONE
)
3467 printf("Flags: %s\n",
3468 compression_to_string(c
));
3470 if (p
== le64toh(f
->header
->tail_object_offset
))
3473 p
+= ALIGN64(le64toh(o
->object
.size
));
3478 log_error("File corrupt");
/* Render a realtime timestamp for display, substituting a " --- " placeholder
 * when FORMAT_TIMESTAMP() yields no string (uses the GNU "?:" extension, so the
 * formatted value is evaluated only once).
 * Note: the lifetime of the compound literal is the immediately surrounding block. */
#define FORMAT_TIMESTAMP_SAFE(t) (FORMAT_TIMESTAMP(t) ?: " --- ")
3484 void journal_file_print_header(JournalFile
*f
) {
3490 printf("File path: %s\n"
3494 "Sequential number ID: %s\n"
3496 "Compatible flags:%s%s\n"
3497 "Incompatible flags:%s%s%s%s%s%s\n"
3498 "Header size: %"PRIu64
"\n"
3499 "Arena size: %"PRIu64
"\n"
3500 "Data hash table size: %"PRIu64
"\n"
3501 "Field hash table size: %"PRIu64
"\n"
3502 "Rotate suggested: %s\n"
3503 "Head sequential number: %"PRIu64
" (%"PRIx64
")\n"
3504 "Tail sequential number: %"PRIu64
" (%"PRIx64
")\n"
3505 "Head realtime timestamp: %s (%"PRIx64
")\n"
3506 "Tail realtime timestamp: %s (%"PRIx64
")\n"
3507 "Tail monotonic timestamp: %s (%"PRIx64
")\n"
3508 "Objects: %"PRIu64
"\n"
3509 "Entry objects: %"PRIu64
"\n",
3511 SD_ID128_TO_STRING(f
->header
->file_id
),
3512 SD_ID128_TO_STRING(f
->header
->machine_id
),
3513 SD_ID128_TO_STRING(f
->header
->boot_id
),
3514 SD_ID128_TO_STRING(f
->header
->seqnum_id
),
3515 f
->header
->state
== STATE_OFFLINE
? "OFFLINE" :
3516 f
->header
->state
== STATE_ONLINE
? "ONLINE" :
3517 f
->header
->state
== STATE_ARCHIVED
? "ARCHIVED" : "UNKNOWN",
3518 JOURNAL_HEADER_SEALED(f
->header
) ? " SEALED" : "",
3519 (le32toh(f
->header
->compatible_flags
) & ~HEADER_COMPATIBLE_ANY
) ? " ???" : "",
3520 JOURNAL_HEADER_COMPRESSED_XZ(f
->header
) ? " COMPRESSED-XZ" : "",
3521 JOURNAL_HEADER_COMPRESSED_LZ4(f
->header
) ? " COMPRESSED-LZ4" : "",
3522 JOURNAL_HEADER_COMPRESSED_ZSTD(f
->header
) ? " COMPRESSED-ZSTD" : "",
3523 JOURNAL_HEADER_KEYED_HASH(f
->header
) ? " KEYED-HASH" : "",
3524 JOURNAL_HEADER_COMPACT(f
->header
) ? " COMPACT" : "",
3525 (le32toh(f
->header
->incompatible_flags
) & ~HEADER_INCOMPATIBLE_ANY
) ? " ???" : "",
3526 le64toh(f
->header
->header_size
),
3527 le64toh(f
->header
->arena_size
),
3528 le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
),
3529 le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
),
3530 yes_no(journal_file_rotate_suggested(f
, 0, LOG_DEBUG
)),
3531 le64toh(f
->header
->head_entry_seqnum
), le64toh(f
->header
->head_entry_seqnum
),
3532 le64toh(f
->header
->tail_entry_seqnum
), le64toh(f
->header
->tail_entry_seqnum
),
3533 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->head_entry_realtime
)), le64toh(f
->header
->head_entry_realtime
),
3534 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->tail_entry_realtime
)), le64toh(f
->header
->tail_entry_realtime
),
3535 FORMAT_TIMESPAN(le64toh(f
->header
->tail_entry_monotonic
), USEC_PER_MSEC
), le64toh(f
->header
->tail_entry_monotonic
),
3536 le64toh(f
->header
->n_objects
),
3537 le64toh(f
->header
->n_entries
));
3539 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
3540 printf("Data objects: %"PRIu64
"\n"
3541 "Data hash table fill: %.1f%%\n",
3542 le64toh(f
->header
->n_data
),
3543 100.0 * (double) le64toh(f
->header
->n_data
) / ((double) (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
))));
3545 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
3546 printf("Field objects: %"PRIu64
"\n"
3547 "Field hash table fill: %.1f%%\n",
3548 le64toh(f
->header
->n_fields
),
3549 100.0 * (double) le64toh(f
->header
->n_fields
) / ((double) (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
))));
3551 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_tags
))
3552 printf("Tag objects: %"PRIu64
"\n",
3553 le64toh(f
->header
->n_tags
));
3554 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
3555 printf("Entry array objects: %"PRIu64
"\n",
3556 le64toh(f
->header
->n_entry_arrays
));
3558 if (JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
))
3559 printf("Deepest field hash chain: %" PRIu64
"\n",
3560 f
->header
->field_hash_chain_depth
);
3562 if (JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
))
3563 printf("Deepest data hash chain: %" PRIu64
"\n",
3564 f
->header
->data_hash_chain_depth
);
3566 if (fstat(f
->fd
, &st
) >= 0)
3567 printf("Disk usage: %s\n", FORMAT_BYTES((uint64_t) st
.st_blocks
* 512ULL));
3570 static int journal_file_warn_btrfs(JournalFile
*f
) {
3576 /* Before we write anything, check if the COW logic is turned
3577 * off on btrfs. Given our write pattern that is quite
3578 * unfriendly to COW file systems this should greatly improve
3579 * performance on COW file systems, such as btrfs, at the
3580 * expense of data integrity features (which shouldn't be too
3581 * bad, given that we do our own checksumming). */
3583 r
= fd_is_fs_type(f
->fd
, BTRFS_SUPER_MAGIC
);
3585 return log_warning_errno(r
, "Failed to determine if journal is on btrfs: %m");
3589 r
= read_attr_fd(f
->fd
, &attrs
);
3591 return log_warning_errno(r
, "Failed to read file attributes: %m");
3593 if (attrs
& FS_NOCOW_FL
) {
3594 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3598 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3599 "This is likely to slow down journal access substantially, please consider turning "
3600 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f
->path
);
3605 static void journal_default_metrics(JournalMetrics
*m
, int fd
, bool compact
) {
3607 uint64_t fs_size
= 0;
3612 if (fstatvfs(fd
, &ss
) >= 0)
3613 fs_size
= ss
.f_frsize
* ss
.f_blocks
;
3615 log_debug_errno(errno
, "Failed to determine disk size: %m");
3617 if (m
->max_use
== UINT64_MAX
) {
3620 m
->max_use
= CLAMP(PAGE_ALIGN(fs_size
/ 10), /* 10% of file system size */
3621 MAX_USE_LOWER
, MAX_USE_UPPER
);
3623 m
->max_use
= MAX_USE_LOWER
;
3625 m
->max_use
= PAGE_ALIGN(m
->max_use
);
3627 if (m
->max_use
!= 0 && m
->max_use
< JOURNAL_FILE_SIZE_MIN
*2)
3628 m
->max_use
= JOURNAL_FILE_SIZE_MIN
*2;
3631 if (m
->min_use
== UINT64_MAX
) {
3633 m
->min_use
= CLAMP(PAGE_ALIGN(fs_size
/ 50), /* 2% of file system size */
3634 MIN_USE_LOW
, MIN_USE_HIGH
);
3636 m
->min_use
= MIN_USE_LOW
;
3639 if (m
->min_use
> m
->max_use
)
3640 m
->min_use
= m
->max_use
;
3642 if (m
->max_size
== UINT64_MAX
)
3643 m
->max_size
= MIN(PAGE_ALIGN(m
->max_use
/ 8), /* 8 chunks */
3646 m
->max_size
= PAGE_ALIGN(m
->max_size
);
3648 if (compact
&& m
->max_size
> JOURNAL_COMPACT_SIZE_MAX
)
3649 m
->max_size
= JOURNAL_COMPACT_SIZE_MAX
;
3651 if (m
->max_size
!= 0) {
3652 if (m
->max_size
< JOURNAL_FILE_SIZE_MIN
)
3653 m
->max_size
= JOURNAL_FILE_SIZE_MIN
;
3655 if (m
->max_use
!= 0 && m
->max_size
*2 > m
->max_use
)
3656 m
->max_use
= m
->max_size
*2;
3659 if (m
->min_size
== UINT64_MAX
)
3660 m
->min_size
= JOURNAL_FILE_SIZE_MIN
;
3662 m
->min_size
= CLAMP(PAGE_ALIGN(m
->min_size
),
3663 JOURNAL_FILE_SIZE_MIN
,
3664 m
->max_size
?: UINT64_MAX
);
3666 if (m
->keep_free
== UINT64_MAX
) {
3668 m
->keep_free
= MIN(PAGE_ALIGN(fs_size
/ 20), /* 5% of file system size */
3671 m
->keep_free
= DEFAULT_KEEP_FREE
;
3674 if (m
->n_max_files
== UINT64_MAX
)
3675 m
->n_max_files
= DEFAULT_N_MAX_FILES
;
3677 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64
,
3678 FORMAT_BYTES(m
->min_use
),
3679 FORMAT_BYTES(m
->max_use
),
3680 FORMAT_BYTES(m
->max_size
),
3681 FORMAT_BYTES(m
->min_size
),
3682 FORMAT_BYTES(m
->keep_free
),
3686 int journal_file_open(
3690 JournalFileFlags file_flags
,
3692 uint64_t compress_threshold_bytes
,
3693 JournalMetrics
*metrics
,
3694 MMapCache
*mmap_cache
,
3695 JournalFile
*template,
3696 JournalFile
**ret
) {
3698 bool newly_created
= false;
3703 assert(fd
>= 0 || fname
);
3707 if (!IN_SET((open_flags
& O_ACCMODE
), O_RDONLY
, O_RDWR
))
3710 if ((open_flags
& O_ACCMODE
) == O_RDONLY
&& FLAGS_SET(open_flags
, O_CREAT
))
3713 if (fname
&& (open_flags
& O_CREAT
) && !endswith(fname
, ".journal"))
3716 f
= new(JournalFile
, 1);
3720 *f
= (JournalFile
) {
3723 .open_flags
= open_flags
,
3724 .compress_threshold_bytes
= compress_threshold_bytes
== UINT64_MAX
?
3725 DEFAULT_COMPRESS_THRESHOLD
:
3726 MAX(MIN_COMPRESS_THRESHOLD
, compress_threshold_bytes
),
3730 f
->path
= strdup(fname
);
3738 /* If we don't know the path, fill in something explanatory and vaguely useful */
3739 if (asprintf(&f
->path
, "/proc/self/%i", fd
) < 0) {
3745 f
->chain_cache
= ordered_hashmap_new(&uint64_hash_ops
);
3746 if (!f
->chain_cache
) {
3752 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
3753 * or so, we likely fail quickly than block for long. For regular files O_NONBLOCK has no effect, hence
3754 * it doesn't hurt in that case. */
3756 f
->fd
= openat_report_new(AT_FDCWD
, f
->path
, f
->open_flags
|O_CLOEXEC
|O_NONBLOCK
, f
->mode
, &newly_created
);
3762 /* fds we opened here by us should also be closed by us. */
3765 r
= fd_nonblock(f
->fd
, false);
3769 if (!newly_created
) {
3770 r
= journal_file_fstat(f
);
3775 r
= journal_file_fstat(f
);
3779 /* If we just got the fd passed in, we don't really know if we created the file anew */
3780 newly_created
= f
->last_stat
.st_size
== 0 && journal_file_writable(f
);
3783 f
->cache_fd
= mmap_cache_add_fd(mmap_cache
, f
->fd
, mmap_prot_from_open_flags(open_flags
));
3789 if (newly_created
) {
3790 (void) journal_file_warn_btrfs(f
);
3792 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
3793 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
3794 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
3795 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
3796 * solely on mtime/atime/ctime of the file. */
3797 (void) fd_setcrtime(f
->fd
, 0);
3799 r
= journal_file_init_header(f
, file_flags
, template);
3803 r
= journal_file_fstat(f
);
3808 if (f
->last_stat
.st_size
< (off_t
) HEADER_SIZE_MIN
) {
3813 r
= mmap_cache_fd_get(f
->cache_fd
, CONTEXT_HEADER
, true, 0, PAGE_ALIGN(sizeof(Header
)), &f
->last_stat
, &h
);
3815 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
3816 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
3826 if (!newly_created
) {
3827 r
= journal_file_verify_header(f
);
3833 if (!newly_created
&& journal_file_writable(f
) && JOURNAL_HEADER_SEALED(f
->header
)) {
3834 r
= journal_file_fss_load(f
);
3840 if (journal_file_writable(f
)) {
3842 journal_default_metrics(metrics
, f
->fd
, JOURNAL_HEADER_COMPACT(f
->header
));
3843 f
->metrics
= *metrics
;
3844 } else if (template)
3845 f
->metrics
= template->metrics
;
3847 r
= journal_file_refresh_header(f
);
3853 r
= journal_file_hmac_setup(f
);
3858 if (newly_created
) {
3859 r
= journal_file_setup_field_hash_table(f
);
3863 r
= journal_file_setup_data_hash_table(f
);
3868 r
= journal_file_append_first_tag(f
);
3874 if (mmap_cache_fd_got_sigbus(f
->cache_fd
)) {
3879 if (template && template->post_change_timer
) {
3880 r
= journal_file_enable_post_change_timer(
3882 sd_event_source_get_event(template->post_change_timer
),
3883 template->post_change_timer_period
);
3889 /* The file is opened now successfully, thus we take possession of any passed in fd. */
3892 if (DEBUG_LOGGING
) {
3893 static int last_seal
= -1, last_compress
= -1, last_keyed_hash
= -1;
3894 static uint64_t last_bytes
= UINT64_MAX
;
3896 if (last_seal
!= JOURNAL_HEADER_SEALED(f
->header
) ||
3897 last_keyed_hash
!= JOURNAL_HEADER_KEYED_HASH(f
->header
) ||
3898 last_compress
!= JOURNAL_FILE_COMPRESS(f
) ||
3899 last_bytes
!= f
->compress_threshold_bytes
) {
3901 log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
3902 yes_no(JOURNAL_HEADER_SEALED(f
->header
)), yes_no(JOURNAL_HEADER_KEYED_HASH(f
->header
)),
3903 yes_no(JOURNAL_FILE_COMPRESS(f
)), FORMAT_BYTES(f
->compress_threshold_bytes
));
3904 last_seal
= JOURNAL_HEADER_SEALED(f
->header
);
3905 last_keyed_hash
= JOURNAL_HEADER_KEYED_HASH(f
->header
);
3906 last_compress
= JOURNAL_FILE_COMPRESS(f
);
3907 last_bytes
= f
->compress_threshold_bytes
;
3915 if (f
->cache_fd
&& mmap_cache_fd_got_sigbus(f
->cache_fd
))
3918 (void) journal_file_close(f
);
3920 if (newly_created
&& fd
< 0)
3921 (void) unlink(fname
);
3926 int journal_file_archive(JournalFile
*f
, char **ret_previous_path
) {
3927 _cleanup_free_
char *p
= NULL
;
3931 if (!journal_file_writable(f
))
3934 /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3935 * rotation, since we don't know the actual path, and couldn't rename the file hence. */
3936 if (path_startswith(f
->path
, "/proc/self/fd"))
3939 if (!endswith(f
->path
, ".journal"))
3942 if (asprintf(&p
, "%.*s@" SD_ID128_FORMAT_STR
"-%016"PRIx64
"-%016"PRIx64
".journal",
3943 (int) strlen(f
->path
) - 8, f
->path
,
3944 SD_ID128_FORMAT_VAL(f
->header
->seqnum_id
),
3945 le64toh(f
->header
->head_entry_seqnum
),
3946 le64toh(f
->header
->head_entry_realtime
)) < 0)
3949 /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
3950 * ignore that case. */
3951 if (rename(f
->path
, p
) < 0 && errno
!= ENOENT
)
3954 /* Sync the rename to disk */
3955 (void) fsync_directory_of_file(f
->fd
);
3957 if (ret_previous_path
)
3958 *ret_previous_path
= f
->path
;
3962 f
->path
= TAKE_PTR(p
);
3964 /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
3965 * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
3966 * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
3967 * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
3974 int journal_file_dispose(int dir_fd
, const char *fname
) {
3975 _cleanup_free_
char *p
= NULL
;
3979 /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shutdown. Note that
3980 * this is done without looking into the file or changing any of its contents. The idea is that this is called
3981 * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
3982 * for writing anymore. */
3984 if (!endswith(fname
, ".journal"))
3987 if (asprintf(&p
, "%.*s@%016" PRIx64
"-%016" PRIx64
".journal~",
3988 (int) strlen(fname
) - 8, fname
,
3989 now(CLOCK_REALTIME
),
3993 if (renameat(dir_fd
, fname
, dir_fd
, p
) < 0)
3999 int journal_file_copy_entry(JournalFile
*from
, JournalFile
*to
, Object
*o
, uint64_t p
) {
4000 _cleanup_free_ EntryItem
*items_alloc
= NULL
;
4002 uint64_t q
, n
, xor_hash
= 0;
4003 const sd_id128_t
*boot_id
;
4012 if (!journal_file_writable(to
))
4015 ts
= (dual_timestamp
) {
4016 .monotonic
= le64toh(o
->entry
.monotonic
),
4017 .realtime
= le64toh(o
->entry
.realtime
),
4019 boot_id
= &o
->entry
.boot_id
;
4021 n
= journal_file_entry_n_items(from
, o
);
4023 if (n
< ALLOCA_MAX
/ sizeof(EntryItem
) / 2)
4024 items
= newa(EntryItem
, n
);
4026 items_alloc
= new(EntryItem
, n
);
4030 items
= items_alloc
;
4033 for (uint64_t i
= 0; i
< n
; i
++) {
4039 q
= journal_file_entry_item_object_offset(from
, o
, i
);
4040 r
= journal_file_data_payload(from
, NULL
, q
, NULL
, 0, 0, &data
, &l
);
4041 if (IN_SET(r
, -EADDRNOTAVAIL
, -EBADMSG
)) {
4042 log_debug_errno(r
, "Entry item %"PRIu64
" data object is bad, skipping over it: %m", i
);
4052 r
= journal_file_append_data(to
, data
, l
, &u
, &h
);
4056 if (JOURNAL_HEADER_KEYED_HASH(to
->header
))
4057 xor_hash
^= jenkins_hash64(data
, l
);
4059 xor_hash
^= le64toh(u
->data
.hash
);
4061 items
[i
] = (EntryItem
) {
4063 .hash
= le64toh(u
->data
.hash
),
4067 /* The above journal_file_data_payload() may clear or overwrite cached object. Hence, we need
4068 * to re-read the object from the cache. */
4069 r
= journal_file_move_to_object(from
, OBJECT_ENTRY
, p
, &o
);
4074 r
= journal_file_append_entry_internal(to
, &ts
, boot_id
, xor_hash
, items
, n
, NULL
, NULL
, NULL
);
4076 if (mmap_cache_fd_got_sigbus(to
->cache_fd
))
4082 void journal_reset_metrics(JournalMetrics
*m
) {
4085 /* Set everything to "pick automatic values". */
4087 *m
= (JournalMetrics
) {
4088 .min_use
= UINT64_MAX
,
4089 .max_use
= UINT64_MAX
,
4090 .min_size
= UINT64_MAX
,
4091 .max_size
= UINT64_MAX
,
4092 .keep_free
= UINT64_MAX
,
4093 .n_max_files
= UINT64_MAX
,
4097 int journal_file_get_cutoff_realtime_usec(JournalFile
*f
, usec_t
*ret_from
, usec_t
*ret_to
) {
4100 assert(ret_from
|| ret_to
);
4103 if (f
->header
->head_entry_realtime
== 0)
4106 *ret_from
= le64toh(f
->header
->head_entry_realtime
);
4110 if (f
->header
->tail_entry_realtime
== 0)
4113 *ret_to
= le64toh(f
->header
->tail_entry_realtime
);
4119 int journal_file_get_cutoff_monotonic_usec(JournalFile
*f
, sd_id128_t boot_id
, usec_t
*ret_from
, usec_t
*ret_to
) {
4125 assert(ret_from
|| ret_to
);
4127 /* FIXME: fix return value assignment on success with 0. */
4129 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, &p
);
4133 if (le64toh(o
->data
.n_entries
) <= 0)
4137 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, le64toh(o
->data
.entry_offset
), &o
);
4141 *ret_from
= le64toh(o
->entry
.monotonic
);
4145 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
4149 r
= generic_array_get_plus_one(f
,
4150 le64toh(o
->data
.entry_offset
),
4151 le64toh(o
->data
.entry_array_offset
),
4152 le64toh(o
->data
.n_entries
) - 1,
4158 *ret_to
= le64toh(o
->entry
.monotonic
);
/* Shared rate limit for the "suggesting rotation" log messages below: at most
 * 3 messages per 60-second interval.
 * Ideally this would be a function parameter but initializers for static fields have to be compile
 * time constants so we hardcode the interval instead. */
#define LOG_RATELIMIT ((const RateLimit) { .interval = 60 * USEC_PER_SEC, .burst = 3 })
4168 bool journal_file_rotate_suggested(JournalFile
*f
, usec_t max_file_usec
, int log_level
) {
4172 /* If we gained new header fields we gained new features,
4173 * hence suggest a rotation */
4174 if (le64toh(f
->header
->header_size
) < sizeof(Header
)) {
4175 log_full(log_level
, "%s uses an outdated header, suggesting rotation.", f
->path
);
4179 /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
4180 * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
4181 * need the n_data field, which only exists in newer versions. */
4183 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
4184 if (le64toh(f
->header
->n_data
) * 4ULL > (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
)) * 3ULL) {
4186 log_level
, LOG_RATELIMIT
,
4187 "Data hash table of %s has a fill level at %.1f (%"PRIu64
" of %"PRIu64
" items, %llu file size, %"PRIu64
" bytes per hash table item), suggesting rotation.",
4189 100.0 * (double) le64toh(f
->header
->n_data
) / ((double) (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
))),
4190 le64toh(f
->header
->n_data
),
4191 le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
),
4192 (unsigned long long) f
->last_stat
.st_size
,
4193 f
->last_stat
.st_size
/ le64toh(f
->header
->n_data
));
4197 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
4198 if (le64toh(f
->header
->n_fields
) * 4ULL > (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
)) * 3ULL) {
4200 log_level
, LOG_RATELIMIT
,
4201 "Field hash table of %s has a fill level at %.1f (%"PRIu64
" of %"PRIu64
" items), suggesting rotation.",
4203 100.0 * (double) le64toh(f
->header
->n_fields
) / ((double) (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
))),
4204 le64toh(f
->header
->n_fields
),
4205 le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
));
4209 /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
4210 * longest chain is longer than some threshold, let's suggest rotation. */
4211 if (JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) &&
4212 le64toh(f
->header
->data_hash_chain_depth
) > HASH_CHAIN_DEPTH_MAX
) {
4214 log_level
, LOG_RATELIMIT
,
4215 "Data hash table of %s has deepest hash chain of length %" PRIu64
", suggesting rotation.",
4216 f
->path
, le64toh(f
->header
->data_hash_chain_depth
));
4220 if (JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) &&
4221 le64toh(f
->header
->field_hash_chain_depth
) > HASH_CHAIN_DEPTH_MAX
) {
4223 log_level
, LOG_RATELIMIT
,
4224 "Field hash table of %s has deepest hash chain of length at %" PRIu64
", suggesting rotation.",
4225 f
->path
, le64toh(f
->header
->field_hash_chain_depth
));
4229 /* Are the data objects properly indexed by field objects? */
4230 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
) &&
4231 JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
) &&
4232 le64toh(f
->header
->n_data
) > 0 &&
4233 le64toh(f
->header
->n_fields
) == 0) {
4235 log_level
, LOG_RATELIMIT
,
4236 "Data objects of %s are not indexed by field objects, suggesting rotation.",
4241 if (max_file_usec
> 0) {
4244 h
= le64toh(f
->header
->head_entry_realtime
);
4245 t
= now(CLOCK_REALTIME
);
4247 if (h
> 0 && t
> h
+ max_file_usec
) {
4249 log_level
, LOG_RATELIMIT
,
4250 "Oldest entry in %s is older than the configured file retention duration (%s), suggesting rotation.",
4251 f
->path
, FORMAT_TIMESPAN(max_file_usec
, USEC_PER_SEC
));
4259 static const char * const journal_object_type_table
[] = {
4260 [OBJECT_UNUSED
] = "unused",
4261 [OBJECT_DATA
] = "data",
4262 [OBJECT_FIELD
] = "field",
4263 [OBJECT_ENTRY
] = "entry",
4264 [OBJECT_DATA_HASH_TABLE
] = "data hash table",
4265 [OBJECT_FIELD_HASH_TABLE
] = "field hash table",
4266 [OBJECT_ENTRY_ARRAY
] = "entry array",
4267 [OBJECT_TAG
] = "tag",
4270 DEFINE_STRING_TABLE_LOOKUP_TO_STRING(journal_object_type
, ObjectType
);