1 /* SPDX-License-Identifier: LGPL-2.1+ */
9 #include <sys/statvfs.h>
15 #include "alloc-util.h"
16 #include "btrfs-util.h"
17 #include "chattr-util.h"
21 #include "format-util.h"
23 #include "journal-authenticate.h"
24 #include "journal-def.h"
25 #include "journal-file.h"
27 #include "memory-util.h"
28 #include "path-util.h"
29 #include "random-util.h"
31 #include "sort-util.h"
32 #include "stat-util.h"
33 #include "string-util.h"
35 #include "xattr-util.h"
37 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
38 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
40 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
41 #define MIN_COMPRESS_THRESHOLD (8ULL)
43 /* This is the minimum journal file size */
44 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
46 /* These are the lower and upper bounds if we deduce the max_use value
47 * from the file system size */
48 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
49 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
51 /* Those are the lower and upper bounds for the minimal use limit,
52 * i.e. how much we'll use even if keep_free suggests otherwise. */
53 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
54 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
56 /* This is the upper bound if we deduce max_size from max_use */
57 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
59 /* This is the upper bound if we deduce the keep_free value from the file system size */
61 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
63 /* This is the keep_free value when we can't determine the system size */
65 #define DEFAULT_KEEP_FREE (1024 * 1024ULL) /* 1 MB */
67 /* This is the default maximum number of journal files to keep around. */
68 #define DEFAULT_N_MAX_FILES 100
70 /* n_data was the first entry we added after the initial file format design */
71 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
73 /* How many entries to keep in the entry array chain cache at max */
74 #define CHAIN_CACHE_MAX 20
76 /* How much to increase the journal file size at once each time we allocate something new. */
77 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL) /* 8MB */
79 /* Reread fstat() of the file for detecting deletions at least this often */
80 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
82 /* The mmap context to use for the header we pick as one above the last defined typed */
83 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
85 /* Longest hash chain to rotate after */
86 #define HASH_CHAIN_DEPTH_MAX 100
89 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
92 /* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
93 * As a result we use atomic operations on f->offline_state for inter-thread communications with
94 * journal_file_set_offline() and journal_file_set_online(). */
95 static void journal_file_set_offline_internal(JournalFile
*f
) {
101 switch (f
->offline_state
) {
103 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_CANCEL
, OFFLINE_DONE
))
107 case OFFLINE_AGAIN_FROM_SYNCING
:
108 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_AGAIN_FROM_SYNCING
, OFFLINE_SYNCING
))
112 case OFFLINE_AGAIN_FROM_OFFLINING
:
113 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_AGAIN_FROM_OFFLINING
, OFFLINE_SYNCING
))
117 case OFFLINE_SYNCING
:
120 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_SYNCING
, OFFLINE_OFFLINING
))
123 f
->header
->state
= f
->archive
? STATE_ARCHIVED
: STATE_OFFLINE
;
127 case OFFLINE_OFFLINING
:
128 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_OFFLINING
, OFFLINE_DONE
))
135 log_debug("OFFLINE_JOINED unexpected offline state for journal_file_set_offline_internal()");
/* pthread entry point for asynchronous offlining: gives the thread a
 * recognizable name and runs the offline state machine on the JournalFile
 * passed as the opaque 'arg'.
 * NOTE(review): extraction artifact — the function's trailing return and
 * closing brace are missing from this chunk; confirm against upstream. */
141 static void * journal_file_set_offline_thread(void *arg
) {
142 JournalFile
*f
= arg
;
/* Thread name is best-effort only, hence the (void) cast. */
144 (void) pthread_setname_np(pthread_self(), "journal-offline");
146 journal_file_set_offline_internal(f
);
/* Joins a previously started offline thread, if any, and marks the journal
 * as OFFLINE_JOINED afterwards. Also propagates a SIGBUS condition detected
 * by the mmap cache (i.e. the file was truncated/deleted under us).
 * NOTE(review): several original lines (asserts, error returns, final
 * return) are missing from this extraction — verify against upstream. */
151 static int journal_file_set_offline_thread_join(JournalFile
*f
) {
/* Nothing to join if we already joined (or never started) a thread. */
156 if (f
->offline_state
== OFFLINE_JOINED
)
159 r
= pthread_join(f
->offline_thread
, NULL
);
163 f
->offline_state
= OFFLINE_JOINED
;
/* If the offline thread hit SIGBUS while touching the mapping, surface that. */
165 if (mmap_cache_got_sigbus(f
->mmap
, f
->cache_fd
))
171 /* Trigger a restart if the offline thread is mid-flight in a restartable state. */
/* Returns true when the in-flight offline was successfully redirected into an
 * "again" state (so the thread will re-run the offline sequence), false when
 * no restart was possible. Uses GCC __sync CAS builtins for lock-free
 * inter-thread state transitions.
 * NOTE(review): the per-case return/break statements are missing from this
 * extraction; the CAS from OFFLINE_CANCEL in the AGAIN_* cases presumably
 * handles a concurrent cancellation — confirm against upstream. */
172 static bool journal_file_set_offline_try_restart(JournalFile
*f
) {
174 switch (f
->offline_state
) {
175 case OFFLINE_AGAIN_FROM_SYNCING
:
176 case OFFLINE_AGAIN_FROM_OFFLINING
:
180 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_CANCEL
, OFFLINE_AGAIN_FROM_SYNCING
))
184 case OFFLINE_SYNCING
:
185 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_SYNCING
, OFFLINE_AGAIN_FROM_SYNCING
))
189 case OFFLINE_OFFLINING
:
190 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_OFFLINING
, OFFLINE_AGAIN_FROM_OFFLINING
))
200 /* Sets a journal offline.
202 * If wait is false then an offline is dispatched in a separate thread for a
203 * subsequent journal_file_set_offline() or journal_file_set_online() of the
204 * same journal to synchronize with.
206 * If wait is true, then either an existing offline thread will be restarted
207 * and joined, or if none exists the offline is simply performed in this
208 * context without involving another thread.
210 int journal_file_set_offline(JournalFile
*f
, bool wait
) {
219 if (f
->fd
< 0 || !f
->header
)
222 /* An offlining journal is implicitly online and may modify f->header->state,
223 * we must also join any potentially lingering offline thread when not online. */
224 if (!journal_file_is_offlining(f
) && f
->header
->state
!= STATE_ONLINE
)
225 return journal_file_set_offline_thread_join(f
);
227 /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
228 restarted
= journal_file_set_offline_try_restart(f
);
229 if ((restarted
&& wait
) || !restarted
) {
230 r
= journal_file_set_offline_thread_join(f
);
238 /* Initiate a new offline. */
239 f
->offline_state
= OFFLINE_SYNCING
;
241 if (wait
) /* Without using a thread if waiting. */
242 journal_file_set_offline_internal(f
);
244 sigset_t ss
, saved_ss
;
247 assert_se(sigfillset(&ss
) >= 0);
248 /* Don't block SIGBUS since the offlining thread accesses a memory mapped file.
249 * Asynchronous SIGBUS signals can safely be handled by either thread. */
250 assert_se(sigdelset(&ss
, SIGBUS
) >= 0);
252 r
= pthread_sigmask(SIG_BLOCK
, &ss
, &saved_ss
);
256 r
= pthread_create(&f
->offline_thread
, NULL
, journal_file_set_offline_thread
, f
);
258 k
= pthread_sigmask(SIG_SETMASK
, &saved_ss
, NULL
);
260 f
->offline_state
= OFFLINE_JOINED
;
270 static int journal_file_set_online(JournalFile
*f
) {
278 if (f
->fd
< 0 || !f
->header
)
282 switch (f
->offline_state
) {
284 /* No offline thread, no need to wait. */
288 case OFFLINE_SYNCING
:
289 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_SYNCING
, OFFLINE_CANCEL
))
291 /* Canceled syncing prior to offlining, no need to wait. */
295 case OFFLINE_AGAIN_FROM_SYNCING
:
296 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_AGAIN_FROM_SYNCING
, OFFLINE_CANCEL
))
298 /* Canceled restart from syncing, no need to wait. */
302 case OFFLINE_AGAIN_FROM_OFFLINING
:
303 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_AGAIN_FROM_OFFLINING
, OFFLINE_CANCEL
))
305 /* Canceled restart from offlining, must wait for offlining to complete however. */
310 r
= journal_file_set_offline_thread_join(f
);
320 if (mmap_cache_got_sigbus(f
->mmap
, f
->cache_fd
))
323 switch (f
->header
->state
) {
328 f
->header
->state
= STATE_ONLINE
;
/* Returns whether an offline transition is currently in progress. A full
 * memory barrier is issued before reading offline_state, since that field is
 * written from the offline thread with atomic CAS operations. DONE/JOINED
 * count as "not offlining".
 * NOTE(review): the return statements are missing from this extraction. */
337 bool journal_file_is_offlining(JournalFile
*f
) {
340 __sync_synchronize();
342 if (IN_SET(f
->offline_state
, OFFLINE_DONE
, OFFLINE_JOINED
))
348 JournalFile
* journal_file_close(JournalFile
*f
) {
353 /* Write the final tag */
354 if (f
->seal
&& f
->writable
) {
357 r
= journal_file_append_tag(f
);
359 log_error_errno(r
, "Failed to append tag when closing journal: %m");
363 if (f
->post_change_timer
) {
364 if (sd_event_source_get_enabled(f
->post_change_timer
, NULL
) > 0)
365 journal_file_post_change(f
);
367 sd_event_source_disable_unref(f
->post_change_timer
);
370 journal_file_set_offline(f
, true);
372 if (f
->mmap
&& f
->cache_fd
)
373 mmap_cache_free_fd(f
->mmap
, f
->cache_fd
);
375 if (f
->fd
>= 0 && f
->defrag_on_close
) {
377 /* Be friendly to btrfs: turn COW back on again now,
378 * and defragment the file. We won't write to the file
379 * ever again, hence remove all fragmentation, and
380 * reenable all the good bits COW usually provides
381 * (such as data checksumming). */
383 (void) chattr_fd(f
->fd
, 0, FS_NOCOW_FL
, NULL
);
384 (void) btrfs_defrag_fd(f
->fd
);
391 mmap_cache_unref(f
->mmap
);
393 ordered_hashmap_free_free(f
->chain_cache
);
395 #if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
396 free(f
->compress_buffer
);
401 munmap(f
->fss_file
, PAGE_ALIGN(f
->fss_file_size
));
403 free(f
->fsprg_state
);
408 gcry_md_close(f
->hmac
);
414 static int journal_file_init_header(JournalFile
*f
, JournalFile
*template) {
421 memcpy(h
.signature
, HEADER_SIGNATURE
, 8);
422 h
.header_size
= htole64(ALIGN64(sizeof(h
)));
424 h
.incompatible_flags
|= htole32(
425 f
->compress_xz
* HEADER_INCOMPATIBLE_COMPRESSED_XZ
|
426 f
->compress_lz4
* HEADER_INCOMPATIBLE_COMPRESSED_LZ4
|
427 f
->compress_zstd
* HEADER_INCOMPATIBLE_COMPRESSED_ZSTD
|
428 f
->keyed_hash
* HEADER_INCOMPATIBLE_KEYED_HASH
);
430 h
.compatible_flags
= htole32(
431 f
->seal
* HEADER_COMPATIBLE_SEALED
);
433 r
= sd_id128_randomize(&h
.file_id
);
438 h
.seqnum_id
= template->header
->seqnum_id
;
439 h
.tail_entry_seqnum
= template->header
->tail_entry_seqnum
;
441 h
.seqnum_id
= h
.file_id
;
443 k
= pwrite(f
->fd
, &h
, sizeof(h
), 0);
/* Refreshes the mutable header fields of a freshly opened/created journal:
 * stamps in the machine ID and boot ID, flips the file online, and syncs the
 * containing directory so a newly created file is durable.
 * NOTE(review): intermediate error-handling lines are missing from this
 * extraction — verify the return paths against upstream. */
453 static int journal_file_refresh_header(JournalFile
*f
) {
459 r
= sd_id128_get_machine(&f
->header
->machine_id
);
460 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
461 /* We don't have a machine-id, let's continue without */
462 zero(f
->header
->machine_id
);
466 r
= sd_id128_get_boot(&f
->header
->boot_id
);
470 r
= journal_file_set_online(f
);
472 /* Sync the online state to disk */
475 /* We likely just created a new file, also sync the directory this file is located in. */
476 (void) fsync_directory_of_file(f
->fd
);
481 static bool warn_wrong_flags(const JournalFile
*f
, bool compatible
) {
482 const uint32_t any
= compatible
? HEADER_COMPATIBLE_ANY
: HEADER_INCOMPATIBLE_ANY
,
483 supported
= compatible
? HEADER_COMPATIBLE_SUPPORTED
: HEADER_INCOMPATIBLE_SUPPORTED
;
484 const char *type
= compatible
? "compatible" : "incompatible";
487 flags
= le32toh(compatible
? f
->header
->compatible_flags
: f
->header
->incompatible_flags
);
489 if (flags
& ~supported
) {
491 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32
,
492 f
->path
, type
, flags
& ~any
);
493 flags
= (flags
& any
) & ~supported
;
497 _cleanup_free_
char *t
= NULL
;
500 if (flags
& HEADER_COMPATIBLE_SEALED
)
501 strv
[n
++] = "sealed";
503 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_XZ
)
504 strv
[n
++] = "xz-compressed";
505 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_LZ4
)
506 strv
[n
++] = "lz4-compressed";
507 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_ZSTD
)
508 strv
[n
++] = "zstd-compressed";
509 if (flags
& HEADER_INCOMPATIBLE_KEYED_HASH
)
510 strv
[n
++] = "keyed-hash";
513 assert(n
< ELEMENTSOF(strv
));
515 t
= strv_join((char**) strv
, ", ");
516 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
517 f
->path
, type
, n
> 1 ? "flags" : "flag", strnull(t
));
525 static int journal_file_verify_header(JournalFile
*f
) {
526 uint64_t arena_size
, header_size
;
531 if (memcmp(f
->header
->signature
, HEADER_SIGNATURE
, 8))
534 /* In both read and write mode we refuse to open files with incompatible
535 * flags we don't know. */
536 if (warn_wrong_flags(f
, false))
537 return -EPROTONOSUPPORT
;
539 /* When open for writing we refuse to open files with compatible flags, too. */
540 if (f
->writable
&& warn_wrong_flags(f
, true))
541 return -EPROTONOSUPPORT
;
543 if (f
->header
->state
>= _STATE_MAX
)
546 header_size
= le64toh(READ_NOW(f
->header
->header_size
));
548 /* The first addition was n_data, so check that we are at least this large */
549 if (header_size
< HEADER_SIZE_MIN
)
552 if (JOURNAL_HEADER_SEALED(f
->header
) && !JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
555 arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
557 if (UINT64_MAX
- header_size
< arena_size
|| header_size
+ arena_size
> (uint64_t) f
->last_stat
.st_size
)
560 if (le64toh(f
->header
->tail_object_offset
) > header_size
+ arena_size
)
563 if (!VALID64(le64toh(f
->header
->data_hash_table_offset
)) ||
564 !VALID64(le64toh(f
->header
->field_hash_table_offset
)) ||
565 !VALID64(le64toh(f
->header
->tail_object_offset
)) ||
566 !VALID64(le64toh(f
->header
->entry_array_offset
)))
570 sd_id128_t machine_id
;
574 r
= sd_id128_get_machine(&machine_id
);
578 if (!sd_id128_equal(machine_id
, f
->header
->machine_id
))
581 state
= f
->header
->state
;
583 if (state
== STATE_ARCHIVED
)
584 return -ESHUTDOWN
; /* Already archived */
585 else if (state
== STATE_ONLINE
)
586 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
587 "Journal file %s is already online. Assuming unclean closing.",
589 else if (state
!= STATE_OFFLINE
)
590 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
591 "Journal file %s has unknown state %i.",
594 if (f
->header
->field_hash_table_size
== 0 || f
->header
->data_hash_table_size
== 0)
597 /* Don't permit appending to files from the future. Because otherwise the realtime timestamps wouldn't
598 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
600 if (le64toh(f
->header
->tail_entry_realtime
) > now(CLOCK_REALTIME
))
601 return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY
),
602 "Journal file %s is from the future, refusing to append new data to it that'd be older.",
606 f
->compress_xz
= JOURNAL_HEADER_COMPRESSED_XZ(f
->header
);
607 f
->compress_lz4
= JOURNAL_HEADER_COMPRESSED_LZ4(f
->header
);
608 f
->compress_zstd
= JOURNAL_HEADER_COMPRESSED_ZSTD(f
->header
);
610 f
->seal
= JOURNAL_HEADER_SEALED(f
->header
);
612 f
->keyed_hash
= JOURNAL_HEADER_KEYED_HASH(f
->header
);
/* Refreshes f->last_stat via fstat() and records when we did so (monotonic
 * clock), so callers can rate-limit re-stat'ing. Rejects non-regular files
 * and files that have already been unlinked (st_nlink == 0).
 * NOTE(review): asserts and error returns are missing from this extraction. */
617 int journal_file_fstat(JournalFile
*f
) {
623 if (fstat(f
->fd
, &f
->last_stat
) < 0)
/* Remember when we last stat'ed, for the LAST_STAT_REFRESH_USEC throttle. */
626 f
->last_stat_usec
= now(CLOCK_MONOTONIC
);
628 /* Refuse dealing with files that aren't regular */
629 r
= stat_verify_regular(&f
->last_stat
);
633 /* Refuse appending to files that are already deleted */
634 if (f
->last_stat
.st_nlink
<= 0)
640 static int journal_file_allocate(JournalFile
*f
, uint64_t offset
, uint64_t size
) {
641 uint64_t old_size
, new_size
, old_header_size
, old_arena_size
;
647 /* We assume that this file is not sparse, and we know that for sure, since we always call
648 * posix_fallocate() ourselves */
650 if (size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - offset
)
653 if (mmap_cache_got_sigbus(f
->mmap
, f
->cache_fd
))
656 old_header_size
= le64toh(READ_NOW(f
->header
->header_size
));
657 old_arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
658 if (old_arena_size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - old_header_size
)
661 old_size
= old_header_size
+ old_arena_size
;
663 new_size
= MAX(PAGE_ALIGN(offset
+ size
), old_header_size
);
665 if (new_size
<= old_size
) {
667 /* We already pre-allocated enough space, but before
668 * we write to it, let's check with fstat() if the
669 * file got deleted, in order make sure we don't throw
670 * away the data immediately. Don't check fstat() for
671 * all writes though, but only once ever 10s. */
673 if (f
->last_stat_usec
+ LAST_STAT_REFRESH_USEC
> now(CLOCK_MONOTONIC
))
676 return journal_file_fstat(f
);
679 /* Allocate more space. */
681 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
684 if (new_size
> f
->metrics
.min_size
&& f
->metrics
.keep_free
> 0) {
687 if (fstatvfs(f
->fd
, &svfs
) >= 0) {
690 available
= LESS_BY((uint64_t) svfs
.f_bfree
* (uint64_t) svfs
.f_bsize
, f
->metrics
.keep_free
);
692 if (new_size
- old_size
> available
)
697 /* Increase by larger blocks at once */
698 new_size
= DIV_ROUND_UP(new_size
, FILE_SIZE_INCREASE
) * FILE_SIZE_INCREASE
;
699 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
700 new_size
= f
->metrics
.max_size
;
702 /* Note that the glibc fallocate() fallback is very
703 inefficient, hence we try to minimize the allocation area
705 r
= posix_fallocate(f
->fd
, old_size
, new_size
- old_size
);
709 f
->header
->arena_size
= htole64(new_size
- old_header_size
);
711 return journal_file_fstat(f
);
714 static unsigned type_to_context(ObjectType type
) {
715 /* One context for each type, plus one catch-all for the rest */
716 assert_cc(_OBJECT_TYPE_MAX
<= MMAP_CACHE_MAX_CONTEXTS
);
717 assert_cc(CONTEXT_HEADER
< MMAP_CACHE_MAX_CONTEXTS
);
718 return type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
? type
: 0;
721 static int journal_file_move_to(
738 if (size
> UINT64_MAX
- offset
)
741 /* Avoid SIGBUS on invalid accesses */
742 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
) {
743 /* Hmm, out of range? Let's refresh the fstat() data
744 * first, before we trust that check. */
746 r
= journal_file_fstat(f
);
750 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
)
751 return -EADDRNOTAVAIL
;
754 return mmap_cache_get(f
->mmap
, f
->cache_fd
, f
->prot
, type_to_context(type
), keep_always
, offset
, size
, &f
->last_stat
, ret
, ret_size
);
757 static uint64_t minimum_header_size(Object
*o
) {
759 static const uint64_t table
[] = {
760 [OBJECT_DATA
] = sizeof(DataObject
),
761 [OBJECT_FIELD
] = sizeof(FieldObject
),
762 [OBJECT_ENTRY
] = sizeof(EntryObject
),
763 [OBJECT_DATA_HASH_TABLE
] = sizeof(HashTableObject
),
764 [OBJECT_FIELD_HASH_TABLE
] = sizeof(HashTableObject
),
765 [OBJECT_ENTRY_ARRAY
] = sizeof(EntryArrayObject
),
766 [OBJECT_TAG
] = sizeof(TagObject
),
769 if (o
->object
.type
>= ELEMENTSOF(table
) || table
[o
->object
.type
] <= 0)
770 return sizeof(ObjectHeader
);
772 return table
[o
->object
.type
];
775 /* Lightweight object checks. We want this to be fast, so that we won't
776 * slowdown every journal_file_move_to_object() call too much. */
777 static int journal_file_check_object(JournalFile
*f
, uint64_t offset
, Object
*o
) {
781 switch (o
->object
.type
) {
784 if ((le64toh(o
->data
.entry_offset
) == 0) ^ (le64toh(o
->data
.n_entries
) == 0))
785 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
786 "Bad n_entries: %" PRIu64
": %" PRIu64
,
787 le64toh(o
->data
.n_entries
),
790 if (le64toh(o
->object
.size
) <= offsetof(DataObject
, payload
))
791 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
792 "Bad object size (<= %zu): %" PRIu64
": %" PRIu64
,
793 offsetof(DataObject
, payload
),
794 le64toh(o
->object
.size
),
797 if (!VALID64(le64toh(o
->data
.next_hash_offset
)) ||
798 !VALID64(le64toh(o
->data
.next_field_offset
)) ||
799 !VALID64(le64toh(o
->data
.entry_offset
)) ||
800 !VALID64(le64toh(o
->data
.entry_array_offset
)))
801 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
802 "Invalid offset, next_hash_offset=" OFSfmt
", next_field_offset=" OFSfmt
", entry_offset=" OFSfmt
", entry_array_offset=" OFSfmt
": %" PRIu64
,
803 le64toh(o
->data
.next_hash_offset
),
804 le64toh(o
->data
.next_field_offset
),
805 le64toh(o
->data
.entry_offset
),
806 le64toh(o
->data
.entry_array_offset
),
812 if (le64toh(o
->object
.size
) <= offsetof(FieldObject
, payload
))
813 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
814 "Bad field size (<= %zu): %" PRIu64
": %" PRIu64
,
815 offsetof(FieldObject
, payload
),
816 le64toh(o
->object
.size
),
819 if (!VALID64(le64toh(o
->field
.next_hash_offset
)) ||
820 !VALID64(le64toh(o
->field
.head_data_offset
)))
821 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
822 "Invalid offset, next_hash_offset=" OFSfmt
", head_data_offset=" OFSfmt
": %" PRIu64
,
823 le64toh(o
->field
.next_hash_offset
),
824 le64toh(o
->field
.head_data_offset
),
831 sz
= le64toh(READ_NOW(o
->object
.size
));
832 if (sz
< offsetof(EntryObject
, items
) ||
833 (sz
- offsetof(EntryObject
, items
)) % sizeof(EntryItem
) != 0)
834 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
835 "Bad entry size (<= %zu): %" PRIu64
": %" PRIu64
,
836 offsetof(EntryObject
, items
),
840 if ((sz
- offsetof(EntryObject
, items
)) / sizeof(EntryItem
) <= 0)
841 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
842 "Invalid number items in entry: %" PRIu64
": %" PRIu64
,
843 (sz
- offsetof(EntryObject
, items
)) / sizeof(EntryItem
),
846 if (le64toh(o
->entry
.seqnum
) <= 0)
847 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
848 "Invalid entry seqnum: %" PRIx64
": %" PRIu64
,
849 le64toh(o
->entry
.seqnum
),
852 if (!VALID_REALTIME(le64toh(o
->entry
.realtime
)))
853 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
854 "Invalid entry realtime timestamp: %" PRIu64
": %" PRIu64
,
855 le64toh(o
->entry
.realtime
),
858 if (!VALID_MONOTONIC(le64toh(o
->entry
.monotonic
)))
859 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
860 "Invalid entry monotonic timestamp: %" PRIu64
": %" PRIu64
,
861 le64toh(o
->entry
.monotonic
),
867 case OBJECT_DATA_HASH_TABLE
:
868 case OBJECT_FIELD_HASH_TABLE
: {
871 sz
= le64toh(READ_NOW(o
->object
.size
));
872 if (sz
< offsetof(HashTableObject
, items
) ||
873 (sz
- offsetof(HashTableObject
, items
)) % sizeof(HashItem
) != 0 ||
874 (sz
- offsetof(HashTableObject
, items
)) / sizeof(HashItem
) <= 0)
875 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
876 "Invalid %s hash table size: %" PRIu64
": %" PRIu64
,
877 o
->object
.type
== OBJECT_DATA_HASH_TABLE
? "data" : "field",
884 case OBJECT_ENTRY_ARRAY
: {
887 sz
= le64toh(READ_NOW(o
->object
.size
));
888 if (sz
< offsetof(EntryArrayObject
, items
) ||
889 (sz
- offsetof(EntryArrayObject
, items
)) % sizeof(le64_t
) != 0 ||
890 (sz
- offsetof(EntryArrayObject
, items
)) / sizeof(le64_t
) <= 0)
891 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
892 "Invalid object entry array size: %" PRIu64
": %" PRIu64
,
896 if (!VALID64(le64toh(o
->entry_array
.next_entry_array_offset
)))
897 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
898 "Invalid object entry array next_entry_array_offset: " OFSfmt
": %" PRIu64
,
899 le64toh(o
->entry_array
.next_entry_array_offset
),
906 if (le64toh(o
->object
.size
) != sizeof(TagObject
))
907 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
908 "Invalid object tag size: %" PRIu64
": %" PRIu64
,
909 le64toh(o
->object
.size
),
912 if (!VALID_EPOCH(le64toh(o
->tag
.epoch
)))
913 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
914 "Invalid object tag epoch: %" PRIu64
": %" PRIu64
,
915 le64toh(o
->tag
.epoch
), offset
);
923 int journal_file_move_to_object(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
**ret
) {
933 /* Objects may only be located at multiple of 64 bit */
934 if (!VALID64(offset
))
935 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
936 "Attempt to move to object at non-64bit boundary: %" PRIu64
,
939 /* Object may not be located in the file header */
940 if (offset
< le64toh(f
->header
->header_size
))
941 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
942 "Attempt to move to object located in file header: %" PRIu64
,
945 r
= journal_file_move_to(f
, type
, false, offset
, sizeof(ObjectHeader
), &t
, &tsize
);
950 s
= le64toh(READ_NOW(o
->object
.size
));
953 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
954 "Attempt to move to uninitialized object: %" PRIu64
,
956 if (s
< sizeof(ObjectHeader
))
957 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
958 "Attempt to move to overly short object: %" PRIu64
,
961 if (o
->object
.type
<= OBJECT_UNUSED
)
962 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
963 "Attempt to move to object with invalid type: %" PRIu64
,
966 if (s
< minimum_header_size(o
))
967 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
968 "Attempt to move to truncated object: %" PRIu64
,
971 if (type
> OBJECT_UNUSED
&& o
->object
.type
!= type
)
972 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
973 "Attempt to move to object of unexpected type: %" PRIu64
,
977 r
= journal_file_move_to(f
, type
, false, offset
, s
, &t
, NULL
);
984 r
= journal_file_check_object(f
, offset
, o
);
/* Allocates the next entry sequence number: one past the current tail seqnum,
 * reconciled with an optional caller-provided external counter so both end up
 * at the maximum of the two. Also initializes head_entry_seqnum on the very
 * first entry.
 * NOTE(review): the lines updating/returning via the external counter are
 * missing from this extraction — confirm the max-of-both logic upstream. */
992 static uint64_t journal_file_entry_seqnum(JournalFile
*f
, uint64_t *seqnum
) {
998 r
= le64toh(f
->header
->tail_entry_seqnum
) + 1;
1001 /* If an external seqnum counter was passed, we update
1002 * both the local and the external one, and set it to
1003 * the maximum of both */
1005 if (*seqnum
+ 1 > r
)
1011 f
->header
->tail_entry_seqnum
= htole64(r
);
1013 if (f
->header
->head_entry_seqnum
== 0)
1014 f
->header
->head_entry_seqnum
= htole64(r
);
1019 int journal_file_append_object(
1024 uint64_t *ret_offset
) {
1033 assert(type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
);
1034 assert(size
>= sizeof(ObjectHeader
));
1036 r
= journal_file_set_online(f
);
1040 p
= le64toh(f
->header
->tail_object_offset
);
1042 p
= le64toh(f
->header
->header_size
);
1046 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &tail
);
1050 sz
= le64toh(READ_NOW(tail
->object
.size
));
1051 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
1055 if (p
> UINT64_MAX
- sz
)
1061 r
= journal_file_allocate(f
, p
, size
);
1065 r
= journal_file_move_to(f
, type
, false, p
, size
, &t
, NULL
);
1070 o
->object
= (ObjectHeader
) {
1072 .size
= htole64(size
),
1075 f
->header
->tail_object_offset
= htole64(p
);
1076 f
->header
->n_objects
= htole64(le64toh(f
->header
->n_objects
) + 1);
1087 static int journal_file_setup_data_hash_table(JournalFile
*f
) {
1095 /* We estimate that we need 1 hash table entry per 768 bytes
1096 of journal file and we want to make sure we never get
1097 beyond 75% fill level. Calculate the hash table size for
1098 the maximum file size based on these metrics. */
1100 s
= (f
->metrics
.max_size
* 4 / 768 / 3) * sizeof(HashItem
);
1101 if (s
< DEFAULT_DATA_HASH_TABLE_SIZE
)
1102 s
= DEFAULT_DATA_HASH_TABLE_SIZE
;
1104 log_debug("Reserving %"PRIu64
" entries in data hash table.", s
/ sizeof(HashItem
));
1106 r
= journal_file_append_object(f
,
1107 OBJECT_DATA_HASH_TABLE
,
1108 offsetof(Object
, hash_table
.items
) + s
,
1113 memzero(o
->hash_table
.items
, s
);
1115 f
->header
->data_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1116 f
->header
->data_hash_table_size
= htole64(s
);
1121 static int journal_file_setup_field_hash_table(JournalFile
*f
) {
1129 /* We use a fixed size hash table for the fields as this
1130 * number should grow very slowly only */
1132 s
= DEFAULT_FIELD_HASH_TABLE_SIZE
;
1133 log_debug("Reserving %"PRIu64
" entries in field hash table.", s
/ sizeof(HashItem
));
1135 r
= journal_file_append_object(f
,
1136 OBJECT_FIELD_HASH_TABLE
,
1137 offsetof(Object
, hash_table
.items
) + s
,
1142 memzero(o
->hash_table
.items
, s
);
1144 f
->header
->field_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1145 f
->header
->field_hash_table_size
= htole64(s
);
/* Maps the on-disk data hash table into memory (idempotent: returns early if
 * already mapped), using the offset/size recorded in the file header, and
 * caches the mapping pointer in f->data_hash_table.
 * NOTE(review): the journal_file_move_to() argument list and error return are
 * truncated in this extraction. */
1150 int journal_file_map_data_hash_table(JournalFile
*f
) {
/* Already mapped? Then there is nothing to do. */
1158 if (f
->data_hash_table
)
1161 p
= le64toh(f
->header
->data_hash_table_offset
);
1162 s
= le64toh(f
->header
->data_hash_table_size
);
1164 r
= journal_file_move_to(f
,
1165 OBJECT_DATA_HASH_TABLE
,
1172 f
->data_hash_table
= t
;
/* Maps the on-disk field hash table into memory (idempotent: returns early if
 * already mapped), using the offset/size recorded in the file header, and
 * caches the mapping pointer in f->field_hash_table.
 * NOTE(review): the journal_file_move_to() argument list and error return are
 * truncated in this extraction. */
1176 int journal_file_map_field_hash_table(JournalFile
*f
) {
/* Already mapped? Then there is nothing to do. */
1184 if (f
->field_hash_table
)
1187 p
= le64toh(f
->header
->field_hash_table_offset
);
1188 s
= le64toh(f
->header
->field_hash_table_size
);
1190 r
= journal_file_move_to(f
,
1191 OBJECT_FIELD_HASH_TABLE
,
1198 f
->field_hash_table
= t
;
1202 static int journal_file_link_field(
1213 assert(f
->field_hash_table
);
1217 if (o
->object
.type
!= OBJECT_FIELD
)
1220 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1224 /* This might alter the window we are looking at */
1225 o
->field
.next_hash_offset
= o
->field
.head_data_offset
= 0;
1228 p
= le64toh(f
->field_hash_table
[h
].tail_hash_offset
);
1230 f
->field_hash_table
[h
].head_hash_offset
= htole64(offset
);
1232 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1236 o
->field
.next_hash_offset
= htole64(offset
);
1239 f
->field_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1241 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
1242 f
->header
->n_fields
= htole64(le64toh(f
->header
->n_fields
) + 1);
1247 static int journal_file_link_data(
1258 assert(f
->data_hash_table
);
1262 if (o
->object
.type
!= OBJECT_DATA
)
1265 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1269 /* This might alter the window we are looking at */
1270 o
->data
.next_hash_offset
= o
->data
.next_field_offset
= 0;
1271 o
->data
.entry_offset
= o
->data
.entry_array_offset
= 0;
1272 o
->data
.n_entries
= 0;
1275 p
= le64toh(f
->data_hash_table
[h
].tail_hash_offset
);
1277 /* Only entry in the hash table is easy */
1278 f
->data_hash_table
[h
].head_hash_offset
= htole64(offset
);
1280 /* Move back to the previous data object, to patch in
1283 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1287 o
->data
.next_hash_offset
= htole64(offset
);
1290 f
->data_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1292 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
1293 f
->header
->n_data
= htole64(le64toh(f
->header
->n_data
) + 1);
/* Advances *p to the next item in a hash chain, read from *next_hash_offset.
 * Rejects chains that do not move strictly forward in the file (loop
 * detection), and, for writable files, records the deepest chain seen so far
 * in *header_max_depth so rotation can be triggered on degenerate chains.
 * NOTE(review): the parameter list and the depth-increment lines are
 * truncated in this extraction. */
1298 static int next_hash_offset(
1301 le64_t
*next_hash_offset
,
1303 le64_t
*header_max_depth
) {
1307 nextp
= le64toh(READ_NOW(*next_hash_offset
));
1309 if (nextp
<= *p
) /* Refuse going in loops */
1310 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1311 "Detected hash item loop in %s, refusing.", f
->path
);
1315 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1316 if (header_max_depth
&& f
->writable
)
1317 *header_max_depth
= htole64(MAX(*depth
, le64toh(*header_max_depth
)));
1324 int journal_file_find_field_object_with_hash(
1326 const void *field
, uint64_t size
, uint64_t hash
,
1327 Object
**ret
, uint64_t *ret_offset
) {
1329 uint64_t p
, osize
, h
, m
, depth
= 0;
1334 assert(field
&& size
> 0);
1336 /* If the field hash table is empty, we can't find anything */
1337 if (le64toh(f
->header
->field_hash_table_size
) <= 0)
1340 /* Map the field hash table, if it isn't mapped yet. */
1341 r
= journal_file_map_field_hash_table(f
);
1345 osize
= offsetof(Object
, field
.payload
) + size
;
1347 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1352 p
= le64toh(f
->field_hash_table
[h
].head_hash_offset
);
1356 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1360 if (le64toh(o
->field
.hash
) == hash
&&
1361 le64toh(o
->object
.size
) == osize
&&
1362 memcmp(o
->field
.payload
, field
, size
) == 0) {
1372 r
= next_hash_offset(
1375 &o
->field
.next_hash_offset
,
1377 JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) ? &f
->header
->field_hash_chain_depth
: NULL
);
/* Hashes a data/field payload using the hash function this journal file was
 * created with: siphash24 keyed with the file ID for keyed-hash files,
 * Jenkins hash for legacy files.
 * NOTE(review): the parameter list is truncated in this extraction;
 * presumably (JournalFile *f, const void *data, size_t sz) — confirm. */
1385 uint64_t journal_file_hash_data(
1391 assert(data
|| sz
== 0);
1393 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1394 * function use siphash. Old journal files use the Jenkins hash. */
1396 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
1397 return siphash24(data
, sz
, f
->header
->file_id
.bytes
);
1399 return jenkins_hash64(data
, sz
);
/* Convenience wrapper: computes the field payload's hash with the file's
 * configured hash function, then delegates the lookup to
 * journal_file_find_field_object_with_hash().
 * NOTE(review): some argument-forwarding lines are truncated in this
 * extraction. */
1402 int journal_file_find_field_object(
1404 const void *field
, uint64_t size
,
1405 Object
**ret
, uint64_t *ret_offset
) {
1408 assert(field
&& size
> 0);
1410 return journal_file_find_field_object_with_hash(
1413 journal_file_hash_data(f
, field
, size
),
1417 int journal_file_find_data_object_with_hash(
1419 const void *data
, uint64_t size
, uint64_t hash
,
1420 Object
**ret
, uint64_t *ret_offset
) {
1422 uint64_t p
, osize
, h
, m
, depth
= 0;
1427 assert(data
|| size
== 0);
1429 /* If there's no data hash table, then there's no entry. */
1430 if (le64toh(f
->header
->data_hash_table_size
) <= 0)
1433 /* Map the data hash table, if it isn't mapped yet. */
1434 r
= journal_file_map_data_hash_table(f
);
1438 osize
= offsetof(Object
, data
.payload
) + size
;
1440 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1445 p
= le64toh(f
->data_hash_table
[h
].head_hash_offset
);
1450 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1454 if (le64toh(o
->data
.hash
) != hash
)
1457 if (o
->object
.flags
& OBJECT_COMPRESSION_MASK
) {
1458 #if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
1462 l
= le64toh(READ_NOW(o
->object
.size
));
1463 if (l
<= offsetof(Object
, data
.payload
))
1466 l
-= offsetof(Object
, data
.payload
);
1468 r
= decompress_blob(o
->object
.flags
& OBJECT_COMPRESSION_MASK
,
1469 o
->data
.payload
, l
, &f
->compress_buffer
, &f
->compress_buffer_size
, &rsize
, 0);
1473 if (rsize
== size
&&
1474 memcmp(f
->compress_buffer
, data
, size
) == 0) {
1485 return -EPROTONOSUPPORT
;
1487 } else if (le64toh(o
->object
.size
) == osize
&&
1488 memcmp(o
->data
.payload
, data
, size
) == 0) {
1500 r
= next_hash_offset(
1503 &o
->data
.next_hash_offset
,
1505 JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) ? &f
->header
->data_hash_chain_depth
: NULL
);
1513 int journal_file_find_data_object(
1515 const void *data
, uint64_t size
,
1516 Object
**ret
, uint64_t *ret_offset
) {
1519 assert(data
|| size
== 0);
1521 return journal_file_find_data_object_with_hash(
1524 journal_file_hash_data(f
, data
, size
),
1528 static int journal_file_append_field(
1530 const void *field
, uint64_t size
,
1531 Object
**ret
, uint64_t *ret_offset
) {
1539 assert(field
&& size
> 0);
1541 hash
= journal_file_hash_data(f
, field
, size
);
1543 r
= journal_file_find_field_object_with_hash(f
, field
, size
, hash
, &o
, &p
);
1557 osize
= offsetof(Object
, field
.payload
) + size
;
1558 r
= journal_file_append_object(f
, OBJECT_FIELD
, osize
, &o
, &p
);
1562 o
->field
.hash
= htole64(hash
);
1563 memcpy(o
->field
.payload
, field
, size
);
1565 r
= journal_file_link_field(f
, o
, p
, hash
);
1569 /* The linking might have altered the window, so let's
1570 * refresh our pointer */
1571 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1576 r
= journal_file_hmac_put_object(f
, OBJECT_FIELD
, o
, p
);
1590 static int journal_file_append_data(
1592 const void *data
, uint64_t size
,
1593 Object
**ret
, uint64_t *ret_offset
) {
1598 int r
, compression
= 0;
1602 assert(data
|| size
== 0);
1604 hash
= journal_file_hash_data(f
, data
, size
);
1606 r
= journal_file_find_data_object_with_hash(f
, data
, size
, hash
, &o
, &p
);
1620 osize
= offsetof(Object
, data
.payload
) + size
;
1621 r
= journal_file_append_object(f
, OBJECT_DATA
, osize
, &o
, &p
);
1625 o
->data
.hash
= htole64(hash
);
1627 #if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
1628 if (JOURNAL_FILE_COMPRESS(f
) && size
>= f
->compress_threshold_bytes
) {
1631 compression
= compress_blob(data
, size
, o
->data
.payload
, size
- 1, &rsize
);
1633 if (compression
>= 0) {
1634 o
->object
.size
= htole64(offsetof(Object
, data
.payload
) + rsize
);
1635 o
->object
.flags
|= compression
;
1637 log_debug("Compressed data object %"PRIu64
" -> %zu using %s",
1638 size
, rsize
, object_compressed_to_string(compression
));
1640 /* Compression didn't work, we don't really care why, let's continue without compression */
1645 if (compression
== 0)
1646 memcpy_safe(o
->data
.payload
, data
, size
);
1648 r
= journal_file_link_data(f
, o
, p
, hash
);
1653 r
= journal_file_hmac_put_object(f
, OBJECT_DATA
, o
, p
);
1658 /* The linking might have altered the window, so let's
1659 * refresh our pointer */
1660 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1667 eq
= memchr(data
, '=', size
);
1668 if (eq
&& eq
> data
) {
1672 /* Create field object ... */
1673 r
= journal_file_append_field(f
, data
, (uint8_t*) eq
- (uint8_t*) data
, &fo
, &fp
);
1677 /* ... and link it in. */
1678 o
->data
.next_field_offset
= fo
->field
.head_data_offset
;
1679 fo
->field
.head_data_offset
= le64toh(p
);
1691 uint64_t journal_file_entry_n_items(Object
*o
) {
1695 if (o
->object
.type
!= OBJECT_ENTRY
)
1698 sz
= le64toh(READ_NOW(o
->object
.size
));
1699 if (sz
< offsetof(Object
, entry
.items
))
1702 return (sz
- offsetof(Object
, entry
.items
)) / sizeof(EntryItem
);
1705 uint64_t journal_file_entry_array_n_items(Object
*o
) {
1710 if (o
->object
.type
!= OBJECT_ENTRY_ARRAY
)
1713 sz
= le64toh(READ_NOW(o
->object
.size
));
1714 if (sz
< offsetof(Object
, entry_array
.items
))
1717 return (sz
- offsetof(Object
, entry_array
.items
)) / sizeof(uint64_t);
1720 uint64_t journal_file_hash_table_n_items(Object
*o
) {
1725 if (!IN_SET(o
->object
.type
, OBJECT_DATA_HASH_TABLE
, OBJECT_FIELD_HASH_TABLE
))
1728 sz
= le64toh(READ_NOW(o
->object
.size
));
1729 if (sz
< offsetof(Object
, hash_table
.items
))
1732 return (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
);
1735 static int link_entry_into_array(JournalFile
*f
,
1740 uint64_t n
= 0, ap
= 0, q
, i
, a
, hidx
;
1749 a
= le64toh(*first
);
1750 i
= hidx
= le64toh(READ_NOW(*idx
));
1753 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
1757 n
= journal_file_entry_array_n_items(o
);
1759 o
->entry_array
.items
[i
] = htole64(p
);
1760 *idx
= htole64(hidx
+ 1);
1766 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
1777 r
= journal_file_append_object(f
, OBJECT_ENTRY_ARRAY
,
1778 offsetof(Object
, entry_array
.items
) + n
* sizeof(uint64_t),
1784 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY_ARRAY
, o
, q
);
1789 o
->entry_array
.items
[i
] = htole64(p
);
1792 *first
= htole64(q
);
1794 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, ap
, &o
);
1798 o
->entry_array
.next_entry_array_offset
= htole64(q
);
1801 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
1802 f
->header
->n_entry_arrays
= htole64(le64toh(f
->header
->n_entry_arrays
) + 1);
1804 *idx
= htole64(hidx
+ 1);
1809 static int link_entry_into_array_plus_one(JournalFile
*f
,
1824 hidx
= le64toh(READ_NOW(*idx
));
1825 if (hidx
== UINT64_MAX
)
1828 *extra
= htole64(p
);
1832 i
= htole64(hidx
- 1);
1833 r
= link_entry_into_array(f
, first
, &i
, p
);
1838 *idx
= htole64(hidx
+ 1);
1842 static int journal_file_link_entry_item(JournalFile
*f
, Object
*o
, uint64_t offset
, uint64_t i
) {
1850 p
= le64toh(o
->entry
.items
[i
].object_offset
);
1851 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1855 return link_entry_into_array_plus_one(f
,
1856 &o
->data
.entry_offset
,
1857 &o
->data
.entry_array_offset
,
1862 static int journal_file_link_entry(JournalFile
*f
, Object
*o
, uint64_t offset
) {
1871 if (o
->object
.type
!= OBJECT_ENTRY
)
1874 __sync_synchronize();
1876 /* Link up the entry itself */
1877 r
= link_entry_into_array(f
,
1878 &f
->header
->entry_array_offset
,
1879 &f
->header
->n_entries
,
1884 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1886 if (f
->header
->head_entry_realtime
== 0)
1887 f
->header
->head_entry_realtime
= o
->entry
.realtime
;
1889 f
->header
->tail_entry_realtime
= o
->entry
.realtime
;
1890 f
->header
->tail_entry_monotonic
= o
->entry
.monotonic
;
1892 /* Link up the items */
1893 n
= journal_file_entry_n_items(o
);
1894 for (i
= 0; i
< n
; i
++) {
1895 r
= journal_file_link_entry_item(f
, o
, offset
, i
);
1903 static int journal_file_append_entry_internal(
1905 const dual_timestamp
*ts
,
1906 const sd_id128_t
*boot_id
,
1908 const EntryItem items
[], unsigned n_items
,
1910 Object
**ret
, uint64_t *ret_offset
) {
1918 assert(items
|| n_items
== 0);
1921 osize
= offsetof(Object
, entry
.items
) + (n_items
* sizeof(EntryItem
));
1923 r
= journal_file_append_object(f
, OBJECT_ENTRY
, osize
, &o
, &np
);
1927 o
->entry
.seqnum
= htole64(journal_file_entry_seqnum(f
, seqnum
));
1928 memcpy_safe(o
->entry
.items
, items
, n_items
* sizeof(EntryItem
));
1929 o
->entry
.realtime
= htole64(ts
->realtime
);
1930 o
->entry
.monotonic
= htole64(ts
->monotonic
);
1931 o
->entry
.xor_hash
= htole64(xor_hash
);
1933 f
->header
->boot_id
= *boot_id
;
1934 o
->entry
.boot_id
= f
->header
->boot_id
;
1937 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY
, o
, np
);
1942 r
= journal_file_link_entry(f
, o
, np
);
1955 void journal_file_post_change(JournalFile
*f
) {
1961 /* inotify() does not receive IN_MODIFY events from file
1962 * accesses done via mmap(). After each access we hence
1963 * trigger IN_MODIFY by truncating the journal file to its
1964 * current size which triggers IN_MODIFY. */
1966 __sync_synchronize();
1968 if (ftruncate(f
->fd
, f
->last_stat
.st_size
) < 0)
1969 log_debug_errno(errno
, "Failed to truncate file to its own size: %m");
1972 static int post_change_thunk(sd_event_source
*timer
, uint64_t usec
, void *userdata
) {
1975 journal_file_post_change(userdata
);
1980 static void schedule_post_change(JournalFile
*f
) {
1985 assert(f
->post_change_timer
);
1987 r
= sd_event_source_get_enabled(f
->post_change_timer
, NULL
);
1989 log_debug_errno(r
, "Failed to get ftruncate timer state: %m");
1995 r
= sd_event_now(sd_event_source_get_event(f
->post_change_timer
), CLOCK_MONOTONIC
, &now
);
1997 log_debug_errno(r
, "Failed to get clock's now for scheduling ftruncate: %m");
2001 r
= sd_event_source_set_time(f
->post_change_timer
, now
+ f
->post_change_timer_period
);
2003 log_debug_errno(r
, "Failed to set time for scheduling ftruncate: %m");
2007 r
= sd_event_source_set_enabled(f
->post_change_timer
, SD_EVENT_ONESHOT
);
2009 log_debug_errno(r
, "Failed to enable scheduled ftruncate: %m");
2016 /* On failure, let's simply post the change immediately. */
2017 journal_file_post_change(f
);
2020 /* Enable coalesced change posting in a timer on the provided sd_event instance */
2021 int journal_file_enable_post_change_timer(JournalFile
*f
, sd_event
*e
, usec_t t
) {
2022 _cleanup_(sd_event_source_unrefp
) sd_event_source
*timer
= NULL
;
2026 assert_return(!f
->post_change_timer
, -EINVAL
);
2030 r
= sd_event_add_time(e
, &timer
, CLOCK_MONOTONIC
, 0, 0, post_change_thunk
, f
);
2034 r
= sd_event_source_set_enabled(timer
, SD_EVENT_OFF
);
2038 f
->post_change_timer
= TAKE_PTR(timer
);
2039 f
->post_change_timer_period
= t
;
2044 static int entry_item_cmp(const EntryItem
*a
, const EntryItem
*b
) {
2045 return CMP(le64toh(a
->object_offset
), le64toh(b
->object_offset
));
2048 int journal_file_append_entry(
2050 const dual_timestamp
*ts
,
2051 const sd_id128_t
*boot_id
,
2052 const struct iovec iovec
[], unsigned n_iovec
,
2054 Object
**ret
, uint64_t *ret_offset
) {
2059 uint64_t xor_hash
= 0;
2060 struct dual_timestamp _ts
;
2064 assert(iovec
|| n_iovec
== 0);
2067 if (!VALID_REALTIME(ts
->realtime
))
2068 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2069 "Invalid realtime timestamp %" PRIu64
", refusing entry.",
2071 if (!VALID_MONOTONIC(ts
->monotonic
))
2072 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2073 "Invalid monotomic timestamp %" PRIu64
", refusing entry.",
2076 dual_timestamp_get(&_ts
);
2081 r
= journal_file_maybe_append_tag(f
, ts
->realtime
);
2086 /* alloca() can't take 0, hence let's allocate at least one */
2087 items
= newa(EntryItem
, MAX(1u, n_iovec
));
2089 for (i
= 0; i
< n_iovec
; i
++) {
2093 r
= journal_file_append_data(f
, iovec
[i
].iov_base
, iovec
[i
].iov_len
, &o
, &p
);
2097 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2098 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2099 * specific record, and give records with otherwise identical position (i.e. match in seqno,
2100 * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
2101 * objects in each file is different since they are keyed. Hence let's calculate the Jenkins
2102 * hash here for that. This also has the benefit that cursors for old and new journal files
2103 * are completely identical (they include the XOR hash after all). For classic Jenkins-hash
2104 * files things are easier, we can just take the value from the stored record directly. */
2106 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
2107 xor_hash
^= jenkins_hash64(iovec
[i
].iov_base
, iovec
[i
].iov_len
);
2109 xor_hash
^= le64toh(o
->data
.hash
);
2111 items
[i
].object_offset
= htole64(p
);
2112 items
[i
].hash
= o
->data
.hash
;
2115 /* Order by the position on disk, in order to improve seek
2116 * times for rotating media. */
2117 typesafe_qsort(items
, n_iovec
, entry_item_cmp
);
2119 r
= journal_file_append_entry_internal(f
, ts
, boot_id
, xor_hash
, items
, n_iovec
, seqnum
, ret
, ret_offset
);
2121 /* If the memory mapping triggered a SIGBUS then we return an
2122 * IO error and ignore the error code passed down to us, since
2123 * it is very likely just an effect of a nullified replacement
2126 if (mmap_cache_got_sigbus(f
->mmap
, f
->cache_fd
))
2129 if (f
->post_change_timer
)
2130 schedule_post_change(f
);
2132 journal_file_post_change(f
);
/* Cached position inside an entry-array chain, so repeated lookups on the same chain can
 * skip already-traversed arrays instead of rewalking from the head. */
typedef struct ChainCacheItem {
        uint64_t first; /* the array at the beginning of the chain */
        uint64_t array; /* the cached array */
        uint64_t begin; /* the first item in the cached array */
        uint64_t total; /* the total number of items in all arrays before this one in the chain */
        uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
} ChainCacheItem;
2145 static void chain_cache_put(
2152 uint64_t last_index
) {
2155 /* If the chain item to cache for this chain is the
2156 * first one it's not worth caching anything */
2160 if (ordered_hashmap_size(h
) >= CHAIN_CACHE_MAX
) {
2161 ci
= ordered_hashmap_steal_first(h
);
2164 ci
= new(ChainCacheItem
, 1);
2171 if (ordered_hashmap_put(h
, &ci
->first
, ci
) < 0) {
2176 assert(ci
->first
== first
);
2181 ci
->last_index
= last_index
;
2184 static int generic_array_get(
2188 Object
**ret
, uint64_t *ret_offset
) {
2191 uint64_t p
= 0, a
, t
= 0;
2199 /* Try the chain cache first */
2200 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2201 if (ci
&& i
> ci
->total
) {
2210 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2214 k
= journal_file_entry_array_n_items(o
);
2216 p
= le64toh(o
->entry_array
.items
[i
]);
2222 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
2228 /* Let's cache this item for the next invocation */
2229 chain_cache_put(f
->chain_cache
, ci
, first
, a
, le64toh(o
->entry_array
.items
[0]), t
, i
);
2231 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2244 static int generic_array_get_plus_one(
2249 Object
**ret
, uint64_t *ret_offset
) {
2258 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, &o
);
2266 *ret_offset
= extra
;
2271 return generic_array_get(f
, first
, i
-1, ret
, ret_offset
);
2280 static int generic_array_bisect(
2285 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2286 direction_t direction
,
2288 uint64_t *ret_offset
,
2289 uint64_t *ret_idx
) {
2291 uint64_t a
, p
, t
= 0, i
= 0, last_p
= 0, last_index
= (uint64_t) -1;
2292 bool subtract_one
= false;
2293 Object
*o
, *array
= NULL
;
2298 assert(test_object
);
2300 /* Start with the first array in the chain */
2303 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2304 if (ci
&& n
> ci
->total
&& ci
->begin
!= 0) {
2305 /* Ah, we have iterated this bisection array chain
2306 * previously! Let's see if we can skip ahead in the
2307 * chain, as far as the last time. But we can't jump
2308 * backwards in the chain, so let's check that
2311 r
= test_object(f
, ci
->begin
, needle
);
2315 if (r
== TEST_LEFT
) {
2316 /* OK, what we are looking for is right of the
2317 * begin of this EntryArray, so let's jump
2318 * straight to previously cached array in the
2324 last_index
= ci
->last_index
;
2329 uint64_t left
, right
, k
, lp
;
2331 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
2335 k
= journal_file_entry_array_n_items(array
);
2341 lp
= p
= le64toh(array
->entry_array
.items
[i
]);
2345 r
= test_object(f
, p
, needle
);
2346 if (r
== -EBADMSG
) {
2347 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2354 if (r
== TEST_FOUND
)
2355 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2357 if (r
== TEST_RIGHT
) {
2361 if (last_index
!= (uint64_t) -1) {
2362 assert(last_index
<= right
);
2364 /* If we cached the last index we
2365 * looked at, let's try to not to jump
2366 * too wildly around and see if we can
2367 * limit the range to look at early to
2368 * the immediate neighbors of the last
2369 * index we looked at. */
2371 if (last_index
> 0) {
2372 uint64_t x
= last_index
- 1;
2374 p
= le64toh(array
->entry_array
.items
[x
]);
2378 r
= test_object(f
, p
, needle
);
2382 if (r
== TEST_FOUND
)
2383 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2385 if (r
== TEST_RIGHT
)
2391 if (last_index
< right
) {
2392 uint64_t y
= last_index
+ 1;
2394 p
= le64toh(array
->entry_array
.items
[y
]);
2398 r
= test_object(f
, p
, needle
);
2402 if (r
== TEST_FOUND
)
2403 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2405 if (r
== TEST_RIGHT
)
2413 if (left
== right
) {
2414 if (direction
== DIRECTION_UP
)
2415 subtract_one
= true;
2421 assert(left
< right
);
2422 i
= (left
+ right
) / 2;
2424 p
= le64toh(array
->entry_array
.items
[i
]);
2428 r
= test_object(f
, p
, needle
);
2429 if (r
== -EBADMSG
) {
2430 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2437 if (r
== TEST_FOUND
)
2438 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2440 if (r
== TEST_RIGHT
)
2448 if (direction
== DIRECTION_UP
) {
2450 subtract_one
= true;
2461 last_index
= (uint64_t) -1;
2462 a
= le64toh(array
->entry_array
.next_entry_array_offset
);
2468 if (subtract_one
&& t
== 0 && i
== 0)
2471 /* Let's cache this item for the next invocation */
2472 chain_cache_put(f
->chain_cache
, ci
, first
, a
, le64toh(array
->entry_array
.items
[0]), t
, subtract_one
? (i
> 0 ? i
-1 : (uint64_t) -1) : i
);
2474 if (subtract_one
&& i
== 0)
2476 else if (subtract_one
)
2477 p
= le64toh(array
->entry_array
.items
[i
-1]);
2479 p
= le64toh(array
->entry_array
.items
[i
]);
2481 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2492 *ret_idx
= t
+ i
+ (subtract_one
? -1 : 0);
2497 static int generic_array_bisect_plus_one(
2503 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2504 direction_t direction
,
2506 uint64_t *ret_offset
,
2507 uint64_t *ret_idx
) {
2510 bool step_back
= false;
2514 assert(test_object
);
2519 /* This bisects the array in object 'first', but first checks
2521 r
= test_object(f
, extra
, needle
);
2525 if (r
== TEST_FOUND
)
2526 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2528 /* if we are looking with DIRECTION_UP then we need to first
2529 see if in the actual array there is a matching entry, and
2530 return the last one of that. But if there isn't any we need
2531 to return this one. Hence remember this, and return it
2534 step_back
= direction
== DIRECTION_UP
;
2536 if (r
== TEST_RIGHT
) {
2537 if (direction
== DIRECTION_DOWN
)
2543 r
= generic_array_bisect(f
, first
, n
-1, needle
, test_object
, direction
, ret
, ret_offset
, ret_idx
);
2545 if (r
== 0 && step_back
)
2548 if (r
> 0 && ret_idx
)
2554 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, &o
);
2562 *ret_offset
= extra
;
2570 _pure_
static int test_object_offset(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2576 else if (p
< needle
)
2582 static int test_object_seqnum(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2590 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2594 sq
= le64toh(READ_NOW(o
->entry
.seqnum
));
2597 else if (sq
< needle
)
2603 int journal_file_move_to_entry_by_seqnum(
2606 direction_t direction
,
2608 uint64_t *ret_offset
) {
2612 return generic_array_bisect(
2614 le64toh(f
->header
->entry_array_offset
),
2615 le64toh(f
->header
->n_entries
),
2619 ret
, ret_offset
, NULL
);
2622 static int test_object_realtime(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2630 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2634 rt
= le64toh(READ_NOW(o
->entry
.realtime
));
2637 else if (rt
< needle
)
2643 int journal_file_move_to_entry_by_realtime(
2646 direction_t direction
,
2648 uint64_t *ret_offset
) {
2652 return generic_array_bisect(
2654 le64toh(f
->header
->entry_array_offset
),
2655 le64toh(f
->header
->n_entries
),
2657 test_object_realtime
,
2659 ret
, ret_offset
, NULL
);
2662 static int test_object_monotonic(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2670 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2674 m
= le64toh(READ_NOW(o
->entry
.monotonic
));
2677 else if (m
< needle
)
2683 static int find_data_object_by_boot_id(
2689 char t
[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
2691 sd_id128_to_string(boot_id
, t
+ 9);
2692 return journal_file_find_data_object(f
, t
, sizeof(t
) - 1, o
, b
);
2695 int journal_file_move_to_entry_by_monotonic(
2699 direction_t direction
,
2701 uint64_t *ret_offset
) {
2708 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, NULL
);
2714 return generic_array_bisect_plus_one(
2716 le64toh(o
->data
.entry_offset
),
2717 le64toh(o
->data
.entry_array_offset
),
2718 le64toh(o
->data
.n_entries
),
2720 test_object_monotonic
,
2722 ret
, ret_offset
, NULL
);
2725 void journal_file_reset_location(JournalFile
*f
) {
2726 f
->location_type
= LOCATION_HEAD
;
2727 f
->current_offset
= 0;
2728 f
->current_seqnum
= 0;
2729 f
->current_realtime
= 0;
2730 f
->current_monotonic
= 0;
2731 zero(f
->current_boot_id
);
2732 f
->current_xor_hash
= 0;
2735 void journal_file_save_location(JournalFile
*f
, Object
*o
, uint64_t offset
) {
2736 f
->location_type
= LOCATION_SEEK
;
2737 f
->current_offset
= offset
;
2738 f
->current_seqnum
= le64toh(o
->entry
.seqnum
);
2739 f
->current_realtime
= le64toh(o
->entry
.realtime
);
2740 f
->current_monotonic
= le64toh(o
->entry
.monotonic
);
2741 f
->current_boot_id
= o
->entry
.boot_id
;
2742 f
->current_xor_hash
= le64toh(o
->entry
.xor_hash
);
2745 int journal_file_compare_locations(JournalFile
*af
, JournalFile
*bf
) {
2752 assert(af
->location_type
== LOCATION_SEEK
);
2753 assert(bf
->location_type
== LOCATION_SEEK
);
2755 /* If contents and timestamps match, these entries are
2756 * identical, even if the seqnum does not match */
2757 if (sd_id128_equal(af
->current_boot_id
, bf
->current_boot_id
) &&
2758 af
->current_monotonic
== bf
->current_monotonic
&&
2759 af
->current_realtime
== bf
->current_realtime
&&
2760 af
->current_xor_hash
== bf
->current_xor_hash
)
2763 if (sd_id128_equal(af
->header
->seqnum_id
, bf
->header
->seqnum_id
)) {
2765 /* If this is from the same seqnum source, compare
2767 r
= CMP(af
->current_seqnum
, bf
->current_seqnum
);
2771 /* Wow! This is weird, different data but the same
2772 * seqnums? Something is borked, but let's make the
2773 * best of it and compare by time. */
2776 if (sd_id128_equal(af
->current_boot_id
, bf
->current_boot_id
)) {
2778 /* If the boot id matches, compare monotonic time */
2779 r
= CMP(af
->current_monotonic
, bf
->current_monotonic
);
2784 /* Otherwise, compare UTC time */
2785 r
= CMP(af
->current_realtime
, bf
->current_realtime
);
2789 /* Finally, compare by contents */
2790 return CMP(af
->current_xor_hash
, bf
->current_xor_hash
);
2793 static int bump_array_index(uint64_t *i
, direction_t direction
, uint64_t n
) {
2795 /* Increase or decrease the specified index, in the right direction. */
2797 if (direction
== DIRECTION_DOWN
) {
2812 static bool check_properly_ordered(uint64_t new_offset
, uint64_t old_offset
, direction_t direction
) {
2814 /* Consider it an error if any of the two offsets is uninitialized */
2815 if (old_offset
== 0 || new_offset
== 0)
2818 /* If we go down, the new offset must be larger than the old one. */
2819 return direction
== DIRECTION_DOWN
?
2820 new_offset
> old_offset
:
2821 new_offset
< old_offset
;
2824 int journal_file_next_entry(
2827 direction_t direction
,
2828 Object
**ret
, uint64_t *ret_offset
) {
2836 n
= le64toh(READ_NOW(f
->header
->n_entries
));
2841 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
2843 r
= generic_array_bisect(f
,
2844 le64toh(f
->header
->entry_array_offset
),
2845 le64toh(f
->header
->n_entries
),
2854 r
= bump_array_index(&i
, direction
, n
);
2859 /* And jump to it */
2861 r
= generic_array_get(f
,
2862 le64toh(f
->header
->entry_array_offset
),
2870 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2871 * the next one might work for us instead. */
2872 log_debug_errno(r
, "Entry item %" PRIu64
" is bad, skipping over it.", i
);
2874 r
= bump_array_index(&i
, direction
, n
);
2879 /* Ensure our array is properly ordered. */
2880 if (p
> 0 && !check_properly_ordered(ofs
, p
, direction
))
2881 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2882 "%s: entry array not properly ordered at entry %" PRIu64
,
2891 int journal_file_next_entry_for_data(
2893 Object
*o
, uint64_t p
,
2894 uint64_t data_offset
,
2895 direction_t direction
,
2896 Object
**ret
, uint64_t *ret_offset
) {
2903 assert(p
> 0 || !o
);
2905 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
2909 n
= le64toh(READ_NOW(d
->data
.n_entries
));
2914 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
2916 if (o
->object
.type
!= OBJECT_ENTRY
)
2919 r
= generic_array_bisect_plus_one(f
,
2920 le64toh(d
->data
.entry_offset
),
2921 le64toh(d
->data
.entry_array_offset
),
2922 le64toh(d
->data
.n_entries
),
2932 r
= bump_array_index(&i
, direction
, n
);
2938 r
= generic_array_get_plus_one(f
,
2939 le64toh(d
->data
.entry_offset
),
2940 le64toh(d
->data
.entry_array_offset
),
2948 log_debug_errno(r
, "Data entry item %" PRIu64
" is bad, skipping over it.", i
);
2950 r
= bump_array_index(&i
, direction
, n
);
2955 /* Ensure our array is properly ordered. */
2956 if (p
> 0 && check_properly_ordered(ofs
, p
, direction
))
2957 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2958 "%s data entry array not properly ordered at entry %" PRIu64
,
2967 int journal_file_move_to_entry_by_offset_for_data(
2969 uint64_t data_offset
,
2971 direction_t direction
,
2972 Object
**ret
, uint64_t *ret_offset
) {
2979 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
2983 return generic_array_bisect_plus_one(
2985 le64toh(d
->data
.entry_offset
),
2986 le64toh(d
->data
.entry_array_offset
),
2987 le64toh(d
->data
.n_entries
),
2991 ret
, ret_offset
, NULL
);
2994 int journal_file_move_to_entry_by_monotonic_for_data(
2996 uint64_t data_offset
,
2999 direction_t direction
,
3000 Object
**ret
, uint64_t *ret_offset
) {
3008 /* First, seek by time */
3009 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, &b
);
3015 r
= generic_array_bisect_plus_one(f
,
3016 le64toh(o
->data
.entry_offset
),
3017 le64toh(o
->data
.entry_array_offset
),
3018 le64toh(o
->data
.n_entries
),
3020 test_object_monotonic
,
3026 /* And now, continue seeking until we find an entry that
3027 * exists in both bisection arrays */
3033 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
3037 r
= generic_array_bisect_plus_one(f
,
3038 le64toh(d
->data
.entry_offset
),
3039 le64toh(d
->data
.entry_array_offset
),
3040 le64toh(d
->data
.n_entries
),
3048 r
= journal_file_move_to_object(f
, OBJECT_DATA
, b
, &o
);
3052 r
= generic_array_bisect_plus_one(f
,
3053 le64toh(o
->data
.entry_offset
),
3054 le64toh(o
->data
.entry_array_offset
),
3055 le64toh(o
->data
.n_entries
),
3077 int journal_file_move_to_entry_by_seqnum_for_data(
3079 uint64_t data_offset
,
3081 direction_t direction
,
3082 Object
**ret
, uint64_t *ret_offset
) {
3089 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
3093 return generic_array_bisect_plus_one(
3095 le64toh(d
->data
.entry_offset
),
3096 le64toh(d
->data
.entry_array_offset
),
3097 le64toh(d
->data
.n_entries
),
3101 ret
, ret_offset
, NULL
);
3104 int journal_file_move_to_entry_by_realtime_for_data(
3106 uint64_t data_offset
,
3108 direction_t direction
,
3109 Object
**ret
, uint64_t *ret_offset
) {
3116 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
3120 return generic_array_bisect_plus_one(
3122 le64toh(d
->data
.entry_offset
),
3123 le64toh(d
->data
.entry_array_offset
),
3124 le64toh(d
->data
.n_entries
),
3126 test_object_realtime
,
3128 ret
, ret_offset
, NULL
);
3131 void journal_file_dump(JournalFile
*f
) {
3139 journal_file_print_header(f
);
3141 p
= le64toh(READ_NOW(f
->header
->header_size
));
3143 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &o
);
3147 switch (o
->object
.type
) {
3150 printf("Type: OBJECT_UNUSED\n");
3154 printf("Type: OBJECT_DATA\n");
3158 printf("Type: OBJECT_FIELD\n");
3162 printf("Type: OBJECT_ENTRY seqnum=%"PRIu64
" monotonic=%"PRIu64
" realtime=%"PRIu64
"\n",
3163 le64toh(o
->entry
.seqnum
),
3164 le64toh(o
->entry
.monotonic
),
3165 le64toh(o
->entry
.realtime
));
3168 case OBJECT_FIELD_HASH_TABLE
:
3169 printf("Type: OBJECT_FIELD_HASH_TABLE\n");
3172 case OBJECT_DATA_HASH_TABLE
:
3173 printf("Type: OBJECT_DATA_HASH_TABLE\n");
3176 case OBJECT_ENTRY_ARRAY
:
3177 printf("Type: OBJECT_ENTRY_ARRAY\n");
3181 printf("Type: OBJECT_TAG seqnum=%"PRIu64
" epoch=%"PRIu64
"\n",
3182 le64toh(o
->tag
.seqnum
),
3183 le64toh(o
->tag
.epoch
));
3187 printf("Type: unknown (%i)\n", o
->object
.type
);
3191 if (o
->object
.flags
& OBJECT_COMPRESSION_MASK
)
3192 printf("Flags: %s\n",
3193 object_compressed_to_string(o
->object
.flags
& OBJECT_COMPRESSION_MASK
));
3195 if (p
== le64toh(f
->header
->tail_object_offset
))
3198 p
+= ALIGN64(le64toh(o
->object
.size
));
3203 log_error("File corrupt");
3206 static const char* format_timestamp_safe(char *buf
, size_t l
, usec_t t
) {
3209 x
= format_timestamp(buf
, l
, t
);
/* Dump a human-readable summary of a journal file's on-disk header to stdout:
 * IDs, state, feature flags, hash-table geometry, seqnum/timestamp ranges and
 * object counts. Optional header fields (added after the initial format) are
 * only printed when JOURNAL_HEADER_CONTAINS() says this file's header is new
 * enough to carry them.
 *
 * NOTE(review): this extraction has elided several interior lines (among them
 * the "File ID"/"Machine ID"/"Boot ID"/"State" format lines, the declaration
 * of 'st' used by fstat() below, and the closing brace) — confirm against the
 * full file before relying on this text verbatim. */
void journal_file_print_header(JournalFile *f) {
        /* Scratch buffers for formatting 128-bit IDs, timestamps and byte sizes. */
        char a[SD_ID128_STRING_MAX], b[SD_ID128_STRING_MAX], c[SD_ID128_STRING_MAX], d[SD_ID128_STRING_MAX];
        char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
        char bytes[FORMAT_BYTES_MAX];

        printf("File path: %s\n"
               "Sequential number ID: %s\n"
               "Compatible flags:%s%s\n"
               "Incompatible flags:%s%s%s%s%s\n"
               "Header size: %"PRIu64"\n"
               "Arena size: %"PRIu64"\n"
               "Data hash table size: %"PRIu64"\n"
               "Field hash table size: %"PRIu64"\n"
               "Rotate suggested: %s\n"
               "Head sequential number: %"PRIu64" (%"PRIx64")\n"
               "Tail sequential number: %"PRIu64" (%"PRIx64")\n"
               "Head realtime timestamp: %s (%"PRIx64")\n"
               "Tail realtime timestamp: %s (%"PRIx64")\n"
               "Tail monotonic timestamp: %s (%"PRIx64")\n"
               "Objects: %"PRIu64"\n"
               "Entry objects: %"PRIu64"\n",
               sd_id128_to_string(f->header->file_id, a),
               sd_id128_to_string(f->header->machine_id, b),
               sd_id128_to_string(f->header->boot_id, c),
               sd_id128_to_string(f->header->seqnum_id, d),
               f->header->state == STATE_OFFLINE ? "OFFLINE" :
               f->header->state == STATE_ONLINE ? "ONLINE" :
               f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
               JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
               /* Unknown compatible/incompatible bits are shown as " ???" */
               (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
               JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
               JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
               JOURNAL_HEADER_COMPRESSED_ZSTD(f->header) ? " COMPRESSED-ZSTD" : "",
               JOURNAL_HEADER_KEYED_HASH(f->header) ? " KEYED-HASH" : "",
               (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
               le64toh(f->header->header_size),
               le64toh(f->header->arena_size),
               /* Hash table sizes are stored in bytes; print item counts. */
               le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
               le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
               yes_no(journal_file_rotate_suggested(f, 0)),
               /* Each seqnum/timestamp is printed twice: decimal and hex. */
               le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
               le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
               format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
               format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
               format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
               le64toh(f->header->n_objects),
               le64toh(f->header->n_entries));

        /* Fill level = n_data vs. number of hash table slots. */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                printf("Data objects: %"PRIu64"\n"
                       "Data hash table fill: %.1f%%\n",
                       le64toh(f->header->n_data),
                       100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                printf("Field objects: %"PRIu64"\n"
                       "Field hash table fill: %.1f%%\n",
                       le64toh(f->header->n_fields),
                       100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
                printf("Tag objects: %"PRIu64"\n",
                       le64toh(f->header->n_tags));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
                printf("Entry array objects: %"PRIu64"\n",
                       le64toh(f->header->n_entry_arrays));

        if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth))
                printf("Deepest field hash chain: %" PRIu64"\n",
                       f->header->field_hash_chain_depth);

        if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth))
                printf("Deepest data hash chain: %" PRIu64"\n",
                       f->header->data_hash_chain_depth);

        /* NOTE(review): 'st' is declared on a line elided from this view
         * (presumably 'struct stat st;' — confirm). st_blocks is in 512-byte
         * units, hence the multiplication. */
        if (fstat(f->fd, &st) >= 0)
                printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
/* Warn (once, at open time) when a writable journal file lives on btrfs with
 * copy-on-write still enabled, since the journal's write pattern fragments
 * COW file systems badly.
 *
 * NOTE(review): this extraction has elided the declarations of 'r' and
 * 'attrs', the 'if (r < 0)' guards in front of the log_warning_errno() calls,
 * and the function's return statements — confirm against the full file. */
static int journal_file_warn_btrfs(JournalFile *f) {

        /* Before we write anything, check if the COW logic is turned
         * off on btrfs. Given our write pattern that is quite
         * unfriendly to COW file systems this should greatly improve
         * performance on COW file systems, such as btrfs, at the
         * expense of data integrity features (which shouldn't be too
         * bad, given that we do our own checksumming). */

        r = btrfs_is_filesystem(f->fd);
                return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");

        r = read_attr_fd(f->fd, &attrs);
                return log_warning_errno(r, "Failed to read file attributes: %m");

        /* NOCOW already set on the file: nothing to complain about. */
        if (attrs & FS_NOCOW_FL) {
                log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");

        log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
                   "This is likely to slow down journal access substantially, please consider turning "
                   "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
/* Open (or create, with O_CREAT) a journal file and map its header.
 *
 * Visible behavior in this view: allocates and initializes a JournalFile,
 * resolves the path (synthesizing /proc/self/<fd> when only an fd was
 * given), opens/maps the file, initializes or verifies the header, sets up
 * metrics, hash tables and (when sealing) FSS/HMAC state, and on success
 * stores the result in *ret.
 *
 * NOTE(review): the extraction has elided most of the parameter list (fd,
 * fname, flags, mode, compress, seal, …), nearly every 'if (r < 0)' error
 * path, the 'fail:' cleanup label and several closing braces — the text
 * below is the visible token stream only; confirm against the full file. */
int journal_file_open(
                uint64_t compress_threshold_bytes,
                JournalMetrics *metrics,
                MMapCache *mmap_cache,
                Set *deferred_closes,
                JournalFile *template,
                JournalFile **ret) {

        bool newly_created = false;

        /* Either a valid fd or a file name must be supplied. */
        assert(fd >= 0 || fname);

        if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))

        /* Newly created files must carry the .journal suffix. */
        if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))

        f = new(JournalFile, 1);

        *f = (JournalFile) {
                .prot = prot_from_flags(flags),
                .writable = (flags & O_ACCMODE) != O_RDONLY,
                .compress_zstd = compress,
                .compress_lz4 = compress,
                .compress_xz = compress,
                /* (uint64_t) -1 means "use the default threshold"; otherwise
                 * clamp to the minimum sensible threshold. */
                .compress_threshold_bytes = compress_threshold_bytes == (uint64_t) -1 ?
                                            DEFAULT_COMPRESS_THRESHOLD :
                                            MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),

        /* We turn on keyed hashes by default, but provide an environment variable to turn them off, if
         * people really want that */
        r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
                log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring.");
                f->keyed_hash = true;

        if (DEBUG_LOGGING) {
                /* Only log the effective settings when they change, to avoid
                 * spamming the debug log on every open. */
                static int last_seal = -1, last_compress = -1, last_keyed_hash = -1;
                static uint64_t last_bytes = UINT64_MAX;
                char bytes[FORMAT_BYTES_MAX];

                if (last_seal != f->seal ||
                    last_keyed_hash != f->keyed_hash ||
                    last_compress != JOURNAL_FILE_COMPRESS(f) ||
                    last_bytes != f->compress_threshold_bytes) {

                        log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
                                  yes_no(f->seal), yes_no(f->keyed_hash), yes_no(JOURNAL_FILE_COMPRESS(f)),
                                  format_bytes(bytes, sizeof bytes, f->compress_threshold_bytes));
                        last_seal = f->seal;
                        last_keyed_hash = f->keyed_hash;
                        last_compress = JOURNAL_FILE_COMPRESS(f);
                        last_bytes = f->compress_threshold_bytes;

        /* Reuse the caller's mmap cache if given, else create a fresh one.
         * NOTE(review): the branch selecting between these two lines is
         * elided in this view. */
        f->mmap = mmap_cache_ref(mmap_cache);
        f->mmap = mmap_cache_new();

        f->path = strdup(fname);

        /* If we don't know the path, fill in something explanatory and vaguely useful */
        if (asprintf(&f->path, "/proc/self/%i", fd) < 0) {

        f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
        if (!f->chain_cache) {

        /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
         * or so, we likely fail quickly than block for long. For regular files O_NONBLOCK has no effect, hence
         * it doesn't hurt in that case. */
        f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);

        /* fds we opened here by us should also be closed by us. */

        /* Undo the O_NONBLOCK we set above, now that the open succeeded. */
        r = fd_nonblock(f->fd, false);

        f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd);

        r = journal_file_fstat(f);

        /* Empty + writable: this is a fresh file, write a new header. */
        if (f->last_stat.st_size == 0 && f->writable) {

                (void) journal_file_warn_btrfs(f);

                /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
                 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
                 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
                 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
                 * solely on mtime/atime/ctime of the file. */
                (void) fd_setcrtime(f->fd, 0);

                /* Try to load the FSPRG state, and if we can't, then
                 * just don't do sealing */
                r = journal_file_fss_load(f);

                r = journal_file_init_header(f, template);

                r = journal_file_fstat(f);

                newly_created = true;

        /* A file smaller than the minimum header cannot be a valid journal. */
        if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {

        r = mmap_cache_get(f->mmap, f->cache_fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h, NULL);
        /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
         * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error

        if (!newly_created) {
                set_clear_with_destructor(deferred_closes, journal_file_close);

                r = journal_file_verify_header(f);

        if (!newly_created && f->writable) {
                r = journal_file_fss_load(f);

        /* Derive metrics from the backing file system when requested.
         * NOTE(review): the 'if (metrics)'-style head of this branch is
         * elided in this view. */
                journal_default_metrics(metrics, f->fd);
                f->metrics = *metrics;
        } else if (template)
                f->metrics = template->metrics;

        r = journal_file_refresh_header(f);

        r = journal_file_hmac_setup(f);

        if (newly_created) {
                r = journal_file_setup_field_hash_table(f);

                r = journal_file_setup_data_hash_table(f);

                r = journal_file_append_first_tag(f);

        if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) {

        if (template && template->post_change_timer) {
                r = journal_file_enable_post_change_timer(
                                sd_event_source_get_event(template->post_change_timer),
                                template->post_change_timer_period);

        /* The file is opened now successfully, thus we take possession of any passed in fd. */

        /* Failure path: surface a SIGBUS hit on the mapping, then close. */
        if (f->cache_fd && mmap_cache_got_sigbus(f->mmap, f->cache_fd))

        (void) journal_file_close(f);
/* Rename an online journal file to its archived name
 * ("<prefix>@<seqnum-id>-<head-seqnum>-<head-realtime>.journal") and queue it
 * for offline archival and btrfs defragmentation on close.
 *
 * NOTE(review): the return statements after each guard are elided in this
 * extraction — confirm against the full file. */
int journal_file_archive(JournalFile *f) {
        _cleanup_free_ char *p = NULL;

        /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
         * rotation, since we don't know the actual path, and couldn't rename the file hence. */
        if (path_startswith(f->path, "/proc/self/fd"))

        if (!endswith(f->path, ".journal"))

        /* "%.*s" with strlen-8 strips the ".journal" suffix before appending
         * the archive decoration. */
        if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
                     (int) strlen(f->path) - 8, f->path,
                     SD_ID128_FORMAT_VAL(f->header->seqnum_id),
                     le64toh(f->header->head_entry_seqnum),
                     le64toh(f->header->head_entry_realtime)) < 0)

        /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
         * ignore that case. */
        if (rename(f->path, p) < 0 && errno != ENOENT)

        /* Sync the rename to disk */
        (void) fsync_directory_of_file(f->fd);

        /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
         * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
         * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
         * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining

        /* Currently, btrfs is not very good with out write patterns and fragments heavily. Let's defrag our journal
         * files when we archive them */
        f->defrag_on_close = true;
/* Begin closing a journal file: when a deferred-close set is supplied, start
 * an asynchronous offline and park the file in the set (to be reaped later);
 * otherwise (or when parking fails) close it synchronously.
 *
 * NOTE(review): the first parameter (the JournalFile *f itself), the 'int r'
 * declaration and several guards are elided in this extraction. */
JournalFile* journal_initiate_close(
                Set *deferred_closes) {

        if (deferred_closes) {

                r = set_put(deferred_closes, f);
                        /* Couldn't park it — fall back to an immediate close. */
                        log_debug_errno(r, "Failed to add file to deferred close set, closing immediately.");
                        /* Async offline; completion is handled by the deferred-close machinery. */
                        (void) journal_file_set_offline(f, false);

        return journal_file_close(f);
/* Rotate a journal file: archive the current file (rename to its archived
 * name), open a fresh file under the original path, initiate closing of the
 * old one, and swap *f to point at the new file.
 *
 * NOTE(review): most of the parameter list and the journal_file_open()
 * argument list are elided in this extraction — confirm against the full
 * file. */
int journal_file_rotate(
                uint64_t compress_threshold_bytes,
                Set *deferred_closes) {

        JournalFile *new_file = NULL;

        r = journal_file_archive(*f);

        r = journal_file_open(
                        compress_threshold_bytes,

        /* Old file is closed (possibly deferred) regardless of reopen result. */
        journal_initiate_close(*f, deferred_closes);
/* Move a (suspect) journal file out of the way by renaming it to
 * "<prefix>@<realtime>-<random>.journal~" within the same directory, then
 * best-effort mark it NOCOW and defragment it (for btrfs).
 *
 * NOTE(review): the final asprintf() argument (after now(CLOCK_REALTIME))
 * and the return statements are elided in this extraction. */
int journal_file_dispose(int dir_fd, const char *fname) {
        _cleanup_free_ char *p = NULL;
        _cleanup_close_ int fd = -1;

        /* Renames a journal file to *.journal~, i.e. to mark it as corruped or otherwise uncleanly shutdown. Note that
         * this is done without looking into the file or changing any of its contents. The idea is that this is called
         * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
         * for writing anymore. */

        if (!endswith(fname, ".journal"))

        /* "%.*s" with strlen-8 strips the ".journal" suffix. */
        if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
                     (int) strlen(fname) - 8, fname,
                     now(CLOCK_REALTIME),

        if (renameat(dir_fd, fname, dir_fd, p) < 0)

        /* btrfs doesn't cope well with our write pattern and fragments heavily. Let's defrag all files we rotate */
        fd = openat(dir_fd, p, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
                log_debug_errno(errno, "Failed to open file for defragmentation/FS_NOCOW_FL, ignoring: %m");

        /* Both are best-effort: failures are deliberately ignored. */
        (void) chattr_fd(fd, 0, FS_NOCOW_FL, NULL);
        (void) btrfs_defrag_fd(fd);
/* Like journal_file_open(), but if the file turns out to be corrupted (or
 * otherwise unusable for writing), rename it away with
 * journal_file_dispose() and retry the open exactly once.
 *
 * NOTE(review): several leading parameters (fname, flags, mode, compress,
 * seal) and the IN_SET(r, ...) head wrapping the error-code list are elided
 * in this extraction. */
int journal_file_open_reliably(
                uint64_t compress_threshold_bytes,
                JournalMetrics *metrics,
                MMapCache *mmap_cache,
                Set *deferred_closes,
                JournalFile *template,
                JournalFile **ret) {

        r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
                              deferred_closes, template, ret);
        /* Only these errors are considered "corruption we can recover from by
         * rotating the file away"; anything else is propagated as-is. */
                       -EBADMSG,           /* Corrupted */
                       -ENODATA,           /* Truncated */
                       -EHOSTDOWN,         /* Other machine */
                       -EPROTONOSUPPORT,   /* Incompatible feature */
                       -EBUSY,             /* Unclean shutdown */
                       -ESHUTDOWN,         /* Already archived */
                       -EIO,               /* IO error, including SIGBUS on mmap */
                       -EIDRM,             /* File has been deleted */
                       -ETXTBSY))          /* File is from the future */

        /* Read-only or non-creating opens cannot recover by replacing the file. */
        if ((flags & O_ACCMODE) == O_RDONLY)

        if (!(flags & O_CREAT))

        if (!endswith(fname, ".journal"))

        /* The file is corrupted. Rotate it away and try it again (but only once) */
        log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);

        r = journal_file_dispose(AT_FDCWD, fname);

        return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
                                 deferred_closes, template, ret);
/* Copy one entry object (at offset p in 'from') into journal 'to':
 * re-appends every data item (decompressing if needed, so the destination
 * can apply its own compression/hashing scheme), recomputes the XOR hash
 * with the destination's hash function, and appends a new entry with the
 * source's timestamps and boot ID.
 *
 * NOTE(review): declarations of ts, n, i, items, le_hash, l, t, data, rsize,
 * u, h, plus the 'if (r < 0)' guards and returns, are elided in this
 * extraction — confirm against the full file. */
int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
        uint64_t q, xor_hash = 0;
        const sd_id128_t *boot_id;

        /* Preserve the source entry's clock readings and boot ID. */
        ts.monotonic = le64toh(o->entry.monotonic);
        ts.realtime = le64toh(o->entry.realtime);
        boot_id = &o->entry.boot_id;

        n = journal_file_entry_n_items(o);
        /* alloca() can't take 0, hence let's allocate at least one */
        items = newa(EntryItem, MAX(1u, n));

        for (i = 0; i < n; i++) {

                q = le64toh(o->entry.items[i].object_offset);
                le_hash = o->entry.items[i].hash;

                /* Note: 'o' is repointed at the data object here; the entry
                 * object is re-fetched after the loop. */
                r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);

                /* Hash stored in the entry must match the data object's own hash. */
                if (le_hash != o->data.hash)

                l = le64toh(READ_NOW(o->object.size));
                if (l < offsetof(Object, data.payload))

                l -= offsetof(Object, data.payload);

                /* We hit the limit on 32bit machines */
                if ((uint64_t) t != l)

                if (o->object.flags & OBJECT_COMPRESSION_MASK) {
#if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
                        /* Decompress into the source file's scratch buffer so
                         * the destination can store it its own way. */
                        r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
                                            o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);

                        data = from->compress_buffer;
                        /* Compiled without compression support but the object
                         * is compressed: cannot copy it. */
                        return -EPROTONOSUPPORT;

                        data = o->data.payload;

                r = journal_file_append_data(to, data, l, &u, &h);

                /* The entry's XOR hash must be computed with the DESTINATION
                 * file's hash function (keyed vs. plain jenkins). */
                if (JOURNAL_HEADER_KEYED_HASH(to->header))
                        xor_hash ^= jenkins_hash64(data, l);
                        xor_hash ^= le64toh(u->data.hash);

                items[i].object_offset = htole64(h);
                items[i].hash = u->data.hash;

        /* Re-fetch the entry object; 'o' was repointed inside the loop. */
        r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);

        r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n,

        if (mmap_cache_got_sigbus(to->mmap, to->cache_fd))
/* Reset all journal metrics to the sentinel (uint64_t) -1, meaning "pick an
 * automatic value later" (see journal_default_metrics()).
 *
 * NOTE(review): the closing "};" / "}" lines are elided in this extraction. */
void journal_reset_metrics(JournalMetrics *m) {

        /* Set everything to "pick automatic values". */

        *m = (JournalMetrics) {
                .min_use = (uint64_t) -1,
                .max_use = (uint64_t) -1,
                .min_size = (uint64_t) -1,
                .max_size = (uint64_t) -1,
                .keep_free = (uint64_t) -1,
                .n_max_files = (uint64_t) -1,
/* Fill in every metric still set to the (uint64_t) -1 sentinel with a value
 * derived from the backing file system's size (via fstatvfs on fd), clamped
 * to the compile-time bounds, then page-align and cross-normalize the
 * resulting limits (min_use <= max_use, max_size*2 <= max_use, etc.).
 *
 * NOTE(review): several 'else' heads and closing braces are elided in this
 * extraction (e.g. the fallback assignments are the else-branches of the
 * CLAMP computations) — confirm against the full file. */
void journal_default_metrics(JournalMetrics *m, int fd) {
        char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
        uint64_t fs_size = 0;

        /* Total size of the backing file system, 0 if undeterminable. */
        if (fstatvfs(fd, &ss) >= 0)
                fs_size = ss.f_frsize * ss.f_blocks;
                log_debug_errno(errno, "Failed to determine disk size: %m");

        if (m->max_use == (uint64_t) -1) {

                m->max_use = CLAMP(PAGE_ALIGN(fs_size / 10), /* 10% of file system size */
                                   MAX_USE_LOWER, MAX_USE_UPPER);
                        /* Fallback when fs size is unknown. */
                        m->max_use = MAX_USE_LOWER;
                m->max_use = PAGE_ALIGN(m->max_use);

        /* Need room for at least two journal files. */
        if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
                m->max_use = JOURNAL_FILE_SIZE_MIN*2;

        if (m->min_use == (uint64_t) -1) {
                m->min_use = CLAMP(PAGE_ALIGN(fs_size / 50), /* 2% of file system size */
                                   MIN_USE_LOW, MIN_USE_HIGH);
                        m->min_use = MIN_USE_LOW;

        if (m->min_use > m->max_use)
                m->min_use = m->max_use;

        if (m->max_size == (uint64_t) -1)
                m->max_size = MIN(PAGE_ALIGN(m->max_use / 8), /* 8 chunks */

        m->max_size = PAGE_ALIGN(m->max_size);

        if (m->max_size != 0) {
                if (m->max_size < JOURNAL_FILE_SIZE_MIN)
                        m->max_size = JOURNAL_FILE_SIZE_MIN;

                /* Keep max_use big enough for two files of max_size. */
                if (m->max_use != 0 && m->max_size*2 > m->max_use)
                        m->max_use = m->max_size*2;

        if (m->min_size == (uint64_t) -1)
                m->min_size = JOURNAL_FILE_SIZE_MIN;

        /* max_size == 0 means "no limit", hence the ?: UINT64_MAX. */
        m->min_size = CLAMP(PAGE_ALIGN(m->min_size),
                            JOURNAL_FILE_SIZE_MIN,
                            m->max_size ?: UINT64_MAX);

        if (m->keep_free == (uint64_t) -1) {
                m->keep_free = MIN(PAGE_ALIGN(fs_size / 20), /* 5% of file system size */
                        /* Fallback when fs size is unknown. */
                        m->keep_free = DEFAULT_KEEP_FREE;

        if (m->n_max_files == (uint64_t) -1)
                m->n_max_files = DEFAULT_N_MAX_FILES;

        log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
                  format_bytes(a, sizeof(a), m->min_use),
                  format_bytes(b, sizeof(b), m->max_use),
                  format_bytes(c, sizeof(c), m->max_size),
                  format_bytes(d, sizeof(d), m->min_size),
                  format_bytes(e, sizeof(e), m->keep_free),
/* Report the realtime-clock range covered by this journal file, straight
 * from the header's head/tail entry timestamps.
 *
 * NOTE(review): asserts, the 'if (from)' / 'if (to)' guards and return
 * statements are elided in this extraction — a zero timestamp appears to
 * mean "no entries yet"; confirm against the full file. */
int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {

                if (f->header->head_entry_realtime == 0)

                *from = le64toh(f->header->head_entry_realtime);

                if (f->header->tail_entry_realtime == 0)

                *to = le64toh(f->header->tail_entry_realtime);
/* Report the monotonic-clock range covered by this file for a specific boot:
 * looks up the _BOOT_ID= data object for boot_id, then reads the monotonic
 * timestamp of that object's first entry (for *from) and of its last entry,
 * fetched via the entry array (for *to).
 *
 * NOTE(review): declarations of r, o, p, the 'if (r < 0)' guards, the final
 * generic_array_get_plus_one() arguments and the returns are elided in this
 * extraction — confirm against the full file. */
int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {

        r = find_data_object_by_boot_id(f, boot_id, &o, &p);

        if (le64toh(o->data.n_entries) <= 0)

                /* First entry referencing this boot ID → earliest monotonic time. */
                r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);

                *from = le64toh(o->entry.monotonic);

                /* Re-fetch the data object ('o' was repointed above)... */
                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

                /* ...then walk to its last entry (index n_entries-1). */
                r = generic_array_get_plus_one(f,
                                               le64toh(o->data.entry_offset),
                                               le64toh(o->data.entry_array_offset),
                                               le64toh(o->data.n_entries)-1,

                *to = le64toh(o->entry.monotonic);
4055 bool journal_file_rotate_suggested(JournalFile
*f
, usec_t max_file_usec
) {
4059 /* If we gained new header fields we gained new features,
4060 * hence suggest a rotation */
4061 if (le64toh(f
->header
->header_size
) < sizeof(Header
)) {
4062 log_debug("%s uses an outdated header, suggesting rotation.", f
->path
);
4066 /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
4067 * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
4068 * need the n_data field, which only exists in newer versions. */
4070 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
4071 if (le64toh(f
->header
->n_data
) * 4ULL > (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
)) * 3ULL) {
4072 log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64
" of %"PRIu64
" items, %llu file size, %"PRIu64
" bytes per hash table item), suggesting rotation.",
4074 100.0 * (double) le64toh(f
->header
->n_data
) / ((double) (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
))),
4075 le64toh(f
->header
->n_data
),
4076 le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
),
4077 (unsigned long long) f
->last_stat
.st_size
,
4078 f
->last_stat
.st_size
/ le64toh(f
->header
->n_data
));
4082 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
4083 if (le64toh(f
->header
->n_fields
) * 4ULL > (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
)) * 3ULL) {
4084 log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64
" of %"PRIu64
" items), suggesting rotation.",
4086 100.0 * (double) le64toh(f
->header
->n_fields
) / ((double) (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
))),
4087 le64toh(f
->header
->n_fields
),
4088 le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
));
4092 /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
4093 * longest chain is longer than some threshold, let's suggest rotation. */
4094 if (JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) &&
4095 le64toh(f
->header
->data_hash_chain_depth
) > HASH_CHAIN_DEPTH_MAX
) {
4096 log_debug("Data hash table of %s has deepest hash chain of length %" PRIu64
", suggesting rotation.",
4097 f
->path
, le64toh(f
->header
->data_hash_chain_depth
));
4101 if (JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) &&
4102 le64toh(f
->header
->field_hash_chain_depth
) > HASH_CHAIN_DEPTH_MAX
) {
4103 log_debug("Field hash table of %s has deepest hash chain of length at %" PRIu64
", suggesting rotation.",
4104 f
->path
, le64toh(f
->header
->field_hash_chain_depth
));
4108 /* Are the data objects properly indexed by field objects? */
4109 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
) &&
4110 JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
) &&
4111 le64toh(f
->header
->n_data
) > 0 &&
4112 le64toh(f
->header
->n_fields
) == 0)
4115 if (max_file_usec
> 0) {
4118 h
= le64toh(f
->header
->head_entry_realtime
);
4119 t
= now(CLOCK_REALTIME
);
4121 if (h
> 0 && t
> h
+ max_file_usec
)