1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
6 #include <linux/magic.h>
10 #include <sys/statvfs.h>
16 #include "alloc-util.h"
17 #include "chattr-util.h"
21 #include "format-util.h"
23 #include "journal-authenticate.h"
24 #include "journal-def.h"
25 #include "journal-file.h"
27 #include "memory-util.h"
28 #include "path-util.h"
29 #include "random-util.h"
31 #include "sort-util.h"
32 #include "stat-util.h"
33 #include "string-table.h"
34 #include "string-util.h"
36 #include "sync-util.h"
37 #include "xattr-util.h"
39 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
40 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
42 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
43 #define MIN_COMPRESS_THRESHOLD (8ULL)
45 /* This is the minimum journal file size */
46 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
48 /* These are the lower and upper bounds if we deduce the max_use value
49 * from the file system size */
50 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
51 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
53 /* Those are the lower and upper bounds for the minimal use limit,
54 * i.e. how much we'll use even if keep_free suggests otherwise. */
55 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
56 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
58 /* This is the upper bound if we deduce max_size from max_use */
59 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
61 /* This is the upper bound if we deduce the keep_free value from the
63 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
65 /* This is the keep_free value when we can't determine the system
67 #define DEFAULT_KEEP_FREE (1024 * 1024ULL) /* 1 MB */
69 /* This is the default maximum number of journal files to keep around. */
70 #define DEFAULT_N_MAX_FILES 100
72 /* n_data was the first entry we added after the initial file format design */
73 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
75 /* How many entries to keep in the entry array chain cache at max */
76 #define CHAIN_CACHE_MAX 20
78 /* How much to increase the journal file size at once each time we allocate something new. */
79 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL) /* 8MB */
81 /* Reread fstat() of the file for detecting deletions at least this often */
82 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
84 /* The mmap context to use for the header we pick as one above the last defined typed */
85 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
87 /* Longest hash chain to rotate after */
88 #define HASH_CHAIN_DEPTH_MAX 100
91 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
94 int journal_file_tail_end(JournalFile
*f
, uint64_t *ret_offset
) {
103 p
= le64toh(f
->header
->tail_object_offset
);
105 p
= le64toh(f
->header
->header_size
);
109 r
= journal_file_read_object(f
, OBJECT_UNUSED
, p
, &tail
);
113 sz
= le64toh(tail
.object
.size
);
114 if (sz
> UINT64_MAX
- sizeof(uint64_t) + 1)
118 if (p
> UINT64_MAX
- sz
)
129 int journal_file_set_offline_thread_join(JournalFile
*f
) {
134 if (f
->offline_state
== OFFLINE_JOINED
)
137 r
= pthread_join(f
->offline_thread
, NULL
);
141 f
->offline_state
= OFFLINE_JOINED
;
143 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
149 static int journal_file_set_online(JournalFile
*f
) {
157 if (f
->fd
< 0 || !f
->header
)
161 switch (f
->offline_state
) {
163 /* No offline thread, no need to wait. */
167 case OFFLINE_SYNCING
:
168 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_SYNCING
, OFFLINE_CANCEL
))
170 /* Canceled syncing prior to offlining, no need to wait. */
174 case OFFLINE_AGAIN_FROM_SYNCING
:
175 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_AGAIN_FROM_SYNCING
, OFFLINE_CANCEL
))
177 /* Canceled restart from syncing, no need to wait. */
181 case OFFLINE_AGAIN_FROM_OFFLINING
:
182 if (!__sync_bool_compare_and_swap(&f
->offline_state
, OFFLINE_AGAIN_FROM_OFFLINING
, OFFLINE_CANCEL
))
184 /* Canceled restart from offlining, must wait for offlining to complete however. */
189 r
= journal_file_set_offline_thread_join(f
);
199 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
202 switch (f
->header
->state
) {
207 f
->header
->state
= STATE_ONLINE
;
216 JournalFile
* journal_file_close(JournalFile
*f
) {
220 if (f
->mmap
&& f
->cache_fd
)
221 mmap_cache_fd_free(f
->cache_fd
);
223 if (f
->fd
>= 0 && f
->defrag_on_close
) {
225 /* Be friendly to btrfs: turn COW back on again now,
226 * and defragment the file. We won't write to the file
227 * ever again, hence remove all fragmentation, and
228 * reenable all the good bits COW usually provides
229 * (such as data checksumming). */
231 (void) chattr_fd(f
->fd
, 0, FS_NOCOW_FL
, NULL
);
232 (void) btrfs_defrag_fd(f
->fd
);
239 mmap_cache_unref(f
->mmap
);
241 ordered_hashmap_free_free(f
->chain_cache
);
244 free(f
->compress_buffer
);
249 munmap(f
->fss_file
, PAGE_ALIGN(f
->fss_file_size
));
251 free(f
->fsprg_state
);
256 gcry_md_close(f
->hmac
);
262 static int journal_file_init_header(JournalFile
*f
, JournalFile
*template) {
269 memcpy(h
.signature
, HEADER_SIGNATURE
, 8);
270 h
.header_size
= htole64(ALIGN64(sizeof(h
)));
272 h
.incompatible_flags
|= htole32(
273 f
->compress_xz
* HEADER_INCOMPATIBLE_COMPRESSED_XZ
|
274 f
->compress_lz4
* HEADER_INCOMPATIBLE_COMPRESSED_LZ4
|
275 f
->compress_zstd
* HEADER_INCOMPATIBLE_COMPRESSED_ZSTD
|
276 f
->keyed_hash
* HEADER_INCOMPATIBLE_KEYED_HASH
);
278 h
.compatible_flags
= htole32(
279 f
->seal
* HEADER_COMPATIBLE_SEALED
);
281 r
= sd_id128_randomize(&h
.file_id
);
286 h
.seqnum_id
= template->header
->seqnum_id
;
287 h
.tail_entry_seqnum
= template->header
->tail_entry_seqnum
;
289 h
.seqnum_id
= h
.file_id
;
291 k
= pwrite(f
->fd
, &h
, sizeof(h
), 0);
301 static int journal_file_refresh_header(JournalFile
*f
) {
307 r
= sd_id128_get_machine(&f
->header
->machine_id
);
308 if (IN_SET(r
, -ENOENT
, -ENOMEDIUM
))
309 /* We don't have a machine-id, let's continue without */
310 zero(f
->header
->machine_id
);
314 r
= sd_id128_get_boot(&f
->header
->boot_id
);
318 r
= journal_file_set_online(f
);
320 /* Sync the online state to disk; likely just created a new file, also sync the directory this file
322 (void) fsync_full(f
->fd
);
327 static bool warn_wrong_flags(const JournalFile
*f
, bool compatible
) {
328 const uint32_t any
= compatible
? HEADER_COMPATIBLE_ANY
: HEADER_INCOMPATIBLE_ANY
,
329 supported
= compatible
? HEADER_COMPATIBLE_SUPPORTED
: HEADER_INCOMPATIBLE_SUPPORTED
;
330 const char *type
= compatible
? "compatible" : "incompatible";
333 flags
= le32toh(compatible
? f
->header
->compatible_flags
: f
->header
->incompatible_flags
);
335 if (flags
& ~supported
) {
337 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32
,
338 f
->path
, type
, flags
& ~any
);
339 flags
= (flags
& any
) & ~supported
;
343 _cleanup_free_
char *t
= NULL
;
346 if (flags
& HEADER_COMPATIBLE_SEALED
)
347 strv
[n
++] = "sealed";
349 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_XZ
)
350 strv
[n
++] = "xz-compressed";
351 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_LZ4
)
352 strv
[n
++] = "lz4-compressed";
353 if (flags
& HEADER_INCOMPATIBLE_COMPRESSED_ZSTD
)
354 strv
[n
++] = "zstd-compressed";
355 if (flags
& HEADER_INCOMPATIBLE_KEYED_HASH
)
356 strv
[n
++] = "keyed-hash";
359 assert(n
< ELEMENTSOF(strv
));
361 t
= strv_join((char**) strv
, ", ");
362 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
363 f
->path
, type
, n
> 1 ? "flags" : "flag", strnull(t
));
371 static int journal_file_verify_header(JournalFile
*f
) {
372 uint64_t arena_size
, header_size
;
377 if (memcmp(f
->header
->signature
, HEADER_SIGNATURE
, 8))
380 /* In both read and write mode we refuse to open files with incompatible
381 * flags we don't know. */
382 if (warn_wrong_flags(f
, false))
383 return -EPROTONOSUPPORT
;
385 /* When open for writing we refuse to open files with compatible flags, too. */
386 if (f
->writable
&& warn_wrong_flags(f
, true))
387 return -EPROTONOSUPPORT
;
389 if (f
->header
->state
>= _STATE_MAX
)
392 header_size
= le64toh(READ_NOW(f
->header
->header_size
));
394 /* The first addition was n_data, so check that we are at least this large */
395 if (header_size
< HEADER_SIZE_MIN
)
398 if (JOURNAL_HEADER_SEALED(f
->header
) && !JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
401 arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
403 if (UINT64_MAX
- header_size
< arena_size
|| header_size
+ arena_size
> (uint64_t) f
->last_stat
.st_size
)
406 if (le64toh(f
->header
->tail_object_offset
) > header_size
+ arena_size
)
409 if (!VALID64(le64toh(f
->header
->data_hash_table_offset
)) ||
410 !VALID64(le64toh(f
->header
->field_hash_table_offset
)) ||
411 !VALID64(le64toh(f
->header
->tail_object_offset
)) ||
412 !VALID64(le64toh(f
->header
->entry_array_offset
)))
416 sd_id128_t machine_id
;
420 r
= sd_id128_get_machine(&machine_id
);
424 if (!sd_id128_equal(machine_id
, f
->header
->machine_id
))
427 state
= f
->header
->state
;
429 if (state
== STATE_ARCHIVED
)
430 return -ESHUTDOWN
; /* Already archived */
431 else if (state
== STATE_ONLINE
)
432 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
433 "Journal file %s is already online. Assuming unclean closing.",
435 else if (state
!= STATE_OFFLINE
)
436 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY
),
437 "Journal file %s has unknown state %i.",
440 if (f
->header
->field_hash_table_size
== 0 || f
->header
->data_hash_table_size
== 0)
443 /* Don't permit appending to files from the future. Because otherwise the realtime timestamps wouldn't
444 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
446 if (le64toh(f
->header
->tail_entry_realtime
) > now(CLOCK_REALTIME
))
447 return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY
),
448 "Journal file %s is from the future, refusing to append new data to it that'd be older.",
452 f
->compress_xz
= JOURNAL_HEADER_COMPRESSED_XZ(f
->header
);
453 f
->compress_lz4
= JOURNAL_HEADER_COMPRESSED_LZ4(f
->header
);
454 f
->compress_zstd
= JOURNAL_HEADER_COMPRESSED_ZSTD(f
->header
);
456 f
->seal
= JOURNAL_HEADER_SEALED(f
->header
);
458 f
->keyed_hash
= JOURNAL_HEADER_KEYED_HASH(f
->header
);
463 int journal_file_fstat(JournalFile
*f
) {
469 if (fstat(f
->fd
, &f
->last_stat
) < 0)
472 f
->last_stat_usec
= now(CLOCK_MONOTONIC
);
474 /* Refuse dealing with files that aren't regular */
475 r
= stat_verify_regular(&f
->last_stat
);
479 /* Refuse appending to files that are already deleted */
480 if (f
->last_stat
.st_nlink
<= 0)
486 static int journal_file_allocate(JournalFile
*f
, uint64_t offset
, uint64_t size
) {
487 uint64_t old_size
, new_size
, old_header_size
, old_arena_size
;
493 /* We assume that this file is not sparse, and we know that for sure, since we always call
494 * posix_fallocate() ourselves */
496 if (size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - offset
)
499 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
502 old_header_size
= le64toh(READ_NOW(f
->header
->header_size
));
503 old_arena_size
= le64toh(READ_NOW(f
->header
->arena_size
));
504 if (old_arena_size
> PAGE_ALIGN_DOWN(UINT64_MAX
) - old_header_size
)
507 old_size
= old_header_size
+ old_arena_size
;
509 new_size
= MAX(PAGE_ALIGN(offset
+ size
), old_header_size
);
511 if (new_size
<= old_size
) {
513 /* We already pre-allocated enough space, but before
514 * we write to it, let's check with fstat() if the
515 * file got deleted, in order make sure we don't throw
516 * away the data immediately. Don't check fstat() for
517 * all writes though, but only once ever 10s. */
519 if (f
->last_stat_usec
+ LAST_STAT_REFRESH_USEC
> now(CLOCK_MONOTONIC
))
522 return journal_file_fstat(f
);
525 /* Allocate more space. */
527 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
530 if (new_size
> f
->metrics
.min_size
&& f
->metrics
.keep_free
> 0) {
533 if (fstatvfs(f
->fd
, &svfs
) >= 0) {
536 available
= LESS_BY((uint64_t) svfs
.f_bfree
* (uint64_t) svfs
.f_bsize
, f
->metrics
.keep_free
);
538 if (new_size
- old_size
> available
)
543 /* Increase by larger blocks at once */
544 new_size
= DIV_ROUND_UP(new_size
, FILE_SIZE_INCREASE
) * FILE_SIZE_INCREASE
;
545 if (f
->metrics
.max_size
> 0 && new_size
> f
->metrics
.max_size
)
546 new_size
= f
->metrics
.max_size
;
548 /* Note that the glibc fallocate() fallback is very
549 inefficient, hence we try to minimize the allocation area
551 r
= posix_fallocate_loop(f
->fd
, old_size
, new_size
- old_size
);
555 f
->header
->arena_size
= htole64(new_size
- old_header_size
);
557 return journal_file_fstat(f
);
560 static unsigned type_to_context(ObjectType type
) {
561 /* One context for each type, plus one catch-all for the rest */
562 assert_cc(_OBJECT_TYPE_MAX
<= MMAP_CACHE_MAX_CONTEXTS
);
563 assert_cc(CONTEXT_HEADER
< MMAP_CACHE_MAX_CONTEXTS
);
564 return type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
? type
: 0;
567 static int journal_file_move_to(
583 if (size
> UINT64_MAX
- offset
)
586 /* Avoid SIGBUS on invalid accesses */
587 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
) {
588 /* Hmm, out of range? Let's refresh the fstat() data
589 * first, before we trust that check. */
591 r
= journal_file_fstat(f
);
595 if (offset
+ size
> (uint64_t) f
->last_stat
.st_size
)
596 return -EADDRNOTAVAIL
;
599 return mmap_cache_fd_get(f
->cache_fd
, type_to_context(type
), keep_always
, offset
, size
, &f
->last_stat
, ret
);
602 static uint64_t minimum_header_size(Object
*o
) {
604 static const uint64_t table
[] = {
605 [OBJECT_DATA
] = sizeof(DataObject
),
606 [OBJECT_FIELD
] = sizeof(FieldObject
),
607 [OBJECT_ENTRY
] = sizeof(EntryObject
),
608 [OBJECT_DATA_HASH_TABLE
] = sizeof(HashTableObject
),
609 [OBJECT_FIELD_HASH_TABLE
] = sizeof(HashTableObject
),
610 [OBJECT_ENTRY_ARRAY
] = sizeof(EntryArrayObject
),
611 [OBJECT_TAG
] = sizeof(TagObject
),
614 if (o
->object
.type
>= ELEMENTSOF(table
) || table
[o
->object
.type
] <= 0)
615 return sizeof(ObjectHeader
);
617 return table
[o
->object
.type
];
620 /* Lightweight object checks. We want this to be fast, so that we won't
621 * slowdown every journal_file_move_to_object() call too much. */
622 static int journal_file_check_object(JournalFile
*f
, uint64_t offset
, Object
*o
) {
626 switch (o
->object
.type
) {
629 if ((le64toh(o
->data
.entry_offset
) == 0) ^ (le64toh(o
->data
.n_entries
) == 0))
630 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
631 "Bad n_entries: %" PRIu64
": %" PRIu64
,
632 le64toh(o
->data
.n_entries
),
635 if (le64toh(o
->object
.size
) <= offsetof(DataObject
, payload
))
636 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
637 "Bad object size (<= %zu): %" PRIu64
": %" PRIu64
,
638 offsetof(DataObject
, payload
),
639 le64toh(o
->object
.size
),
642 if (!VALID64(le64toh(o
->data
.next_hash_offset
)) ||
643 !VALID64(le64toh(o
->data
.next_field_offset
)) ||
644 !VALID64(le64toh(o
->data
.entry_offset
)) ||
645 !VALID64(le64toh(o
->data
.entry_array_offset
)))
646 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
647 "Invalid offset, next_hash_offset=" OFSfmt
", next_field_offset=" OFSfmt
", entry_offset=" OFSfmt
", entry_array_offset=" OFSfmt
": %" PRIu64
,
648 le64toh(o
->data
.next_hash_offset
),
649 le64toh(o
->data
.next_field_offset
),
650 le64toh(o
->data
.entry_offset
),
651 le64toh(o
->data
.entry_array_offset
),
657 if (le64toh(o
->object
.size
) <= offsetof(FieldObject
, payload
))
658 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
659 "Bad field size (<= %zu): %" PRIu64
": %" PRIu64
,
660 offsetof(FieldObject
, payload
),
661 le64toh(o
->object
.size
),
664 if (!VALID64(le64toh(o
->field
.next_hash_offset
)) ||
665 !VALID64(le64toh(o
->field
.head_data_offset
)))
666 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
667 "Invalid offset, next_hash_offset=" OFSfmt
", head_data_offset=" OFSfmt
": %" PRIu64
,
668 le64toh(o
->field
.next_hash_offset
),
669 le64toh(o
->field
.head_data_offset
),
676 sz
= le64toh(READ_NOW(o
->object
.size
));
677 if (sz
< offsetof(EntryObject
, items
) ||
678 (sz
- offsetof(EntryObject
, items
)) % sizeof(EntryItem
) != 0)
679 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
680 "Bad entry size (<= %zu): %" PRIu64
": %" PRIu64
,
681 offsetof(EntryObject
, items
),
685 if ((sz
- offsetof(EntryObject
, items
)) / sizeof(EntryItem
) <= 0)
686 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
687 "Invalid number items in entry: %" PRIu64
": %" PRIu64
,
688 (sz
- offsetof(EntryObject
, items
)) / sizeof(EntryItem
),
691 if (le64toh(o
->entry
.seqnum
) <= 0)
692 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
693 "Invalid entry seqnum: %" PRIx64
": %" PRIu64
,
694 le64toh(o
->entry
.seqnum
),
697 if (!VALID_REALTIME(le64toh(o
->entry
.realtime
)))
698 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
699 "Invalid entry realtime timestamp: %" PRIu64
": %" PRIu64
,
700 le64toh(o
->entry
.realtime
),
703 if (!VALID_MONOTONIC(le64toh(o
->entry
.monotonic
)))
704 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
705 "Invalid entry monotonic timestamp: %" PRIu64
": %" PRIu64
,
706 le64toh(o
->entry
.monotonic
),
712 case OBJECT_DATA_HASH_TABLE
:
713 case OBJECT_FIELD_HASH_TABLE
: {
716 sz
= le64toh(READ_NOW(o
->object
.size
));
717 if (sz
< offsetof(HashTableObject
, items
) ||
718 (sz
- offsetof(HashTableObject
, items
)) % sizeof(HashItem
) != 0 ||
719 (sz
- offsetof(HashTableObject
, items
)) / sizeof(HashItem
) <= 0)
720 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
721 "Invalid %s hash table size: %" PRIu64
": %" PRIu64
,
722 o
->object
.type
== OBJECT_DATA_HASH_TABLE
? "data" : "field",
729 case OBJECT_ENTRY_ARRAY
: {
732 sz
= le64toh(READ_NOW(o
->object
.size
));
733 if (sz
< offsetof(EntryArrayObject
, items
) ||
734 (sz
- offsetof(EntryArrayObject
, items
)) % sizeof(le64_t
) != 0 ||
735 (sz
- offsetof(EntryArrayObject
, items
)) / sizeof(le64_t
) <= 0)
736 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
737 "Invalid object entry array size: %" PRIu64
": %" PRIu64
,
741 if (!VALID64(le64toh(o
->entry_array
.next_entry_array_offset
)))
742 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
743 "Invalid object entry array next_entry_array_offset: " OFSfmt
": %" PRIu64
,
744 le64toh(o
->entry_array
.next_entry_array_offset
),
751 if (le64toh(o
->object
.size
) != sizeof(TagObject
))
752 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
753 "Invalid object tag size: %" PRIu64
": %" PRIu64
,
754 le64toh(o
->object
.size
),
757 if (!VALID_EPOCH(le64toh(o
->tag
.epoch
)))
758 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
759 "Invalid object tag epoch: %" PRIu64
": %" PRIu64
,
760 le64toh(o
->tag
.epoch
), offset
);
768 int journal_file_move_to_object(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
**ret
) {
777 /* Objects may only be located at multiple of 64 bit */
778 if (!VALID64(offset
))
779 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
780 "Attempt to move to object at non-64bit boundary: %" PRIu64
,
783 /* Object may not be located in the file header */
784 if (offset
< le64toh(f
->header
->header_size
))
785 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
786 "Attempt to move to object located in file header: %" PRIu64
,
789 r
= journal_file_move_to(f
, type
, false, offset
, sizeof(ObjectHeader
), &t
);
794 s
= le64toh(READ_NOW(o
->object
.size
));
797 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
798 "Attempt to move to uninitialized object: %" PRIu64
,
800 if (s
< sizeof(ObjectHeader
))
801 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
802 "Attempt to move to overly short object: %" PRIu64
,
805 if (o
->object
.type
<= OBJECT_UNUSED
)
806 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
807 "Attempt to move to object with invalid type: %" PRIu64
,
810 if (s
< minimum_header_size(o
))
811 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
812 "Attempt to move to truncated object: %" PRIu64
,
815 if (type
> OBJECT_UNUSED
&& o
->object
.type
!= type
)
816 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
817 "Attempt to move to object of unexpected type: %" PRIu64
,
820 r
= journal_file_move_to(f
, type
, false, offset
, s
, &t
);
826 r
= journal_file_check_object(f
, offset
, o
);
834 int journal_file_read_object(JournalFile
*f
, ObjectType type
, uint64_t offset
, Object
*ret
) {
842 /* Objects may only be located at multiple of 64 bit */
843 if (!VALID64(offset
))
844 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
845 "Attempt to read object at non-64bit boundary: %" PRIu64
,
848 /* Object may not be located in the file header */
849 if (offset
< le64toh(f
->header
->header_size
))
850 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
851 "Attempt to read object located in file header: %" PRIu64
,
854 /* This will likely read too much data but it avoids having to call pread() twice. */
855 r
= pread(f
->fd
, &o
, sizeof(Object
), offset
);
859 s
= le64toh(o
.object
.size
);
862 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
863 "Attempt to read uninitialized object: %" PRIu64
,
865 if (s
< sizeof(ObjectHeader
))
866 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
867 "Attempt to read overly short object: %" PRIu64
,
870 if (o
.object
.type
<= OBJECT_UNUSED
)
871 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
872 "Attempt to read object with invalid type: %" PRIu64
,
875 if (s
< minimum_header_size(&o
))
876 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
877 "Attempt to read truncated object: %" PRIu64
,
880 if (type
> OBJECT_UNUSED
&& o
.object
.type
!= type
)
881 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
882 "Attempt to read object of unexpected type: %" PRIu64
,
885 r
= journal_file_check_object(f
, offset
, &o
);
893 static uint64_t journal_file_entry_seqnum(
902 /* Picks a new sequence number for the entry we are about to add and returns it. */
904 ret
= le64toh(f
->header
->tail_entry_seqnum
) + 1;
907 /* If an external seqnum counter was passed, we update both the local and the external one,
908 * and set it to the maximum of both */
910 if (*seqnum
+ 1 > ret
)
916 f
->header
->tail_entry_seqnum
= htole64(ret
);
918 if (f
->header
->head_entry_seqnum
== 0)
919 f
->header
->head_entry_seqnum
= htole64(ret
);
924 int journal_file_append_object(
929 uint64_t *ret_offset
) {
938 assert(type
> OBJECT_UNUSED
&& type
< _OBJECT_TYPE_MAX
);
939 assert(size
>= sizeof(ObjectHeader
));
941 r
= journal_file_set_online(f
);
945 r
= journal_file_tail_end(f
, &p
);
949 r
= journal_file_allocate(f
, p
, size
);
953 r
= journal_file_move_to(f
, type
, false, p
, size
, &t
);
958 o
->object
= (ObjectHeader
) {
960 .size
= htole64(size
),
963 f
->header
->tail_object_offset
= htole64(p
);
964 f
->header
->n_objects
= htole64(le64toh(f
->header
->n_objects
) + 1);
975 static int journal_file_setup_data_hash_table(JournalFile
*f
) {
983 /* We estimate that we need 1 hash table entry per 768 bytes
984 of journal file and we want to make sure we never get
985 beyond 75% fill level. Calculate the hash table size for
986 the maximum file size based on these metrics. */
988 s
= (f
->metrics
.max_size
* 4 / 768 / 3) * sizeof(HashItem
);
989 if (s
< DEFAULT_DATA_HASH_TABLE_SIZE
)
990 s
= DEFAULT_DATA_HASH_TABLE_SIZE
;
992 log_debug("Reserving %"PRIu64
" entries in data hash table.", s
/ sizeof(HashItem
));
994 r
= journal_file_append_object(f
,
995 OBJECT_DATA_HASH_TABLE
,
996 offsetof(Object
, hash_table
.items
) + s
,
1001 memzero(o
->hash_table
.items
, s
);
1003 f
->header
->data_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1004 f
->header
->data_hash_table_size
= htole64(s
);
1009 static int journal_file_setup_field_hash_table(JournalFile
*f
) {
1017 /* We use a fixed size hash table for the fields as this
1018 * number should grow very slowly only */
1020 s
= DEFAULT_FIELD_HASH_TABLE_SIZE
;
1021 log_debug("Reserving %"PRIu64
" entries in field hash table.", s
/ sizeof(HashItem
));
1023 r
= journal_file_append_object(f
,
1024 OBJECT_FIELD_HASH_TABLE
,
1025 offsetof(Object
, hash_table
.items
) + s
,
1030 memzero(o
->hash_table
.items
, s
);
1032 f
->header
->field_hash_table_offset
= htole64(p
+ offsetof(Object
, hash_table
.items
));
1033 f
->header
->field_hash_table_size
= htole64(s
);
1038 int journal_file_map_data_hash_table(JournalFile
*f
) {
1046 if (f
->data_hash_table
)
1049 p
= le64toh(f
->header
->data_hash_table_offset
);
1050 s
= le64toh(f
->header
->data_hash_table_size
);
1052 r
= journal_file_move_to(f
,
1053 OBJECT_DATA_HASH_TABLE
,
1060 f
->data_hash_table
= t
;
1064 int journal_file_map_field_hash_table(JournalFile
*f
) {
1072 if (f
->field_hash_table
)
1075 p
= le64toh(f
->header
->field_hash_table_offset
);
1076 s
= le64toh(f
->header
->field_hash_table_size
);
1078 r
= journal_file_move_to(f
,
1079 OBJECT_FIELD_HASH_TABLE
,
1086 f
->field_hash_table
= t
;
1090 static int journal_file_link_field(
1101 assert(f
->field_hash_table
);
1105 if (o
->object
.type
!= OBJECT_FIELD
)
1108 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1112 /* This might alter the window we are looking at */
1113 o
->field
.next_hash_offset
= o
->field
.head_data_offset
= 0;
1116 p
= le64toh(f
->field_hash_table
[h
].tail_hash_offset
);
1118 f
->field_hash_table
[h
].head_hash_offset
= htole64(offset
);
1120 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1124 o
->field
.next_hash_offset
= htole64(offset
);
1127 f
->field_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1129 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
1130 f
->header
->n_fields
= htole64(le64toh(f
->header
->n_fields
) + 1);
1135 static int journal_file_link_data(
1146 assert(f
->data_hash_table
);
1150 if (o
->object
.type
!= OBJECT_DATA
)
1153 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1157 /* This might alter the window we are looking at */
1158 o
->data
.next_hash_offset
= o
->data
.next_field_offset
= 0;
1159 o
->data
.entry_offset
= o
->data
.entry_array_offset
= 0;
1160 o
->data
.n_entries
= 0;
1163 p
= le64toh(f
->data_hash_table
[h
].tail_hash_offset
);
1165 /* Only entry in the hash table is easy */
1166 f
->data_hash_table
[h
].head_hash_offset
= htole64(offset
);
1168 /* Move back to the previous data object, to patch in
1171 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1175 o
->data
.next_hash_offset
= htole64(offset
);
1178 f
->data_hash_table
[h
].tail_hash_offset
= htole64(offset
);
1180 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
1181 f
->header
->n_data
= htole64(le64toh(f
->header
->n_data
) + 1);
1186 static int next_hash_offset(
1189 le64_t
*next_hash_offset
,
1191 le64_t
*header_max_depth
) {
1195 nextp
= le64toh(READ_NOW(*next_hash_offset
));
1197 if (nextp
<= *p
) /* Refuse going in loops */
1198 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1199 "Detected hash item loop in %s, refusing.", f
->path
);
1203 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1204 if (header_max_depth
&& f
->writable
)
1205 *header_max_depth
= htole64(MAX(*depth
, le64toh(*header_max_depth
)));
1212 int journal_file_find_field_object_with_hash(
1214 const void *field
, uint64_t size
, uint64_t hash
,
1215 Object
**ret
, uint64_t *ret_offset
) {
1217 uint64_t p
, osize
, h
, m
, depth
= 0;
1222 assert(field
&& size
> 0);
1224 /* If the field hash table is empty, we can't find anything */
1225 if (le64toh(f
->header
->field_hash_table_size
) <= 0)
1228 /* Map the field hash table, if it isn't mapped yet. */
1229 r
= journal_file_map_field_hash_table(f
);
1233 osize
= offsetof(Object
, field
.payload
) + size
;
1235 m
= le64toh(READ_NOW(f
->header
->field_hash_table_size
)) / sizeof(HashItem
);
1240 p
= le64toh(f
->field_hash_table
[h
].head_hash_offset
);
1244 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1248 if (le64toh(o
->field
.hash
) == hash
&&
1249 le64toh(o
->object
.size
) == osize
&&
1250 memcmp(o
->field
.payload
, field
, size
) == 0) {
1260 r
= next_hash_offset(
1263 &o
->field
.next_hash_offset
,
1265 JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
) ? &f
->header
->field_hash_chain_depth
: NULL
);
1273 uint64_t journal_file_hash_data(
1279 assert(data
|| sz
== 0);
1281 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1282 * function use siphash. Old journal files use the Jenkins hash. */
1284 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
1285 return siphash24(data
, sz
, f
->header
->file_id
.bytes
);
1287 return jenkins_hash64(data
, sz
);
1290 int journal_file_find_field_object(
1292 const void *field
, uint64_t size
,
1293 Object
**ret
, uint64_t *ret_offset
) {
1296 assert(field
&& size
> 0);
1298 return journal_file_find_field_object_with_hash(
1301 journal_file_hash_data(f
, field
, size
),
1305 int journal_file_find_data_object_with_hash(
1307 const void *data
, uint64_t size
, uint64_t hash
,
1308 Object
**ret
, uint64_t *ret_offset
) {
1310 uint64_t p
, osize
, h
, m
, depth
= 0;
1315 assert(data
|| size
== 0);
1317 /* If there's no data hash table, then there's no entry. */
1318 if (le64toh(f
->header
->data_hash_table_size
) <= 0)
1321 /* Map the data hash table, if it isn't mapped yet. */
1322 r
= journal_file_map_data_hash_table(f
);
1326 osize
= offsetof(Object
, data
.payload
) + size
;
1328 m
= le64toh(READ_NOW(f
->header
->data_hash_table_size
)) / sizeof(HashItem
);
1333 p
= le64toh(f
->data_hash_table
[h
].head_hash_offset
);
1338 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1342 if (le64toh(o
->data
.hash
) != hash
)
1345 if (o
->object
.flags
& OBJECT_COMPRESSION_MASK
) {
1346 #if HAVE_COMPRESSION
1350 l
= le64toh(READ_NOW(o
->object
.size
));
1351 if (l
<= offsetof(Object
, data
.payload
))
1354 l
-= offsetof(Object
, data
.payload
);
1356 r
= decompress_blob(o
->object
.flags
& OBJECT_COMPRESSION_MASK
,
1357 o
->data
.payload
, l
, &f
->compress_buffer
, &rsize
, 0);
1361 if (rsize
== size
&&
1362 memcmp(f
->compress_buffer
, data
, size
) == 0) {
1373 return -EPROTONOSUPPORT
;
1375 } else if (le64toh(o
->object
.size
) == osize
&&
1376 memcmp(o
->data
.payload
, data
, size
) == 0) {
1388 r
= next_hash_offset(
1391 &o
->data
.next_hash_offset
,
1393 JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
) ? &f
->header
->data_hash_chain_depth
: NULL
);
1401 int journal_file_find_data_object(
1403 const void *data
, uint64_t size
,
1404 Object
**ret
, uint64_t *ret_offset
) {
1407 assert(data
|| size
== 0);
1409 return journal_file_find_data_object_with_hash(
1412 journal_file_hash_data(f
, data
, size
),
1416 bool journal_field_valid(const char *p
, size_t l
, bool allow_protected
) {
1417 /* We kinda enforce POSIX syntax recommendations for
1418 environment variables here, but make a couple of additional
1421 http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html */
1426 /* No empty field names */
1430 /* Don't allow names longer than 64 chars */
1434 /* Variables starting with an underscore are protected */
1435 if (!allow_protected
&& p
[0] == '_')
1438 /* Don't allow digits as first character */
1439 if (p
[0] >= '0' && p
[0] <= '9')
1442 /* Only allow A-Z0-9 and '_' */
1443 for (const char *a
= p
; a
< p
+ l
; a
++)
1444 if ((*a
< 'A' || *a
> 'Z') &&
1445 (*a
< '0' || *a
> '9') &&
1452 static int journal_file_append_field(
1454 const void *field
, uint64_t size
,
1455 Object
**ret
, uint64_t *ret_offset
) {
1463 assert(field
&& size
> 0);
1465 if (!journal_field_valid(field
, size
, true))
1468 hash
= journal_file_hash_data(f
, field
, size
);
1470 r
= journal_file_find_field_object_with_hash(f
, field
, size
, hash
, &o
, &p
);
1484 osize
= offsetof(Object
, field
.payload
) + size
;
1485 r
= journal_file_append_object(f
, OBJECT_FIELD
, osize
, &o
, &p
);
1489 o
->field
.hash
= htole64(hash
);
1490 memcpy(o
->field
.payload
, field
, size
);
1492 r
= journal_file_link_field(f
, o
, p
, hash
);
1496 /* The linking might have altered the window, so let's
1497 * refresh our pointer */
1498 r
= journal_file_move_to_object(f
, OBJECT_FIELD
, p
, &o
);
1503 r
= journal_file_hmac_put_object(f
, OBJECT_FIELD
, o
, p
);
1517 static int journal_file_append_data(
1519 const void *data
, uint64_t size
,
1520 Object
**ret
, uint64_t *ret_offset
) {
1522 uint64_t hash
, p
, fp
, osize
;
1524 int r
, compression
= 0;
1529 if (!data
|| size
== 0)
1532 hash
= journal_file_hash_data(f
, data
, size
);
1534 r
= journal_file_find_data_object_with_hash(f
, data
, size
, hash
, &o
, &p
);
1548 eq
= memchr(data
, '=', size
);
1552 osize
= offsetof(Object
, data
.payload
) + size
;
1553 r
= journal_file_append_object(f
, OBJECT_DATA
, osize
, &o
, &p
);
1557 o
->data
.hash
= htole64(hash
);
1559 #if HAVE_COMPRESSION
1560 if (JOURNAL_FILE_COMPRESS(f
) && size
>= f
->compress_threshold_bytes
) {
1563 compression
= compress_blob(data
, size
, o
->data
.payload
, size
- 1, &rsize
);
1565 if (compression
>= 0) {
1566 o
->object
.size
= htole64(offsetof(Object
, data
.payload
) + rsize
);
1567 o
->object
.flags
|= compression
;
1569 log_debug("Compressed data object %"PRIu64
" -> %zu using %s",
1570 size
, rsize
, object_compressed_to_string(compression
));
1572 /* Compression didn't work, we don't really care why, let's continue without compression */
1577 if (compression
== 0)
1578 memcpy_safe(o
->data
.payload
, data
, size
);
1580 r
= journal_file_link_data(f
, o
, p
, hash
);
1585 r
= journal_file_hmac_put_object(f
, OBJECT_DATA
, o
, p
);
1590 /* The linking might have altered the window, so let's
1591 * refresh our pointer */
1592 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1596 /* Create field object ... */
1597 r
= journal_file_append_field(f
, data
, (uint8_t*) eq
- (uint8_t*) data
, &fo
, &fp
);
1601 /* ... and link it in. */
1602 o
->data
.next_field_offset
= fo
->field
.head_data_offset
;
1603 fo
->field
.head_data_offset
= le64toh(p
);
1614 uint64_t journal_file_entry_n_items(Object
*o
) {
1618 if (o
->object
.type
!= OBJECT_ENTRY
)
1621 sz
= le64toh(READ_NOW(o
->object
.size
));
1622 if (sz
< offsetof(Object
, entry
.items
))
1625 return (sz
- offsetof(Object
, entry
.items
)) / sizeof(EntryItem
);
1628 uint64_t journal_file_entry_array_n_items(Object
*o
) {
1633 if (o
->object
.type
!= OBJECT_ENTRY_ARRAY
)
1636 sz
= le64toh(READ_NOW(o
->object
.size
));
1637 if (sz
< offsetof(Object
, entry_array
.items
))
1640 return (sz
- offsetof(Object
, entry_array
.items
)) / sizeof(uint64_t);
1643 uint64_t journal_file_hash_table_n_items(Object
*o
) {
1648 if (!IN_SET(o
->object
.type
, OBJECT_DATA_HASH_TABLE
, OBJECT_FIELD_HASH_TABLE
))
1651 sz
= le64toh(READ_NOW(o
->object
.size
));
1652 if (sz
< offsetof(Object
, hash_table
.items
))
1655 return (sz
- offsetof(Object
, hash_table
.items
)) / sizeof(HashItem
);
1658 static int link_entry_into_array(JournalFile
*f
,
1663 uint64_t n
= 0, ap
= 0, q
, i
, a
, hidx
;
1672 a
= le64toh(*first
);
1673 i
= hidx
= le64toh(READ_NOW(*idx
));
1676 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
1680 n
= journal_file_entry_array_n_items(o
);
1682 o
->entry_array
.items
[i
] = htole64(p
);
1683 *idx
= htole64(hidx
+ 1);
1689 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
1700 r
= journal_file_append_object(f
, OBJECT_ENTRY_ARRAY
,
1701 offsetof(Object
, entry_array
.items
) + n
* sizeof(uint64_t),
1707 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY_ARRAY
, o
, q
);
1712 o
->entry_array
.items
[i
] = htole64(p
);
1715 *first
= htole64(q
);
1717 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, ap
, &o
);
1721 o
->entry_array
.next_entry_array_offset
= htole64(q
);
1724 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
1725 f
->header
->n_entry_arrays
= htole64(le64toh(f
->header
->n_entry_arrays
) + 1);
1727 *idx
= htole64(hidx
+ 1);
1732 static int link_entry_into_array_plus_one(JournalFile
*f
,
1747 hidx
= le64toh(READ_NOW(*idx
));
1748 if (hidx
== UINT64_MAX
)
1751 *extra
= htole64(p
);
1755 i
= htole64(hidx
- 1);
1756 r
= link_entry_into_array(f
, first
, &i
, p
);
1761 *idx
= htole64(hidx
+ 1);
1765 static int journal_file_link_entry_item(JournalFile
*f
, Object
*o
, uint64_t offset
, uint64_t i
) {
1773 p
= le64toh(o
->entry
.items
[i
].object_offset
);
1774 r
= journal_file_move_to_object(f
, OBJECT_DATA
, p
, &o
);
1778 return link_entry_into_array_plus_one(f
,
1779 &o
->data
.entry_offset
,
1780 &o
->data
.entry_array_offset
,
1785 static int journal_file_link_entry(JournalFile
*f
, Object
*o
, uint64_t offset
) {
1794 if (o
->object
.type
!= OBJECT_ENTRY
)
1797 __sync_synchronize();
1799 /* Link up the entry itself */
1800 r
= link_entry_into_array(f
,
1801 &f
->header
->entry_array_offset
,
1802 &f
->header
->n_entries
,
1807 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1809 if (f
->header
->head_entry_realtime
== 0)
1810 f
->header
->head_entry_realtime
= o
->entry
.realtime
;
1812 f
->header
->tail_entry_realtime
= o
->entry
.realtime
;
1813 f
->header
->tail_entry_monotonic
= o
->entry
.monotonic
;
1815 /* Link up the items */
1816 n
= journal_file_entry_n_items(o
);
1817 for (uint64_t i
= 0; i
< n
; i
++) {
1818 r
= journal_file_link_entry_item(f
, o
, offset
, i
);
1826 static int journal_file_append_entry_internal(
1828 const dual_timestamp
*ts
,
1829 const sd_id128_t
*boot_id
,
1831 const EntryItem items
[], unsigned n_items
,
1833 Object
**ret
, uint64_t *ret_offset
) {
1841 assert(items
|| n_items
== 0);
1844 osize
= offsetof(Object
, entry
.items
) + (n_items
* sizeof(EntryItem
));
1846 r
= journal_file_append_object(f
, OBJECT_ENTRY
, osize
, &o
, &np
);
1850 o
->entry
.seqnum
= htole64(journal_file_entry_seqnum(f
, seqnum
));
1851 memcpy_safe(o
->entry
.items
, items
, n_items
* sizeof(EntryItem
));
1852 o
->entry
.realtime
= htole64(ts
->realtime
);
1853 o
->entry
.monotonic
= htole64(ts
->monotonic
);
1854 o
->entry
.xor_hash
= htole64(xor_hash
);
1856 f
->header
->boot_id
= *boot_id
;
1857 o
->entry
.boot_id
= f
->header
->boot_id
;
1860 r
= journal_file_hmac_put_object(f
, OBJECT_ENTRY
, o
, np
);
1865 r
= journal_file_link_entry(f
, o
, np
);
1878 void journal_file_post_change(JournalFile
*f
) {
1884 /* inotify() does not receive IN_MODIFY events from file
1885 * accesses done via mmap(). After each access we hence
1886 * trigger IN_MODIFY by truncating the journal file to its
1887 * current size which triggers IN_MODIFY. */
1889 __sync_synchronize();
1891 if (ftruncate(f
->fd
, f
->last_stat
.st_size
) < 0)
1892 log_debug_errno(errno
, "Failed to truncate file to its own size: %m");
1895 static int post_change_thunk(sd_event_source
*timer
, uint64_t usec
, void *userdata
) {
1898 journal_file_post_change(userdata
);
1903 static void schedule_post_change(JournalFile
*f
) {
1907 assert(f
->post_change_timer
);
1909 r
= sd_event_source_get_enabled(f
->post_change_timer
, NULL
);
1911 log_debug_errno(r
, "Failed to get ftruncate timer state: %m");
1917 r
= sd_event_source_set_time_relative(f
->post_change_timer
, f
->post_change_timer_period
);
1919 log_debug_errno(r
, "Failed to set time for scheduling ftruncate: %m");
1923 r
= sd_event_source_set_enabled(f
->post_change_timer
, SD_EVENT_ONESHOT
);
1925 log_debug_errno(r
, "Failed to enable scheduled ftruncate: %m");
1932 /* On failure, let's simply post the change immediately. */
1933 journal_file_post_change(f
);
1936 /* Enable coalesced change posting in a timer on the provided sd_event instance */
1937 int journal_file_enable_post_change_timer(JournalFile
*f
, sd_event
*e
, usec_t t
) {
1938 _cleanup_(sd_event_source_unrefp
) sd_event_source
*timer
= NULL
;
1942 assert_return(!f
->post_change_timer
, -EINVAL
);
1946 r
= sd_event_add_time(e
, &timer
, CLOCK_MONOTONIC
, 0, 0, post_change_thunk
, f
);
1950 r
= sd_event_source_set_enabled(timer
, SD_EVENT_OFF
);
1954 f
->post_change_timer
= TAKE_PTR(timer
);
1955 f
->post_change_timer_period
= t
;
1960 static int entry_item_cmp(const EntryItem
*a
, const EntryItem
*b
) {
1961 return CMP(le64toh(a
->object_offset
), le64toh(b
->object_offset
));
1964 static size_t remove_duplicate_entry_items(EntryItem items
[], size_t n
) {
1966 /* This function relies on the items array being sorted. */
1972 for (size_t i
= 1; i
< n
; i
++)
1973 if (items
[i
].object_offset
!= items
[j
- 1].object_offset
)
1974 items
[j
++] = items
[i
];
1979 int journal_file_append_entry(
1981 const dual_timestamp
*ts
,
1982 const sd_id128_t
*boot_id
,
1983 const struct iovec iovec
[], unsigned n_iovec
,
1985 Object
**ret
, uint64_t *ret_offset
) {
1989 uint64_t xor_hash
= 0;
1990 struct dual_timestamp _ts
;
1994 assert(iovec
&& n_iovec
> 0);
1997 if (!VALID_REALTIME(ts
->realtime
))
1998 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
1999 "Invalid realtime timestamp %" PRIu64
", refusing entry.",
2001 if (!VALID_MONOTONIC(ts
->monotonic
))
2002 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2003 "Invalid monotomic timestamp %" PRIu64
", refusing entry.",
2006 dual_timestamp_get(&_ts
);
2011 r
= journal_file_maybe_append_tag(f
, ts
->realtime
);
2016 items
= newa(EntryItem
, n_iovec
);
2018 for (size_t i
= 0; i
< n_iovec
; i
++) {
2022 r
= journal_file_append_data(f
, iovec
[i
].iov_base
, iovec
[i
].iov_len
, &o
, &p
);
2026 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2027 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2028 * specific record, and give records with otherwise identical position (i.e. match in seqno,
2029 * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
2030 * objects in each file is different since they are keyed. Hence let's calculate the Jenkins
2031 * hash here for that. This also has the benefit that cursors for old and new journal files
2032 * are completely identical (they include the XOR hash after all). For classic Jenkins-hash
2033 * files things are easier, we can just take the value from the stored record directly. */
2035 if (JOURNAL_HEADER_KEYED_HASH(f
->header
))
2036 xor_hash
^= jenkins_hash64(iovec
[i
].iov_base
, iovec
[i
].iov_len
);
2038 xor_hash
^= le64toh(o
->data
.hash
);
2040 items
[i
] = (EntryItem
) {
2041 .object_offset
= htole64(p
),
2042 .hash
= o
->data
.hash
,
2046 /* Order by the position on disk, in order to improve seek
2047 * times for rotating media. */
2048 typesafe_qsort(items
, n_iovec
, entry_item_cmp
);
2049 n_iovec
= remove_duplicate_entry_items(items
, n_iovec
);
2051 r
= journal_file_append_entry_internal(f
, ts
, boot_id
, xor_hash
, items
, n_iovec
, seqnum
, ret
, ret_offset
);
2053 /* If the memory mapping triggered a SIGBUS then we return an
2054 * IO error and ignore the error code passed down to us, since
2055 * it is very likely just an effect of a nullified replacement
2058 if (mmap_cache_fd_got_sigbus(f
->cache_fd
))
2061 if (f
->post_change_timer
)
2062 schedule_post_change(f
);
2064 journal_file_post_change(f
);
/* Cache record remembering where we last were within one entry-array chain, so repeated lookups in
 * the same chain don't have to re-walk it from the start. */
typedef struct ChainCacheItem {
        uint64_t first;      /* the array at the beginning of the chain */
        uint64_t array;      /* the cached array */
        uint64_t begin;      /* the first item in the cached array */
        uint64_t total;      /* the total number of items in all arrays before this one in the chain */
        uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
} ChainCacheItem;
2077 static void chain_cache_put(
2084 uint64_t last_index
) {
2087 /* If the chain item to cache for this chain is the
2088 * first one it's not worth caching anything */
2092 if (ordered_hashmap_size(h
) >= CHAIN_CACHE_MAX
) {
2093 ci
= ordered_hashmap_steal_first(h
);
2096 ci
= new(ChainCacheItem
, 1);
2103 if (ordered_hashmap_put(h
, &ci
->first
, ci
) < 0) {
2108 assert(ci
->first
== first
);
2113 ci
->last_index
= last_index
;
2116 static int generic_array_get(
2120 Object
**ret
, uint64_t *ret_offset
) {
2123 uint64_t p
= 0, a
, t
= 0;
2131 /* Try the chain cache first */
2132 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2133 if (ci
&& i
> ci
->total
) {
2142 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &o
);
2146 k
= journal_file_entry_array_n_items(o
);
2148 p
= le64toh(o
->entry_array
.items
[i
]);
2154 a
= le64toh(o
->entry_array
.next_entry_array_offset
);
2160 /* Let's cache this item for the next invocation */
2161 chain_cache_put(f
->chain_cache
, ci
, first
, a
, le64toh(o
->entry_array
.items
[0]), t
, i
);
2163 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2176 static int generic_array_get_plus_one(
2181 Object
**ret
, uint64_t *ret_offset
) {
2190 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, &o
);
2198 *ret_offset
= extra
;
2203 return generic_array_get(f
, first
, i
-1, ret
, ret_offset
);
2212 static int generic_array_bisect(
2217 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2218 direction_t direction
,
2220 uint64_t *ret_offset
,
2221 uint64_t *ret_idx
) {
2223 uint64_t a
, p
, t
= 0, i
= 0, last_p
= 0, last_index
= UINT64_MAX
;
2224 bool subtract_one
= false;
2225 Object
*o
, *array
= NULL
;
2230 assert(test_object
);
2232 /* Start with the first array in the chain */
2235 ci
= ordered_hashmap_get(f
->chain_cache
, &first
);
2236 if (ci
&& n
> ci
->total
&& ci
->begin
!= 0) {
2237 /* Ah, we have iterated this bisection array chain
2238 * previously! Let's see if we can skip ahead in the
2239 * chain, as far as the last time. But we can't jump
2240 * backwards in the chain, so let's check that
2243 r
= test_object(f
, ci
->begin
, needle
);
2247 if (r
== TEST_LEFT
) {
2248 /* OK, what we are looking for is right of the
2249 * begin of this EntryArray, so let's jump
2250 * straight to previously cached array in the
2256 last_index
= ci
->last_index
;
2261 uint64_t left
, right
, k
, lp
;
2263 r
= journal_file_move_to_object(f
, OBJECT_ENTRY_ARRAY
, a
, &array
);
2267 k
= journal_file_entry_array_n_items(array
);
2273 lp
= p
= le64toh(array
->entry_array
.items
[i
]);
2277 r
= test_object(f
, p
, needle
);
2278 if (r
== -EBADMSG
) {
2279 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2286 if (r
== TEST_FOUND
)
2287 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2289 if (r
== TEST_RIGHT
) {
2293 if (last_index
!= UINT64_MAX
) {
2294 assert(last_index
<= right
);
2296 /* If we cached the last index we
2297 * looked at, let's try to not to jump
2298 * too wildly around and see if we can
2299 * limit the range to look at early to
2300 * the immediate neighbors of the last
2301 * index we looked at. */
2303 if (last_index
> 0) {
2304 uint64_t x
= last_index
- 1;
2306 p
= le64toh(array
->entry_array
.items
[x
]);
2310 r
= test_object(f
, p
, needle
);
2314 if (r
== TEST_FOUND
)
2315 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2317 if (r
== TEST_RIGHT
)
2323 if (last_index
< right
) {
2324 uint64_t y
= last_index
+ 1;
2326 p
= le64toh(array
->entry_array
.items
[y
]);
2330 r
= test_object(f
, p
, needle
);
2334 if (r
== TEST_FOUND
)
2335 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2337 if (r
== TEST_RIGHT
)
2345 if (left
== right
) {
2346 if (direction
== DIRECTION_UP
)
2347 subtract_one
= true;
2353 assert(left
< right
);
2354 i
= (left
+ right
) / 2;
2356 p
= le64toh(array
->entry_array
.items
[i
]);
2360 r
= test_object(f
, p
, needle
);
2361 if (r
== -EBADMSG
) {
2362 log_debug_errno(r
, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2369 if (r
== TEST_FOUND
)
2370 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2372 if (r
== TEST_RIGHT
)
2380 if (direction
== DIRECTION_UP
) {
2382 subtract_one
= true;
2393 last_index
= UINT64_MAX
;
2394 a
= le64toh(array
->entry_array
.next_entry_array_offset
);
2400 if (subtract_one
&& t
== 0 && i
== 0)
2403 /* Let's cache this item for the next invocation */
2404 chain_cache_put(f
->chain_cache
, ci
, first
, a
, le64toh(array
->entry_array
.items
[0]), t
, subtract_one
? (i
> 0 ? i
-1 : UINT64_MAX
) : i
);
2406 if (subtract_one
&& i
== 0)
2408 else if (subtract_one
)
2409 p
= le64toh(array
->entry_array
.items
[i
-1]);
2411 p
= le64toh(array
->entry_array
.items
[i
]);
2413 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2424 *ret_idx
= t
+ i
+ (subtract_one
? -1 : 0);
2429 static int generic_array_bisect_plus_one(
2435 int (*test_object
)(JournalFile
*f
, uint64_t p
, uint64_t needle
),
2436 direction_t direction
,
2438 uint64_t *ret_offset
,
2439 uint64_t *ret_idx
) {
2442 bool step_back
= false;
2446 assert(test_object
);
2451 /* This bisects the array in object 'first', but first checks
2453 r
= test_object(f
, extra
, needle
);
2457 if (r
== TEST_FOUND
)
2458 r
= direction
== DIRECTION_DOWN
? TEST_RIGHT
: TEST_LEFT
;
2460 /* if we are looking with DIRECTION_UP then we need to first
2461 see if in the actual array there is a matching entry, and
2462 return the last one of that. But if there isn't any we need
2463 to return this one. Hence remember this, and return it
2466 step_back
= direction
== DIRECTION_UP
;
2468 if (r
== TEST_RIGHT
) {
2469 if (direction
== DIRECTION_DOWN
)
2475 r
= generic_array_bisect(f
, first
, n
-1, needle
, test_object
, direction
, ret
, ret_offset
, ret_idx
);
2477 if (r
== 0 && step_back
)
2480 if (r
> 0 && ret_idx
)
2486 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, extra
, &o
);
2494 *ret_offset
= extra
;
2502 _pure_
static int test_object_offset(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2508 else if (p
< needle
)
2514 static int test_object_seqnum(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2522 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2526 sq
= le64toh(READ_NOW(o
->entry
.seqnum
));
2529 else if (sq
< needle
)
2535 int journal_file_move_to_entry_by_seqnum(
2538 direction_t direction
,
2540 uint64_t *ret_offset
) {
2544 return generic_array_bisect(
2546 le64toh(f
->header
->entry_array_offset
),
2547 le64toh(f
->header
->n_entries
),
2551 ret
, ret_offset
, NULL
);
2554 static int test_object_realtime(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2562 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2566 rt
= le64toh(READ_NOW(o
->entry
.realtime
));
2569 else if (rt
< needle
)
2575 int journal_file_move_to_entry_by_realtime(
2578 direction_t direction
,
2580 uint64_t *ret_offset
) {
2584 return generic_array_bisect(
2586 le64toh(f
->header
->entry_array_offset
),
2587 le64toh(f
->header
->n_entries
),
2589 test_object_realtime
,
2591 ret
, ret_offset
, NULL
);
2594 static int test_object_monotonic(JournalFile
*f
, uint64_t p
, uint64_t needle
) {
2602 r
= journal_file_move_to_object(f
, OBJECT_ENTRY
, p
, &o
);
2606 m
= le64toh(READ_NOW(o
->entry
.monotonic
));
2609 else if (m
< needle
)
2615 static int find_data_object_by_boot_id(
2621 char t
[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
2623 sd_id128_to_string(boot_id
, t
+ 9);
2624 return journal_file_find_data_object(f
, t
, sizeof(t
) - 1, o
, b
);
2627 int journal_file_move_to_entry_by_monotonic(
2631 direction_t direction
,
2633 uint64_t *ret_offset
) {
2640 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, NULL
);
2646 return generic_array_bisect_plus_one(
2648 le64toh(o
->data
.entry_offset
),
2649 le64toh(o
->data
.entry_array_offset
),
2650 le64toh(o
->data
.n_entries
),
2652 test_object_monotonic
,
2654 ret
, ret_offset
, NULL
);
2657 void journal_file_reset_location(JournalFile
*f
) {
2658 f
->location_type
= LOCATION_HEAD
;
2659 f
->current_offset
= 0;
2660 f
->current_seqnum
= 0;
2661 f
->current_realtime
= 0;
2662 f
->current_monotonic
= 0;
2663 zero(f
->current_boot_id
);
2664 f
->current_xor_hash
= 0;
2667 void journal_file_save_location(JournalFile
*f
, Object
*o
, uint64_t offset
) {
2668 f
->location_type
= LOCATION_SEEK
;
2669 f
->current_offset
= offset
;
2670 f
->current_seqnum
= le64toh(o
->entry
.seqnum
);
2671 f
->current_realtime
= le64toh(o
->entry
.realtime
);
2672 f
->current_monotonic
= le64toh(o
->entry
.monotonic
);
2673 f
->current_boot_id
= o
->entry
.boot_id
;
2674 f
->current_xor_hash
= le64toh(o
->entry
.xor_hash
);
2677 int journal_file_compare_locations(JournalFile
*af
, JournalFile
*bf
) {
2684 assert(af
->location_type
== LOCATION_SEEK
);
2685 assert(bf
->location_type
== LOCATION_SEEK
);
2687 /* If contents, timestamps and seqnum match, these entries are
2689 if (sd_id128_equal(af
->current_boot_id
, bf
->current_boot_id
) &&
2690 af
->current_monotonic
== bf
->current_monotonic
&&
2691 af
->current_realtime
== bf
->current_realtime
&&
2692 af
->current_xor_hash
== bf
->current_xor_hash
&&
2693 sd_id128_equal(af
->header
->seqnum_id
, bf
->header
->seqnum_id
) &&
2694 af
->current_seqnum
== bf
->current_seqnum
)
2697 if (sd_id128_equal(af
->header
->seqnum_id
, bf
->header
->seqnum_id
)) {
2699 /* If this is from the same seqnum source, compare
2701 r
= CMP(af
->current_seqnum
, bf
->current_seqnum
);
2705 /* Wow! This is weird, different data but the same
2706 * seqnums? Something is borked, but let's make the
2707 * best of it and compare by time. */
2710 if (sd_id128_equal(af
->current_boot_id
, bf
->current_boot_id
)) {
2712 /* If the boot id matches, compare monotonic time */
2713 r
= CMP(af
->current_monotonic
, bf
->current_monotonic
);
2718 /* Otherwise, compare UTC time */
2719 r
= CMP(af
->current_realtime
, bf
->current_realtime
);
2723 /* Finally, compare by contents */
2724 return CMP(af
->current_xor_hash
, bf
->current_xor_hash
);
2727 static int bump_array_index(uint64_t *i
, direction_t direction
, uint64_t n
) {
2729 /* Increase or decrease the specified index, in the right direction. */
2731 if (direction
== DIRECTION_DOWN
) {
2746 static bool check_properly_ordered(uint64_t new_offset
, uint64_t old_offset
, direction_t direction
) {
2748 /* Consider it an error if any of the two offsets is uninitialized */
2749 if (old_offset
== 0 || new_offset
== 0)
2752 /* If we go down, the new offset must be larger than the old one. */
2753 return direction
== DIRECTION_DOWN
?
2754 new_offset
> old_offset
:
2755 new_offset
< old_offset
;
2758 int journal_file_next_entry(
2761 direction_t direction
,
2762 Object
**ret
, uint64_t *ret_offset
) {
2770 n
= le64toh(READ_NOW(f
->header
->n_entries
));
2775 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
2777 r
= generic_array_bisect(f
,
2778 le64toh(f
->header
->entry_array_offset
),
2779 le64toh(f
->header
->n_entries
),
2788 r
= bump_array_index(&i
, direction
, n
);
2793 /* And jump to it */
2795 r
= generic_array_get(f
,
2796 le64toh(f
->header
->entry_array_offset
),
2804 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2805 * the next one might work for us instead. */
2806 log_debug_errno(r
, "Entry item %" PRIu64
" is bad, skipping over it.", i
);
2808 r
= bump_array_index(&i
, direction
, n
);
2813 /* Ensure our array is properly ordered. */
2814 if (p
> 0 && !check_properly_ordered(ofs
, p
, direction
))
2815 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2816 "%s: entry array not properly ordered at entry %" PRIu64
,
2825 int journal_file_next_entry_for_data(
2827 Object
*o
, uint64_t p
,
2828 uint64_t data_offset
,
2829 direction_t direction
,
2830 Object
**ret
, uint64_t *ret_offset
) {
2837 assert(p
> 0 || !o
);
2839 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
2843 n
= le64toh(READ_NOW(d
->data
.n_entries
));
2848 i
= direction
== DIRECTION_DOWN
? 0 : n
- 1;
2850 if (o
->object
.type
!= OBJECT_ENTRY
)
2853 r
= generic_array_bisect_plus_one(f
,
2854 le64toh(d
->data
.entry_offset
),
2855 le64toh(d
->data
.entry_array_offset
),
2856 le64toh(d
->data
.n_entries
),
2866 r
= bump_array_index(&i
, direction
, n
);
2872 r
= generic_array_get_plus_one(f
,
2873 le64toh(d
->data
.entry_offset
),
2874 le64toh(d
->data
.entry_array_offset
),
2882 log_debug_errno(r
, "Data entry item %" PRIu64
" is bad, skipping over it.", i
);
2884 r
= bump_array_index(&i
, direction
, n
);
2889 /* Ensure our array is properly ordered. */
2890 if (p
> 0 && check_properly_ordered(ofs
, p
, direction
))
2891 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG
),
2892 "%s data entry array not properly ordered at entry %" PRIu64
,
2901 int journal_file_move_to_entry_by_offset_for_data(
2903 uint64_t data_offset
,
2905 direction_t direction
,
2906 Object
**ret
, uint64_t *ret_offset
) {
2913 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
2917 return generic_array_bisect_plus_one(
2919 le64toh(d
->data
.entry_offset
),
2920 le64toh(d
->data
.entry_array_offset
),
2921 le64toh(d
->data
.n_entries
),
2925 ret
, ret_offset
, NULL
);
2928 int journal_file_move_to_entry_by_monotonic_for_data(
2930 uint64_t data_offset
,
2933 direction_t direction
,
2934 Object
**ret
, uint64_t *ret_offset
) {
2942 /* First, seek by time */
2943 r
= find_data_object_by_boot_id(f
, boot_id
, &o
, &b
);
2949 r
= generic_array_bisect_plus_one(f
,
2950 le64toh(o
->data
.entry_offset
),
2951 le64toh(o
->data
.entry_array_offset
),
2952 le64toh(o
->data
.n_entries
),
2954 test_object_monotonic
,
2960 /* And now, continue seeking until we find an entry that
2961 * exists in both bisection arrays */
2967 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
2971 r
= generic_array_bisect_plus_one(f
,
2972 le64toh(d
->data
.entry_offset
),
2973 le64toh(d
->data
.entry_array_offset
),
2974 le64toh(d
->data
.n_entries
),
2982 r
= journal_file_move_to_object(f
, OBJECT_DATA
, b
, &o
);
2986 r
= generic_array_bisect_plus_one(f
,
2987 le64toh(o
->data
.entry_offset
),
2988 le64toh(o
->data
.entry_array_offset
),
2989 le64toh(o
->data
.n_entries
),
3011 int journal_file_move_to_entry_by_seqnum_for_data(
3013 uint64_t data_offset
,
3015 direction_t direction
,
3016 Object
**ret
, uint64_t *ret_offset
) {
3023 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
3027 return generic_array_bisect_plus_one(
3029 le64toh(d
->data
.entry_offset
),
3030 le64toh(d
->data
.entry_array_offset
),
3031 le64toh(d
->data
.n_entries
),
3035 ret
, ret_offset
, NULL
);
3038 int journal_file_move_to_entry_by_realtime_for_data(
3040 uint64_t data_offset
,
3042 direction_t direction
,
3043 Object
**ret
, uint64_t *ret_offset
) {
3050 r
= journal_file_move_to_object(f
, OBJECT_DATA
, data_offset
, &d
);
3054 return generic_array_bisect_plus_one(
3056 le64toh(d
->data
.entry_offset
),
3057 le64toh(d
->data
.entry_array_offset
),
3058 le64toh(d
->data
.n_entries
),
3060 test_object_realtime
,
3062 ret
, ret_offset
, NULL
);
3065 void journal_file_dump(JournalFile
*f
) {
3073 journal_file_print_header(f
);
3075 p
= le64toh(READ_NOW(f
->header
->header_size
));
3079 r
= journal_file_move_to_object(f
, OBJECT_UNUSED
, p
, &o
);
3083 s
= journal_object_type_to_string(o
->object
.type
);
3085 switch (o
->object
.type
) {
3090 printf("Type: %s seqnum=%"PRIu64
" monotonic=%"PRIu64
" realtime=%"PRIu64
"\n",
3092 le64toh(o
->entry
.seqnum
),
3093 le64toh(o
->entry
.monotonic
),
3094 le64toh(o
->entry
.realtime
));
3100 printf("Type: %s seqnum=%"PRIu64
" epoch=%"PRIu64
"\n",
3102 le64toh(o
->tag
.seqnum
),
3103 le64toh(o
->tag
.epoch
));
3108 printf("Type: %s \n", s
);
3110 printf("Type: unknown (%i)", o
->object
.type
);
3115 if (o
->object
.flags
& OBJECT_COMPRESSION_MASK
)
3116 printf("Flags: %s\n",
3117 object_compressed_to_string(o
->object
.flags
& OBJECT_COMPRESSION_MASK
));
3119 if (p
== le64toh(f
->header
->tail_object_offset
))
3122 p
+= ALIGN64(le64toh(o
->object
.size
));
3127 log_error("File corrupt");
3130 /* Note: the lifetime of the compound literal is the immediately surrounding block. */
3131 #define FORMAT_TIMESTAMP_SAFE(t) (FORMAT_TIMESTAMP(t) ?: " --- ")
3133 void journal_file_print_header(JournalFile
*f
) {
3139 printf("File path: %s\n"
3143 "Sequential number ID: %s\n"
3145 "Compatible flags:%s%s\n"
3146 "Incompatible flags:%s%s%s%s%s\n"
3147 "Header size: %"PRIu64
"\n"
3148 "Arena size: %"PRIu64
"\n"
3149 "Data hash table size: %"PRIu64
"\n"
3150 "Field hash table size: %"PRIu64
"\n"
3151 "Rotate suggested: %s\n"
3152 "Head sequential number: %"PRIu64
" (%"PRIx64
")\n"
3153 "Tail sequential number: %"PRIu64
" (%"PRIx64
")\n"
3154 "Head realtime timestamp: %s (%"PRIx64
")\n"
3155 "Tail realtime timestamp: %s (%"PRIx64
")\n"
3156 "Tail monotonic timestamp: %s (%"PRIx64
")\n"
3157 "Objects: %"PRIu64
"\n"
3158 "Entry objects: %"PRIu64
"\n",
3160 SD_ID128_TO_STRING(f
->header
->file_id
),
3161 SD_ID128_TO_STRING(f
->header
->machine_id
),
3162 SD_ID128_TO_STRING(f
->header
->boot_id
),
3163 SD_ID128_TO_STRING(f
->header
->seqnum_id
),
3164 f
->header
->state
== STATE_OFFLINE
? "OFFLINE" :
3165 f
->header
->state
== STATE_ONLINE
? "ONLINE" :
3166 f
->header
->state
== STATE_ARCHIVED
? "ARCHIVED" : "UNKNOWN",
3167 JOURNAL_HEADER_SEALED(f
->header
) ? " SEALED" : "",
3168 (le32toh(f
->header
->compatible_flags
) & ~HEADER_COMPATIBLE_ANY
) ? " ???" : "",
3169 JOURNAL_HEADER_COMPRESSED_XZ(f
->header
) ? " COMPRESSED-XZ" : "",
3170 JOURNAL_HEADER_COMPRESSED_LZ4(f
->header
) ? " COMPRESSED-LZ4" : "",
3171 JOURNAL_HEADER_COMPRESSED_ZSTD(f
->header
) ? " COMPRESSED-ZSTD" : "",
3172 JOURNAL_HEADER_KEYED_HASH(f
->header
) ? " KEYED-HASH" : "",
3173 (le32toh(f
->header
->incompatible_flags
) & ~HEADER_INCOMPATIBLE_ANY
) ? " ???" : "",
3174 le64toh(f
->header
->header_size
),
3175 le64toh(f
->header
->arena_size
),
3176 le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
),
3177 le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
),
3178 yes_no(journal_file_rotate_suggested(f
, 0, LOG_DEBUG
)),
3179 le64toh(f
->header
->head_entry_seqnum
), le64toh(f
->header
->head_entry_seqnum
),
3180 le64toh(f
->header
->tail_entry_seqnum
), le64toh(f
->header
->tail_entry_seqnum
),
3181 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->head_entry_realtime
)), le64toh(f
->header
->head_entry_realtime
),
3182 FORMAT_TIMESTAMP_SAFE(le64toh(f
->header
->tail_entry_realtime
)), le64toh(f
->header
->tail_entry_realtime
),
3183 FORMAT_TIMESPAN(le64toh(f
->header
->tail_entry_monotonic
), USEC_PER_MSEC
), le64toh(f
->header
->tail_entry_monotonic
),
3184 le64toh(f
->header
->n_objects
),
3185 le64toh(f
->header
->n_entries
));
3187 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_data
))
3188 printf("Data objects: %"PRIu64
"\n"
3189 "Data hash table fill: %.1f%%\n",
3190 le64toh(f
->header
->n_data
),
3191 100.0 * (double) le64toh(f
->header
->n_data
) / ((double) (le64toh(f
->header
->data_hash_table_size
) / sizeof(HashItem
))));
3193 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_fields
))
3194 printf("Field objects: %"PRIu64
"\n"
3195 "Field hash table fill: %.1f%%\n",
3196 le64toh(f
->header
->n_fields
),
3197 100.0 * (double) le64toh(f
->header
->n_fields
) / ((double) (le64toh(f
->header
->field_hash_table_size
) / sizeof(HashItem
))));
3199 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_tags
))
3200 printf("Tag objects: %"PRIu64
"\n",
3201 le64toh(f
->header
->n_tags
));
3202 if (JOURNAL_HEADER_CONTAINS(f
->header
, n_entry_arrays
))
3203 printf("Entry array objects: %"PRIu64
"\n",
3204 le64toh(f
->header
->n_entry_arrays
));
3206 if (JOURNAL_HEADER_CONTAINS(f
->header
, field_hash_chain_depth
))
3207 printf("Deepest field hash chain: %" PRIu64
"\n",
3208 f
->header
->field_hash_chain_depth
);
3210 if (JOURNAL_HEADER_CONTAINS(f
->header
, data_hash_chain_depth
))
3211 printf("Deepest data hash chain: %" PRIu64
"\n",
3212 f
->header
->data_hash_chain_depth
);
3214 if (fstat(f
->fd
, &st
) >= 0)
3215 printf("Disk usage: %s\n", FORMAT_BYTES((uint64_t) st
.st_blocks
* 512ULL));
3218 static int journal_file_warn_btrfs(JournalFile
*f
) {
3224 /* Before we write anything, check if the COW logic is turned
3225 * off on btrfs. Given our write pattern that is quite
3226 * unfriendly to COW file systems this should greatly improve
3227 * performance on COW file systems, such as btrfs, at the
3228 * expense of data integrity features (which shouldn't be too
3229 * bad, given that we do our own checksumming). */
3231 r
= fd_is_fs_type(f
->fd
, BTRFS_SUPER_MAGIC
);
3233 return log_warning_errno(r
, "Failed to determine if journal is on btrfs: %m");
3237 r
= read_attr_fd(f
->fd
, &attrs
);
3239 return log_warning_errno(r
, "Failed to read file attributes: %m");
3241 if (attrs
& FS_NOCOW_FL
) {
3242 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3246 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3247 "This is likely to slow down journal access substantially, please consider turning "
3248 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f
->path
);
/* Opens (or creates) a journal file and returns a fully initialized JournalFile in *ret.
 *
 * NOTE(review): this copy of the function is truncated — many original lines (the earlier
 * parameters such as fd/fname/flags/mode/compress/seal, the "int r" declaration, most
 * "if (r < 0)" error checks, several else-branches, closing braces and the final return)
 * appear to have been elided. All remaining tokens are kept byte-identical below; restore the
 * missing lines from the upstream source before building. */
int journal_file_open(
                uint64_t compress_threshold_bytes,
                JournalMetrics *metrics,
                MMapCache *mmap_cache,
                JournalFile *template,
                JournalFile **ret) {

        bool newly_created = false;

        /* Either an already-open fd or a file name must be provided. */
        assert(fd >= 0 || fname);

        /* Only read-only or read-write access is supported. */
        if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))

        /* Files we create ourselves must carry the .journal suffix. */
        if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))

        f = new(JournalFile, 1);

        *f = (JournalFile) {
                /* Writable iff the caller asked for more than read-only access. */
                .writable = (flags & O_ACCMODE) != O_RDONLY,
                .compress_zstd = compress,
                .compress_lz4 = compress,
                .compress_xz = compress,
                /* UINT64_MAX means "use the default"; otherwise enforce the minimum threshold. */
                .compress_threshold_bytes = compress_threshold_bytes == UINT64_MAX ?
                                            DEFAULT_COMPRESS_THRESHOLD :
                                            MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),

        /* We turn on keyed hashes by default, but provide an environment variable to turn them off, if
         * people really want that */
        r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
        log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring.");
        f->keyed_hash = true;

        /* On debug builds, log the effective settings, but only when they changed since the last
         * call (the static last_* variables cache the previously logged values). */
        if (DEBUG_LOGGING) {
                static int last_seal = -1, last_compress = -1, last_keyed_hash = -1;
                static uint64_t last_bytes = UINT64_MAX;

                if (last_seal != f->seal ||
                    last_keyed_hash != f->keyed_hash ||
                    last_compress != JOURNAL_FILE_COMPRESS(f) ||
                    last_bytes != f->compress_threshold_bytes) {

                        log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
                                  yes_no(f->seal), yes_no(f->keyed_hash), yes_no(JOURNAL_FILE_COMPRESS(f)),
                                  FORMAT_BYTES(f->compress_threshold_bytes));
                        last_seal = f->seal;
                        last_keyed_hash = f->keyed_hash;
                        last_compress = JOURNAL_FILE_COMPRESS(f);
                        last_bytes = f->compress_threshold_bytes;

        /* Reuse the caller-provided mmap cache, or create a private one.
         * NOTE(review): the if/else around these two assignments looks elided here. */
        f->mmap = mmap_cache_ref(mmap_cache);
        f->mmap = mmap_cache_new();

        f->path = strdup(fname);

        /* If we don't know the path, fill in something explanatory and vaguely useful */
        if (asprintf(&f->path, "/proc/self/%i", fd) < 0) {

        /* Per-file cache of entry array chains, keyed by array offset. */
        f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
        if (!f->chain_cache) {

        /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
         * or so, we likely fail quickly rather than block for long. For regular files O_NONBLOCK has no effect,
         * hence it doesn't hurt in that case. */
        f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);

        /* fds we opened here by us should also be closed by us. */
        /* Undo the O_NONBLOCK from above, now that open() itself can no longer block. */
        r = fd_nonblock(f->fd, false);

        f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd, prot_from_flags(flags));

        r = journal_file_fstat(f);

        /* An empty, writable file means we are creating it fresh. */
        if (f->last_stat.st_size == 0 && f->writable) {
                (void) journal_file_warn_btrfs(f);

                /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
                 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
                 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
                 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
                 * solely on mtime/atime/ctime of the file. */
                (void) fd_setcrtime(f->fd, 0);

                /* Try to load the FSPRG state, and if we can't, then
                 * just don't do sealing */
                r = journal_file_fss_load(f);

                r = journal_file_init_header(f, template);

                r = journal_file_fstat(f);

                newly_created = true;

        /* A journal file shorter than the minimum header cannot be valid. */
        if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {

        r = mmap_cache_fd_get(f->cache_fd, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h);

        /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
         * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error. */

        if (!newly_created) {
                r = journal_file_verify_header(f);

        if (!newly_created && f->writable) {
                r = journal_file_fss_load(f);

        /* Derive metrics from the file system the file sits on, unless a template supplies them. */
        journal_default_metrics(metrics, f->fd);
        f->metrics = *metrics;
        } else if (template)
                f->metrics = template->metrics;

        r = journal_file_refresh_header(f);

        r = journal_file_hmac_setup(f);

        /* Freshly created files still need their hash tables set up and (when sealing) a first tag. */
        if (newly_created) {
                r = journal_file_setup_field_hash_table(f);

                r = journal_file_setup_data_hash_table(f);

                r = journal_file_append_first_tag(f);

        if (mmap_cache_fd_got_sigbus(f->cache_fd)) {

        /* Inherit the post-change timer from the template, if there is one. */
        if (template && template->post_change_timer) {
                r = journal_file_enable_post_change_timer(
                                sd_event_source_get_event(template->post_change_timer),
                                template->post_change_timer_period);

        /* The file is opened now successfully, thus we take possession of any passed in fd. */

        /* Error path: tear down the partially initialized file. */
        if (f->cache_fd && mmap_cache_fd_got_sigbus(f->cache_fd))

        (void) journal_file_close(f);
/* Renames the journal file to its archived name
 * ("<prefix>@<seqnum-id>-<head-seqnum>-<head-realtime>.journal") and queues it for offlining with
 * STATE_ARCHIVED; the old path is optionally handed out via *ret_previous_path (ownership passes to
 * the caller).
 * NOTE(review): several lines (asserts, error returns, the archive-bit assignment and the final
 * return) appear to have been elided from this copy; tokens are kept byte-identical below. */
int journal_file_archive(JournalFile *f, char **ret_previous_path) {
        _cleanup_free_ char *p = NULL;

        /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
         * rotation, since we don't know the actual path, and couldn't rename the file hence. */
        if (path_startswith(f->path, "/proc/self/fd"))

        if (!endswith(f->path, ".journal"))

        /* Build the archive name: strip the trailing ".journal" (8 characters) and append
         * "@<seqnum id>-<head seqnum>-<head realtime>.journal". */
        if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64 "-%016"PRIx64 ".journal",
                     (int) strlen(f->path) - 8, f->path,
                     SD_ID128_FORMAT_VAL(f->header->seqnum_id),
                     le64toh(f->header->head_entry_seqnum),
                     le64toh(f->header->head_entry_realtime)) < 0)

        /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
         * ignore that case. */
        if (rename(f->path, p) < 0 && errno != ENOENT)

        /* Sync the rename to disk */
        (void) fsync_directory_of_file(f->fd);

        /* Hand the old path string to the caller, if requested. */
        if (ret_previous_path)
                *ret_previous_path = f->path;

        f->path = TAKE_PTR(p);

        /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
         * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
         * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
         * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
         * occurs. */

        /* Currently, btrfs is not very good with our write patterns and fragments heavily. Let's defrag our journal
         * files when we archive them */
        f->defrag_on_close = true;
/* Marks a journal file as unclean/corrupted by renaming it out of the way.
 * NOTE(review): a few lines (the second value argument of asprintf() — presumably a random
 * 64-bit token — plus error returns and the final "return 0;") appear to have been elided from
 * this copy; tokens are kept byte-identical below. */
int journal_file_dispose(int dir_fd, const char *fname) {
        _cleanup_free_ char *p = NULL;
        _cleanup_close_ int fd = -1;

        /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shutdown. Note that
         * this is done without looking into the file or changing any of its contents. The idea is that this is called
         * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
         * for writing anymore. */

        if (!endswith(fname, ".journal"))

        /* Derive the new name: strip the ".journal" suffix (8 characters) and append
         * "@<realtime timestamp>-<token>.journal~". */
        if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
                     (int) strlen(fname) - 8, fname,
                     now(CLOCK_REALTIME),

        if (renameat(dir_fd, fname, dir_fd, p) < 0)

        /* btrfs doesn't cope well with our write pattern and fragments heavily. Let's defrag all files we rotate */
        fd = openat(dir_fd, p, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
        /* Defragmentation is best-effort: failures are logged at debug level and otherwise ignored. */
        log_debug_errno(errno, "Failed to open file for defragmentation/FS_NOCOW_FL, ignoring: %m");
        (void) chattr_fd(fd, 0, FS_NOCOW_FL, NULL);
        (void) btrfs_defrag_fd(fd);
/* Copies one entry object (at offset p in 'from') into journal file 'to': every data object the
 * entry references is re-appended to 'to' (decompressed on the way if needed), the entry's XOR hash
 * is recomputed, and finally a new entry object pointing at the copies is appended.
 * NOTE(review): this copy of the function is truncated — local declarations (r, ts, items, l, t,
 * le_hash, data, rsize, u, h), asserts, "if (r < 0)" checks, the #else/#endif of the
 * HAVE_COMPRESSION conditional and several closing braces appear to have been elided; tokens are
 * kept byte-identical below. */
int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
        uint64_t q, n, xor_hash = 0;
        const sd_id128_t *boot_id;

        /* Carry over the original entry's timestamps and boot ID. */
        ts = (dual_timestamp) {
                .monotonic = le64toh(o->entry.monotonic),
                .realtime = le64toh(o->entry.realtime),
        boot_id = &o->entry.boot_id;

        n = journal_file_entry_n_items(o);
        items = newa(EntryItem, n);

        for (uint64_t i = 0; i < n; i++) {
                q = le64toh(o->entry.items[i].object_offset);
                le_hash = o->entry.items[i].hash;

                r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);

                /* The hash stored in the entry item must match the data object's own hash. */
                if (le_hash != o->data.hash)

                /* READ_NOW() forces a single read of the mmap-backed size field. */
                l = le64toh(READ_NOW(o->object.size));
                if (l < offsetof(Object, data.payload))

                l -= offsetof(Object, data.payload);

                /* We hit the limit on 32bit machines */
                if ((uint64_t) t != l)

                if (o->object.flags & OBJECT_COMPRESSION_MASK) {
#if HAVE_COMPRESSION
                        r = decompress_blob(
                                        o->object.flags & OBJECT_COMPRESSION_MASK,
                                        &from->compress_buffer, &rsize,

                        data = from->compress_buffer;
                        /* Built without compression support: compressed objects cannot be copied. */
                        return -EPROTONOSUPPORT;
                        data = o->data.payload;

                r = journal_file_append_data(to, data, l, &u, &h);

                /* Keyed-hash files XOR a freshly computed jenkins hash; otherwise the stored hash is used.
                 * NOTE(review): the "else" between these two statements looks elided. */
                if (JOURNAL_HEADER_KEYED_HASH(to->header))
                        xor_hash ^= jenkins_hash64(data, l);
                        xor_hash ^= le64toh(u->data.hash);

                items[i] = (EntryItem) {
                        .object_offset = htole64(h),
                        .hash = u->data.hash,

                /* Re-resolve the source entry (the mapping may have been invalidated above). */
                r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);

        r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n, NULL, NULL, NULL);

        if (mmap_cache_fd_got_sigbus(to->cache_fd))
3713 void journal_reset_metrics(JournalMetrics
*m
) {
3716 /* Set everything to "pick automatic values". */
3718 *m
= (JournalMetrics
) {
3719 .min_use
= UINT64_MAX
,
3720 .max_use
= UINT64_MAX
,
3721 .min_size
= UINT64_MAX
,
3722 .max_size
= UINT64_MAX
,
3723 .keep_free
= UINT64_MAX
,
3724 .n_max_files
= UINT64_MAX
,
/* Fills in any metrics field still set to UINT64_MAX ("automatic") with a value derived from the
 * size of the file system that fd lives on, then clamps everything to sane bounds and page-aligns.
 * NOTE(review): some lines (the "struct statvfs ss" declaration, asserts, several else/closing
 * braces, the MAX_SIZE_UPPER and KEEP_FREE_UPPER bounds of the MIN() calls, and the final
 * n_max_files log argument) appear to have been elided from this copy; tokens are kept
 * byte-identical below. */
void journal_default_metrics(JournalMetrics *m, int fd) {
        uint64_t fs_size = 0;

        /* Determine the size of the backing file system; on failure fs_size stays 0 and the
         * fallback constants below are used instead. */
        if (fstatvfs(fd, &ss) >= 0)
                fs_size = ss.f_frsize * ss.f_blocks;
        log_debug_errno(errno, "Failed to determine disk size: %m");

        if (m->max_use == UINT64_MAX) {
                m->max_use = CLAMP(PAGE_ALIGN(fs_size / 10), /* 10% of file system size */
                                   MAX_USE_LOWER, MAX_USE_UPPER);
                /* NOTE(review): presumably the fallback branch when fs_size is unknown. */
                m->max_use = MAX_USE_LOWER;
        m->max_use = PAGE_ALIGN(m->max_use);

        /* max_use must leave room for at least two minimum-sized journal files. */
        if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
                m->max_use = JOURNAL_FILE_SIZE_MIN*2;

        if (m->min_use == UINT64_MAX) {
                m->min_use = CLAMP(PAGE_ALIGN(fs_size / 50), /* 2% of file system size */
                                   MIN_USE_LOW, MIN_USE_HIGH);
                m->min_use = MIN_USE_LOW;

        if (m->min_use > m->max_use)
                m->min_use = m->max_use;

        if (m->max_size == UINT64_MAX)
                m->max_size = MIN(PAGE_ALIGN(m->max_use / 8), /* 8 chunks */
        m->max_size = PAGE_ALIGN(m->max_size);

        if (m->max_size != 0) {
                if (m->max_size < JOURNAL_FILE_SIZE_MIN)
                        m->max_size = JOURNAL_FILE_SIZE_MIN;

                /* Keep the invariant max_use >= 2 * max_size. */
                if (m->max_use != 0 && m->max_size*2 > m->max_use)
                        m->max_use = m->max_size*2;

        if (m->min_size == UINT64_MAX)
                m->min_size = JOURNAL_FILE_SIZE_MIN;

        /* Page-align min_size and clamp it between the absolute minimum and max_size
         * (?: treats a max_size of 0, i.e. "unlimited", as no upper bound). */
        m->min_size = CLAMP(PAGE_ALIGN(m->min_size),
                            JOURNAL_FILE_SIZE_MIN,
                            m->max_size ?: UINT64_MAX);

        if (m->keep_free == UINT64_MAX) {
                m->keep_free = MIN(PAGE_ALIGN(fs_size / 20), /* 5% of file system size */
                m->keep_free = DEFAULT_KEEP_FREE;

        if (m->n_max_files == UINT64_MAX)
                m->n_max_files = DEFAULT_N_MAX_FILES;

        log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
                  FORMAT_BYTES(m->min_use),
                  FORMAT_BYTES(m->max_use),
                  FORMAT_BYTES(m->max_size),
                  FORMAT_BYTES(m->min_size),
                  FORMAT_BYTES(m->keep_free),
3806 int journal_file_get_cutoff_realtime_usec(JournalFile
*f
, usec_t
*from
, usec_t
*to
) {
3812 if (f
->header
->head_entry_realtime
== 0)
3815 *from
= le64toh(f
->header
->head_entry_realtime
);
3819 if (f
->header
->tail_entry_realtime
== 0)
3822 *to
= le64toh(f
->header
->tail_entry_realtime
);
/* Returns the monotonic timestamp range (from = first entry, to = last entry) this journal file
 * covers for the given boot ID: the boot ID is looked up as a data object, then the first and last
 * entries referencing it are resolved and their monotonic fields read.
 * NOTE(review): local declarations (Object *o, uint64_t p, int r), asserts, "if (from)"/"if (to)"
 * guards, error checks and returns appear to have been elided from this copy; tokens are kept
 * byte-identical below. */
int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
        r = find_data_object_by_boot_id(f, boot_id, &o, &p);

        if (le64toh(o->data.n_entries) <= 0)

        /* The first entry referencing this boot ID provides the lower bound. */
        r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);

        *from = le64toh(o->entry.monotonic);

        /* Re-resolve the data object — the mapping above may have invalidated o. */
        r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);

        /* The last entry (index n_entries - 1) provides the upper bound. */
        r = generic_array_get_plus_one(f,
                                       le64toh(o->data.entry_offset),
                                       le64toh(o->data.entry_array_offset),
                                       le64toh(o->data.n_entries)-1,

        *to = le64toh(o->entry.monotonic);
/* Heuristics deciding whether this journal file should be rotated: outdated header, hash tables
 * filled beyond 75%, overly deep hash chains (possible collision attack), data objects without any
 * field indexing, or an oldest entry exceeding the configured retention time.
 * NOTE(review): several lines (asserts, the log_full()/log_full_errno() call heads in front of some
 * of the message strings, "return true;"/"return false;" statements and closing braces) appear to
 * have been elided from this copy; tokens are kept byte-identical below. */
bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec, int log_level) {

        /* If we gained new header fields we gained new features,
         * hence suggest a rotation */
        if (le64toh(f->header->header_size) < sizeof(Header)) {
                log_full(log_level, "%s uses an outdated header, suggesting rotation.", f->path);

        /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
         * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
         * need the n_data field, which only exists in newer versions. */

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                /* fill level > 75%  <=>  n_data * 4 > bucket count * 3 */
                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                        "Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
                        100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
                        le64toh(f->header->n_data),
                        le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
                        (unsigned long long) f->last_stat.st_size,
                        f->last_stat.st_size / le64toh(f->header->n_data));

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                /* Same 75% fill-level test for the field hash table. */
                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                        "Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
                        100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
                        le64toh(f->header->n_fields),
                        le64toh(f->header->field_hash_table_size) / sizeof(HashItem));

        /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
         * longest chain is longer than some threshold, let's suggest rotation. */
        if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) &&
            le64toh(f->header->data_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
                "Data hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
                f->path, le64toh(f->header->data_hash_chain_depth));

        if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) &&
            le64toh(f->header->field_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
                "Field hash table of %s has deepest hash chain of length at %" PRIu64 ", suggesting rotation.",
                f->path, le64toh(f->header->field_hash_chain_depth));

        /* Are the data objects properly indexed by field objects? */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
            JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
            le64toh(f->header->n_data) > 0 &&
            le64toh(f->header->n_fields) == 0) {
                "Data objects of %s are not indexed by field objects, suggesting rotation.",

        /* Finally: is the oldest entry older than the configured retention time? */
        if (max_file_usec > 0) {
                h = le64toh(f->header->head_entry_realtime);
                t = now(CLOCK_REALTIME);

                if (h > 0 && t > h + max_file_usec) {
                        "Oldest entry in %s is older than the configured file retention duration (%s), suggesting rotation.",
                        f->path, FORMAT_TIMESPAN(max_file_usec, USEC_PER_SEC));
3955 static const char * const journal_object_type_table
[] = {
3956 [OBJECT_UNUSED
] = "unused",
3957 [OBJECT_DATA
] = "data",
3958 [OBJECT_FIELD
] = "field",
3959 [OBJECT_ENTRY
] = "entry",
3960 [OBJECT_DATA_HASH_TABLE
] = "data hash table",
3961 [OBJECT_FIELD_HASH_TABLE
] = "field hash table",
3962 [OBJECT_ENTRY_ARRAY
] = "entry array",
3963 [OBJECT_TAG
] = "tag",
3966 DEFINE_STRING_TABLE_LOOKUP_TO_STRING(journal_object_type
, ObjectType
);