]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/libsystemd/sd-journal/journal-file.c
localed: reload PID1 configuration after modifying /etc/locale.conf
[thirdparty/systemd.git] / src / libsystemd / sd-journal / journal-file.c
1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <linux/fs.h>
6 #include <linux/magic.h>
7 #include <pthread.h>
8 #include <stddef.h>
9 #include <sys/mman.h>
10 #include <sys/statvfs.h>
11 #include <sys/uio.h>
12 #include <unistd.h>
13
14 #include "sd-event.h"
15
16 #include "alloc-util.h"
17 #include "chattr-util.h"
18 #include "compress.h"
19 #include "env-util.h"
20 #include "fd-util.h"
21 #include "format-util.h"
22 #include "fs-util.h"
23 #include "journal-authenticate.h"
24 #include "journal-def.h"
25 #include "journal-file.h"
26 #include "lookup3.h"
27 #include "memory-util.h"
28 #include "path-util.h"
29 #include "random-util.h"
30 #include "set.h"
31 #include "sort-util.h"
32 #include "stat-util.h"
33 #include "string-table.h"
34 #include "string-util.h"
35 #include "strv.h"
36 #include "sync-util.h"
37 #include "xattr-util.h"
38
39 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
40 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
41
42 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
43 #define MIN_COMPRESS_THRESHOLD (8ULL)
44
45 /* This is the minimum journal file size */
46 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
47 #define JOURNAL_COMPACT_SIZE_MAX UINT32_MAX /* 4 GiB */
48
49 /* These are the lower and upper bounds if we deduce the max_use value
50 * from the file system size */
51 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
52 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
53
54 /* Those are the lower and upper bounds for the minimal use limit,
55 * i.e. how much we'll use even if keep_free suggests otherwise. */
56 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
57 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
58
59 /* This is the upper bound if we deduce max_size from max_use */
60 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
61
62 /* This is the upper bound if we deduce the keep_free value from the
63 * file system size */
64 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
65
66 /* This is the keep_free value when we can't determine the system
67 * size */
68 #define DEFAULT_KEEP_FREE (1024 * 1024ULL) /* 1 MB */
69
70 /* This is the default maximum number of journal files to keep around. */
71 #define DEFAULT_N_MAX_FILES 100
72
73 /* n_data was the first entry we added after the initial file format design */
74 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
75
76 /* How many entries to keep in the entry array chain cache at max */
77 #define CHAIN_CACHE_MAX 20
78
79 /* How much to increase the journal file size at once each time we allocate something new. */
80 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL) /* 8MB */
81
82 /* Reread fstat() of the file for detecting deletions at least this often */
83 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
84
85 /* The mmap context to use for the header we pick as one above the last defined typed */
86 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
87
88 /* Longest hash chain to rotate after */
89 #define HASH_CHAIN_DEPTH_MAX 100
90
91 #ifdef __clang__
92 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
93 #endif
94
/* Translate an open(2) access mode (O_RDONLY/O_WRONLY/O_RDWR) into the matching mmap(2)
 * protection bits. */
static int mmap_prot_from_open_flags(int flags) {
        int accmode = flags & O_ACCMODE;

        if (accmode == O_RDONLY)
                return PROT_READ;
        if (accmode == O_WRONLY)
                return PROT_WRITE;
        if (accmode == O_RDWR)
                return PROT_READ|PROT_WRITE;

        assert_not_reached();
}
107
108 int journal_file_tail_end_by_pread(JournalFile *f, uint64_t *ret_offset) {
109 uint64_t p;
110 int r;
111
112 assert(f);
113 assert(f->header);
114 assert(ret_offset);
115
116 /* Same as journal_file_tail_end_by_mmap() below, but operates with pread() to avoid the mmap cache
117 * (and thus is thread safe) */
118
119 p = le64toh(f->header->tail_object_offset);
120 if (p == 0)
121 p = le64toh(f->header->header_size);
122 else {
123 Object tail;
124 uint64_t sz;
125
126 r = journal_file_read_object_header(f, OBJECT_UNUSED, p, &tail);
127 if (r < 0)
128 return r;
129
130 sz = le64toh(tail.object.size);
131 if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
132 return -EBADMSG;
133
134 sz = ALIGN64(sz);
135 if (p > UINT64_MAX - sz)
136 return -EBADMSG;
137
138 p += sz;
139 }
140
141 *ret_offset = p;
142
143 return 0;
144 }
145
146 int journal_file_tail_end_by_mmap(JournalFile *f, uint64_t *ret_offset) {
147 uint64_t p;
148 int r;
149
150 assert(f);
151 assert(f->header);
152 assert(ret_offset);
153
154 /* Same as journal_file_tail_end_by_pread() above, but operates with the usual mmap logic */
155
156 p = le64toh(f->header->tail_object_offset);
157 if (p == 0)
158 p = le64toh(f->header->header_size);
159 else {
160 Object *tail;
161 uint64_t sz;
162
163 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
164 if (r < 0)
165 return r;
166
167 sz = le64toh(READ_NOW(tail->object.size));
168 if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
169 return -EBADMSG;
170
171 sz = ALIGN64(sz);
172 if (p > UINT64_MAX - sz)
173 return -EBADMSG;
174
175 p += sz;
176 }
177
178 *ret_offset = p;
179
180 return 0;
181 }
182
183 int journal_file_set_offline_thread_join(JournalFile *f) {
184 int r;
185
186 assert(f);
187
188 if (f->offline_state == OFFLINE_JOINED)
189 return 0;
190
191 r = pthread_join(f->offline_thread, NULL);
192 if (r)
193 return -r;
194
195 f->offline_state = OFFLINE_JOINED;
196
197 if (mmap_cache_fd_got_sigbus(f->cache_fd))
198 return -EIO;
199
200 return 0;
201 }
202
/* Transitions the file into STATE_ONLINE so entries may be appended. If an asynchronous offline
 * operation is in flight, tries to cancel it (when it is still syncing) or waits for it to finish
 * (when it is already offlining). Returns 0 on success, -EPERM if not writable, -EINVAL on a bad
 * fd/header or non-onlinable on-disk state, -EIO if the mmap cache caught a SIGBUS. */
static int journal_file_set_online(JournalFile *f) {
        bool wait = true;

        assert(f);

        if (!journal_file_writable(f))
                return -EPERM;

        if (f->fd < 0 || !f->header)
                return -EINVAL;

        /* The offline thread advances f->offline_state concurrently; each CAS below tries to divert
         * it to OFFLINE_CANCEL. A failed CAS means the state changed under us — re-examine it. */
        while (wait) {
                switch (f->offline_state) {
                case OFFLINE_JOINED:
                        /* No offline thread, no need to wait. */
                        wait = false;
                        break;

                case OFFLINE_SYNCING: {
                        OfflineState tmp_state = OFFLINE_SYNCING;
                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                continue;
                }
                        /* Canceled syncing prior to offlining, no need to wait. */
                        wait = false;
                        break;

                case OFFLINE_AGAIN_FROM_SYNCING: {
                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_SYNCING;
                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                continue;
                }
                        /* Canceled restart from syncing, no need to wait. */
                        wait = false;
                        break;

                case OFFLINE_AGAIN_FROM_OFFLINING: {
                        OfflineState tmp_state = OFFLINE_AGAIN_FROM_OFFLINING;
                        if (!__atomic_compare_exchange_n(&f->offline_state, &tmp_state, OFFLINE_CANCEL,
                                                         false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST))
                                continue;
                }
                        /* Canceled restart from offlining, must wait for offlining to complete however. */
                        _fallthrough_;
                default: {
                        int r;

                        r = journal_file_set_offline_thread_join(f);
                        if (r < 0)
                                return r;

                        wait = false;
                        break;
                }
                }
        }

        if (mmap_cache_fd_got_sigbus(f->cache_fd))
                return -EIO;

        switch (f->header->state) {
        case STATE_ONLINE:
                /* Already online, nothing to do. */
                return 0;

        case STATE_OFFLINE:
                /* Flip the on-disk state and flush it, so a later crash is detectable as unclean. */
                f->header->state = STATE_ONLINE;
                (void) fsync(f->fd);
                return 0;

        default:
                /* Any other state (e.g. archived) may not be brought back online. */
                return -EINVAL;
        }
}
278
/* Releases all resources held by the journal file object and frees it. NULL-safe. Always returns
 * NULL, enabling the "f = journal_file_close(f);" idiom. */
JournalFile* journal_file_close(JournalFile *f) {
        if (!f)
                return NULL;

        if (f->cache_fd)
                mmap_cache_fd_free(f->cache_fd);

        /* Only close the fd if we own it (close_fd set); it may have been passed in by the caller. */
        if (f->close_fd)
                safe_close(f->fd);
        free(f->path);

        /* Frees the hashmap and every cached chain entry it owns. */
        ordered_hashmap_free_free(f->chain_cache);

#if HAVE_COMPRESSION
        free(f->compress_buffer);
#endif

#if HAVE_GCRYPT
        /* fss_file is an mmap()ed region when set; otherwise the FSPRG state was heap-allocated. */
        if (f->fss_file)
                munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
        else
                free(f->fsprg_state);

        free(f->fsprg_seed);

        if (f->hmac)
                gcry_md_close(f->hmac);
#endif

        return mfree(f);
}
310
/* Whether keyed hashing shall be used for new files, controlled via $SYSTEMD_JOURNAL_KEYED_HASH.
 * Defaults to true when the variable is unset or unparsable. */
static bool keyed_hash_requested(void) {
        int r;

        r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
        if (r < 0) {
                if (r != -ENXIO) /* -ENXIO == variable not set at all */
                        log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring: %m");
                return true;
        }

        return r;
}
322
/* Whether the compact file format shall be used for new files, controlled via
 * $SYSTEMD_JOURNAL_COMPACT. Defaults to true when the variable is unset or unparsable. */
static bool compact_mode_requested(void) {
        int r;

        r = getenv_bool("SYSTEMD_JOURNAL_COMPACT");
        if (r < 0) {
                if (r != -ENXIO) /* -ENXIO == variable not set at all */
                        log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_COMPACT environment variable, ignoring: %m");
                return true;
        }

        return r;
}
334
/* Writes a fresh file header at offset 0 of a newly created journal file. If 'template' is given
 * the new file continues that file's seqnum series; otherwise it starts a new series keyed by the
 * new file's random id. Returns 0 on success, a negative errno otherwise. */
static int journal_file_init_header(JournalFile *f, JournalFileFlags file_flags, JournalFile *template) {
        Header h = {};
        ssize_t k;
        bool seal = false;
        int r;

        assert(f);

#if HAVE_GCRYPT
        /* Try to load the FSPRG state, and if we can't, then just don't do sealing */
        seal = FLAGS_SET(file_flags, JOURNAL_SEAL) && journal_file_fss_load(f) >= 0;
#endif

        memcpy(h.signature, HEADER_SIGNATURE, 8);
        h.header_size = htole64(ALIGN64(sizeof(h)));

        /* Multiplying a flag by a boolean selects the flag only when the feature is enabled. */
        h.incompatible_flags |= htole32(
                        FLAGS_SET(file_flags, JOURNAL_COMPRESS) *
                        COMPRESSION_TO_HEADER_INCOMPATIBLE_FLAG(DEFAULT_COMPRESSION) |
                        keyed_hash_requested() * HEADER_INCOMPATIBLE_KEYED_HASH |
                        compact_mode_requested() * HEADER_INCOMPATIBLE_COMPACT);

        h.compatible_flags = htole32(seal * HEADER_COMPATIBLE_SEALED);

        /* Every journal file gets a unique random id. */
        r = sd_id128_randomize(&h.file_id);
        if (r < 0)
                return r;

        if (template) {
                /* Continue the seqnum series of the file we are rotating away from. */
                h.seqnum_id = template->header->seqnum_id;
                h.tail_entry_seqnum = template->header->tail_entry_seqnum;
        } else
                h.seqnum_id = h.file_id;

        k = pwrite(f->fd, &h, sizeof(h), 0);
        if (k < 0)
                return -errno;

        if (k != sizeof(h))
                return -EIO; /* short write of the header */

        return 0;
}
378
379 static int journal_file_refresh_header(JournalFile *f) {
380 int r;
381
382 assert(f);
383 assert(f->header);
384
385 r = sd_id128_get_machine(&f->header->machine_id);
386 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
387 /* We don't have a machine-id, let's continue without */
388 zero(f->header->machine_id);
389 else if (r < 0)
390 return r;
391
392 r = sd_id128_get_boot(&f->header->boot_id);
393 if (r < 0)
394 return r;
395
396 r = journal_file_set_online(f);
397
398 /* Sync the online state to disk; likely just created a new file, also sync the directory this file
399 * is located in. */
400 (void) fsync_full(f->fd);
401
402 return r;
403 }
404
/* Checks the file's compatible or incompatible feature flag word against what this build supports,
 * logging which unknown or compiled-out features are in use. Returns true if the file carries flags
 * we cannot handle (i.e. the caller should refuse the file), false otherwise. */
static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
        const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
                supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
        const char *type = compatible ? "compatible" : "incompatible";
        uint32_t flags;

        assert(f);
        assert(f->header);

        flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);

        if (flags & ~supported) {
                /* Bits outside 'any' are flags this systemd version has never heard of. */
                if (flags & ~any)
                        log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
                                  f->path, type, flags & ~any);
                /* Bits inside 'any' but outside 'supported' are features disabled at build time. */
                flags = (flags & any) & ~supported;
                if (flags) {
                        const char* strv[6];
                        size_t n = 0;
                        _cleanup_free_ char *t = NULL;

                        if (compatible) {
                                if (flags & HEADER_COMPATIBLE_SEALED)
                                        strv[n++] = "sealed";
                        } else {
                                if (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ)
                                        strv[n++] = "xz-compressed";
                                if (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4)
                                        strv[n++] = "lz4-compressed";
                                if (flags & HEADER_INCOMPATIBLE_COMPRESSED_ZSTD)
                                        strv[n++] = "zstd-compressed";
                                if (flags & HEADER_INCOMPATIBLE_KEYED_HASH)
                                        strv[n++] = "keyed-hash";
                                if (flags & HEADER_INCOMPATIBLE_COMPACT)
                                        strv[n++] = "compact";
                        }
                        strv[n] = NULL;
                        /* strv[] must have room for all five feature names plus the NULL terminator */
                        assert(n < ELEMENTSOF(strv));

                        t = strv_join((char**) strv, ", ");
                        log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
                                  f->path, type, n > 1 ? "flags" : "flag", strnull(t));
                }
                return true;
        }

        return false;
}
453
/* Validates the header of a freshly opened journal file: signature, feature flags, state, size
 * consistency and 64-bit alignment of all header offsets. When the file is opened for writing this
 * additionally insists that the file belongs to this machine, is cleanly offline, has hash tables,
 * and carries no timestamps from the future. Returns 0 if the file is usable, a negative errno
 * otherwise. */
static int journal_file_verify_header(JournalFile *f) {
        uint64_t arena_size, header_size;

        assert(f);
        assert(f->header);

        if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
                return -EBADMSG;

        /* In both read and write mode we refuse to open files with incompatible
         * flags we don't know. */
        if (warn_wrong_flags(f, false))
                return -EPROTONOSUPPORT;

        /* When open for writing we refuse to open files with compatible flags, too. */
        if (journal_file_writable(f) && warn_wrong_flags(f, true))
                return -EPROTONOSUPPORT;

        if (f->header->state >= _STATE_MAX)
                return -EBADMSG;

        header_size = le64toh(READ_NOW(f->header->header_size));

        /* The first addition was n_data, so check that we are at least this large */
        if (header_size < HEADER_SIZE_MIN)
                return -EBADMSG;

        /* A sealed file must be new enough to carry the n_entry_arrays header field. */
        if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
                return -EBADMSG;

        arena_size = le64toh(READ_NOW(f->header->arena_size));

        /* header + arena must not overflow and must fit within the actual file size. */
        if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
                return -ENODATA;

        if (le64toh(f->header->tail_object_offset) > header_size + arena_size)
                return -ENODATA;

        /* All object offsets must be aligned to 64-bit boundaries. */
        if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
            !VALID64(le64toh(f->header->field_hash_table_offset)) ||
            !VALID64(le64toh(f->header->tail_object_offset)) ||
            !VALID64(le64toh(f->header->entry_array_offset)))
                return -ENODATA;

        if (journal_file_writable(f)) {
                sd_id128_t machine_id;
                uint8_t state;
                int r;

                r = sd_id128_get_machine(&machine_id);
                if (r < 0)
                        return r;

                /* Never append to a file created on a different machine. */
                if (!sd_id128_equal(machine_id, f->header->machine_id))
                        return -EHOSTDOWN;

                state = f->header->state;

                if (state == STATE_ARCHIVED)
                        return -ESHUTDOWN; /* Already archived */
                else if (state == STATE_ONLINE)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
                                               "Journal file %s is already online. Assuming unclean closing.",
                                               f->path);
                else if (state != STATE_OFFLINE)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
                                               "Journal file %s has unknown state %i.",
                                               f->path, state);

                if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
                        return -EBADMSG;

                /* Don't permit appending to files from the future. Because otherwise the realtime timestamps wouldn't
                 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
                 * bisection. */
                if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME))
                        return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY),
                                               "Journal file %s is from the future, refusing to append new data to it that'd be older.",
                                               f->path);
        }

        return 0;
}
537
538 int journal_file_fstat(JournalFile *f) {
539 int r;
540
541 assert(f);
542 assert(f->fd >= 0);
543
544 if (fstat(f->fd, &f->last_stat) < 0)
545 return -errno;
546
547 f->last_stat_usec = now(CLOCK_MONOTONIC);
548
549 /* Refuse dealing with files that aren't regular */
550 r = stat_verify_regular(&f->last_stat);
551 if (r < 0)
552 return r;
553
554 /* Refuse appending to files that are already deleted */
555 if (f->last_stat.st_nlink <= 0)
556 return -EIDRM;
557
558 return 0;
559 }
560
/* Ensures the file is large enough to hold 'size' bytes at 'offset', growing it with
 * posix_fallocate() in FILE_SIZE_INCREASE steps while honouring the configured max_size and
 * keep_free limits. Updates the arena_size header field and refreshes the cached stat data. */
static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
        uint64_t old_size, new_size, old_header_size, old_arena_size;
        int r;

        assert(f);
        assert(f->header);

        /* We assume that this file is not sparse, and we know that for sure, since we always call
         * posix_fallocate() ourselves */

        if (size > PAGE_ALIGN_DOWN(UINT64_MAX) - offset)
                return -EINVAL;

        if (mmap_cache_fd_got_sigbus(f->cache_fd))
                return -EIO;

        old_header_size = le64toh(READ_NOW(f->header->header_size));
        old_arena_size = le64toh(READ_NOW(f->header->arena_size));
        if (old_arena_size > PAGE_ALIGN_DOWN(UINT64_MAX) - old_header_size)
                return -EBADMSG;

        old_size = old_header_size + old_arena_size;

        new_size = MAX(PAGE_ALIGN(offset + size), old_header_size);

        if (new_size <= old_size) {

                /* We already pre-allocated enough space, but before we write to it, let's check
                 * with fstat() whether the file got deleted, in order to make sure we don't throw
                 * away the data immediately. Don't do the fstat() for every write though, but at
                 * most once per LAST_STAT_REFRESH_USEC interval. */

                if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
                        return 0;

                return journal_file_fstat(f);
        }

        /* Allocate more space. */

        if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
                return -E2BIG;

        /* Refuse to go over 4G in compact mode so offsets can be stored in 32-bit. */
        if (JOURNAL_HEADER_COMPACT(f->header) && new_size > UINT32_MAX)
                return -E2BIG;

        if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
                struct statvfs svfs;

                if (fstatvfs(f->fd, &svfs) >= 0) {
                        uint64_t available;

                        /* Disk space still usable by us after honouring the keep_free reserve. */
                        available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);

                        if (new_size - old_size > available)
                                return -E2BIG;
                }
        }

        /* Increase by larger blocks at once */
        new_size = DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
        if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
                new_size = f->metrics.max_size;

        /* Note that the glibc fallocate() fallback is very
           inefficient, hence we try to minimize the allocation area
           as we can. */
        r = posix_fallocate_loop(f->fd, old_size, new_size - old_size);
        if (r < 0)
                return r;

        f->header->arena_size = htole64(new_size - old_header_size);

        return journal_file_fstat(f);
}
638
639 static unsigned type_to_context(ObjectType type) {
640 /* One context for each type, plus one catch-all for the rest */
641 assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
642 assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
643 return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
644 }
645
646 static int journal_file_move_to(
647 JournalFile *f,
648 ObjectType type,
649 bool keep_always,
650 uint64_t offset,
651 uint64_t size,
652 void **ret) {
653
654 int r;
655
656 assert(f);
657 assert(ret);
658
659 /* This function may clear, overwrite, or alter previously cached entries. After this function has
660 * been called, all objects except for one obtained by this function are invalidated and must be
661 * re-read before use. */
662
663 if (size <= 0)
664 return -EINVAL;
665
666 if (size > UINT64_MAX - offset)
667 return -EBADMSG;
668
669 /* Avoid SIGBUS on invalid accesses */
670 if (offset + size > (uint64_t) f->last_stat.st_size) {
671 /* Hmm, out of range? Let's refresh the fstat() data
672 * first, before we trust that check. */
673
674 r = journal_file_fstat(f);
675 if (r < 0)
676 return r;
677
678 if (offset + size > (uint64_t) f->last_stat.st_size)
679 return -EADDRNOTAVAIL;
680 }
681
682 return mmap_cache_fd_get(f->cache_fd, type_to_context(type), keep_always, offset, size, &f->last_stat, ret);
683 }
684
685 static uint64_t minimum_header_size(JournalFile *f, Object *o) {
686
687 static const uint64_t table[] = {
688 [OBJECT_DATA] = sizeof(DataObject),
689 [OBJECT_FIELD] = sizeof(FieldObject),
690 [OBJECT_ENTRY] = sizeof(EntryObject),
691 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
692 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
693 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
694 [OBJECT_TAG] = sizeof(TagObject),
695 };
696
697 assert(f);
698 assert(o);
699
700 if (o->object.type == OBJECT_DATA)
701 return journal_file_data_payload_offset(f);
702
703 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
704 return sizeof(ObjectHeader);
705
706 return table[o->object.type];
707 }
708
709 static int check_object_header(JournalFile *f, Object *o, ObjectType type, uint64_t offset) {
710 uint64_t s;
711
712 assert(f);
713 assert(o);
714
715 s = le64toh(READ_NOW(o->object.size));
716 if (s == 0)
717 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
718 "Attempt to move to uninitialized object: %" PRIu64,
719 offset);
720
721 if (s < sizeof(ObjectHeader))
722 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
723 "Attempt to move to overly short object: %" PRIu64,
724 offset);
725
726 if (o->object.type <= OBJECT_UNUSED)
727 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
728 "Attempt to move to object with invalid type: %" PRIu64,
729 offset);
730
731 if (type > OBJECT_UNUSED && o->object.type != type)
732 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
733 "Attempt to move to object of unexpected type: %" PRIu64,
734 offset);
735
736 if (s < minimum_header_size(f, o))
737 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
738 "Attempt to move to truncated object: %" PRIu64,
739 offset);
740
741 return 0;
742 }
743
/* Lightweight object checks. We want this to be fast, so that we won't
 * slowdown every journal_file_move_to_object() call too much. Verifies type-specific invariants of
 * the object at 'offset': sizes, 64-bit alignment of stored offsets and basic value ranges.
 * Unknown object types are accepted without further checks. Returns 0 if sane, -EBADMSG otherwise. */
static int check_object(JournalFile *f, Object *o, uint64_t offset) {
        assert(f);
        assert(o);

        switch (o->object.type) {

        case OBJECT_DATA:
                /* entry_offset and n_entries must be either both zero or both non-zero. */
                if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad n_entries: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->data.n_entries),
                                               offset);

                /* A data object must hold at least one byte of payload. */
                if (le64toh(o->object.size) <= journal_file_data_payload_offset(f))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad object size (<= %zu): %" PRIu64 ": %" PRIu64,
                                               journal_file_data_payload_offset(f),
                                               le64toh(o->object.size),
                                               offset);

                if (!VALID64(le64toh(o->data.next_hash_offset)) ||
                    !VALID64(le64toh(o->data.next_field_offset)) ||
                    !VALID64(le64toh(o->data.entry_offset)) ||
                    !VALID64(le64toh(o->data.entry_array_offset)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid offset, next_hash_offset=" OFSfmt ", next_field_offset=" OFSfmt ", entry_offset=" OFSfmt ", entry_array_offset=" OFSfmt ": %" PRIu64,
                                               le64toh(o->data.next_hash_offset),
                                               le64toh(o->data.next_field_offset),
                                               le64toh(o->data.entry_offset),
                                               le64toh(o->data.entry_array_offset),
                                               offset);

                break;

        case OBJECT_FIELD:
                /* A field object must hold at least one byte of payload (the field name). */
                if (le64toh(o->object.size) <= offsetof(Object, field.payload))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64,
                                               offsetof(Object, field.payload),
                                               le64toh(o->object.size),
                                               offset);

                if (!VALID64(le64toh(o->field.next_hash_offset)) ||
                    !VALID64(le64toh(o->field.head_data_offset)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid offset, next_hash_offset=" OFSfmt ", head_data_offset=" OFSfmt ": %" PRIu64,
                                               le64toh(o->field.next_hash_offset),
                                               le64toh(o->field.head_data_offset),
                                               offset);
                break;

        case OBJECT_ENTRY: {
                uint64_t sz;

                /* The items array must be a whole multiple of the per-file entry item size. */
                sz = le64toh(READ_NOW(o->object.size));
                if (sz < offsetof(Object, entry.items) ||
                    (sz - offsetof(Object, entry.items)) % journal_file_entry_item_size(f) != 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
                                               offsetof(Object, entry.items),
                                               sz,
                                               offset);

                /* An entry must reference at least one item. */
                if ((sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid number items in entry: %" PRIu64 ": %" PRIu64,
                                               (sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f),
                                               offset);

                /* Seqnums start at 1, zero is invalid. */
                if (le64toh(o->entry.seqnum) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid entry seqnum: %" PRIx64 ": %" PRIu64,
                                               le64toh(o->entry.seqnum),
                                               offset);

                if (!VALID_REALTIME(le64toh(o->entry.realtime)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid entry realtime timestamp: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->entry.realtime),
                                               offset);

                if (!VALID_MONOTONIC(le64toh(o->entry.monotonic)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid entry monotonic timestamp: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->entry.monotonic),
                                               offset);

                break;
        }

        case OBJECT_DATA_HASH_TABLE:
        case OBJECT_FIELD_HASH_TABLE: {
                uint64_t sz;

                /* The table must contain a whole, positive number of HashItems. */
                sz = le64toh(READ_NOW(o->object.size));
                if (sz < offsetof(Object, hash_table.items) ||
                    (sz - offsetof(Object, hash_table.items)) % sizeof(HashItem) != 0 ||
                    (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid %s hash table size: %" PRIu64 ": %" PRIu64,
                                               o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
                                               sz,
                                               offset);

                break;
        }

        case OBJECT_ENTRY_ARRAY: {
                uint64_t sz;

                /* The array must contain a whole, positive number of entry items. */
                sz = le64toh(READ_NOW(o->object.size));
                if (sz < offsetof(Object, entry_array.items) ||
                    (sz - offsetof(Object, entry_array.items)) % journal_file_entry_array_item_size(f) != 0 ||
                    (sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f) <= 0)
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object entry array size: %" PRIu64 ": %" PRIu64,
                                               sz,
                                               offset);

                if (!VALID64(le64toh(o->entry_array.next_entry_array_offset)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object entry array next_entry_array_offset: " OFSfmt ": %" PRIu64,
                                               le64toh(o->entry_array.next_entry_array_offset),
                                               offset);

                break;
        }

        case OBJECT_TAG:
                /* Tag objects have a fixed size. */
                if (le64toh(o->object.size) != sizeof(TagObject))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object tag size: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->object.size),
                                               offset);

                if (!VALID_EPOCH(le64toh(o->tag.epoch)))
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Invalid object tag epoch: %" PRIu64 ": %" PRIu64,
                                               le64toh(o->tag.epoch), offset);

                break;
        }

        return 0;
}
891
/* Maps the object at 'offset' through the mmap cache and validates it. On success stores the
 * mapped object in *ret (if non-NULL); the pointer stays valid only until the next operation that
 * touches the mmap cache. Returns 0 on success, -EBADMSG on validation failure. */
int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
        int r;
        Object *o;

        assert(f);

        /* Even if this function fails, it may clear, overwrite, or alter previously cached entries. After
         * this function has been called, all objects except for one obtained by this function are
         * invalidated and must be re-read before use. */

        /* Objects may only be located at multiple of 64 bit */
        if (!VALID64(offset))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to move to object at non-64bit boundary: %" PRIu64,
                                       offset);

        /* Object may not be located in the file header */
        if (offset < le64toh(f->header->header_size))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "Attempt to move to object located in file header: %" PRIu64,
                                       offset);

        /* First map only the generic object header, so we can learn the object's full size... */
        r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), (void**) &o);
        if (r < 0)
                return r;

        r = check_object_header(f, o, type, offset);
        if (r < 0)
                return r;

        /* ...then map the whole object. This may remap, so validate the header again afterwards. */
        r = journal_file_move_to(f, type, false, offset, le64toh(READ_NOW(o->object.size)), (void**) &o);
        if (r < 0)
                return r;

        r = check_object_header(f, o, type, offset);
        if (r < 0)
                return r;

        r = check_object(f, o, offset);
        if (r < 0)
                return r;

        if (ret)
                *ret = o;

        return 0;
}
939
940 int journal_file_read_object_header(JournalFile *f, ObjectType type, uint64_t offset, Object *ret) {
941 ssize_t n;
942 Object o;
943 int r;
944
945 assert(f);
946
947 /* Objects may only be located at multiple of 64 bit */
948 if (!VALID64(offset))
949 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
950 "Attempt to read object at non-64bit boundary: %" PRIu64,
951 offset);
952
953 /* Object may not be located in the file header */
954 if (offset < le64toh(f->header->header_size))
955 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
956 "Attempt to read object located in file header: %" PRIu64,
957 offset);
958
959 /* This will likely read too much data but it avoids having to call pread() twice. */
960 n = pread(f->fd, &o, sizeof(o), offset);
961 if (n < 0)
962 return log_debug_errno(errno, "Failed to read journal file at offset: %" PRIu64,
963 offset);
964
965 if ((size_t) n < sizeof(o.object))
966 return log_debug_errno(SYNTHETIC_ERRNO(EIO),
967 "Failed to read short object at offset: %" PRIu64,
968 offset);
969
970 r = check_object_header(f, &o, type, offset);
971 if (r < 0)
972 return r;
973
974 if ((size_t) n < minimum_header_size(f, &o))
975 return log_debug_errno(SYNTHETIC_ERRNO(EIO),
976 "Short read while reading object: %" PRIu64,
977 offset);
978
979 r = check_object(f, &o, offset);
980 if (r < 0)
981 return r;
982
983 if (ret)
984 *ret = o;
985
986 return 0;
987 }
988
989 static uint64_t journal_file_entry_seqnum(
990 JournalFile *f,
991 uint64_t *seqnum) {
992
993 uint64_t ret;
994
995 assert(f);
996 assert(f->header);
997
998 /* Picks a new sequence number for the entry we are about to add and returns it. */
999
1000 ret = le64toh(f->header->tail_entry_seqnum) + 1;
1001
1002 if (seqnum) {
1003 /* If an external seqnum counter was passed, we update both the local and the external one,
1004 * and set it to the maximum of both */
1005
1006 if (*seqnum + 1 > ret)
1007 ret = *seqnum + 1;
1008
1009 *seqnum = ret;
1010 }
1011
1012 f->header->tail_entry_seqnum = htole64(ret);
1013
1014 if (f->header->head_entry_seqnum == 0)
1015 f->header->head_entry_seqnum = htole64(ret);
1016
1017 return ret;
1018 }
1019
/* Appends a new object of the given type and size at the end of the file: brings the file online,
 * grows the file if necessary, maps the new region and fills in its ObjectHeader. Updates the
 * header's tail object offset and object count. On success returns the mapped object in
 * *ret_object (if non-NULL) and its file offset in *ret_offset (if non-NULL). */
int journal_file_append_object(
                JournalFile *f,
                ObjectType type,
                uint64_t size,
                Object **ret_object,
                uint64_t *ret_offset) {

        int r;
        uint64_t p;
        Object *o;

        assert(f);
        assert(f->header);
        assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
        assert(size >= sizeof(ObjectHeader));

        /* Appending requires the file to be marked online first. */
        r = journal_file_set_online(f);
        if (r < 0)
                return r;

        /* The new object starts where the current tail object ends. */
        r = journal_file_tail_end_by_mmap(f, &p);
        if (r < 0)
                return r;

        /* Grow the file so [p, p+size) is backed by allocated disk space. */
        r = journal_file_allocate(f, p, size);
        if (r < 0)
                return r;

        r = journal_file_move_to(f, type, false, p, size, (void**) &o);
        if (r < 0)
                return r;

        o->object = (ObjectHeader) {
                .type = type,
                .size = htole64(size),
        };

        f->header->tail_object_offset = htole64(p);
        f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);

        if (ret_object)
                *ret_object = o;

        if (ret_offset)
                *ret_offset = p;

        return 0;
}
1068
/* Appends the data hash table object to a freshly created journal file and records its location
 * in the file header. Called once during file setup. Returns 0 on success, negative errno on
 * failure. */
static int journal_file_setup_data_hash_table(JournalFile *f) {
        uint64_t s, p;
        Object *o;
        int r;

        assert(f);
        assert(f->header);

        /* We estimate that we need 1 hash table entry per 768 bytes
           of journal file and we want to make sure we never get
           beyond 75% fill level. Calculate the hash table size for
           the maximum file size based on these metrics. */

        s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
        if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
                s = DEFAULT_DATA_HASH_TABLE_SIZE;

        log_debug("Reserving %"PRIu64" entries in data hash table.", s / sizeof(HashItem));

        r = journal_file_append_object(f,
                                       OBJECT_DATA_HASH_TABLE,
                                       offsetof(Object, hash_table.items) + s,
                                       &o, &p);
        if (r < 0)
                return r;

        /* All buckets start out empty. */
        memzero(o->hash_table.items, s);

        /* Note: the header records the location of the items array itself, not of the enclosing
         * object. */
        f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
        f->header->data_hash_table_size = htole64(s);

        return 0;
}
1102
/* Appends the field hash table object to a freshly created journal file and records its location
 * in the file header. Unlike the data hash table, this one has a fixed size. Returns 0 on
 * success, negative errno on failure. */
static int journal_file_setup_field_hash_table(JournalFile *f) {
        uint64_t s, p;
        Object *o;
        int r;

        assert(f);
        assert(f->header);

        /* We use a fixed size hash table for the fields as this
         * number should grow very slowly only */

        s = DEFAULT_FIELD_HASH_TABLE_SIZE;
        log_debug("Reserving %"PRIu64" entries in field hash table.", s / sizeof(HashItem));

        r = journal_file_append_object(f,
                                       OBJECT_FIELD_HASH_TABLE,
                                       offsetof(Object, hash_table.items) + s,
                                       &o, &p);
        if (r < 0)
                return r;

        /* All buckets start out empty. */
        memzero(o->hash_table.items, s);

        /* As above: the header points at the items array, not the object header. */
        f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
        f->header->field_hash_table_size = htole64(s);

        return 0;
}
1131
1132 int journal_file_map_data_hash_table(JournalFile *f) {
1133 uint64_t s, p;
1134 void *t;
1135 int r;
1136
1137 assert(f);
1138 assert(f->header);
1139
1140 if (f->data_hash_table)
1141 return 0;
1142
1143 p = le64toh(f->header->data_hash_table_offset);
1144 s = le64toh(f->header->data_hash_table_size);
1145
1146 r = journal_file_move_to(f,
1147 OBJECT_DATA_HASH_TABLE,
1148 true,
1149 p, s,
1150 &t);
1151 if (r < 0)
1152 return r;
1153
1154 f->data_hash_table = t;
1155 return 0;
1156 }
1157
1158 int journal_file_map_field_hash_table(JournalFile *f) {
1159 uint64_t s, p;
1160 void *t;
1161 int r;
1162
1163 assert(f);
1164 assert(f->header);
1165
1166 if (f->field_hash_table)
1167 return 0;
1168
1169 p = le64toh(f->header->field_hash_table_offset);
1170 s = le64toh(f->header->field_hash_table_size);
1171
1172 r = journal_file_move_to(f,
1173 OBJECT_FIELD_HASH_TABLE,
1174 true,
1175 p, s,
1176 &t);
1177 if (r < 0)
1178 return r;
1179
1180 f->field_hash_table = t;
1181 return 0;
1182 }
1183
/* Links an already-appended field object at 'offset' into the field hash table bucket for 'hash',
 * appending it at the tail of the bucket's chain. Also bumps the header's field counter when the
 * header is recent enough to carry one. Returns 0 on success, -EINVAL if 'o' is not a field
 * object, -EBADMSG if the hash table is missing/empty, other negative errno on failure. */
static int journal_file_link_field(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                uint64_t hash) {

        uint64_t p, h, m;
        int r;

        assert(f);
        assert(f->header);
        assert(f->field_hash_table);
        assert(o);
        assert(offset > 0);

        if (o->object.type != OBJECT_FIELD)
                return -EINVAL;

        m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* This might alter the window we are looking at */
        o->field.next_hash_offset = o->field.head_data_offset = 0;

        h = hash % m;
        p = le64toh(f->field_hash_table[h].tail_hash_offset);
        if (p == 0)
                /* Empty bucket: the new object becomes its head. */
                f->field_hash_table[h].head_hash_offset = htole64(offset);
        else {
                /* Otherwise patch the current chain tail to point at us. Note that this mapping
                 * may invalidate 'o', which is hence not dereferenced again below. */
                r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
                if (r < 0)
                        return r;

                o->field.next_hash_offset = htole64(offset);
        }

        f->field_hash_table[h].tail_hash_offset = htole64(offset);

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);

        return 0;
}
1228
/* Links an already-appended data object at 'offset' into the data hash table bucket for 'hash',
 * appending it at the tail of the bucket's chain, and bumps the header's data counter when
 * available. Returns 0 on success, -EINVAL if 'o' is not a data object, -EBADMSG if the hash
 * table is missing/empty, other negative errno on failure. */
static int journal_file_link_data(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                uint64_t hash) {

        uint64_t p, h, m;
        int r;

        assert(f);
        assert(f->header);
        assert(f->data_hash_table);
        assert(o);
        assert(offset > 0);

        if (o->object.type != OBJECT_DATA)
                return -EINVAL;

        m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* This might alter the window we are looking at */
        o->data.next_hash_offset = o->data.next_field_offset = 0;
        o->data.entry_offset = o->data.entry_array_offset = 0;
        o->data.n_entries = 0;

        h = hash % m;
        p = le64toh(f->data_hash_table[h].tail_hash_offset);
        if (p == 0)
                /* Only entry in the hash table is easy */
                f->data_hash_table[h].head_hash_offset = htole64(offset);
        else {
                /* Move back to the previous data object, to patch in
                 * pointer */

                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
                if (r < 0)
                        return r;

                o->data.next_hash_offset = htole64(offset);
        }

        f->data_hash_table[h].tail_hash_offset = htole64(offset);

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                f->header->n_data = htole64(le64toh(f->header->n_data) + 1);

        return 0;
}
1279
/* Advances *p to the next object in a hash chain by following *next_hash_offset. Refuses chains
 * that do not make strictly forward progress in the file (which would indicate a corrupted,
 * looping chain). *depth counts the steps taken; when 'header_max_depth' is given and the file is
 * writable, the deepest chain seen so far is recorded there. Returns 0 on success, -EBADMSG on a
 * detected loop. */
static int get_next_hash_offset(
                JournalFile *f,
                uint64_t *p,
                le64_t *next_hash_offset,
                uint64_t *depth,
                le64_t *header_max_depth) {

        uint64_t nextp;

        assert(f);
        assert(p);
        assert(next_hash_offset);
        assert(depth);

        nextp = le64toh(READ_NOW(*next_hash_offset));
        if (nextp > 0) {
                if (nextp <= *p) /* Refuse going in loops */
                        return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                               "Detected hash item loop in %s, refusing.", f->path);

                (*depth)++;

                /* If the depth of this hash chain is larger than all others we have seen so far, record it */
                if (header_max_depth && journal_file_writable(f))
                        *header_max_depth = htole64(MAX(*depth, le64toh(*header_max_depth)));
        }

        *p = nextp;
        return 0;
}
1310
/* Looks up the field object with the given payload (e.g. "MESSAGE") and precomputed hash.
 * Returns > 0 (filling in ret_object/ret_offset, if given) when found, 0 when not present,
 * -EBADMSG on a corrupted hash table, other negative errno on failure. */
int journal_file_find_field_object_with_hash(
                JournalFile *f,
                const void *field,
                uint64_t size,
                uint64_t hash,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t p, osize, h, m, depth = 0;
        int r;

        assert(f);
        assert(f->header);
        assert(field);
        assert(size > 0);

        /* If the field hash table is empty, we can't find anything */
        if (le64toh(f->header->field_hash_table_size) <= 0)
                return 0;

        /* Map the field hash table, if it isn't mapped yet. */
        r = journal_file_map_field_hash_table(f);
        if (r < 0)
                return r;

        /* Expected on-disk size of a matching field object (header + payload). */
        osize = offsetof(Object, field.payload) + size;

        m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* Walk the bucket's chain until we hit an exact match or run off the end. */
        h = hash % m;
        p = le64toh(f->field_hash_table[h].head_hash_offset);
        while (p > 0) {
                Object *o;

                r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
                if (r < 0)
                        return r;

                if (le64toh(o->field.hash) == hash &&
                    le64toh(o->object.size) == osize &&
                    memcmp(o->field.payload, field, size) == 0) {

                        if (ret_object)
                                *ret_object = o;
                        if (ret_offset)
                                *ret_offset = p;

                        return 1;
                }

                r = get_next_hash_offset(
                                f,
                                &p,
                                &o->field.next_hash_offset,
                                &depth,
                                JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) ? &f->header->field_hash_chain_depth : NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}
1375
1376 uint64_t journal_file_hash_data(
1377 JournalFile *f,
1378 const void *data,
1379 size_t sz) {
1380
1381 assert(f);
1382 assert(f->header);
1383 assert(data || sz == 0);
1384
1385 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1386 * function use siphash. Old journal files use the Jenkins hash. */
1387
1388 if (JOURNAL_HEADER_KEYED_HASH(f->header))
1389 return siphash24(data, sz, f->header->file_id.bytes);
1390
1391 return jenkins_hash64(data, sz);
1392 }
1393
1394 int journal_file_find_field_object(
1395 JournalFile *f,
1396 const void *field,
1397 uint64_t size,
1398 Object **ret_object,
1399 uint64_t *ret_offset) {
1400
1401 assert(f);
1402 assert(field);
1403 assert(size > 0);
1404
1405 return journal_file_find_field_object_with_hash(
1406 f,
1407 field, size,
1408 journal_file_hash_data(f, field, size),
1409 ret_object, ret_offset);
1410 }
1411
/* Looks up the data object with the given payload and precomputed hash, comparing the full
 * (decompressed, if necessary) payload on hash matches. Returns > 0 (filling in
 * ret_object/ret_offset, if given) when found, 0 when not present, -EBADMSG on a corrupted hash
 * table, other negative errno on failure. */
int journal_file_find_data_object_with_hash(
                JournalFile *f,
                const void *data,
                uint64_t size,
                uint64_t hash,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t p, h, m, depth = 0;
        int r;

        assert(f);
        assert(f->header);
        assert(data || size == 0);

        /* If there's no data hash table, then there's no entry. */
        if (le64toh(f->header->data_hash_table_size) <= 0)
                return 0;

        /* Map the data hash table, if it isn't mapped yet. */
        r = journal_file_map_data_hash_table(f);
        if (r < 0)
                return r;

        m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
        if (m <= 0)
                return -EBADMSG;

        /* Walk the bucket's chain; compare hashes first, payloads only on a hash match. */
        h = hash % m;
        p = le64toh(f->data_hash_table[h].head_hash_offset);

        while (p > 0) {
                Object *o;
                void *d;
                size_t rsize;

                r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
                if (r < 0)
                        return r;

                if (le64toh(o->data.hash) != hash)
                        goto next;

                r = journal_file_data_payload(f, o, p, NULL, 0, 0, &d, &rsize);
                if (r < 0)
                        return r;
                assert(r > 0); /* journal_file_data_payload() always returns > 0 if no field is provided. */

                if (memcmp_nn(data, size, d, rsize) == 0) {
                        if (ret_object)
                                *ret_object = o;

                        if (ret_offset)
                                *ret_offset = p;

                        return 1;
                }

        next:
                r = get_next_hash_offset(
                                f,
                                &p,
                                &o->data.next_hash_offset,
                                &depth,
                                JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) ? &f->header->data_hash_chain_depth : NULL);
                if (r < 0)
                        return r;
        }

        return 0;
}
1483
1484 int journal_file_find_data_object(
1485 JournalFile *f,
1486 const void *data,
1487 uint64_t size,
1488 Object **ret_object,
1489 uint64_t *ret_offset) {
1490
1491 assert(f);
1492 assert(data || size == 0);
1493
1494 return journal_file_find_data_object_with_hash(
1495 f,
1496 data, size,
1497 journal_file_hash_data(f, data, size),
1498 ret_object, ret_offset);
1499 }
1500
bool journal_field_valid(const char *p, size_t l, bool allow_protected) {
        /* We kinda enforce POSIX syntax recommendations for
           environment variables here, but make a couple of additional
           requirements.

           http://pubs.opengroup.org/onlinepubs/000095399/basedefs/xbd_chap08.html */

        assert(p);

        if (l == SIZE_MAX)
                l = strlen(p);

        /* Reject the empty name and anything longer than 64 characters */
        if (l == 0 || l > 64)
                return false;

        /* A leading underscore marks a protected (trusted) field */
        if (p[0] == '_' && !allow_protected)
                return false;

        /* The first character must not be a digit */
        if (ascii_isdigit(p[0]))
                return false;

        /* Everything else: uppercase letters, digits and underscores only */
        for (size_t i = 0; i < l; i++) {
                char c = p[i];

                if (c == '_')
                        continue;
                if (c >= 'A' && c <= 'Z')
                        continue;
                if (ascii_isdigit(c))
                        continue;

                return false;
        }

        return true;
}
1538
/* Returns (via ret_object/ret_offset) the field object for the given field name, appending and
 * linking a new one if it doesn't exist yet. Returns 0 on success, -EBADMSG if the name is not a
 * valid journal field name, other negative errno on failure. */
static int journal_file_append_field(
                JournalFile *f,
                const void *field,
                uint64_t size,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t hash, p;
        uint64_t osize;
        Object *o;
        int r;

        assert(f);
        assert(field);
        assert(size > 0);

        if (!journal_field_valid(field, size, true))
                return -EBADMSG;

        hash = journal_file_hash_data(f, field, size);

        /* Reuse an existing field object if we have one. */
        r = journal_file_find_field_object_with_hash(f, field, size, hash, ret_object, ret_offset);
        if (r < 0)
                return r;
        if (r > 0)
                return 0;

        osize = offsetof(Object, field.payload) + size;
        r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
        if (r < 0)
                return r;

        o->field.hash = htole64(hash);
        memcpy(o->field.payload, field, size);

        r = journal_file_link_field(f, o, p, hash);
        if (r < 0)
                return r;

        /* The linking might have altered the window, so let's only pass the offset to hmac which will
         * move to the object again if needed. */

#if HAVE_GCRYPT
        r = journal_file_hmac_put_object(f, OBJECT_FIELD, NULL, p);
        if (r < 0)
                return r;
#endif

        /* Same reason: re-resolve the object pointer before handing it back to the caller. */
        if (ret_object) {
                r = journal_file_move_to_object(f, OBJECT_FIELD, p, ret_object);
                if (r < 0)
                        return r;
        }

        if (ret_offset)
                *ret_offset = p;

        return 0;
}
1598
/* Tries to compress 'size' bytes from 'src' into 'dst', if compression is enabled for this file
 * and the payload meets the configured size threshold. On success returns the algorithm used and
 * stores the compressed size in *rsize; otherwise returns COMPRESSION_NONE (compression disabled,
 * built without compression support, or not worth it), in which case the caller stores the
 * payload uncompressed. */
static Compression maybe_compress_payload(JournalFile *f, uint8_t *dst, const uint8_t *src, uint64_t size, size_t *rsize) {
        Compression compression = COMPRESSION_NONE;

        assert(f);
        assert(f->header);

#if HAVE_COMPRESSION
        if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
                /* Limiting the output buffer to size - 1 ensures compression is only used when it
                 * actually saves space. */
                compression = compress_blob(src, size, dst, size - 1, rsize);
                if (compression > 0)
                        log_debug("Compressed data object %"PRIu64" -> %zu using %s",
                                  size, *rsize, compression_to_string(compression));
                else
                        /* Compression didn't work, we don't really care why, let's continue without compression */
                        compression = COMPRESSION_NONE;
        }
#endif

        return compression;
}
1619
1620 static int journal_file_append_data(
1621 JournalFile *f,
1622 const void *data,
1623 uint64_t size,
1624 Object **ret_object,
1625 uint64_t *ret_offset) {
1626
1627 uint64_t hash, p, osize;
1628 Object *o, *fo;
1629 size_t rsize = 0;
1630 Compression c;
1631 const void *eq;
1632 int r;
1633
1634 assert(f);
1635
1636 if (!data || size == 0)
1637 return -EINVAL;
1638
1639 hash = journal_file_hash_data(f, data, size);
1640
1641 r = journal_file_find_data_object_with_hash(f, data, size, hash, ret_object, ret_offset);
1642 if (r < 0)
1643 return r;
1644 if (r > 0)
1645 return 0;
1646
1647 eq = memchr(data, '=', size);
1648 if (!eq)
1649 return -EINVAL;
1650
1651 osize = journal_file_data_payload_offset(f) + size;
1652 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1653 if (r < 0)
1654 return r;
1655
1656 o->data.hash = htole64(hash);
1657
1658 c = maybe_compress_payload(f, journal_file_data_payload_field(f, o), data, size, &rsize);
1659
1660 if (c != COMPRESSION_NONE) {
1661 o->object.size = htole64(journal_file_data_payload_offset(f) + rsize);
1662 o->object.flags |= COMPRESSION_TO_OBJECT_FLAG(c);
1663 } else
1664 memcpy_safe(journal_file_data_payload_field(f, o), data, size);
1665
1666 r = journal_file_link_data(f, o, p, hash);
1667 if (r < 0)
1668 return r;
1669
1670 /* The linking might have altered the window, so let's refresh our pointer. */
1671 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1672 if (r < 0)
1673 return r;
1674
1675 #if HAVE_GCRYPT
1676 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1677 if (r < 0)
1678 return r;
1679 #endif
1680
1681 /* Create field object ... */
1682 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, NULL);
1683 if (r < 0)
1684 return r;
1685
1686 /* ... and link it in. */
1687 o->data.next_field_offset = fo->field.head_data_offset;
1688 fo->field.head_data_offset = le64toh(p);
1689
1690 if (ret_object)
1691 *ret_object = o;
1692
1693 if (ret_offset)
1694 *ret_offset = p;
1695
1696 return 0;
1697 }
1698
/* Extracts the payload of a data object, decompressing it if necessary. When 'field' is given,
 * only payloads that start with "field=" are returned; on a mismatch 0 is returned with
 * *ret_data/*ret_size cleared. On a match returns 1 and points *ret_data either directly into the
 * mapped object or into f->compress_buffer (which is only valid until the next decompression).
 * Returns -E2BIG when the payload cannot be represented in a size_t on this architecture, and
 * -EPROTONOSUPPORT when compressed data is met but we were built without compression support.
 * NOTE(review): 'data_threshold' is accepted but not consulted in this implementation. */
static int maybe_decompress_payload(
                JournalFile *f,
                uint8_t *payload,
                uint64_t size,
                Compression compression,
                const char *field,
                size_t field_length,
                size_t data_threshold,
                void **ret_data,
                size_t *ret_size) {

        assert(f);

        /* We can't read objects larger than 4G on a 32bit machine */
        if ((uint64_t) (size_t) size != size)
                return -E2BIG;

        if (compression != COMPRESSION_NONE) {
#if HAVE_COMPRESSION
                size_t rsize;
                int r;

                if (field) {
                        /* Cheap prefix check that avoids decompressing the full blob when the
                         * field doesn't match. */
                        r = decompress_startswith(compression, payload, size, &f->compress_buffer, field,
                                                  field_length, '=');
                        if (r < 0)
                                return log_debug_errno(r,
                                                       "Cannot decompress %s object of length %" PRIu64 ": %m",
                                                       compression_to_string(compression),
                                                       size);
                        if (r == 0) {
                                if (ret_data)
                                        *ret_data = NULL;
                                if (ret_size)
                                        *ret_size = 0;
                                return 0;
                        }
                }

                r = decompress_blob(compression, payload, size, &f->compress_buffer, &rsize, 0);
                if (r < 0)
                        return r;

                if (ret_data)
                        *ret_data = f->compress_buffer;
                if (ret_size)
                        *ret_size = rsize;
#else
                return -EPROTONOSUPPORT;
#endif
        } else {
                /* Uncompressed: the field filter reduces to a plain prefix comparison. */
                if (field && (size < field_length + 1 || memcmp(payload, field, field_length) != 0 || payload[field_length] != '=')) {
                        if (ret_data)
                                *ret_data = NULL;
                        if (ret_size)
                                *ret_size = 0;
                        return 0;
                }

                if (ret_data)
                        *ret_data = payload;
                if (ret_size)
                        *ret_size = (size_t) size;
        }

        return 1;
}
1766
/* Resolves the payload of the data object at 'offset' ('o' may already point at it, otherwise it
 * is mapped here), handling compression and the optional field filter. Return conventions are
 * those of maybe_decompress_payload(); additionally returns -EBADMSG when the object is too small
 * to carry a payload, and -EPROTONOSUPPORT when the object uses an unknown compression flag. */
int journal_file_data_payload(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                const char *field,
                size_t field_length,
                size_t data_threshold,
                void **ret_data,
                size_t *ret_size) {

        uint64_t size;
        Compression c;
        int r;

        assert(f);
        assert(!field == (field_length == 0)); /* These must be specified together. */

        if (!o) {
                r = journal_file_move_to_object(f, OBJECT_DATA, offset, &o);
                if (r < 0)
                        return r;
        }

        size = le64toh(READ_NOW(o->object.size));
        if (size < journal_file_data_payload_offset(f))
                return -EBADMSG;

        /* Strip the object header; what remains is the (possibly compressed) payload. */
        size -= journal_file_data_payload_offset(f);

        c = COMPRESSION_FROM_OBJECT(o);
        if (c < 0)
                return -EPROTONOSUPPORT;

        return maybe_decompress_payload(f, journal_file_data_payload_field(f, o), size, c, field,
                                        field_length, data_threshold, ret_data, ret_size);
}
1803
1804 uint64_t journal_file_entry_n_items(JournalFile *f, Object *o) {
1805 uint64_t sz;
1806
1807 assert(f);
1808 assert(o);
1809
1810 if (o->object.type != OBJECT_ENTRY)
1811 return 0;
1812
1813 sz = le64toh(READ_NOW(o->object.size));
1814 if (sz < offsetof(Object, entry.items))
1815 return 0;
1816
1817 return (sz - offsetof(Object, entry.items)) / journal_file_entry_item_size(f);
1818 }
1819
1820 uint64_t journal_file_entry_array_n_items(JournalFile *f, Object *o) {
1821 uint64_t sz;
1822
1823 assert(f);
1824 assert(o);
1825
1826 if (o->object.type != OBJECT_ENTRY_ARRAY)
1827 return 0;
1828
1829 sz = le64toh(READ_NOW(o->object.size));
1830 if (sz < offsetof(Object, entry_array.items))
1831 return 0;
1832
1833 return (sz - offsetof(Object, entry_array.items)) / journal_file_entry_array_item_size(f);
1834 }
1835
1836 uint64_t journal_file_hash_table_n_items(Object *o) {
1837 uint64_t sz;
1838
1839 assert(o);
1840
1841 if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
1842 return 0;
1843
1844 sz = le64toh(READ_NOW(o->object.size));
1845 if (sz < offsetof(Object, hash_table.items))
1846 return 0;
1847
1848 return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem);
1849 }
1850
1851 static void write_entry_array_item(JournalFile *f, Object *o, uint64_t i, uint64_t p) {
1852 assert(f);
1853 assert(o);
1854
1855 if (JOURNAL_HEADER_COMPACT(f->header)) {
1856 assert(p <= UINT32_MAX);
1857 o->entry_array.items.compact[i] = htole32(p);
1858 } else
1859 o->entry_array.items.regular[i] = htole64(p);
1860 }
1861
/* Appends entry offset 'p' to the entry array chain whose head is stored in *first and whose item
 * count is *idx. Walks the chain looking for a free slot; when the chain is full a new array
 * object is appended, doubling the size of the previous one (minimum 4 slots). For files that
 * track the chain's tail ('tail'/'tidx' non-NULL) the walk starts directly at the tail array and
 * the tail bookkeeping is kept up to date. Returns 0 on success, negative errno on failure. */
static int link_entry_into_array(
                JournalFile *f,
                le64_t *first,
                le64_t *idx,
                le32_t *tail,
                le32_t *tidx,
                uint64_t p) {

        uint64_t n = 0, ap = 0, q, i, a, hidx;
        Object *o;
        int r;

        assert(f);
        assert(f->header);
        assert(first);
        assert(idx);
        assert(p > 0);

        /* Start at the tail if we track it, otherwise at the head of the chain. */
        a = tail ? le32toh(*tail) : le64toh(*first);
        hidx = le64toh(READ_NOW(*idx));
        /* 'i' is the index of the next free slot, relative to the array we start the walk at. */
        i = tidx ? le32toh(READ_NOW(*tidx)) : hidx;

        while (a > 0) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (r < 0)
                        return r;

                n = journal_file_entry_array_n_items(f, o);
                if (i < n) {
                        /* Found a free slot in this array — store the offset and bump the counters. */
                        write_entry_array_item(f, o, i, p);
                        *idx = htole64(hidx + 1);
                        if (tidx)
                                *tidx = htole32(le32toh(*tidx) + 1);
                        return 0;
                }

                i -= n;
                ap = a;
                a = le64toh(o->entry_array.next_entry_array_offset);
        }

        /* Chain is full: append a new array, twice the size of the last one (n still holds the
         * last array's slot count), but at least 4 slots. */
        if (hidx > n)
                n = (hidx+1) * 2;
        else
                n = n * 2;

        if (n < 4)
                n = 4;

        r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
                                       offsetof(Object, entry_array.items) + n * journal_file_entry_array_item_size(f),
                                       &o, &q);
        if (r < 0)
                return r;

#if HAVE_GCRYPT
        r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
        if (r < 0)
                return r;
#endif

        write_entry_array_item(f, o, i, p);

        if (ap == 0)
                /* The new array is the first in the chain. */
                *first = htole64(q);
        else {
                /* Hook the new array up behind the previous tail. */
                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
                if (r < 0)
                        return r;

                o->entry_array.next_entry_array_offset = htole64(q);
        }

        if (tail)
                *tail = htole32(q);

        if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
                f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);

        *idx = htole64(hidx + 1);
        if (tidx)
                /* The new tail array holds exactly one item so far. */
                *tidx = htole32(1);

        return 0;
}
1947
/* Like link_entry_into_array(), but the very first item of the sequence is stored inline in
 * *extra rather than in an array object — this is how data objects store their first entry
 * reference cheaply. *idx counts the inline item plus all array items. Returns 0 on success,
 * -EBADMSG if the counter would overflow, other negative errno on failure. */
static int link_entry_into_array_plus_one(
                JournalFile *f,
                le64_t *extra,
                le64_t *first,
                le64_t *idx,
                le32_t *tail,
                le32_t *tidx,
                uint64_t p) {

        uint64_t hidx;
        int r;

        assert(f);
        assert(extra);
        assert(first);
        assert(idx);
        assert(p > 0);

        hidx = le64toh(READ_NOW(*idx));
        if (hidx == UINT64_MAX)
                return -EBADMSG;
        if (hidx == 0)
                /* First item goes into the inline slot. */
                *extra = htole64(p);
        else {
                le64_t i;

                /* Subsequent items go into the array chain; index shifted by one to account for
                 * the inline slot. */
                i = htole64(hidx - 1);
                r = link_entry_into_array(f, first, &i, tail, tidx, p);
                if (r < 0)
                        return r;
        }

        *idx = htole64(hidx + 1);
        return 0;
}
1983
/* Registers the entry at 'offset' with the data object at 'p', i.e. appends the entry offset to
 * that data object's entry list (inline slot + entry array chain). Returns 0 on success,
 * negative errno on failure. */
static int journal_file_link_entry_item(JournalFile *f, uint64_t offset, uint64_t p) {
        Object *o;
        int r;

        assert(f);
        assert(offset > 0);

        r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
        if (r < 0)
                return r;

        return link_entry_into_array_plus_one(f,
                                              &o->data.entry_offset,
                                              &o->data.entry_array_offset,
                                              &o->data.n_entries,
                                              JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_offset : NULL,
                                              JOURNAL_HEADER_COMPACT(f->header) ? &o->data.compact.tail_entry_array_n_entries : NULL,
                                              offset);
}
2003
/* Links a freshly appended entry object into the file: adds it to the global entry array, updates
 * the header's head/tail timestamps, and registers the entry with each of its data objects.
 * Linking of items is best-effort for -E2BIG (out of space for a new entry array): the remaining
 * items are still linked and -E2BIG is returned at the end; any other item-linking error aborts
 * immediately. */
static int journal_file_link_entry(
                JournalFile *f,
                Object *o,
                uint64_t offset,
                const EntryItem items[],
                size_t n_items) {

        int r;

        assert(f);
        assert(f->header);
        assert(o);
        assert(offset > 0);

        if (o->object.type != OBJECT_ENTRY)
                return -EINVAL;

        /* NOTE(review): full memory barrier before publishing the entry via the arrays —
         * presumably so readers never see the entry linked before its payload is written out;
         * confirm against the matching fence in journal_file_post_change(). */
        __atomic_thread_fence(__ATOMIC_SEQ_CST);

        /* Link up the entry itself */
        r = link_entry_into_array(f,
                                  &f->header->entry_array_offset,
                                  &f->header->n_entries,
                                  JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_offset) ? &f->header->tail_entry_array_offset : NULL,
                                  JOURNAL_HEADER_CONTAINS(f->header, tail_entry_array_n_entries) ? &f->header->tail_entry_array_n_entries : NULL,
                                  offset);
        if (r < 0)
                return r;

        /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */

        if (f->header->head_entry_realtime == 0)
                f->header->head_entry_realtime = o->entry.realtime;

        f->header->tail_entry_realtime = o->entry.realtime;
        f->header->tail_entry_monotonic = o->entry.monotonic;

        /* Link up the items */
        for (uint64_t i = 0; i < n_items; i++) {
                int k;

                /* If we fail to link an entry item because we can't allocate a new entry array, don't fail
                 * immediately but try to link the other entry items since it might still be possible to link
                 * those if they don't require a new entry array to be allocated. */

                k = journal_file_link_entry_item(f, offset, items[i].object_offset);
                if (k == -E2BIG)
                        r = k;
                else if (k < 0)
                        return k;
        }

        return r;
}
2058
2059 static void write_entry_item(JournalFile *f, Object *o, uint64_t i, const EntryItem *item) {
2060 assert(f);
2061 assert(o);
2062 assert(item);
2063
2064 if (JOURNAL_HEADER_COMPACT(f->header)) {
2065 assert(item->object_offset <= UINT32_MAX);
2066 o->entry.items.compact[i].object_offset = htole32(item->object_offset);
2067 } else {
2068 o->entry.items.regular[i].object_offset = htole64(item->object_offset);
2069 o->entry.items.regular[i].hash = htole64(item->hash);
2070 }
2071 }
2072
/* Appends an entry object referencing the given items (whose data objects must already exist in
 * the file), assigns it the next sequence number, and links it into the file. If 'boot_id' is
 * given it also becomes the header's boot ID. On success returns >= 0 and optionally the new
 * object/offset; note the return value is that of journal_file_link_entry(), so a partially
 * linked entry may yield -E2BIG. */
static int journal_file_append_entry_internal(
                JournalFile *f,
                const dual_timestamp *ts,
                const sd_id128_t *boot_id,
                uint64_t xor_hash,
                const EntryItem items[],
                size_t n_items,
                uint64_t *seqnum,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t np;
        uint64_t osize;
        Object *o;
        int r;

        assert(f);
        assert(f->header);
        assert(ts);
        assert(items || n_items == 0);

        osize = offsetof(Object, entry.items) + (n_items * journal_file_entry_item_size(f));

        r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
        if (r < 0)
                return r;

        o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
        o->entry.realtime = htole64(ts->realtime);
        o->entry.monotonic = htole64(ts->monotonic);
        o->entry.xor_hash = htole64(xor_hash);
        if (boot_id)
                f->header->boot_id = *boot_id;
        /* The entry always records the header's (possibly just updated) boot ID. */
        o->entry.boot_id = f->header->boot_id;

        for (size_t i = 0; i < n_items; i++)
                write_entry_item(f, o, i, &items[i]);

#if HAVE_GCRYPT
        r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
        if (r < 0)
                return r;
#endif

        r = journal_file_link_entry(f, o, np, items, n_items);
        if (r < 0)
                return r;

        if (ret_object)
                *ret_object = o;

        if (ret_offset)
                *ret_offset = np;

        return r;
}
2129
/* Notifies inotify watchers that the journal file changed; see the comment below for why a
 * truncate-to-same-size is used for that. Safe to call on files without an open fd. */
void journal_file_post_change(JournalFile *f) {
        assert(f);

        if (f->fd < 0)
                return;

        /* inotify() does not receive IN_MODIFY events from file
         * accesses done via mmap(). After each access we hence
         * trigger IN_MODIFY by truncating the journal file to its
         * current size which triggers IN_MODIFY. */

        /* NOTE(review): presumably this fence makes the preceding mmap writes visible before
         * watchers are woken — confirm against the matching fence in journal_file_link_entry(). */
        __atomic_thread_fence(__ATOMIC_SEQ_CST);

        if (ftruncate(f->fd, f->last_stat.st_size) < 0)
                log_debug_errno(errno, "Failed to truncate file to its own size: %m");
}
2146
2147 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
2148 assert(userdata);
2149
2150 journal_file_post_change(userdata);
2151
2152 return 1;
2153 }
2154
/* Arms the coalescing timer so journal_file_post_change() runs once after
 * f->post_change_timer_period. A no-op if the timer is already pending. If the event loop is
 * shutting down or arming the timer fails, the change is posted synchronously instead. */
static void schedule_post_change(JournalFile *f) {
        sd_event *e;
        int r;

        assert(f);
        assert(f->post_change_timer);

        assert_se(e = sd_event_source_get_event(f->post_change_timer));

        /* If we are already going down, post the change immediately. */
        if (IN_SET(sd_event_get_state(e), SD_EVENT_EXITING, SD_EVENT_FINISHED))
                goto fail;

        r = sd_event_source_get_enabled(f->post_change_timer, NULL);
        if (r < 0) {
                log_debug_errno(r, "Failed to get ftruncate timer state: %m");
                goto fail;
        }
        if (r > 0)
                /* Timer already armed — the pending run will cover this change too. */
                return;

        r = sd_event_source_set_time_relative(f->post_change_timer, f->post_change_timer_period);
        if (r < 0) {
                log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
                goto fail;
        }

        r = sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_ONESHOT);
        if (r < 0) {
                log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
                goto fail;
        }

        return;

fail:
        /* On failure, let's simply post the change immediately. */
        journal_file_post_change(f);
}
2194
2195 /* Enable coalesced change posting in a timer on the provided sd_event instance */
/* Enable coalesced change posting in a timer on the provided sd_event instance. 't' is the
 * coalescing period and must be nonzero. The timer source is created disabled; it is armed by
 * schedule_post_change() whenever a change needs posting. Returns 0 on success, -EINVAL if a
 * timer was already set up, other negative errno on failure. */
int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
        _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
        int r;

        assert(f);
        assert_return(!f->post_change_timer, -EINVAL);
        assert(e);
        assert(t);

        r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
        if (r < 0)
                return r;

        r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
        if (r < 0)
                return r;

        f->post_change_timer = TAKE_PTR(timer);
        f->post_change_timer_period = t;

        return r;
}
2218
2219 static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
2220 return CMP(ASSERT_PTR(a)->object_offset, ASSERT_PTR(b)->object_offset);
2221 }
2222
2223 static size_t remove_duplicate_entry_items(EntryItem items[], size_t n) {
2224 size_t j = 1;
2225
2226 assert(items || n == 0);
2227
2228 if (n <= 1)
2229 return n;
2230
2231 for (size_t i = 1; i < n; i++)
2232 if (items[i].object_offset != items[j - 1].object_offset)
2233 items[j++] = items[i];
2234
2235 return j;
2236 }
2237
2238 int journal_file_append_entry(
2239 JournalFile *f,
2240 const dual_timestamp *ts,
2241 const sd_id128_t *boot_id,
2242 const struct iovec iovec[],
2243 unsigned n_iovec,
2244 uint64_t *seqnum,
2245 Object **ret_object,
2246 uint64_t *ret_offset) {
2247
2248 _cleanup_free_ EntryItem *items_alloc = NULL;
2249 EntryItem *items;
2250 uint64_t xor_hash = 0;
2251 struct dual_timestamp _ts;
2252 int r;
2253
2254 assert(f);
2255 assert(f->header);
2256 assert(iovec);
2257 assert(n_iovec > 0);
2258
2259 if (ts) {
2260 if (!VALID_REALTIME(ts->realtime))
2261 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2262 "Invalid realtime timestamp %" PRIu64 ", refusing entry.",
2263 ts->realtime);
2264 if (!VALID_MONOTONIC(ts->monotonic))
2265 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2266 "Invalid monotomic timestamp %" PRIu64 ", refusing entry.",
2267 ts->monotonic);
2268 } else {
2269 dual_timestamp_get(&_ts);
2270 ts = &_ts;
2271 }
2272
2273 #if HAVE_GCRYPT
2274 r = journal_file_maybe_append_tag(f, ts->realtime);
2275 if (r < 0)
2276 return r;
2277 #endif
2278
2279 if (n_iovec < ALLOCA_MAX / sizeof(EntryItem) / 2)
2280 items = newa(EntryItem, n_iovec);
2281 else {
2282 items_alloc = new(EntryItem, n_iovec);
2283 if (!items_alloc)
2284 return -ENOMEM;
2285
2286 items = items_alloc;
2287 }
2288
2289 for (size_t i = 0; i < n_iovec; i++) {
2290 uint64_t p;
2291 Object *o;
2292
2293 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
2294 if (r < 0)
2295 return r;
2296
2297 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2298 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2299 * specific record, and give records with otherwise identical position (i.e. match in seqno,
2300 * timestamp, …) a stable ordering. But for that we can't have it that the hash of the
2301 * objects in each file is different since they are keyed. Hence let's calculate the Jenkins
2302 * hash here for that. This also has the benefit that cursors for old and new journal files
2303 * are completely identical (they include the XOR hash after all). For classic Jenkins-hash
2304 * files things are easier, we can just take the value from the stored record directly. */
2305
2306 if (JOURNAL_HEADER_KEYED_HASH(f->header))
2307 xor_hash ^= jenkins_hash64(iovec[i].iov_base, iovec[i].iov_len);
2308 else
2309 xor_hash ^= le64toh(o->data.hash);
2310
2311 items[i] = (EntryItem) {
2312 .object_offset = p,
2313 .hash = le64toh(o->data.hash),
2314 };
2315 }
2316
2317 /* Order by the position on disk, in order to improve seek
2318 * times for rotating media. */
2319 typesafe_qsort(items, n_iovec, entry_item_cmp);
2320 n_iovec = remove_duplicate_entry_items(items, n_iovec);
2321
2322 r = journal_file_append_entry_internal(f, ts, boot_id, xor_hash, items, n_iovec, seqnum, ret_object, ret_offset);
2323
2324 /* If the memory mapping triggered a SIGBUS then we return an
2325 * IO error and ignore the error code passed down to us, since
2326 * it is very likely just an effect of a nullified replacement
2327 * mapping page */
2328
2329 if (mmap_cache_fd_got_sigbus(f->cache_fd))
2330 r = -EIO;
2331
2332 if (f->post_change_timer)
2333 schedule_post_change(f);
2334 else
2335 journal_file_post_change(f);
2336
2337 return r;
2338 }
2339
/* Cached position in an entry array chain, so that repeated lookups on the same chain don't have to
 * re-walk it from the head every time. Stored in f->chain_cache, keyed by 'first'. */
typedef struct ChainCacheItem {
        uint64_t first; /* the array at the beginning of the chain */
        uint64_t array; /* the cached array */
        uint64_t begin; /* the first item in the cached array */
        uint64_t total; /* the total number of items in all arrays before this one in the chain */
        uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
} ChainCacheItem;
2347
/* Record the position 'array'/'begin'/'total'/'last_index' reached while walking the entry array
 * chain starting at 'first'. If 'ci' is NULL a new cache item is allocated for this chain (evicting
 * the oldest item once the cache is full); otherwise the existing item is updated in place. Caching
 * is best-effort: all failures are silently ignored. */
static void chain_cache_put(
                OrderedHashmap *h,
                ChainCacheItem *ci,
                uint64_t first,
                uint64_t array,
                uint64_t begin,
                uint64_t total,
                uint64_t last_index) {

        assert(h);

        if (!ci) {
                /* If the chain item to cache for this chain is the
                 * first one it's not worth caching anything */
                if (array == first)
                        return;

                if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
                        /* Cache full: recycle the oldest item rather than allocating. */
                        ci = ordered_hashmap_steal_first(h);
                        assert(ci);
                } else {
                        ci = new(ChainCacheItem, 1);
                        if (!ci)
                                /* OOM: just skip caching. */
                                return;
                }

                ci->first = first;

                if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
                        /* Insertion failed: we own ci here, so free it; the cache merely stays cold. */
                        free(ci);
                        return;
                }
        } else
                assert(ci->first == first);

        ci->array = array;
        ci->begin = begin;
        ci->total = total;
        ci->last_index = last_index;
}
2388
2389 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2390 assert(i);
2391
2392 /* Increase or decrease the specified index, in the right direction. */
2393
2394 if (direction == DIRECTION_DOWN) {
2395 if (*i >= n - 1)
2396 return 0;
2397
2398 (*i)++;
2399 } else {
2400 if (*i <= 0)
2401 return 0;
2402
2403 (*i)--;
2404 }
2405
2406 return 1;
2407 }
2408
2409 static int bump_entry_array(
2410 JournalFile *f,
2411 Object *o,
2412 uint64_t offset,
2413 uint64_t first,
2414 direction_t direction,
2415 uint64_t *ret) {
2416
2417 uint64_t p, q = 0;
2418 int r;
2419
2420 assert(f);
2421 assert(offset);
2422 assert(ret);
2423
2424 if (direction == DIRECTION_DOWN) {
2425 assert(o);
2426 return le64toh(o->entry_array.next_entry_array_offset);
2427 }
2428
2429 /* Entry array chains are a singly linked list, so to find the previous array in the chain, we have
2430 * to start iterating from the top. */
2431
2432 p = first;
2433
2434 while (p > 0 && p != offset) {
2435 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, p, &o);
2436 if (r < 0)
2437 return r;
2438
2439 q = p;
2440 p = le64toh(o->entry_array.next_entry_array_offset);
2441 }
2442
2443 /* If we can't find the previous entry array in the entry array chain, we're likely dealing with a
2444 * corrupted journal file. */
2445 if (p == 0)
2446 return -EBADMSG;
2447
2448 *ret = q;
2449
2450 return 0;
2451 }
2452
/* Fetch the i-th entry of the entry array chain starting at 'first', skipping over corrupted
 * entries in the given direction. Returns 1 and fills ret_object/ret_offset on success, 0 if no
 * (valid) entry exists at that position, negative errno on hard errors. */
static int generic_array_get(
                JournalFile *f,
                uint64_t first,
                uint64_t i,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t p = 0, a, t = 0, k;
        ChainCacheItem *ci;
        Object *o;
        int r;

        assert(f);

        /* FIXME: fix return value assignment on success. */

        a = first;

        /* Try the chain cache first */
        ci = ordered_hashmap_get(f->chain_cache, &first);
        if (ci && i > ci->total) {
                a = ci->array;
                i -= ci->total;
                t = ci->total;
        }

        /* Phase 1: walk the chain to the array that contains item i; 'i' becomes the index within
         * that array, 't' counts the items in all arrays skipped so far. */
        while (a > 0) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                if (IN_SET(r, -EBADMSG, -EADDRNOTAVAIL)) {
                        /* If there's corruption and we're going downwards, let's pretend we reached the
                         * final entry in the entry array chain. */

                        if (direction == DIRECTION_DOWN)
                                return 0;

                        /* If there's corruption and we're going upwards, move back to the previous entry
                         * array and start iterating entries from there. */

                        r = bump_entry_array(f, NULL, a, first, DIRECTION_UP, &a);
                        if (r < 0)
                                return r;

                        i = UINT64_MAX;

                        break;
                }
                if (r < 0)
                        return r;

                k = journal_file_entry_array_n_items(f, o);
                if (i < k)
                        break;

                i -= k;
                t += k;
                a = le64toh(o->entry_array.next_entry_array_offset);
        }

        /* If we've found the right location, now look for the first non-corrupt entry object (in the right
         * direction). */

        while (a > 0) {
                /* In the first iteration of the while loop, we reuse i, k and o from the previous while
                 * loop. */
                if (i == UINT64_MAX) {
                        r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
                        if (r < 0)
                                return r;

                        k = journal_file_entry_array_n_items(f, o);
                        if (k == 0)
                                break;

                        i = direction == DIRECTION_DOWN ? 0 : k - 1;
                }

                do {
                        p = journal_file_entry_array_item(f, o, i);

                        r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret_object);
                        if (r >= 0) {
                                /* Let's cache this item for the next invocation */
                                chain_cache_put(f->chain_cache, ci, first, a, journal_file_entry_array_item(f, o, 0), t, i);

                                if (ret_offset)
                                        *ret_offset = p;

                                return 1;
                        }
                        if (!IN_SET(r, -EADDRNOTAVAIL, -EBADMSG))
                                return r;

                        /* OK, so this entry is borked. Most likely some entry didn't get synced to
                         * disk properly, let's see if the next one might work for us instead. */
                        log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
                } while (bump_array_index(&i, direction, k) > 0);

                /* This whole array was corrupt; move on to the neighboring array in the chain. */
                r = bump_entry_array(f, o, a, first, direction, &a);
                if (r < 0)
                        return r;

                t += k;
                i = UINT64_MAX;
        }

        return 0;
}
2561
2562 static int generic_array_get_plus_one(
2563 JournalFile *f,
2564 uint64_t extra,
2565 uint64_t first,
2566 uint64_t i,
2567 direction_t direction,
2568 Object **ret_object,
2569 uint64_t *ret_offset) {
2570
2571 int r;
2572
2573 assert(f);
2574
2575 /* FIXME: fix return value assignment on success. */
2576
2577 if (i == 0) {
2578 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
2579 if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG))
2580 return generic_array_get(f, first, 0, direction, ret_object, ret_offset);
2581 if (r < 0)
2582 return r;
2583
2584 if (ret_offset)
2585 *ret_offset = extra;
2586
2587 return 1;
2588 }
2589
2590 return generic_array_get(f, first, i - 1, direction, ret_object, ret_offset);
2591 }
2592
/* Result codes for the test_object() callbacks used by generic_array_bisect(): TEST_FOUND means the
 * tested object matches the needle; TEST_LEFT means the tested object sorts before the needle (e.g.
 * test_object_offset() returns it when p < needle), so the search continues to the right; TEST_RIGHT
 * means it sorts after the needle, so the search continues to the left. */
enum {
        TEST_FOUND,
        TEST_LEFT,
        TEST_RIGHT
};
2598
static int generic_array_bisect(
                JournalFile *f,
                uint64_t first,
                uint64_t n,
                uint64_t needle,
                int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset,
                uint64_t *ret_idx) {

        /* Given an entry array chain, this function finds the object "closest" to the given needle in the
         * chain, taking into account the provided direction. A function can be provided to determine how
         * an object is matched against the given needle.
         *
         * Given a journal file, the offset of an object and the needle, the test_object() function should
         * return TEST_LEFT if the needle is located earlier in the entry array chain, TEST_RIGHT if the
         * needle is located later in the entry array chain and TEST_FOUND if the object matches the needle.
         * If test_object() returns TEST_FOUND for a specific object, that object's information will be used
         * to populate the return values of this function. If test_object() never returns TEST_FOUND, the
         * return values are populated with the details of one of the objects closest to the needle. If the
         * direction is DIRECTION_UP, the earlier object is used. Otherwise, the later object is used.
         */

        uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = UINT64_MAX;
        bool subtract_one = false;
        Object *array = NULL;
        ChainCacheItem *ci;
        int r;

        assert(f);
        assert(test_object);

        /* Start with the first array in the chain */
        a = first;

        ci = ordered_hashmap_get(f->chain_cache, &first);
        if (ci && n > ci->total && ci->begin != 0) {
                /* Ah, we have iterated this bisection array chain
                 * previously! Let's see if we can skip ahead in the
                 * chain, as far as the last time. But we can't jump
                 * backwards in the chain, so let's check that
                 * first. */

                r = test_object(f, ci->begin, needle);
                if (r < 0)
                        return r;

                if (r == TEST_LEFT) {
                        /* OK, what we are looking for is right of the
                         * begin of this EntryArray, so let's jump
                         * straight to previously cached array in the
                         * chain */

                        a = ci->array;
                        n -= ci->total;
                        t = ci->total;
                        last_index = ci->last_index;
                }
        }

        while (a > 0) {
                uint64_t left, right, k, lp;

                r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
                if (r < 0)
                        return r;

                k = journal_file_entry_array_n_items(f, array);
                right = MIN(k, n);
                if (right <= 0)
                        return 0;

                /* Probe the last item of this array first: if the needle lies beyond it, we can skip
                 * the whole array and move on to the next one in the chain. */
                i = right - 1;
                lp = p = journal_file_entry_array_item(f, array, i);
                if (p <= 0)
                        r = -EBADMSG;
                else
                        r = test_object(f, p, needle);
                if (r == -EBADMSG) {
                        log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
                        n = i;
                        continue;
                }
                if (r < 0)
                        return r;

                if (r == TEST_FOUND)
                        r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                if (r == TEST_RIGHT) {
                        /* The needle lies within this array: bisect the range [left, right]. */
                        left = 0;
                        right -= 1;

                        if (last_index != UINT64_MAX) {
                                assert(last_index <= right);

                                /* If we cached the last index we
                                 * looked at, let's try to not to jump
                                 * too wildly around and see if we can
                                 * limit the range to look at early to
                                 * the immediate neighbors of the last
                                 * index we looked at. */

                                if (last_index > 0) {
                                        uint64_t x = last_index - 1;

                                        p = journal_file_entry_array_item(f, array, x);
                                        if (p <= 0)
                                                return -EBADMSG;

                                        r = test_object(f, p, needle);
                                        if (r < 0)
                                                return r;

                                        if (r == TEST_FOUND)
                                                r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                                        if (r == TEST_RIGHT)
                                                right = x;
                                        else
                                                left = x + 1;
                                }

                                if (last_index < right) {
                                        uint64_t y = last_index + 1;

                                        p = journal_file_entry_array_item(f, array, y);
                                        if (p <= 0)
                                                return -EBADMSG;

                                        r = test_object(f, p, needle);
                                        if (r < 0)
                                                return r;

                                        if (r == TEST_FOUND)
                                                r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                                        if (r == TEST_RIGHT)
                                                right = y;
                                        else
                                                left = y + 1;
                                }
                        }

                        /* Classic binary search within this array. */
                        for (;;) {
                                if (left == right) {
                                        if (direction == DIRECTION_UP)
                                                subtract_one = true;

                                        i = left;
                                        goto found;
                                }

                                assert(left < right);
                                i = (left + right) / 2;

                                p = journal_file_entry_array_item(f, array, i);
                                if (p <= 0)
                                        r = -EBADMSG;
                                else
                                        r = test_object(f, p, needle);
                                if (r == -EBADMSG) {
                                        log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
                                        right = n = i;
                                        continue;
                                }
                                if (r < 0)
                                        return r;

                                if (r == TEST_FOUND)
                                        r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

                                if (r == TEST_RIGHT)
                                        right = i;
                                else
                                        left = i + 1;
                        }
                }

                if (k >= n) {
                        if (direction == DIRECTION_UP) {
                                i = n;
                                subtract_one = true;
                                goto found;
                        }

                        return 0;
                }

                /* Remember the last item of this array: it may become the result if we have to step
                 * one back across the array boundary (subtract_one with i == 0). */
                last_p = lp;

                n -= k;
                t += k;
                last_index = UINT64_MAX;
                a = le64toh(array->entry_array.next_entry_array_offset);
        }

        return 0;

found:
        if (subtract_one && t == 0 && i == 0)
                return 0;

        /* Let's cache this item for the next invocation */
        chain_cache_put(f->chain_cache, ci, first, a, journal_file_entry_array_item(f, array, 0), t, subtract_one ? (i > 0 ? i-1 : UINT64_MAX) : i);

        if (subtract_one && i == 0)
                p = last_p;
        else if (subtract_one)
                p = journal_file_entry_array_item(f, array, i - 1);
        else
                p = journal_file_entry_array_item(f, array, i);

        if (ret_object) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY, p, ret_object);
                if (r < 0)
                        return r;
        }

        if (ret_offset)
                *ret_offset = p;

        if (ret_idx)
                *ret_idx = t + i + (subtract_one ? -1 : 0);

        return 1;
}
2827
/* Like generic_array_bisect(), but with an "extra" entry logically prepended before the entry array
 * chain at index 0 — as used by data objects, which link their first entry inline. */
static int generic_array_bisect_plus_one(
                JournalFile *f,
                uint64_t extra,
                uint64_t first,
                uint64_t n,
                uint64_t needle,
                int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset,
                uint64_t *ret_idx) {

        int r;
        bool step_back = false;

        assert(f);
        assert(test_object);

        if (n <= 0)
                return 0;

        /* This bisects the array in object 'first', but first checks
         * an extra */
        r = test_object(f, extra, needle);
        if (r < 0)
                return r;

        if (r == TEST_FOUND)
                r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;

        /* if we are looking with DIRECTION_UP then we need to first
           see if in the actual array there is a matching entry, and
           return the last one of that. But if there isn't any we need
           to return this one. Hence remember this, and return it
           below. */
        if (r == TEST_LEFT)
                step_back = direction == DIRECTION_UP;

        if (r == TEST_RIGHT) {
                /* The extra entry already lies at/after the needle. */
                if (direction == DIRECTION_DOWN)
                        goto found;
                else
                        return 0;
        }

        r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret_object, ret_offset, ret_idx);

        if (r == 0 && step_back)
                goto found;

        /* Shift the returned index by one to account for the extra entry at index 0. */
        if (r > 0 && ret_idx)
                (*ret_idx)++;

        return r;

found:
        if (ret_object) {
                r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, ret_object);
                if (r < 0)
                        return r;
        }

        if (ret_offset)
                *ret_offset = extra;

        if (ret_idx)
                *ret_idx = 0;

        return 1;
}
2898
2899 _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
2900 assert(f);
2901 assert(p > 0);
2902
2903 if (p == needle)
2904 return TEST_FOUND;
2905 else if (p < needle)
2906 return TEST_LEFT;
2907 else
2908 return TEST_RIGHT;
2909 }
2910
2911 int journal_file_move_to_entry_by_offset(
2912 JournalFile *f,
2913 uint64_t p,
2914 direction_t direction,
2915 Object **ret_object,
2916 uint64_t *ret_offset) {
2917
2918 assert(f);
2919 assert(f->header);
2920
2921 return generic_array_bisect(
2922 f,
2923 le64toh(f->header->entry_array_offset),
2924 le64toh(f->header->n_entries),
2925 p,
2926 test_object_offset,
2927 direction,
2928 ret_object, ret_offset, NULL);
2929 }
2930
2931 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
2932 uint64_t sq;
2933 Object *o;
2934 int r;
2935
2936 assert(f);
2937 assert(p > 0);
2938
2939 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2940 if (r < 0)
2941 return r;
2942
2943 sq = le64toh(READ_NOW(o->entry.seqnum));
2944 if (sq == needle)
2945 return TEST_FOUND;
2946 else if (sq < needle)
2947 return TEST_LEFT;
2948 else
2949 return TEST_RIGHT;
2950 }
2951
2952 int journal_file_move_to_entry_by_seqnum(
2953 JournalFile *f,
2954 uint64_t seqnum,
2955 direction_t direction,
2956 Object **ret_object,
2957 uint64_t *ret_offset) {
2958
2959 assert(f);
2960 assert(f->header);
2961
2962 return generic_array_bisect(
2963 f,
2964 le64toh(f->header->entry_array_offset),
2965 le64toh(f->header->n_entries),
2966 seqnum,
2967 test_object_seqnum,
2968 direction,
2969 ret_object, ret_offset, NULL);
2970 }
2971
2972 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
2973 Object *o;
2974 uint64_t rt;
2975 int r;
2976
2977 assert(f);
2978 assert(p > 0);
2979
2980 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2981 if (r < 0)
2982 return r;
2983
2984 rt = le64toh(READ_NOW(o->entry.realtime));
2985 if (rt == needle)
2986 return TEST_FOUND;
2987 else if (rt < needle)
2988 return TEST_LEFT;
2989 else
2990 return TEST_RIGHT;
2991 }
2992
2993 int journal_file_move_to_entry_by_realtime(
2994 JournalFile *f,
2995 uint64_t realtime,
2996 direction_t direction,
2997 Object **ret_object,
2998 uint64_t *ret_offset) {
2999
3000 assert(f);
3001 assert(f->header);
3002
3003 return generic_array_bisect(
3004 f,
3005 le64toh(f->header->entry_array_offset),
3006 le64toh(f->header->n_entries),
3007 realtime,
3008 test_object_realtime,
3009 direction,
3010 ret_object, ret_offset, NULL);
3011 }
3012
3013 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
3014 Object *o;
3015 uint64_t m;
3016 int r;
3017
3018 assert(f);
3019 assert(p > 0);
3020
3021 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
3022 if (r < 0)
3023 return r;
3024
3025 m = le64toh(READ_NOW(o->entry.monotonic));
3026 if (m == needle)
3027 return TEST_FOUND;
3028 else if (m < needle)
3029 return TEST_LEFT;
3030 else
3031 return TEST_RIGHT;
3032 }
3033
3034 static int find_data_object_by_boot_id(
3035 JournalFile *f,
3036 sd_id128_t boot_id,
3037 Object **ret_object,
3038 uint64_t *ret_offset) {
3039
3040 char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
3041
3042 assert(f);
3043
3044 sd_id128_to_string(boot_id, t + 9);
3045 return journal_file_find_data_object(f, t, sizeof(t) - 1, ret_object, ret_offset);
3046 }
3047
3048 int journal_file_move_to_entry_by_monotonic(
3049 JournalFile *f,
3050 sd_id128_t boot_id,
3051 uint64_t monotonic,
3052 direction_t direction,
3053 Object **ret_object,
3054 uint64_t *ret_offset) {
3055
3056 Object *o;
3057 int r;
3058
3059 assert(f);
3060
3061 r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
3062 if (r < 0)
3063 return r;
3064 if (r == 0)
3065 return -ENOENT;
3066
3067 return generic_array_bisect_plus_one(
3068 f,
3069 le64toh(o->data.entry_offset),
3070 le64toh(o->data.entry_array_offset),
3071 le64toh(o->data.n_entries),
3072 monotonic,
3073 test_object_monotonic,
3074 direction,
3075 ret_object, ret_offset, NULL);
3076 }
3077
3078 void journal_file_reset_location(JournalFile *f) {
3079 assert(f);
3080
3081 f->location_type = LOCATION_HEAD;
3082 f->current_offset = 0;
3083 f->current_seqnum = 0;
3084 f->current_realtime = 0;
3085 f->current_monotonic = 0;
3086 zero(f->current_boot_id);
3087 f->current_xor_hash = 0;
3088 }
3089
3090 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
3091 assert(f);
3092 assert(o);
3093
3094 f->location_type = LOCATION_SEEK;
3095 f->current_offset = offset;
3096 f->current_seqnum = le64toh(o->entry.seqnum);
3097 f->current_realtime = le64toh(o->entry.realtime);
3098 f->current_monotonic = le64toh(o->entry.monotonic);
3099 f->current_boot_id = o->entry.boot_id;
3100 f->current_xor_hash = le64toh(o->entry.xor_hash);
3101 }
3102
/* Order the current locations of two journal files, qsort-style: returns < 0 if af's current entry
 * sorts before bf's, > 0 if after, 0 if the entries are considered identical. Both files must have a
 * valid saved location (LOCATION_SEEK). */
int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
        int r;

        assert(af);
        assert(af->header);
        assert(bf);
        assert(bf->header);
        assert(af->location_type == LOCATION_SEEK);
        assert(bf->location_type == LOCATION_SEEK);

        /* If contents, timestamps and seqnum match, these entries are
         * identical. */
        if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
            af->current_monotonic == bf->current_monotonic &&
            af->current_realtime == bf->current_realtime &&
            af->current_xor_hash == bf->current_xor_hash &&
            sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id) &&
            af->current_seqnum == bf->current_seqnum)
                return 0;

        if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {

                /* If this is from the same seqnum source, compare
                 * seqnums */
                r = CMP(af->current_seqnum, bf->current_seqnum);
                if (r != 0)
                        return r;

                /* Wow! This is weird, different data but the same
                 * seqnums? Something is borked, but let's make the
                 * best of it and compare by time. */
        }

        if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {

                /* If the boot id matches, compare monotonic time */
                r = CMP(af->current_monotonic, bf->current_monotonic);
                if (r != 0)
                        return r;
        }

        /* Otherwise, compare UTC time */
        r = CMP(af->current_realtime, bf->current_realtime);
        if (r != 0)
                return r;

        /* Finally, compare by contents */
        return CMP(af->current_xor_hash, bf->current_xor_hash);
}
3152
3153 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
3154
3155 /* Consider it an error if any of the two offsets is uninitialized */
3156 if (old_offset == 0 || new_offset == 0)
3157 return false;
3158
3159 /* If we go down, the new offset must be larger than the old one. */
3160 return direction == DIRECTION_DOWN ?
3161 new_offset > old_offset :
3162 new_offset < old_offset;
3163 }
3164
/* Move to the entry following (DIRECTION_DOWN) or preceding (DIRECTION_UP) the entry at offset 'p'
 * in the global entry array; p == 0 starts from the very beginning/end. Returns 1 on success, 0 if
 * there is no such entry, negative errno on error. */
int journal_file_next_entry(
                JournalFile *f,
                uint64_t p,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t i, n, ofs;
        int r;

        assert(f);
        assert(f->header);

        /* FIXME: fix return value assignment. */

        n = le64toh(READ_NOW(f->header->n_entries));
        if (n <= 0)
                return 0;

        if (p == 0)
                i = direction == DIRECTION_DOWN ? 0 : n - 1;
        else {
                /* Locate the array index of the entry at offset p first... */
                r = generic_array_bisect(f,
                                         le64toh(f->header->entry_array_offset),
                                         le64toh(f->header->n_entries),
                                         p,
                                         test_object_offset,
                                         DIRECTION_DOWN,
                                         NULL, NULL,
                                         &i);
                if (r <= 0)
                        return r;

                /* ...then step one index in the requested direction. */
                r = bump_array_index(&i, direction, n);
                if (r <= 0)
                        return r;
        }

        /* And jump to it */
        r = generic_array_get(f, le64toh(f->header->entry_array_offset), i, direction, ret_object, &ofs);
        if (r <= 0)
                return r;

        /* Ensure our array is properly ordered. */
        if (p > 0 && !check_properly_ordered(ofs, p, direction))
                return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
                                       "%s: entry array not properly ordered at entry %" PRIu64,
                                       f->path, i);

        if (ret_offset)
                *ret_offset = ofs;

        return 1;
}
3219
/* Move to the first (DIRECTION_DOWN) or last (DIRECTION_UP) entry referencing the given data object.
 * Returns 1 on success, 0 if the data object has no entries, negative errno on error. */
int journal_file_next_entry_for_data(
                JournalFile *f,
                Object *d,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t i, n, ofs;
        int r;

        assert(f);
        assert(d);
        assert(d->object.type == OBJECT_DATA);

        /* FIXME: fix return value assignment. */

        n = le64toh(READ_NOW(d->data.n_entries));
        if (n <= 0)
                return n;

        /* Index 0 refers to the entry linked inline from the data object itself, handled by the
         * "plus_one" variant below. */
        i = direction == DIRECTION_DOWN ? 0 : n - 1;

        r = generic_array_get_plus_one(f,
                                       le64toh(d->data.entry_offset),
                                       le64toh(d->data.entry_array_offset),
                                       i,
                                       direction,
                                       ret_object, &ofs);
        if (r <= 0)
                return r;

        if (ret_offset)
                *ret_offset = ofs;

        return 1;
}
3256
3257 int journal_file_move_to_entry_by_offset_for_data(
3258 JournalFile *f,
3259 Object *d,
3260 uint64_t p,
3261 direction_t direction,
3262 Object **ret, uint64_t *ret_offset) {
3263
3264 assert(f);
3265 assert(d);
3266 assert(d->object.type == OBJECT_DATA);
3267
3268 return generic_array_bisect_plus_one(
3269 f,
3270 le64toh(d->data.entry_offset),
3271 le64toh(d->data.entry_array_offset),
3272 le64toh(d->data.n_entries),
3273 p,
3274 test_object_offset,
3275 direction,
3276 ret, ret_offset, NULL);
3277 }
3278
/* Seek to the entry closest to (boot_id, monotonic) that also references the given data object:
 * first bisect the boot ID's entry list by timestamp, then alternately bisect both entry lists by
 * offset until they converge on an entry present in both. */
int journal_file_move_to_entry_by_monotonic_for_data(
                JournalFile *f,
                Object *d,
                sd_id128_t boot_id,
                uint64_t monotonic,
                direction_t direction,
                Object **ret_object,
                uint64_t *ret_offset) {

        uint64_t b, z, entry_offset, entry_array_offset, n_entries;
        Object *o;
        int r;

        assert(f);
        assert(d);
        assert(d->object.type == OBJECT_DATA);

        /* Save all the required data before the data object gets invalidated. */
        entry_offset = le64toh(READ_NOW(d->data.entry_offset));
        entry_array_offset = le64toh(READ_NOW(d->data.entry_array_offset));
        n_entries = le64toh(READ_NOW(d->data.n_entries));

        /* First, seek by time */
        r = find_data_object_by_boot_id(f, boot_id, &o, &b);
        if (r < 0)
                return r;
        if (r == 0)
                return -ENOENT;

        r = generic_array_bisect_plus_one(f,
                                          le64toh(o->data.entry_offset),
                                          le64toh(o->data.entry_array_offset),
                                          le64toh(o->data.n_entries),
                                          monotonic,
                                          test_object_monotonic,
                                          direction,
                                          NULL, &z, NULL);
        if (r <= 0)
                return r;

        /* And now, continue seeking until we find an entry that
         * exists in both bisection arrays */

        r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
        if (r < 0)
                return r;

        for (;;) {
                uint64_t p, q;

                /* Candidate z from the boot ID list: find the closest entry in d's list... */
                r = generic_array_bisect_plus_one(f,
                                                  entry_offset,
                                                  entry_array_offset,
                                                  n_entries,
                                                  z,
                                                  test_object_offset,
                                                  direction,
                                                  NULL, &p, NULL);
                if (r <= 0)
                        return r;

                /* ...and map that one back into the boot ID's entry list. */
                r = generic_array_bisect_plus_one(f,
                                                  le64toh(o->data.entry_offset),
                                                  le64toh(o->data.entry_array_offset),
                                                  le64toh(o->data.n_entries),
                                                  p,
                                                  test_object_offset,
                                                  direction,
                                                  NULL, &q, NULL);

                if (r <= 0)
                        return r;

                /* Both lists agree: this entry is in both, we are done. */
                if (p == q) {
                        if (ret_object) {
                                r = journal_file_move_to_object(f, OBJECT_ENTRY, q, ret_object);
                                if (r < 0)
                                        return r;
                        }

                        if (ret_offset)
                                *ret_offset = q;

                        return 1;
                }

                z = q;
        }
}
3368
3369 int journal_file_move_to_entry_by_seqnum_for_data(
3370 JournalFile *f,
3371 Object *d,
3372 uint64_t seqnum,
3373 direction_t direction,
3374 Object **ret_object,
3375 uint64_t *ret_offset) {
3376
3377 assert(f);
3378 assert(d);
3379 assert(d->object.type == OBJECT_DATA);
3380
3381 return generic_array_bisect_plus_one(
3382 f,
3383 le64toh(d->data.entry_offset),
3384 le64toh(d->data.entry_array_offset),
3385 le64toh(d->data.n_entries),
3386 seqnum,
3387 test_object_seqnum,
3388 direction,
3389 ret_object, ret_offset, NULL);
3390 }
3391
3392 int journal_file_move_to_entry_by_realtime_for_data(
3393 JournalFile *f,
3394 Object *d,
3395 uint64_t realtime,
3396 direction_t direction,
3397 Object **ret, uint64_t *ret_offset) {
3398
3399 assert(f);
3400 assert(d);
3401 assert(d->object.type == OBJECT_DATA);
3402
3403 return generic_array_bisect_plus_one(
3404 f,
3405 le64toh(d->data.entry_offset),
3406 le64toh(d->data.entry_array_offset),
3407 le64toh(d->data.n_entries),
3408 realtime,
3409 test_object_realtime,
3410 direction,
3411 ret, ret_offset, NULL);
3412 }
3413
3414 void journal_file_dump(JournalFile *f) {
3415 Object *o;
3416 uint64_t p;
3417 int r;
3418
3419 assert(f);
3420 assert(f->header);
3421
3422 journal_file_print_header(f);
3423
3424 p = le64toh(READ_NOW(f->header->header_size));
3425 while (p != 0) {
3426 const char *s;
3427 Compression c;
3428
3429 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
3430 if (r < 0)
3431 goto fail;
3432
3433 s = journal_object_type_to_string(o->object.type);
3434
3435 switch (o->object.type) {
3436
3437 case OBJECT_ENTRY:
3438 assert(s);
3439
3440 printf("Type: %s seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
3441 s,
3442 le64toh(o->entry.seqnum),
3443 le64toh(o->entry.monotonic),
3444 le64toh(o->entry.realtime));
3445 break;
3446
3447 case OBJECT_TAG:
3448 assert(s);
3449
3450 printf("Type: %s seqnum=%"PRIu64" epoch=%"PRIu64"\n",
3451 s,
3452 le64toh(o->tag.seqnum),
3453 le64toh(o->tag.epoch));
3454 break;
3455
3456 default:
3457 if (s)
3458 printf("Type: %s \n", s);
3459 else
3460 printf("Type: unknown (%i)", o->object.type);
3461
3462 break;
3463 }
3464
3465 c = COMPRESSION_FROM_OBJECT(o);
3466 if (c > COMPRESSION_NONE)
3467 printf("Flags: %s\n",
3468 compression_to_string(c));
3469
3470 if (p == le64toh(f->header->tail_object_offset))
3471 p = 0;
3472 else
3473 p += ALIGN64(le64toh(o->object.size));
3474 }
3475
3476 return;
3477 fail:
3478 log_error("File corrupt");
3479 }
3480
/* Note: the lifetime of the compound literal is the immediately surrounding block. Falls back to a
 * placeholder string when the timestamp cannot be formatted. */
#define FORMAT_TIMESTAMP_SAFE(t) (FORMAT_TIMESTAMP(t) ?: " --- ")
3483
3484 void journal_file_print_header(JournalFile *f) {
3485 struct stat st;
3486
3487 assert(f);
3488 assert(f->header);
3489
3490 printf("File path: %s\n"
3491 "File ID: %s\n"
3492 "Machine ID: %s\n"
3493 "Boot ID: %s\n"
3494 "Sequential number ID: %s\n"
3495 "State: %s\n"
3496 "Compatible flags:%s%s\n"
3497 "Incompatible flags:%s%s%s%s%s%s\n"
3498 "Header size: %"PRIu64"\n"
3499 "Arena size: %"PRIu64"\n"
3500 "Data hash table size: %"PRIu64"\n"
3501 "Field hash table size: %"PRIu64"\n"
3502 "Rotate suggested: %s\n"
3503 "Head sequential number: %"PRIu64" (%"PRIx64")\n"
3504 "Tail sequential number: %"PRIu64" (%"PRIx64")\n"
3505 "Head realtime timestamp: %s (%"PRIx64")\n"
3506 "Tail realtime timestamp: %s (%"PRIx64")\n"
3507 "Tail monotonic timestamp: %s (%"PRIx64")\n"
3508 "Objects: %"PRIu64"\n"
3509 "Entry objects: %"PRIu64"\n",
3510 f->path,
3511 SD_ID128_TO_STRING(f->header->file_id),
3512 SD_ID128_TO_STRING(f->header->machine_id),
3513 SD_ID128_TO_STRING(f->header->boot_id),
3514 SD_ID128_TO_STRING(f->header->seqnum_id),
3515 f->header->state == STATE_OFFLINE ? "OFFLINE" :
3516 f->header->state == STATE_ONLINE ? "ONLINE" :
3517 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
3518 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
3519 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
3520 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
3521 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
3522 JOURNAL_HEADER_COMPRESSED_ZSTD(f->header) ? " COMPRESSED-ZSTD" : "",
3523 JOURNAL_HEADER_KEYED_HASH(f->header) ? " KEYED-HASH" : "",
3524 JOURNAL_HEADER_COMPACT(f->header) ? " COMPACT" : "",
3525 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
3526 le64toh(f->header->header_size),
3527 le64toh(f->header->arena_size),
3528 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3529 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
3530 yes_no(journal_file_rotate_suggested(f, 0, LOG_DEBUG)),
3531 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
3532 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
3533 FORMAT_TIMESTAMP_SAFE(le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
3534 FORMAT_TIMESTAMP_SAFE(le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
3535 FORMAT_TIMESPAN(le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
3536 le64toh(f->header->n_objects),
3537 le64toh(f->header->n_entries));
3538
3539 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3540 printf("Data objects: %"PRIu64"\n"
3541 "Data hash table fill: %.1f%%\n",
3542 le64toh(f->header->n_data),
3543 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
3544
3545 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3546 printf("Field objects: %"PRIu64"\n"
3547 "Field hash table fill: %.1f%%\n",
3548 le64toh(f->header->n_fields),
3549 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
3550
3551 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
3552 printf("Tag objects: %"PRIu64"\n",
3553 le64toh(f->header->n_tags));
3554 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
3555 printf("Entry array objects: %"PRIu64"\n",
3556 le64toh(f->header->n_entry_arrays));
3557
3558 if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth))
3559 printf("Deepest field hash chain: %" PRIu64"\n",
3560 f->header->field_hash_chain_depth);
3561
3562 if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth))
3563 printf("Deepest data hash chain: %" PRIu64"\n",
3564 f->header->data_hash_chain_depth);
3565
3566 if (fstat(f->fd, &st) >= 0)
3567 printf("Disk usage: %s\n", FORMAT_BYTES((uint64_t) st.st_blocks * 512ULL));
3568 }
3569
3570 static int journal_file_warn_btrfs(JournalFile *f) {
3571 unsigned attrs;
3572 int r;
3573
3574 assert(f);
3575
3576 /* Before we write anything, check if the COW logic is turned
3577 * off on btrfs. Given our write pattern that is quite
3578 * unfriendly to COW file systems this should greatly improve
3579 * performance on COW file systems, such as btrfs, at the
3580 * expense of data integrity features (which shouldn't be too
3581 * bad, given that we do our own checksumming). */
3582
3583 r = fd_is_fs_type(f->fd, BTRFS_SUPER_MAGIC);
3584 if (r < 0)
3585 return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
3586 if (!r)
3587 return 0;
3588
3589 r = read_attr_fd(f->fd, &attrs);
3590 if (r < 0)
3591 return log_warning_errno(r, "Failed to read file attributes: %m");
3592
3593 if (attrs & FS_NOCOW_FL) {
3594 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3595 return 0;
3596 }
3597
3598 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3599 "This is likely to slow down journal access substantially, please consider turning "
3600 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
3601
3602 return 1;
3603 }
3604
/* Fills in any unset (UINT64_MAX) fields of *m with defaults, derived where possible from the size
 * of the file system backing 'fd'. If 'compact' is true, max_size is additionally capped at
 * JOURNAL_COMPACT_SIZE_MAX (UINT32_MAX). Explicitly configured values are page-aligned and
 * cross-clamped so the individual limits stay consistent with each other. Note that the clamping
 * steps below are order-dependent: max_size is derived from max_use, and may in turn push max_use
 * back up. */
static void journal_default_metrics(JournalMetrics *m, int fd, bool compact) {
        struct statvfs ss;
        uint64_t fs_size = 0;

        assert(m);
        assert(fd >= 0);

        /* Best effort: if we can't stat the file system we fall back to the
         * fs-size-independent defaults below. */
        if (fstatvfs(fd, &ss) >= 0)
                fs_size = ss.f_frsize * ss.f_blocks;
        else
                log_debug_errno(errno, "Failed to determine disk size: %m");

        if (m->max_use == UINT64_MAX) {

                if (fs_size > 0)
                        m->max_use = CLAMP(PAGE_ALIGN(fs_size / 10), /* 10% of file system size */
                                           MAX_USE_LOWER, MAX_USE_UPPER);
                else
                        m->max_use = MAX_USE_LOWER;
        } else {
                m->max_use = PAGE_ALIGN(m->max_use);

                /* A non-zero use limit below two minimal files is not useful, raise it. */
                if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
                        m->max_use = JOURNAL_FILE_SIZE_MIN*2;
        }

        if (m->min_use == UINT64_MAX) {
                if (fs_size > 0)
                        m->min_use = CLAMP(PAGE_ALIGN(fs_size / 50), /* 2% of file system size */
                                           MIN_USE_LOW, MIN_USE_HIGH);
                else
                        m->min_use = MIN_USE_LOW;
        }

        /* The guaranteed minimum must never exceed the overall cap. */
        if (m->min_use > m->max_use)
                m->min_use = m->max_use;

        if (m->max_size == UINT64_MAX)
                m->max_size = MIN(PAGE_ALIGN(m->max_use / 8), /* 8 chunks */
                                  MAX_SIZE_UPPER);
        else
                m->max_size = PAGE_ALIGN(m->max_size);

        /* Compact-mode files cap at JOURNAL_COMPACT_SIZE_MAX (UINT32_MAX). */
        if (compact && m->max_size > JOURNAL_COMPACT_SIZE_MAX)
                m->max_size = JOURNAL_COMPACT_SIZE_MAX;

        if (m->max_size != 0) {
                if (m->max_size < JOURNAL_FILE_SIZE_MIN)
                        m->max_size = JOURNAL_FILE_SIZE_MIN;

                /* Make sure at least two files of max_size fit under max_use,
                 * possibly raising max_use again. */
                if (m->max_use != 0 && m->max_size*2 > m->max_use)
                        m->max_use = m->max_size*2;
        }

        if (m->min_size == UINT64_MAX)
                m->min_size = JOURNAL_FILE_SIZE_MIN;
        else
                m->min_size = CLAMP(PAGE_ALIGN(m->min_size),
                                    JOURNAL_FILE_SIZE_MIN,
                                    m->max_size ?: UINT64_MAX);

        if (m->keep_free == UINT64_MAX) {
                if (fs_size > 0)
                        m->keep_free = MIN(PAGE_ALIGN(fs_size / 20), /* 5% of file system size */
                                           KEEP_FREE_UPPER);
                else
                        m->keep_free = DEFAULT_KEEP_FREE;
        }

        if (m->n_max_files == UINT64_MAX)
                m->n_max_files = DEFAULT_N_MAX_FILES;

        log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
                  FORMAT_BYTES(m->min_use),
                  FORMAT_BYTES(m->max_use),
                  FORMAT_BYTES(m->max_size),
                  FORMAT_BYTES(m->min_size),
                  FORMAT_BYTES(m->keep_free),
                  m->n_max_files);
}
3685
3686 int journal_file_open(
3687 int fd,
3688 const char *fname,
3689 int open_flags,
3690 JournalFileFlags file_flags,
3691 mode_t mode,
3692 uint64_t compress_threshold_bytes,
3693 JournalMetrics *metrics,
3694 MMapCache *mmap_cache,
3695 JournalFile *template,
3696 JournalFile **ret) {
3697
3698 bool newly_created = false;
3699 JournalFile *f;
3700 void *h;
3701 int r;
3702
3703 assert(fd >= 0 || fname);
3704 assert(mmap_cache);
3705 assert(ret);
3706
3707 if (!IN_SET((open_flags & O_ACCMODE), O_RDONLY, O_RDWR))
3708 return -EINVAL;
3709
3710 if ((open_flags & O_ACCMODE) == O_RDONLY && FLAGS_SET(open_flags, O_CREAT))
3711 return -EINVAL;
3712
3713 if (fname && (open_flags & O_CREAT) && !endswith(fname, ".journal"))
3714 return -EINVAL;
3715
3716 f = new(JournalFile, 1);
3717 if (!f)
3718 return -ENOMEM;
3719
3720 *f = (JournalFile) {
3721 .fd = fd,
3722 .mode = mode,
3723 .open_flags = open_flags,
3724 .compress_threshold_bytes = compress_threshold_bytes == UINT64_MAX ?
3725 DEFAULT_COMPRESS_THRESHOLD :
3726 MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
3727 };
3728
3729 if (fname) {
3730 f->path = strdup(fname);
3731 if (!f->path) {
3732 r = -ENOMEM;
3733 goto fail;
3734 }
3735 } else {
3736 assert(fd >= 0);
3737
3738 /* If we don't know the path, fill in something explanatory and vaguely useful */
3739 if (asprintf(&f->path, "/proc/self/%i", fd) < 0) {
3740 r = -ENOMEM;
3741 goto fail;
3742 }
3743 }
3744
3745 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
3746 if (!f->chain_cache) {
3747 r = -ENOMEM;
3748 goto fail;
3749 }
3750
3751 if (f->fd < 0) {
3752 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
3753 * or so, we likely fail quickly than block for long. For regular files O_NONBLOCK has no effect, hence
3754 * it doesn't hurt in that case. */
3755
3756 f->fd = openat_report_new(AT_FDCWD, f->path, f->open_flags|O_CLOEXEC|O_NONBLOCK, f->mode, &newly_created);
3757 if (f->fd < 0) {
3758 r = f->fd;
3759 goto fail;
3760 }
3761
3762 /* fds we opened here by us should also be closed by us. */
3763 f->close_fd = true;
3764
3765 r = fd_nonblock(f->fd, false);
3766 if (r < 0)
3767 goto fail;
3768
3769 if (!newly_created) {
3770 r = journal_file_fstat(f);
3771 if (r < 0)
3772 goto fail;
3773 }
3774 } else {
3775 r = journal_file_fstat(f);
3776 if (r < 0)
3777 goto fail;
3778
3779 /* If we just got the fd passed in, we don't really know if we created the file anew */
3780 newly_created = f->last_stat.st_size == 0 && journal_file_writable(f);
3781 }
3782
3783 f->cache_fd = mmap_cache_add_fd(mmap_cache, f->fd, mmap_prot_from_open_flags(open_flags));
3784 if (!f->cache_fd) {
3785 r = -ENOMEM;
3786 goto fail;
3787 }
3788
3789 if (newly_created) {
3790 (void) journal_file_warn_btrfs(f);
3791
3792 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
3793 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
3794 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
3795 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
3796 * solely on mtime/atime/ctime of the file. */
3797 (void) fd_setcrtime(f->fd, 0);
3798
3799 r = journal_file_init_header(f, file_flags, template);
3800 if (r < 0)
3801 goto fail;
3802
3803 r = journal_file_fstat(f);
3804 if (r < 0)
3805 goto fail;
3806 }
3807
3808 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
3809 r = -ENODATA;
3810 goto fail;
3811 }
3812
3813 r = mmap_cache_fd_get(f->cache_fd, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h);
3814 if (r == -EINVAL) {
3815 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
3816 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
3817 * code. */
3818 r = -EAFNOSUPPORT;
3819 goto fail;
3820 }
3821 if (r < 0)
3822 goto fail;
3823
3824 f->header = h;
3825
3826 if (!newly_created) {
3827 r = journal_file_verify_header(f);
3828 if (r < 0)
3829 goto fail;
3830 }
3831
3832 #if HAVE_GCRYPT
3833 if (!newly_created && journal_file_writable(f) && JOURNAL_HEADER_SEALED(f->header)) {
3834 r = journal_file_fss_load(f);
3835 if (r < 0)
3836 goto fail;
3837 }
3838 #endif
3839
3840 if (journal_file_writable(f)) {
3841 if (metrics) {
3842 journal_default_metrics(metrics, f->fd, JOURNAL_HEADER_COMPACT(f->header));
3843 f->metrics = *metrics;
3844 } else if (template)
3845 f->metrics = template->metrics;
3846
3847 r = journal_file_refresh_header(f);
3848 if (r < 0)
3849 goto fail;
3850 }
3851
3852 #if HAVE_GCRYPT
3853 r = journal_file_hmac_setup(f);
3854 if (r < 0)
3855 goto fail;
3856 #endif
3857
3858 if (newly_created) {
3859 r = journal_file_setup_field_hash_table(f);
3860 if (r < 0)
3861 goto fail;
3862
3863 r = journal_file_setup_data_hash_table(f);
3864 if (r < 0)
3865 goto fail;
3866
3867 #if HAVE_GCRYPT
3868 r = journal_file_append_first_tag(f);
3869 if (r < 0)
3870 goto fail;
3871 #endif
3872 }
3873
3874 if (mmap_cache_fd_got_sigbus(f->cache_fd)) {
3875 r = -EIO;
3876 goto fail;
3877 }
3878
3879 if (template && template->post_change_timer) {
3880 r = journal_file_enable_post_change_timer(
3881 f,
3882 sd_event_source_get_event(template->post_change_timer),
3883 template->post_change_timer_period);
3884
3885 if (r < 0)
3886 goto fail;
3887 }
3888
3889 /* The file is opened now successfully, thus we take possession of any passed in fd. */
3890 f->close_fd = true;
3891
3892 if (DEBUG_LOGGING) {
3893 static int last_seal = -1, last_compress = -1, last_keyed_hash = -1;
3894 static uint64_t last_bytes = UINT64_MAX;
3895
3896 if (last_seal != JOURNAL_HEADER_SEALED(f->header) ||
3897 last_keyed_hash != JOURNAL_HEADER_KEYED_HASH(f->header) ||
3898 last_compress != JOURNAL_FILE_COMPRESS(f) ||
3899 last_bytes != f->compress_threshold_bytes) {
3900
3901 log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
3902 yes_no(JOURNAL_HEADER_SEALED(f->header)), yes_no(JOURNAL_HEADER_KEYED_HASH(f->header)),
3903 yes_no(JOURNAL_FILE_COMPRESS(f)), FORMAT_BYTES(f->compress_threshold_bytes));
3904 last_seal = JOURNAL_HEADER_SEALED(f->header);
3905 last_keyed_hash = JOURNAL_HEADER_KEYED_HASH(f->header);
3906 last_compress = JOURNAL_FILE_COMPRESS(f);
3907 last_bytes = f->compress_threshold_bytes;
3908 }
3909 }
3910
3911 *ret = f;
3912 return 0;
3913
3914 fail:
3915 if (f->cache_fd && mmap_cache_fd_got_sigbus(f->cache_fd))
3916 r = -EIO;
3917
3918 (void) journal_file_close(f);
3919
3920 if (newly_created && fd < 0)
3921 (void) unlink(fname);
3922
3923 return r;
3924 }
3925
3926 int journal_file_archive(JournalFile *f, char **ret_previous_path) {
3927 _cleanup_free_ char *p = NULL;
3928
3929 assert(f);
3930
3931 if (!journal_file_writable(f))
3932 return -EINVAL;
3933
3934 /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3935 * rotation, since we don't know the actual path, and couldn't rename the file hence. */
3936 if (path_startswith(f->path, "/proc/self/fd"))
3937 return -EINVAL;
3938
3939 if (!endswith(f->path, ".journal"))
3940 return -EINVAL;
3941
3942 if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
3943 (int) strlen(f->path) - 8, f->path,
3944 SD_ID128_FORMAT_VAL(f->header->seqnum_id),
3945 le64toh(f->header->head_entry_seqnum),
3946 le64toh(f->header->head_entry_realtime)) < 0)
3947 return -ENOMEM;
3948
3949 /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
3950 * ignore that case. */
3951 if (rename(f->path, p) < 0 && errno != ENOENT)
3952 return -errno;
3953
3954 /* Sync the rename to disk */
3955 (void) fsync_directory_of_file(f->fd);
3956
3957 if (ret_previous_path)
3958 *ret_previous_path = f->path;
3959 else
3960 free(f->path);
3961
3962 f->path = TAKE_PTR(p);
3963
3964 /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
3965 * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
3966 * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
3967 * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
3968 * occurs. */
3969 f->archive = true;
3970
3971 return 0;
3972 }
3973
3974 int journal_file_dispose(int dir_fd, const char *fname) {
3975 _cleanup_free_ char *p = NULL;
3976
3977 assert(fname);
3978
3979 /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shutdown. Note that
3980 * this is done without looking into the file or changing any of its contents. The idea is that this is called
3981 * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
3982 * for writing anymore. */
3983
3984 if (!endswith(fname, ".journal"))
3985 return -EINVAL;
3986
3987 if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
3988 (int) strlen(fname) - 8, fname,
3989 now(CLOCK_REALTIME),
3990 random_u64()) < 0)
3991 return -ENOMEM;
3992
3993 if (renameat(dir_fd, fname, dir_fd, p) < 0)
3994 return -errno;
3995
3996 return 0;
3997 }
3998
3999 int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
4000 _cleanup_free_ EntryItem *items_alloc = NULL;
4001 EntryItem *items;
4002 uint64_t q, n, xor_hash = 0;
4003 const sd_id128_t *boot_id;
4004 dual_timestamp ts;
4005 int r;
4006
4007 assert(from);
4008 assert(to);
4009 assert(o);
4010 assert(p > 0);
4011
4012 if (!journal_file_writable(to))
4013 return -EPERM;
4014
4015 ts = (dual_timestamp) {
4016 .monotonic = le64toh(o->entry.monotonic),
4017 .realtime = le64toh(o->entry.realtime),
4018 };
4019 boot_id = &o->entry.boot_id;
4020
4021 n = journal_file_entry_n_items(from, o);
4022
4023 if (n < ALLOCA_MAX / sizeof(EntryItem) / 2)
4024 items = newa(EntryItem, n);
4025 else {
4026 items_alloc = new(EntryItem, n);
4027 if (!items_alloc)
4028 return -ENOMEM;
4029
4030 items = items_alloc;
4031 }
4032
4033 for (uint64_t i = 0; i < n; i++) {
4034 uint64_t h;
4035 void *data;
4036 size_t l;
4037 Object *u;
4038
4039 q = journal_file_entry_item_object_offset(from, o, i);
4040 r = journal_file_data_payload(from, NULL, q, NULL, 0, 0, &data, &l);
4041 if (IN_SET(r, -EADDRNOTAVAIL, -EBADMSG)) {
4042 log_debug_errno(r, "Entry item %"PRIu64" data object is bad, skipping over it: %m", i);
4043 goto next;
4044 }
4045 if (r < 0)
4046 return r;
4047 assert(r > 0);
4048
4049 if (l == 0)
4050 return -EBADMSG;
4051
4052 r = journal_file_append_data(to, data, l, &u, &h);
4053 if (r < 0)
4054 return r;
4055
4056 if (JOURNAL_HEADER_KEYED_HASH(to->header))
4057 xor_hash ^= jenkins_hash64(data, l);
4058 else
4059 xor_hash ^= le64toh(u->data.hash);
4060
4061 items[i] = (EntryItem) {
4062 .object_offset = h,
4063 .hash = le64toh(u->data.hash),
4064 };
4065
4066 next:
4067 /* The above journal_file_data_payload() may clear or overwrite cached object. Hence, we need
4068 * to re-read the object from the cache. */
4069 r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
4070 if (r < 0)
4071 return r;
4072 }
4073
4074 r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n, NULL, NULL, NULL);
4075
4076 if (mmap_cache_fd_got_sigbus(to->cache_fd))
4077 return -EIO;
4078
4079 return r;
4080 }
4081
4082 void journal_reset_metrics(JournalMetrics *m) {
4083 assert(m);
4084
4085 /* Set everything to "pick automatic values". */
4086
4087 *m = (JournalMetrics) {
4088 .min_use = UINT64_MAX,
4089 .max_use = UINT64_MAX,
4090 .min_size = UINT64_MAX,
4091 .max_size = UINT64_MAX,
4092 .keep_free = UINT64_MAX,
4093 .n_max_files = UINT64_MAX,
4094 };
4095 }
4096
4097 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *ret_from, usec_t *ret_to) {
4098 assert(f);
4099 assert(f->header);
4100 assert(ret_from || ret_to);
4101
4102 if (ret_from) {
4103 if (f->header->head_entry_realtime == 0)
4104 return -ENOENT;
4105
4106 *ret_from = le64toh(f->header->head_entry_realtime);
4107 }
4108
4109 if (ret_to) {
4110 if (f->header->tail_entry_realtime == 0)
4111 return -ENOENT;
4112
4113 *ret_to = le64toh(f->header->tail_entry_realtime);
4114 }
4115
4116 return 1;
4117 }
4118
4119 int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *ret_from, usec_t *ret_to) {
4120 Object *o;
4121 uint64_t p;
4122 int r;
4123
4124 assert(f);
4125 assert(ret_from || ret_to);
4126
4127 /* FIXME: fix return value assignment on success with 0. */
4128
4129 r = find_data_object_by_boot_id(f, boot_id, &o, &p);
4130 if (r <= 0)
4131 return r;
4132
4133 if (le64toh(o->data.n_entries) <= 0)
4134 return 0;
4135
4136 if (ret_from) {
4137 r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
4138 if (r < 0)
4139 return r;
4140
4141 *ret_from = le64toh(o->entry.monotonic);
4142 }
4143
4144 if (ret_to) {
4145 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
4146 if (r < 0)
4147 return r;
4148
4149 r = generic_array_get_plus_one(f,
4150 le64toh(o->data.entry_offset),
4151 le64toh(o->data.entry_array_offset),
4152 le64toh(o->data.n_entries) - 1,
4153 DIRECTION_UP,
4154 &o, NULL);
4155 if (r <= 0)
4156 return r;
4157
4158 *ret_to = le64toh(o->entry.monotonic);
4159 }
4160
4161 return 1;
4162 }
4163
/* Ideally this would be a function parameter but initializers for static fields have to be compile
 * time constants so we hardcode the interval instead. */
#define LOG_RATELIMIT ((const RateLimit) { .interval = 60 * USEC_PER_SEC, .burst = 3 })

/* Checks various indicators of whether rotating this journal file away is advisable: an outdated
 * header format, hash tables filled beyond 75%, overly deep hash chains (a sign of hash collision
 * attacks), data objects lacking field indexing, or a head entry older than 'max_file_usec'
 * (ignored if zero). The reason is logged at 'log_level' (mostly rate limited). Returns true if
 * rotation is suggested, false otherwise. */
bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec, int log_level) {
        assert(f);
        assert(f->header);

        /* If we gained new header fields we gained new features,
         * hence suggest a rotation */
        if (le64toh(f->header->header_size) < sizeof(Header)) {
                log_full(log_level, "%s uses an outdated header, suggesting rotation.", f->path);
                return true;
        }

        /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
         * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
         * need the n_data field, which only exists in newer versions. */

        if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
                if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                        log_ratelimit_full(
                                        log_level, LOG_RATELIMIT,
                                        "Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
                                        f->path,
                                        100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
                                        le64toh(f->header->n_data),
                                        le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
                                        (unsigned long long) f->last_stat.st_size,
                                        f->last_stat.st_size / le64toh(f->header->n_data));
                        return true;
                }

        if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
                if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
                        log_ratelimit_full(
                                        log_level, LOG_RATELIMIT,
                                        "Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
                                        f->path,
                                        100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
                                        le64toh(f->header->n_fields),
                                        le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
                        return true;
                }

        /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
         * longest chain is longer than some threshold, let's suggest rotation. */
        if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) &&
            le64toh(f->header->data_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
                log_ratelimit_full(
                                log_level, LOG_RATELIMIT,
                                "Data hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
                                f->path, le64toh(f->header->data_hash_chain_depth));
                return true;
        }

        if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) &&
            le64toh(f->header->field_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
                log_ratelimit_full(
                                log_level, LOG_RATELIMIT,
                                "Field hash table of %s has deepest hash chain of length at %" PRIu64 ", suggesting rotation.",
                                f->path, le64toh(f->header->field_hash_chain_depth));
                return true;
        }

        /* Are the data objects properly indexed by field objects? */
        if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
            JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
            le64toh(f->header->n_data) > 0 &&
            le64toh(f->header->n_fields) == 0) {
                log_ratelimit_full(
                                log_level, LOG_RATELIMIT,
                                "Data objects of %s are not indexed by field objects, suggesting rotation.",
                                f->path);
                return true;
        }

        if (max_file_usec > 0) {
                usec_t t, h;

                /* Suggest rotation once the oldest (head) entry exceeds the retention duration. */
                h = le64toh(f->header->head_entry_realtime);
                t = now(CLOCK_REALTIME);

                if (h > 0 && t > h + max_file_usec) {
                        log_ratelimit_full(
                                        log_level, LOG_RATELIMIT,
                                        "Oldest entry in %s is older than the configured file retention duration (%s), suggesting rotation.",
                                        f->path, FORMAT_TIMESPAN(max_file_usec, USEC_PER_SEC));
                        return true;
                }
        }

        return false;
}
4258
/* Human-readable names for the OBJECT_* types, used via the string table lookup generated below
 * (e.g. for logging and debugging output). */
static const char * const journal_object_type_table[] = {
        [OBJECT_UNUSED] = "unused",
        [OBJECT_DATA] = "data",
        [OBJECT_FIELD] = "field",
        [OBJECT_ENTRY] = "entry",
        [OBJECT_DATA_HASH_TABLE] = "data hash table",
        [OBJECT_FIELD_HASH_TABLE] = "field hash table",
        [OBJECT_ENTRY_ARRAY] = "entry array",
        [OBJECT_TAG] = "tag",
};

DEFINE_STRING_TABLE_LOOKUP_TO_STRING(journal_object_type, ObjectType);