1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <linux/fs.h>
6 #include <pthread.h>
7 #include <stddef.h>
8 #include <sys/mman.h>
9 #include <sys/statvfs.h>
10 #include <sys/uio.h>
11 #include <unistd.h>
12
13 #include "sd-event.h"
14
15 #include "alloc-util.h"
16 #include "btrfs-util.h"
17 #include "chattr-util.h"
18 #include "compress.h"
19 #include "env-util.h"
20 #include "fd-util.h"
21 #include "format-util.h"
22 #include "fs-util.h"
23 #include "journal-authenticate.h"
24 #include "journal-def.h"
25 #include "journal-file.h"
26 #include "lookup3.h"
27 #include "memory-util.h"
28 #include "path-util.h"
29 #include "random-util.h"
30 #include "set.h"
31 #include "sort-util.h"
32 #include "stat-util.h"
33 #include "string-util.h"
34 #include "strv.h"
35 #include "xattr-util.h"
36
37 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
38 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
39
40 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
41 #define MIN_COMPRESS_THRESHOLD (8ULL)
42
43 /* This is the minimum journal file size */
44 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
45
46 /* These are the lower and upper bounds if we deduce the max_use value
47 * from the file system size */
48 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
49 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
50
51 /* These are the lower and upper bounds for the minimal use limit,
52 * i.e. how much we'll use even if keep_free suggests otherwise. */
53 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
54 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
55
56 /* This is the upper bound if we deduce max_size from max_use */
57 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
58
59 /* This is the upper bound if we deduce the keep_free value from the
60 * file system size */
61 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
62
63 /* This is the keep_free value when we can't determine the system
64 * size */
65 #define DEFAULT_KEEP_FREE (1024 * 1024ULL) /* 1 MiB */
66
67 /* This is the default maximum number of journal files to keep around. */
68 #define DEFAULT_N_MAX_FILES 100
69
70 /* n_data was the first entry we added after the initial file format design */
71 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
72
73 /* How many entries to keep in the entry array chain cache at max */
74 #define CHAIN_CACHE_MAX 20
75
76 /* How much to increase the journal file size at once each time we allocate something new. */
77 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL) /* 8 MiB */
78
79 /* Reread fstat() of the file for detecting deletions at least this often */
80 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
81
82 /* The mmap context to use for the header: we pick the one above the last defined object type */
83 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
84
85 /* Longest hash chain to rotate after */
86 #define HASH_CHAIN_DEPTH_MAX 100
87
88 #ifdef __clang__
89 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
90 #endif
91
92 /* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
93 * As a result we use atomic operations on f->offline_state for inter-thread communications with
94 * journal_file_set_offline() and journal_file_set_online(). */
95 static void journal_file_set_offline_internal(JournalFile *f) {
96 assert(f);
97 assert(f->fd >= 0);
98 assert(f->header);
99
100 for (;;) {
101 switch (f->offline_state) {
102 case OFFLINE_CANCEL:
103 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
104 continue;
105 return;
106
107 case OFFLINE_AGAIN_FROM_SYNCING:
108 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
109 continue;
110 break;
111
112 case OFFLINE_AGAIN_FROM_OFFLINING:
113 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
114 continue;
115 break;
116
117 case OFFLINE_SYNCING:
118 (void) fsync(f->fd);
119
120 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
121 continue;
122
123 f->header->state = f->archive ? STATE_ARCHIVED : STATE_OFFLINE;
124 (void) fsync(f->fd);
125 break;
126
127 case OFFLINE_OFFLINING:
128 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
129 continue;
130 _fallthrough_;
131 case OFFLINE_DONE:
132 return;
133
134 case OFFLINE_JOINED:
135 log_debug("OFFLINE_JOINED unexpected offline state for journal_file_set_offline_internal()");
136 return;
137 }
138 }
139 }
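/*
 * The loop above is built on a small lock-free pattern: read the current state, pick the
 * matching transition, attempt it with a compare-and-swap, and retry from the top when
 * another thread won the race. A minimal sketch of that pattern (hypothetical helper,
 * using the same GCC __sync builtin as the code above):
 *
 *     static bool try_transition(OfflineState *state, OfflineState from, OfflineState to) {
 *         // True only if *state was still 'from' and has atomically become 'to'.
 *         // A false return means another thread changed the state first, so the
 *         // caller should re-read it and pick a new transition.
 *         return __sync_bool_compare_and_swap(state, from, to);
 *     }
 *
 * journal_file_set_offline_try_restart() and journal_file_set_online() below are built
 * from the same primitive; only the set of legal transitions differs.
 */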
140
141 static void * journal_file_set_offline_thread(void *arg) {
142 JournalFile *f = arg;
143
144 (void) pthread_setname_np(pthread_self(), "journal-offline");
145
146 journal_file_set_offline_internal(f);
147
148 return NULL;
149 }
150
151 static int journal_file_set_offline_thread_join(JournalFile *f) {
152 int r;
153
154 assert(f);
155
156 if (f->offline_state == OFFLINE_JOINED)
157 return 0;
158
159 r = pthread_join(f->offline_thread, NULL);
160 if (r)
161 return -r;
162
163 f->offline_state = OFFLINE_JOINED;
164
165 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
166 return -EIO;
167
168 return 0;
169 }
170
171 /* Trigger a restart if the offline thread is mid-flight in a restartable state. */
172 static bool journal_file_set_offline_try_restart(JournalFile *f) {
173 for (;;) {
174 switch (f->offline_state) {
175 case OFFLINE_AGAIN_FROM_SYNCING:
176 case OFFLINE_AGAIN_FROM_OFFLINING:
177 return true;
178
179 case OFFLINE_CANCEL:
180 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
181 continue;
182 return true;
183
184 case OFFLINE_SYNCING:
185 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
186 continue;
187 return true;
188
189 case OFFLINE_OFFLINING:
190 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
191 continue;
192 return true;
193
194 default:
195 return false;
196 }
197 }
198 }
199
200 /* Sets a journal offline.
201 *
202 * If wait is false then an offline is dispatched in a separate thread for a
203 * subsequent journal_file_set_offline() or journal_file_set_online() of the
204 * same journal to synchronize with.
205 *
206 * If wait is true, then either an existing offline thread will be restarted
207 * and joined, or if none exists the offline is simply performed in this
208 * context without involving another thread.
209 */
210 int journal_file_set_offline(JournalFile *f, bool wait) {
211 bool restarted;
212 int r;
213
214 assert(f);
215
216 if (!f->writable)
217 return -EPERM;
218
219 if (f->fd < 0 || !f->header)
220 return -EINVAL;
221
222 /* An offlining journal is implicitly online and may modify f->header->state;
223 * we must also join any potentially lingering offline thread when not online. */
224 if (!journal_file_is_offlining(f) && f->header->state != STATE_ONLINE)
225 return journal_file_set_offline_thread_join(f);
226
227 /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
228 restarted = journal_file_set_offline_try_restart(f);
229 if ((restarted && wait) || !restarted) {
230 r = journal_file_set_offline_thread_join(f);
231 if (r < 0)
232 return r;
233 }
234
235 if (restarted)
236 return 0;
237
238 /* Initiate a new offline. */
239 f->offline_state = OFFLINE_SYNCING;
240
241 if (wait) /* Without using a thread if waiting. */
242 journal_file_set_offline_internal(f);
243 else {
244 sigset_t ss, saved_ss;
245 int k;
246
247 assert_se(sigfillset(&ss) >= 0);
248 /* Don't block SIGBUS since the offlining thread accesses a memory mapped file.
249 * Asynchronous SIGBUS signals can safely be handled by either thread. */
250 assert_se(sigdelset(&ss, SIGBUS) >= 0);
251
252 r = pthread_sigmask(SIG_BLOCK, &ss, &saved_ss);
253 if (r > 0)
254 return -r;
255
256 r = pthread_create(&f->offline_thread, NULL, journal_file_set_offline_thread, f);
257
258 k = pthread_sigmask(SIG_SETMASK, &saved_ss, NULL);
259 if (r > 0) {
260 f->offline_state = OFFLINE_JOINED;
261 return -r;
262 }
263 if (k > 0)
264 return -k;
265 }
266
267 return 0;
268 }
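/*
 * A minimal caller-side sketch for the offline/online cycle (assumptions: 'f' is an open,
 * writable JournalFile, error handling is elided): a writer typically takes the file
 * offline asynchronously when it goes idle and brings it back online before writing again.
 *
 *     r = journal_file_set_offline(f, false);   // fsync() + state change happen in a thread
 *     if (r < 0)
 *         return r;
 *
 *     // ... later, before the next append ...
 *
 *     r = journal_file_set_online(f);           // cancels or joins the offline thread and
 *     if (r < 0)                                // flips the header back to STATE_ONLINE
 *         return r;
 *
 * Note that journal_file_append_object() calls journal_file_set_online() itself, so code
 * going through the append APIs never has to do this step explicitly.
 */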
269
270 static int journal_file_set_online(JournalFile *f) {
271 bool wait = true;
272
273 assert(f);
274
275 if (!f->writable)
276 return -EPERM;
277
278 if (f->fd < 0 || !f->header)
279 return -EINVAL;
280
281 while (wait) {
282 switch (f->offline_state) {
283 case OFFLINE_JOINED:
284 /* No offline thread, no need to wait. */
285 wait = false;
286 break;
287
288 case OFFLINE_SYNCING:
289 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
290 continue;
291 /* Canceled syncing prior to offlining, no need to wait. */
292 wait = false;
293 break;
294
295 case OFFLINE_AGAIN_FROM_SYNCING:
296 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
297 continue;
298 /* Canceled restart from syncing, no need to wait. */
299 wait = false;
300 break;
301
302 case OFFLINE_AGAIN_FROM_OFFLINING:
303 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
304 continue;
305 /* Canceled restart from offlining, must wait for offlining to complete however. */
306 _fallthrough_;
307 default: {
308 int r;
309
310 r = journal_file_set_offline_thread_join(f);
311 if (r < 0)
312 return r;
313
314 wait = false;
315 break;
316 }
317 }
318 }
319
320 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
321 return -EIO;
322
323 switch (f->header->state) {
324 case STATE_ONLINE:
325 return 0;
326
327 case STATE_OFFLINE:
328 f->header->state = STATE_ONLINE;
329 (void) fsync(f->fd);
330 return 0;
331
332 default:
333 return -EINVAL;
334 }
335 }
336
337 bool journal_file_is_offlining(JournalFile *f) {
338 assert(f);
339
340 __sync_synchronize();
341
342 if (IN_SET(f->offline_state, OFFLINE_DONE, OFFLINE_JOINED))
343 return false;
344
345 return true;
346 }
347
348 JournalFile* journal_file_close(JournalFile *f) {
349 if (!f)
350 return NULL;
351
352 #if HAVE_GCRYPT
353 /* Write the final tag */
354 if (f->seal && f->writable) {
355 int r;
356
357 r = journal_file_append_tag(f);
358 if (r < 0)
359 log_error_errno(r, "Failed to append tag when closing journal: %m");
360 }
361 #endif
362
363 if (f->post_change_timer) {
364 if (sd_event_source_get_enabled(f->post_change_timer, NULL) > 0)
365 journal_file_post_change(f);
366
367 sd_event_source_disable_unref(f->post_change_timer);
368 }
369
370 journal_file_set_offline(f, true);
371
372 if (f->mmap && f->cache_fd)
373 mmap_cache_free_fd(f->mmap, f->cache_fd);
374
375 if (f->fd >= 0 && f->defrag_on_close) {
376
377 /* Be friendly to btrfs: turn COW back on again now,
378 * and defragment the file. We won't write to the file
379 * ever again, hence remove all fragmentation, and
380 * reenable all the good bits COW usually provides
381 * (such as data checksumming). */
382
383 (void) chattr_fd(f->fd, 0, FS_NOCOW_FL, NULL);
384 (void) btrfs_defrag_fd(f->fd);
385 }
386
387 if (f->close_fd)
388 safe_close(f->fd);
389 free(f->path);
390
391 mmap_cache_unref(f->mmap);
392
393 ordered_hashmap_free_free(f->chain_cache);
394
395 #if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
396 free(f->compress_buffer);
397 #endif
398
399 #if HAVE_GCRYPT
400 if (f->fss_file)
401 munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
402 else
403 free(f->fsprg_state);
404
405 free(f->fsprg_seed);
406
407 if (f->hmac)
408 gcry_md_close(f->hmac);
409 #endif
410
411 return mfree(f);
412 }
413
414 static int journal_file_init_header(JournalFile *f, JournalFile *template) {
415 Header h = {};
416 ssize_t k;
417 int r;
418
419 assert(f);
420
421 memcpy(h.signature, HEADER_SIGNATURE, 8);
422 h.header_size = htole64(ALIGN64(sizeof(h)));
423
424 h.incompatible_flags |= htole32(
425 f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
426 f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4 |
427 f->compress_zstd * HEADER_INCOMPATIBLE_COMPRESSED_ZSTD |
428 f->keyed_hash * HEADER_INCOMPATIBLE_KEYED_HASH);
429
430 h.compatible_flags = htole32(
431 f->seal * HEADER_COMPATIBLE_SEALED);
432
433 r = sd_id128_randomize(&h.file_id);
434 if (r < 0)
435 return r;
436
437 if (template) {
438 h.seqnum_id = template->header->seqnum_id;
439 h.tail_entry_seqnum = template->header->tail_entry_seqnum;
440 } else
441 h.seqnum_id = h.file_id;
442
443 k = pwrite(f->fd, &h, sizeof(h), 0);
444 if (k < 0)
445 return -errno;
446
447 if (k != sizeof(h))
448 return -EIO;
449
450 return 0;
451 }
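/*
 * A worked example for the flag computation in journal_file_init_header() (hypothetical
 * configuration; the constants come from journal-def.h, included above): with
 * f->compress_zstd and f->keyed_hash set and everything else off, the boolean-times-flag
 * sums above reduce to a plain bitwise OR:
 *
 *     h.incompatible_flags = htole32(HEADER_INCOMPATIBLE_COMPRESSED_ZSTD | HEADER_INCOMPATIBLE_KEYED_HASH);
 *     h.compatible_flags   = htole32(0);
 *
 * Readers that do not know an incompatible flag must refuse the file, while unknown
 * compatible flags are only refused when opening for writing (see
 * journal_file_verify_header() and warn_wrong_flags() below).
 */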
452
453 static int journal_file_refresh_header(JournalFile *f) {
454 int r;
455
456 assert(f);
457 assert(f->header);
458
459 r = sd_id128_get_machine(&f->header->machine_id);
460 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
461 /* We don't have a machine-id, let's continue without */
462 zero(f->header->machine_id);
463 else if (r < 0)
464 return r;
465
466 r = sd_id128_get_boot(&f->header->boot_id);
467 if (r < 0)
468 return r;
469
470 r = journal_file_set_online(f);
471
472 /* Sync the online state to disk */
473 (void) fsync(f->fd);
474
475 /* We likely just created a new file, also sync the directory this file is located in. */
476 (void) fsync_directory_of_file(f->fd);
477
478 return r;
479 }
480
481 static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
482 const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
483 supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
484 const char *type = compatible ? "compatible" : "incompatible";
485 uint32_t flags;
486
487 flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);
488
489 if (flags & ~supported) {
490 if (flags & ~any)
491 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
492 f->path, type, flags & ~any);
493 flags = (flags & any) & ~supported;
494 if (flags) {
495 const char* strv[5];
496 unsigned n = 0;
497 _cleanup_free_ char *t = NULL;
498
499 if (compatible) {
500 if (flags & HEADER_COMPATIBLE_SEALED)
501 strv[n++] = "sealed";
502 } else {
503 if (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ)
504 strv[n++] = "xz-compressed";
505 if (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4)
506 strv[n++] = "lz4-compressed";
507 if (flags & HEADER_INCOMPATIBLE_COMPRESSED_ZSTD)
508 strv[n++] = "zstd-compressed";
509 if (flags & HEADER_INCOMPATIBLE_KEYED_HASH)
510 strv[n++] = "keyed-hash";
511 }
512 strv[n] = NULL;
513 assert(n < ELEMENTSOF(strv));
514
515 t = strv_join((char**) strv, ", ");
516 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
517 f->path, type, n > 1 ? "flags" : "flag", strnull(t));
518 }
519 return true;
520 }
521
522 return false;
523 }
524
525 static int journal_file_verify_header(JournalFile *f) {
526 uint64_t arena_size, header_size;
527
528 assert(f);
529 assert(f->header);
530
531 if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
532 return -EBADMSG;
533
534 /* In both read and write mode we refuse to open files with incompatible
535 * flags we don't know. */
536 if (warn_wrong_flags(f, false))
537 return -EPROTONOSUPPORT;
538
539 /* When opened for writing we refuse to open files with unknown compatible flags, too. */
540 if (f->writable && warn_wrong_flags(f, true))
541 return -EPROTONOSUPPORT;
542
543 if (f->header->state >= _STATE_MAX)
544 return -EBADMSG;
545
546 header_size = le64toh(READ_NOW(f->header->header_size));
547
548 /* The first addition was n_data, so check that we are at least this large */
549 if (header_size < HEADER_SIZE_MIN)
550 return -EBADMSG;
551
552 if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
553 return -EBADMSG;
554
555 arena_size = le64toh(READ_NOW(f->header->arena_size));
556
557 if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
558 return -ENODATA;
559
560 if (le64toh(f->header->tail_object_offset) > header_size + arena_size)
561 return -ENODATA;
562
563 if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
564 !VALID64(le64toh(f->header->field_hash_table_offset)) ||
565 !VALID64(le64toh(f->header->tail_object_offset)) ||
566 !VALID64(le64toh(f->header->entry_array_offset)))
567 return -ENODATA;
568
569 if (f->writable) {
570 sd_id128_t machine_id;
571 uint8_t state;
572 int r;
573
574 r = sd_id128_get_machine(&machine_id);
575 if (r < 0)
576 return r;
577
578 if (!sd_id128_equal(machine_id, f->header->machine_id))
579 return -EHOSTDOWN;
580
581 state = f->header->state;
582
583 if (state == STATE_ARCHIVED)
584 return -ESHUTDOWN; /* Already archived */
585 else if (state == STATE_ONLINE)
586 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
587 "Journal file %s is already online. Assuming unclean closing.",
588 f->path);
589 else if (state != STATE_OFFLINE)
590 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
591 "Journal file %s has unknown state %i.",
592 f->path, state);
593
594 if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
595 return -EBADMSG;
596
597 /* Don't permit appending to files from the future, since otherwise the realtime timestamps
598 * wouldn't be strictly ordered in the entries in the file anymore, and we can't have that
599 * since it breaks bisection. */
600 if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME))
601 return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY),
602 "Journal file %s is from the future, refusing to append new data to it that'd be older.",
603 f->path);
604 }
605
606 f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
607 f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);
608 f->compress_zstd = JOURNAL_HEADER_COMPRESSED_ZSTD(f->header);
609
610 f->seal = JOURNAL_HEADER_SEALED(f->header);
611
612 f->keyed_hash = JOURNAL_HEADER_KEYED_HASH(f->header);
613
614 return 0;
615 }
616
617 int journal_file_fstat(JournalFile *f) {
618 int r;
619
620 assert(f);
621 assert(f->fd >= 0);
622
623 if (fstat(f->fd, &f->last_stat) < 0)
624 return -errno;
625
626 f->last_stat_usec = now(CLOCK_MONOTONIC);
627
628 /* Refuse dealing with files that aren't regular */
629 r = stat_verify_regular(&f->last_stat);
630 if (r < 0)
631 return r;
632
633 /* Refuse appending to files that are already deleted */
634 if (f->last_stat.st_nlink <= 0)
635 return -EIDRM;
636
637 return 0;
638 }
639
640 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
641 uint64_t old_size, new_size, old_header_size, old_arena_size;
642 int r;
643
644 assert(f);
645 assert(f->header);
646
647 /* We assume that this file is not sparse, and we know that for sure, since we always call
648 * posix_fallocate() ourselves */
649
650 if (size > PAGE_ALIGN_DOWN(UINT64_MAX) - offset)
651 return -EINVAL;
652
653 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
654 return -EIO;
655
656 old_header_size = le64toh(READ_NOW(f->header->header_size));
657 old_arena_size = le64toh(READ_NOW(f->header->arena_size));
658 if (old_arena_size > PAGE_ALIGN_DOWN(UINT64_MAX) - old_header_size)
659 return -EBADMSG;
660
661 old_size = old_header_size + old_arena_size;
662
663 new_size = MAX(PAGE_ALIGN(offset + size), old_header_size);
664
665 if (new_size <= old_size) {
666
667 /* We already pre-allocated enough space, but before
668 * we write to it, let's check with fstat() if the
669 * file got deleted, in order to make sure we don't throw
670 * away the data immediately. Don't check fstat() for
671 * all writes though, but only once every LAST_STAT_REFRESH_USEC (5s). */
672
673 if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
674 return 0;
675
676 return journal_file_fstat(f);
677 }
678
679 /* Allocate more space. */
680
681 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
682 return -E2BIG;
683
684 if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
685 struct statvfs svfs;
686
687 if (fstatvfs(f->fd, &svfs) >= 0) {
688 uint64_t available;
689
690 available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);
691
692 if (new_size - old_size > available)
693 return -E2BIG;
694 }
695 }
696
697 /* Increase by larger blocks at once */
698 new_size = DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
699 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
700 new_size = f->metrics.max_size;
701
702 /* Note that the glibc fallocate() fallback is very
703 inefficient, hence we try to minimize the allocation area
704 as much as we can. */
705 r = posix_fallocate(f->fd, old_size, new_size - old_size);
706 if (r != 0)
707 return -r;
708
709 f->header->arena_size = htole64(new_size - old_header_size);
710
711 return journal_file_fstat(f);
712 }
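/*
 * A worked example of the growth logic above (hypothetical numbers, 4 KiB pages): say the
 * file currently has old_size = 4 MiB and an append needs offset + size = 4 MiB + 100
 * bytes. PAGE_ALIGN() rounds the requirement up to 4 MiB + 4 KiB, which exceeds old_size,
 * so we allocate. Rounding up to the next FILE_SIZE_INCREASE step via
 * DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE yields 8 MiB, so the
 * file grows by 4 MiB in a single posix_fallocate() call instead of page by page, subject
 * to metrics.max_size and the keep_free check against fstatvfs() above.
 */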
713
714 static unsigned type_to_context(ObjectType type) {
715 /* One context for each type, plus one catch-all for the rest */
716 assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
717 assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
718 return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
719 }
720
721 static int journal_file_move_to(
722 JournalFile *f,
723 ObjectType type,
724 bool keep_always,
725 uint64_t offset,
726 uint64_t size,
727 void **ret,
728 size_t *ret_size) {
729
730 int r;
731
732 assert(f);
733 assert(ret);
734
735 if (size <= 0)
736 return -EINVAL;
737
738 if (size > UINT64_MAX - offset)
739 return -EBADMSG;
740
741 /* Avoid SIGBUS on invalid accesses */
742 if (offset + size > (uint64_t) f->last_stat.st_size) {
743 /* Hmm, out of range? Let's refresh the fstat() data
744 * first, before we trust that check. */
745
746 r = journal_file_fstat(f);
747 if (r < 0)
748 return r;
749
750 if (offset + size > (uint64_t) f->last_stat.st_size)
751 return -EADDRNOTAVAIL;
752 }
753
754 return mmap_cache_get(f->mmap, f->cache_fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret, ret_size);
755 }
756
757 static uint64_t minimum_header_size(Object *o) {
758
759 static const uint64_t table[] = {
760 [OBJECT_DATA] = sizeof(DataObject),
761 [OBJECT_FIELD] = sizeof(FieldObject),
762 [OBJECT_ENTRY] = sizeof(EntryObject),
763 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
764 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
765 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
766 [OBJECT_TAG] = sizeof(TagObject),
767 };
768
769 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
770 return sizeof(ObjectHeader);
771
772 return table[o->object.type];
773 }
774
775 /* Lightweight object checks. We want this to be fast, so that we won't
776 * slow down every journal_file_move_to_object() call too much. */
777 static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) {
778 assert(f);
779 assert(o);
780
781 switch (o->object.type) {
782
783 case OBJECT_DATA:
784 if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0))
785 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
786 "Bad n_entries: %" PRIu64 ": %" PRIu64,
787 le64toh(o->data.n_entries),
788 offset);
789
790 if (le64toh(o->object.size) <= offsetof(DataObject, payload))
791 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
792 "Bad object size (<= %zu): %" PRIu64 ": %" PRIu64,
793 offsetof(DataObject, payload),
794 le64toh(o->object.size),
795 offset);
796
797 if (!VALID64(le64toh(o->data.next_hash_offset)) ||
798 !VALID64(le64toh(o->data.next_field_offset)) ||
799 !VALID64(le64toh(o->data.entry_offset)) ||
800 !VALID64(le64toh(o->data.entry_array_offset)))
801 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
802 "Invalid offset, next_hash_offset=" OFSfmt ", next_field_offset=" OFSfmt ", entry_offset=" OFSfmt ", entry_array_offset=" OFSfmt ": %" PRIu64,
803 le64toh(o->data.next_hash_offset),
804 le64toh(o->data.next_field_offset),
805 le64toh(o->data.entry_offset),
806 le64toh(o->data.entry_array_offset),
807 offset);
808
809 break;
810
811 case OBJECT_FIELD:
812 if (le64toh(o->object.size) <= offsetof(FieldObject, payload))
813 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
814 "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64,
815 offsetof(FieldObject, payload),
816 le64toh(o->object.size),
817 offset);
818
819 if (!VALID64(le64toh(o->field.next_hash_offset)) ||
820 !VALID64(le64toh(o->field.head_data_offset)))
821 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
822 "Invalid offset, next_hash_offset=" OFSfmt ", head_data_offset=" OFSfmt ": %" PRIu64,
823 le64toh(o->field.next_hash_offset),
824 le64toh(o->field.head_data_offset),
825 offset);
826 break;
827
828 case OBJECT_ENTRY: {
829 uint64_t sz;
830
831 sz = le64toh(READ_NOW(o->object.size));
832 if (sz < offsetof(EntryObject, items) ||
833 (sz - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0)
834 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
835 "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
836 offsetof(EntryObject, items),
837 sz,
838 offset);
839
840 if ((sz - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0)
841 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
842 "Invalid number items in entry: %" PRIu64 ": %" PRIu64,
843 (sz - offsetof(EntryObject, items)) / sizeof(EntryItem),
844 offset);
845
846 if (le64toh(o->entry.seqnum) <= 0)
847 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
848 "Invalid entry seqnum: %" PRIx64 ": %" PRIu64,
849 le64toh(o->entry.seqnum),
850 offset);
851
852 if (!VALID_REALTIME(le64toh(o->entry.realtime)))
853 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
854 "Invalid entry realtime timestamp: %" PRIu64 ": %" PRIu64,
855 le64toh(o->entry.realtime),
856 offset);
857
858 if (!VALID_MONOTONIC(le64toh(o->entry.monotonic)))
859 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
860 "Invalid entry monotonic timestamp: %" PRIu64 ": %" PRIu64,
861 le64toh(o->entry.monotonic),
862 offset);
863
864 break;
865 }
866
867 case OBJECT_DATA_HASH_TABLE:
868 case OBJECT_FIELD_HASH_TABLE: {
869 uint64_t sz;
870
871 sz = le64toh(READ_NOW(o->object.size));
872 if (sz < offsetof(HashTableObject, items) ||
873 (sz - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
874 (sz - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0)
875 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
876 "Invalid %s hash table size: %" PRIu64 ": %" PRIu64,
877 o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
878 sz,
879 offset);
880
881 break;
882 }
883
884 case OBJECT_ENTRY_ARRAY: {
885 uint64_t sz;
886
887 sz = le64toh(READ_NOW(o->object.size));
888 if (sz < offsetof(EntryArrayObject, items) ||
889 (sz - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
890 (sz - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0)
891 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
892 "Invalid object entry array size: %" PRIu64 ": %" PRIu64,
893 sz,
894 offset);
895
896 if (!VALID64(le64toh(o->entry_array.next_entry_array_offset)))
897 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
898 "Invalid object entry array next_entry_array_offset: " OFSfmt ": %" PRIu64,
899 le64toh(o->entry_array.next_entry_array_offset),
900 offset);
901
902 break;
903 }
904
905 case OBJECT_TAG:
906 if (le64toh(o->object.size) != sizeof(TagObject))
907 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
908 "Invalid object tag size: %" PRIu64 ": %" PRIu64,
909 le64toh(o->object.size),
910 offset);
911
912 if (!VALID_EPOCH(le64toh(o->tag.epoch)))
913 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
914 "Invalid object tag epoch: %" PRIu64 ": %" PRIu64,
915 le64toh(o->tag.epoch), offset);
916
917 break;
918 }
919
920 return 0;
921 }
922
923 int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
924 int r;
925 void *t;
926 size_t tsize;
927 Object *o;
928 uint64_t s;
929
930 assert(f);
931 assert(ret);
932
933 /* Objects may only be located at multiples of 64 bits */
934 if (!VALID64(offset))
935 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
936 "Attempt to move to object at non-64bit boundary: %" PRIu64,
937 offset);
938
939 /* Object may not be located in the file header */
940 if (offset < le64toh(f->header->header_size))
941 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
942 "Attempt to move to object located in file header: %" PRIu64,
943 offset);
944
945 r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t, &tsize);
946 if (r < 0)
947 return r;
948
949 o = (Object*) t;
950 s = le64toh(READ_NOW(o->object.size));
951
952 if (s == 0)
953 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
954 "Attempt to move to uninitialized object: %" PRIu64,
955 offset);
956 if (s < sizeof(ObjectHeader))
957 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
958 "Attempt to move to overly short object: %" PRIu64,
959 offset);
960
961 if (o->object.type <= OBJECT_UNUSED)
962 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
963 "Attempt to move to object with invalid type: %" PRIu64,
964 offset);
965
966 if (s < minimum_header_size(o))
967 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
968 "Attempt to move to truncated object: %" PRIu64,
969 offset);
970
971 if (type > OBJECT_UNUSED && o->object.type != type)
972 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
973 "Attempt to move to object of unexpected type: %" PRIu64,
974 offset);
975
976 if (s > tsize) {
977 r = journal_file_move_to(f, type, false, offset, s, &t, NULL);
978 if (r < 0)
979 return r;
980
981 o = (Object*) t;
982 }
983
984 r = journal_file_check_object(f, offset, o);
985 if (r < 0)
986 return r;
987
988 *ret = o;
989 return 0;
990 }
991
992 static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {
993 uint64_t r;
994
995 assert(f);
996 assert(f->header);
997
998 r = le64toh(f->header->tail_entry_seqnum) + 1;
999
1000 if (seqnum) {
1001 /* If an external seqnum counter was passed, we update
1002 * both the local and the external one, and set it to
1003 * the maximum of both */
1004
1005 if (*seqnum + 1 > r)
1006 r = *seqnum + 1;
1007
1008 *seqnum = r;
1009 }
1010
1011 f->header->tail_entry_seqnum = htole64(r);
1012
1013 if (f->header->head_entry_seqnum == 0)
1014 f->header->head_entry_seqnum = htole64(r);
1015
1016 return r;
1017 }
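/*
 * A small worked example for the seqnum logic above (hypothetical values): assume this
 * file's tail_entry_seqnum is 41 and the caller passes an external counter *seqnum == 45,
 * e.g. because another journal file with the same seqnum_id already reached 45. The local
 * candidate would be 42, but since *seqnum + 1 == 46 is larger, both the returned value
 * and *seqnum become 46. That keeps sequence numbers strictly increasing across all files
 * sharing a seqnum_id, even when writing alternates between them.
 */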
1018
1019 int journal_file_append_object(
1020 JournalFile *f,
1021 ObjectType type,
1022 uint64_t size,
1023 Object **ret,
1024 uint64_t *ret_offset) {
1025
1026 int r;
1027 uint64_t p;
1028 Object *tail, *o;
1029 void *t;
1030
1031 assert(f);
1032 assert(f->header);
1033 assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
1034 assert(size >= sizeof(ObjectHeader));
1035
1036 r = journal_file_set_online(f);
1037 if (r < 0)
1038 return r;
1039
1040 p = le64toh(f->header->tail_object_offset);
1041 if (p == 0)
1042 p = le64toh(f->header->header_size);
1043 else {
1044 uint64_t sz;
1045
1046 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
1047 if (r < 0)
1048 return r;
1049
1050 sz = le64toh(READ_NOW(tail->object.size));
1051 if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
1052 return -EBADMSG;
1053
1054 sz = ALIGN64(sz);
1055 if (p > UINT64_MAX - sz)
1056 return -EBADMSG;
1057
1058 p += sz;
1059 }
1060
1061 r = journal_file_allocate(f, p, size);
1062 if (r < 0)
1063 return r;
1064
1065 r = journal_file_move_to(f, type, false, p, size, &t, NULL);
1066 if (r < 0)
1067 return r;
1068
1069 o = (Object*) t;
1070 o->object = (ObjectHeader) {
1071 .type = type,
1072 .size = htole64(size),
1073 };
1074
1075 f->header->tail_object_offset = htole64(p);
1076 f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
1077
1078 if (ret)
1079 *ret = o;
1080
1081 if (ret_offset)
1082 *ret_offset = p;
1083
1084 return 0;
1085 }
1086
1087 static int journal_file_setup_data_hash_table(JournalFile *f) {
1088 uint64_t s, p;
1089 Object *o;
1090 int r;
1091
1092 assert(f);
1093 assert(f->header);
1094
1095 /* We estimate that we need 1 hash table entry per 768 bytes
1096 of journal file and we want to make sure we never get
1097 beyond 75% fill level. Calculate the hash table size for
1098 the maximum file size based on these metrics. */
1099
1100 s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
1101 if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
1102 s = DEFAULT_DATA_HASH_TABLE_SIZE;
1103
1104 log_debug("Reserving %"PRIu64" entries in data hash table.", s / sizeof(HashItem));
1105
1106 r = journal_file_append_object(f,
1107 OBJECT_DATA_HASH_TABLE,
1108 offsetof(Object, hash_table.items) + s,
1109 &o, &p);
1110 if (r < 0)
1111 return r;
1112
1113 memzero(o->hash_table.items, s);
1114
1115 f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1116 f->header->data_hash_table_size = htole64(s);
1117
1118 return 0;
1119 }
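/*
 * A worked example of the sizing estimate above (hypothetical metrics): for
 * metrics.max_size = 128 MiB, (max_size * 4 / 768 / 3) * sizeof(HashItem) reserves
 * 134217728 / 768 / 0.75, i.e. roughly 233000 hash items: about one bucket per 768 bytes
 * of file at a target fill level of at most 75%. If the computed size comes out smaller
 * than DEFAULT_DATA_HASH_TABLE_SIZE (2047 items), the default is used instead.
 */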
1120
1121 static int journal_file_setup_field_hash_table(JournalFile *f) {
1122 uint64_t s, p;
1123 Object *o;
1124 int r;
1125
1126 assert(f);
1127 assert(f->header);
1128
1129 /* We use a fixed size hash table for the fields, as the number
1130 * of fields should only grow very slowly */
1131
1132 s = DEFAULT_FIELD_HASH_TABLE_SIZE;
1133 log_debug("Reserving %"PRIu64" entries in field hash table.", s / sizeof(HashItem));
1134
1135 r = journal_file_append_object(f,
1136 OBJECT_FIELD_HASH_TABLE,
1137 offsetof(Object, hash_table.items) + s,
1138 &o, &p);
1139 if (r < 0)
1140 return r;
1141
1142 memzero(o->hash_table.items, s);
1143
1144 f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1145 f->header->field_hash_table_size = htole64(s);
1146
1147 return 0;
1148 }
1149
1150 int journal_file_map_data_hash_table(JournalFile *f) {
1151 uint64_t s, p;
1152 void *t;
1153 int r;
1154
1155 assert(f);
1156 assert(f->header);
1157
1158 if (f->data_hash_table)
1159 return 0;
1160
1161 p = le64toh(f->header->data_hash_table_offset);
1162 s = le64toh(f->header->data_hash_table_size);
1163
1164 r = journal_file_move_to(f,
1165 OBJECT_DATA_HASH_TABLE,
1166 true,
1167 p, s,
1168 &t, NULL);
1169 if (r < 0)
1170 return r;
1171
1172 f->data_hash_table = t;
1173 return 0;
1174 }
1175
1176 int journal_file_map_field_hash_table(JournalFile *f) {
1177 uint64_t s, p;
1178 void *t;
1179 int r;
1180
1181 assert(f);
1182 assert(f->header);
1183
1184 if (f->field_hash_table)
1185 return 0;
1186
1187 p = le64toh(f->header->field_hash_table_offset);
1188 s = le64toh(f->header->field_hash_table_size);
1189
1190 r = journal_file_move_to(f,
1191 OBJECT_FIELD_HASH_TABLE,
1192 true,
1193 p, s,
1194 &t, NULL);
1195 if (r < 0)
1196 return r;
1197
1198 f->field_hash_table = t;
1199 return 0;
1200 }
1201
1202 static int journal_file_link_field(
1203 JournalFile *f,
1204 Object *o,
1205 uint64_t offset,
1206 uint64_t hash) {
1207
1208 uint64_t p, h, m;
1209 int r;
1210
1211 assert(f);
1212 assert(f->header);
1213 assert(f->field_hash_table);
1214 assert(o);
1215 assert(offset > 0);
1216
1217 if (o->object.type != OBJECT_FIELD)
1218 return -EINVAL;
1219
1220 m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
1221 if (m <= 0)
1222 return -EBADMSG;
1223
1224 /* This might alter the window we are looking at */
1225 o->field.next_hash_offset = o->field.head_data_offset = 0;
1226
1227 h = hash % m;
1228 p = le64toh(f->field_hash_table[h].tail_hash_offset);
1229 if (p == 0)
1230 f->field_hash_table[h].head_hash_offset = htole64(offset);
1231 else {
1232 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1233 if (r < 0)
1234 return r;
1235
1236 o->field.next_hash_offset = htole64(offset);
1237 }
1238
1239 f->field_hash_table[h].tail_hash_offset = htole64(offset);
1240
1241 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
1242 f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
1243
1244 return 0;
1245 }
1246
1247 static int journal_file_link_data(
1248 JournalFile *f,
1249 Object *o,
1250 uint64_t offset,
1251 uint64_t hash) {
1252
1253 uint64_t p, h, m;
1254 int r;
1255
1256 assert(f);
1257 assert(f->header);
1258 assert(f->data_hash_table);
1259 assert(o);
1260 assert(offset > 0);
1261
1262 if (o->object.type != OBJECT_DATA)
1263 return -EINVAL;
1264
1265 m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
1266 if (m <= 0)
1267 return -EBADMSG;
1268
1269 /* This might alter the window we are looking at */
1270 o->data.next_hash_offset = o->data.next_field_offset = 0;
1271 o->data.entry_offset = o->data.entry_array_offset = 0;
1272 o->data.n_entries = 0;
1273
1274 h = hash % m;
1275 p = le64toh(f->data_hash_table[h].tail_hash_offset);
1276 if (p == 0)
1277 /* Only entry in the hash table is easy */
1278 f->data_hash_table[h].head_hash_offset = htole64(offset);
1279 else {
1280 /* Move back to the previous data object, to patch in
1281 * the pointer */
1282
1283 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1284 if (r < 0)
1285 return r;
1286
1287 o->data.next_hash_offset = htole64(offset);
1288 }
1289
1290 f->data_hash_table[h].tail_hash_offset = htole64(offset);
1291
1292 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
1293 f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
1294
1295 return 0;
1296 }
1297
1298 static int next_hash_offset(
1299 JournalFile *f,
1300 uint64_t *p,
1301 le64_t *next_hash_offset,
1302 uint64_t *depth,
1303 le64_t *header_max_depth) {
1304
1305 uint64_t nextp;
1306
1307 nextp = le64toh(READ_NOW(*next_hash_offset));
1308 if (nextp > 0) {
1309 if (nextp <= *p) /* Refuse going in loops */
1310 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
1311 "Detected hash item loop in %s, refusing.", f->path);
1312
1313 (*depth)++;
1314
1315 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1316 if (header_max_depth && f->writable)
1317 *header_max_depth = htole64(MAX(*depth, le64toh(*header_max_depth)));
1318 }
1319
1320 *p = nextp;
1321 return 0;
1322 }
1323
1324 int journal_file_find_field_object_with_hash(
1325 JournalFile *f,
1326 const void *field, uint64_t size, uint64_t hash,
1327 Object **ret, uint64_t *ret_offset) {
1328
1329 uint64_t p, osize, h, m, depth = 0;
1330 int r;
1331
1332 assert(f);
1333 assert(f->header);
1334 assert(field && size > 0);
1335
1336 /* If the field hash table is empty, we can't find anything */
1337 if (le64toh(f->header->field_hash_table_size) <= 0)
1338 return 0;
1339
1340 /* Map the field hash table, if it isn't mapped yet. */
1341 r = journal_file_map_field_hash_table(f);
1342 if (r < 0)
1343 return r;
1344
1345 osize = offsetof(Object, field.payload) + size;
1346
1347 m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
1348 if (m <= 0)
1349 return -EBADMSG;
1350
1351 h = hash % m;
1352 p = le64toh(f->field_hash_table[h].head_hash_offset);
1353 while (p > 0) {
1354 Object *o;
1355
1356 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1357 if (r < 0)
1358 return r;
1359
1360 if (le64toh(o->field.hash) == hash &&
1361 le64toh(o->object.size) == osize &&
1362 memcmp(o->field.payload, field, size) == 0) {
1363
1364 if (ret)
1365 *ret = o;
1366 if (ret_offset)
1367 *ret_offset = p;
1368
1369 return 1;
1370 }
1371
1372 r = next_hash_offset(
1373 f,
1374 &p,
1375 &o->field.next_hash_offset,
1376 &depth,
1377 JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) ? &f->header->field_hash_chain_depth : NULL);
1378 if (r < 0)
1379 return r;
1380 }
1381
1382 return 0;
1383 }
1384
1385 uint64_t journal_file_hash_data(
1386 JournalFile *f,
1387 const void *data,
1388 size_t sz) {
1389
1390 assert(f);
1391 assert(data || sz == 0);
1392
1393 /* We try to unify our codebase on siphash, hence new-style journal files utilizing the keyed hash
1394 * function use siphash. Old journal files use the Jenkins hash. */
1395
1396 if (JOURNAL_HEADER_KEYED_HASH(f->header))
1397 return siphash24(data, sz, f->header->file_id.bytes);
1398
1399 return jenkins_hash64(data, sz);
1400 }
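/*
 * A minimal sketch of the two hashing modes above (illustrative only, 'payload' is a
 * hypothetical datum; siphash24() and jenkins_hash64() are the helpers already used here):
 *
 *     const char *payload = "MESSAGE=hello";
 *     uint64_t h;
 *
 *     if (JOURNAL_HEADER_KEYED_HASH(f->header))
 *         h = siphash24(payload, strlen(payload), f->header->file_id.bytes); // keyed per file
 *     else
 *         h = jenkins_hash64(payload, strlen(payload));                      // stable across files
 *
 * Because the siphash key is the file_id, the same payload hashes differently in every
 * keyed-hash journal file; that is why journal_file_append_entry() below computes the XOR
 * hash with jenkins_hash64() when the keyed-hash flag is set.
 */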
1401
1402 int journal_file_find_field_object(
1403 JournalFile *f,
1404 const void *field, uint64_t size,
1405 Object **ret, uint64_t *ret_offset) {
1406
1407 assert(f);
1408 assert(field && size > 0);
1409
1410 return journal_file_find_field_object_with_hash(
1411 f,
1412 field, size,
1413 journal_file_hash_data(f, field, size),
1414 ret, ret_offset);
1415 }
1416
1417 int journal_file_find_data_object_with_hash(
1418 JournalFile *f,
1419 const void *data, uint64_t size, uint64_t hash,
1420 Object **ret, uint64_t *ret_offset) {
1421
1422 uint64_t p, osize, h, m, depth = 0;
1423 int r;
1424
1425 assert(f);
1426 assert(f->header);
1427 assert(data || size == 0);
1428
1429 /* If there's no data hash table, then there's no entry. */
1430 if (le64toh(f->header->data_hash_table_size) <= 0)
1431 return 0;
1432
1433 /* Map the data hash table, if it isn't mapped yet. */
1434 r = journal_file_map_data_hash_table(f);
1435 if (r < 0)
1436 return r;
1437
1438 osize = offsetof(Object, data.payload) + size;
1439
1440 m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
1441 if (m <= 0)
1442 return -EBADMSG;
1443
1444 h = hash % m;
1445 p = le64toh(f->data_hash_table[h].head_hash_offset);
1446
1447 while (p > 0) {
1448 Object *o;
1449
1450 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1451 if (r < 0)
1452 return r;
1453
1454 if (le64toh(o->data.hash) != hash)
1455 goto next;
1456
1457 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
1458 #if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
1459 uint64_t l;
1460 size_t rsize = 0;
1461
1462 l = le64toh(READ_NOW(o->object.size));
1463 if (l <= offsetof(Object, data.payload))
1464 return -EBADMSG;
1465
1466 l -= offsetof(Object, data.payload);
1467
1468 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
1469 o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);
1470 if (r < 0)
1471 return r;
1472
1473 if (rsize == size &&
1474 memcmp(f->compress_buffer, data, size) == 0) {
1475
1476 if (ret)
1477 *ret = o;
1478
1479 if (ret_offset)
1480 *ret_offset = p;
1481
1482 return 1;
1483 }
1484 #else
1485 return -EPROTONOSUPPORT;
1486 #endif
1487 } else if (le64toh(o->object.size) == osize &&
1488 memcmp(o->data.payload, data, size) == 0) {
1489
1490 if (ret)
1491 *ret = o;
1492
1493 if (ret_offset)
1494 *ret_offset = p;
1495
1496 return 1;
1497 }
1498
1499 next:
1500 r = next_hash_offset(
1501 f,
1502 &p,
1503 &o->data.next_hash_offset,
1504 &depth,
1505 JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) ? &f->header->data_hash_chain_depth : NULL);
1506 if (r < 0)
1507 return r;
1508 }
1509
1510 return 0;
1511 }
1512
1513 int journal_file_find_data_object(
1514 JournalFile *f,
1515 const void *data, uint64_t size,
1516 Object **ret, uint64_t *ret_offset) {
1517
1518 assert(f);
1519 assert(data || size == 0);
1520
1521 return journal_file_find_data_object_with_hash(
1522 f,
1523 data, size,
1524 journal_file_hash_data(f, data, size),
1525 ret, ret_offset);
1526 }
1527
1528 static int journal_file_append_field(
1529 JournalFile *f,
1530 const void *field, uint64_t size,
1531 Object **ret, uint64_t *ret_offset) {
1532
1533 uint64_t hash, p;
1534 uint64_t osize;
1535 Object *o;
1536 int r;
1537
1538 assert(f);
1539 assert(field && size > 0);
1540
1541 hash = journal_file_hash_data(f, field, size);
1542
1543 r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
1544 if (r < 0)
1545 return r;
1546 else if (r > 0) {
1547
1548 if (ret)
1549 *ret = o;
1550
1551 if (ret_offset)
1552 *ret_offset = p;
1553
1554 return 0;
1555 }
1556
1557 osize = offsetof(Object, field.payload) + size;
1558 r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
1559 if (r < 0)
1560 return r;
1561
1562 o->field.hash = htole64(hash);
1563 memcpy(o->field.payload, field, size);
1564
1565 r = journal_file_link_field(f, o, p, hash);
1566 if (r < 0)
1567 return r;
1568
1569 /* The linking might have altered the window, so let's
1570 * refresh our pointer */
1571 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1572 if (r < 0)
1573 return r;
1574
1575 #if HAVE_GCRYPT
1576 r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
1577 if (r < 0)
1578 return r;
1579 #endif
1580
1581 if (ret)
1582 *ret = o;
1583
1584 if (ret_offset)
1585 *ret_offset = p;
1586
1587 return 0;
1588 }
1589
1590 static int journal_file_append_data(
1591 JournalFile *f,
1592 const void *data, uint64_t size,
1593 Object **ret, uint64_t *ret_offset) {
1594
1595 uint64_t hash, p;
1596 uint64_t osize;
1597 Object *o;
1598 int r, compression = 0;
1599 const void *eq;
1600
1601 assert(f);
1602 assert(data || size == 0);
1603
1604 hash = journal_file_hash_data(f, data, size);
1605
1606 r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);
1607 if (r < 0)
1608 return r;
1609 if (r > 0) {
1610
1611 if (ret)
1612 *ret = o;
1613
1614 if (ret_offset)
1615 *ret_offset = p;
1616
1617 return 0;
1618 }
1619
1620 osize = offsetof(Object, data.payload) + size;
1621 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1622 if (r < 0)
1623 return r;
1624
1625 o->data.hash = htole64(hash);
1626
1627 #if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
1628 if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
1629 size_t rsize = 0;
1630
1631 compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
1632
1633 if (compression >= 0) {
1634 o->object.size = htole64(offsetof(Object, data.payload) + rsize);
1635 o->object.flags |= compression;
1636
1637 log_debug("Compressed data object %"PRIu64" -> %zu using %s",
1638 size, rsize, object_compressed_to_string(compression));
1639 } else
1640 /* Compression didn't work, we don't really care why, let's continue without compression */
1641 compression = 0;
1642 }
1643 #endif
1644
1645 if (compression == 0)
1646 memcpy_safe(o->data.payload, data, size);
1647
1648 r = journal_file_link_data(f, o, p, hash);
1649 if (r < 0)
1650 return r;
1651
1652 #if HAVE_GCRYPT
1653 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1654 if (r < 0)
1655 return r;
1656 #endif
1657
1658 /* The linking might have altered the window, so let's
1659 * refresh our pointer */
1660 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1661 if (r < 0)
1662 return r;
1663
1664 if (!data)
1665 eq = NULL;
1666 else
1667 eq = memchr(data, '=', size);
1668 if (eq && eq > data) {
1669 Object *fo = NULL;
1670 uint64_t fp;
1671
1672 /* Create field object ... */
1673 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
1674 if (r < 0)
1675 return r;
1676
1677 /* ... and link it in. */
1678 o->data.next_field_offset = fo->field.head_data_offset;
1679 fo->field.head_data_offset = le64toh(p);
1680 }
1681
1682 if (ret)
1683 *ret = o;
1684
1685 if (ret_offset)
1686 *ret_offset = p;
1687
1688 return 0;
1689 }
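/*
 * An illustrative note on the field linking at the end of journal_file_append_data()
 * (hypothetical payload, endianness conversions elided): appending the data object
 * "FOO=bar" also creates or finds the field object "FOO" and pushes the new data object
 * onto the front of that field's singly linked list:
 *
 *     o->data.next_field_offset  = <previous head of FOO's data list, or 0>;
 *     fo->field.head_data_offset = <offset of this new data object>;
 *
 * All data objects carrying the field "FOO" can then be enumerated by following
 * head_data_offset and the next_field_offset chain, without scanning every data object in
 * the file, which is what sd_journal_query_unique() relies on.
 */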
1690
1691 uint64_t journal_file_entry_n_items(Object *o) {
1692 uint64_t sz;
1693 assert(o);
1694
1695 if (o->object.type != OBJECT_ENTRY)
1696 return 0;
1697
1698 sz = le64toh(READ_NOW(o->object.size));
1699 if (sz < offsetof(Object, entry.items))
1700 return 0;
1701
1702 return (sz - offsetof(Object, entry.items)) / sizeof(EntryItem);
1703 }
1704
1705 uint64_t journal_file_entry_array_n_items(Object *o) {
1706 uint64_t sz;
1707
1708 assert(o);
1709
1710 if (o->object.type != OBJECT_ENTRY_ARRAY)
1711 return 0;
1712
1713 sz = le64toh(READ_NOW(o->object.size));
1714 if (sz < offsetof(Object, entry_array.items))
1715 return 0;
1716
1717 return (sz - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
1718 }
1719
1720 uint64_t journal_file_hash_table_n_items(Object *o) {
1721 uint64_t sz;
1722
1723 assert(o);
1724
1725 if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
1726 return 0;
1727
1728 sz = le64toh(READ_NOW(o->object.size));
1729 if (sz < offsetof(Object, hash_table.items))
1730 return 0;
1731
1732 return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem);
1733 }
1734
1735 static int link_entry_into_array(JournalFile *f,
1736 le64_t *first,
1737 le64_t *idx,
1738 uint64_t p) {
1739 int r;
1740 uint64_t n = 0, ap = 0, q, i, a, hidx;
1741 Object *o;
1742
1743 assert(f);
1744 assert(f->header);
1745 assert(first);
1746 assert(idx);
1747 assert(p > 0);
1748
1749 a = le64toh(*first);
1750 i = hidx = le64toh(READ_NOW(*idx));
1751 while (a > 0) {
1752
1753 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
1754 if (r < 0)
1755 return r;
1756
1757 n = journal_file_entry_array_n_items(o);
1758 if (i < n) {
1759 o->entry_array.items[i] = htole64(p);
1760 *idx = htole64(hidx + 1);
1761 return 0;
1762 }
1763
1764 i -= n;
1765 ap = a;
1766 a = le64toh(o->entry_array.next_entry_array_offset);
1767 }
1768
1769 if (hidx > n)
1770 n = (hidx+1) * 2;
1771 else
1772 n = n * 2;
1773
1774 if (n < 4)
1775 n = 4;
1776
1777 r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
1778 offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
1779 &o, &q);
1780 if (r < 0)
1781 return r;
1782
1783 #if HAVE_GCRYPT
1784 r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
1785 if (r < 0)
1786 return r;
1787 #endif
1788
1789 o->entry_array.items[i] = htole64(p);
1790
1791 if (ap == 0)
1792 *first = htole64(q);
1793 else {
1794 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
1795 if (r < 0)
1796 return r;
1797
1798 o->entry_array.next_entry_array_offset = htole64(q);
1799 }
1800
1801 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
1802 f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
1803
1804 *idx = htole64(hidx + 1);
1805
1806 return 0;
1807 }
1808
1809 static int link_entry_into_array_plus_one(JournalFile *f,
1810 le64_t *extra,
1811 le64_t *first,
1812 le64_t *idx,
1813 uint64_t p) {
1814
1815 uint64_t hidx;
1816 int r;
1817
1818 assert(f);
1819 assert(extra);
1820 assert(first);
1821 assert(idx);
1822 assert(p > 0);
1823
1824 hidx = le64toh(READ_NOW(*idx));
1825 if (hidx == UINT64_MAX)
1826 return -EBADMSG;
1827 if (hidx == 0)
1828 *extra = htole64(p);
1829 else {
1830 le64_t i;
1831
1832 i = htole64(hidx - 1);
1833 r = link_entry_into_array(f, first, &i, p);
1834 if (r < 0)
1835 return r;
1836 }
1837
1838 *idx = htole64(hidx + 1);
1839 return 0;
1840 }
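/*
 * An illustrative picture of the "plus one" layout used above (hypothetical entry offsets
 * E1..E4 and array offset A): a data object keeps its very first referencing entry inline
 * and only starts an entry array chain for the second one, so after linking four entries
 * the bookkeeping in the data object looks like:
 *
 *     n_entries          = 4
 *     entry_offset       = E1        // the "extra" slot: singletons need no array at all
 *     entry_array_offset = A         // first OBJECT_ENTRY_ARRAY of the chain
 *     A.items            = { E2, E3, E4, <unused> }, A.next_entry_array_offset = 0
 *
 * link_entry_into_array() doubles the capacity of each newly appended array (minimum 4
 * items), so even long chains consist of only a logarithmic number of arrays to walk.
 */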
1841
1842 static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
1843 uint64_t p;
1844 int r;
1845
1846 assert(f);
1847 assert(o);
1848 assert(offset > 0);
1849
1850 p = le64toh(o->entry.items[i].object_offset);
1851 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1852 if (r < 0)
1853 return r;
1854
1855 return link_entry_into_array_plus_one(f,
1856 &o->data.entry_offset,
1857 &o->data.entry_array_offset,
1858 &o->data.n_entries,
1859 offset);
1860 }
1861
1862 static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
1863 uint64_t n, i;
1864 int r;
1865
1866 assert(f);
1867 assert(f->header);
1868 assert(o);
1869 assert(offset > 0);
1870
1871 if (o->object.type != OBJECT_ENTRY)
1872 return -EINVAL;
1873
1874 __sync_synchronize();
1875
1876 /* Link up the entry itself */
1877 r = link_entry_into_array(f,
1878 &f->header->entry_array_offset,
1879 &f->header->n_entries,
1880 offset);
1881 if (r < 0)
1882 return r;
1883
1884 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1885
1886 if (f->header->head_entry_realtime == 0)
1887 f->header->head_entry_realtime = o->entry.realtime;
1888
1889 f->header->tail_entry_realtime = o->entry.realtime;
1890 f->header->tail_entry_monotonic = o->entry.monotonic;
1891
1892 /* Link up the items */
1893 n = journal_file_entry_n_items(o);
1894 for (i = 0; i < n; i++) {
1895 r = journal_file_link_entry_item(f, o, offset, i);
1896 if (r < 0)
1897 return r;
1898 }
1899
1900 return 0;
1901 }
1902
1903 static int journal_file_append_entry_internal(
1904 JournalFile *f,
1905 const dual_timestamp *ts,
1906 const sd_id128_t *boot_id,
1907 uint64_t xor_hash,
1908 const EntryItem items[], unsigned n_items,
1909 uint64_t *seqnum,
1910 Object **ret, uint64_t *ret_offset) {
1911 uint64_t np;
1912 uint64_t osize;
1913 Object *o;
1914 int r;
1915
1916 assert(f);
1917 assert(f->header);
1918 assert(items || n_items == 0);
1919 assert(ts);
1920
1921 osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
1922
1923 r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
1924 if (r < 0)
1925 return r;
1926
1927 o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
1928 memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
1929 o->entry.realtime = htole64(ts->realtime);
1930 o->entry.monotonic = htole64(ts->monotonic);
1931 o->entry.xor_hash = htole64(xor_hash);
1932 if (boot_id)
1933 f->header->boot_id = *boot_id;
1934 o->entry.boot_id = f->header->boot_id;
1935
1936 #if HAVE_GCRYPT
1937 r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
1938 if (r < 0)
1939 return r;
1940 #endif
1941
1942 r = journal_file_link_entry(f, o, np);
1943 if (r < 0)
1944 return r;
1945
1946 if (ret)
1947 *ret = o;
1948
1949 if (ret_offset)
1950 *ret_offset = np;
1951
1952 return 0;
1953 }
1954
1955 void journal_file_post_change(JournalFile *f) {
1956 assert(f);
1957
1958 if (f->fd < 0)
1959 return;
1960
1961 /* inotify() does not receive IN_MODIFY events for file
1962 * accesses done via mmap(). Hence, after each write we
1963 * trigger IN_MODIFY ourselves by truncating the journal
1964 * file to its current size. */
1965
1966 __sync_synchronize();
1967
1968 if (ftruncate(f->fd, f->last_stat.st_size) < 0)
1969 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
1970 }
1971
1972 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
1973 assert(userdata);
1974
1975 journal_file_post_change(userdata);
1976
1977 return 1;
1978 }
1979
1980 static void schedule_post_change(JournalFile *f) {
1981 uint64_t now;
1982 int r;
1983
1984 assert(f);
1985 assert(f->post_change_timer);
1986
1987 r = sd_event_source_get_enabled(f->post_change_timer, NULL);
1988 if (r < 0) {
1989 log_debug_errno(r, "Failed to get ftruncate timer state: %m");
1990 goto fail;
1991 }
1992 if (r > 0)
1993 return;
1994
1995 r = sd_event_now(sd_event_source_get_event(f->post_change_timer), CLOCK_MONOTONIC, &now);
1996 if (r < 0) {
1997 log_debug_errno(r, "Failed to get clock's now for scheduling ftruncate: %m");
1998 goto fail;
1999 }
2000
2001 r = sd_event_source_set_time(f->post_change_timer, now + f->post_change_timer_period);
2002 if (r < 0) {
2003 log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
2004 goto fail;
2005 }
2006
2007 r = sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_ONESHOT);
2008 if (r < 0) {
2009 log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
2010 goto fail;
2011 }
2012
2013 return;
2014
2015 fail:
2016 /* On failure, let's simply post the change immediately. */
2017 journal_file_post_change(f);
2018 }
2019
2020 /* Enable coalesced change posting in a timer on the provided sd_event instance */
2021 int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
2022 _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
2023 int r;
2024
2025 assert(f);
2026 assert_return(!f->post_change_timer, -EINVAL);
2027 assert(e);
2028 assert(t);
2029
2030 r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
2031 if (r < 0)
2032 return r;
2033
2034 r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
2035 if (r < 0)
2036 return r;
2037
2038 f->post_change_timer = TAKE_PTR(timer);
2039 f->post_change_timer_period = t;
2040
2041 return r;
2042 }
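/*
 * A minimal usage sketch for the coalescing timer above (assumptions: 'e' is an existing
 * sd_event loop, 'f' an open journal file, the 250ms period is just an example value):
 *
 *     r = journal_file_enable_post_change_timer(f, e, 250 * USEC_PER_MSEC);
 *     if (r < 0)
 *         return r;
 *
 * Once the timer is set up, journal_file_append_entry() calls schedule_post_change()
 * instead of journal_file_post_change(), so the ftruncate()-based IN_MODIFY notification
 * fires at most once per period during a burst of writes rather than once per entry.
 */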
2043
2044 static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
2045 return CMP(le64toh(a->object_offset), le64toh(b->object_offset));
2046 }
2047
2048 int journal_file_append_entry(
2049 JournalFile *f,
2050 const dual_timestamp *ts,
2051 const sd_id128_t *boot_id,
2052 const struct iovec iovec[], unsigned n_iovec,
2053 uint64_t *seqnum,
2054 Object **ret, uint64_t *ret_offset) {
2055
2056 unsigned i;
2057 EntryItem *items;
2058 int r;
2059 uint64_t xor_hash = 0;
2060 struct dual_timestamp _ts;
2061
2062 assert(f);
2063 assert(f->header);
2064 assert(iovec || n_iovec == 0);
2065
2066 if (ts) {
2067 if (!VALID_REALTIME(ts->realtime))
2068 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2069 "Invalid realtime timestamp %" PRIu64 ", refusing entry.",
2070 ts->realtime);
2071 if (!VALID_MONOTONIC(ts->monotonic))
2072 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2073 "Invalid monotomic timestamp %" PRIu64 ", refusing entry.",
2074 ts->monotonic);
2075 } else {
2076 dual_timestamp_get(&_ts);
2077 ts = &_ts;
2078 }
2079
2080 #if HAVE_GCRYPT
2081 r = journal_file_maybe_append_tag(f, ts->realtime);
2082 if (r < 0)
2083 return r;
2084 #endif
2085
2086 /* alloca() can't take 0, hence let's allocate at least one */
2087 items = newa(EntryItem, MAX(1u, n_iovec));
2088
2089 for (i = 0; i < n_iovec; i++) {
2090 uint64_t p;
2091 Object *o;
2092
2093 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
2094 if (r < 0)
2095 return r;
2096
2097 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2098 * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2099 * specific record, and to give records with otherwise identical position (i.e. match in seqno,
2100 * timestamp, …) a stable ordering. For that the hashes must not differ from file to file, which
2101 * they would if we used the per-file keyed hashes. Hence let's calculate the plain Jenkins
2102 * hash here instead. This also has the benefit that cursors for old and new journal files
2103 * remain completely identical (they include the XOR hash after all). For classic Jenkins-hash
2104 * files things are easier, we can just take the value from the stored record directly. */
2105
2106 if (JOURNAL_HEADER_KEYED_HASH(f->header))
2107 xor_hash ^= jenkins_hash64(iovec[i].iov_base, iovec[i].iov_len);
2108 else
2109 xor_hash ^= le64toh(o->data.hash);
2110
2111 items[i].object_offset = htole64(p);
2112 items[i].hash = o->data.hash;
2113 }
2114
2115 /* Order by the position on disk, in order to improve seek
2116 * times for rotating media. */
2117 typesafe_qsort(items, n_iovec, entry_item_cmp);
2118
2119 r = journal_file_append_entry_internal(f, ts, boot_id, xor_hash, items, n_iovec, seqnum, ret, ret_offset);
2120
2121 /* If the memory mapping triggered a SIGBUS then we return an
2122 * IO error and ignore the error code passed down to us, since
2123 * it is very likely just an effect of a nullified replacement
2124 * mapping page */
2125
2126 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
2127 r = -EIO;
2128
2129 if (f->post_change_timer)
2130 schedule_post_change(f);
2131 else
2132 journal_file_post_change(f);
2133
2134 return r;
2135 }
2136
2137 typedef struct ChainCacheItem {
2138 uint64_t first; /* the array at the beginning of the chain */
2139 uint64_t array; /* the cached array */
2140 uint64_t begin; /* the first item in the cached array */
2141 uint64_t total; /* the total number of items in all arrays before this one in the chain */
2142 uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
2143 } ChainCacheItem;
2144
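/* Remembers how far we got when walking an entry array chain, so that the next lookup on the same
 * chain can skip ahead. The cache is keyed by 'first', the offset of the first array in the chain.
 * If the cache already holds CHAIN_CACHE_MAX items the oldest one is recycled; if the position to
 * cache is the head of the chain we don't bother caching at all. */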
2145 static void chain_cache_put(
2146 OrderedHashmap *h,
2147 ChainCacheItem *ci,
2148 uint64_t first,
2149 uint64_t array,
2150 uint64_t begin,
2151 uint64_t total,
2152 uint64_t last_index) {
2153
2154 if (!ci) {
2155 /* If the chain item to cache for this chain is the
2156 * first one it's not worth caching anything */
2157 if (array == first)
2158 return;
2159
2160 if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
2161 ci = ordered_hashmap_steal_first(h);
2162 assert(ci);
2163 } else {
2164 ci = new(ChainCacheItem, 1);
2165 if (!ci)
2166 return;
2167 }
2168
2169 ci->first = first;
2170
2171 if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
2172 free(ci);
2173 return;
2174 }
2175 } else
2176 assert(ci->first == first);
2177
2178 ci->array = array;
2179 ci->begin = begin;
2180 ci->total = total;
2181 ci->last_index = last_index;
2182 }
2183
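/* Returns the i-th entry of the entry array chain starting at offset 'first'. The chain cache is
 * consulted to skip over arrays walked in a previous invocation, then the next_entry_array_offset
 * links are followed until the array containing index 'i' is reached. */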
2184 static int generic_array_get(
2185 JournalFile *f,
2186 uint64_t first,
2187 uint64_t i,
2188 Object **ret, uint64_t *ret_offset) {
2189
2190 Object *o;
2191 uint64_t p = 0, a, t = 0;
2192 int r;
2193 ChainCacheItem *ci;
2194
2195 assert(f);
2196
2197 a = first;
2198
2199 /* Try the chain cache first */
2200 ci = ordered_hashmap_get(f->chain_cache, &first);
2201 if (ci && i > ci->total) {
2202 a = ci->array;
2203 i -= ci->total;
2204 t = ci->total;
2205 }
2206
2207 while (a > 0) {
2208 uint64_t k;
2209
2210 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
2211 if (r < 0)
2212 return r;
2213
2214 k = journal_file_entry_array_n_items(o);
2215 if (i < k) {
2216 p = le64toh(o->entry_array.items[i]);
2217 goto found;
2218 }
2219
2220 i -= k;
2221 t += k;
2222 a = le64toh(o->entry_array.next_entry_array_offset);
2223 }
2224
2225 return 0;
2226
2227 found:
2228 /* Let's cache this item for the next invocation */
2229 chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
2230
2231 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2232 if (r < 0)
2233 return r;
2234
2235 if (ret)
2236 *ret = o;
2237
2238 if (ret_offset)
2239 *ret_offset = p;
2240
2241 return 1;
2242 }
2243
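/* Like generic_array_get(), but for objects that store their first entry inline: index 0 refers
 * to the entry at offset 'extra', all further indices are looked up in the entry array chain. */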
2244 static int generic_array_get_plus_one(
2245 JournalFile *f,
2246 uint64_t extra,
2247 uint64_t first,
2248 uint64_t i,
2249 Object **ret, uint64_t *ret_offset) {
2250
2251 Object *o;
2252
2253 assert(f);
2254
2255 if (i == 0) {
2256 int r;
2257
2258 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2259 if (r < 0)
2260 return r;
2261
2262 if (ret)
2263 *ret = o;
2264
2265 if (ret_offset)
2266 *ret_offset = extra;
2267
2268 return 1;
2269 }
2270
2271 return generic_array_get(f, first, i-1, ret, ret_offset);
2272 }
2273
2274 enum {
2275 TEST_FOUND,
2276 TEST_LEFT,
2277 TEST_RIGHT
2278 };
2279
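/* Binary search through the entry array chain starting at 'first', which contains 'n' entries in
 * total. The test_object() callback compares the object at a given offset against 'needle' and
 * returns TEST_FOUND, TEST_LEFT or TEST_RIGHT. Depending on 'direction' either the matching entry
 * or the closest entry before/after the needle is returned, as object, offset and index within
 * the chain. The chain cache and the cached last_index are used to narrow the search range early. */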
2280 static int generic_array_bisect(
2281 JournalFile *f,
2282 uint64_t first,
2283 uint64_t n,
2284 uint64_t needle,
2285 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2286 direction_t direction,
2287 Object **ret,
2288 uint64_t *ret_offset,
2289 uint64_t *ret_idx) {
2290
2291 uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
2292 bool subtract_one = false;
2293 Object *o, *array = NULL;
2294 int r;
2295 ChainCacheItem *ci;
2296
2297 assert(f);
2298 assert(test_object);
2299
2300 /* Start with the first array in the chain */
2301 a = first;
2302
2303 ci = ordered_hashmap_get(f->chain_cache, &first);
2304 if (ci && n > ci->total && ci->begin != 0) {
2305 /* Ah, we have iterated this bisection array chain
2306 * previously! Let's see if we can skip ahead in the
2307 * chain, as far as the last time. But we can't jump
2308 * backwards in the chain, so let's check that
2309 * first. */
2310
2311 r = test_object(f, ci->begin, needle);
2312 if (r < 0)
2313 return r;
2314
2315 if (r == TEST_LEFT) {
2316 /* OK, what we are looking for lies to the right of the
2317 * beginning of this EntryArray, so let's jump
2318 * straight to the previously cached array in the
2319 * chain */
2320
2321 a = ci->array;
2322 n -= ci->total;
2323 t = ci->total;
2324 last_index = ci->last_index;
2325 }
2326 }
2327
2328 while (a > 0) {
2329 uint64_t left, right, k, lp;
2330
2331 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
2332 if (r < 0)
2333 return r;
2334
2335 k = journal_file_entry_array_n_items(array);
2336 right = MIN(k, n);
2337 if (right <= 0)
2338 return 0;
2339
2340 i = right - 1;
2341 lp = p = le64toh(array->entry_array.items[i]);
2342 if (p <= 0)
2343 r = -EBADMSG;
2344 else
2345 r = test_object(f, p, needle);
2346 if (r == -EBADMSG) {
2347 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2348 n = i;
2349 continue;
2350 }
2351 if (r < 0)
2352 return r;
2353
2354 if (r == TEST_FOUND)
2355 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2356
2357 if (r == TEST_RIGHT) {
2358 left = 0;
2359 right -= 1;
2360
2361 if (last_index != (uint64_t) -1) {
2362 assert(last_index <= right);
2363
2364 /* If we cached the last index we
2365 * looked at, let's try not to jump
2366 * too wildly around and see if we can
2367 * limit the range to look at early to
2368 * the immediate neighbors of the last
2369 * index we looked at. */
2370
2371 if (last_index > 0) {
2372 uint64_t x = last_index - 1;
2373
2374 p = le64toh(array->entry_array.items[x]);
2375 if (p <= 0)
2376 return -EBADMSG;
2377
2378 r = test_object(f, p, needle);
2379 if (r < 0)
2380 return r;
2381
2382 if (r == TEST_FOUND)
2383 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2384
2385 if (r == TEST_RIGHT)
2386 right = x;
2387 else
2388 left = x + 1;
2389 }
2390
2391 if (last_index < right) {
2392 uint64_t y = last_index + 1;
2393
2394 p = le64toh(array->entry_array.items[y]);
2395 if (p <= 0)
2396 return -EBADMSG;
2397
2398 r = test_object(f, p, needle);
2399 if (r < 0)
2400 return r;
2401
2402 if (r == TEST_FOUND)
2403 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2404
2405 if (r == TEST_RIGHT)
2406 right = y;
2407 else
2408 left = y + 1;
2409 }
2410 }
2411
2412 for (;;) {
2413 if (left == right) {
2414 if (direction == DIRECTION_UP)
2415 subtract_one = true;
2416
2417 i = left;
2418 goto found;
2419 }
2420
2421 assert(left < right);
2422 i = (left + right) / 2;
2423
2424 p = le64toh(array->entry_array.items[i]);
2425 if (p <= 0)
2426 r = -EBADMSG;
2427 else
2428 r = test_object(f, p, needle);
2429 if (r == -EBADMSG) {
2430 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2431 right = n = i;
2432 continue;
2433 }
2434 if (r < 0)
2435 return r;
2436
2437 if (r == TEST_FOUND)
2438 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2439
2440 if (r == TEST_RIGHT)
2441 right = i;
2442 else
2443 left = i + 1;
2444 }
2445 }
2446
2447 if (k >= n) {
2448 if (direction == DIRECTION_UP) {
2449 i = n;
2450 subtract_one = true;
2451 goto found;
2452 }
2453
2454 return 0;
2455 }
2456
2457 last_p = lp;
2458
2459 n -= k;
2460 t += k;
2461 last_index = (uint64_t) -1;
2462 a = le64toh(array->entry_array.next_entry_array_offset);
2463 }
2464
2465 return 0;
2466
2467 found:
2468 if (subtract_one && t == 0 && i == 0)
2469 return 0;
2470
2471 /* Let's cache this item for the next invocation */
2472 chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);
2473
2474 if (subtract_one && i == 0)
2475 p = last_p;
2476 else if (subtract_one)
2477 p = le64toh(array->entry_array.items[i-1]);
2478 else
2479 p = le64toh(array->entry_array.items[i]);
2480
2481 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2482 if (r < 0)
2483 return r;
2484
2485 if (ret)
2486 *ret = o;
2487
2488 if (ret_offset)
2489 *ret_offset = p;
2490
2491 if (ret_idx)
2492 *ret_idx = t + i + (subtract_one ? -1 : 0);
2493
2494 return 1;
2495 }
2496
2497 static int generic_array_bisect_plus_one(
2498 JournalFile *f,
2499 uint64_t extra,
2500 uint64_t first,
2501 uint64_t n,
2502 uint64_t needle,
2503 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2504 direction_t direction,
2505 Object **ret,
2506 uint64_t *ret_offset,
2507 uint64_t *ret_idx) {
2508
2509 int r;
2510 bool step_back = false;
2511 Object *o;
2512
2513 assert(f);
2514 assert(test_object);
2515
2516 if (n <= 0)
2517 return 0;
2518
2519 /* This bisects the array in object 'first', but first checks
2520 * the extra entry at offset 'extra'. */
2521 r = test_object(f, extra, needle);
2522 if (r < 0)
2523 return r;
2524
2525 if (r == TEST_FOUND)
2526 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2527
2528 /* If we are looking with DIRECTION_UP then we need to first
2529 check whether the actual array contains a matching entry, and
2530 return the last of those. But if there isn't any we need
2531 to return this one instead. Hence remember this, and return it
2532 below. */
2533 if (r == TEST_LEFT)
2534 step_back = direction == DIRECTION_UP;
2535
2536 if (r == TEST_RIGHT) {
2537 if (direction == DIRECTION_DOWN)
2538 goto found;
2539 else
2540 return 0;
2541 }
2542
2543 r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, ret_offset, ret_idx);
2544
2545 if (r == 0 && step_back)
2546 goto found;
2547
2548 if (r > 0 && ret_idx)
2549 (*ret_idx)++;
2550
2551 return r;
2552
2553 found:
2554 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2555 if (r < 0)
2556 return r;
2557
2558 if (ret)
2559 *ret = o;
2560
2561 if (ret_offset)
2562 *ret_offset = extra;
2563
2564 if (ret_idx)
2565 *ret_idx = 0;
2566
2567 return 1;
2568 }
2569
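/* The test_object_*() helpers below compare the object at offset 'p' against a needle value,
 * returning TEST_FOUND on an exact match, TEST_LEFT if the object sorts before the needle and
 * TEST_RIGHT if it sorts after it. They are used as callbacks for the bisection functions above. */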
2570 _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
2571 assert(f);
2572 assert(p > 0);
2573
2574 if (p == needle)
2575 return TEST_FOUND;
2576 else if (p < needle)
2577 return TEST_LEFT;
2578 else
2579 return TEST_RIGHT;
2580 }
2581
2582 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
2583 uint64_t sq;
2584 Object *o;
2585 int r;
2586
2587 assert(f);
2588 assert(p > 0);
2589
2590 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2591 if (r < 0)
2592 return r;
2593
2594 sq = le64toh(READ_NOW(o->entry.seqnum));
2595 if (sq == needle)
2596 return TEST_FOUND;
2597 else if (sq < needle)
2598 return TEST_LEFT;
2599 else
2600 return TEST_RIGHT;
2601 }
2602
2603 int journal_file_move_to_entry_by_seqnum(
2604 JournalFile *f,
2605 uint64_t seqnum,
2606 direction_t direction,
2607 Object **ret,
2608 uint64_t *ret_offset) {
2609 assert(f);
2610 assert(f->header);
2611
2612 return generic_array_bisect(
2613 f,
2614 le64toh(f->header->entry_array_offset),
2615 le64toh(f->header->n_entries),
2616 seqnum,
2617 test_object_seqnum,
2618 direction,
2619 ret, ret_offset, NULL);
2620 }
2621
2622 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
2623 Object *o;
2624 uint64_t rt;
2625 int r;
2626
2627 assert(f);
2628 assert(p > 0);
2629
2630 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2631 if (r < 0)
2632 return r;
2633
2634 rt = le64toh(READ_NOW(o->entry.realtime));
2635 if (rt == needle)
2636 return TEST_FOUND;
2637 else if (rt < needle)
2638 return TEST_LEFT;
2639 else
2640 return TEST_RIGHT;
2641 }
2642
2643 int journal_file_move_to_entry_by_realtime(
2644 JournalFile *f,
2645 uint64_t realtime,
2646 direction_t direction,
2647 Object **ret,
2648 uint64_t *ret_offset) {
2649 assert(f);
2650 assert(f->header);
2651
2652 return generic_array_bisect(
2653 f,
2654 le64toh(f->header->entry_array_offset),
2655 le64toh(f->header->n_entries),
2656 realtime,
2657 test_object_realtime,
2658 direction,
2659 ret, ret_offset, NULL);
2660 }
2661
2662 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
2663 Object *o;
2664 uint64_t m;
2665 int r;
2666
2667 assert(f);
2668 assert(p > 0);
2669
2670 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2671 if (r < 0)
2672 return r;
2673
2674 m = le64toh(READ_NOW(o->entry.monotonic));
2675 if (m == needle)
2676 return TEST_FOUND;
2677 else if (m < needle)
2678 return TEST_LEFT;
2679 else
2680 return TEST_RIGHT;
2681 }
2682
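/* Looks up the data object for the field "_BOOT_ID=<boot_id>", whose entry list links all entries
 * of the specified boot. */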
2683 static int find_data_object_by_boot_id(
2684 JournalFile *f,
2685 sd_id128_t boot_id,
2686 Object **o,
2687 uint64_t *b) {
2688
2689 char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
2690
2691 sd_id128_to_string(boot_id, t + 9);
2692 return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);
2693 }
2694
2695 int journal_file_move_to_entry_by_monotonic(
2696 JournalFile *f,
2697 sd_id128_t boot_id,
2698 uint64_t monotonic,
2699 direction_t direction,
2700 Object **ret,
2701 uint64_t *ret_offset) {
2702
2703 Object *o;
2704 int r;
2705
2706 assert(f);
2707
2708 r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
2709 if (r < 0)
2710 return r;
2711 if (r == 0)
2712 return -ENOENT;
2713
2714 return generic_array_bisect_plus_one(
2715 f,
2716 le64toh(o->data.entry_offset),
2717 le64toh(o->data.entry_array_offset),
2718 le64toh(o->data.n_entries),
2719 monotonic,
2720 test_object_monotonic,
2721 direction,
2722 ret, ret_offset, NULL);
2723 }
2724
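/* The following functions track the current read location of this file: journal_file_reset_location()
 * rewinds to the head, journal_file_save_location() records the entry we are currently positioned on,
 * and journal_file_compare_locations() orders two files by their current entries. */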
2725 void journal_file_reset_location(JournalFile *f) {
2726 f->location_type = LOCATION_HEAD;
2727 f->current_offset = 0;
2728 f->current_seqnum = 0;
2729 f->current_realtime = 0;
2730 f->current_monotonic = 0;
2731 zero(f->current_boot_id);
2732 f->current_xor_hash = 0;
2733 }
2734
2735 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
2736 f->location_type = LOCATION_SEEK;
2737 f->current_offset = offset;
2738 f->current_seqnum = le64toh(o->entry.seqnum);
2739 f->current_realtime = le64toh(o->entry.realtime);
2740 f->current_monotonic = le64toh(o->entry.monotonic);
2741 f->current_boot_id = o->entry.boot_id;
2742 f->current_xor_hash = le64toh(o->entry.xor_hash);
2743 }
2744
2745 int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
2746 int r;
2747
2748 assert(af);
2749 assert(af->header);
2750 assert(bf);
2751 assert(bf->header);
2752 assert(af->location_type == LOCATION_SEEK);
2753 assert(bf->location_type == LOCATION_SEEK);
2754
2755 /* If contents and timestamps match, these entries are
2756 * identical, even if the seqnum does not match */
2757 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
2758 af->current_monotonic == bf->current_monotonic &&
2759 af->current_realtime == bf->current_realtime &&
2760 af->current_xor_hash == bf->current_xor_hash)
2761 return 0;
2762
2763 if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
2764
2765 /* If this is from the same seqnum source, compare
2766 * seqnums */
2767 r = CMP(af->current_seqnum, bf->current_seqnum);
2768 if (r != 0)
2769 return r;
2770
2771 /* Wow! This is weird, different data but the same
2772 * seqnums? Something is borked, but let's make the
2773 * best of it and compare by time. */
2774 }
2775
2776 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {
2777
2778 /* If the boot id matches, compare monotonic time */
2779 r = CMP(af->current_monotonic, bf->current_monotonic);
2780 if (r != 0)
2781 return r;
2782 }
2783
2784 /* Otherwise, compare UTC time */
2785 r = CMP(af->current_realtime, bf->current_realtime);
2786 if (r != 0)
2787 return r;
2788
2789 /* Finally, compare by contents */
2790 return CMP(af->current_xor_hash, bf->current_xor_hash);
2791 }
2792
2793 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2794
2795 /* Increase or decrease the specified index, in the right direction. */
2796
2797 if (direction == DIRECTION_DOWN) {
2798 if (*i >= n - 1)
2799 return 0;
2800
2801 (*i) ++;
2802 } else {
2803 if (*i <= 0)
2804 return 0;
2805
2806 (*i) --;
2807 }
2808
2809 return 1;
2810 }
2811
2812 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
2813
2814 /* Consider it an error if either of the two offsets is uninitialized */
2815 if (old_offset == 0 || new_offset == 0)
2816 return false;
2817
2818 /* If we go down, the new offset must be larger than the old one. */
2819 return direction == DIRECTION_DOWN ?
2820 new_offset > old_offset :
2821 new_offset < old_offset;
2822 }
2823
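/* Returns the entry following (DIRECTION_DOWN) or preceding (DIRECTION_UP) the entry at offset 'p'
 * in the global entry array. If 'p' is 0, iteration starts at the very beginning or end of the
 * file. Entries that fail to resolve are skipped with a debug message. */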
2824 int journal_file_next_entry(
2825 JournalFile *f,
2826 uint64_t p,
2827 direction_t direction,
2828 Object **ret, uint64_t *ret_offset) {
2829
2830 uint64_t i, n, ofs;
2831 int r;
2832
2833 assert(f);
2834 assert(f->header);
2835
2836 n = le64toh(READ_NOW(f->header->n_entries));
2837 if (n <= 0)
2838 return 0;
2839
2840 if (p == 0)
2841 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2842 else {
2843 r = generic_array_bisect(f,
2844 le64toh(f->header->entry_array_offset),
2845 le64toh(f->header->n_entries),
2846 p,
2847 test_object_offset,
2848 DIRECTION_DOWN,
2849 NULL, NULL,
2850 &i);
2851 if (r <= 0)
2852 return r;
2853
2854 r = bump_array_index(&i, direction, n);
2855 if (r <= 0)
2856 return r;
2857 }
2858
2859 /* And jump to it */
2860 for (;;) {
2861 r = generic_array_get(f,
2862 le64toh(f->header->entry_array_offset),
2863 i,
2864 ret, &ofs);
2865 if (r > 0)
2866 break;
2867 if (r != -EBADMSG)
2868 return r;
2869
2870 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2871 * the next one might work for us instead. */
2872 log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
2873
2874 r = bump_array_index(&i, direction, n);
2875 if (r <= 0)
2876 return r;
2877 }
2878
2879 /* Ensure our array is properly ordered. */
2880 if (p > 0 && !check_properly_ordered(ofs, p, direction))
2881 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2882 "%s: entry array not properly ordered at entry %" PRIu64,
2883 f->path, i);
2884
2885 if (ret_offset)
2886 *ret_offset = ofs;
2887
2888 return 1;
2889 }
2890
2891 int journal_file_next_entry_for_data(
2892 JournalFile *f,
2893 Object *o, uint64_t p,
2894 uint64_t data_offset,
2895 direction_t direction,
2896 Object **ret, uint64_t *ret_offset) {
2897
2898 uint64_t i, n, ofs;
2899 Object *d;
2900 int r;
2901
2902 assert(f);
2903 assert(p > 0 || !o);
2904
2905 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2906 if (r < 0)
2907 return r;
2908
2909 n = le64toh(READ_NOW(d->data.n_entries));
2910 if (n <= 0)
2911 return n;
2912
2913 if (!o)
2914 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2915 else {
2916 if (o->object.type != OBJECT_ENTRY)
2917 return -EINVAL;
2918
2919 r = generic_array_bisect_plus_one(f,
2920 le64toh(d->data.entry_offset),
2921 le64toh(d->data.entry_array_offset),
2922 le64toh(d->data.n_entries),
2923 p,
2924 test_object_offset,
2925 DIRECTION_DOWN,
2926 NULL, NULL,
2927 &i);
2928
2929 if (r <= 0)
2930 return r;
2931
2932 r = bump_array_index(&i, direction, n);
2933 if (r <= 0)
2934 return r;
2935 }
2936
2937 for (;;) {
2938 r = generic_array_get_plus_one(f,
2939 le64toh(d->data.entry_offset),
2940 le64toh(d->data.entry_array_offset),
2941 i,
2942 ret, &ofs);
2943 if (r > 0)
2944 break;
2945 if (r != -EBADMSG)
2946 return r;
2947
2948 log_debug_errno(r, "Data entry item %" PRIu64 " is bad, skipping over it.", i);
2949
2950 r = bump_array_index(&i, direction, n);
2951 if (r <= 0)
2952 return r;
2953 }
2954
2955 /* Ensure our array is properly ordered. */
2956 if (p > 0 && !check_properly_ordered(ofs, p, direction))
2957 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2958 "%s data entry array not properly ordered at entry %" PRIu64,
2959 f->path, i);
2960
2961 if (ret_offset)
2962 *ret_offset = ofs;
2963
2964 return 1;
2965 }
2966
2967 int journal_file_move_to_entry_by_offset_for_data(
2968 JournalFile *f,
2969 uint64_t data_offset,
2970 uint64_t p,
2971 direction_t direction,
2972 Object **ret, uint64_t *ret_offset) {
2973
2974 int r;
2975 Object *d;
2976
2977 assert(f);
2978
2979 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2980 if (r < 0)
2981 return r;
2982
2983 return generic_array_bisect_plus_one(
2984 f,
2985 le64toh(d->data.entry_offset),
2986 le64toh(d->data.entry_array_offset),
2987 le64toh(d->data.n_entries),
2988 p,
2989 test_object_offset,
2990 direction,
2991 ret, ret_offset, NULL);
2992 }
2993
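/* Seeks to the entry closest to the given monotonic timestamp of the given boot that also contains
 * the data object at 'data_offset'. We alternate bisections between the _BOOT_ID= data object's
 * entry list and the given data object's entry list until both agree on the same entry offset. */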
2994 int journal_file_move_to_entry_by_monotonic_for_data(
2995 JournalFile *f,
2996 uint64_t data_offset,
2997 sd_id128_t boot_id,
2998 uint64_t monotonic,
2999 direction_t direction,
3000 Object **ret, uint64_t *ret_offset) {
3001
3002 Object *o, *d;
3003 int r;
3004 uint64_t b, z;
3005
3006 assert(f);
3007
3008 /* First, seek by time */
3009 r = find_data_object_by_boot_id(f, boot_id, &o, &b);
3010 if (r < 0)
3011 return r;
3012 if (r == 0)
3013 return -ENOENT;
3014
3015 r = generic_array_bisect_plus_one(f,
3016 le64toh(o->data.entry_offset),
3017 le64toh(o->data.entry_array_offset),
3018 le64toh(o->data.n_entries),
3019 monotonic,
3020 test_object_monotonic,
3021 direction,
3022 NULL, &z, NULL);
3023 if (r <= 0)
3024 return r;
3025
3026 /* And now, continue seeking until we find an entry that
3027 * exists in both bisection arrays */
3028
3029 for (;;) {
3030 Object *qo;
3031 uint64_t p, q;
3032
3033 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3034 if (r < 0)
3035 return r;
3036
3037 r = generic_array_bisect_plus_one(f,
3038 le64toh(d->data.entry_offset),
3039 le64toh(d->data.entry_array_offset),
3040 le64toh(d->data.n_entries),
3041 z,
3042 test_object_offset,
3043 direction,
3044 NULL, &p, NULL);
3045 if (r <= 0)
3046 return r;
3047
3048 r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
3049 if (r < 0)
3050 return r;
3051
3052 r = generic_array_bisect_plus_one(f,
3053 le64toh(o->data.entry_offset),
3054 le64toh(o->data.entry_array_offset),
3055 le64toh(o->data.n_entries),
3056 p,
3057 test_object_offset,
3058 direction,
3059 &qo, &q, NULL);
3060
3061 if (r <= 0)
3062 return r;
3063
3064 if (p == q) {
3065 if (ret)
3066 *ret = qo;
3067 if (ret_offset)
3068 *ret_offset = q;
3069
3070 return 1;
3071 }
3072
3073 z = q;
3074 }
3075 }
3076
3077 int journal_file_move_to_entry_by_seqnum_for_data(
3078 JournalFile *f,
3079 uint64_t data_offset,
3080 uint64_t seqnum,
3081 direction_t direction,
3082 Object **ret, uint64_t *ret_offset) {
3083
3084 Object *d;
3085 int r;
3086
3087 assert(f);
3088
3089 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3090 if (r < 0)
3091 return r;
3092
3093 return generic_array_bisect_plus_one(
3094 f,
3095 le64toh(d->data.entry_offset),
3096 le64toh(d->data.entry_array_offset),
3097 le64toh(d->data.n_entries),
3098 seqnum,
3099 test_object_seqnum,
3100 direction,
3101 ret, ret_offset, NULL);
3102 }
3103
3104 int journal_file_move_to_entry_by_realtime_for_data(
3105 JournalFile *f,
3106 uint64_t data_offset,
3107 uint64_t realtime,
3108 direction_t direction,
3109 Object **ret, uint64_t *ret_offset) {
3110
3111 Object *d;
3112 int r;
3113
3114 assert(f);
3115
3116 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3117 if (r < 0)
3118 return r;
3119
3120 return generic_array_bisect_plus_one(
3121 f,
3122 le64toh(d->data.entry_offset),
3123 le64toh(d->data.entry_array_offset),
3124 le64toh(d->data.n_entries),
3125 realtime,
3126 test_object_realtime,
3127 direction,
3128 ret, ret_offset, NULL);
3129 }
3130
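/* Debugging helper: prints the header, then walks all objects from the end of the header to the
 * tail object and prints the type (and a few key fields) of each. */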
3131 void journal_file_dump(JournalFile *f) {
3132 Object *o;
3133 int r;
3134 uint64_t p;
3135
3136 assert(f);
3137 assert(f->header);
3138
3139 journal_file_print_header(f);
3140
3141 p = le64toh(READ_NOW(f->header->header_size));
3142 while (p != 0) {
3143 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
3144 if (r < 0)
3145 goto fail;
3146
3147 switch (o->object.type) {
3148
3149 case OBJECT_UNUSED:
3150 printf("Type: OBJECT_UNUSED\n");
3151 break;
3152
3153 case OBJECT_DATA:
3154 printf("Type: OBJECT_DATA\n");
3155 break;
3156
3157 case OBJECT_FIELD:
3158 printf("Type: OBJECT_FIELD\n");
3159 break;
3160
3161 case OBJECT_ENTRY:
3162 printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
3163 le64toh(o->entry.seqnum),
3164 le64toh(o->entry.monotonic),
3165 le64toh(o->entry.realtime));
3166 break;
3167
3168 case OBJECT_FIELD_HASH_TABLE:
3169 printf("Type: OBJECT_FIELD_HASH_TABLE\n");
3170 break;
3171
3172 case OBJECT_DATA_HASH_TABLE:
3173 printf("Type: OBJECT_DATA_HASH_TABLE\n");
3174 break;
3175
3176 case OBJECT_ENTRY_ARRAY:
3177 printf("Type: OBJECT_ENTRY_ARRAY\n");
3178 break;
3179
3180 case OBJECT_TAG:
3181 printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
3182 le64toh(o->tag.seqnum),
3183 le64toh(o->tag.epoch));
3184 break;
3185
3186 default:
3187 printf("Type: unknown (%i)\n", o->object.type);
3188 break;
3189 }
3190
3191 if (o->object.flags & OBJECT_COMPRESSION_MASK)
3192 printf("Flags: %s\n",
3193 object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));
3194
3195 if (p == le64toh(f->header->tail_object_offset))
3196 p = 0;
3197 else
3198 p += ALIGN64(le64toh(o->object.size));
3199 }
3200
3201 return;
3202 fail:
3203 log_error("File corrupt");
3204 }
3205
3206 static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {
3207 const char *x;
3208
3209 x = format_timestamp(buf, l, t);
3210 if (x)
3211 return x;
3212 return " --- ";
3213 }
3214
3215 void journal_file_print_header(JournalFile *f) {
3216 char a[SD_ID128_STRING_MAX], b[SD_ID128_STRING_MAX], c[SD_ID128_STRING_MAX], d[SD_ID128_STRING_MAX];
3217 char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
3218 struct stat st;
3219 char bytes[FORMAT_BYTES_MAX];
3220
3221 assert(f);
3222 assert(f->header);
3223
3224 printf("File path: %s\n"
3225 "File ID: %s\n"
3226 "Machine ID: %s\n"
3227 "Boot ID: %s\n"
3228 "Sequential number ID: %s\n"
3229 "State: %s\n"
3230 "Compatible flags:%s%s\n"
3231 "Incompatible flags:%s%s%s%s%s\n"
3232 "Header size: %"PRIu64"\n"
3233 "Arena size: %"PRIu64"\n"
3234 "Data hash table size: %"PRIu64"\n"
3235 "Field hash table size: %"PRIu64"\n"
3236 "Rotate suggested: %s\n"
3237 "Head sequential number: %"PRIu64" (%"PRIx64")\n"
3238 "Tail sequential number: %"PRIu64" (%"PRIx64")\n"
3239 "Head realtime timestamp: %s (%"PRIx64")\n"
3240 "Tail realtime timestamp: %s (%"PRIx64")\n"
3241 "Tail monotonic timestamp: %s (%"PRIx64")\n"
3242 "Objects: %"PRIu64"\n"
3243 "Entry objects: %"PRIu64"\n",
3244 f->path,
3245 sd_id128_to_string(f->header->file_id, a),
3246 sd_id128_to_string(f->header->machine_id, b),
3247 sd_id128_to_string(f->header->boot_id, c),
3248 sd_id128_to_string(f->header->seqnum_id, d),
3249 f->header->state == STATE_OFFLINE ? "OFFLINE" :
3250 f->header->state == STATE_ONLINE ? "ONLINE" :
3251 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
3252 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
3253 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
3254 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
3255 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
3256 JOURNAL_HEADER_COMPRESSED_ZSTD(f->header) ? " COMPRESSED-ZSTD" : "",
3257 JOURNAL_HEADER_KEYED_HASH(f->header) ? " KEYED-HASH" : "",
3258 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
3259 le64toh(f->header->header_size),
3260 le64toh(f->header->arena_size),
3261 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3262 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
3263 yes_no(journal_file_rotate_suggested(f, 0)),
3264 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
3265 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
3266 format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
3267 format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
3268 format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
3269 le64toh(f->header->n_objects),
3270 le64toh(f->header->n_entries));
3271
3272 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3273 printf("Data objects: %"PRIu64"\n"
3274 "Data hash table fill: %.1f%%\n",
3275 le64toh(f->header->n_data),
3276 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
3277
3278 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3279 printf("Field objects: %"PRIu64"\n"
3280 "Field hash table fill: %.1f%%\n",
3281 le64toh(f->header->n_fields),
3282 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
3283
3284 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
3285 printf("Tag objects: %"PRIu64"\n",
3286 le64toh(f->header->n_tags));
3287 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
3288 printf("Entry array objects: %"PRIu64"\n",
3289 le64toh(f->header->n_entry_arrays));
3290
3291 if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth))
3292 printf("Deepest field hash chain: %" PRIu64"\n",
3293 le64toh(f->header->field_hash_chain_depth));
3294
3295 if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth))
3296 printf("Deepest data hash chain: %" PRIu64"\n",
3297 le64toh(f->header->data_hash_chain_depth));
3298
3299 if (fstat(f->fd, &st) >= 0)
3300 printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
3301 }
3302
3303 static int journal_file_warn_btrfs(JournalFile *f) {
3304 unsigned attrs;
3305 int r;
3306
3307 assert(f);
3308
3309 /* Before we write anything, check if the COW logic is turned
3310 * off on btrfs. Given that our write pattern is quite
3311 * unfriendly to COW file systems, this should greatly improve
3312 * performance on COW file systems such as btrfs, at the
3313 * expense of data integrity features (which shouldn't be too
3314 * bad, given that we do our own checksumming). */
3315
3316 r = btrfs_is_filesystem(f->fd);
3317 if (r < 0)
3318 return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
3319 if (!r)
3320 return 0;
3321
3322 r = read_attr_fd(f->fd, &attrs);
3323 if (r < 0)
3324 return log_warning_errno(r, "Failed to read file attributes: %m");
3325
3326 if (attrs & FS_NOCOW_FL) {
3327 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3328 return 0;
3329 }
3330
3331 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3332 "This is likely to slow down journal access substantially, please consider turning "
3333 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
3334
3335 return 1;
3336 }
3337
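/* Opens a journal file, either via an already open file descriptor (fd >= 0) or by path. If the
 * file is writable and empty, a fresh header and the hash tables are set up; otherwise the existing
 * header is verified. On success the returned JournalFile takes possession of any passed in fd. */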
3338 int journal_file_open(
3339 int fd,
3340 const char *fname,
3341 int flags,
3342 mode_t mode,
3343 bool compress,
3344 uint64_t compress_threshold_bytes,
3345 bool seal,
3346 JournalMetrics *metrics,
3347 MMapCache *mmap_cache,
3348 Set *deferred_closes,
3349 JournalFile *template,
3350 JournalFile **ret) {
3351
3352 bool newly_created = false;
3353 JournalFile *f;
3354 void *h;
3355 int r;
3356
3357 assert(ret);
3358 assert(fd >= 0 || fname);
3359
3360 if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))
3361 return -EINVAL;
3362
3363 if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))
3364 return -EINVAL;
3365
3366 f = new(JournalFile, 1);
3367 if (!f)
3368 return -ENOMEM;
3369
3370 *f = (JournalFile) {
3371 .fd = fd,
3372 .mode = mode,
3373
3374 .flags = flags,
3375 .prot = prot_from_flags(flags),
3376 .writable = (flags & O_ACCMODE) != O_RDONLY,
3377
3378 #if HAVE_ZSTD
3379 .compress_zstd = compress,
3380 #elif HAVE_LZ4
3381 .compress_lz4 = compress,
3382 #elif HAVE_XZ
3383 .compress_xz = compress,
3384 #endif
3385 .compress_threshold_bytes = compress_threshold_bytes == (uint64_t) -1 ?
3386 DEFAULT_COMPRESS_THRESHOLD :
3387 MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
3388 #if HAVE_GCRYPT
3389 .seal = seal,
3390 #endif
3391 };
3392
3393 /* We turn on keyed hashes by default, but provide an environment variable to turn them off, if
3394 * people really want that */
3395 r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
3396 if (r < 0) {
3397 if (r != -ENXIO)
3398 log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring.");
3399 f->keyed_hash = true;
3400 } else
3401 f->keyed_hash = r;
3402
3403 if (DEBUG_LOGGING) {
3404 static int last_seal = -1, last_compress = -1, last_keyed_hash = -1;
3405 static uint64_t last_bytes = UINT64_MAX;
3406 char bytes[FORMAT_BYTES_MAX];
3407
3408 if (last_seal != f->seal ||
3409 last_keyed_hash != f->keyed_hash ||
3410 last_compress != JOURNAL_FILE_COMPRESS(f) ||
3411 last_bytes != f->compress_threshold_bytes) {
3412
3413 log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
3414 yes_no(f->seal), yes_no(f->keyed_hash), yes_no(JOURNAL_FILE_COMPRESS(f)),
3415 format_bytes(bytes, sizeof bytes, f->compress_threshold_bytes));
3416 last_seal = f->seal;
3417 last_keyed_hash = f->keyed_hash;
3418 last_compress = JOURNAL_FILE_COMPRESS(f);
3419 last_bytes = f->compress_threshold_bytes;
3420 }
3421 }
3422
3423 if (mmap_cache)
3424 f->mmap = mmap_cache_ref(mmap_cache);
3425 else {
3426 f->mmap = mmap_cache_new();
3427 if (!f->mmap) {
3428 r = -ENOMEM;
3429 goto fail;
3430 }
3431 }
3432
3433 if (fname) {
3434 f->path = strdup(fname);
3435 if (!f->path) {
3436 r = -ENOMEM;
3437 goto fail;
3438 }
3439 } else {
3440 assert(fd >= 0);
3441
3442 /* If we don't know the path, fill in something explanatory and vaguely useful */
3443 if (asprintf(&f->path, "/proc/self/%i", fd) < 0) {
3444 r = -ENOMEM;
3445 goto fail;
3446 }
3447 }
3448
3449 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
3450 if (!f->chain_cache) {
3451 r = -ENOMEM;
3452 goto fail;
3453 }
3454
3455 if (f->fd < 0) {
3456 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
3457 * or so, we fail quickly rather than block for a long time. For regular files O_NONBLOCK has no effect,
3458 * hence it doesn't hurt in that case. */
3459
3460 f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);
3461 if (f->fd < 0) {
3462 r = -errno;
3463 goto fail;
3464 }
3465
3466 /* fds we opened here ourselves should also be closed by us. */
3467 f->close_fd = true;
3468
3469 r = fd_nonblock(f->fd, false);
3470 if (r < 0)
3471 goto fail;
3472 }
3473
3474 f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd);
3475 if (!f->cache_fd) {
3476 r = -ENOMEM;
3477 goto fail;
3478 }
3479
3480 r = journal_file_fstat(f);
3481 if (r < 0)
3482 goto fail;
3483
3484 if (f->last_stat.st_size == 0 && f->writable) {
3485
3486 (void) journal_file_warn_btrfs(f);
3487
3488 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
3489 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
3490 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
3491 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
3492 * solely on mtime/atime/ctime of the file. */
3493 (void) fd_setcrtime(f->fd, 0);
3494
3495 #if HAVE_GCRYPT
3496 /* Try to load the FSPRG state, and if we can't, then
3497 * just don't do sealing */
3498 if (f->seal) {
3499 r = journal_file_fss_load(f);
3500 if (r < 0)
3501 f->seal = false;
3502 }
3503 #endif
3504
3505 r = journal_file_init_header(f, template);
3506 if (r < 0)
3507 goto fail;
3508
3509 r = journal_file_fstat(f);
3510 if (r < 0)
3511 goto fail;
3512
3513 newly_created = true;
3514 }
3515
3516 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
3517 r = -ENODATA;
3518 goto fail;
3519 }
3520
3521 r = mmap_cache_get(f->mmap, f->cache_fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h, NULL);
3522 if (r == -EINVAL) {
3523 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
3524 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
3525 * code. */
3526 r = -EAFNOSUPPORT;
3527 goto fail;
3528 }
3529 if (r < 0)
3530 goto fail;
3531
3532 f->header = h;
3533
3534 if (!newly_created) {
3535 set_clear_with_destructor(deferred_closes, journal_file_close);
3536
3537 r = journal_file_verify_header(f);
3538 if (r < 0)
3539 goto fail;
3540 }
3541
3542 #if HAVE_GCRYPT
3543 if (!newly_created && f->writable) {
3544 r = journal_file_fss_load(f);
3545 if (r < 0)
3546 goto fail;
3547 }
3548 #endif
3549
3550 if (f->writable) {
3551 if (metrics) {
3552 journal_default_metrics(metrics, f->fd);
3553 f->metrics = *metrics;
3554 } else if (template)
3555 f->metrics = template->metrics;
3556
3557 r = journal_file_refresh_header(f);
3558 if (r < 0)
3559 goto fail;
3560 }
3561
3562 #if HAVE_GCRYPT
3563 r = journal_file_hmac_setup(f);
3564 if (r < 0)
3565 goto fail;
3566 #endif
3567
3568 if (newly_created) {
3569 r = journal_file_setup_field_hash_table(f);
3570 if (r < 0)
3571 goto fail;
3572
3573 r = journal_file_setup_data_hash_table(f);
3574 if (r < 0)
3575 goto fail;
3576
3577 #if HAVE_GCRYPT
3578 r = journal_file_append_first_tag(f);
3579 if (r < 0)
3580 goto fail;
3581 #endif
3582 }
3583
3584 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) {
3585 r = -EIO;
3586 goto fail;
3587 }
3588
3589 if (template && template->post_change_timer) {
3590 r = journal_file_enable_post_change_timer(
3591 f,
3592 sd_event_source_get_event(template->post_change_timer),
3593 template->post_change_timer_period);
3594
3595 if (r < 0)
3596 goto fail;
3597 }
3598
3599 /* The file is now opened successfully, thus we take possession of any fd that was passed in. */
3600 f->close_fd = true;
3601
3602 *ret = f;
3603 return 0;
3604
3605 fail:
3606 if (f->cache_fd && mmap_cache_got_sigbus(f->mmap, f->cache_fd))
3607 r = -EIO;
3608
3609 (void) journal_file_close(f);
3610
3611 return r;
3612 }
3613
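/* Renames the file into the archived naming scheme ("<name>@<seqnum_id>-<head_seqnum>-<head_realtime>.journal")
 * and flags it so that the subsequent offlining commits STATE_ARCHIVED and the file is defragmented on close. */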
3614 int journal_file_archive(JournalFile *f) {
3615 _cleanup_free_ char *p = NULL;
3616
3617 assert(f);
3618
3619 if (!f->writable)
3620 return -EINVAL;
3621
3622 /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3623 * rotation, since we don't know the actual path and hence couldn't rename the file. */
3624 if (path_startswith(f->path, "/proc/self/fd"))
3625 return -EINVAL;
3626
3627 if (!endswith(f->path, ".journal"))
3628 return -EINVAL;
3629
3630 if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
3631 (int) strlen(f->path) - 8, f->path,
3632 SD_ID128_FORMAT_VAL(f->header->seqnum_id),
3633 le64toh(f->header->head_entry_seqnum),
3634 le64toh(f->header->head_entry_realtime)) < 0)
3635 return -ENOMEM;
3636
3637 /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
3638 * ignore that case. */
3639 if (rename(f->path, p) < 0 && errno != ENOENT)
3640 return -errno;
3641
3642 /* Sync the rename to disk */
3643 (void) fsync_directory_of_file(f->fd);
3644
3645 /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
3646 * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
3647 * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
3648 * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
3649 * occurs. */
3650 f->archive = true;
3651
3652 /* Currently, btrfs is not very good with our write patterns and fragments heavily. Let's defrag our journal
3653 * files when we archive them */
3654 f->defrag_on_close = true;
3655
3656 return 0;
3657 }
3658
3659 JournalFile* journal_initiate_close(
3660 JournalFile *f,
3661 Set *deferred_closes) {
3662
3663 int r;
3664
3665 assert(f);
3666
3667 if (deferred_closes) {
3668
3669 r = set_put(deferred_closes, f);
3670 if (r < 0)
3671 log_debug_errno(r, "Failed to add file to deferred close set, closing immediately.");
3672 else {
3673 (void) journal_file_set_offline(f, false);
3674 return NULL;
3675 }
3676 }
3677
3678 return journal_file_close(f);
3679 }
3680
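/* Archives the current file and opens a fresh journal file under the same path, using the old file
 * as template. The old file is closed (or queued in 'deferred_closes') and, on success, *f points
 * to the new file. */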
3681 int journal_file_rotate(
3682 JournalFile **f,
3683 bool compress,
3684 uint64_t compress_threshold_bytes,
3685 bool seal,
3686 Set *deferred_closes) {
3687
3688 JournalFile *new_file = NULL;
3689 int r;
3690
3691 assert(f);
3692 assert(*f);
3693
3694 r = journal_file_archive(*f);
3695 if (r < 0)
3696 return r;
3697
3698 r = journal_file_open(
3699 -1,
3700 (*f)->path,
3701 (*f)->flags,
3702 (*f)->mode,
3703 compress,
3704 compress_threshold_bytes,
3705 seal,
3706 NULL, /* metrics */
3707 (*f)->mmap,
3708 deferred_closes,
3709 *f, /* template */
3710 &new_file);
3711
3712 journal_initiate_close(*f, deferred_closes);
3713 *f = new_file;
3714
3715 return r;
3716 }
3717
3718 int journal_file_dispose(int dir_fd, const char *fname) {
3719 _cleanup_free_ char *p = NULL;
3720 _cleanup_close_ int fd = -1;
3721
3722 assert(fname);
3723
3724 /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shut down. Note that
3725 * this is done without looking into the file or changing any of its contents. The idea is that this is called
3726 * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
3727 * for writing anymore. */
3728
3729 if (!endswith(fname, ".journal"))
3730 return -EINVAL;
3731
3732 if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
3733 (int) strlen(fname) - 8, fname,
3734 now(CLOCK_REALTIME),
3735 random_u64()) < 0)
3736 return -ENOMEM;
3737
3738 if (renameat(dir_fd, fname, dir_fd, p) < 0)
3739 return -errno;
3740
3741 /* btrfs doesn't cope well with our write pattern and fragments heavily. Let's defrag all files we rotate */
3742 fd = openat(dir_fd, p, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
3743 if (fd < 0)
3744 log_debug_errno(errno, "Failed to open file for defragmentation/FS_NOCOW_FL, ignoring: %m");
3745 else {
3746 (void) chattr_fd(fd, 0, FS_NOCOW_FL, NULL);
3747 (void) btrfs_defrag_fd(fd);
3748 }
3749
3750 return 0;
3751 }
3752
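/* Like journal_file_open(), but if the file turns out to be corrupted or otherwise unusable it is
 * renamed away with journal_file_dispose() and the open is retried once with a fresh file. */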
3753 int journal_file_open_reliably(
3754 const char *fname,
3755 int flags,
3756 mode_t mode,
3757 bool compress,
3758 uint64_t compress_threshold_bytes,
3759 bool seal,
3760 JournalMetrics *metrics,
3761 MMapCache *mmap_cache,
3762 Set *deferred_closes,
3763 JournalFile *template,
3764 JournalFile **ret) {
3765
3766 int r;
3767
3768 r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3769 deferred_closes, template, ret);
3770 if (!IN_SET(r,
3771 -EBADMSG, /* Corrupted */
3772 -ENODATA, /* Truncated */
3773 -EHOSTDOWN, /* Other machine */
3774 -EPROTONOSUPPORT, /* Incompatible feature */
3775 -EBUSY, /* Unclean shutdown */
3776 -ESHUTDOWN, /* Already archived */
3777 -EIO, /* IO error, including SIGBUS on mmap */
3778 -EIDRM, /* File has been deleted */
3779 -ETXTBSY)) /* File is from the future */
3780 return r;
3781
3782 if ((flags & O_ACCMODE) == O_RDONLY)
3783 return r;
3784
3785 if (!(flags & O_CREAT))
3786 return r;
3787
3788 if (!endswith(fname, ".journal"))
3789 return r;
3790
3791 /* The file is corrupted. Rotate it away and try it again (but only once) */
3792 log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
3793
3794 r = journal_file_dispose(AT_FDCWD, fname);
3795 if (r < 0)
3796 return r;
3797
3798 return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3799 deferred_closes, template, ret);
3800 }
3801
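/* Copies the entry object 'o' at offset 'p' from journal file 'from' into 'to': each data item is
 * re-appended to 'to' (decompressing it if necessary) and a new entry referencing the copies is
 * written. */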
3802 int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
3803 uint64_t i, n;
3804 uint64_t q, xor_hash = 0;
3805 int r;
3806 EntryItem *items;
3807 dual_timestamp ts;
3808 const sd_id128_t *boot_id;
3809
3810 assert(from);
3811 assert(to);
3812 assert(o);
3813 assert(p);
3814
3815 if (!to->writable)
3816 return -EPERM;
3817
3818 ts.monotonic = le64toh(o->entry.monotonic);
3819 ts.realtime = le64toh(o->entry.realtime);
3820 boot_id = &o->entry.boot_id;
3821
3822 n = journal_file_entry_n_items(o);
3823 /* alloca() can't take 0, hence let's allocate at least one */
3824 items = newa(EntryItem, MAX(1u, n));
3825
3826 for (i = 0; i < n; i++) {
3827 uint64_t l, h;
3828 le64_t le_hash;
3829 size_t t;
3830 void *data;
3831 Object *u;
3832
3833 q = le64toh(o->entry.items[i].object_offset);
3834 le_hash = o->entry.items[i].hash;
3835
3836 r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
3837 if (r < 0)
3838 return r;
3839
3840 if (le_hash != o->data.hash)
3841 return -EBADMSG;
3842
3843 l = le64toh(READ_NOW(o->object.size));
3844 if (l < offsetof(Object, data.payload))
3845 return -EBADMSG;
3846
3847 l -= offsetof(Object, data.payload);
3848 t = (size_t) l;
3849
3850 /* We hit the limit on 32bit machines */
3851 if ((uint64_t) t != l)
3852 return -E2BIG;
3853
3854 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
3855 #if HAVE_XZ || HAVE_LZ4 || HAVE_ZSTD
3856 size_t rsize = 0;
3857
3858 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
3859 o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);
3860 if (r < 0)
3861 return r;
3862
3863 data = from->compress_buffer;
3864 l = rsize;
3865 #else
3866 return -EPROTONOSUPPORT;
3867 #endif
3868 } else
3869 data = o->data.payload;
3870
3871 r = journal_file_append_data(to, data, l, &u, &h);
3872 if (r < 0)
3873 return r;
3874
3875 if (JOURNAL_HEADER_KEYED_HASH(to->header))
3876 xor_hash ^= jenkins_hash64(data, l);
3877 else
3878 xor_hash ^= le64toh(u->data.hash);
3879
3880 items[i].object_offset = htole64(h);
3881 items[i].hash = u->data.hash;
3882
3883 r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
3884 if (r < 0)
3885 return r;
3886 }
3887
3888 r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n,
3889 NULL, NULL, NULL);
3890
3891 if (mmap_cache_got_sigbus(to->mmap, to->cache_fd))
3892 return -EIO;
3893
3894 return r;
3895 }
3896
3897 void journal_reset_metrics(JournalMetrics *m) {
3898 assert(m);
3899
3900 /* Set everything to "pick automatic values". */
3901
3902 *m = (JournalMetrics) {
3903 .min_use = (uint64_t) -1,
3904 .max_use = (uint64_t) -1,
3905 .min_size = (uint64_t) -1,
3906 .max_size = (uint64_t) -1,
3907 .keep_free = (uint64_t) -1,
3908 .n_max_files = (uint64_t) -1,
3909 };
3910 }
3911
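/* Replaces every metric still set to "automatic" ((uint64_t) -1) with a default derived from the
 * size of the file system the journal lives on: max_use defaults to 10% of the file system size
 * clamped to [MAX_USE_LOWER, MAX_USE_UPPER], min_use to 2%, keep_free to 5% capped at
 * KEEP_FREE_UPPER, and max_size to an eighth of max_use capped at MAX_SIZE_UPPER. */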
3912 void journal_default_metrics(JournalMetrics *m, int fd) {
3913 char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
3914 struct statvfs ss;
3915 uint64_t fs_size = 0;
3916
3917 assert(m);
3918 assert(fd >= 0);
3919
3920 if (fstatvfs(fd, &ss) >= 0)
3921 fs_size = ss.f_frsize * ss.f_blocks;
3922 else
3923 log_debug_errno(errno, "Failed to determine disk size: %m");
3924
3925 if (m->max_use == (uint64_t) -1) {
3926
3927 if (fs_size > 0)
3928 m->max_use = CLAMP(PAGE_ALIGN(fs_size / 10), /* 10% of file system size */
3929 MAX_USE_LOWER, MAX_USE_UPPER);
3930 else
3931 m->max_use = MAX_USE_LOWER;
3932 } else {
3933 m->max_use = PAGE_ALIGN(m->max_use);
3934
3935 if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
3936 m->max_use = JOURNAL_FILE_SIZE_MIN*2;
3937 }
3938
3939 if (m->min_use == (uint64_t) -1) {
3940 if (fs_size > 0)
3941 m->min_use = CLAMP(PAGE_ALIGN(fs_size / 50), /* 2% of file system size */
3942 MIN_USE_LOW, MIN_USE_HIGH);
3943 else
3944 m->min_use = MIN_USE_LOW;
3945 }
3946
3947 if (m->min_use > m->max_use)
3948 m->min_use = m->max_use;
3949
3950 if (m->max_size == (uint64_t) -1)
3951 m->max_size = MIN(PAGE_ALIGN(m->max_use / 8), /* 8 chunks */
3952 MAX_SIZE_UPPER);
3953 else
3954 m->max_size = PAGE_ALIGN(m->max_size);
3955
3956 if (m->max_size != 0) {
3957 if (m->max_size < JOURNAL_FILE_SIZE_MIN)
3958 m->max_size = JOURNAL_FILE_SIZE_MIN;
3959
3960 if (m->max_use != 0 && m->max_size*2 > m->max_use)
3961 m->max_use = m->max_size*2;
3962 }
3963
3964 if (m->min_size == (uint64_t) -1)
3965 m->min_size = JOURNAL_FILE_SIZE_MIN;
3966 else
3967 m->min_size = CLAMP(PAGE_ALIGN(m->min_size),
3968 JOURNAL_FILE_SIZE_MIN,
3969 m->max_size ?: UINT64_MAX);
3970
3971 if (m->keep_free == (uint64_t) -1) {
3972 if (fs_size > 0)
3973 m->keep_free = MIN(PAGE_ALIGN(fs_size / 20), /* 5% of file system size */
3974 KEEP_FREE_UPPER);
3975 else
3976 m->keep_free = DEFAULT_KEEP_FREE;
3977 }
3978
3979 if (m->n_max_files == (uint64_t) -1)
3980 m->n_max_files = DEFAULT_N_MAX_FILES;
3981
3982 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
3983 format_bytes(a, sizeof(a), m->min_use),
3984 format_bytes(b, sizeof(b), m->max_use),
3985 format_bytes(c, sizeof(c), m->max_size),
3986 format_bytes(d, sizeof(d), m->min_size),
3987 format_bytes(e, sizeof(e), m->keep_free),
3988 m->n_max_files);
3989 }
3990
3991 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {
3992 assert(f);
3993 assert(f->header);
3994 assert(from || to);
3995
3996 if (from) {
3997 if (f->header->head_entry_realtime == 0)
3998 return -ENOENT;
3999
4000 *from = le64toh(f->header->head_entry_realtime);
4001 }
4002
4003 if (to) {
4004 if (f->header->tail_entry_realtime == 0)
4005 return -ENOENT;
4006
4007 *to = le64toh(f->header->tail_entry_realtime);
4008 }
4009
4010 return 1;
4011 }
4012
4013 int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
4014 Object *o;
4015 uint64_t p;
4016 int r;
4017
4018 assert(f);
4019 assert(from || to);
4020
4021 r = find_data_object_by_boot_id(f, boot_id, &o, &p);
4022 if (r <= 0)
4023 return r;
4024
4025 if (le64toh(o->data.n_entries) <= 0)
4026 return 0;
4027
4028 if (from) {
4029 r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
4030 if (r < 0)
4031 return r;
4032
4033 *from = le64toh(o->entry.monotonic);
4034 }
4035
4036 if (to) {
4037 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
4038 if (r < 0)
4039 return r;
4040
4041 r = generic_array_get_plus_one(f,
4042 le64toh(o->data.entry_offset),
4043 le64toh(o->data.entry_array_offset),
4044 le64toh(o->data.n_entries)-1,
4045 &o, NULL);
4046 if (r <= 0)
4047 return r;
4048
4049 *to = le64toh(o->entry.monotonic);
4050 }
4051
4052 return 1;
4053 }
4054
4055 bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {
4056 assert(f);
4057 assert(f->header);
4058
4059 /* If we gained new header fields we gained new features,
4060 * hence suggest a rotation */
4061 if (le64toh(f->header->header_size) < sizeof(Header)) {
4062 log_debug("%s uses an outdated header, suggesting rotation.", f->path);
4063 return true;
4064 }
4065
4066 /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
4067 * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
4068 * need the n_data field, which only exists in newer versions. */
4069
4070 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
4071 if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
4072 log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
4073 f->path,
4074 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
4075 le64toh(f->header->n_data),
4076 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
4077 (unsigned long long) f->last_stat.st_size,
4078 f->last_stat.st_size / le64toh(f->header->n_data));
4079 return true;
4080 }
4081
4082 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
4083 if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
4084 log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
4085 f->path,
4086 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
4087 le64toh(f->header->n_fields),
4088 le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
4089 return true;
4090 }
4091
4092 /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
4093 * longest chain is longer than some threshold, let's suggest rotation. */
4094 if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) &&
4095 le64toh(f->header->data_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
4096 log_debug("Data hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
4097 f->path, le64toh(f->header->data_hash_chain_depth));
4098 return true;
4099 }
4100
4101 if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) &&
4102 le64toh(f->header->field_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
4103 log_debug("Field hash table of %s has deepest hash chain of length at %" PRIu64 ", suggesting rotation.",
4104 f->path, le64toh(f->header->field_hash_chain_depth));
4105 return true;
4106 }
4107
4108 /* Are the data objects properly indexed by field objects? */
4109 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
4110 JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
4111 le64toh(f->header->n_data) > 0 &&
4112 le64toh(f->header->n_fields) == 0)
4113 return true;
4114
4115 if (max_file_usec > 0) {
4116 usec_t t, h;
4117
4118 h = le64toh(f->header->head_entry_realtime);
4119 t = now(CLOCK_REALTIME);
4120
4121 if (h > 0 && t > h + max_file_usec)
4122 return true;
4123 }
4124
4125 return false;
4126 }