src/journal/journal-file.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <linux/fs.h>
6 #include <pthread.h>
7 #include <stddef.h>
8 #include <sys/mman.h>
9 #include <sys/statvfs.h>
10 #include <sys/uio.h>
11 #include <unistd.h>
12
13 #include "sd-event.h"
14
15 #include "alloc-util.h"
16 #include "btrfs-util.h"
17 #include "chattr-util.h"
18 #include "compress.h"
19 #include "env-util.h"
20 #include "fd-util.h"
21 #include "format-util.h"
22 #include "fs-util.h"
23 #include "journal-authenticate.h"
24 #include "journal-def.h"
25 #include "journal-file.h"
26 #include "lookup3.h"
27 #include "memory-util.h"
28 #include "path-util.h"
29 #include "random-util.h"
30 #include "set.h"
31 #include "sort-util.h"
32 #include "stat-util.h"
33 #include "string-util.h"
34 #include "strv.h"
35 #include "xattr-util.h"
36
37 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
38 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
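/* Illustrative arithmetic, assuming HashItem consists of two le64_t members (16 bytes) as defined in
 * journal-def.h: the default data hash table then occupies 2047 * 16 = 32752 bytes (~32 KiB) and the
 * default field hash table 333 * 16 = 5328 bytes. */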
39
40 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
41 #define MIN_COMPRESS_THRESHOLD (8ULL)
42
43 /* This is the minimum journal file size */
44 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
45
46 /* These are the lower and upper bounds if we deduce the max_use value
47 * from the file system size */
48 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
49 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
50
51 /* Those are the lower and upper bounds for the minimal use limit,
52 * i.e. how much we'll use even if keep_free suggests otherwise. */
53 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
54 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
55
56 /* This is the upper bound if we deduce max_size from max_use */
57 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
58
59 /* This is the upper bound if we deduce the keep_free value from the
60 * file system size */
61 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
62
63 /* This is the keep_free value when we can't determine the system
64 * size */
65 #define DEFAULT_KEEP_FREE (1024 * 1024ULL)                /* 1 MiB */
66
67 /* This is the default maximum number of journal files to keep around. */
68 #define DEFAULT_N_MAX_FILES 100
69
70 /* n_data was the first entry we added after the initial file format design */
71 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
72
73 /* How many entries to keep in the entry array chain cache at max */
74 #define CHAIN_CACHE_MAX 20
75
76 /* How much to increase the journal file size at once each time we allocate something new. */
77 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL)          /* 8 MiB */
78
79 /* Reread fstat() of the file for detecting deletions at least this often */
80 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
81
82 /* The mmap context to use for the header: we pick the one right above the last defined object type */
83 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
84
85 /* Longest hash chain to rotate after */
86 #define HASH_CHAIN_DEPTH_MAX 100
87
88 #ifdef __clang__
89 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
90 #endif
91
92 /* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
93 * As a result we use atomic operations on f->offline_state for inter-thread communications with
94 * journal_file_set_offline() and journal_file_set_online(). */
95 static void journal_file_set_offline_internal(JournalFile *f) {
96 assert(f);
97 assert(f->fd >= 0);
98 assert(f->header);
99
100 for (;;) {
101 switch (f->offline_state) {
102 case OFFLINE_CANCEL:
103 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
104 continue;
105 return;
106
107 case OFFLINE_AGAIN_FROM_SYNCING:
108 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
109 continue;
110 break;
111
112 case OFFLINE_AGAIN_FROM_OFFLINING:
113 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
114 continue;
115 break;
116
117 case OFFLINE_SYNCING:
118 (void) fsync(f->fd);
119
120 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
121 continue;
122
123 f->header->state = f->archive ? STATE_ARCHIVED : STATE_OFFLINE;
124 (void) fsync(f->fd);
125 break;
126
127 case OFFLINE_OFFLINING:
128 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
129 continue;
130 _fallthrough_;
131 case OFFLINE_DONE:
132 return;
133
134 case OFFLINE_JOINED:
135 log_debug("OFFLINE_JOINED unexpected offline state for journal_file_set_offline_internal()");
136 return;
137 }
138 }
139 }
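/* Illustrative sketch of the lock-free pattern used above (hypothetical names, kept out of the build):
 * every transition is a compare-and-swap that only succeeds if the state is still the one we observed;
 * on failure we re-read and decide again, which is why the cases above 'continue' when the CAS fails. */
#if 0
static int example_state; /* written concurrently by two threads */

static void example_transition(int from, int to) {
        for (;;) {
                int observed = __atomic_load_n(&example_state, __ATOMIC_SEQ_CST);
                if (observed != from)
                        return; /* another thread moved the state first, nothing to do */
                if (__sync_bool_compare_and_swap(&example_state, from, to))
                        return; /* we won the race and performed the transition */
                /* CAS failed: the state changed between load and swap, retry */
        }
}
#endif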
140
141 static void * journal_file_set_offline_thread(void *arg) {
142 JournalFile *f = arg;
143
144 (void) pthread_setname_np(pthread_self(), "journal-offline");
145
146 journal_file_set_offline_internal(f);
147
148 return NULL;
149 }
150
151 static int journal_file_set_offline_thread_join(JournalFile *f) {
152 int r;
153
154 assert(f);
155
156 if (f->offline_state == OFFLINE_JOINED)
157 return 0;
158
159 r = pthread_join(f->offline_thread, NULL);
160 if (r)
161 return -r;
162
163 f->offline_state = OFFLINE_JOINED;
164
165 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
166 return -EIO;
167
168 return 0;
169 }
170
171 /* Trigger a restart if the offline thread is mid-flight in a restartable state. */
172 static bool journal_file_set_offline_try_restart(JournalFile *f) {
173 for (;;) {
174 switch (f->offline_state) {
175 case OFFLINE_AGAIN_FROM_SYNCING:
176 case OFFLINE_AGAIN_FROM_OFFLINING:
177 return true;
178
179 case OFFLINE_CANCEL:
180 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
181 continue;
182 return true;
183
184 case OFFLINE_SYNCING:
185 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
186 continue;
187 return true;
188
189 case OFFLINE_OFFLINING:
190 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
191 continue;
192 return true;
193
194 default:
195 return false;
196 }
197 }
198 }
199
200 /* Sets a journal offline.
201 *
202 * If wait is false then an offline is dispatched in a separate thread for a
203 * subsequent journal_file_set_offline() or journal_file_set_online() of the
204 * same journal to synchronize with.
205 *
206 * If wait is true, then either an existing offline thread will be restarted
207 * and joined, or if none exists the offline is simply performed in this
208 * context without involving another thread.
209 */
210 int journal_file_set_offline(JournalFile *f, bool wait) {
211 bool restarted;
212 int r;
213
214 assert(f);
215
216 if (!f->writable)
217 return -EPERM;
218
219 if (f->fd < 0 || !f->header)
220 return -EINVAL;
221
222         /* An offlining journal is implicitly online and may modify f->header->state;
223 * we must also join any potentially lingering offline thread when not online. */
224 if (!journal_file_is_offlining(f) && f->header->state != STATE_ONLINE)
225 return journal_file_set_offline_thread_join(f);
226
227 /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
228 restarted = journal_file_set_offline_try_restart(f);
229 if ((restarted && wait) || !restarted) {
230 r = journal_file_set_offline_thread_join(f);
231 if (r < 0)
232 return r;
233 }
234
235 if (restarted)
236 return 0;
237
238 /* Initiate a new offline. */
239 f->offline_state = OFFLINE_SYNCING;
240
241 if (wait) /* Without using a thread if waiting. */
242 journal_file_set_offline_internal(f);
243 else {
244 sigset_t ss, saved_ss;
245 int k;
246
247 assert_se(sigfillset(&ss) >= 0);
248 /* Don't block SIGBUS since the offlining thread accesses a memory mapped file.
249 * Asynchronous SIGBUS signals can safely be handled by either thread. */
250 assert_se(sigdelset(&ss, SIGBUS) >= 0);
251
252 r = pthread_sigmask(SIG_BLOCK, &ss, &saved_ss);
253 if (r > 0)
254 return -r;
255
256 r = pthread_create(&f->offline_thread, NULL, journal_file_set_offline_thread, f);
257
258 k = pthread_sigmask(SIG_SETMASK, &saved_ss, NULL);
259 if (r > 0) {
260 f->offline_state = OFFLINE_JOINED;
261 return -r;
262 }
263 if (k > 0)
264 return -k;
265 }
266
267 return 0;
268 }
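/* Illustrative usage sketch (hypothetical callers; 'f' is assumed to be an open, writable JournalFile):
 * a caller that must have the offline state on disk before proceeding passes wait=true, while a
 * latency-sensitive path lets the "journal-offline" thread do the fsync() work. */
#if 0
static int example_rotate_prepare(JournalFile *f) {
        /* Synchronous: fsync() and the state change happen in this thread (or a running thread is joined). */
        return journal_file_set_offline(f, true);
}

static int example_background_sync(JournalFile *f) {
        /* Asynchronous: the offline is dispatched to a worker thread; a later call synchronizes with it. */
        return journal_file_set_offline(f, false);
}
#endif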
269
270 static int journal_file_set_online(JournalFile *f) {
271 bool wait = true;
272
273 assert(f);
274
275 if (!f->writable)
276 return -EPERM;
277
278 if (f->fd < 0 || !f->header)
279 return -EINVAL;
280
281 while (wait) {
282 switch (f->offline_state) {
283 case OFFLINE_JOINED:
284 /* No offline thread, no need to wait. */
285 wait = false;
286 break;
287
288 case OFFLINE_SYNCING:
289 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
290 continue;
291 /* Canceled syncing prior to offlining, no need to wait. */
292 wait = false;
293 break;
294
295 case OFFLINE_AGAIN_FROM_SYNCING:
296 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
297 continue;
298 /* Canceled restart from syncing, no need to wait. */
299 wait = false;
300 break;
301
302 case OFFLINE_AGAIN_FROM_OFFLINING:
303 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
304 continue;
305 /* Canceled restart from offlining, must wait for offlining to complete however. */
306 _fallthrough_;
307 default: {
308 int r;
309
310 r = journal_file_set_offline_thread_join(f);
311 if (r < 0)
312 return r;
313
314 wait = false;
315 break;
316 }
317 }
318 }
319
320 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
321 return -EIO;
322
323 switch (f->header->state) {
324 case STATE_ONLINE:
325 return 0;
326
327 case STATE_OFFLINE:
328 f->header->state = STATE_ONLINE;
329 (void) fsync(f->fd);
330 return 0;
331
332 default:
333 return -EINVAL;
334 }
335 }
336
337 bool journal_file_is_offlining(JournalFile *f) {
338 assert(f);
339
340 __sync_synchronize();
341
342 if (IN_SET(f->offline_state, OFFLINE_DONE, OFFLINE_JOINED))
343 return false;
344
345 return true;
346 }
347
348 JournalFile* journal_file_close(JournalFile *f) {
349 if (!f)
350 return NULL;
351
352 #if HAVE_GCRYPT
353 /* Write the final tag */
354 if (f->seal && f->writable) {
355 int r;
356
357 r = journal_file_append_tag(f);
358 if (r < 0)
359 log_error_errno(r, "Failed to append tag when closing journal: %m");
360 }
361 #endif
362
363 if (f->post_change_timer) {
364 if (sd_event_source_get_enabled(f->post_change_timer, NULL) > 0)
365 journal_file_post_change(f);
366
367 sd_event_source_disable_unref(f->post_change_timer);
368 }
369
370 journal_file_set_offline(f, true);
371
372 if (f->mmap && f->cache_fd)
373 mmap_cache_free_fd(f->mmap, f->cache_fd);
374
375 if (f->fd >= 0 && f->defrag_on_close) {
376
377 /* Be friendly to btrfs: turn COW back on again now,
378 * and defragment the file. We won't write to the file
379 * ever again, hence remove all fragmentation, and
380 * reenable all the good bits COW usually provides
381 * (such as data checksumming). */
382
383 (void) chattr_fd(f->fd, 0, FS_NOCOW_FL, NULL);
384 (void) btrfs_defrag_fd(f->fd);
385 }
386
387 if (f->close_fd)
388 safe_close(f->fd);
389 free(f->path);
390
391 mmap_cache_unref(f->mmap);
392
393 ordered_hashmap_free_free(f->chain_cache);
394
395 #if HAVE_COMPRESSION
396 free(f->compress_buffer);
397 #endif
398
399 #if HAVE_GCRYPT
400 if (f->fss_file)
401 munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
402 else
403 free(f->fsprg_state);
404
405 free(f->fsprg_seed);
406
407 if (f->hmac)
408 gcry_md_close(f->hmac);
409 #endif
410
411 return mfree(f);
412 }
413
414 static int journal_file_init_header(JournalFile *f, JournalFile *template) {
415 Header h = {};
416 ssize_t k;
417 int r;
418
419 assert(f);
420
421 memcpy(h.signature, HEADER_SIGNATURE, 8);
422 h.header_size = htole64(ALIGN64(sizeof(h)));
423
424 h.incompatible_flags |= htole32(
425 f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
426 f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4 |
427 f->compress_zstd * HEADER_INCOMPATIBLE_COMPRESSED_ZSTD |
428 f->keyed_hash * HEADER_INCOMPATIBLE_KEYED_HASH);
429
430 h.compatible_flags = htole32(
431 f->seal * HEADER_COMPATIBLE_SEALED);
432
433 r = sd_id128_randomize(&h.file_id);
434 if (r < 0)
435 return r;
436
437 if (template) {
438 h.seqnum_id = template->header->seqnum_id;
439 h.tail_entry_seqnum = template->header->tail_entry_seqnum;
440 } else
441 h.seqnum_id = h.file_id;
442
443 k = pwrite(f->fd, &h, sizeof(h), 0);
444 if (k < 0)
445 return -errno;
446
447 if (k != sizeof(h))
448 return -EIO;
449
450 return 0;
451 }
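/* Illustrative sketch (hypothetical helper): the header written above starts with the 8-byte
 * HEADER_SIGNATURE, so a quick "is this a journal file?" probe only needs to compare the first
 * 8 bytes, much like journal_file_verify_header() does below. */
#if 0
static int example_is_journal_file(int fd) {
        char sig[8];
        ssize_t k;

        k = pread(fd, sig, sizeof(sig), 0);
        if (k < 0)
                return -errno;
        if (k != sizeof(sig))
                return 0; /* too short to carry a journal header */

        return memcmp(sig, HEADER_SIGNATURE, 8) == 0;
}
#endif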
452
453 static int journal_file_refresh_header(JournalFile *f) {
454 int r;
455
456 assert(f);
457 assert(f->header);
458
459 r = sd_id128_get_machine(&f->header->machine_id);
460 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
461 /* We don't have a machine-id, let's continue without */
462 zero(f->header->machine_id);
463 else if (r < 0)
464 return r;
465
466 r = sd_id128_get_boot(&f->header->boot_id);
467 if (r < 0)
468 return r;
469
470 r = journal_file_set_online(f);
471
472 /* Sync the online state to disk */
473 (void) fsync(f->fd);
474
475 /* We likely just created a new file, also sync the directory this file is located in. */
476 (void) fsync_directory_of_file(f->fd);
477
478 return r;
479 }
480
481 static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
482 const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
483 supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
484 const char *type = compatible ? "compatible" : "incompatible";
485 uint32_t flags;
486
487 flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);
488
489 if (flags & ~supported) {
490 if (flags & ~any)
491 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
492 f->path, type, flags & ~any);
493 flags = (flags & any) & ~supported;
494 if (flags) {
495 const char* strv[5];
496 unsigned n = 0;
497 _cleanup_free_ char *t = NULL;
498
499 if (compatible) {
500 if (flags & HEADER_COMPATIBLE_SEALED)
501 strv[n++] = "sealed";
502 } else {
503 if (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ)
504 strv[n++] = "xz-compressed";
505 if (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4)
506 strv[n++] = "lz4-compressed";
507 if (flags & HEADER_INCOMPATIBLE_COMPRESSED_ZSTD)
508 strv[n++] = "zstd-compressed";
509 if (flags & HEADER_INCOMPATIBLE_KEYED_HASH)
510 strv[n++] = "keyed-hash";
511 }
512 strv[n] = NULL;
513 assert(n < ELEMENTSOF(strv));
514
515 t = strv_join((char**) strv, ", ");
516 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
517 f->path, type, n > 1 ? "flags" : "flag", strnull(t));
518 }
519 return true;
520 }
521
522 return false;
523 }
524
525 static int journal_file_verify_header(JournalFile *f) {
526 uint64_t arena_size, header_size;
527
528 assert(f);
529 assert(f->header);
530
531 if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
532 return -EBADMSG;
533
534 /* In both read and write mode we refuse to open files with incompatible
535 * flags we don't know. */
536 if (warn_wrong_flags(f, false))
537 return -EPROTONOSUPPORT;
538
539 /* When open for writing we refuse to open files with compatible flags, too. */
540 if (f->writable && warn_wrong_flags(f, true))
541 return -EPROTONOSUPPORT;
542
543 if (f->header->state >= _STATE_MAX)
544 return -EBADMSG;
545
546 header_size = le64toh(READ_NOW(f->header->header_size));
547
548 /* The first addition was n_data, so check that we are at least this large */
549 if (header_size < HEADER_SIZE_MIN)
550 return -EBADMSG;
551
552 if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
553 return -EBADMSG;
554
555 arena_size = le64toh(READ_NOW(f->header->arena_size));
556
557 if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
558 return -ENODATA;
559
560 if (le64toh(f->header->tail_object_offset) > header_size + arena_size)
561 return -ENODATA;
562
563 if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
564 !VALID64(le64toh(f->header->field_hash_table_offset)) ||
565 !VALID64(le64toh(f->header->tail_object_offset)) ||
566 !VALID64(le64toh(f->header->entry_array_offset)))
567 return -ENODATA;
568
569 if (f->writable) {
570 sd_id128_t machine_id;
571 uint8_t state;
572 int r;
573
574 r = sd_id128_get_machine(&machine_id);
575 if (r < 0)
576 return r;
577
578 if (!sd_id128_equal(machine_id, f->header->machine_id))
579 return -EHOSTDOWN;
580
581 state = f->header->state;
582
583 if (state == STATE_ARCHIVED)
584 return -ESHUTDOWN; /* Already archived */
585 else if (state == STATE_ONLINE)
586 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
587 "Journal file %s is already online. Assuming unclean closing.",
588 f->path);
589 else if (state != STATE_OFFLINE)
590 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
591 "Journal file %s has unknown state %i.",
592 f->path, state);
593
594 if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
595 return -EBADMSG;
596
597 /* Don't permit appending to files from the future. Because otherwise the realtime timestamps wouldn't
598 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
599 * bisection. */
600 if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME))
601 return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY),
602 "Journal file %s is from the future, refusing to append new data to it that'd be older.",
603 f->path);
604 }
605
606 f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
607 f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);
608 f->compress_zstd = JOURNAL_HEADER_COMPRESSED_ZSTD(f->header);
609
610 f->seal = JOURNAL_HEADER_SEALED(f->header);
611
612 f->keyed_hash = JOURNAL_HEADER_KEYED_HASH(f->header);
613
614 return 0;
615 }
616
617 int journal_file_fstat(JournalFile *f) {
618 int r;
619
620 assert(f);
621 assert(f->fd >= 0);
622
623 if (fstat(f->fd, &f->last_stat) < 0)
624 return -errno;
625
626 f->last_stat_usec = now(CLOCK_MONOTONIC);
627
628 /* Refuse dealing with files that aren't regular */
629 r = stat_verify_regular(&f->last_stat);
630 if (r < 0)
631 return r;
632
633 /* Refuse appending to files that are already deleted */
634 if (f->last_stat.st_nlink <= 0)
635 return -EIDRM;
636
637 return 0;
638 }
639
640 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
641 uint64_t old_size, new_size, old_header_size, old_arena_size;
642 int r;
643
644 assert(f);
645 assert(f->header);
646
647 /* We assume that this file is not sparse, and we know that for sure, since we always call
648 * posix_fallocate() ourselves */
649
650 if (size > PAGE_ALIGN_DOWN(UINT64_MAX) - offset)
651 return -EINVAL;
652
653 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
654 return -EIO;
655
656 old_header_size = le64toh(READ_NOW(f->header->header_size));
657 old_arena_size = le64toh(READ_NOW(f->header->arena_size));
658 if (old_arena_size > PAGE_ALIGN_DOWN(UINT64_MAX) - old_header_size)
659 return -EBADMSG;
660
661 old_size = old_header_size + old_arena_size;
662
663 new_size = MAX(PAGE_ALIGN(offset + size), old_header_size);
664
665 if (new_size <= old_size) {
666
667 /* We already pre-allocated enough space, but before
668 * we write to it, let's check with fstat() if the
669                  * file got deleted, in order to make sure we don't throw
670                  * away the data immediately. Don't check fstat() for
671                  * all writes though, but only once every 5s (LAST_STAT_REFRESH_USEC). */
672
673 if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
674 return 0;
675
676 return journal_file_fstat(f);
677 }
678
679 /* Allocate more space. */
680
681 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
682 return -E2BIG;
683
684 if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
685 struct statvfs svfs;
686
687 if (fstatvfs(f->fd, &svfs) >= 0) {
688 uint64_t available;
689
690 available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);
691
692 if (new_size - old_size > available)
693 return -E2BIG;
694 }
695 }
696
697 /* Increase by larger blocks at once */
698 new_size = DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
699 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
700 new_size = f->metrics.max_size;
701
702 /* Note that the glibc fallocate() fallback is very
703 inefficient, hence we try to minimize the allocation area
704            as much as we can. */
705 r = posix_fallocate(f->fd, old_size, new_size - old_size);
706 if (r != 0)
707 return -r;
708
709 f->header->arena_size = htole64(new_size - old_header_size);
710
711 return journal_file_fstat(f);
712 }
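/* Worked example with illustrative numbers: with FILE_SIZE_INCREASE = 8 MiB, a write that needs the
 * file to reach 9 MiB is rounded up to DIV_ROUND_UP(9 MiB, 8 MiB) * 8 MiB = 16 MiB, so subsequent
 * small appends are served from the already fallocate()d area without growing the file again
 * (unless metrics.max_size caps the growth first). */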
713
714 static unsigned type_to_context(ObjectType type) {
715 /* One context for each type, plus one catch-all for the rest */
716 assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
717 assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
718 return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
719 }
720
721 static int journal_file_move_to(
722 JournalFile *f,
723 ObjectType type,
724 bool keep_always,
725 uint64_t offset,
726 uint64_t size,
727 void **ret,
728 size_t *ret_size) {
729
730 int r;
731
732 assert(f);
733 assert(ret);
734
735 if (size <= 0)
736 return -EINVAL;
737
738 if (size > UINT64_MAX - offset)
739 return -EBADMSG;
740
741 /* Avoid SIGBUS on invalid accesses */
742 if (offset + size > (uint64_t) f->last_stat.st_size) {
743 /* Hmm, out of range? Let's refresh the fstat() data
744 * first, before we trust that check. */
745
746 r = journal_file_fstat(f);
747 if (r < 0)
748 return r;
749
750 if (offset + size > (uint64_t) f->last_stat.st_size)
751 return -EADDRNOTAVAIL;
752 }
753
754 return mmap_cache_get(f->mmap, f->cache_fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret, ret_size);
755 }
756
757 static uint64_t minimum_header_size(Object *o) {
758
759 static const uint64_t table[] = {
760 [OBJECT_DATA] = sizeof(DataObject),
761 [OBJECT_FIELD] = sizeof(FieldObject),
762 [OBJECT_ENTRY] = sizeof(EntryObject),
763 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
764 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
765 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
766 [OBJECT_TAG] = sizeof(TagObject),
767 };
768
769 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
770 return sizeof(ObjectHeader);
771
772 return table[o->object.type];
773 }
774
775 /* Lightweight object checks. We want this to be fast, so that we won't
776  * slow down every journal_file_move_to_object() call too much. */
777 static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) {
778 assert(f);
779 assert(o);
780
781 switch (o->object.type) {
782
783 case OBJECT_DATA:
784 if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0))
785 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
786 "Bad n_entries: %" PRIu64 ": %" PRIu64,
787 le64toh(o->data.n_entries),
788 offset);
789
790 if (le64toh(o->object.size) <= offsetof(DataObject, payload))
791 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
792 "Bad object size (<= %zu): %" PRIu64 ": %" PRIu64,
793 offsetof(DataObject, payload),
794 le64toh(o->object.size),
795 offset);
796
797 if (!VALID64(le64toh(o->data.next_hash_offset)) ||
798 !VALID64(le64toh(o->data.next_field_offset)) ||
799 !VALID64(le64toh(o->data.entry_offset)) ||
800 !VALID64(le64toh(o->data.entry_array_offset)))
801 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
802 "Invalid offset, next_hash_offset=" OFSfmt ", next_field_offset=" OFSfmt ", entry_offset=" OFSfmt ", entry_array_offset=" OFSfmt ": %" PRIu64,
803 le64toh(o->data.next_hash_offset),
804 le64toh(o->data.next_field_offset),
805 le64toh(o->data.entry_offset),
806 le64toh(o->data.entry_array_offset),
807 offset);
808
809 break;
810
811 case OBJECT_FIELD:
812 if (le64toh(o->object.size) <= offsetof(FieldObject, payload))
813 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
814 "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64,
815 offsetof(FieldObject, payload),
816 le64toh(o->object.size),
817 offset);
818
819 if (!VALID64(le64toh(o->field.next_hash_offset)) ||
820 !VALID64(le64toh(o->field.head_data_offset)))
821 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
822 "Invalid offset, next_hash_offset=" OFSfmt ", head_data_offset=" OFSfmt ": %" PRIu64,
823 le64toh(o->field.next_hash_offset),
824 le64toh(o->field.head_data_offset),
825 offset);
826 break;
827
828 case OBJECT_ENTRY: {
829 uint64_t sz;
830
831 sz = le64toh(READ_NOW(o->object.size));
832 if (sz < offsetof(EntryObject, items) ||
833 (sz - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0)
834 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
835 "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
836 offsetof(EntryObject, items),
837 sz,
838 offset);
839
840 if ((sz - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0)
841 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
842 "Invalid number items in entry: %" PRIu64 ": %" PRIu64,
843 (sz - offsetof(EntryObject, items)) / sizeof(EntryItem),
844 offset);
845
846 if (le64toh(o->entry.seqnum) <= 0)
847 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
848 "Invalid entry seqnum: %" PRIx64 ": %" PRIu64,
849 le64toh(o->entry.seqnum),
850 offset);
851
852 if (!VALID_REALTIME(le64toh(o->entry.realtime)))
853 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
854 "Invalid entry realtime timestamp: %" PRIu64 ": %" PRIu64,
855 le64toh(o->entry.realtime),
856 offset);
857
858 if (!VALID_MONOTONIC(le64toh(o->entry.monotonic)))
859 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
860 "Invalid entry monotonic timestamp: %" PRIu64 ": %" PRIu64,
861 le64toh(o->entry.monotonic),
862 offset);
863
864 break;
865 }
866
867 case OBJECT_DATA_HASH_TABLE:
868 case OBJECT_FIELD_HASH_TABLE: {
869 uint64_t sz;
870
871 sz = le64toh(READ_NOW(o->object.size));
872 if (sz < offsetof(HashTableObject, items) ||
873 (sz - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
874 (sz - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0)
875 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
876 "Invalid %s hash table size: %" PRIu64 ": %" PRIu64,
877 o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
878 sz,
879 offset);
880
881 break;
882 }
883
884 case OBJECT_ENTRY_ARRAY: {
885 uint64_t sz;
886
887 sz = le64toh(READ_NOW(o->object.size));
888 if (sz < offsetof(EntryArrayObject, items) ||
889 (sz - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
890 (sz - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0)
891 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
892 "Invalid object entry array size: %" PRIu64 ": %" PRIu64,
893 sz,
894 offset);
895
896 if (!VALID64(le64toh(o->entry_array.next_entry_array_offset)))
897 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
898 "Invalid object entry array next_entry_array_offset: " OFSfmt ": %" PRIu64,
899 le64toh(o->entry_array.next_entry_array_offset),
900 offset);
901
902 break;
903 }
904
905 case OBJECT_TAG:
906 if (le64toh(o->object.size) != sizeof(TagObject))
907 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
908 "Invalid object tag size: %" PRIu64 ": %" PRIu64,
909 le64toh(o->object.size),
910 offset);
911
912 if (!VALID_EPOCH(le64toh(o->tag.epoch)))
913 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
914 "Invalid object tag epoch: %" PRIu64 ": %" PRIu64,
915 le64toh(o->tag.epoch), offset);
916
917 break;
918 }
919
920 return 0;
921 }
922
923 int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
924 int r;
925 void *t;
926 size_t tsize;
927 Object *o;
928 uint64_t s;
929
930 assert(f);
931 assert(ret);
932
933         /* Objects may only be located at multiples of 64 bits */
934 if (!VALID64(offset))
935 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
936 "Attempt to move to object at non-64bit boundary: %" PRIu64,
937 offset);
938
939 /* Object may not be located in the file header */
940 if (offset < le64toh(f->header->header_size))
941 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
942 "Attempt to move to object located in file header: %" PRIu64,
943 offset);
944
945 r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t, &tsize);
946 if (r < 0)
947 return r;
948
949 o = (Object*) t;
950 s = le64toh(READ_NOW(o->object.size));
951
952 if (s == 0)
953 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
954 "Attempt to move to uninitialized object: %" PRIu64,
955 offset);
956 if (s < sizeof(ObjectHeader))
957 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
958 "Attempt to move to overly short object: %" PRIu64,
959 offset);
960
961 if (o->object.type <= OBJECT_UNUSED)
962 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
963 "Attempt to move to object with invalid type: %" PRIu64,
964 offset);
965
966 if (s < minimum_header_size(o))
967 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
968 "Attempt to move to truncated object: %" PRIu64,
969 offset);
970
971 if (type > OBJECT_UNUSED && o->object.type != type)
972 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
973 "Attempt to move to object of unexpected type: %" PRIu64,
974 offset);
975
976 if (s > tsize) {
977 r = journal_file_move_to(f, type, false, offset, s, &t, NULL);
978 if (r < 0)
979 return r;
980
981 o = (Object*) t;
982 }
983
984 r = journal_file_check_object(f, offset, o);
985 if (r < 0)
986 return r;
987
988 *ret = o;
989 return 0;
990 }
991
992 static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {
993 uint64_t r;
994
995 assert(f);
996 assert(f->header);
997
998 r = le64toh(f->header->tail_entry_seqnum) + 1;
999
1000 if (seqnum) {
1001 /* If an external seqnum counter was passed, we update
1002 * both the local and the external one, and set it to
1003 * the maximum of both */
1004
1005 if (*seqnum + 1 > r)
1006 r = *seqnum + 1;
1007
1008 *seqnum = r;
1009 }
1010
1011 f->header->tail_entry_seqnum = htole64(r);
1012
1013 if (f->header->head_entry_seqnum == 0)
1014 f->header->head_entry_seqnum = htole64(r);
1015
1016 return r;
1017 }
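/* Worked example: if the header's tail_entry_seqnum is 7 and the caller passes an external counter
 * *seqnum == 10, the local candidate 8 loses against 10 + 1, so the new entry gets seqnum 11 and both
 * the header and the external counter are advanced to 11. Without an external counter the entry would
 * simply get seqnum 8. */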
1018
1019 int journal_file_append_object(
1020 JournalFile *f,
1021 ObjectType type,
1022 uint64_t size,
1023 Object **ret,
1024 uint64_t *ret_offset) {
1025
1026 int r;
1027 uint64_t p;
1028 Object *tail, *o;
1029 void *t;
1030
1031 assert(f);
1032 assert(f->header);
1033 assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
1034 assert(size >= sizeof(ObjectHeader));
1035
1036 r = journal_file_set_online(f);
1037 if (r < 0)
1038 return r;
1039
1040 p = le64toh(f->header->tail_object_offset);
1041 if (p == 0)
1042 p = le64toh(f->header->header_size);
1043 else {
1044 uint64_t sz;
1045
1046 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
1047 if (r < 0)
1048 return r;
1049
1050 sz = le64toh(READ_NOW(tail->object.size));
1051 if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
1052 return -EBADMSG;
1053
1054 sz = ALIGN64(sz);
1055 if (p > UINT64_MAX - sz)
1056 return -EBADMSG;
1057
1058 p += sz;
1059 }
1060
1061 r = journal_file_allocate(f, p, size);
1062 if (r < 0)
1063 return r;
1064
1065 r = journal_file_move_to(f, type, false, p, size, &t, NULL);
1066 if (r < 0)
1067 return r;
1068
1069 o = (Object*) t;
1070 o->object = (ObjectHeader) {
1071 .type = type,
1072 .size = htole64(size),
1073 };
1074
1075 f->header->tail_object_offset = htole64(p);
1076 f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
1077
1078 if (ret)
1079 *ret = o;
1080
1081 if (ret_offset)
1082 *ret_offset = p;
1083
1084 return 0;
1085 }
1086
1087 static int journal_file_setup_data_hash_table(JournalFile *f) {
1088 uint64_t s, p;
1089 Object *o;
1090 int r;
1091
1092 assert(f);
1093 assert(f->header);
1094
1095 /* We estimate that we need 1 hash table entry per 768 bytes
1096 of journal file and we want to make sure we never get
1097 beyond 75% fill level. Calculate the hash table size for
1098 the maximum file size based on these metrics. */
1099
1100 s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
1101 if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
1102 s = DEFAULT_DATA_HASH_TABLE_SIZE;
1103
1104 log_debug("Reserving %"PRIu64" entries in data hash table.", s / sizeof(HashItem));
1105
1106 r = journal_file_append_object(f,
1107 OBJECT_DATA_HASH_TABLE,
1108 offsetof(Object, hash_table.items) + s,
1109 &o, &p);
1110 if (r < 0)
1111 return r;
1112
1113 memzero(o->hash_table.items, s);
1114
1115 f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1116 f->header->data_hash_table_size = htole64(s);
1117
1118 return 0;
1119 }
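/* Worked example with illustrative numbers (assuming 16-byte HashItem entries): for
 * metrics.max_size = 128 MiB the estimate above yields 134217728 * 4 / 768 / 3 ≈ 233016 buckets,
 * i.e. roughly 3.6 MiB of hash table; smaller limits fall back to DEFAULT_DATA_HASH_TABLE_SIZE
 * (2047 buckets). */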
1120
1121 static int journal_file_setup_field_hash_table(JournalFile *f) {
1122 uint64_t s, p;
1123 Object *o;
1124 int r;
1125
1126 assert(f);
1127 assert(f->header);
1128
1129         /* We use a fixed-size hash table for the fields, as their
1130          * number should only grow very slowly. */
1131
1132 s = DEFAULT_FIELD_HASH_TABLE_SIZE;
1133 log_debug("Reserving %"PRIu64" entries in field hash table.", s / sizeof(HashItem));
1134
1135 r = journal_file_append_object(f,
1136 OBJECT_FIELD_HASH_TABLE,
1137 offsetof(Object, hash_table.items) + s,
1138 &o, &p);
1139 if (r < 0)
1140 return r;
1141
1142 memzero(o->hash_table.items, s);
1143
1144 f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1145 f->header->field_hash_table_size = htole64(s);
1146
1147 return 0;
1148 }
1149
1150 int journal_file_map_data_hash_table(JournalFile *f) {
1151 uint64_t s, p;
1152 void *t;
1153 int r;
1154
1155 assert(f);
1156 assert(f->header);
1157
1158 if (f->data_hash_table)
1159 return 0;
1160
1161 p = le64toh(f->header->data_hash_table_offset);
1162 s = le64toh(f->header->data_hash_table_size);
1163
1164 r = journal_file_move_to(f,
1165 OBJECT_DATA_HASH_TABLE,
1166 true,
1167 p, s,
1168 &t, NULL);
1169 if (r < 0)
1170 return r;
1171
1172 f->data_hash_table = t;
1173 return 0;
1174 }
1175
1176 int journal_file_map_field_hash_table(JournalFile *f) {
1177 uint64_t s, p;
1178 void *t;
1179 int r;
1180
1181 assert(f);
1182 assert(f->header);
1183
1184 if (f->field_hash_table)
1185 return 0;
1186
1187 p = le64toh(f->header->field_hash_table_offset);
1188 s = le64toh(f->header->field_hash_table_size);
1189
1190 r = journal_file_move_to(f,
1191 OBJECT_FIELD_HASH_TABLE,
1192 true,
1193 p, s,
1194 &t, NULL);
1195 if (r < 0)
1196 return r;
1197
1198 f->field_hash_table = t;
1199 return 0;
1200 }
1201
1202 static int journal_file_link_field(
1203 JournalFile *f,
1204 Object *o,
1205 uint64_t offset,
1206 uint64_t hash) {
1207
1208 uint64_t p, h, m;
1209 int r;
1210
1211 assert(f);
1212 assert(f->header);
1213 assert(f->field_hash_table);
1214 assert(o);
1215 assert(offset > 0);
1216
1217 if (o->object.type != OBJECT_FIELD)
1218 return -EINVAL;
1219
1220 m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
1221 if (m <= 0)
1222 return -EBADMSG;
1223
1224 /* This might alter the window we are looking at */
1225 o->field.next_hash_offset = o->field.head_data_offset = 0;
1226
1227 h = hash % m;
1228 p = le64toh(f->field_hash_table[h].tail_hash_offset);
1229 if (p == 0)
1230 f->field_hash_table[h].head_hash_offset = htole64(offset);
1231 else {
1232 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1233 if (r < 0)
1234 return r;
1235
1236 o->field.next_hash_offset = htole64(offset);
1237 }
1238
1239 f->field_hash_table[h].tail_hash_offset = htole64(offset);
1240
1241 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
1242 f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
1243
1244 return 0;
1245 }
1246
1247 static int journal_file_link_data(
1248 JournalFile *f,
1249 Object *o,
1250 uint64_t offset,
1251 uint64_t hash) {
1252
1253 uint64_t p, h, m;
1254 int r;
1255
1256 assert(f);
1257 assert(f->header);
1258 assert(f->data_hash_table);
1259 assert(o);
1260 assert(offset > 0);
1261
1262 if (o->object.type != OBJECT_DATA)
1263 return -EINVAL;
1264
1265 m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
1266 if (m <= 0)
1267 return -EBADMSG;
1268
1269 /* This might alter the window we are looking at */
1270 o->data.next_hash_offset = o->data.next_field_offset = 0;
1271 o->data.entry_offset = o->data.entry_array_offset = 0;
1272 o->data.n_entries = 0;
1273
1274 h = hash % m;
1275 p = le64toh(f->data_hash_table[h].tail_hash_offset);
1276 if (p == 0)
1277 /* Only entry in the hash table is easy */
1278 f->data_hash_table[h].head_hash_offset = htole64(offset);
1279 else {
1280 /* Move back to the previous data object, to patch in
1281                  * the pointer */
1282
1283 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1284 if (r < 0)
1285 return r;
1286
1287 o->data.next_hash_offset = htole64(offset);
1288 }
1289
1290 f->data_hash_table[h].tail_hash_offset = htole64(offset);
1291
1292 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
1293 f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
1294
1295 return 0;
1296 }
1297
1298 static int next_hash_offset(
1299 JournalFile *f,
1300 uint64_t *p,
1301 le64_t *next_hash_offset,
1302 uint64_t *depth,
1303 le64_t *header_max_depth) {
1304
1305 uint64_t nextp;
1306
1307 nextp = le64toh(READ_NOW(*next_hash_offset));
1308 if (nextp > 0) {
1309 if (nextp <= *p) /* Refuse going in loops */
1310 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
1311 "Detected hash item loop in %s, refusing.", f->path);
1312
1313 (*depth)++;
1314
1315 /* If the depth of this hash chain is larger than all others we have seen so far, record it */
1316 if (header_max_depth && f->writable)
1317 *header_max_depth = htole64(MAX(*depth, le64toh(*header_max_depth)));
1318 }
1319
1320 *p = nextp;
1321 return 0;
1322 }
1323
1324 int journal_file_find_field_object_with_hash(
1325 JournalFile *f,
1326 const void *field, uint64_t size, uint64_t hash,
1327 Object **ret, uint64_t *ret_offset) {
1328
1329 uint64_t p, osize, h, m, depth = 0;
1330 int r;
1331
1332 assert(f);
1333 assert(f->header);
1334 assert(field && size > 0);
1335
1336 /* If the field hash table is empty, we can't find anything */
1337 if (le64toh(f->header->field_hash_table_size) <= 0)
1338 return 0;
1339
1340 /* Map the field hash table, if it isn't mapped yet. */
1341 r = journal_file_map_field_hash_table(f);
1342 if (r < 0)
1343 return r;
1344
1345 osize = offsetof(Object, field.payload) + size;
1346
1347 m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
1348 if (m <= 0)
1349 return -EBADMSG;
1350
1351 h = hash % m;
1352 p = le64toh(f->field_hash_table[h].head_hash_offset);
1353 while (p > 0) {
1354 Object *o;
1355
1356 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1357 if (r < 0)
1358 return r;
1359
1360 if (le64toh(o->field.hash) == hash &&
1361 le64toh(o->object.size) == osize &&
1362 memcmp(o->field.payload, field, size) == 0) {
1363
1364 if (ret)
1365 *ret = o;
1366 if (ret_offset)
1367 *ret_offset = p;
1368
1369 return 1;
1370 }
1371
1372 r = next_hash_offset(
1373 f,
1374 &p,
1375 &o->field.next_hash_offset,
1376 &depth,
1377 JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) ? &f->header->field_hash_chain_depth : NULL);
1378 if (r < 0)
1379 return r;
1380 }
1381
1382 return 0;
1383 }
1384
1385 uint64_t journal_file_hash_data(
1386 JournalFile *f,
1387 const void *data,
1388 size_t sz) {
1389
1390 assert(f);
1391 assert(data || sz == 0);
1392
1393 /* We try to unify our codebase on siphash, hence new-styled journal files utilizing the keyed hash
1394 * function use siphash. Old journal files use the Jenkins hash. */
1395
1396 if (JOURNAL_HEADER_KEYED_HASH(f->header))
1397 return siphash24(data, sz, f->header->file_id.bytes);
1398
1399 return jenkins_hash64(data, sz);
1400 }
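/* Illustrative usage sketch (hypothetical helper): both data payloads (full "FIELD=value" strings)
 * and field names are hashed through this one function, which keeps keyed-hash files (siphash24 keyed
 * with the file ID) and legacy files (jenkins_hash64) interchangeable for callers. */
#if 0
static uint64_t example_payload_hash(JournalFile *f) {
        static const char payload[] = "MESSAGE=hello";

        /* The trailing NUL is not part of the payload, hence sizeof() - 1 (== 13). */
        return journal_file_hash_data(f, payload, sizeof(payload) - 1);
}
#endif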
1401
1402 int journal_file_find_field_object(
1403 JournalFile *f,
1404 const void *field, uint64_t size,
1405 Object **ret, uint64_t *ret_offset) {
1406
1407 assert(f);
1408 assert(field && size > 0);
1409
1410 return journal_file_find_field_object_with_hash(
1411 f,
1412 field, size,
1413 journal_file_hash_data(f, field, size),
1414 ret, ret_offset);
1415 }
1416
1417 int journal_file_find_data_object_with_hash(
1418 JournalFile *f,
1419 const void *data, uint64_t size, uint64_t hash,
1420 Object **ret, uint64_t *ret_offset) {
1421
1422 uint64_t p, osize, h, m, depth = 0;
1423 int r;
1424
1425 assert(f);
1426 assert(f->header);
1427 assert(data || size == 0);
1428
1429 /* If there's no data hash table, then there's no entry. */
1430 if (le64toh(f->header->data_hash_table_size) <= 0)
1431 return 0;
1432
1433 /* Map the data hash table, if it isn't mapped yet. */
1434 r = journal_file_map_data_hash_table(f);
1435 if (r < 0)
1436 return r;
1437
1438 osize = offsetof(Object, data.payload) + size;
1439
1440 m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
1441 if (m <= 0)
1442 return -EBADMSG;
1443
1444 h = hash % m;
1445 p = le64toh(f->data_hash_table[h].head_hash_offset);
1446
1447 while (p > 0) {
1448 Object *o;
1449
1450 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1451 if (r < 0)
1452 return r;
1453
1454 if (le64toh(o->data.hash) != hash)
1455 goto next;
1456
1457 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
1458 #if HAVE_COMPRESSION
1459 uint64_t l;
1460 size_t rsize = 0;
1461
1462 l = le64toh(READ_NOW(o->object.size));
1463 if (l <= offsetof(Object, data.payload))
1464 return -EBADMSG;
1465
1466 l -= offsetof(Object, data.payload);
1467
1468 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
1469 o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);
1470 if (r < 0)
1471 return r;
1472
1473 if (rsize == size &&
1474 memcmp(f->compress_buffer, data, size) == 0) {
1475
1476 if (ret)
1477 *ret = o;
1478
1479 if (ret_offset)
1480 *ret_offset = p;
1481
1482 return 1;
1483 }
1484 #else
1485 return -EPROTONOSUPPORT;
1486 #endif
1487 } else if (le64toh(o->object.size) == osize &&
1488 memcmp(o->data.payload, data, size) == 0) {
1489
1490 if (ret)
1491 *ret = o;
1492
1493 if (ret_offset)
1494 *ret_offset = p;
1495
1496 return 1;
1497 }
1498
1499 next:
1500 r = next_hash_offset(
1501 f,
1502 &p,
1503 &o->data.next_hash_offset,
1504 &depth,
1505 JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) ? &f->header->data_hash_chain_depth : NULL);
1506 if (r < 0)
1507 return r;
1508 }
1509
1510 return 0;
1511 }
1512
1513 int journal_file_find_data_object(
1514 JournalFile *f,
1515 const void *data, uint64_t size,
1516 Object **ret, uint64_t *ret_offset) {
1517
1518 assert(f);
1519 assert(data || size == 0);
1520
1521 return journal_file_find_data_object_with_hash(
1522 f,
1523 data, size,
1524 journal_file_hash_data(f, data, size),
1525 ret, ret_offset);
1526 }
1527
1528 static int journal_file_append_field(
1529 JournalFile *f,
1530 const void *field, uint64_t size,
1531 Object **ret, uint64_t *ret_offset) {
1532
1533 uint64_t hash, p;
1534 uint64_t osize;
1535 Object *o;
1536 int r;
1537
1538 assert(f);
1539 assert(field && size > 0);
1540
1541 hash = journal_file_hash_data(f, field, size);
1542
1543 r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
1544 if (r < 0)
1545 return r;
1546 else if (r > 0) {
1547
1548 if (ret)
1549 *ret = o;
1550
1551 if (ret_offset)
1552 *ret_offset = p;
1553
1554 return 0;
1555 }
1556
1557 osize = offsetof(Object, field.payload) + size;
1558 r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
1559 if (r < 0)
1560 return r;
1561
1562 o->field.hash = htole64(hash);
1563 memcpy(o->field.payload, field, size);
1564
1565 r = journal_file_link_field(f, o, p, hash);
1566 if (r < 0)
1567 return r;
1568
1569 /* The linking might have altered the window, so let's
1570 * refresh our pointer */
1571 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1572 if (r < 0)
1573 return r;
1574
1575 #if HAVE_GCRYPT
1576 r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
1577 if (r < 0)
1578 return r;
1579 #endif
1580
1581 if (ret)
1582 *ret = o;
1583
1584 if (ret_offset)
1585 *ret_offset = p;
1586
1587 return 0;
1588 }
1589
1590 static int journal_file_append_data(
1591 JournalFile *f,
1592 const void *data, uint64_t size,
1593 Object **ret, uint64_t *ret_offset) {
1594
1595 uint64_t hash, p;
1596 uint64_t osize;
1597 Object *o;
1598 int r, compression = 0;
1599 const void *eq;
1600
1601 assert(f);
1602 assert(data || size == 0);
1603
1604 hash = journal_file_hash_data(f, data, size);
1605
1606 r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);
1607 if (r < 0)
1608 return r;
1609 if (r > 0) {
1610
1611 if (ret)
1612 *ret = o;
1613
1614 if (ret_offset)
1615 *ret_offset = p;
1616
1617 return 0;
1618 }
1619
1620 osize = offsetof(Object, data.payload) + size;
1621 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1622 if (r < 0)
1623 return r;
1624
1625 o->data.hash = htole64(hash);
1626
1627 #if HAVE_COMPRESSION
1628 if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
1629 size_t rsize = 0;
1630
1631 compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
1632
1633 if (compression >= 0) {
1634 o->object.size = htole64(offsetof(Object, data.payload) + rsize);
1635 o->object.flags |= compression;
1636
1637 log_debug("Compressed data object %"PRIu64" -> %zu using %s",
1638 size, rsize, object_compressed_to_string(compression));
1639 } else
1640 /* Compression didn't work, we don't really care why, let's continue without compression */
1641 compression = 0;
1642 }
1643 #endif
1644
1645 if (compression == 0)
1646 memcpy_safe(o->data.payload, data, size);
1647
1648 r = journal_file_link_data(f, o, p, hash);
1649 if (r < 0)
1650 return r;
1651
1652 #if HAVE_GCRYPT
1653 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1654 if (r < 0)
1655 return r;
1656 #endif
1657
1658 /* The linking might have altered the window, so let's
1659 * refresh our pointer */
1660 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1661 if (r < 0)
1662 return r;
1663
1664 if (!data)
1665 eq = NULL;
1666 else
1667 eq = memchr(data, '=', size);
1668 if (eq && eq > data) {
1669 Object *fo = NULL;
1670 uint64_t fp;
1671
1672 /* Create field object ... */
1673 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
1674 if (r < 0)
1675 return r;
1676
1677 /* ... and link it in. */
1678 o->data.next_field_offset = fo->field.head_data_offset;
1679                 fo->field.head_data_offset = htole64(p);
1680 }
1681
1682 if (ret)
1683 *ret = o;
1684
1685 if (ret_offset)
1686 *ret_offset = p;
1687
1688 return 0;
1689 }
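/* Worked example for the field linking above: for the 13-byte payload "MESSAGE=hello", memchr() finds
 * '=' at offset 7, so journal_file_append_field() is called with the 7-byte field name "MESSAGE" and
 * the new data object is chained into that field object's head_data_offset list. */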
1690
1691 uint64_t journal_file_entry_n_items(Object *o) {
1692 uint64_t sz;
1693 assert(o);
1694
1695 if (o->object.type != OBJECT_ENTRY)
1696 return 0;
1697
1698 sz = le64toh(READ_NOW(o->object.size));
1699 if (sz < offsetof(Object, entry.items))
1700 return 0;
1701
1702 return (sz - offsetof(Object, entry.items)) / sizeof(EntryItem);
1703 }
1704
1705 uint64_t journal_file_entry_array_n_items(Object *o) {
1706 uint64_t sz;
1707
1708 assert(o);
1709
1710 if (o->object.type != OBJECT_ENTRY_ARRAY)
1711 return 0;
1712
1713 sz = le64toh(READ_NOW(o->object.size));
1714 if (sz < offsetof(Object, entry_array.items))
1715 return 0;
1716
1717 return (sz - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
1718 }
1719
1720 uint64_t journal_file_hash_table_n_items(Object *o) {
1721 uint64_t sz;
1722
1723 assert(o);
1724
1725 if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
1726 return 0;
1727
1728 sz = le64toh(READ_NOW(o->object.size));
1729 if (sz < offsetof(Object, hash_table.items))
1730 return 0;
1731
1732 return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem);
1733 }
1734
1735 static int link_entry_into_array(JournalFile *f,
1736 le64_t *first,
1737 le64_t *idx,
1738 uint64_t p) {
1739 int r;
1740 uint64_t n = 0, ap = 0, q, i, a, hidx;
1741 Object *o;
1742
1743 assert(f);
1744 assert(f->header);
1745 assert(first);
1746 assert(idx);
1747 assert(p > 0);
1748
1749 a = le64toh(*first);
1750 i = hidx = le64toh(READ_NOW(*idx));
1751 while (a > 0) {
1752
1753 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
1754 if (r < 0)
1755 return r;
1756
1757 n = journal_file_entry_array_n_items(o);
1758 if (i < n) {
1759 o->entry_array.items[i] = htole64(p);
1760 *idx = htole64(hidx + 1);
1761 return 0;
1762 }
1763
1764 i -= n;
1765 ap = a;
1766 a = le64toh(o->entry_array.next_entry_array_offset);
1767 }
1768
1769 if (hidx > n)
1770 n = (hidx+1) * 2;
1771 else
1772 n = n * 2;
1773
1774 if (n < 4)
1775 n = 4;
1776
1777 r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
1778 offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
1779 &o, &q);
1780 if (r < 0)
1781 return r;
1782
1783 #if HAVE_GCRYPT
1784 r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
1785 if (r < 0)
1786 return r;
1787 #endif
1788
1789 o->entry_array.items[i] = htole64(p);
1790
1791 if (ap == 0)
1792 *first = htole64(q);
1793 else {
1794 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
1795 if (r < 0)
1796 return r;
1797
1798 o->entry_array.next_entry_array_offset = htole64(q);
1799 }
1800
1801 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
1802 f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
1803
1804 *idx = htole64(hidx + 1);
1805
1806 return 0;
1807 }
1808
1809 static int link_entry_into_array_plus_one(JournalFile *f,
1810 le64_t *extra,
1811 le64_t *first,
1812 le64_t *idx,
1813 uint64_t p) {
1814
1815 uint64_t hidx;
1816 int r;
1817
1818 assert(f);
1819 assert(extra);
1820 assert(first);
1821 assert(idx);
1822 assert(p > 0);
1823
1824 hidx = le64toh(READ_NOW(*idx));
1825 if (hidx == UINT64_MAX)
1826 return -EBADMSG;
1827 if (hidx == 0)
1828 *extra = htole64(p);
1829 else {
1830 le64_t i;
1831
1832 i = htole64(hidx - 1);
1833 r = link_entry_into_array(f, first, &i, p);
1834 if (r < 0)
1835 return r;
1836 }
1837
1838 *idx = htole64(hidx + 1);
1839 return 0;
1840 }
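/* Worked example of the "plus one" scheme: the very first entry offset is stored inline. With
 * *idx == 0 the offset goes into 'extra' (e.g. data.entry_offset); with *idx == 3 it becomes index 2
 * of the entry array chain referenced by 'first'; in both cases *idx is then bumped by one. */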
1841
1842 static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
1843 uint64_t p;
1844 int r;
1845
1846 assert(f);
1847 assert(o);
1848 assert(offset > 0);
1849
1850 p = le64toh(o->entry.items[i].object_offset);
1851 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1852 if (r < 0)
1853 return r;
1854
1855 return link_entry_into_array_plus_one(f,
1856 &o->data.entry_offset,
1857 &o->data.entry_array_offset,
1858 &o->data.n_entries,
1859 offset);
1860 }
1861
1862 static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
1863 uint64_t n, i;
1864 int r;
1865
1866 assert(f);
1867 assert(f->header);
1868 assert(o);
1869 assert(offset > 0);
1870
1871 if (o->object.type != OBJECT_ENTRY)
1872 return -EINVAL;
1873
1874 __sync_synchronize();
1875
1876 /* Link up the entry itself */
1877 r = link_entry_into_array(f,
1878 &f->header->entry_array_offset,
1879 &f->header->n_entries,
1880 offset);
1881 if (r < 0)
1882 return r;
1883
1884 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1885
1886 if (f->header->head_entry_realtime == 0)
1887 f->header->head_entry_realtime = o->entry.realtime;
1888
1889 f->header->tail_entry_realtime = o->entry.realtime;
1890 f->header->tail_entry_monotonic = o->entry.monotonic;
1891
1892 /* Link up the items */
1893 n = journal_file_entry_n_items(o);
1894 for (i = 0; i < n; i++) {
1895 r = journal_file_link_entry_item(f, o, offset, i);
1896 if (r < 0)
1897 return r;
1898 }
1899
1900 return 0;
1901 }
1902
1903 static int journal_file_append_entry_internal(
1904 JournalFile *f,
1905 const dual_timestamp *ts,
1906 const sd_id128_t *boot_id,
1907 uint64_t xor_hash,
1908 const EntryItem items[], unsigned n_items,
1909 uint64_t *seqnum,
1910 Object **ret, uint64_t *ret_offset) {
1911 uint64_t np;
1912 uint64_t osize;
1913 Object *o;
1914 int r;
1915
1916 assert(f);
1917 assert(f->header);
1918 assert(items || n_items == 0);
1919 assert(ts);
1920
1921 osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
1922
1923 r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
1924 if (r < 0)
1925 return r;
1926
1927 o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
1928 memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
1929 o->entry.realtime = htole64(ts->realtime);
1930 o->entry.monotonic = htole64(ts->monotonic);
1931 o->entry.xor_hash = htole64(xor_hash);
1932 if (boot_id)
1933 f->header->boot_id = *boot_id;
1934 o->entry.boot_id = f->header->boot_id;
1935
1936 #if HAVE_GCRYPT
1937 r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
1938 if (r < 0)
1939 return r;
1940 #endif
1941
1942 r = journal_file_link_entry(f, o, np);
1943 if (r < 0)
1944 return r;
1945
1946 if (ret)
1947 *ret = o;
1948
1949 if (ret_offset)
1950 *ret_offset = np;
1951
1952 return 0;
1953 }
1954
1955 void journal_file_post_change(JournalFile *f) {
1956 assert(f);
1957
1958 if (f->fd < 0)
1959 return;
1960
1961         /* inotify() does not receive IN_MODIFY events for file
1962          * accesses done via mmap(). Hence, after each change we
1963          * trigger IN_MODIFY explicitly by truncating the journal
1964          * file to its current size. */
1965
1966 __sync_synchronize();
1967
1968 if (ftruncate(f->fd, f->last_stat.st_size) < 0)
1969 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
1970 }
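/* Illustrative sketch of a consumer (hypothetical helper, assuming <sys/inotify.h> is available):
 * thanks to the ftruncate() trick above, a reader can rely on plain inotify to learn about appends
 * performed through mmap(). */
#if 0
#include <sys/inotify.h>

static int example_watch_journal(const char *path) {
        int fd, wd;

        fd = inotify_init1(IN_CLOEXEC);
        if (fd < 0)
                return -errno;

        /* Each journal_file_post_change() surfaces as an IN_MODIFY event on the file. */
        wd = inotify_add_watch(fd, path, IN_MODIFY);
        if (wd < 0) {
                (void) close(fd);
                return -errno;
        }

        return fd;
}
#endif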
1971
1972 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
1973 assert(userdata);
1974
1975 journal_file_post_change(userdata);
1976
1977 return 1;
1978 }
1979
1980 static void schedule_post_change(JournalFile *f) {
1981 int r;
1982
1983 assert(f);
1984 assert(f->post_change_timer);
1985
1986 r = sd_event_source_get_enabled(f->post_change_timer, NULL);
1987 if (r < 0) {
1988 log_debug_errno(r, "Failed to get ftruncate timer state: %m");
1989 goto fail;
1990 }
1991 if (r > 0)
1992 return;
1993
1994 r = sd_event_source_set_time_relative(f->post_change_timer, f->post_change_timer_period);
1995 if (r < 0) {
1996 log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
1997 goto fail;
1998 }
1999
2000 r = sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_ONESHOT);
2001 if (r < 0) {
2002 log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
2003 goto fail;
2004 }
2005
2006 return;
2007
2008 fail:
2009 /* On failure, let's simply post the change immediately. */
2010 journal_file_post_change(f);
2011 }
2012
2013 /* Enable coalesced change posting in a timer on the provided sd_event instance */
2014 int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
2015 _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
2016 int r;
2017
2018 assert(f);
2019 assert_return(!f->post_change_timer, -EINVAL);
2020 assert(e);
2021 assert(t);
2022
2023 r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
2024 if (r < 0)
2025 return r;
2026
2027 r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
2028 if (r < 0)
2029 return r;
2030
2031 f->post_change_timer = TAKE_PTR(timer);
2032 f->post_change_timer_period = t;
2033
2034 return r;
2035 }
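/* Illustrative usage sketch (hypothetical caller, illustrative 250ms period): wiring the coalescing
 * timer into an event loop so that a burst of appends results in a single deferred ftruncate()
 * instead of one per entry. */
#if 0
static int example_enable_coalescing(JournalFile *f, sd_event *e) {
        /* Post the truncation at most every 250ms instead of after every single append. */
        return journal_file_enable_post_change_timer(f, e, 250 * USEC_PER_MSEC);
}
#endif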
2036
2037 static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
2038 return CMP(le64toh(a->object_offset), le64toh(b->object_offset));
2039 }
2040
2041 int journal_file_append_entry(
2042 JournalFile *f,
2043 const dual_timestamp *ts,
2044 const sd_id128_t *boot_id,
2045 const struct iovec iovec[], unsigned n_iovec,
2046 uint64_t *seqnum,
2047 Object **ret, uint64_t *ret_offset) {
2048
2049 unsigned i;
2050 EntryItem *items;
2051 int r;
2052 uint64_t xor_hash = 0;
2053 struct dual_timestamp _ts;
2054
2055 assert(f);
2056 assert(f->header);
2057 assert(iovec || n_iovec == 0);
2058
2059 if (ts) {
2060 if (!VALID_REALTIME(ts->realtime))
2061 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2062 "Invalid realtime timestamp %" PRIu64 ", refusing entry.",
2063 ts->realtime);
2064 if (!VALID_MONOTONIC(ts->monotonic))
2065 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2066 "Invalid monotomic timestamp %" PRIu64 ", refusing entry.",
2067 ts->monotonic);
2068 } else {
2069 dual_timestamp_get(&_ts);
2070 ts = &_ts;
2071 }
2072
2073 #if HAVE_GCRYPT
2074 r = journal_file_maybe_append_tag(f, ts->realtime);
2075 if (r < 0)
2076 return r;
2077 #endif
2078
2079 /* alloca() can't take 0, hence let's allocate at least one */
2080 items = newa(EntryItem, MAX(1u, n_iovec));
2081
2082 for (i = 0; i < n_iovec; i++) {
2083 uint64_t p;
2084 Object *o;
2085
2086 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
2087 if (r < 0)
2088 return r;
2089
2090                 /* When calculating the XOR hash field, we need to take special care if the "keyed-hash"
2091                  * journal file flag is on. We use the XOR hash field to quickly determine the identity of a
2092                  * specific record, and to give records with otherwise identical position (i.e. match in seqno,
2093                  * timestamp, …) a stable ordering. For that the hash must not differ between files, which it
2094                  * would if we used the keyed per-file hashes. Hence let's calculate the plain Jenkins hash
2095                  * here instead. This also has the benefit that cursors for old and new journal files are
2096                  * completely identical (they include the XOR hash after all). For classic Jenkins-hash files
2097                  * things are easier, we can just take the value from the stored record directly. */
2098
2099 if (JOURNAL_HEADER_KEYED_HASH(f->header))
2100 xor_hash ^= jenkins_hash64(iovec[i].iov_base, iovec[i].iov_len);
2101 else
2102 xor_hash ^= le64toh(o->data.hash);
2103
2104 items[i].object_offset = htole64(p);
2105 items[i].hash = o->data.hash;
2106 }
2107
2108 /* Order by the position on disk, in order to improve seek
2109 * times for rotating media. */
2110 typesafe_qsort(items, n_iovec, entry_item_cmp);
2111
2112 r = journal_file_append_entry_internal(f, ts, boot_id, xor_hash, items, n_iovec, seqnum, ret, ret_offset);
2113
2114 /* If the memory mapping triggered a SIGBUS then we return an
2115 * IO error and ignore the error code passed down to us, since
2116 * it is very likely just an effect of a nullified replacement
2117 * mapping page */
2118
2119 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
2120 r = -EIO;
2121
2122 if (f->post_change_timer)
2123 schedule_post_change(f);
2124 else
2125 journal_file_post_change(f);
2126
2127 return r;
2128 }
2129
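/* Entry array objects form singly-linked chains that can only be walked from their head. To avoid rescanning a
 * chain from the start on every lookup, we keep a small per-file cache (up to CHAIN_CACHE_MAX chains) that
 * remembers, for each chain (identified by its first array), the array we last visited, how many items preceded
 * it, and the index we last looked at. */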
2130 typedef struct ChainCacheItem {
2131 uint64_t first; /* the array at the beginning of the chain */
2132 uint64_t array; /* the cached array */
2133 uint64_t begin; /* the first item in the cached array */
2134 uint64_t total; /* the total number of items in all arrays before this one in the chain */
2135 uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
2136 } ChainCacheItem;
2137
2138 static void chain_cache_put(
2139 OrderedHashmap *h,
2140 ChainCacheItem *ci,
2141 uint64_t first,
2142 uint64_t array,
2143 uint64_t begin,
2144 uint64_t total,
2145 uint64_t last_index) {
2146
2147 if (!ci) {
2148 /* If the chain item to cache for this chain is the
2149 * first one it's not worth caching anything */
2150 if (array == first)
2151 return;
2152
2153 if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
2154 ci = ordered_hashmap_steal_first(h);
2155 assert(ci);
2156 } else {
2157 ci = new(ChainCacheItem, 1);
2158 if (!ci)
2159 return;
2160 }
2161
2162 ci->first = first;
2163
2164 if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
2165 free(ci);
2166 return;
2167 }
2168 } else
2169 assert(ci->first == first);
2170
2171 ci->array = array;
2172 ci->begin = begin;
2173 ci->total = total;
2174 ci->last_index = last_index;
2175 }
2176
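/* Resolves the i-th entry of the entry array chain starting at 'first', following next_entry_array_offset links
 * and consulting the chain cache to skip ahead where possible. Returns 1 and the entry object/offset on success,
 * 0 if the index lies beyond the end of the chain, negative errno on failure. */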
2177 static int generic_array_get(
2178 JournalFile *f,
2179 uint64_t first,
2180 uint64_t i,
2181 Object **ret, uint64_t *ret_offset) {
2182
2183 Object *o;
2184 uint64_t p = 0, a, t = 0;
2185 int r;
2186 ChainCacheItem *ci;
2187
2188 assert(f);
2189
2190 a = first;
2191
2192 /* Try the chain cache first */
2193 ci = ordered_hashmap_get(f->chain_cache, &first);
2194 if (ci && i > ci->total) {
2195 a = ci->array;
2196 i -= ci->total;
2197 t = ci->total;
2198 }
2199
2200 while (a > 0) {
2201 uint64_t k;
2202
2203 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
2204 if (r < 0)
2205 return r;
2206
2207 k = journal_file_entry_array_n_items(o);
2208 if (i < k) {
2209 p = le64toh(o->entry_array.items[i]);
2210 goto found;
2211 }
2212
2213 i -= k;
2214 t += k;
2215 a = le64toh(o->entry_array.next_entry_array_offset);
2216 }
2217
2218 return 0;
2219
2220 found:
2221 /* Let's cache this item for the next invocation */
2222 chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
2223
2224 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2225 if (r < 0)
2226 return r;
2227
2228 if (ret)
2229 *ret = o;
2230
2231 if (ret_offset)
2232 *ret_offset = p;
2233
2234 return 1;
2235 }
2236
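/* Like generic_array_get(), but index 0 refers to the "extra" entry offset passed in directly (e.g. a data
 * object's entry_offset), while indices >= 1 are resolved through the entry array chain, shifted by one. */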
2237 static int generic_array_get_plus_one(
2238 JournalFile *f,
2239 uint64_t extra,
2240 uint64_t first,
2241 uint64_t i,
2242 Object **ret, uint64_t *ret_offset) {
2243
2244 Object *o;
2245
2246 assert(f);
2247
2248 if (i == 0) {
2249 int r;
2250
2251 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2252 if (r < 0)
2253 return r;
2254
2255 if (ret)
2256 *ret = o;
2257
2258 if (ret_offset)
2259 *ret_offset = extra;
2260
2261 return 1;
2262 }
2263
2264 return generic_array_get(f, first, i-1, ret, ret_offset);
2265 }
2266
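/* Return values of the test_object() callbacks used for bisection: TEST_FOUND means the object at the tested
 * offset matches the needle, TEST_LEFT means it sorts before the needle (i.e. the needle lies further to the
 * right), TEST_RIGHT means it sorts after the needle. */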
2267 enum {
2268 TEST_FOUND,
2269 TEST_LEFT,
2270 TEST_RIGHT
2271 };
2272
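/* Bisects the entry array chain starting at 'first' (with 'n' items in total) for the entry matching 'needle'
 * according to test_object(), returning the closest entry in the requested direction. The chain cache is used to
 * resume close to the array and index visited last time. */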
2273 static int generic_array_bisect(
2274 JournalFile *f,
2275 uint64_t first,
2276 uint64_t n,
2277 uint64_t needle,
2278 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2279 direction_t direction,
2280 Object **ret,
2281 uint64_t *ret_offset,
2282 uint64_t *ret_idx) {
2283
2284 uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
2285 bool subtract_one = false;
2286 Object *o, *array = NULL;
2287 int r;
2288 ChainCacheItem *ci;
2289
2290 assert(f);
2291 assert(test_object);
2292
2293 /* Start with the first array in the chain */
2294 a = first;
2295
2296 ci = ordered_hashmap_get(f->chain_cache, &first);
2297 if (ci && n > ci->total && ci->begin != 0) {
2298 /* Ah, we have iterated this bisection array chain
2299 * previously! Let's see if we can skip ahead in the
2300 * chain, as far as the last time. But we can't jump
2301 * backwards in the chain, so let's check that
2302 * first. */
2303
2304 r = test_object(f, ci->begin, needle);
2305 if (r < 0)
2306 return r;
2307
2308 if (r == TEST_LEFT) {
2309                         /* OK, what we are looking for lies to the right of the
2310                          * beginning of this EntryArray, so let's jump straight
2311                          * to the previously cached array in the
2312                          * chain */
2313
2314 a = ci->array;
2315 n -= ci->total;
2316 t = ci->total;
2317 last_index = ci->last_index;
2318 }
2319 }
2320
2321 while (a > 0) {
2322 uint64_t left, right, k, lp;
2323
2324 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
2325 if (r < 0)
2326 return r;
2327
2328 k = journal_file_entry_array_n_items(array);
2329 right = MIN(k, n);
2330 if (right <= 0)
2331 return 0;
2332
2333 i = right - 1;
2334 lp = p = le64toh(array->entry_array.items[i]);
2335 if (p <= 0)
2336 r = -EBADMSG;
2337 else
2338 r = test_object(f, p, needle);
2339 if (r == -EBADMSG) {
2340 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2341 n = i;
2342 continue;
2343 }
2344 if (r < 0)
2345 return r;
2346
2347 if (r == TEST_FOUND)
2348 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2349
2350 if (r == TEST_RIGHT) {
2351 left = 0;
2352 right -= 1;
2353
2354 if (last_index != (uint64_t) -1) {
2355 assert(last_index <= right);
2356
2357                                 /* If we cached the last index we
2358                                  * looked at, let's try not to jump
2359                                  * around too wildly and see if we can
2360                                  * limit the range to look at early to
2361                                  * the immediate neighbors of the last
2362                                  * index we looked at. */
2363
2364 if (last_index > 0) {
2365 uint64_t x = last_index - 1;
2366
2367 p = le64toh(array->entry_array.items[x]);
2368 if (p <= 0)
2369 return -EBADMSG;
2370
2371 r = test_object(f, p, needle);
2372 if (r < 0)
2373 return r;
2374
2375 if (r == TEST_FOUND)
2376 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2377
2378 if (r == TEST_RIGHT)
2379 right = x;
2380 else
2381 left = x + 1;
2382 }
2383
2384 if (last_index < right) {
2385 uint64_t y = last_index + 1;
2386
2387 p = le64toh(array->entry_array.items[y]);
2388 if (p <= 0)
2389 return -EBADMSG;
2390
2391 r = test_object(f, p, needle);
2392 if (r < 0)
2393 return r;
2394
2395 if (r == TEST_FOUND)
2396 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2397
2398 if (r == TEST_RIGHT)
2399 right = y;
2400 else
2401 left = y + 1;
2402 }
2403 }
2404
2405 for (;;) {
2406 if (left == right) {
2407 if (direction == DIRECTION_UP)
2408 subtract_one = true;
2409
2410 i = left;
2411 goto found;
2412 }
2413
2414 assert(left < right);
2415 i = (left + right) / 2;
2416
2417 p = le64toh(array->entry_array.items[i]);
2418 if (p <= 0)
2419 r = -EBADMSG;
2420 else
2421 r = test_object(f, p, needle);
2422 if (r == -EBADMSG) {
2423 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2424 right = n = i;
2425 continue;
2426 }
2427 if (r < 0)
2428 return r;
2429
2430 if (r == TEST_FOUND)
2431 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2432
2433 if (r == TEST_RIGHT)
2434 right = i;
2435 else
2436 left = i + 1;
2437 }
2438 }
2439
2440 if (k >= n) {
2441 if (direction == DIRECTION_UP) {
2442 i = n;
2443 subtract_one = true;
2444 goto found;
2445 }
2446
2447 return 0;
2448 }
2449
2450 last_p = lp;
2451
2452 n -= k;
2453 t += k;
2454 last_index = (uint64_t) -1;
2455 a = le64toh(array->entry_array.next_entry_array_offset);
2456 }
2457
2458 return 0;
2459
2460 found:
2461 if (subtract_one && t == 0 && i == 0)
2462 return 0;
2463
2464 /* Let's cache this item for the next invocation */
2465 chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);
2466
2467 if (subtract_one && i == 0)
2468 p = last_p;
2469 else if (subtract_one)
2470 p = le64toh(array->entry_array.items[i-1]);
2471 else
2472 p = le64toh(array->entry_array.items[i]);
2473
2474 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2475 if (r < 0)
2476 return r;
2477
2478 if (ret)
2479 *ret = o;
2480
2481 if (ret_offset)
2482 *ret_offset = p;
2483
2484 if (ret_idx)
2485 *ret_idx = t + i + (subtract_one ? -1 : 0);
2486
2487 return 1;
2488 }
2489
2490 static int generic_array_bisect_plus_one(
2491 JournalFile *f,
2492 uint64_t extra,
2493 uint64_t first,
2494 uint64_t n,
2495 uint64_t needle,
2496 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2497 direction_t direction,
2498 Object **ret,
2499 uint64_t *ret_offset,
2500 uint64_t *ret_idx) {
2501
2502 int r;
2503 bool step_back = false;
2504 Object *o;
2505
2506 assert(f);
2507 assert(test_object);
2508
2509 if (n <= 0)
2510 return 0;
2511
2512 /* This bisects the array in object 'first', but first checks
2513 * an extra */
2514 r = test_object(f, extra, needle);
2515 if (r < 0)
2516 return r;
2517
2518 if (r == TEST_FOUND)
2519 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2520
2521 /* if we are looking with DIRECTION_UP then we need to first
2522 see if in the actual array there is a matching entry, and
2523 return the last one of that. But if there isn't any we need
2524 to return this one. Hence remember this, and return it
2525 below. */
2526 if (r == TEST_LEFT)
2527 step_back = direction == DIRECTION_UP;
2528
2529 if (r == TEST_RIGHT) {
2530 if (direction == DIRECTION_DOWN)
2531 goto found;
2532 else
2533 return 0;
2534 }
2535
2536 r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, ret_offset, ret_idx);
2537
2538 if (r == 0 && step_back)
2539 goto found;
2540
2541 if (r > 0 && ret_idx)
2542 (*ret_idx)++;
2543
2544 return r;
2545
2546 found:
2547 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2548 if (r < 0)
2549 return r;
2550
2551 if (ret)
2552 *ret = o;
2553
2554 if (ret_offset)
2555 *ret_offset = extra;
2556
2557 if (ret_idx)
2558 *ret_idx = 0;
2559
2560 return 1;
2561 }
2562
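/* The following test_object_*() helpers are the bisection callbacks: each compares the entry at offset 'p'
 * against the needle by a different key (file offset, sequence number, realtime or monotonic timestamp). */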
2563 _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
2564 assert(f);
2565 assert(p > 0);
2566
2567 if (p == needle)
2568 return TEST_FOUND;
2569 else if (p < needle)
2570 return TEST_LEFT;
2571 else
2572 return TEST_RIGHT;
2573 }
2574
2575 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
2576 uint64_t sq;
2577 Object *o;
2578 int r;
2579
2580 assert(f);
2581 assert(p > 0);
2582
2583 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2584 if (r < 0)
2585 return r;
2586
2587 sq = le64toh(READ_NOW(o->entry.seqnum));
2588 if (sq == needle)
2589 return TEST_FOUND;
2590 else if (sq < needle)
2591 return TEST_LEFT;
2592 else
2593 return TEST_RIGHT;
2594 }
2595
2596 int journal_file_move_to_entry_by_seqnum(
2597 JournalFile *f,
2598 uint64_t seqnum,
2599 direction_t direction,
2600 Object **ret,
2601 uint64_t *ret_offset) {
2602 assert(f);
2603 assert(f->header);
2604
2605 return generic_array_bisect(
2606 f,
2607 le64toh(f->header->entry_array_offset),
2608 le64toh(f->header->n_entries),
2609 seqnum,
2610 test_object_seqnum,
2611 direction,
2612 ret, ret_offset, NULL);
2613 }
2614
2615 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
2616 Object *o;
2617 uint64_t rt;
2618 int r;
2619
2620 assert(f);
2621 assert(p > 0);
2622
2623 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2624 if (r < 0)
2625 return r;
2626
2627 rt = le64toh(READ_NOW(o->entry.realtime));
2628 if (rt == needle)
2629 return TEST_FOUND;
2630 else if (rt < needle)
2631 return TEST_LEFT;
2632 else
2633 return TEST_RIGHT;
2634 }
2635
2636 int journal_file_move_to_entry_by_realtime(
2637 JournalFile *f,
2638 uint64_t realtime,
2639 direction_t direction,
2640 Object **ret,
2641 uint64_t *ret_offset) {
2642 assert(f);
2643 assert(f->header);
2644
2645 return generic_array_bisect(
2646 f,
2647 le64toh(f->header->entry_array_offset),
2648 le64toh(f->header->n_entries),
2649 realtime,
2650 test_object_realtime,
2651 direction,
2652 ret, ret_offset, NULL);
2653 }
2654
2655 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
2656 Object *o;
2657 uint64_t m;
2658 int r;
2659
2660 assert(f);
2661 assert(p > 0);
2662
2663 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2664 if (r < 0)
2665 return r;
2666
2667 m = le64toh(READ_NOW(o->entry.monotonic));
2668 if (m == needle)
2669 return TEST_FOUND;
2670 else if (m < needle)
2671 return TEST_LEFT;
2672 else
2673 return TEST_RIGHT;
2674 }
2675
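/* Looks up the data object for the field "_BOOT_ID=<boot ID formatted as 32 hex characters>". */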
2676 static int find_data_object_by_boot_id(
2677 JournalFile *f,
2678 sd_id128_t boot_id,
2679 Object **o,
2680 uint64_t *b) {
2681
2682 char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
2683
2684 sd_id128_to_string(boot_id, t + 9);
2685 return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);
2686 }
2687
2688 int journal_file_move_to_entry_by_monotonic(
2689 JournalFile *f,
2690 sd_id128_t boot_id,
2691 uint64_t monotonic,
2692 direction_t direction,
2693 Object **ret,
2694 uint64_t *ret_offset) {
2695
2696 Object *o;
2697 int r;
2698
2699 assert(f);
2700
2701 r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
2702 if (r < 0)
2703 return r;
2704 if (r == 0)
2705 return -ENOENT;
2706
2707 return generic_array_bisect_plus_one(
2708 f,
2709 le64toh(o->data.entry_offset),
2710 le64toh(o->data.entry_array_offset),
2711 le64toh(o->data.n_entries),
2712 monotonic,
2713 test_object_monotonic,
2714 direction,
2715 ret, ret_offset, NULL);
2716 }
2717
2718 void journal_file_reset_location(JournalFile *f) {
2719 f->location_type = LOCATION_HEAD;
2720 f->current_offset = 0;
2721 f->current_seqnum = 0;
2722 f->current_realtime = 0;
2723 f->current_monotonic = 0;
2724 zero(f->current_boot_id);
2725 f->current_xor_hash = 0;
2726 }
2727
2728 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
2729 f->location_type = LOCATION_SEEK;
2730 f->current_offset = offset;
2731 f->current_seqnum = le64toh(o->entry.seqnum);
2732 f->current_realtime = le64toh(o->entry.realtime);
2733 f->current_monotonic = le64toh(o->entry.monotonic);
2734 f->current_boot_id = o->entry.boot_id;
2735 f->current_xor_hash = le64toh(o->entry.xor_hash);
2736 }
2737
2738 int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
2739 int r;
2740
2741 assert(af);
2742 assert(af->header);
2743 assert(bf);
2744 assert(bf->header);
2745 assert(af->location_type == LOCATION_SEEK);
2746 assert(bf->location_type == LOCATION_SEEK);
2747
2748 /* If contents and timestamps match, these entries are
2749 * identical, even if the seqnum does not match */
2750 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
2751 af->current_monotonic == bf->current_monotonic &&
2752 af->current_realtime == bf->current_realtime &&
2753 af->current_xor_hash == bf->current_xor_hash)
2754 return 0;
2755
2756 if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
2757
2758 /* If this is from the same seqnum source, compare
2759 * seqnums */
2760 r = CMP(af->current_seqnum, bf->current_seqnum);
2761 if (r != 0)
2762 return r;
2763
2764 /* Wow! This is weird, different data but the same
2765 * seqnums? Something is borked, but let's make the
2766 * best of it and compare by time. */
2767 }
2768
2769 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {
2770
2771 /* If the boot id matches, compare monotonic time */
2772 r = CMP(af->current_monotonic, bf->current_monotonic);
2773 if (r != 0)
2774 return r;
2775 }
2776
2777 /* Otherwise, compare UTC time */
2778 r = CMP(af->current_realtime, bf->current_realtime);
2779 if (r != 0)
2780 return r;
2781
2782 /* Finally, compare by contents */
2783 return CMP(af->current_xor_hash, bf->current_xor_hash);
2784 }
2785
2786 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2787
2788 /* Increase or decrease the specified index, in the right direction. */
2789
2790 if (direction == DIRECTION_DOWN) {
2791 if (*i >= n - 1)
2792 return 0;
2793
2794 (*i) ++;
2795 } else {
2796 if (*i <= 0)
2797 return 0;
2798
2799 (*i) --;
2800 }
2801
2802 return 1;
2803 }
2804
2805 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
2806
2807         /* Consider it an error if either of the two offsets is uninitialized */
2808 if (old_offset == 0 || new_offset == 0)
2809 return false;
2810
2811 /* If we go down, the new offset must be larger than the old one. */
2812 return direction == DIRECTION_DOWN ?
2813 new_offset > old_offset :
2814 new_offset < old_offset;
2815 }
2816
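/* Finds the entry following (DIRECTION_DOWN) or preceding (DIRECTION_UP) the entry at offset 'p' in the global
 * entry array; with p == 0 iteration starts at the very beginning resp. very end of the file. */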
2817 int journal_file_next_entry(
2818 JournalFile *f,
2819 uint64_t p,
2820 direction_t direction,
2821 Object **ret, uint64_t *ret_offset) {
2822
2823 uint64_t i, n, ofs;
2824 int r;
2825
2826 assert(f);
2827 assert(f->header);
2828
2829 n = le64toh(READ_NOW(f->header->n_entries));
2830 if (n <= 0)
2831 return 0;
2832
2833 if (p == 0)
2834 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2835 else {
2836 r = generic_array_bisect(f,
2837 le64toh(f->header->entry_array_offset),
2838 le64toh(f->header->n_entries),
2839 p,
2840 test_object_offset,
2841 DIRECTION_DOWN,
2842 NULL, NULL,
2843 &i);
2844 if (r <= 0)
2845 return r;
2846
2847 r = bump_array_index(&i, direction, n);
2848 if (r <= 0)
2849 return r;
2850 }
2851
2852 /* And jump to it */
2853 for (;;) {
2854 r = generic_array_get(f,
2855 le64toh(f->header->entry_array_offset),
2856 i,
2857 ret, &ofs);
2858 if (r > 0)
2859 break;
2860 if (r != -EBADMSG)
2861 return r;
2862
2863 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2864 * the next one might work for us instead. */
2865 log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
2866
2867 r = bump_array_index(&i, direction, n);
2868 if (r <= 0)
2869 return r;
2870 }
2871
2872 /* Ensure our array is properly ordered. */
2873 if (p > 0 && !check_properly_ordered(ofs, p, direction))
2874 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2875 "%s: entry array not properly ordered at entry %" PRIu64,
2876 f->path, i);
2877
2878 if (ret_offset)
2879 *ret_offset = ofs;
2880
2881 return 1;
2882 }
2883
2884 int journal_file_next_entry_for_data(
2885 JournalFile *f,
2886 Object *o, uint64_t p,
2887 uint64_t data_offset,
2888 direction_t direction,
2889 Object **ret, uint64_t *ret_offset) {
2890
2891 uint64_t i, n, ofs;
2892 Object *d;
2893 int r;
2894
2895 assert(f);
2896 assert(p > 0 || !o);
2897
2898 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2899 if (r < 0)
2900 return r;
2901
2902 n = le64toh(READ_NOW(d->data.n_entries));
2903 if (n <= 0)
2904 return n;
2905
2906 if (!o)
2907 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2908 else {
2909 if (o->object.type != OBJECT_ENTRY)
2910 return -EINVAL;
2911
2912 r = generic_array_bisect_plus_one(f,
2913 le64toh(d->data.entry_offset),
2914 le64toh(d->data.entry_array_offset),
2915 le64toh(d->data.n_entries),
2916 p,
2917 test_object_offset,
2918 DIRECTION_DOWN,
2919 NULL, NULL,
2920 &i);
2921
2922 if (r <= 0)
2923 return r;
2924
2925 r = bump_array_index(&i, direction, n);
2926 if (r <= 0)
2927 return r;
2928 }
2929
2930 for (;;) {
2931 r = generic_array_get_plus_one(f,
2932 le64toh(d->data.entry_offset),
2933 le64toh(d->data.entry_array_offset),
2934 i,
2935 ret, &ofs);
2936 if (r > 0)
2937 break;
2938 if (r != -EBADMSG)
2939 return r;
2940
2941 log_debug_errno(r, "Data entry item %" PRIu64 " is bad, skipping over it.", i);
2942
2943 r = bump_array_index(&i, direction, n);
2944 if (r <= 0)
2945 return r;
2946 }
2947
2948 /* Ensure our array is properly ordered. */
2949         if (p > 0 && !check_properly_ordered(ofs, p, direction))
2950                 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2951                                        "%s: data entry array not properly ordered at entry %" PRIu64,
2952 f->path, i);
2953
2954 if (ret_offset)
2955 *ret_offset = ofs;
2956
2957 return 1;
2958 }
2959
2960 int journal_file_move_to_entry_by_offset_for_data(
2961 JournalFile *f,
2962 uint64_t data_offset,
2963 uint64_t p,
2964 direction_t direction,
2965 Object **ret, uint64_t *ret_offset) {
2966
2967 int r;
2968 Object *d;
2969
2970 assert(f);
2971
2972 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2973 if (r < 0)
2974 return r;
2975
2976 return generic_array_bisect_plus_one(
2977 f,
2978 le64toh(d->data.entry_offset),
2979 le64toh(d->data.entry_array_offset),
2980 le64toh(d->data.n_entries),
2981 p,
2982 test_object_offset,
2983 direction,
2984 ret, ret_offset, NULL);
2985 }
2986
2987 int journal_file_move_to_entry_by_monotonic_for_data(
2988 JournalFile *f,
2989 uint64_t data_offset,
2990 sd_id128_t boot_id,
2991 uint64_t monotonic,
2992 direction_t direction,
2993 Object **ret, uint64_t *ret_offset) {
2994
2995 Object *o, *d;
2996 int r;
2997 uint64_t b, z;
2998
2999 assert(f);
3000
3001 /* First, seek by time */
3002 r = find_data_object_by_boot_id(f, boot_id, &o, &b);
3003 if (r < 0)
3004 return r;
3005 if (r == 0)
3006 return -ENOENT;
3007
3008 r = generic_array_bisect_plus_one(f,
3009 le64toh(o->data.entry_offset),
3010 le64toh(o->data.entry_array_offset),
3011 le64toh(o->data.n_entries),
3012 monotonic,
3013 test_object_monotonic,
3014 direction,
3015 NULL, &z, NULL);
3016 if (r <= 0)
3017 return r;
3018
3019 /* And now, continue seeking until we find an entry that
3020 * exists in both bisection arrays */
3021
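        /* We do this by alternating between the two entry lists: bisect the data object's list for an entry at
         * or past offset z, then bisect the boot ID list for an entry at or past that result. The offsets only
         * ever move in the seek direction, hence this converges on the closest entry contained in both lists. */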
3022 for (;;) {
3023 Object *qo;
3024 uint64_t p, q;
3025
3026 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3027 if (r < 0)
3028 return r;
3029
3030 r = generic_array_bisect_plus_one(f,
3031 le64toh(d->data.entry_offset),
3032 le64toh(d->data.entry_array_offset),
3033 le64toh(d->data.n_entries),
3034 z,
3035 test_object_offset,
3036 direction,
3037 NULL, &p, NULL);
3038 if (r <= 0)
3039 return r;
3040
3041 r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
3042 if (r < 0)
3043 return r;
3044
3045 r = generic_array_bisect_plus_one(f,
3046 le64toh(o->data.entry_offset),
3047 le64toh(o->data.entry_array_offset),
3048 le64toh(o->data.n_entries),
3049 p,
3050 test_object_offset,
3051 direction,
3052 &qo, &q, NULL);
3053
3054 if (r <= 0)
3055 return r;
3056
3057 if (p == q) {
3058 if (ret)
3059 *ret = qo;
3060 if (ret_offset)
3061 *ret_offset = q;
3062
3063 return 1;
3064 }
3065
3066 z = q;
3067 }
3068 }
3069
3070 int journal_file_move_to_entry_by_seqnum_for_data(
3071 JournalFile *f,
3072 uint64_t data_offset,
3073 uint64_t seqnum,
3074 direction_t direction,
3075 Object **ret, uint64_t *ret_offset) {
3076
3077 Object *d;
3078 int r;
3079
3080 assert(f);
3081
3082 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3083 if (r < 0)
3084 return r;
3085
3086 return generic_array_bisect_plus_one(
3087 f,
3088 le64toh(d->data.entry_offset),
3089 le64toh(d->data.entry_array_offset),
3090 le64toh(d->data.n_entries),
3091 seqnum,
3092 test_object_seqnum,
3093 direction,
3094 ret, ret_offset, NULL);
3095 }
3096
3097 int journal_file_move_to_entry_by_realtime_for_data(
3098 JournalFile *f,
3099 uint64_t data_offset,
3100 uint64_t realtime,
3101 direction_t direction,
3102 Object **ret, uint64_t *ret_offset) {
3103
3104 Object *d;
3105 int r;
3106
3107 assert(f);
3108
3109 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3110 if (r < 0)
3111 return r;
3112
3113 return generic_array_bisect_plus_one(
3114 f,
3115 le64toh(d->data.entry_offset),
3116 le64toh(d->data.entry_array_offset),
3117 le64toh(d->data.n_entries),
3118 realtime,
3119 test_object_realtime,
3120 direction,
3121 ret, ret_offset, NULL);
3122 }
3123
3124 void journal_file_dump(JournalFile *f) {
3125 Object *o;
3126 int r;
3127 uint64_t p;
3128
3129 assert(f);
3130 assert(f->header);
3131
3132 journal_file_print_header(f);
3133
3134 p = le64toh(READ_NOW(f->header->header_size));
3135 while (p != 0) {
3136 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
3137 if (r < 0)
3138 goto fail;
3139
3140 switch (o->object.type) {
3141
3142 case OBJECT_UNUSED:
3143 printf("Type: OBJECT_UNUSED\n");
3144 break;
3145
3146 case OBJECT_DATA:
3147 printf("Type: OBJECT_DATA\n");
3148 break;
3149
3150 case OBJECT_FIELD:
3151 printf("Type: OBJECT_FIELD\n");
3152 break;
3153
3154 case OBJECT_ENTRY:
3155 printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
3156 le64toh(o->entry.seqnum),
3157 le64toh(o->entry.monotonic),
3158 le64toh(o->entry.realtime));
3159 break;
3160
3161 case OBJECT_FIELD_HASH_TABLE:
3162 printf("Type: OBJECT_FIELD_HASH_TABLE\n");
3163 break;
3164
3165 case OBJECT_DATA_HASH_TABLE:
3166 printf("Type: OBJECT_DATA_HASH_TABLE\n");
3167 break;
3168
3169 case OBJECT_ENTRY_ARRAY:
3170 printf("Type: OBJECT_ENTRY_ARRAY\n");
3171 break;
3172
3173 case OBJECT_TAG:
3174 printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
3175 le64toh(o->tag.seqnum),
3176 le64toh(o->tag.epoch));
3177 break;
3178
3179 default:
3180 printf("Type: unknown (%i)\n", o->object.type);
3181 break;
3182 }
3183
3184 if (o->object.flags & OBJECT_COMPRESSION_MASK)
3185 printf("Flags: %s\n",
3186 object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));
3187
3188 if (p == le64toh(f->header->tail_object_offset))
3189 p = 0;
3190 else
3191 p += ALIGN64(le64toh(o->object.size));
3192 }
3193
3194 return;
3195 fail:
3196 log_error("File corrupt");
3197 }
3198
3199 static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {
3200 const char *x;
3201
3202 x = format_timestamp(buf, l, t);
3203 if (x)
3204 return x;
3205 return " --- ";
3206 }
3207
3208 void journal_file_print_header(JournalFile *f) {
3209 char a[SD_ID128_STRING_MAX], b[SD_ID128_STRING_MAX], c[SD_ID128_STRING_MAX], d[SD_ID128_STRING_MAX];
3210 char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
3211 struct stat st;
3212 char bytes[FORMAT_BYTES_MAX];
3213
3214 assert(f);
3215 assert(f->header);
3216
3217 printf("File path: %s\n"
3218 "File ID: %s\n"
3219 "Machine ID: %s\n"
3220 "Boot ID: %s\n"
3221 "Sequential number ID: %s\n"
3222 "State: %s\n"
3223 "Compatible flags:%s%s\n"
3224 "Incompatible flags:%s%s%s%s%s\n"
3225 "Header size: %"PRIu64"\n"
3226 "Arena size: %"PRIu64"\n"
3227 "Data hash table size: %"PRIu64"\n"
3228 "Field hash table size: %"PRIu64"\n"
3229 "Rotate suggested: %s\n"
3230 "Head sequential number: %"PRIu64" (%"PRIx64")\n"
3231 "Tail sequential number: %"PRIu64" (%"PRIx64")\n"
3232 "Head realtime timestamp: %s (%"PRIx64")\n"
3233 "Tail realtime timestamp: %s (%"PRIx64")\n"
3234 "Tail monotonic timestamp: %s (%"PRIx64")\n"
3235 "Objects: %"PRIu64"\n"
3236 "Entry objects: %"PRIu64"\n",
3237 f->path,
3238 sd_id128_to_string(f->header->file_id, a),
3239 sd_id128_to_string(f->header->machine_id, b),
3240 sd_id128_to_string(f->header->boot_id, c),
3241 sd_id128_to_string(f->header->seqnum_id, d),
3242 f->header->state == STATE_OFFLINE ? "OFFLINE" :
3243 f->header->state == STATE_ONLINE ? "ONLINE" :
3244 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
3245 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
3246 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
3247 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
3248 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
3249 JOURNAL_HEADER_COMPRESSED_ZSTD(f->header) ? " COMPRESSED-ZSTD" : "",
3250 JOURNAL_HEADER_KEYED_HASH(f->header) ? " KEYED-HASH" : "",
3251 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
3252 le64toh(f->header->header_size),
3253 le64toh(f->header->arena_size),
3254 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3255 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
3256 yes_no(journal_file_rotate_suggested(f, 0)),
3257 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
3258 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
3259 format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
3260 format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
3261 format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
3262 le64toh(f->header->n_objects),
3263 le64toh(f->header->n_entries));
3264
3265 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3266 printf("Data objects: %"PRIu64"\n"
3267 "Data hash table fill: %.1f%%\n",
3268 le64toh(f->header->n_data),
3269 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
3270
3271 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3272 printf("Field objects: %"PRIu64"\n"
3273 "Field hash table fill: %.1f%%\n",
3274 le64toh(f->header->n_fields),
3275 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
3276
3277 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
3278 printf("Tag objects: %"PRIu64"\n",
3279 le64toh(f->header->n_tags));
3280 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
3281 printf("Entry array objects: %"PRIu64"\n",
3282 le64toh(f->header->n_entry_arrays));
3283
3284 if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth))
3285 printf("Deepest field hash chain: %" PRIu64"\n",
3286                        le64toh(f->header->field_hash_chain_depth));
3287
3288 if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth))
3289 printf("Deepest data hash chain: %" PRIu64"\n",
3290                        le64toh(f->header->data_hash_chain_depth));
3291
3292 if (fstat(f->fd, &st) >= 0)
3293 printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
3294 }
3295
3296 static int journal_file_warn_btrfs(JournalFile *f) {
3297 unsigned attrs;
3298 int r;
3299
3300 assert(f);
3301
3302         /* Before we write anything, check if the COW logic is turned
3303          * off on btrfs. Our write pattern is quite unfriendly to COW
3304          * file systems, hence turning COW off should greatly improve
3305          * performance on such file systems, e.g. btrfs, at the
3306          * expense of data integrity features (which shouldn't be too
3307          * bad, given that we do our own checksumming). */
3308
3309 r = btrfs_is_filesystem(f->fd);
3310 if (r < 0)
3311 return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
3312 if (!r)
3313 return 0;
3314
3315 r = read_attr_fd(f->fd, &attrs);
3316 if (r < 0)
3317 return log_warning_errno(r, "Failed to read file attributes: %m");
3318
3319 if (attrs & FS_NOCOW_FL) {
3320 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3321 return 0;
3322 }
3323
3324 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3325 "This is likely to slow down journal access substantially, please consider turning "
3326 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
3327
3328 return 1;
3329 }
3330
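/* Opens the journal file specified by fd and/or fname, mmap()ing its header, and creating and initializing a new
 * file if it is empty and writable. If 'template' is specified, its post-change timer settings and (unless
 * explicit metrics are passed) its metrics are inherited by the new file. */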
3331 int journal_file_open(
3332 int fd,
3333 const char *fname,
3334 int flags,
3335 mode_t mode,
3336 bool compress,
3337 uint64_t compress_threshold_bytes,
3338 bool seal,
3339 JournalMetrics *metrics,
3340 MMapCache *mmap_cache,
3341 Set *deferred_closes,
3342 JournalFile *template,
3343 JournalFile **ret) {
3344
3345 bool newly_created = false;
3346 JournalFile *f;
3347 void *h;
3348 int r;
3349
3350 assert(ret);
3351 assert(fd >= 0 || fname);
3352
3353 if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))
3354 return -EINVAL;
3355
3356 if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))
3357 return -EINVAL;
3358
3359 f = new(JournalFile, 1);
3360 if (!f)
3361 return -ENOMEM;
3362
3363 *f = (JournalFile) {
3364 .fd = fd,
3365 .mode = mode,
3366
3367 .flags = flags,
3368 .prot = prot_from_flags(flags),
3369 .writable = (flags & O_ACCMODE) != O_RDONLY,
3370
3371 #if HAVE_ZSTD
3372 .compress_zstd = compress,
3373 #elif HAVE_LZ4
3374 .compress_lz4 = compress,
3375 #elif HAVE_XZ
3376 .compress_xz = compress,
3377 #endif
3378 .compress_threshold_bytes = compress_threshold_bytes == (uint64_t) -1 ?
3379 DEFAULT_COMPRESS_THRESHOLD :
3380 MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
3381 #if HAVE_GCRYPT
3382 .seal = seal,
3383 #endif
3384 };
3385
3386 /* We turn on keyed hashes by default, but provide an environment variable to turn them off, if
3387 * people really want that */
3388 r = getenv_bool("SYSTEMD_JOURNAL_KEYED_HASH");
3389 if (r < 0) {
3390 if (r != -ENXIO)
3391 log_debug_errno(r, "Failed to parse $SYSTEMD_JOURNAL_KEYED_HASH environment variable, ignoring.");
3392 f->keyed_hash = true;
3393 } else
3394 f->keyed_hash = r;
3395
3396 if (DEBUG_LOGGING) {
3397 static int last_seal = -1, last_compress = -1, last_keyed_hash = -1;
3398 static uint64_t last_bytes = UINT64_MAX;
3399 char bytes[FORMAT_BYTES_MAX];
3400
3401 if (last_seal != f->seal ||
3402 last_keyed_hash != f->keyed_hash ||
3403 last_compress != JOURNAL_FILE_COMPRESS(f) ||
3404 last_bytes != f->compress_threshold_bytes) {
3405
3406 log_debug("Journal effective settings seal=%s keyed_hash=%s compress=%s compress_threshold_bytes=%s",
3407 yes_no(f->seal), yes_no(f->keyed_hash), yes_no(JOURNAL_FILE_COMPRESS(f)),
3408 format_bytes(bytes, sizeof bytes, f->compress_threshold_bytes));
3409 last_seal = f->seal;
3410 last_keyed_hash = f->keyed_hash;
3411 last_compress = JOURNAL_FILE_COMPRESS(f);
3412 last_bytes = f->compress_threshold_bytes;
3413 }
3414 }
3415
3416 if (mmap_cache)
3417 f->mmap = mmap_cache_ref(mmap_cache);
3418 else {
3419 f->mmap = mmap_cache_new();
3420 if (!f->mmap) {
3421 r = -ENOMEM;
3422 goto fail;
3423 }
3424 }
3425
3426 if (fname) {
3427 f->path = strdup(fname);
3428 if (!f->path) {
3429 r = -ENOMEM;
3430 goto fail;
3431 }
3432 } else {
3433 assert(fd >= 0);
3434
3435 /* If we don't know the path, fill in something explanatory and vaguely useful */
3436                 if (asprintf(&f->path, "/proc/self/fd/%i", fd) < 0) {
3437 r = -ENOMEM;
3438 goto fail;
3439 }
3440 }
3441
3442 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
3443 if (!f->chain_cache) {
3444 r = -ENOMEM;
3445 goto fail;
3446 }
3447
3448 if (f->fd < 0) {
3449                 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
3450                  * or so, we fail quickly rather than block for long. For regular files O_NONBLOCK has no effect, hence
3451                  * it doesn't hurt in that case. */
3452
3453 f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);
3454 if (f->fd < 0) {
3455 r = -errno;
3456 goto fail;
3457 }
3458
3459                 /* fds we opened here ourselves should also be closed by us. */
3460 f->close_fd = true;
3461
3462 r = fd_nonblock(f->fd, false);
3463 if (r < 0)
3464 goto fail;
3465 }
3466
3467 f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd);
3468 if (!f->cache_fd) {
3469 r = -ENOMEM;
3470 goto fail;
3471 }
3472
3473 r = journal_file_fstat(f);
3474 if (r < 0)
3475 goto fail;
3476
3477 if (f->last_stat.st_size == 0 && f->writable) {
3478
3479 (void) journal_file_warn_btrfs(f);
3480
3481 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
3482 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
3483 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
3484 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
3485 * solely on mtime/atime/ctime of the file. */
3486 (void) fd_setcrtime(f->fd, 0);
3487
3488 #if HAVE_GCRYPT
3489 /* Try to load the FSPRG state, and if we can't, then
3490 * just don't do sealing */
3491 if (f->seal) {
3492 r = journal_file_fss_load(f);
3493 if (r < 0)
3494 f->seal = false;
3495 }
3496 #endif
3497
3498 r = journal_file_init_header(f, template);
3499 if (r < 0)
3500 goto fail;
3501
3502 r = journal_file_fstat(f);
3503 if (r < 0)
3504 goto fail;
3505
3506 newly_created = true;
3507 }
3508
3509 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
3510 r = -ENODATA;
3511 goto fail;
3512 }
3513
3514 r = mmap_cache_get(f->mmap, f->cache_fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h, NULL);
3515 if (r == -EINVAL) {
3516 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
3517 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
3518 * code. */
3519 r = -EAFNOSUPPORT;
3520 goto fail;
3521 }
3522 if (r < 0)
3523 goto fail;
3524
3525 f->header = h;
3526
3527 if (!newly_created) {
3528 set_clear_with_destructor(deferred_closes, journal_file_close);
3529
3530 r = journal_file_verify_header(f);
3531 if (r < 0)
3532 goto fail;
3533 }
3534
3535 #if HAVE_GCRYPT
3536 if (!newly_created && f->writable) {
3537 r = journal_file_fss_load(f);
3538 if (r < 0)
3539 goto fail;
3540 }
3541 #endif
3542
3543 if (f->writable) {
3544 if (metrics) {
3545 journal_default_metrics(metrics, f->fd);
3546 f->metrics = *metrics;
3547 } else if (template)
3548 f->metrics = template->metrics;
3549
3550 r = journal_file_refresh_header(f);
3551 if (r < 0)
3552 goto fail;
3553 }
3554
3555 #if HAVE_GCRYPT
3556 r = journal_file_hmac_setup(f);
3557 if (r < 0)
3558 goto fail;
3559 #endif
3560
3561 if (newly_created) {
3562 r = journal_file_setup_field_hash_table(f);
3563 if (r < 0)
3564 goto fail;
3565
3566 r = journal_file_setup_data_hash_table(f);
3567 if (r < 0)
3568 goto fail;
3569
3570 #if HAVE_GCRYPT
3571 r = journal_file_append_first_tag(f);
3572 if (r < 0)
3573 goto fail;
3574 #endif
3575 }
3576
3577 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) {
3578 r = -EIO;
3579 goto fail;
3580 }
3581
3582 if (template && template->post_change_timer) {
3583 r = journal_file_enable_post_change_timer(
3584 f,
3585 sd_event_source_get_event(template->post_change_timer),
3586 template->post_change_timer_period);
3587
3588 if (r < 0)
3589 goto fail;
3590 }
3591
3592         /* The file is now opened successfully, thus we take possession of any passed-in fd. */
3593 f->close_fd = true;
3594
3595 *ret = f;
3596 return 0;
3597
3598 fail:
3599 if (f->cache_fd && mmap_cache_got_sigbus(f->mmap, f->cache_fd))
3600 r = -EIO;
3601
3602 (void) journal_file_close(f);
3603
3604 return r;
3605 }
3606
3607 int journal_file_archive(JournalFile *f) {
3608 _cleanup_free_ char *p = NULL;
3609
3610 assert(f);
3611
3612 if (!f->writable)
3613 return -EINVAL;
3614
3615         /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3616          * rotation, since we don't know the actual path and hence couldn't rename the file. */
3617 if (path_startswith(f->path, "/proc/self/fd"))
3618 return -EINVAL;
3619
3620 if (!endswith(f->path, ".journal"))
3621 return -EINVAL;
3622
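        /* The archived name is "<original name minus .journal>@<seqnum ID>-<head seqnum>-<head realtime>.journal",
         * with the last two fields formatted as 16-digit hexadecimal numbers. */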
3623 if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
3624 (int) strlen(f->path) - 8, f->path,
3625 SD_ID128_FORMAT_VAL(f->header->seqnum_id),
3626 le64toh(f->header->head_entry_seqnum),
3627 le64toh(f->header->head_entry_realtime)) < 0)
3628 return -ENOMEM;
3629
3630         /* Try to rename the file to the archived version. If the file was already deleted, we'll get ENOENT, let's
3631 * ignore that case. */
3632 if (rename(f->path, p) < 0 && errno != ENOENT)
3633 return -errno;
3634
3635 /* Sync the rename to disk */
3636 (void) fsync_directory_of_file(f->fd);
3637
3638 /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
3639 * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
3640 * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
3641 * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
3642 * occurs. */
3643 f->archive = true;
3644
3645         /* Currently, btrfs is not very good with our write patterns and fragments heavily. Let's defrag our journal
3646 * files when we archive them */
3647 f->defrag_on_close = true;
3648
3649 return 0;
3650 }
3651
3652 JournalFile* journal_initiate_close(
3653 JournalFile *f,
3654 Set *deferred_closes) {
3655
3656 int r;
3657
3658 assert(f);
3659
3660 if (deferred_closes) {
3661
3662 r = set_put(deferred_closes, f);
3663 if (r < 0)
3664 log_debug_errno(r, "Failed to add file to deferred close set, closing immediately.");
3665 else {
3666 (void) journal_file_set_offline(f, false);
3667 return NULL;
3668 }
3669 }
3670
3671 return journal_file_close(f);
3672 }
3673
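/* Archives the current file and replaces *f with a freshly created journal file under the original path. The old
 * file is either closed right away or, if a deferred close set is provided, offlined asynchronously and added to
 * that set. */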
3674 int journal_file_rotate(
3675 JournalFile **f,
3676 bool compress,
3677 uint64_t compress_threshold_bytes,
3678 bool seal,
3679 Set *deferred_closes) {
3680
3681 JournalFile *new_file = NULL;
3682 int r;
3683
3684 assert(f);
3685 assert(*f);
3686
3687 r = journal_file_archive(*f);
3688 if (r < 0)
3689 return r;
3690
3691 r = journal_file_open(
3692 -1,
3693 (*f)->path,
3694 (*f)->flags,
3695 (*f)->mode,
3696 compress,
3697 compress_threshold_bytes,
3698 seal,
3699 NULL, /* metrics */
3700 (*f)->mmap,
3701 deferred_closes,
3702 *f, /* template */
3703 &new_file);
3704
3705 journal_initiate_close(*f, deferred_closes);
3706 *f = new_file;
3707
3708 return r;
3709 }
3710
3711 int journal_file_dispose(int dir_fd, const char *fname) {
3712 _cleanup_free_ char *p = NULL;
3713 _cleanup_close_ int fd = -1;
3714
3715 assert(fname);
3716
3717         /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shut down. Note that
3718 * this is done without looking into the file or changing any of its contents. The idea is that this is called
3719 * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
3720 * for writing anymore. */
3721
3722 if (!endswith(fname, ".journal"))
3723 return -EINVAL;
3724
3725 if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
3726 (int) strlen(fname) - 8, fname,
3727 now(CLOCK_REALTIME),
3728 random_u64()) < 0)
3729 return -ENOMEM;
3730
3731 if (renameat(dir_fd, fname, dir_fd, p) < 0)
3732 return -errno;
3733
3734 /* btrfs doesn't cope well with our write pattern and fragments heavily. Let's defrag all files we rotate */
3735 fd = openat(dir_fd, p, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
3736 if (fd < 0)
3737 log_debug_errno(errno, "Failed to open file for defragmentation/FS_NOCOW_FL, ignoring: %m");
3738 else {
3739 (void) chattr_fd(fd, 0, FS_NOCOW_FL, NULL);
3740 (void) btrfs_defrag_fd(fd);
3741 }
3742
3743 return 0;
3744 }
3745
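/* Like journal_file_open(), but if the file turns out to be corrupted or otherwise unusable, and we are allowed
 * to create it (i.e. it is opened writable with O_CREAT), the broken file is renamed away with
 * journal_file_dispose() and opening is retried once with a fresh file. */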
3746 int journal_file_open_reliably(
3747 const char *fname,
3748 int flags,
3749 mode_t mode,
3750 bool compress,
3751 uint64_t compress_threshold_bytes,
3752 bool seal,
3753 JournalMetrics *metrics,
3754 MMapCache *mmap_cache,
3755 Set *deferred_closes,
3756 JournalFile *template,
3757 JournalFile **ret) {
3758
3759 int r;
3760
3761 r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3762 deferred_closes, template, ret);
3763 if (!IN_SET(r,
3764 -EBADMSG, /* Corrupted */
3765 -ENODATA, /* Truncated */
3766 -EHOSTDOWN, /* Other machine */
3767 -EPROTONOSUPPORT, /* Incompatible feature */
3768 -EBUSY, /* Unclean shutdown */
3769 -ESHUTDOWN, /* Already archived */
3770 -EIO, /* IO error, including SIGBUS on mmap */
3771 -EIDRM, /* File has been deleted */
3772 -ETXTBSY)) /* File is from the future */
3773 return r;
3774
3775 if ((flags & O_ACCMODE) == O_RDONLY)
3776 return r;
3777
3778 if (!(flags & O_CREAT))
3779 return r;
3780
3781 if (!endswith(fname, ".journal"))
3782 return r;
3783
3784 /* The file is corrupted. Rotate it away and try it again (but only once) */
3785 log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
3786
3787 r = journal_file_dispose(AT_FDCWD, fname);
3788 if (r < 0)
3789 return r;
3790
3791 return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3792 deferred_closes, template, ret);
3793 }
3794
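/* Copies the entry object 'o' located at offset 'p' in journal file 'from' into 'to': every referenced data
 * object is decompressed if necessary and appended to the destination, and the entry's XOR hash is recalculated
 * according to the destination file's hash mode. */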
3795 int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
3796 uint64_t i, n;
3797 uint64_t q, xor_hash = 0;
3798 int r;
3799 EntryItem *items;
3800 dual_timestamp ts;
3801 const sd_id128_t *boot_id;
3802
3803 assert(from);
3804 assert(to);
3805 assert(o);
3806 assert(p);
3807
3808 if (!to->writable)
3809 return -EPERM;
3810
3811 ts.monotonic = le64toh(o->entry.monotonic);
3812 ts.realtime = le64toh(o->entry.realtime);
3813 boot_id = &o->entry.boot_id;
3814
3815 n = journal_file_entry_n_items(o);
3816 /* alloca() can't take 0, hence let's allocate at least one */
3817 items = newa(EntryItem, MAX(1u, n));
3818
3819 for (i = 0; i < n; i++) {
3820 uint64_t l, h;
3821 le64_t le_hash;
3822 size_t t;
3823 void *data;
3824 Object *u;
3825
3826 q = le64toh(o->entry.items[i].object_offset);
3827 le_hash = o->entry.items[i].hash;
3828
3829 r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
3830 if (r < 0)
3831 return r;
3832
3833 if (le_hash != o->data.hash)
3834 return -EBADMSG;
3835
3836 l = le64toh(READ_NOW(o->object.size));
3837 if (l < offsetof(Object, data.payload))
3838 return -EBADMSG;
3839
3840 l -= offsetof(Object, data.payload);
3841 t = (size_t) l;
3842
3843 /* We hit the limit on 32bit machines */
3844 if ((uint64_t) t != l)
3845 return -E2BIG;
3846
3847 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
3848 #if HAVE_COMPRESSION
3849 size_t rsize = 0;
3850
3851 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
3852 o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);
3853 if (r < 0)
3854 return r;
3855
3856 data = from->compress_buffer;
3857 l = rsize;
3858 #else
3859 return -EPROTONOSUPPORT;
3860 #endif
3861 } else
3862 data = o->data.payload;
3863
3864 r = journal_file_append_data(to, data, l, &u, &h);
3865 if (r < 0)
3866 return r;
3867
3868 if (JOURNAL_HEADER_KEYED_HASH(to->header))
3869 xor_hash ^= jenkins_hash64(data, l);
3870 else
3871 xor_hash ^= le64toh(u->data.hash);
3872
3873 items[i].object_offset = htole64(h);
3874 items[i].hash = u->data.hash;
3875
3876 r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
3877 if (r < 0)
3878 return r;
3879 }
3880
3881 r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n,
3882 NULL, NULL, NULL);
3883
3884 if (mmap_cache_got_sigbus(to->mmap, to->cache_fd))
3885 return -EIO;
3886
3887 return r;
3888 }
3889
3890 void journal_reset_metrics(JournalMetrics *m) {
3891 assert(m);
3892
3893 /* Set everything to "pick automatic values". */
3894
3895 *m = (JournalMetrics) {
3896 .min_use = (uint64_t) -1,
3897 .max_use = (uint64_t) -1,
3898 .min_size = (uint64_t) -1,
3899 .max_size = (uint64_t) -1,
3900 .keep_free = (uint64_t) -1,
3901 .n_max_files = (uint64_t) -1,
3902 };
3903 }
3904
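/* Replaces any metric still set to "pick automatic value" ((uint64_t) -1) with a default derived from the size
 * of the backing file system (see the percentages below), clamping everything to the compile-time bounds and
 * keeping the individual values mutually consistent. */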
3905 void journal_default_metrics(JournalMetrics *m, int fd) {
3906 char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
3907 struct statvfs ss;
3908 uint64_t fs_size = 0;
3909
3910 assert(m);
3911 assert(fd >= 0);
3912
3913 if (fstatvfs(fd, &ss) >= 0)
3914 fs_size = ss.f_frsize * ss.f_blocks;
3915 else
3916 log_debug_errno(errno, "Failed to determine disk size: %m");
3917
3918 if (m->max_use == (uint64_t) -1) {
3919
3920 if (fs_size > 0)
3921 m->max_use = CLAMP(PAGE_ALIGN(fs_size / 10), /* 10% of file system size */
3922 MAX_USE_LOWER, MAX_USE_UPPER);
3923 else
3924 m->max_use = MAX_USE_LOWER;
3925 } else {
3926 m->max_use = PAGE_ALIGN(m->max_use);
3927
3928 if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
3929 m->max_use = JOURNAL_FILE_SIZE_MIN*2;
3930 }
3931
3932 if (m->min_use == (uint64_t) -1) {
3933 if (fs_size > 0)
3934 m->min_use = CLAMP(PAGE_ALIGN(fs_size / 50), /* 2% of file system size */
3935 MIN_USE_LOW, MIN_USE_HIGH);
3936 else
3937 m->min_use = MIN_USE_LOW;
3938 }
3939
3940 if (m->min_use > m->max_use)
3941 m->min_use = m->max_use;
3942
3943 if (m->max_size == (uint64_t) -1)
3944 m->max_size = MIN(PAGE_ALIGN(m->max_use / 8), /* 8 chunks */
3945 MAX_SIZE_UPPER);
3946 else
3947 m->max_size = PAGE_ALIGN(m->max_size);
3948
3949 if (m->max_size != 0) {
3950 if (m->max_size < JOURNAL_FILE_SIZE_MIN)
3951 m->max_size = JOURNAL_FILE_SIZE_MIN;
3952
3953 if (m->max_use != 0 && m->max_size*2 > m->max_use)
3954 m->max_use = m->max_size*2;
3955 }
3956
3957 if (m->min_size == (uint64_t) -1)
3958 m->min_size = JOURNAL_FILE_SIZE_MIN;
3959 else
3960 m->min_size = CLAMP(PAGE_ALIGN(m->min_size),
3961 JOURNAL_FILE_SIZE_MIN,
3962 m->max_size ?: UINT64_MAX);
3963
3964 if (m->keep_free == (uint64_t) -1) {
3965 if (fs_size > 0)
3966 m->keep_free = MIN(PAGE_ALIGN(fs_size / 20), /* 5% of file system size */
3967 KEEP_FREE_UPPER);
3968 else
3969 m->keep_free = DEFAULT_KEEP_FREE;
3970 }
3971
3972 if (m->n_max_files == (uint64_t) -1)
3973 m->n_max_files = DEFAULT_N_MAX_FILES;
3974
3975 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
3976 format_bytes(a, sizeof(a), m->min_use),
3977 format_bytes(b, sizeof(b), m->max_use),
3978 format_bytes(c, sizeof(c), m->max_size),
3979 format_bytes(d, sizeof(d), m->min_size),
3980 format_bytes(e, sizeof(e), m->keep_free),
3981 m->n_max_files);
3982 }
3983
3984 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {
3985 assert(f);
3986 assert(f->header);
3987 assert(from || to);
3988
3989 if (from) {
3990 if (f->header->head_entry_realtime == 0)
3991 return -ENOENT;
3992
3993 *from = le64toh(f->header->head_entry_realtime);
3994 }
3995
3996 if (to) {
3997 if (f->header->tail_entry_realtime == 0)
3998 return -ENOENT;
3999
4000 *to = le64toh(f->header->tail_entry_realtime);
4001 }
4002
4003 return 1;
4004 }
4005
4006 int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
4007 Object *o;
4008 uint64_t p;
4009 int r;
4010
4011 assert(f);
4012 assert(from || to);
4013
4014 r = find_data_object_by_boot_id(f, boot_id, &o, &p);
4015 if (r <= 0)
4016 return r;
4017
4018 if (le64toh(o->data.n_entries) <= 0)
4019 return 0;
4020
4021 if (from) {
4022 r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
4023 if (r < 0)
4024 return r;
4025
4026 *from = le64toh(o->entry.monotonic);
4027 }
4028
4029 if (to) {
4030 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
4031 if (r < 0)
4032 return r;
4033
4034 r = generic_array_get_plus_one(f,
4035 le64toh(o->data.entry_offset),
4036 le64toh(o->data.entry_array_offset),
4037 le64toh(o->data.n_entries)-1,
4038 &o, NULL);
4039 if (r <= 0)
4040 return r;
4041
4042 *to = le64toh(o->entry.monotonic);
4043 }
4044
4045 return 1;
4046 }
4047
4048 bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {
4049 assert(f);
4050 assert(f->header);
4051
4052 /* If we gained new header fields we gained new features,
4053 * hence suggest a rotation */
4054 if (le64toh(f->header->header_size) < sizeof(Header)) {
4055 log_debug("%s uses an outdated header, suggesting rotation.", f->path);
4056 return true;
4057 }
4058
4059 /* Let's check if the hash tables grew over a certain fill level (75%, borrowing this value from
4060 * Java's hash table implementation), and if so suggest a rotation. To calculate the fill level we
4061 * need the n_data field, which only exists in newer versions. */
4062
4063 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
4064 if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
4065 log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
4066 f->path,
4067 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
4068 le64toh(f->header->n_data),
4069 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
4070 (unsigned long long) f->last_stat.st_size,
4071 f->last_stat.st_size / le64toh(f->header->n_data));
4072 return true;
4073 }
4074
4075 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
4076 if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
4077 log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
4078 f->path,
4079 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
4080 le64toh(f->header->n_fields),
4081 le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
4082 return true;
4083 }
4084
4085 /* If there are too many hash collisions somebody is most likely playing games with us. Hence, if our
4086 * longest chain is longer than some threshold, let's suggest rotation. */
4087 if (JOURNAL_HEADER_CONTAINS(f->header, data_hash_chain_depth) &&
4088 le64toh(f->header->data_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
4089 log_debug("Data hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
4090 f->path, le64toh(f->header->data_hash_chain_depth));
4091 return true;
4092 }
4093
4094 if (JOURNAL_HEADER_CONTAINS(f->header, field_hash_chain_depth) &&
4095 le64toh(f->header->field_hash_chain_depth) > HASH_CHAIN_DEPTH_MAX) {
4096                 log_debug("Field hash table of %s has deepest hash chain of length %" PRIu64 ", suggesting rotation.",
4097 f->path, le64toh(f->header->field_hash_chain_depth));
4098 return true;
4099 }
4100
4101 /* Are the data objects properly indexed by field objects? */
4102 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
4103 JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
4104 le64toh(f->header->n_data) > 0 &&
4105 le64toh(f->header->n_fields) == 0)
4106 return true;
4107
4108 if (max_file_usec > 0) {
4109 usec_t t, h;
4110
4111 h = le64toh(f->header->head_entry_realtime);
4112 t = now(CLOCK_REALTIME);
4113
4114 if (h > 0 && t > h + max_file_usec)
4115 return true;
4116 }
4117
4118 return false;
4119 }