thirdparty/systemd.git: src/journal/journal-file.c (at commit "journal: rename hash64() to jenkins_hash64()")
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <errno.h>
4 #include <fcntl.h>
5 #include <linux/fs.h>
6 #include <pthread.h>
7 #include <stddef.h>
8 #include <sys/mman.h>
9 #include <sys/statvfs.h>
10 #include <sys/uio.h>
11 #include <unistd.h>
12
13 #include "sd-event.h"
14
15 #include "alloc-util.h"
16 #include "btrfs-util.h"
17 #include "chattr-util.h"
18 #include "compress.h"
19 #include "fd-util.h"
20 #include "format-util.h"
21 #include "fs-util.h"
22 #include "journal-authenticate.h"
23 #include "journal-def.h"
24 #include "journal-file.h"
25 #include "lookup3.h"
26 #include "memory-util.h"
27 #include "path-util.h"
28 #include "random-util.h"
29 #include "set.h"
30 #include "sort-util.h"
31 #include "stat-util.h"
32 #include "string-util.h"
33 #include "strv.h"
34 #include "xattr-util.h"
35
36 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
37 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
38
39 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
40 #define MIN_COMPRESS_THRESHOLD (8ULL)
41
42 /* This is the minimum journal file size */
43 #define JOURNAL_FILE_SIZE_MIN (512 * 1024ULL) /* 512 KiB */
44
45 /* These are the lower and upper bounds if we deduce the max_use value
46 * from the file system size */
47 #define MAX_USE_LOWER (1 * 1024 * 1024ULL) /* 1 MiB */
48 #define MAX_USE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
49
50 /* Those are the lower and upper bounds for the minimal use limit,
51 * i.e. how much we'll use even if keep_free suggests otherwise. */
52 #define MIN_USE_LOW (1 * 1024 * 1024ULL) /* 1 MiB */
53 #define MIN_USE_HIGH (16 * 1024 * 1024ULL) /* 16 MiB */
54
55 /* This is the upper bound if we deduce max_size from max_use */
56 #define MAX_SIZE_UPPER (128 * 1024 * 1024ULL) /* 128 MiB */
57
58 /* This is the upper bound if we deduce the keep_free value from the
59 * file system size */
60 #define KEEP_FREE_UPPER (4 * 1024 * 1024 * 1024ULL) /* 4 GiB */
61
62 /* This is the keep_free value when we can't determine the system
63 * size */
64 #define DEFAULT_KEEP_FREE (1024 * 1024ULL) /* 1 MiB */
65
66 /* This is the default maximum number of journal files to keep around. */
67 #define DEFAULT_N_MAX_FILES 100
68
69 /* n_data was the first entry we added after the initial file format design */
70 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
71
72 /* How many entries to keep in the entry array chain cache at max */
73 #define CHAIN_CACHE_MAX 20
74
75 /* How much to increase the journal file size at once each time we allocate something new. */
76 #define FILE_SIZE_INCREASE (8 * 1024 * 1024ULL) /* 8 MiB */
77
78 /* Reread fstat() of the file for detecting deletions at least this often */
79 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
80
81 /* The mmap context to use for the header: we pick the one right above the last defined object type */
82 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
83
84 #ifdef __clang__
85 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
86 #endif
87
88 /* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
89 * As a result we use atomic operations on f->offline_state for inter-thread communications with
90 * journal_file_set_offline() and journal_file_set_online(). */
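/* Rough sketch of the offline state machine implemented by the functions below
 * (derived from the transitions in this file, not an authoritative spec):
 *
 *   JOINED -> SYNCING -(fsync data)-> OFFLINING -(write header state, fsync)-> DONE -(join)-> JOINED
 *
 * journal_file_set_online() flips SYNCING/AGAIN_* to CANCEL to abort a pending
 * offline, while a second journal_file_set_offline() on a mid-flight thread uses
 * AGAIN_FROM_SYNCING/AGAIN_FROM_OFFLINING to restart the sync instead of
 * spawning a second thread. */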
91 static void journal_file_set_offline_internal(JournalFile *f) {
92 assert(f);
93 assert(f->fd >= 0);
94 assert(f->header);
95
96 for (;;) {
97 switch (f->offline_state) {
98 case OFFLINE_CANCEL:
99 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
100 continue;
101 return;
102
103 case OFFLINE_AGAIN_FROM_SYNCING:
104 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
105 continue;
106 break;
107
108 case OFFLINE_AGAIN_FROM_OFFLINING:
109 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
110 continue;
111 break;
112
113 case OFFLINE_SYNCING:
114 (void) fsync(f->fd);
115
116 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
117 continue;
118
119 f->header->state = f->archive ? STATE_ARCHIVED : STATE_OFFLINE;
120 (void) fsync(f->fd);
121 break;
122
123 case OFFLINE_OFFLINING:
124 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
125 continue;
126 _fallthrough_;
127 case OFFLINE_DONE:
128 return;
129
130 case OFFLINE_JOINED:
131 log_debug("Unexpected offline state OFFLINE_JOINED for journal_file_set_offline_internal().");
132 return;
133 }
134 }
135 }
136
137 static void * journal_file_set_offline_thread(void *arg) {
138 JournalFile *f = arg;
139
140 (void) pthread_setname_np(pthread_self(), "journal-offline");
141
142 journal_file_set_offline_internal(f);
143
144 return NULL;
145 }
146
147 static int journal_file_set_offline_thread_join(JournalFile *f) {
148 int r;
149
150 assert(f);
151
152 if (f->offline_state == OFFLINE_JOINED)
153 return 0;
154
155 r = pthread_join(f->offline_thread, NULL);
156 if (r)
157 return -r;
158
159 f->offline_state = OFFLINE_JOINED;
160
161 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
162 return -EIO;
163
164 return 0;
165 }
166
167 /* Trigger a restart if the offline thread is mid-flight in a restartable state. */
168 static bool journal_file_set_offline_try_restart(JournalFile *f) {
169 for (;;) {
170 switch (f->offline_state) {
171 case OFFLINE_AGAIN_FROM_SYNCING:
172 case OFFLINE_AGAIN_FROM_OFFLINING:
173 return true;
174
175 case OFFLINE_CANCEL:
176 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
177 continue;
178 return true;
179
180 case OFFLINE_SYNCING:
181 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
182 continue;
183 return true;
184
185 case OFFLINE_OFFLINING:
186 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
187 continue;
188 return true;
189
190 default:
191 return false;
192 }
193 }
194 }
195
196 /* Sets a journal offline.
197 *
198 * If wait is false then an offline is dispatched in a separate thread for a
199 * subsequent journal_file_set_offline() or journal_file_set_online() of the
200 * same journal to synchronize with.
201 *
202 * If wait is true, then either an existing offline thread will be restarted
203 * and joined, or if none exists the offline is simply performed in this
204 * context without involving another thread.
205 */
206 int journal_file_set_offline(JournalFile *f, bool wait) {
207 bool restarted;
208 int r;
209
210 assert(f);
211
212 if (!f->writable)
213 return -EPERM;
214
215 if (f->fd < 0 || !f->header)
216 return -EINVAL;
217
218 /* An offlining journal is implicitly online and may modify f->header->state;
219 * hence we must also join any potentially lingering offline thread when not online. */
220 if (!journal_file_is_offlining(f) && f->header->state != STATE_ONLINE)
221 return journal_file_set_offline_thread_join(f);
222
223 /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
224 restarted = journal_file_set_offline_try_restart(f);
225 if ((restarted && wait) || !restarted) {
226 r = journal_file_set_offline_thread_join(f);
227 if (r < 0)
228 return r;
229 }
230
231 if (restarted)
232 return 0;
233
234 /* Initiate a new offline. */
235 f->offline_state = OFFLINE_SYNCING;
236
237 if (wait) /* Without using a thread if waiting. */
238 journal_file_set_offline_internal(f);
239 else {
240 sigset_t ss, saved_ss;
241 int k;
242
243 assert_se(sigfillset(&ss) >= 0);
244 /* Don't block SIGBUS since the offlining thread accesses a memory mapped file.
245 * Asynchronous SIGBUS signals can safely be handled by either thread. */
246 assert_se(sigdelset(&ss, SIGBUS) >= 0);
247
248 r = pthread_sigmask(SIG_BLOCK, &ss, &saved_ss);
249 if (r > 0)
250 return -r;
251
252 r = pthread_create(&f->offline_thread, NULL, journal_file_set_offline_thread, f);
253
254 k = pthread_sigmask(SIG_SETMASK, &saved_ss, NULL);
255 if (r > 0) {
256 f->offline_state = OFFLINE_JOINED;
257 return -r;
258 }
259 if (k > 0)
260 return -k;
261 }
262
263 return 0;
264 }
265
266 static int journal_file_set_online(JournalFile *f) {
267 bool wait = true;
268
269 assert(f);
270
271 if (!f->writable)
272 return -EPERM;
273
274 if (f->fd < 0 || !f->header)
275 return -EINVAL;
276
277 while (wait) {
278 switch (f->offline_state) {
279 case OFFLINE_JOINED:
280 /* No offline thread, no need to wait. */
281 wait = false;
282 break;
283
284 case OFFLINE_SYNCING:
285 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
286 continue;
287 /* Canceled syncing prior to offlining, no need to wait. */
288 wait = false;
289 break;
290
291 case OFFLINE_AGAIN_FROM_SYNCING:
292 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
293 continue;
294 /* Canceled restart from syncing, no need to wait. */
295 wait = false;
296 break;
297
298 case OFFLINE_AGAIN_FROM_OFFLINING:
299 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
300 continue;
301 /* Canceled restart from offlining, must wait for offlining to complete however. */
302 _fallthrough_;
303 default: {
304 int r;
305
306 r = journal_file_set_offline_thread_join(f);
307 if (r < 0)
308 return r;
309
310 wait = false;
311 break;
312 }
313 }
314 }
315
316 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
317 return -EIO;
318
319 switch (f->header->state) {
320 case STATE_ONLINE:
321 return 0;
322
323 case STATE_OFFLINE:
324 f->header->state = STATE_ONLINE;
325 (void) fsync(f->fd);
326 return 0;
327
328 default:
329 return -EINVAL;
330 }
331 }
332
333 bool journal_file_is_offlining(JournalFile *f) {
334 assert(f);
335
336 __sync_synchronize();
337
338 if (IN_SET(f->offline_state, OFFLINE_DONE, OFFLINE_JOINED))
339 return false;
340
341 return true;
342 }
343
344 JournalFile* journal_file_close(JournalFile *f) {
345 if (!f)
346 return NULL;
347
348 #if HAVE_GCRYPT
349 /* Write the final tag */
350 if (f->seal && f->writable) {
351 int r;
352
353 r = journal_file_append_tag(f);
354 if (r < 0)
355 log_error_errno(r, "Failed to append tag when closing journal: %m");
356 }
357 #endif
358
359 if (f->post_change_timer) {
360 if (sd_event_source_get_enabled(f->post_change_timer, NULL) > 0)
361 journal_file_post_change(f);
362
363 sd_event_source_disable_unref(f->post_change_timer);
364 }
365
366 journal_file_set_offline(f, true);
367
368 if (f->mmap && f->cache_fd)
369 mmap_cache_free_fd(f->mmap, f->cache_fd);
370
371 if (f->fd >= 0 && f->defrag_on_close) {
372
373 /* Be friendly to btrfs: turn COW back on again now,
374 * and defragment the file. We won't write to the file
375 * ever again, hence remove all fragmentation, and
376 * reenable all the good bits COW usually provides
377 * (such as data checksumming). */
378
379 (void) chattr_fd(f->fd, 0, FS_NOCOW_FL, NULL);
380 (void) btrfs_defrag_fd(f->fd);
381 }
382
383 if (f->close_fd)
384 safe_close(f->fd);
385 free(f->path);
386
387 mmap_cache_unref(f->mmap);
388
389 ordered_hashmap_free_free(f->chain_cache);
390
391 #if HAVE_XZ || HAVE_LZ4
392 free(f->compress_buffer);
393 #endif
394
395 #if HAVE_GCRYPT
396 if (f->fss_file)
397 munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
398 else
399 free(f->fsprg_state);
400
401 free(f->fsprg_seed);
402
403 if (f->hmac)
404 gcry_md_close(f->hmac);
405 #endif
406
407 return mfree(f);
408 }
409
410 static int journal_file_init_header(JournalFile *f, JournalFile *template) {
411 Header h = {};
412 ssize_t k;
413 int r;
414
415 assert(f);
416
417 memcpy(h.signature, HEADER_SIGNATURE, 8);
418 h.header_size = htole64(ALIGN64(sizeof(h)));
419
420 h.incompatible_flags |= htole32(
421 f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
422 f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4);
423
424 h.compatible_flags = htole32(
425 f->seal * HEADER_COMPATIBLE_SEALED);
426
427 r = sd_id128_randomize(&h.file_id);
428 if (r < 0)
429 return r;
430
431 if (template) {
432 h.seqnum_id = template->header->seqnum_id;
433 h.tail_entry_seqnum = template->header->tail_entry_seqnum;
434 } else
435 h.seqnum_id = h.file_id;
436
437 k = pwrite(f->fd, &h, sizeof(h), 0);
438 if (k < 0)
439 return -errno;
440
441 if (k != sizeof(h))
442 return -EIO;
443
444 return 0;
445 }
446
447 static int journal_file_refresh_header(JournalFile *f) {
448 int r;
449
450 assert(f);
451 assert(f->header);
452
453 r = sd_id128_get_machine(&f->header->machine_id);
454 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
455 /* We don't have a machine-id, let's continue without */
456 zero(f->header->machine_id);
457 else if (r < 0)
458 return r;
459
460 r = sd_id128_get_boot(&f->header->boot_id);
461 if (r < 0)
462 return r;
463
464 r = journal_file_set_online(f);
465
466 /* Sync the online state to disk */
467 (void) fsync(f->fd);
468
469 /* We likely just created a new file; also sync the directory this file is located in. */
470 (void) fsync_directory_of_file(f->fd);
471
472 return r;
473 }
474
475 static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
476 const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
477 supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
478 const char *type = compatible ? "compatible" : "incompatible";
479 uint32_t flags;
480
481 flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);
482
483 if (flags & ~supported) {
484 if (flags & ~any)
485 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
486 f->path, type, flags & ~any);
487 flags = (flags & any) & ~supported;
488 if (flags) {
489 const char* strv[3];
490 unsigned n = 0;
491 _cleanup_free_ char *t = NULL;
492
493 if (compatible && (flags & HEADER_COMPATIBLE_SEALED))
494 strv[n++] = "sealed";
495 if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ))
496 strv[n++] = "xz-compressed";
497 if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4))
498 strv[n++] = "lz4-compressed";
499 strv[n] = NULL;
500 assert(n < ELEMENTSOF(strv));
501
502 t = strv_join((char**) strv, ", ");
503 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
504 f->path, type, n > 1 ? "flags" : "flag", strnull(t));
505 }
506 return true;
507 }
508
509 return false;
510 }
511
512 static int journal_file_verify_header(JournalFile *f) {
513 uint64_t arena_size, header_size;
514
515 assert(f);
516 assert(f->header);
517
518 if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
519 return -EBADMSG;
520
521 /* In both read and write mode we refuse to open files with incompatible
522 * flags we don't know. */
523 if (warn_wrong_flags(f, false))
524 return -EPROTONOSUPPORT;
525
526 /* When opening for writing we refuse to open files with unsupported compatible flags, too. */
527 if (f->writable && warn_wrong_flags(f, true))
528 return -EPROTONOSUPPORT;
529
530 if (f->header->state >= _STATE_MAX)
531 return -EBADMSG;
532
533 header_size = le64toh(READ_NOW(f->header->header_size));
534
535 /* The first addition was n_data, so check that we are at least this large */
536 if (header_size < HEADER_SIZE_MIN)
537 return -EBADMSG;
538
539 if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
540 return -EBADMSG;
541
542 arena_size = le64toh(READ_NOW(f->header->arena_size));
543
544 if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
545 return -ENODATA;
546
547 if (le64toh(f->header->tail_object_offset) > header_size + arena_size)
548 return -ENODATA;
549
550 if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
551 !VALID64(le64toh(f->header->field_hash_table_offset)) ||
552 !VALID64(le64toh(f->header->tail_object_offset)) ||
553 !VALID64(le64toh(f->header->entry_array_offset)))
554 return -ENODATA;
555
556 if (f->writable) {
557 sd_id128_t machine_id;
558 uint8_t state;
559 int r;
560
561 r = sd_id128_get_machine(&machine_id);
562 if (r < 0)
563 return r;
564
565 if (!sd_id128_equal(machine_id, f->header->machine_id))
566 return -EHOSTDOWN;
567
568 state = f->header->state;
569
570 if (state == STATE_ARCHIVED)
571 return -ESHUTDOWN; /* Already archived */
572 else if (state == STATE_ONLINE)
573 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
574 "Journal file %s is already online. Assuming unclean closing.",
575 f->path);
576 else if (state != STATE_OFFLINE)
577 return log_debug_errno(SYNTHETIC_ERRNO(EBUSY),
578 "Journal file %s has unknown state %i.",
579 f->path, state);
580
581 if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
582 return -EBADMSG;
583
584 /* Don't permit appending to files from the future. Because otherwise the realtime timestamps wouldn't
585 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
586 * bisection. */
587 if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME))
588 return log_debug_errno(SYNTHETIC_ERRNO(ETXTBSY),
589 "Journal file %s is from the future, refusing to append new data to it that'd be older.",
590 f->path);
591 }
592
593 f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
594 f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);
595
596 f->seal = JOURNAL_HEADER_SEALED(f->header);
597
598 return 0;
599 }
600
601 int journal_file_fstat(JournalFile *f) {
602 int r;
603
604 assert(f);
605 assert(f->fd >= 0);
606
607 if (fstat(f->fd, &f->last_stat) < 0)
608 return -errno;
609
610 f->last_stat_usec = now(CLOCK_MONOTONIC);
611
612 /* Refuse dealing with files that aren't regular */
613 r = stat_verify_regular(&f->last_stat);
614 if (r < 0)
615 return r;
616
617 /* Refuse appending to files that are already deleted */
618 if (f->last_stat.st_nlink <= 0)
619 return -EIDRM;
620
621 return 0;
622 }
623
624 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
625 uint64_t old_size, new_size, old_header_size, old_arena_size;
626 int r;
627
628 assert(f);
629 assert(f->header);
630
631 /* We assume that this file is not sparse, and we know that for sure, since we always call
632 * posix_fallocate() ourselves */
633
634 if (size > PAGE_ALIGN_DOWN(UINT64_MAX) - offset)
635 return -EINVAL;
636
637 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
638 return -EIO;
639
640 old_header_size = le64toh(READ_NOW(f->header->header_size));
641 old_arena_size = le64toh(READ_NOW(f->header->arena_size));
642 if (old_arena_size > PAGE_ALIGN_DOWN(UINT64_MAX) - old_header_size)
643 return -EBADMSG;
644
645 old_size = old_header_size + old_arena_size;
646
647 new_size = MAX(PAGE_ALIGN(offset + size), old_header_size);
648
649 if (new_size <= old_size) {
650
651 /* We already pre-allocated enough space, but before
652 * we write to it, let's check with fstat() if the
653 * file got deleted, in order to make sure we don't throw
654 * away the data immediately. Don't check fstat() for
655 * all writes though, but only once every 5s (LAST_STAT_REFRESH_USEC). */
656
657 if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
658 return 0;
659
660 return journal_file_fstat(f);
661 }
662
663 /* Allocate more space. */
664
665 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
666 return -E2BIG;
667
668 if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
669 struct statvfs svfs;
670
671 if (fstatvfs(f->fd, &svfs) >= 0) {
672 uint64_t available;
673
674 available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);
675
676 if (new_size - old_size > available)
677 return -E2BIG;
678 }
679 }
680
681 /* Increase by larger blocks at once */
682 new_size = DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
683 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
684 new_size = f->metrics.max_size;
685
686 /* Note that the glibc fallocate() fallback is very
687 inefficient, hence we try to minimize the allocation area
688 as much as we can. */
689 r = posix_fallocate(f->fd, old_size, new_size - old_size);
690 if (r != 0)
691 return -r;
692
693 f->header->arena_size = htole64(new_size - old_header_size);
694
695 return journal_file_fstat(f);
696 }
697
698 static unsigned type_to_context(ObjectType type) {
699 /* One context for each type, plus one catch-all for the rest */
700 assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
701 assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
702 return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
703 }
704
705 static int journal_file_move_to(
706 JournalFile *f,
707 ObjectType type,
708 bool keep_always,
709 uint64_t offset,
710 uint64_t size,
711 void **ret,
712 size_t *ret_size) {
713
714 int r;
715
716 assert(f);
717 assert(ret);
718
719 if (size <= 0)
720 return -EINVAL;
721
722 if (size > UINT64_MAX - offset)
723 return -EBADMSG;
724
725 /* Avoid SIGBUS on invalid accesses */
726 if (offset + size > (uint64_t) f->last_stat.st_size) {
727 /* Hmm, out of range? Let's refresh the fstat() data
728 * first, before we trust that check. */
729
730 r = journal_file_fstat(f);
731 if (r < 0)
732 return r;
733
734 if (offset + size > (uint64_t) f->last_stat.st_size)
735 return -EADDRNOTAVAIL;
736 }
737
738 return mmap_cache_get(f->mmap, f->cache_fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret, ret_size);
739 }
740
741 static uint64_t minimum_header_size(Object *o) {
742
743 static const uint64_t table[] = {
744 [OBJECT_DATA] = sizeof(DataObject),
745 [OBJECT_FIELD] = sizeof(FieldObject),
746 [OBJECT_ENTRY] = sizeof(EntryObject),
747 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
748 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
749 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
750 [OBJECT_TAG] = sizeof(TagObject),
751 };
752
753 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
754 return sizeof(ObjectHeader);
755
756 return table[o->object.type];
757 }
758
759 /* Lightweight object checks. We want this to be fast, so that we won't
760 * slow down every journal_file_move_to_object() call too much. */
761 static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) {
762 assert(f);
763 assert(o);
764
765 switch (o->object.type) {
766
767 case OBJECT_DATA:
768 if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0))
769 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
770 "Bad n_entries: %" PRIu64 ": %" PRIu64,
771 le64toh(o->data.n_entries),
772 offset);
773
774 if (le64toh(o->object.size) <= offsetof(DataObject, payload))
775 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
776 "Bad object size (<= %zu): %" PRIu64 ": %" PRIu64,
777 offsetof(DataObject, payload),
778 le64toh(o->object.size),
779 offset);
780
781 if (!VALID64(le64toh(o->data.next_hash_offset)) ||
782 !VALID64(le64toh(o->data.next_field_offset)) ||
783 !VALID64(le64toh(o->data.entry_offset)) ||
784 !VALID64(le64toh(o->data.entry_array_offset)))
785 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
786 "Invalid offset, next_hash_offset=" OFSfmt ", next_field_offset=" OFSfmt ", entry_offset=" OFSfmt ", entry_array_offset=" OFSfmt ": %" PRIu64,
787 le64toh(o->data.next_hash_offset),
788 le64toh(o->data.next_field_offset),
789 le64toh(o->data.entry_offset),
790 le64toh(o->data.entry_array_offset),
791 offset);
792
793 break;
794
795 case OBJECT_FIELD:
796 if (le64toh(o->object.size) <= offsetof(FieldObject, payload))
797 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
798 "Bad field size (<= %zu): %" PRIu64 ": %" PRIu64,
799 offsetof(FieldObject, payload),
800 le64toh(o->object.size),
801 offset);
802
803 if (!VALID64(le64toh(o->field.next_hash_offset)) ||
804 !VALID64(le64toh(o->field.head_data_offset)))
805 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
806 "Invalid offset, next_hash_offset=" OFSfmt ", head_data_offset=" OFSfmt ": %" PRIu64,
807 le64toh(o->field.next_hash_offset),
808 le64toh(o->field.head_data_offset),
809 offset);
810 break;
811
812 case OBJECT_ENTRY: {
813 uint64_t sz;
814
815 sz = le64toh(READ_NOW(o->object.size));
816 if (sz < offsetof(EntryObject, items) ||
817 (sz - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0)
818 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
819 "Bad entry size (<= %zu): %" PRIu64 ": %" PRIu64,
820 offsetof(EntryObject, items),
821 sz,
822 offset);
823
824 if ((sz - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0)
825 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
826 "Invalid number of items in entry: %" PRIu64 ": %" PRIu64,
827 (sz - offsetof(EntryObject, items)) / sizeof(EntryItem),
828 offset);
829
830 if (le64toh(o->entry.seqnum) <= 0)
831 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
832 "Invalid entry seqnum: %" PRIx64 ": %" PRIu64,
833 le64toh(o->entry.seqnum),
834 offset);
835
836 if (!VALID_REALTIME(le64toh(o->entry.realtime)))
837 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
838 "Invalid entry realtime timestamp: %" PRIu64 ": %" PRIu64,
839 le64toh(o->entry.realtime),
840 offset);
841
842 if (!VALID_MONOTONIC(le64toh(o->entry.monotonic)))
843 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
844 "Invalid entry monotonic timestamp: %" PRIu64 ": %" PRIu64,
845 le64toh(o->entry.monotonic),
846 offset);
847
848 break;
849 }
850
851 case OBJECT_DATA_HASH_TABLE:
852 case OBJECT_FIELD_HASH_TABLE: {
853 uint64_t sz;
854
855 sz = le64toh(READ_NOW(o->object.size));
856 if (sz < offsetof(HashTableObject, items) ||
857 (sz - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
858 (sz - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0)
859 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
860 "Invalid %s hash table size: %" PRIu64 ": %" PRIu64,
861 o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
862 sz,
863 offset);
864
865 break;
866 }
867
868 case OBJECT_ENTRY_ARRAY: {
869 uint64_t sz;
870
871 sz = le64toh(READ_NOW(o->object.size));
872 if (sz < offsetof(EntryArrayObject, items) ||
873 (sz - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
874 (sz - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0)
875 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
876 "Invalid object entry array size: %" PRIu64 ": %" PRIu64,
877 sz,
878 offset);
879
880 if (!VALID64(le64toh(o->entry_array.next_entry_array_offset)))
881 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
882 "Invalid object entry array next_entry_array_offset: " OFSfmt ": %" PRIu64,
883 le64toh(o->entry_array.next_entry_array_offset),
884 offset);
885
886 break;
887 }
888
889 case OBJECT_TAG:
890 if (le64toh(o->object.size) != sizeof(TagObject))
891 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
892 "Invalid object tag size: %" PRIu64 ": %" PRIu64,
893 le64toh(o->object.size),
894 offset);
895
896 if (!VALID_EPOCH(le64toh(o->tag.epoch)))
897 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
898 "Invalid object tag epoch: %" PRIu64 ": %" PRIu64,
899 le64toh(o->tag.epoch), offset);
900
901 break;
902 }
903
904 return 0;
905 }
906
907 int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
908 int r;
909 void *t;
910 size_t tsize;
911 Object *o;
912 uint64_t s;
913
914 assert(f);
915 assert(ret);
916
917 /* Objects may only be located at multiples of 64 bits */
918 if (!VALID64(offset))
919 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
920 "Attempt to move to object at non-64bit boundary: %" PRIu64,
921 offset);
922
923 /* Objects may not be located in the file header */
924 if (offset < le64toh(f->header->header_size))
925 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
926 "Attempt to move to object located in file header: %" PRIu64,
927 offset);
928
929 r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t, &tsize);
930 if (r < 0)
931 return r;
932
933 o = (Object*) t;
934 s = le64toh(READ_NOW(o->object.size));
935
936 if (s == 0)
937 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
938 "Attempt to move to uninitialized object: %" PRIu64,
939 offset);
940 if (s < sizeof(ObjectHeader))
941 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
942 "Attempt to move to overly short object: %" PRIu64,
943 offset);
944
945 if (o->object.type <= OBJECT_UNUSED)
946 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
947 "Attempt to move to object with invalid type: %" PRIu64,
948 offset);
949
950 if (s < minimum_header_size(o))
951 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
952 "Attempt to move to truncated object: %" PRIu64,
953 offset);
954
955 if (type > OBJECT_UNUSED && o->object.type != type)
956 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
957 "Attempt to move to object of unexpected type: %" PRIu64,
958 offset);
959
960 if (s > tsize) {
961 r = journal_file_move_to(f, type, false, offset, s, &t, NULL);
962 if (r < 0)
963 return r;
964
965 o = (Object*) t;
966 }
967
968 r = journal_file_check_object(f, offset, o);
969 if (r < 0)
970 return r;
971
972 *ret = o;
973 return 0;
974 }
975
976 static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {
977 uint64_t r;
978
979 assert(f);
980 assert(f->header);
981
982 r = le64toh(f->header->tail_entry_seqnum) + 1;
983
984 if (seqnum) {
985 /* If an external seqnum counter was passed, we update
986 * both the local and the external one, and set it to
987 * the maximum of both */
988
989 if (*seqnum + 1 > r)
990 r = *seqnum + 1;
991
992 *seqnum = r;
993 }
994
995 f->header->tail_entry_seqnum = htole64(r);
996
997 if (f->header->head_entry_seqnum == 0)
998 f->header->head_entry_seqnum = htole64(r);
999
1000 return r;
1001 }
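/* Illustrative example of the reconciliation above: if the file's
 * tail_entry_seqnum is 10 but the caller-supplied *seqnum is already 15 (e.g.
 * because another journal file sharing the seqnum_id got ahead), the new entry
 * is assigned 16 and both the header counter and *seqnum are bumped to 16. */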
1002
1003 int journal_file_append_object(
1004 JournalFile *f,
1005 ObjectType type,
1006 uint64_t size,
1007 Object **ret,
1008 uint64_t *ret_offset) {
1009
1010 int r;
1011 uint64_t p;
1012 Object *tail, *o;
1013 void *t;
1014
1015 assert(f);
1016 assert(f->header);
1017 assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
1018 assert(size >= sizeof(ObjectHeader));
1019
1020 r = journal_file_set_online(f);
1021 if (r < 0)
1022 return r;
1023
1024 p = le64toh(f->header->tail_object_offset);
1025 if (p == 0)
1026 p = le64toh(f->header->header_size);
1027 else {
1028 uint64_t sz;
1029
1030 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
1031 if (r < 0)
1032 return r;
1033
1034 sz = le64toh(READ_NOW(tail->object.size));
1035 if (sz > UINT64_MAX - sizeof(uint64_t) + 1)
1036 return -EBADMSG;
1037
1038 sz = ALIGN64(sz);
1039 if (p > UINT64_MAX - sz)
1040 return -EBADMSG;
1041
1042 p += sz;
1043 }
1044
1045 r = journal_file_allocate(f, p, size);
1046 if (r < 0)
1047 return r;
1048
1049 r = journal_file_move_to(f, type, false, p, size, &t, NULL);
1050 if (r < 0)
1051 return r;
1052
1053 o = (Object*) t;
1054 o->object = (ObjectHeader) {
1055 .type = type,
1056 .size = htole64(size),
1057 };
1058
1059 f->header->tail_object_offset = htole64(p);
1060 f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
1061
1062 if (ret)
1063 *ret = o;
1064
1065 if (ret_offset)
1066 *ret_offset = p;
1067
1068 return 0;
1069 }
1070
1071 static int journal_file_setup_data_hash_table(JournalFile *f) {
1072 uint64_t s, p;
1073 Object *o;
1074 int r;
1075
1076 assert(f);
1077 assert(f->header);
1078
1079 /* We estimate that we need 1 hash table entry per 768 bytes
1080 of journal file and we want to make sure we never get
1081 beyond 75% fill level. Calculate the hash table size for
1082 the maximum file size based on these metrics. */
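/* Illustrative arithmetic for the formula below: with max_size = 128 MiB we
 * expect roughly 128 MiB / 768 ≈ 175k data objects; staying at or below 75%
 * fill means multiplying by 4/3, i.e. roughly 233k buckets, and s is that
 * bucket count times sizeof(HashItem). */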
1083
1084 s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
1085 if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
1086 s = DEFAULT_DATA_HASH_TABLE_SIZE;
1087
1088 log_debug("Reserving %"PRIu64" entries in data hash table.", s / sizeof(HashItem));
1089
1090 r = journal_file_append_object(f,
1091 OBJECT_DATA_HASH_TABLE,
1092 offsetof(Object, hash_table.items) + s,
1093 &o, &p);
1094 if (r < 0)
1095 return r;
1096
1097 memzero(o->hash_table.items, s);
1098
1099 f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1100 f->header->data_hash_table_size = htole64(s);
1101
1102 return 0;
1103 }
1104
1105 static int journal_file_setup_field_hash_table(JournalFile *f) {
1106 uint64_t s, p;
1107 Object *o;
1108 int r;
1109
1110 assert(f);
1111 assert(f->header);
1112
1113 /* We use a fixed-size hash table for the fields, as their
1114 * number should only grow very slowly */
1115
1116 s = DEFAULT_FIELD_HASH_TABLE_SIZE;
1117 log_debug("Reserving %"PRIu64" entries in field hash table.", s / sizeof(HashItem));
1118
1119 r = journal_file_append_object(f,
1120 OBJECT_FIELD_HASH_TABLE,
1121 offsetof(Object, hash_table.items) + s,
1122 &o, &p);
1123 if (r < 0)
1124 return r;
1125
1126 memzero(o->hash_table.items, s);
1127
1128 f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1129 f->header->field_hash_table_size = htole64(s);
1130
1131 return 0;
1132 }
1133
1134 int journal_file_map_data_hash_table(JournalFile *f) {
1135 uint64_t s, p;
1136 void *t;
1137 int r;
1138
1139 assert(f);
1140 assert(f->header);
1141
1142 if (f->data_hash_table)
1143 return 0;
1144
1145 p = le64toh(f->header->data_hash_table_offset);
1146 s = le64toh(f->header->data_hash_table_size);
1147
1148 r = journal_file_move_to(f,
1149 OBJECT_DATA_HASH_TABLE,
1150 true,
1151 p, s,
1152 &t, NULL);
1153 if (r < 0)
1154 return r;
1155
1156 f->data_hash_table = t;
1157 return 0;
1158 }
1159
1160 int journal_file_map_field_hash_table(JournalFile *f) {
1161 uint64_t s, p;
1162 void *t;
1163 int r;
1164
1165 assert(f);
1166 assert(f->header);
1167
1168 if (f->field_hash_table)
1169 return 0;
1170
1171 p = le64toh(f->header->field_hash_table_offset);
1172 s = le64toh(f->header->field_hash_table_size);
1173
1174 r = journal_file_move_to(f,
1175 OBJECT_FIELD_HASH_TABLE,
1176 true,
1177 p, s,
1178 &t, NULL);
1179 if (r < 0)
1180 return r;
1181
1182 f->field_hash_table = t;
1183 return 0;
1184 }
1185
1186 static int journal_file_link_field(
1187 JournalFile *f,
1188 Object *o,
1189 uint64_t offset,
1190 uint64_t hash) {
1191
1192 uint64_t p, h, m;
1193 int r;
1194
1195 assert(f);
1196 assert(f->header);
1197 assert(f->field_hash_table);
1198 assert(o);
1199 assert(offset > 0);
1200
1201 if (o->object.type != OBJECT_FIELD)
1202 return -EINVAL;
1203
1204 m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
1205 if (m <= 0)
1206 return -EBADMSG;
1207
1208 /* This might alter the window we are looking at */
1209 o->field.next_hash_offset = o->field.head_data_offset = 0;
1210
1211 h = hash % m;
1212 p = le64toh(f->field_hash_table[h].tail_hash_offset);
1213 if (p == 0)
1214 f->field_hash_table[h].head_hash_offset = htole64(offset);
1215 else {
1216 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1217 if (r < 0)
1218 return r;
1219
1220 o->field.next_hash_offset = htole64(offset);
1221 }
1222
1223 f->field_hash_table[h].tail_hash_offset = htole64(offset);
1224
1225 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
1226 f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
1227
1228 return 0;
1229 }
1230
1231 static int journal_file_link_data(
1232 JournalFile *f,
1233 Object *o,
1234 uint64_t offset,
1235 uint64_t hash) {
1236
1237 uint64_t p, h, m;
1238 int r;
1239
1240 assert(f);
1241 assert(f->header);
1242 assert(f->data_hash_table);
1243 assert(o);
1244 assert(offset > 0);
1245
1246 if (o->object.type != OBJECT_DATA)
1247 return -EINVAL;
1248
1249 m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
1250 if (m <= 0)
1251 return -EBADMSG;
1252
1253 /* This might alter the window we are looking at */
1254 o->data.next_hash_offset = o->data.next_field_offset = 0;
1255 o->data.entry_offset = o->data.entry_array_offset = 0;
1256 o->data.n_entries = 0;
1257
1258 h = hash % m;
1259 p = le64toh(f->data_hash_table[h].tail_hash_offset);
1260 if (p == 0)
1261 /* Only entry in the hash table is easy */
1262 f->data_hash_table[h].head_hash_offset = htole64(offset);
1263 else {
1264 /* Move back to the previous data object, to patch in
1265 * the pointer */
1266
1267 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1268 if (r < 0)
1269 return r;
1270
1271 o->data.next_hash_offset = htole64(offset);
1272 }
1273
1274 f->data_hash_table[h].tail_hash_offset = htole64(offset);
1275
1276 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
1277 f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
1278
1279 return 0;
1280 }
1281
1282 int journal_file_find_field_object_with_hash(
1283 JournalFile *f,
1284 const void *field, uint64_t size, uint64_t hash,
1285 Object **ret, uint64_t *ret_offset) {
1286
1287 uint64_t p, osize, h, m;
1288 int r;
1289
1290 assert(f);
1291 assert(f->header);
1292 assert(field && size > 0);
1293
1294 /* If the field hash table is empty, we can't find anything */
1295 if (le64toh(f->header->field_hash_table_size) <= 0)
1296 return 0;
1297
1298 /* Map the field hash table, if it isn't mapped yet. */
1299 r = journal_file_map_field_hash_table(f);
1300 if (r < 0)
1301 return r;
1302
1303 osize = offsetof(Object, field.payload) + size;
1304
1305 m = le64toh(READ_NOW(f->header->field_hash_table_size)) / sizeof(HashItem);
1306 if (m <= 0)
1307 return -EBADMSG;
1308
1309 h = hash % m;
1310 p = le64toh(f->field_hash_table[h].head_hash_offset);
1311
1312 while (p > 0) {
1313 Object *o;
1314
1315 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1316 if (r < 0)
1317 return r;
1318
1319 if (le64toh(o->field.hash) == hash &&
1320 le64toh(o->object.size) == osize &&
1321 memcmp(o->field.payload, field, size) == 0) {
1322
1323 if (ret)
1324 *ret = o;
1325 if (ret_offset)
1326 *ret_offset = p;
1327
1328 return 1;
1329 }
1330
1331 p = le64toh(o->field.next_hash_offset);
1332 }
1333
1334 return 0;
1335 }
1336
1337 int journal_file_find_field_object(
1338 JournalFile *f,
1339 const void *field, uint64_t size,
1340 Object **ret, uint64_t *ret_offset) {
1341
1342 uint64_t hash;
1343
1344 assert(f);
1345 assert(field && size > 0);
1346
1347 hash = jenkins_hash64(field, size);
1348
1349 return journal_file_find_field_object_with_hash(
1350 f,
1351 field, size, hash,
1352 ret, ret_offset);
1353 }
1354
1355 int journal_file_find_data_object_with_hash(
1356 JournalFile *f,
1357 const void *data, uint64_t size, uint64_t hash,
1358 Object **ret, uint64_t *ret_offset) {
1359
1360 uint64_t p, osize, h, m;
1361 int r;
1362
1363 assert(f);
1364 assert(f->header);
1365 assert(data || size == 0);
1366
1367 /* If there's no data hash table, then there's no entry. */
1368 if (le64toh(f->header->data_hash_table_size) <= 0)
1369 return 0;
1370
1371 /* Map the data hash table, if it isn't mapped yet. */
1372 r = journal_file_map_data_hash_table(f);
1373 if (r < 0)
1374 return r;
1375
1376 osize = offsetof(Object, data.payload) + size;
1377
1378 m = le64toh(READ_NOW(f->header->data_hash_table_size)) / sizeof(HashItem);
1379 if (m <= 0)
1380 return -EBADMSG;
1381
1382 h = hash % m;
1383 p = le64toh(f->data_hash_table[h].head_hash_offset);
1384
1385 while (p > 0) {
1386 Object *o;
1387
1388 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1389 if (r < 0)
1390 return r;
1391
1392 if (le64toh(o->data.hash) != hash)
1393 goto next;
1394
1395 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
1396 #if HAVE_XZ || HAVE_LZ4
1397 uint64_t l;
1398 size_t rsize = 0;
1399
1400 l = le64toh(READ_NOW(o->object.size));
1401 if (l <= offsetof(Object, data.payload))
1402 return -EBADMSG;
1403
1404 l -= offsetof(Object, data.payload);
1405
1406 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
1407 o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);
1408 if (r < 0)
1409 return r;
1410
1411 if (rsize == size &&
1412 memcmp(f->compress_buffer, data, size) == 0) {
1413
1414 if (ret)
1415 *ret = o;
1416
1417 if (ret_offset)
1418 *ret_offset = p;
1419
1420 return 1;
1421 }
1422 #else
1423 return -EPROTONOSUPPORT;
1424 #endif
1425 } else if (le64toh(o->object.size) == osize &&
1426 memcmp(o->data.payload, data, size) == 0) {
1427
1428 if (ret)
1429 *ret = o;
1430
1431 if (ret_offset)
1432 *ret_offset = p;
1433
1434 return 1;
1435 }
1436
1437 next:
1438 p = le64toh(o->data.next_hash_offset);
1439 }
1440
1441 return 0;
1442 }
1443
1444 int journal_file_find_data_object(
1445 JournalFile *f,
1446 const void *data, uint64_t size,
1447 Object **ret, uint64_t *ret_offset) {
1448
1449 uint64_t hash;
1450
1451 assert(f);
1452 assert(data || size == 0);
1453
1454 hash = jenkins_hash64(data, size);
1455
1456 return journal_file_find_data_object_with_hash(
1457 f,
1458 data, size, hash,
1459 ret, ret_offset);
1460 }
1461
1462 static int journal_file_append_field(
1463 JournalFile *f,
1464 const void *field, uint64_t size,
1465 Object **ret, uint64_t *ret_offset) {
1466
1467 uint64_t hash, p;
1468 uint64_t osize;
1469 Object *o;
1470 int r;
1471
1472 assert(f);
1473 assert(field && size > 0);
1474
1475 hash = jenkins_hash64(field, size);
1476
1477 r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
1478 if (r < 0)
1479 return r;
1480 else if (r > 0) {
1481
1482 if (ret)
1483 *ret = o;
1484
1485 if (ret_offset)
1486 *ret_offset = p;
1487
1488 return 0;
1489 }
1490
1491 osize = offsetof(Object, field.payload) + size;
1492 r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
1493 if (r < 0)
1494 return r;
1495
1496 o->field.hash = htole64(hash);
1497 memcpy(o->field.payload, field, size);
1498
1499 r = journal_file_link_field(f, o, p, hash);
1500 if (r < 0)
1501 return r;
1502
1503 /* The linking might have altered the window, so let's
1504 * refresh our pointer */
1505 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1506 if (r < 0)
1507 return r;
1508
1509 #if HAVE_GCRYPT
1510 r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
1511 if (r < 0)
1512 return r;
1513 #endif
1514
1515 if (ret)
1516 *ret = o;
1517
1518 if (ret_offset)
1519 *ret_offset = p;
1520
1521 return 0;
1522 }
1523
1524 static int journal_file_append_data(
1525 JournalFile *f,
1526 const void *data, uint64_t size,
1527 Object **ret, uint64_t *ret_offset) {
1528
1529 uint64_t hash, p;
1530 uint64_t osize;
1531 Object *o;
1532 int r, compression = 0;
1533 const void *eq;
1534
1535 assert(f);
1536 assert(data || size == 0);
1537
1538 hash = jenkins_hash64(data, size);
1539
1540 r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);
1541 if (r < 0)
1542 return r;
1543 if (r > 0) {
1544
1545 if (ret)
1546 *ret = o;
1547
1548 if (ret_offset)
1549 *ret_offset = p;
1550
1551 return 0;
1552 }
1553
1554 osize = offsetof(Object, data.payload) + size;
1555 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1556 if (r < 0)
1557 return r;
1558
1559 o->data.hash = htole64(hash);
1560
1561 #if HAVE_XZ || HAVE_LZ4
1562 if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
1563 size_t rsize = 0;
1564
1565 compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
1566
1567 if (compression >= 0) {
1568 o->object.size = htole64(offsetof(Object, data.payload) + rsize);
1569 o->object.flags |= compression;
1570
1571 log_debug("Compressed data object %"PRIu64" -> %zu using %s",
1572 size, rsize, object_compressed_to_string(compression));
1573 } else
1574 /* Compression didn't work, we don't really care why, let's continue without compression */
1575 compression = 0;
1576 }
1577 #endif
1578
1579 if (compression == 0)
1580 memcpy_safe(o->data.payload, data, size);
1581
1582 r = journal_file_link_data(f, o, p, hash);
1583 if (r < 0)
1584 return r;
1585
1586 #if HAVE_GCRYPT
1587 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1588 if (r < 0)
1589 return r;
1590 #endif
1591
1592 /* The linking might have altered the window, so let's
1593 * refresh our pointer */
1594 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1595 if (r < 0)
1596 return r;
1597
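/* Derive the field name from the payload, e.g. for "MESSAGE=hello" the field
 * object is "MESSAGE", i.e. everything up to but excluding the first '='. */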
1598 if (!data)
1599 eq = NULL;
1600 else
1601 eq = memchr(data, '=', size);
1602 if (eq && eq > data) {
1603 Object *fo = NULL;
1604 uint64_t fp;
1605
1606 /* Create field object ... */
1607 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
1608 if (r < 0)
1609 return r;
1610
1611 /* ... and link it in. */
1612 o->data.next_field_offset = fo->field.head_data_offset;
1613 fo->field.head_data_offset = htole64(p);
1614 }
1615
1616 if (ret)
1617 *ret = o;
1618
1619 if (ret_offset)
1620 *ret_offset = p;
1621
1622 return 0;
1623 }
1624
1625 uint64_t journal_file_entry_n_items(Object *o) {
1626 uint64_t sz;
1627 assert(o);
1628
1629 if (o->object.type != OBJECT_ENTRY)
1630 return 0;
1631
1632 sz = le64toh(READ_NOW(o->object.size));
1633 if (sz < offsetof(Object, entry.items))
1634 return 0;
1635
1636 return (sz - offsetof(Object, entry.items)) / sizeof(EntryItem);
1637 }
1638
1639 uint64_t journal_file_entry_array_n_items(Object *o) {
1640 uint64_t sz;
1641
1642 assert(o);
1643
1644 if (o->object.type != OBJECT_ENTRY_ARRAY)
1645 return 0;
1646
1647 sz = le64toh(READ_NOW(o->object.size));
1648 if (sz < offsetof(Object, entry_array.items))
1649 return 0;
1650
1651 return (sz - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
1652 }
1653
1654 uint64_t journal_file_hash_table_n_items(Object *o) {
1655 uint64_t sz;
1656
1657 assert(o);
1658
1659 if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
1660 return 0;
1661
1662 sz = le64toh(READ_NOW(o->object.size));
1663 if (sz < offsetof(Object, hash_table.items))
1664 return 0;
1665
1666 return (sz - offsetof(Object, hash_table.items)) / sizeof(HashItem);
1667 }
1668
1669 static int link_entry_into_array(JournalFile *f,
1670 le64_t *first,
1671 le64_t *idx,
1672 uint64_t p) {
1673 int r;
1674 uint64_t n = 0, ap = 0, q, i, a, hidx;
1675 Object *o;
1676
1677 assert(f);
1678 assert(f->header);
1679 assert(first);
1680 assert(idx);
1681 assert(p > 0);
1682
1683 a = le64toh(*first);
1684 i = hidx = le64toh(READ_NOW(*idx));
1685 while (a > 0) {
1686
1687 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
1688 if (r < 0)
1689 return r;
1690
1691 n = journal_file_entry_array_n_items(o);
1692 if (i < n) {
1693 o->entry_array.items[i] = htole64(p);
1694 *idx = htole64(hidx + 1);
1695 return 0;
1696 }
1697
1698 i -= n;
1699 ap = a;
1700 a = le64toh(o->entry_array.next_entry_array_offset);
1701 }
1702
1703 if (hidx > n)
1704 n = (hidx+1) * 2;
1705 else
1706 n = n * 2;
1707
1708 if (n < 4)
1709 n = 4;
1710
1711 r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
1712 offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
1713 &o, &q);
1714 if (r < 0)
1715 return r;
1716
1717 #if HAVE_GCRYPT
1718 r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
1719 if (r < 0)
1720 return r;
1721 #endif
1722
1723 o->entry_array.items[i] = htole64(p);
1724
1725 if (ap == 0)
1726 *first = htole64(q);
1727 else {
1728 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
1729 if (r < 0)
1730 return r;
1731
1732 o->entry_array.next_entry_array_offset = htole64(q);
1733 }
1734
1735 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
1736 f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
1737
1738 *idx = htole64(hidx + 1);
1739
1740 return 0;
1741 }
1742
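/* Like link_entry_into_array(), but with an inline "extra" slot holding the very
 * first entry, so that objects referenced by a single entry need no entry array
 * at all. Illustration: for a data object referenced by entries E1..E3,
 * entry_offset points at E1, the entry array chain holds E2 and E3, and the idx
 * counter (n_entries) ends up as 3. */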
1743 static int link_entry_into_array_plus_one(JournalFile *f,
1744 le64_t *extra,
1745 le64_t *first,
1746 le64_t *idx,
1747 uint64_t p) {
1748
1749 uint64_t hidx;
1750 int r;
1751
1752 assert(f);
1753 assert(extra);
1754 assert(first);
1755 assert(idx);
1756 assert(p > 0);
1757
1758 hidx = le64toh(READ_NOW(*idx));
1759 if (hidx == UINT64_MAX)
1760 return -EBADMSG;
1761 if (hidx == 0)
1762 *extra = htole64(p);
1763 else {
1764 le64_t i;
1765
1766 i = htole64(hidx - 1);
1767 r = link_entry_into_array(f, first, &i, p);
1768 if (r < 0)
1769 return r;
1770 }
1771
1772 *idx = htole64(hidx + 1);
1773 return 0;
1774 }
1775
1776 static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
1777 uint64_t p;
1778 int r;
1779
1780 assert(f);
1781 assert(o);
1782 assert(offset > 0);
1783
1784 p = le64toh(o->entry.items[i].object_offset);
1785 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1786 if (r < 0)
1787 return r;
1788
1789 return link_entry_into_array_plus_one(f,
1790 &o->data.entry_offset,
1791 &o->data.entry_array_offset,
1792 &o->data.n_entries,
1793 offset);
1794 }
1795
1796 static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
1797 uint64_t n, i;
1798 int r;
1799
1800 assert(f);
1801 assert(f->header);
1802 assert(o);
1803 assert(offset > 0);
1804
1805 if (o->object.type != OBJECT_ENTRY)
1806 return -EINVAL;
1807
1808 __sync_synchronize();
1809
1810 /* Link up the entry itself */
1811 r = link_entry_into_array(f,
1812 &f->header->entry_array_offset,
1813 &f->header->n_entries,
1814 offset);
1815 if (r < 0)
1816 return r;
1817
1818 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1819
1820 if (f->header->head_entry_realtime == 0)
1821 f->header->head_entry_realtime = o->entry.realtime;
1822
1823 f->header->tail_entry_realtime = o->entry.realtime;
1824 f->header->tail_entry_monotonic = o->entry.monotonic;
1825
1826 /* Link up the items */
1827 n = journal_file_entry_n_items(o);
1828 for (i = 0; i < n; i++) {
1829 r = journal_file_link_entry_item(f, o, offset, i);
1830 if (r < 0)
1831 return r;
1832 }
1833
1834 return 0;
1835 }
1836
1837 static int journal_file_append_entry_internal(
1838 JournalFile *f,
1839 const dual_timestamp *ts,
1840 const sd_id128_t *boot_id,
1841 uint64_t xor_hash,
1842 const EntryItem items[], unsigned n_items,
1843 uint64_t *seqnum,
1844 Object **ret, uint64_t *ret_offset) {
1845 uint64_t np;
1846 uint64_t osize;
1847 Object *o;
1848 int r;
1849
1850 assert(f);
1851 assert(f->header);
1852 assert(items || n_items == 0);
1853 assert(ts);
1854
1855 osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
1856
1857 r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
1858 if (r < 0)
1859 return r;
1860
1861 o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
1862 memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
1863 o->entry.realtime = htole64(ts->realtime);
1864 o->entry.monotonic = htole64(ts->monotonic);
1865 o->entry.xor_hash = htole64(xor_hash);
1866 if (boot_id)
1867 f->header->boot_id = *boot_id;
1868 o->entry.boot_id = f->header->boot_id;
1869
1870 #if HAVE_GCRYPT
1871 r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
1872 if (r < 0)
1873 return r;
1874 #endif
1875
1876 r = journal_file_link_entry(f, o, np);
1877 if (r < 0)
1878 return r;
1879
1880 if (ret)
1881 *ret = o;
1882
1883 if (ret_offset)
1884 *ret_offset = np;
1885
1886 return 0;
1887 }
1888
1889 void journal_file_post_change(JournalFile *f) {
1890 assert(f);
1891
1892 if (f->fd < 0)
1893 return;
1894
1895 /* inotify() does not receive IN_MODIFY events from file
1896 * accesses done via mmap(). Hence, after each access we
1897 * trigger IN_MODIFY by truncating the journal file to its
1898 * current size. */
1899
1900 __sync_synchronize();
1901
1902 if (ftruncate(f->fd, f->last_stat.st_size) < 0)
1903 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
1904 }
1905
1906 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
1907 assert(userdata);
1908
1909 journal_file_post_change(userdata);
1910
1911 return 1;
1912 }
1913
1914 static void schedule_post_change(JournalFile *f) {
1915 uint64_t now;
1916 int r;
1917
1918 assert(f);
1919 assert(f->post_change_timer);
1920
1921 r = sd_event_source_get_enabled(f->post_change_timer, NULL);
1922 if (r < 0) {
1923 log_debug_errno(r, "Failed to get ftruncate timer state: %m");
1924 goto fail;
1925 }
1926 if (r > 0)
1927 return;
1928
1929 r = sd_event_now(sd_event_source_get_event(f->post_change_timer), CLOCK_MONOTONIC, &now);
1930 if (r < 0) {
1931 log_debug_errno(r, "Failed to get clock's now for scheduling ftruncate: %m");
1932 goto fail;
1933 }
1934
1935 r = sd_event_source_set_time(f->post_change_timer, now + f->post_change_timer_period);
1936 if (r < 0) {
1937 log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
1938 goto fail;
1939 }
1940
1941 r = sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_ONESHOT);
1942 if (r < 0) {
1943 log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
1944 goto fail;
1945 }
1946
1947 return;
1948
1949 fail:
1950 /* On failure, let's simply post the change immediately. */
1951 journal_file_post_change(f);
1952 }
1953
1954 /* Enable coalesced change posting in a timer on the provided sd_event instance */
1955 int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
1956 _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
1957 int r;
1958
1959 assert(f);
1960 assert_return(!f->post_change_timer, -EINVAL);
1961 assert(e);
1962 assert(t);
1963
1964 r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
1965 if (r < 0)
1966 return r;
1967
1968 r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
1969 if (r < 0)
1970 return r;
1971
1972 f->post_change_timer = TAKE_PTR(timer);
1973 f->post_change_timer_period = t;
1974
1975 return r;
1976 }
1977
1978 static int entry_item_cmp(const EntryItem *a, const EntryItem *b) {
1979 return CMP(le64toh(a->object_offset), le64toh(b->object_offset));
1980 }
1981
1982 int journal_file_append_entry(
1983 JournalFile *f,
1984 const dual_timestamp *ts,
1985 const sd_id128_t *boot_id,
1986 const struct iovec iovec[], unsigned n_iovec,
1987 uint64_t *seqnum,
1988 Object **ret, uint64_t *ret_offset) {
1989
1990 unsigned i;
1991 EntryItem *items;
1992 int r;
1993 uint64_t xor_hash = 0;
1994 struct dual_timestamp _ts;
1995
1996 assert(f);
1997 assert(f->header);
1998 assert(iovec || n_iovec == 0);
1999
2000 if (ts) {
2001 if (!VALID_REALTIME(ts->realtime))
2002 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2003 "Invalid realtime timestamp %" PRIu64 ", refusing entry.",
2004 ts->realtime);
2005 if (!VALID_MONOTONIC(ts->monotonic))
2006 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2007 "Invalid monotonic timestamp %" PRIu64 ", refusing entry.",
2008 ts->monotonic);
2009 } else {
2010 dual_timestamp_get(&_ts);
2011 ts = &_ts;
2012 }
2013
2014 #if HAVE_GCRYPT
2015 r = journal_file_maybe_append_tag(f, ts->realtime);
2016 if (r < 0)
2017 return r;
2018 #endif
2019
2020 /* alloca() can't take 0, hence let's allocate at least one */
2021 items = newa(EntryItem, MAX(1u, n_iovec));
2022
2023 for (i = 0; i < n_iovec; i++) {
2024 uint64_t p;
2025 Object *o;
2026
2027 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
2028 if (r < 0)
2029 return r;
2030
2031 xor_hash ^= le64toh(o->data.hash);
2032 items[i].object_offset = htole64(p);
2033 items[i].hash = o->data.hash;
2034 }
2035
2036 /* Order by the position on disk, in order to improve seek
2037 * times for rotating media. */
2038 typesafe_qsort(items, n_iovec, entry_item_cmp);
2039
2040 r = journal_file_append_entry_internal(f, ts, boot_id, xor_hash, items, n_iovec, seqnum, ret, ret_offset);
2041
2042 /* If the memory mapping triggered a SIGBUS then we return an
2043 * IO error and ignore the error code passed down to us, since
2044 * it is very likely just an effect of a nullified replacement
2045 * mapping page */
2046
2047 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
2048 r = -EIO;
2049
2050 if (f->post_change_timer)
2051 schedule_post_change(f);
2052 else
2053 journal_file_post_change(f);
2054
2055 return r;
2056 }
2057
2058 typedef struct ChainCacheItem {
2059 uint64_t first; /* the array at the beginning of the chain */
2060 uint64_t array; /* the cached array */
2061 uint64_t begin; /* the first item in the cached array */
2062 uint64_t total; /* the total number of items in all arrays before this one in the chain */
2063 uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
2064 } ChainCacheItem;
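/* Note: the cache is keyed by 'first' (the offset of the first entry array in a
 * chain), so that generic_array_get() and generic_array_bisect() can resume a
 * walk of the same chain at the cached array instead of starting from scratch. */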
2065
2066 static void chain_cache_put(
2067 OrderedHashmap *h,
2068 ChainCacheItem *ci,
2069 uint64_t first,
2070 uint64_t array,
2071 uint64_t begin,
2072 uint64_t total,
2073 uint64_t last_index) {
2074
2075 if (!ci) {
2076 /* If the chain item to cache for this chain is the
2077 * first one, it's not worth caching anything */
2078 if (array == first)
2079 return;
2080
2081 if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
2082 ci = ordered_hashmap_steal_first(h);
2083 assert(ci);
2084 } else {
2085 ci = new(ChainCacheItem, 1);
2086 if (!ci)
2087 return;
2088 }
2089
2090 ci->first = first;
2091
2092 if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
2093 free(ci);
2094 return;
2095 }
2096 } else
2097 assert(ci->first == first);
2098
2099 ci->array = array;
2100 ci->begin = begin;
2101 ci->total = total;
2102 ci->last_index = last_index;
2103 }
2104
2105 static int generic_array_get(
2106 JournalFile *f,
2107 uint64_t first,
2108 uint64_t i,
2109 Object **ret, uint64_t *ret_offset) {
2110
2111 Object *o;
2112 uint64_t p = 0, a, t = 0;
2113 int r;
2114 ChainCacheItem *ci;
2115
2116 assert(f);
2117
2118 a = first;
2119
2120 /* Try the chain cache first */
2121 ci = ordered_hashmap_get(f->chain_cache, &first);
2122 if (ci && i > ci->total) {
2123 a = ci->array;
2124 i -= ci->total;
2125 t = ci->total;
2126 }
2127
2128 while (a > 0) {
2129 uint64_t k;
2130
2131 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
2132 if (r < 0)
2133 return r;
2134
2135 k = journal_file_entry_array_n_items(o);
2136 if (i < k) {
2137 p = le64toh(o->entry_array.items[i]);
2138 goto found;
2139 }
2140
2141 i -= k;
2142 t += k;
2143 a = le64toh(o->entry_array.next_entry_array_offset);
2144 }
2145
2146 return 0;
2147
2148 found:
2149 /* Let's cache this item for the next invocation */
2150 chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
2151
2152 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2153 if (r < 0)
2154 return r;
2155
2156 if (ret)
2157 *ret = o;
2158
2159 if (ret_offset)
2160 *ret_offset = p;
2161
2162 return 1;
2163 }
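/* A small worked example of the chain walk above, with made-up array sizes: if the chain consists
 * of entry arrays holding 4, 8 and 16 items, a request for global index i=10 skips the first
 * array (i becomes 6, t becomes 4) and resolves to local index 6 of the second array; the
 * position cached via chain_cache_put() then lets a later lookup start directly at that second
 * array instead of rewalking the chain from 'first'. */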
2164
2165 static int generic_array_get_plus_one(
2166 JournalFile *f,
2167 uint64_t extra,
2168 uint64_t first,
2169 uint64_t i,
2170 Object **ret, uint64_t *ret_offset) {
2171
2172 Object *o;
2173
2174 assert(f);
2175
2176 if (i == 0) {
2177 int r;
2178
2179 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2180 if (r < 0)
2181 return r;
2182
2183 if (ret)
2184 *ret = o;
2185
2186 if (ret_offset)
2187 *ret_offset = extra;
2188
2189 return 1;
2190 }
2191
2192 return generic_array_get(f, first, i-1, ret, ret_offset);
2193 }
2194
2195 enum {
2196 TEST_FOUND,
2197 TEST_LEFT,
2198 TEST_RIGHT
2199 };
2200
2201 static int generic_array_bisect(
2202 JournalFile *f,
2203 uint64_t first,
2204 uint64_t n,
2205 uint64_t needle,
2206 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2207 direction_t direction,
2208 Object **ret,
2209 uint64_t *ret_offset,
2210 uint64_t *ret_idx) {
2211
2212 uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
2213 bool subtract_one = false;
2214 Object *o, *array = NULL;
2215 int r;
2216 ChainCacheItem *ci;
2217
2218 assert(f);
2219 assert(test_object);
2220
2221 /* Start with the first array in the chain */
2222 a = first;
2223
2224 ci = ordered_hashmap_get(f->chain_cache, &first);
2225 if (ci && n > ci->total && ci->begin != 0) {
2226 /* Ah, we have iterated this bisection array chain
2227 * previously! Let's see if we can skip ahead in the
2228 * chain, as far as we got last time. But we can't jump
2229 * backwards in the chain, so let's check that
2230 * first. */
2231
2232 r = test_object(f, ci->begin, needle);
2233 if (r < 0)
2234 return r;
2235
2236 if (r == TEST_LEFT) {
2237 /* OK, what we are looking for is to the
2238 * right of the beginning of this EntryArray,
2239 * so let's jump straight to the previously
2240 * cached array in the chain */
2241
2242 a = ci->array;
2243 n -= ci->total;
2244 t = ci->total;
2245 last_index = ci->last_index;
2246 }
2247 }
2248
2249 while (a > 0) {
2250 uint64_t left, right, k, lp;
2251
2252 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
2253 if (r < 0)
2254 return r;
2255
2256 k = journal_file_entry_array_n_items(array);
2257 right = MIN(k, n);
2258 if (right <= 0)
2259 return 0;
2260
2261 i = right - 1;
2262 lp = p = le64toh(array->entry_array.items[i]);
2263 if (p <= 0)
2264 r = -EBADMSG;
2265 else
2266 r = test_object(f, p, needle);
2267 if (r == -EBADMSG) {
2268 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2269 n = i;
2270 continue;
2271 }
2272 if (r < 0)
2273 return r;
2274
2275 if (r == TEST_FOUND)
2276 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2277
2278 if (r == TEST_RIGHT) {
2279 left = 0;
2280 right -= 1;
2281
2282 if (last_index != (uint64_t) -1) {
2283 assert(last_index <= right);
2284
2285 /* If we cached the last index we
2286 * looked at, let's try not to jump
2287 * around too wildly and see if we can
2288 * limit the range to look at early on
2289 * to the immediate neighbors of the
2290 * last index we looked at. */
2291
2292 if (last_index > 0) {
2293 uint64_t x = last_index - 1;
2294
2295 p = le64toh(array->entry_array.items[x]);
2296 if (p <= 0)
2297 return -EBADMSG;
2298
2299 r = test_object(f, p, needle);
2300 if (r < 0)
2301 return r;
2302
2303 if (r == TEST_FOUND)
2304 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2305
2306 if (r == TEST_RIGHT)
2307 right = x;
2308 else
2309 left = x + 1;
2310 }
2311
2312 if (last_index < right) {
2313 uint64_t y = last_index + 1;
2314
2315 p = le64toh(array->entry_array.items[y]);
2316 if (p <= 0)
2317 return -EBADMSG;
2318
2319 r = test_object(f, p, needle);
2320 if (r < 0)
2321 return r;
2322
2323 if (r == TEST_FOUND)
2324 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2325
2326 if (r == TEST_RIGHT)
2327 right = y;
2328 else
2329 left = y + 1;
2330 }
2331 }
2332
2333 for (;;) {
2334 if (left == right) {
2335 if (direction == DIRECTION_UP)
2336 subtract_one = true;
2337
2338 i = left;
2339 goto found;
2340 }
2341
2342 assert(left < right);
2343 i = (left + right) / 2;
2344
2345 p = le64toh(array->entry_array.items[i]);
2346 if (p <= 0)
2347 r = -EBADMSG;
2348 else
2349 r = test_object(f, p, needle);
2350 if (r == -EBADMSG) {
2351 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2352 right = n = i;
2353 continue;
2354 }
2355 if (r < 0)
2356 return r;
2357
2358 if (r == TEST_FOUND)
2359 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2360
2361 if (r == TEST_RIGHT)
2362 right = i;
2363 else
2364 left = i + 1;
2365 }
2366 }
2367
2368 if (k >= n) {
2369 if (direction == DIRECTION_UP) {
2370 i = n;
2371 subtract_one = true;
2372 goto found;
2373 }
2374
2375 return 0;
2376 }
2377
2378 last_p = lp;
2379
2380 n -= k;
2381 t += k;
2382 last_index = (uint64_t) -1;
2383 a = le64toh(array->entry_array.next_entry_array_offset);
2384 }
2385
2386 return 0;
2387
2388 found:
2389 if (subtract_one && t == 0 && i == 0)
2390 return 0;
2391
2392 /* Let's cache this item for the next invocation */
2393 chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);
2394
2395 if (subtract_one && i == 0)
2396 p = last_p;
2397 else if (subtract_one)
2398 p = le64toh(array->entry_array.items[i-1]);
2399 else
2400 p = le64toh(array->entry_array.items[i]);
2401
2402 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2403 if (r < 0)
2404 return r;
2405
2406 if (ret)
2407 *ret = o;
2408
2409 if (ret_offset)
2410 *ret_offset = p;
2411
2412 if (ret_idx)
2413 *ret_idx = t + i + (subtract_one ? -1 : 0);
2414
2415 return 1;
2416 }
2417
2418 static int generic_array_bisect_plus_one(
2419 JournalFile *f,
2420 uint64_t extra,
2421 uint64_t first,
2422 uint64_t n,
2423 uint64_t needle,
2424 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2425 direction_t direction,
2426 Object **ret,
2427 uint64_t *ret_offset,
2428 uint64_t *ret_idx) {
2429
2430 int r;
2431 bool step_back = false;
2432 Object *o;
2433
2434 assert(f);
2435 assert(test_object);
2436
2437 if (n <= 0)
2438 return 0;
2439
2440 /* This bisects the array in object 'first', but first checks
2441 * the extra entry pointed to by 'extra' */
2442 r = test_object(f, extra, needle);
2443 if (r < 0)
2444 return r;
2445
2446 if (r == TEST_FOUND)
2447 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2448
2449 /* If we are looking with DIRECTION_UP then we need to first
2450 check whether the actual array contains a matching entry, and
2451 return the last one of those. But if there isn't any we need
2452 to return this one instead. Hence remember this, and return
2453 it below. */
2454 if (r == TEST_LEFT)
2455 step_back = direction == DIRECTION_UP;
2456
2457 if (r == TEST_RIGHT) {
2458 if (direction == DIRECTION_DOWN)
2459 goto found;
2460 else
2461 return 0;
2462 }
2463
2464 r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, ret_offset, ret_idx);
2465
2466 if (r == 0 && step_back)
2467 goto found;
2468
2469 if (r > 0 && ret_idx)
2470 (*ret_idx)++;
2471
2472 return r;
2473
2474 found:
2475 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2476 if (r < 0)
2477 return r;
2478
2479 if (ret)
2480 *ret = o;
2481
2482 if (ret_offset)
2483 *ret_offset = extra;
2484
2485 if (ret_idx)
2486 *ret_idx = 0;
2487
2488 return 1;
2489 }
2490
2491 _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
2492 assert(f);
2493 assert(p > 0);
2494
2495 if (p == needle)
2496 return TEST_FOUND;
2497 else if (p < needle)
2498 return TEST_LEFT;
2499 else
2500 return TEST_RIGHT;
2501 }
2502
2503 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
2504 uint64_t sq;
2505 Object *o;
2506 int r;
2507
2508 assert(f);
2509 assert(p > 0);
2510
2511 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2512 if (r < 0)
2513 return r;
2514
2515 sq = le64toh(READ_NOW(o->entry.seqnum));
2516 if (sq == needle)
2517 return TEST_FOUND;
2518 else if (sq < needle)
2519 return TEST_LEFT;
2520 else
2521 return TEST_RIGHT;
2522 }
2523
2524 int journal_file_move_to_entry_by_seqnum(
2525 JournalFile *f,
2526 uint64_t seqnum,
2527 direction_t direction,
2528 Object **ret,
2529 uint64_t *ret_offset) {
2530 assert(f);
2531 assert(f->header);
2532
2533 return generic_array_bisect(
2534 f,
2535 le64toh(f->header->entry_array_offset),
2536 le64toh(f->header->n_entries),
2537 seqnum,
2538 test_object_seqnum,
2539 direction,
2540 ret, ret_offset, NULL);
2541 }
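/* A minimal seek sketch, assuming 'f' is an open JournalFile and 'target' is the seqnum we are
 * looking for: with DIRECTION_DOWN this positions us at the first entry whose seqnum is not below
 * 'target' (with DIRECTION_UP, at the last entry not above it), and remembers that position:
 *
 *     Object *o;
 *     uint64_t p;
 *     int r;
 *
 *     r = journal_file_move_to_entry_by_seqnum(f, target, DIRECTION_DOWN, &o, &p);
 *     if (r < 0)
 *             return r;
 *     if (r == 0)
 *             return -ENOENT;
 *
 *     journal_file_save_location(f, o, p);
 */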
2542
2543 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
2544 Object *o;
2545 uint64_t rt;
2546 int r;
2547
2548 assert(f);
2549 assert(p > 0);
2550
2551 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2552 if (r < 0)
2553 return r;
2554
2555 rt = le64toh(READ_NOW(o->entry.realtime));
2556 if (rt == needle)
2557 return TEST_FOUND;
2558 else if (rt < needle)
2559 return TEST_LEFT;
2560 else
2561 return TEST_RIGHT;
2562 }
2563
2564 int journal_file_move_to_entry_by_realtime(
2565 JournalFile *f,
2566 uint64_t realtime,
2567 direction_t direction,
2568 Object **ret,
2569 uint64_t *ret_offset) {
2570 assert(f);
2571 assert(f->header);
2572
2573 return generic_array_bisect(
2574 f,
2575 le64toh(f->header->entry_array_offset),
2576 le64toh(f->header->n_entries),
2577 realtime,
2578 test_object_realtime,
2579 direction,
2580 ret, ret_offset, NULL);
2581 }
2582
2583 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
2584 Object *o;
2585 uint64_t m;
2586 int r;
2587
2588 assert(f);
2589 assert(p > 0);
2590
2591 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2592 if (r < 0)
2593 return r;
2594
2595 m = le64toh(READ_NOW(o->entry.monotonic));
2596 if (m == needle)
2597 return TEST_FOUND;
2598 else if (m < needle)
2599 return TEST_LEFT;
2600 else
2601 return TEST_RIGHT;
2602 }
2603
2604 static int find_data_object_by_boot_id(
2605 JournalFile *f,
2606 sd_id128_t boot_id,
2607 Object **o,
2608 uint64_t *b) {
2609
2610 char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
2611
2612 sd_id128_to_string(boot_id, t + 9);
2613 return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);
2614 }
2615
2616 int journal_file_move_to_entry_by_monotonic(
2617 JournalFile *f,
2618 sd_id128_t boot_id,
2619 uint64_t monotonic,
2620 direction_t direction,
2621 Object **ret,
2622 uint64_t *ret_offset) {
2623
2624 Object *o;
2625 int r;
2626
2627 assert(f);
2628
2629 r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
2630 if (r < 0)
2631 return r;
2632 if (r == 0)
2633 return -ENOENT;
2634
2635 return generic_array_bisect_plus_one(
2636 f,
2637 le64toh(o->data.entry_offset),
2638 le64toh(o->data.entry_array_offset),
2639 le64toh(o->data.n_entries),
2640 monotonic,
2641 test_object_monotonic,
2642 direction,
2643 ret, ret_offset, NULL);
2644 }
2645
2646 void journal_file_reset_location(JournalFile *f) {
2647 f->location_type = LOCATION_HEAD;
2648 f->current_offset = 0;
2649 f->current_seqnum = 0;
2650 f->current_realtime = 0;
2651 f->current_monotonic = 0;
2652 zero(f->current_boot_id);
2653 f->current_xor_hash = 0;
2654 }
2655
2656 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
2657 f->location_type = LOCATION_SEEK;
2658 f->current_offset = offset;
2659 f->current_seqnum = le64toh(o->entry.seqnum);
2660 f->current_realtime = le64toh(o->entry.realtime);
2661 f->current_monotonic = le64toh(o->entry.monotonic);
2662 f->current_boot_id = o->entry.boot_id;
2663 f->current_xor_hash = le64toh(o->entry.xor_hash);
2664 }
2665
2666 int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
2667 int r;
2668
2669 assert(af);
2670 assert(af->header);
2671 assert(bf);
2672 assert(bf->header);
2673 assert(af->location_type == LOCATION_SEEK);
2674 assert(bf->location_type == LOCATION_SEEK);
2675
2676 /* If contents and timestamps match, these entries are
2677 * identical, even if the seqnum does not match */
2678 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
2679 af->current_monotonic == bf->current_monotonic &&
2680 af->current_realtime == bf->current_realtime &&
2681 af->current_xor_hash == bf->current_xor_hash)
2682 return 0;
2683
2684 if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
2685
2686 /* If this is from the same seqnum source, compare
2687 * seqnums */
2688 r = CMP(af->current_seqnum, bf->current_seqnum);
2689 if (r != 0)
2690 return r;
2691
2692 /* Wow! This is weird, different data but the same
2693 * seqnums? Something is borked, but let's make the
2694 * best of it and compare by time. */
2695 }
2696
2697 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {
2698
2699 /* If the boot id matches, compare monotonic time */
2700 r = CMP(af->current_monotonic, bf->current_monotonic);
2701 if (r != 0)
2702 return r;
2703 }
2704
2705 /* Otherwise, compare UTC time */
2706 r = CMP(af->current_realtime, bf->current_realtime);
2707 if (r != 0)
2708 return r;
2709
2710 /* Finally, compare by contents */
2711 return CMP(af->current_xor_hash, bf->current_xor_hash);
2712 }
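/* A small interleaving sketch, assuming both files carry a saved LOCATION_SEEK position (see
 * journal_file_save_location() above): when merging two journals, the file whose current entry
 * sorts earlier is the one to read from next.
 *
 *     JournalFile *next = journal_file_compare_locations(a, b) <= 0 ? a : b;
 */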
2713
2714 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2715
2716 /* Increase or decrease the specified index, in the right direction. */
2717
2718 if (direction == DIRECTION_DOWN) {
2719 if (*i >= n - 1)
2720 return 0;
2721
2722 (*i) ++;
2723 } else {
2724 if (*i <= 0)
2725 return 0;
2726
2727 (*i) --;
2728 }
2729
2730 return 1;
2731 }
2732
2733 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
2734
2735 /* Consider it an error if either of the two offsets is uninitialized */
2736 if (old_offset == 0 || new_offset == 0)
2737 return false;
2738
2739 /* If we go down, the new offset must be larger than the old one. */
2740 return direction == DIRECTION_DOWN ?
2741 new_offset > old_offset :
2742 new_offset < old_offset;
2743 }
2744
2745 int journal_file_next_entry(
2746 JournalFile *f,
2747 uint64_t p,
2748 direction_t direction,
2749 Object **ret, uint64_t *ret_offset) {
2750
2751 uint64_t i, n, ofs;
2752 int r;
2753
2754 assert(f);
2755 assert(f->header);
2756
2757 n = le64toh(READ_NOW(f->header->n_entries));
2758 if (n <= 0)
2759 return 0;
2760
2761 if (p == 0)
2762 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2763 else {
2764 r = generic_array_bisect(f,
2765 le64toh(f->header->entry_array_offset),
2766 le64toh(f->header->n_entries),
2767 p,
2768 test_object_offset,
2769 DIRECTION_DOWN,
2770 NULL, NULL,
2771 &i);
2772 if (r <= 0)
2773 return r;
2774
2775 r = bump_array_index(&i, direction, n);
2776 if (r <= 0)
2777 return r;
2778 }
2779
2780 /* And jump to it */
2781 for (;;) {
2782 r = generic_array_get(f,
2783 le64toh(f->header->entry_array_offset),
2784 i,
2785 ret, &ofs);
2786 if (r > 0)
2787 break;
2788 if (r != -EBADMSG)
2789 return r;
2790
2791 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2792 * the next one might work for us instead. */
2793 log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
2794
2795 r = bump_array_index(&i, direction, n);
2796 if (r <= 0)
2797 return r;
2798 }
2799
2800 /* Ensure our array is properly ordered. */
2801 if (p > 0 && !check_properly_ordered(ofs, p, direction))
2802 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2803 "%s: entry array not properly ordered at entry %" PRIu64,
2804 f->path, i);
2805
2806 if (ret_offset)
2807 *ret_offset = ofs;
2808
2809 return 1;
2810 }
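/* A minimal iteration sketch, assuming 'f' is an open JournalFile: walk all entries from the head
 * downwards by feeding each returned offset back in as the new starting point 'p'; a return value
 * of 0 means the end was reached.
 *
 *     Object *o;
 *     uint64_t p = 0;
 *     int r;
 *
 *     for (;;) {
 *             r = journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p);
 *             if (r < 0)
 *                     return r;
 *             if (r == 0)
 *                     break;
 *     }
 */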
2811
2812 int journal_file_next_entry_for_data(
2813 JournalFile *f,
2814 Object *o, uint64_t p,
2815 uint64_t data_offset,
2816 direction_t direction,
2817 Object **ret, uint64_t *ret_offset) {
2818
2819 uint64_t i, n, ofs;
2820 Object *d;
2821 int r;
2822
2823 assert(f);
2824 assert(p > 0 || !o);
2825
2826 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2827 if (r < 0)
2828 return r;
2829
2830 n = le64toh(READ_NOW(d->data.n_entries));
2831 if (n <= 0)
2832 return n;
2833
2834 if (!o)
2835 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2836 else {
2837 if (o->object.type != OBJECT_ENTRY)
2838 return -EINVAL;
2839
2840 r = generic_array_bisect_plus_one(f,
2841 le64toh(d->data.entry_offset),
2842 le64toh(d->data.entry_array_offset),
2843 le64toh(d->data.n_entries),
2844 p,
2845 test_object_offset,
2846 DIRECTION_DOWN,
2847 NULL, NULL,
2848 &i);
2849
2850 if (r <= 0)
2851 return r;
2852
2853 r = bump_array_index(&i, direction, n);
2854 if (r <= 0)
2855 return r;
2856 }
2857
2858 for (;;) {
2859 r = generic_array_get_plus_one(f,
2860 le64toh(d->data.entry_offset),
2861 le64toh(d->data.entry_array_offset),
2862 i,
2863 ret, &ofs);
2864 if (r > 0)
2865 break;
2866 if (r != -EBADMSG)
2867 return r;
2868
2869 log_debug_errno(r, "Data entry item %" PRIu64 " is bad, skipping over it.", i);
2870
2871 r = bump_array_index(&i, direction, n);
2872 if (r <= 0)
2873 return r;
2874 }
2875
2876 /* Ensure our array is properly ordered. */
2877 if (p > 0 && !check_properly_ordered(ofs, p, direction))
2878 return log_debug_errno(SYNTHETIC_ERRNO(EBADMSG),
2879 "%s: data entry array not properly ordered at entry %" PRIu64,
2880 f->path, i);
2881
2882 if (ret_offset)
2883 *ret_offset = ofs;
2884
2885 return 1;
2886 }
2887
2888 int journal_file_move_to_entry_by_offset_for_data(
2889 JournalFile *f,
2890 uint64_t data_offset,
2891 uint64_t p,
2892 direction_t direction,
2893 Object **ret, uint64_t *ret_offset) {
2894
2895 int r;
2896 Object *d;
2897
2898 assert(f);
2899
2900 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2901 if (r < 0)
2902 return r;
2903
2904 return generic_array_bisect_plus_one(
2905 f,
2906 le64toh(d->data.entry_offset),
2907 le64toh(d->data.entry_array_offset),
2908 le64toh(d->data.n_entries),
2909 p,
2910 test_object_offset,
2911 direction,
2912 ret, ret_offset, NULL);
2913 }
2914
2915 int journal_file_move_to_entry_by_monotonic_for_data(
2916 JournalFile *f,
2917 uint64_t data_offset,
2918 sd_id128_t boot_id,
2919 uint64_t monotonic,
2920 direction_t direction,
2921 Object **ret, uint64_t *ret_offset) {
2922
2923 Object *o, *d;
2924 int r;
2925 uint64_t b, z;
2926
2927 assert(f);
2928
2929 /* First, seek by time */
2930 r = find_data_object_by_boot_id(f, boot_id, &o, &b);
2931 if (r < 0)
2932 return r;
2933 if (r == 0)
2934 return -ENOENT;
2935
2936 r = generic_array_bisect_plus_one(f,
2937 le64toh(o->data.entry_offset),
2938 le64toh(o->data.entry_array_offset),
2939 le64toh(o->data.n_entries),
2940 monotonic,
2941 test_object_monotonic,
2942 direction,
2943 NULL, &z, NULL);
2944 if (r <= 0)
2945 return r;
2946
2947 /* And now, continue seeking until we find an entry that
2948 * exists in both bisection arrays */
2949
2950 for (;;) {
2951 Object *qo;
2952 uint64_t p, q;
2953
2954 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2955 if (r < 0)
2956 return r;
2957
2958 r = generic_array_bisect_plus_one(f,
2959 le64toh(d->data.entry_offset),
2960 le64toh(d->data.entry_array_offset),
2961 le64toh(d->data.n_entries),
2962 z,
2963 test_object_offset,
2964 direction,
2965 NULL, &p, NULL);
2966 if (r <= 0)
2967 return r;
2968
2969 r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
2970 if (r < 0)
2971 return r;
2972
2973 r = generic_array_bisect_plus_one(f,
2974 le64toh(o->data.entry_offset),
2975 le64toh(o->data.entry_array_offset),
2976 le64toh(o->data.n_entries),
2977 p,
2978 test_object_offset,
2979 direction,
2980 &qo, &q, NULL);
2981
2982 if (r <= 0)
2983 return r;
2984
2985 if (p == q) {
2986 if (ret)
2987 *ret = qo;
2988 if (ret_offset)
2989 *ret_offset = q;
2990
2991 return 1;
2992 }
2993
2994 z = q;
2995 }
2996 }
2997
2998 int journal_file_move_to_entry_by_seqnum_for_data(
2999 JournalFile *f,
3000 uint64_t data_offset,
3001 uint64_t seqnum,
3002 direction_t direction,
3003 Object **ret, uint64_t *ret_offset) {
3004
3005 Object *d;
3006 int r;
3007
3008 assert(f);
3009
3010 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3011 if (r < 0)
3012 return r;
3013
3014 return generic_array_bisect_plus_one(
3015 f,
3016 le64toh(d->data.entry_offset),
3017 le64toh(d->data.entry_array_offset),
3018 le64toh(d->data.n_entries),
3019 seqnum,
3020 test_object_seqnum,
3021 direction,
3022 ret, ret_offset, NULL);
3023 }
3024
3025 int journal_file_move_to_entry_by_realtime_for_data(
3026 JournalFile *f,
3027 uint64_t data_offset,
3028 uint64_t realtime,
3029 direction_t direction,
3030 Object **ret, uint64_t *ret_offset) {
3031
3032 Object *d;
3033 int r;
3034
3035 assert(f);
3036
3037 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3038 if (r < 0)
3039 return r;
3040
3041 return generic_array_bisect_plus_one(
3042 f,
3043 le64toh(d->data.entry_offset),
3044 le64toh(d->data.entry_array_offset),
3045 le64toh(d->data.n_entries),
3046 realtime,
3047 test_object_realtime,
3048 direction,
3049 ret, ret_offset, NULL);
3050 }
3051
3052 void journal_file_dump(JournalFile *f) {
3053 Object *o;
3054 int r;
3055 uint64_t p;
3056
3057 assert(f);
3058 assert(f->header);
3059
3060 journal_file_print_header(f);
3061
3062 p = le64toh(READ_NOW(f->header->header_size));
3063 while (p != 0) {
3064 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
3065 if (r < 0)
3066 goto fail;
3067
3068 switch (o->object.type) {
3069
3070 case OBJECT_UNUSED:
3071 printf("Type: OBJECT_UNUSED\n");
3072 break;
3073
3074 case OBJECT_DATA:
3075 printf("Type: OBJECT_DATA\n");
3076 break;
3077
3078 case OBJECT_FIELD:
3079 printf("Type: OBJECT_FIELD\n");
3080 break;
3081
3082 case OBJECT_ENTRY:
3083 printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
3084 le64toh(o->entry.seqnum),
3085 le64toh(o->entry.monotonic),
3086 le64toh(o->entry.realtime));
3087 break;
3088
3089 case OBJECT_FIELD_HASH_TABLE:
3090 printf("Type: OBJECT_FIELD_HASH_TABLE\n");
3091 break;
3092
3093 case OBJECT_DATA_HASH_TABLE:
3094 printf("Type: OBJECT_DATA_HASH_TABLE\n");
3095 break;
3096
3097 case OBJECT_ENTRY_ARRAY:
3098 printf("Type: OBJECT_ENTRY_ARRAY\n");
3099 break;
3100
3101 case OBJECT_TAG:
3102 printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
3103 le64toh(o->tag.seqnum),
3104 le64toh(o->tag.epoch));
3105 break;
3106
3107 default:
3108 printf("Type: unknown (%i)\n", o->object.type);
3109 break;
3110 }
3111
3112 if (o->object.flags & OBJECT_COMPRESSION_MASK)
3113 printf("Flags: %s\n",
3114 object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));
3115
3116 if (p == le64toh(f->header->tail_object_offset))
3117 p = 0;
3118 else
3119 p += ALIGN64(le64toh(o->object.size));
3120 }
3121
3122 return;
3123 fail:
3124 log_error("File corrupt");
3125 }
3126
3127 static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {
3128 const char *x;
3129
3130 x = format_timestamp(buf, l, t);
3131 if (x)
3132 return x;
3133 return " --- ";
3134 }
3135
3136 void journal_file_print_header(JournalFile *f) {
3137 char a[SD_ID128_STRING_MAX], b[SD_ID128_STRING_MAX], c[SD_ID128_STRING_MAX], d[SD_ID128_STRING_MAX];
3138 char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
3139 struct stat st;
3140 char bytes[FORMAT_BYTES_MAX];
3141
3142 assert(f);
3143 assert(f->header);
3144
3145 printf("File path: %s\n"
3146 "File ID: %s\n"
3147 "Machine ID: %s\n"
3148 "Boot ID: %s\n"
3149 "Sequential number ID: %s\n"
3150 "State: %s\n"
3151 "Compatible flags:%s%s\n"
3152 "Incompatible flags:%s%s%s\n"
3153 "Header size: %"PRIu64"\n"
3154 "Arena size: %"PRIu64"\n"
3155 "Data hash table size: %"PRIu64"\n"
3156 "Field hash table size: %"PRIu64"\n"
3157 "Rotate suggested: %s\n"
3158 "Head sequential number: %"PRIu64" (%"PRIx64")\n"
3159 "Tail sequential number: %"PRIu64" (%"PRIx64")\n"
3160 "Head realtime timestamp: %s (%"PRIx64")\n"
3161 "Tail realtime timestamp: %s (%"PRIx64")\n"
3162 "Tail monotonic timestamp: %s (%"PRIx64")\n"
3163 "Objects: %"PRIu64"\n"
3164 "Entry objects: %"PRIu64"\n",
3165 f->path,
3166 sd_id128_to_string(f->header->file_id, a),
3167 sd_id128_to_string(f->header->machine_id, b),
3168 sd_id128_to_string(f->header->boot_id, c),
3169 sd_id128_to_string(f->header->seqnum_id, d),
3170 f->header->state == STATE_OFFLINE ? "OFFLINE" :
3171 f->header->state == STATE_ONLINE ? "ONLINE" :
3172 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
3173 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
3174 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
3175 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
3176 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
3177 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
3178 le64toh(f->header->header_size),
3179 le64toh(f->header->arena_size),
3180 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3181 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
3182 yes_no(journal_file_rotate_suggested(f, 0)),
3183 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
3184 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
3185 format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
3186 format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
3187 format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
3188 le64toh(f->header->n_objects),
3189 le64toh(f->header->n_entries));
3190
3191 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3192 printf("Data objects: %"PRIu64"\n"
3193 "Data hash table fill: %.1f%%\n",
3194 le64toh(f->header->n_data),
3195 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
3196
3197 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3198 printf("Field objects: %"PRIu64"\n"
3199 "Field hash table fill: %.1f%%\n",
3200 le64toh(f->header->n_fields),
3201 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
3202
3203 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
3204 printf("Tag objects: %"PRIu64"\n",
3205 le64toh(f->header->n_tags));
3206 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
3207 printf("Entry array objects: %"PRIu64"\n",
3208 le64toh(f->header->n_entry_arrays));
3209
3210 if (fstat(f->fd, &st) >= 0)
3211 printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
3212 }
3213
3214 static int journal_file_warn_btrfs(JournalFile *f) {
3215 unsigned attrs;
3216 int r;
3217
3218 assert(f);
3219
3220 /* Before we write anything, check if the COW logic is turned
3221 * off on btrfs. Given that our write pattern is quite
3222 * unfriendly to COW file systems, this should greatly improve
3223 * performance on COW file systems, such as btrfs, at the
3224 * expense of data integrity features (which shouldn't be too
3225 * bad, given that we do our own checksumming). */
3226
3227 r = btrfs_is_filesystem(f->fd);
3228 if (r < 0)
3229 return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
3230 if (!r)
3231 return 0;
3232
3233 r = read_attr_fd(f->fd, &attrs);
3234 if (r < 0)
3235 return log_warning_errno(r, "Failed to read file attributes: %m");
3236
3237 if (attrs & FS_NOCOW_FL) {
3238 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3239 return 0;
3240 }
3241
3242 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3243 "This is likely to slow down journal access substantially, please consider turning "
3244 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
3245
3246 return 1;
3247 }
3248
3249 int journal_file_open(
3250 int fd,
3251 const char *fname,
3252 int flags,
3253 mode_t mode,
3254 bool compress,
3255 uint64_t compress_threshold_bytes,
3256 bool seal,
3257 JournalMetrics *metrics,
3258 MMapCache *mmap_cache,
3259 Set *deferred_closes,
3260 JournalFile *template,
3261 JournalFile **ret) {
3262
3263 bool newly_created = false;
3264 JournalFile *f;
3265 void *h;
3266 int r;
3267
3268 assert(ret);
3269 assert(fd >= 0 || fname);
3270
3271 if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))
3272 return -EINVAL;
3273
3274 if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))
3275 return -EINVAL;
3276
3277 f = new(JournalFile, 1);
3278 if (!f)
3279 return -ENOMEM;
3280
3281 *f = (JournalFile) {
3282 .fd = fd,
3283 .mode = mode,
3284
3285 .flags = flags,
3286 .prot = prot_from_flags(flags),
3287 .writable = (flags & O_ACCMODE) != O_RDONLY,
3288
3289 #if HAVE_LZ4
3290 .compress_lz4 = compress,
3291 #elif HAVE_XZ
3292 .compress_xz = compress,
3293 #endif
3294 .compress_threshold_bytes = compress_threshold_bytes == (uint64_t) -1 ?
3295 DEFAULT_COMPRESS_THRESHOLD :
3296 MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes),
3297 #if HAVE_GCRYPT
3298 .seal = seal,
3299 #endif
3300 };
3301
3302 if (DEBUG_LOGGING) {
3303 static int last_seal = -1, last_compress = -1;
3304 static uint64_t last_bytes = UINT64_MAX;
3305 char bytes[FORMAT_BYTES_MAX];
3306
3307 if (last_seal != f->seal ||
3308 last_compress != JOURNAL_FILE_COMPRESS(f) ||
3309 last_bytes != f->compress_threshold_bytes) {
3310
3311 log_debug("Journal effective settings seal=%s compress=%s compress_threshold_bytes=%s",
3312 yes_no(f->seal), yes_no(JOURNAL_FILE_COMPRESS(f)),
3313 format_bytes(bytes, sizeof bytes, f->compress_threshold_bytes));
3314 last_seal = f->seal;
3315 last_compress = JOURNAL_FILE_COMPRESS(f);
3316 last_bytes = f->compress_threshold_bytes;
3317 }
3318 }
3319
3320 if (mmap_cache)
3321 f->mmap = mmap_cache_ref(mmap_cache);
3322 else {
3323 f->mmap = mmap_cache_new();
3324 if (!f->mmap) {
3325 r = -ENOMEM;
3326 goto fail;
3327 }
3328 }
3329
3330 if (fname) {
3331 f->path = strdup(fname);
3332 if (!f->path) {
3333 r = -ENOMEM;
3334 goto fail;
3335 }
3336 } else {
3337 assert(fd >= 0);
3338
3339 /* If we don't know the path, fill in something explanatory and vaguely useful */
3340 if (asprintf(&f->path, "/proc/self/fd/%i", fd) < 0) {
3341 r = -ENOMEM;
3342 goto fail;
3343 }
3344 }
3345
3346 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
3347 if (!f->chain_cache) {
3348 r = -ENOMEM;
3349 goto fail;
3350 }
3351
3352 if (f->fd < 0) {
3353 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
3354 * or so, we rather fail quickly than block for long. For regular files O_NONBLOCK has no effect, hence
3355 * it doesn't hurt in that case. */
3356
3357 f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);
3358 if (f->fd < 0) {
3359 r = -errno;
3360 goto fail;
3361 }
3362
3363 /* fds we opened here should also be closed by us. */
3364 f->close_fd = true;
3365
3366 r = fd_nonblock(f->fd, false);
3367 if (r < 0)
3368 goto fail;
3369 }
3370
3371 f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd);
3372 if (!f->cache_fd) {
3373 r = -ENOMEM;
3374 goto fail;
3375 }
3376
3377 r = journal_file_fstat(f);
3378 if (r < 0)
3379 goto fail;
3380
3381 if (f->last_stat.st_size == 0 && f->writable) {
3382
3383 (void) journal_file_warn_btrfs(f);
3384
3385 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
3386 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
3387 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
3388 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
3389 * solely on mtime/atime/ctime of the file. */
3390 (void) fd_setcrtime(f->fd, 0);
3391
3392 #if HAVE_GCRYPT
3393 /* Try to load the FSPRG state, and if we can't, then
3394 * just don't do sealing */
3395 if (f->seal) {
3396 r = journal_file_fss_load(f);
3397 if (r < 0)
3398 f->seal = false;
3399 }
3400 #endif
3401
3402 r = journal_file_init_header(f, template);
3403 if (r < 0)
3404 goto fail;
3405
3406 r = journal_file_fstat(f);
3407 if (r < 0)
3408 goto fail;
3409
3410 newly_created = true;
3411 }
3412
3413 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
3414 r = -ENODATA;
3415 goto fail;
3416 }
3417
3418 r = mmap_cache_get(f->mmap, f->cache_fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h, NULL);
3419 if (r == -EINVAL) {
3420 /* Some file systems (jffs2 or p9fs) don't support mmap() properly (or only read-only
3421 * mmap()), and return EINVAL in that case. Let's propagate that as a more recognizable error
3422 * code. */
3423 r = -EAFNOSUPPORT;
3424 goto fail;
3425 }
3426 if (r < 0)
3427 goto fail;
3428
3429 f->header = h;
3430
3431 if (!newly_created) {
3432 set_clear_with_destructor(deferred_closes, journal_file_close);
3433
3434 r = journal_file_verify_header(f);
3435 if (r < 0)
3436 goto fail;
3437 }
3438
3439 #if HAVE_GCRYPT
3440 if (!newly_created && f->writable) {
3441 r = journal_file_fss_load(f);
3442 if (r < 0)
3443 goto fail;
3444 }
3445 #endif
3446
3447 if (f->writable) {
3448 if (metrics) {
3449 journal_default_metrics(metrics, f->fd);
3450 f->metrics = *metrics;
3451 } else if (template)
3452 f->metrics = template->metrics;
3453
3454 r = journal_file_refresh_header(f);
3455 if (r < 0)
3456 goto fail;
3457 }
3458
3459 #if HAVE_GCRYPT
3460 r = journal_file_hmac_setup(f);
3461 if (r < 0)
3462 goto fail;
3463 #endif
3464
3465 if (newly_created) {
3466 r = journal_file_setup_field_hash_table(f);
3467 if (r < 0)
3468 goto fail;
3469
3470 r = journal_file_setup_data_hash_table(f);
3471 if (r < 0)
3472 goto fail;
3473
3474 #if HAVE_GCRYPT
3475 r = journal_file_append_first_tag(f);
3476 if (r < 0)
3477 goto fail;
3478 #endif
3479 }
3480
3481 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) {
3482 r = -EIO;
3483 goto fail;
3484 }
3485
3486 if (template && template->post_change_timer) {
3487 r = journal_file_enable_post_change_timer(
3488 f,
3489 sd_event_source_get_event(template->post_change_timer),
3490 template->post_change_timer_period);
3491
3492 if (r < 0)
3493 goto fail;
3494 }
3495
3496 /* The file is opened now successfully, thus we take possession of any passed in fd. */
3497 f->close_fd = true;
3498
3499 *ret = f;
3500 return 0;
3501
3502 fail:
3503 if (f->cache_fd && mmap_cache_got_sigbus(f->mmap, f->cache_fd))
3504 r = -EIO;
3505
3506 (void) journal_file_close(f);
3507
3508 return r;
3509 }
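/* A brief open/close sketch for the function above, assuming a made-up path: compression is
 * enabled with the default threshold ((uint64_t) -1 picks DEFAULT_COMPRESS_THRESHOLD), sealing is
 * off, and NULL metrics/mmap cache/deferred closes/template let the function fall back to its own
 * defaults.
 *
 *     JournalFile *f = NULL;
 *     int r;
 *
 *     r = journal_file_open(-1, "/tmp/test.journal", O_RDWR|O_CREAT, 0640,
 *                           true, (uint64_t) -1, false,
 *                           NULL, NULL, NULL, NULL, &f);
 *     if (r < 0)
 *             return r;
 *
 *     (void) journal_file_close(f);
 */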
3510
3511 int journal_file_archive(JournalFile *f) {
3512 _cleanup_free_ char *p = NULL;
3513
3514 assert(f);
3515
3516 if (!f->writable)
3517 return -EINVAL;
3518
3519 /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3520 * rotation, since we don't know the actual path, and hence couldn't rename the file. */
3521 if (path_startswith(f->path, "/proc/self/fd"))
3522 return -EINVAL;
3523
3524 if (!endswith(f->path, ".journal"))
3525 return -EINVAL;
3526
3527 if (asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
3528 (int) strlen(f->path) - 8, f->path,
3529 SD_ID128_FORMAT_VAL(f->header->seqnum_id),
3530 le64toh(f->header->head_entry_seqnum),
3531 le64toh(f->header->head_entry_realtime)) < 0)
3532 return -ENOMEM;
3533
3534 /* Try to rename the file to the archived version. If the file already was deleted, we'll get ENOENT, let's
3535 * ignore that case. */
3536 if (rename(f->path, p) < 0 && errno != ENOENT)
3537 return -errno;
3538
3539 /* Sync the rename to disk */
3540 (void) fsync_directory_of_file(f->fd);
3541
3542 /* Set as archive so offlining commits w/state=STATE_ARCHIVED. Previously we would set old_file->header->state
3543 * to STATE_ARCHIVED directly here, but journal_file_set_offline() short-circuits when state != STATE_ONLINE,
3544 * which would result in the rotated journal never getting fsync() called before closing. Now we simply queue
3545 * the archive state by setting an archive bit, leaving the state as STATE_ONLINE so proper offlining
3546 * occurs. */
3547 f->archive = true;
3548
3549 /* Currently, btrfs is not very good with our write patterns and fragments heavily. Let's defrag our journal
3550 * files when we archive them */
3551 f->defrag_on_close = true;
3552
3553 return 0;
3554 }
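/* As a concrete example of the rename above: a file "system.journal" whose head seqnum is 0x1 and
 * whose head realtime timestamp is 0x5a0000000000 would become something like
 * "system@<seqnum ID as 32 hex digits>-0000000000000001-00005a0000000000.journal"; the "@" in the
 * name is what marks the file as archived. */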
3555
3556 JournalFile* journal_initiate_close(
3557 JournalFile *f,
3558 Set *deferred_closes) {
3559
3560 int r;
3561
3562 assert(f);
3563
3564 if (deferred_closes) {
3565
3566 r = set_put(deferred_closes, f);
3567 if (r < 0)
3568 log_debug_errno(r, "Failed to add file to deferred close set, closing immediately.");
3569 else {
3570 (void) journal_file_set_offline(f, false);
3571 return NULL;
3572 }
3573 }
3574
3575 return journal_file_close(f);
3576 }
3577
3578 int journal_file_rotate(
3579 JournalFile **f,
3580 bool compress,
3581 uint64_t compress_threshold_bytes,
3582 bool seal,
3583 Set *deferred_closes) {
3584
3585 JournalFile *new_file = NULL;
3586 int r;
3587
3588 assert(f);
3589 assert(*f);
3590
3591 r = journal_file_archive(*f);
3592 if (r < 0)
3593 return r;
3594
3595 r = journal_file_open(
3596 -1,
3597 (*f)->path,
3598 (*f)->flags,
3599 (*f)->mode,
3600 compress,
3601 compress_threshold_bytes,
3602 seal,
3603 NULL, /* metrics */
3604 (*f)->mmap,
3605 deferred_closes,
3606 *f, /* template */
3607 &new_file);
3608
3609 journal_initiate_close(*f, deferred_closes);
3610 *f = new_file;
3611
3612 return r;
3613 }
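/* A brief rotation sketch, assuming 'f' and 'deferred_closes' come from the caller's context: the
 * old file behind 'f' is archived and handed to journal_initiate_close(), and 'f' is replaced by
 * the freshly opened file (or by NULL if reopening failed), so the caller must not hold on to the
 * old pointer.
 *
 *     r = journal_file_rotate(&f, true, (uint64_t) -1, false, deferred_closes);
 *     if (r < 0)
 *             return r;
 */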
3614
3615 int journal_file_dispose(int dir_fd, const char *fname) {
3616 _cleanup_free_ char *p = NULL;
3617 _cleanup_close_ int fd = -1;
3618
3619 assert(fname);
3620
3621 /* Renames a journal file to *.journal~, i.e. to mark it as corrupted or otherwise uncleanly shut down. Note that
3622 * this is done without looking into the file or changing any of its contents. The idea is that this is called
3623 * whenever something is suspicious and we want to move the file away and make clear that it is not accessed
3624 * for writing anymore. */
3625
3626 if (!endswith(fname, ".journal"))
3627 return -EINVAL;
3628
3629 if (asprintf(&p, "%.*s@%016" PRIx64 "-%016" PRIx64 ".journal~",
3630 (int) strlen(fname) - 8, fname,
3631 now(CLOCK_REALTIME),
3632 random_u64()) < 0)
3633 return -ENOMEM;
3634
3635 if (renameat(dir_fd, fname, dir_fd, p) < 0)
3636 return -errno;
3637
3638 /* btrfs doesn't cope well with our write pattern and fragments heavily. Let's defrag all files we rotate */
3639 fd = openat(dir_fd, p, O_RDONLY|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW);
3640 if (fd < 0)
3641 log_debug_errno(errno, "Failed to open file for defragmentation/FS_NOCOW_FL, ignoring: %m");
3642 else {
3643 (void) chattr_fd(fd, 0, FS_NOCOW_FL, NULL);
3644 (void) btrfs_defrag_fd(fd);
3645 }
3646
3647 return 0;
3648 }
3649
3650 int journal_file_open_reliably(
3651 const char *fname,
3652 int flags,
3653 mode_t mode,
3654 bool compress,
3655 uint64_t compress_threshold_bytes,
3656 bool seal,
3657 JournalMetrics *metrics,
3658 MMapCache *mmap_cache,
3659 Set *deferred_closes,
3660 JournalFile *template,
3661 JournalFile **ret) {
3662
3663 int r;
3664
3665 r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3666 deferred_closes, template, ret);
3667 if (!IN_SET(r,
3668 -EBADMSG, /* Corrupted */
3669 -ENODATA, /* Truncated */
3670 -EHOSTDOWN, /* Other machine */
3671 -EPROTONOSUPPORT, /* Incompatible feature */
3672 -EBUSY, /* Unclean shutdown */
3673 -ESHUTDOWN, /* Already archived */
3674 -EIO, /* IO error, including SIGBUS on mmap */
3675 -EIDRM, /* File has been deleted */
3676 -ETXTBSY)) /* File is from the future */
3677 return r;
3678
3679 if ((flags & O_ACCMODE) == O_RDONLY)
3680 return r;
3681
3682 if (!(flags & O_CREAT))
3683 return r;
3684
3685 if (!endswith(fname, ".journal"))
3686 return r;
3687
3688 /* The file is corrupted. Rotate it away and try it again (but only once) */
3689 log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
3690
3691 r = journal_file_dispose(AT_FDCWD, fname);
3692 if (r < 0)
3693 return r;
3694
3695 return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3696 deferred_closes, template, ret);
3697 }
3698
3699 int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
3700 uint64_t i, n;
3701 uint64_t q, xor_hash = 0;
3702 int r;
3703 EntryItem *items;
3704 dual_timestamp ts;
3705 const sd_id128_t *boot_id;
3706
3707 assert(from);
3708 assert(to);
3709 assert(o);
3710 assert(p);
3711
3712 if (!to->writable)
3713 return -EPERM;
3714
3715 ts.monotonic = le64toh(o->entry.monotonic);
3716 ts.realtime = le64toh(o->entry.realtime);
3717 boot_id = &o->entry.boot_id;
3718
3719 n = journal_file_entry_n_items(o);
3720 /* alloca() can't take 0, hence let's allocate at least one */
3721 items = newa(EntryItem, MAX(1u, n));
3722
3723 for (i = 0; i < n; i++) {
3724 uint64_t l, h;
3725 le64_t le_hash;
3726 size_t t;
3727 void *data;
3728 Object *u;
3729
3730 q = le64toh(o->entry.items[i].object_offset);
3731 le_hash = o->entry.items[i].hash;
3732
3733 r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
3734 if (r < 0)
3735 return r;
3736
3737 if (le_hash != o->data.hash)
3738 return -EBADMSG;
3739
3740 l = le64toh(READ_NOW(o->object.size));
3741 if (l < offsetof(Object, data.payload))
3742 return -EBADMSG;
3743
3744 l -= offsetof(Object, data.payload);
3745 t = (size_t) l;
3746
3747 /* We hit the limit on 32-bit machines */
3748 if ((uint64_t) t != l)
3749 return -E2BIG;
3750
3751 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
3752 #if HAVE_XZ || HAVE_LZ4
3753 size_t rsize = 0;
3754
3755 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
3756 o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);
3757 if (r < 0)
3758 return r;
3759
3760 data = from->compress_buffer;
3761 l = rsize;
3762 #else
3763 return -EPROTONOSUPPORT;
3764 #endif
3765 } else
3766 data = o->data.payload;
3767
3768 r = journal_file_append_data(to, data, l, &u, &h);
3769 if (r < 0)
3770 return r;
3771
3772 xor_hash ^= le64toh(u->data.hash);
3773 items[i].object_offset = htole64(h);
3774 items[i].hash = u->data.hash;
3775
3776 r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
3777 if (r < 0)
3778 return r;
3779 }
3780
3781 r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n,
3782 NULL, NULL, NULL);
3783
3784 if (mmap_cache_got_sigbus(to->mmap, to->cache_fd))
3785 return -EIO;
3786
3787 return r;
3788 }
3789
3790 void journal_reset_metrics(JournalMetrics *m) {
3791 assert(m);
3792
3793 /* Set everything to "pick automatic values". */
3794
3795 *m = (JournalMetrics) {
3796 .min_use = (uint64_t) -1,
3797 .max_use = (uint64_t) -1,
3798 .min_size = (uint64_t) -1,
3799 .max_size = (uint64_t) -1,
3800 .keep_free = (uint64_t) -1,
3801 .n_max_files = (uint64_t) -1,
3802 };
3803 }
3804
3805 void journal_default_metrics(JournalMetrics *m, int fd) {
3806 char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
3807 struct statvfs ss;
3808 uint64_t fs_size = 0;
3809
3810 assert(m);
3811 assert(fd >= 0);
3812
3813 if (fstatvfs(fd, &ss) >= 0)
3814 fs_size = ss.f_frsize * ss.f_blocks;
3815 else
3816 log_debug_errno(errno, "Failed to determine disk size: %m");
3817
3818 if (m->max_use == (uint64_t) -1) {
3819
3820 if (fs_size > 0)
3821 m->max_use = CLAMP(PAGE_ALIGN(fs_size / 10), /* 10% of file system size */
3822 MAX_USE_LOWER, MAX_USE_UPPER);
3823 else
3824 m->max_use = MAX_USE_LOWER;
3825 } else {
3826 m->max_use = PAGE_ALIGN(m->max_use);
3827
3828 if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
3829 m->max_use = JOURNAL_FILE_SIZE_MIN*2;
3830 }
3831
3832 if (m->min_use == (uint64_t) -1) {
3833 if (fs_size > 0)
3834 m->min_use = CLAMP(PAGE_ALIGN(fs_size / 50), /* 2% of file system size */
3835 MIN_USE_LOW, MIN_USE_HIGH);
3836 else
3837 m->min_use = MIN_USE_LOW;
3838 }
3839
3840 if (m->min_use > m->max_use)
3841 m->min_use = m->max_use;
3842
3843 if (m->max_size == (uint64_t) -1)
3844 m->max_size = MIN(PAGE_ALIGN(m->max_use / 8), /* 8 chunks */
3845 MAX_SIZE_UPPER);
3846 else
3847 m->max_size = PAGE_ALIGN(m->max_size);
3848
3849 if (m->max_size != 0) {
3850 if (m->max_size < JOURNAL_FILE_SIZE_MIN)
3851 m->max_size = JOURNAL_FILE_SIZE_MIN;
3852
3853 if (m->max_use != 0 && m->max_size*2 > m->max_use)
3854 m->max_use = m->max_size*2;
3855 }
3856
3857 if (m->min_size == (uint64_t) -1)
3858 m->min_size = JOURNAL_FILE_SIZE_MIN;
3859 else
3860 m->min_size = CLAMP(PAGE_ALIGN(m->min_size),
3861 JOURNAL_FILE_SIZE_MIN,
3862 m->max_size ?: UINT64_MAX);
3863
3864 if (m->keep_free == (uint64_t) -1) {
3865 if (fs_size > 0)
3866 m->keep_free = MIN(PAGE_ALIGN(fs_size / 20), /* 5% of file system size */
3867 KEEP_FREE_UPPER);
3868 else
3869 m->keep_free = DEFAULT_KEEP_FREE;
3870 }
3871
3872 if (m->n_max_files == (uint64_t) -1)
3873 m->n_max_files = DEFAULT_N_MAX_FILES;
3874
3875 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
3876 format_bytes(a, sizeof(a), m->min_use),
3877 format_bytes(b, sizeof(b), m->max_use),
3878 format_bytes(c, sizeof(c), m->max_size),
3879 format_bytes(d, sizeof(d), m->min_size),
3880 format_bytes(e, sizeof(e), m->keep_free),
3881 m->n_max_files);
3882 }
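/* A worked example of the sizing above: on a 100 GiB file system with every metric left at
 * (uint64_t) -1, max_use starts from 10% of the file system size (10 GiB) and is clamped to
 * [MAX_USE_LOWER, MAX_USE_UPPER], min_use starts from 2% (2 GiB) clamped to
 * [MIN_USE_LOW, MIN_USE_HIGH], max_size becomes max_use/8 capped at MAX_SIZE_UPPER, keep_free
 * becomes 5% (5 GiB) capped at KEEP_FREE_UPPER, and n_max_files falls back to
 * DEFAULT_N_MAX_FILES. */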
3883
3884 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {
3885 assert(f);
3886 assert(f->header);
3887 assert(from || to);
3888
3889 if (from) {
3890 if (f->header->head_entry_realtime == 0)
3891 return -ENOENT;
3892
3893 *from = le64toh(f->header->head_entry_realtime);
3894 }
3895
3896 if (to) {
3897 if (f->header->tail_entry_realtime == 0)
3898 return -ENOENT;
3899
3900 *to = le64toh(f->header->tail_entry_realtime);
3901 }
3902
3903 return 1;
3904 }
3905
3906 int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
3907 Object *o;
3908 uint64_t p;
3909 int r;
3910
3911 assert(f);
3912 assert(from || to);
3913
3914 r = find_data_object_by_boot_id(f, boot_id, &o, &p);
3915 if (r <= 0)
3916 return r;
3917
3918 if (le64toh(o->data.n_entries) <= 0)
3919 return 0;
3920
3921 if (from) {
3922 r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
3923 if (r < 0)
3924 return r;
3925
3926 *from = le64toh(o->entry.monotonic);
3927 }
3928
3929 if (to) {
3930 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
3931 if (r < 0)
3932 return r;
3933
3934 r = generic_array_get_plus_one(f,
3935 le64toh(o->data.entry_offset),
3936 le64toh(o->data.entry_array_offset),
3937 le64toh(o->data.n_entries)-1,
3938 &o, NULL);
3939 if (r <= 0)
3940 return r;
3941
3942 *to = le64toh(o->entry.monotonic);
3943 }
3944
3945 return 1;
3946 }
3947
3948 bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {
3949 assert(f);
3950 assert(f->header);
3951
3952 /* If we gained new header fields we gained new features,
3953 * hence suggest a rotation */
3954 if (le64toh(f->header->header_size) < sizeof(Header)) {
3955 log_debug("%s uses an outdated header, suggesting rotation.", f->path);
3956 return true;
3957 }
3958
3959 /* Let's check if the hash tables grew over a certain fill
3960 * level (75%, borrowing this value from Java's hash table
3961 * implementation), and if so suggest a rotation. To calculate
3962 * the fill level we need the n_data field, which only exists
3963 * in newer versions. */
3964
3965 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3966 if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3967 log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
3968 f->path,
3969 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
3970 le64toh(f->header->n_data),
3971 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3972 (unsigned long long) f->last_stat.st_size,
3973 f->last_stat.st_size / le64toh(f->header->n_data));
3974 return true;
3975 }
3976
3977 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3978 if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3979 log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
3980 f->path,
3981 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
3982 le64toh(f->header->n_fields),
3983 le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
3984 return true;
3985 }
3986
3987 /* Are the data objects properly indexed by field objects? */
3988 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
3989 JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
3990 le64toh(f->header->n_data) > 0 &&
3991 le64toh(f->header->n_fields) == 0)
3992 return true;
3993
3994 if (max_file_usec > 0) {
3995 usec_t t, h;
3996
3997 h = le64toh(f->header->head_entry_realtime);
3998 t = now(CLOCK_REALTIME);
3999
4000 if (h > 0 && t > h + max_file_usec)
4001 return true;
4002 }
4003
4004 return false;
4005 }
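/* To make the 75% rule above concrete: if the data hash table has, say, 4096 buckets
 * (data_hash_table_size / sizeof(HashItem)), rotation is suggested as soon as n_data * 4 exceeds
 * 4096 * 3, i.e. once there are more than 3072 data objects; the same 4/3 comparison is applied
 * to the field hash table. */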