src/journal/journal-file.c  (thirdparty/systemd.git, commit 036594c7350ce847affda7d526bffb889f07fdb8)
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 This file is part of systemd.
4
5 Copyright 2011 Lennart Poettering
6 ***/
7
8 #include <errno.h>
9 #include <fcntl.h>
10 #include <linux/fs.h>
11 #include <pthread.h>
12 #include <stddef.h>
13 #include <sys/mman.h>
14 #include <sys/statvfs.h>
15 #include <sys/uio.h>
16 #include <unistd.h>
17
18 #include "alloc-util.h"
19 #include "btrfs-util.h"
20 #include "chattr-util.h"
21 #include "compress.h"
22 #include "fd-util.h"
23 #include "fs-util.h"
24 #include "journal-authenticate.h"
25 #include "journal-def.h"
26 #include "journal-file.h"
27 #include "lookup3.h"
28 #include "parse-util.h"
29 #include "path-util.h"
30 #include "random-util.h"
31 #include "sd-event.h"
32 #include "set.h"
33 #include "stat-util.h"
34 #include "string-util.h"
35 #include "strv.h"
36 #include "xattr-util.h"
37
38 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
39 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
40
41 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
42 #define MIN_COMPRESS_THRESHOLD (8ULL)
43
44 /* This is the minimum journal file size */
45 #define JOURNAL_FILE_SIZE_MIN (512ULL*1024ULL) /* 512 KiB */
46
47 /* These are the lower and upper bounds if we deduce the max_use value
48 * from the file system size */
49 #define DEFAULT_MAX_USE_LOWER (1ULL*1024ULL*1024ULL) /* 1 MiB */
50 #define DEFAULT_MAX_USE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
51
52 /* This is the default minimal use limit, how much we'll use even if keep_free suggests otherwise. */
53 #define DEFAULT_MIN_USE (1ULL*1024ULL*1024ULL) /* 1 MiB */
54
55 /* This is the upper bound if we deduce max_size from max_use */
56 #define DEFAULT_MAX_SIZE_UPPER (128ULL*1024ULL*1024ULL) /* 128 MiB */
57
58 /* This is the upper bound if we deduce the keep_free value from the
59 * file system size */
60 #define DEFAULT_KEEP_FREE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
61
62 /* This is the keep_free value when we can't determine the system
63 * size */
 64 #define DEFAULT_KEEP_FREE (1024ULL*1024ULL) /* 1 MiB */
65
66 /* This is the default maximum number of journal files to keep around. */
67 #define DEFAULT_N_MAX_FILES (100)
68
69 /* n_data was the first entry we added after the initial file format design */
70 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
71
72 /* How many entries to keep in the entry array chain cache at max */
73 #define CHAIN_CACHE_MAX 20
74
75 /* How much to increase the journal file size at once each time we allocate something new. */
 76 #define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL) /* 8 MiB */
77
 78 /* Re-run fstat() on the file to detect deletions at least this often */
79 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
80
 81 /* The mmap context to use for the header: we pick the one right above the last defined object type */
82 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
83
84 #ifdef __clang__
85 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
86 #endif
87
88 /* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
89 * As a result we use atomic operations on f->offline_state for inter-thread communications with
90 * journal_file_set_offline() and journal_file_set_online(). */
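/* Rough sketch of the offline state machine implemented by the functions below (derived from
 * the code itself, which remains authoritative):
 *
 *   OFFLINE_JOINED                   -> OFFLINE_SYNCING              journal_file_set_offline() starts an offline
 *   OFFLINE_SYNCING                  -> OFFLINE_OFFLINING -> DONE    offline thread: fsync(), mark header, fsync()
 *   OFFLINE_SYNCING/OFFLINING/CANCEL -> OFFLINE_AGAIN_FROM_*         journal_file_set_offline_try_restart()
 *   OFFLINE_AGAIN_FROM_*             -> OFFLINE_SYNCING              offline thread restarts the sync
 *   OFFLINE_SYNCING/AGAIN_*          -> OFFLINE_CANCEL               journal_file_set_online() cancels the offline
 *   OFFLINE_CANCEL                   -> OFFLINE_DONE                 offline thread exits without touching the header
 *   OFFLINE_DONE                     -> OFFLINE_JOINED               journal_file_set_offline_thread_join()
 *
 * All transitions are done with compare-and-swap so the writer and the offline thread never
 * race on f->offline_state. */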
91 static void journal_file_set_offline_internal(JournalFile *f) {
92 assert(f);
93 assert(f->fd >= 0);
94 assert(f->header);
95
96 for (;;) {
97 switch (f->offline_state) {
98 case OFFLINE_CANCEL:
99 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
100 continue;
101 return;
102
103 case OFFLINE_AGAIN_FROM_SYNCING:
104 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
105 continue;
106 break;
107
108 case OFFLINE_AGAIN_FROM_OFFLINING:
109 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
110 continue;
111 break;
112
113 case OFFLINE_SYNCING:
114 (void) fsync(f->fd);
115
116 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
117 continue;
118
119 f->header->state = f->archive ? STATE_ARCHIVED : STATE_OFFLINE;
120 (void) fsync(f->fd);
121 break;
122
123 case OFFLINE_OFFLINING:
124 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
125 continue;
126 _fallthrough_;
127 case OFFLINE_DONE:
128 return;
129
130 case OFFLINE_JOINED:
131 log_debug("OFFLINE_JOINED unexpected offline state for journal_file_set_offline_internal()");
132 return;
133 }
134 }
135 }
136
137 static void * journal_file_set_offline_thread(void *arg) {
138 JournalFile *f = arg;
139
140 (void) pthread_setname_np(pthread_self(), "journal-offline");
141
142 journal_file_set_offline_internal(f);
143
144 return NULL;
145 }
146
147 static int journal_file_set_offline_thread_join(JournalFile *f) {
148 int r;
149
150 assert(f);
151
152 if (f->offline_state == OFFLINE_JOINED)
153 return 0;
154
155 r = pthread_join(f->offline_thread, NULL);
156 if (r)
157 return -r;
158
159 f->offline_state = OFFLINE_JOINED;
160
161 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
162 return -EIO;
163
164 return 0;
165 }
166
167 /* Trigger a restart if the offline thread is mid-flight in a restartable state. */
168 static bool journal_file_set_offline_try_restart(JournalFile *f) {
169 for (;;) {
170 switch (f->offline_state) {
171 case OFFLINE_AGAIN_FROM_SYNCING:
172 case OFFLINE_AGAIN_FROM_OFFLINING:
173 return true;
174
175 case OFFLINE_CANCEL:
176 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
177 continue;
178 return true;
179
180 case OFFLINE_SYNCING:
181 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
182 continue;
183 return true;
184
185 case OFFLINE_OFFLINING:
186 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
187 continue;
188 return true;
189
190 default:
191 return false;
192 }
193 }
194 }
195
196 /* Sets a journal offline.
197 *
198 * If wait is false then an offline is dispatched in a separate thread for a
199 * subsequent journal_file_set_offline() or journal_file_set_online() of the
200 * same journal to synchronize with.
201 *
202 * If wait is true, then either an existing offline thread will be restarted
203 * and joined, or if none exists the offline is simply performed in this
204 * context without involving another thread.
205 */
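/* Usage sketch (illustrative; journal_file_close() further down uses the synchronous variant):
 *
 *     journal_file_set_offline(f, false);     kick off an asynchronous offline
 *     ...
 *     journal_file_set_online(f);             cancels or joins it before writing again
 *
 *     journal_file_set_offline(f, true);      synchronous offline, as done when closing the file
 */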
206 int journal_file_set_offline(JournalFile *f, bool wait) {
207 bool restarted;
208 int r;
209
210 assert(f);
211
212 if (!f->writable)
213 return -EPERM;
214
215 if (!(f->fd >= 0 && f->header))
216 return -EINVAL;
217
218         /* An offlining journal is implicitly online and may modify f->header->state;
219          * we must also join any potentially lingering offline thread when not online. */
220 if (!journal_file_is_offlining(f) && f->header->state != STATE_ONLINE)
221 return journal_file_set_offline_thread_join(f);
222
223 /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
224 restarted = journal_file_set_offline_try_restart(f);
225 if ((restarted && wait) || !restarted) {
226 r = journal_file_set_offline_thread_join(f);
227 if (r < 0)
228 return r;
229 }
230
231 if (restarted)
232 return 0;
233
234 /* Initiate a new offline. */
235 f->offline_state = OFFLINE_SYNCING;
236
237 if (wait) /* Without using a thread if waiting. */
238 journal_file_set_offline_internal(f);
239 else {
240 sigset_t ss, saved_ss;
241 int k;
242
243 if (sigfillset(&ss) < 0)
244 return -errno;
245
246 r = pthread_sigmask(SIG_BLOCK, &ss, &saved_ss);
247 if (r > 0)
248 return -r;
249
250 r = pthread_create(&f->offline_thread, NULL, journal_file_set_offline_thread, f);
251
252 k = pthread_sigmask(SIG_SETMASK, &saved_ss, NULL);
253 if (r > 0) {
254 f->offline_state = OFFLINE_JOINED;
255 return -r;
256 }
257 if (k > 0)
258 return -k;
259 }
260
261 return 0;
262 }
263
264 static int journal_file_set_online(JournalFile *f) {
265 bool joined = false;
266
267 assert(f);
268
269 if (!f->writable)
270 return -EPERM;
271
272 if (!(f->fd >= 0 && f->header))
273 return -EINVAL;
274
275 while (!joined) {
276 switch (f->offline_state) {
277 case OFFLINE_JOINED:
278 /* No offline thread, no need to wait. */
279 joined = true;
280 break;
281
282 case OFFLINE_SYNCING:
283 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
284 continue;
285 /* Canceled syncing prior to offlining, no need to wait. */
286 break;
287
288 case OFFLINE_AGAIN_FROM_SYNCING:
289 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
290 continue;
291 /* Canceled restart from syncing, no need to wait. */
292 break;
293
294 case OFFLINE_AGAIN_FROM_OFFLINING:
295 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
296 continue;
297 /* Canceled restart from offlining, must wait for offlining to complete however. */
298 _fallthrough_;
299 default: {
300 int r;
301
302 r = journal_file_set_offline_thread_join(f);
303 if (r < 0)
304 return r;
305
306 joined = true;
307 break;
308 }
309 }
310 }
311
312 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
313 return -EIO;
314
315 switch (f->header->state) {
316 case STATE_ONLINE:
317 return 0;
318
319 case STATE_OFFLINE:
320 f->header->state = STATE_ONLINE;
321 (void) fsync(f->fd);
322 return 0;
323
324 default:
325 return -EINVAL;
326 }
327 }
328
329 bool journal_file_is_offlining(JournalFile *f) {
330 assert(f);
331
332 __sync_synchronize();
333
334 if (IN_SET(f->offline_state, OFFLINE_DONE, OFFLINE_JOINED))
335 return false;
336
337 return true;
338 }
339
340 JournalFile* journal_file_close(JournalFile *f) {
341 assert(f);
342
343 #if HAVE_GCRYPT
344 /* Write the final tag */
345 if (f->seal && f->writable) {
346 int r;
347
348 r = journal_file_append_tag(f);
349 if (r < 0)
350 log_error_errno(r, "Failed to append tag when closing journal: %m");
351 }
352 #endif
353
354 if (f->post_change_timer) {
355 int enabled;
356
357 if (sd_event_source_get_enabled(f->post_change_timer, &enabled) >= 0)
358 if (enabled == SD_EVENT_ONESHOT)
359 journal_file_post_change(f);
360
361 (void) sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_OFF);
362 sd_event_source_unref(f->post_change_timer);
363 }
364
365 journal_file_set_offline(f, true);
366
367 if (f->mmap && f->cache_fd)
368 mmap_cache_free_fd(f->mmap, f->cache_fd);
369
370 if (f->fd >= 0 && f->defrag_on_close) {
371
372 /* Be friendly to btrfs: turn COW back on again now,
373 * and defragment the file. We won't write to the file
374 * ever again, hence remove all fragmentation, and
375 * reenable all the good bits COW usually provides
376 * (such as data checksumming). */
377
378 (void) chattr_fd(f->fd, 0, FS_NOCOW_FL);
379 (void) btrfs_defrag_fd(f->fd);
380 }
381
382 if (f->close_fd)
383 safe_close(f->fd);
384 free(f->path);
385
386 mmap_cache_unref(f->mmap);
387
388 ordered_hashmap_free_free(f->chain_cache);
389
390 #if HAVE_XZ || HAVE_LZ4
391 free(f->compress_buffer);
392 #endif
393
394 #if HAVE_GCRYPT
395 if (f->fss_file)
396 munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
397 else
398 free(f->fsprg_state);
399
400 free(f->fsprg_seed);
401
402 if (f->hmac)
403 gcry_md_close(f->hmac);
404 #endif
405
406 return mfree(f);
407 }
408
409 static int journal_file_init_header(JournalFile *f, JournalFile *template) {
410 Header h = {};
411 ssize_t k;
412 int r;
413
414 assert(f);
415
416 memcpy(h.signature, HEADER_SIGNATURE, 8);
417 h.header_size = htole64(ALIGN64(sizeof(h)));
418
419 h.incompatible_flags |= htole32(
420 f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
421 f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4);
422
423 h.compatible_flags = htole32(
424 f->seal * HEADER_COMPATIBLE_SEALED);
425
426 r = sd_id128_randomize(&h.file_id);
427 if (r < 0)
428 return r;
429
430 if (template) {
431 h.seqnum_id = template->header->seqnum_id;
432 h.tail_entry_seqnum = template->header->tail_entry_seqnum;
433 } else
434 h.seqnum_id = h.file_id;
435
436 k = pwrite(f->fd, &h, sizeof(h), 0);
437 if (k < 0)
438 return -errno;
439
440 if (k != sizeof(h))
441 return -EIO;
442
443 return 0;
444 }
445
446 static int journal_file_refresh_header(JournalFile *f) {
447 sd_id128_t boot_id;
448 int r;
449
450 assert(f);
451 assert(f->header);
452
453 r = sd_id128_get_machine(&f->header->machine_id);
454 if (r < 0)
455 return r;
456
457 r = sd_id128_get_boot(&boot_id);
458 if (r < 0)
459 return r;
460
461 f->header->boot_id = boot_id;
462
463 r = journal_file_set_online(f);
464
465 /* Sync the online state to disk */
466 (void) fsync(f->fd);
467
468 /* We likely just created a new file, also sync the directory this file is located in. */
469 (void) fsync_directory_of_file(f->fd);
470
471 return r;
472 }
473
474 static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
475 const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
476 supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
477 const char *type = compatible ? "compatible" : "incompatible";
478 uint32_t flags;
479
480 flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);
481
482 if (flags & ~supported) {
483 if (flags & ~any)
484 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
485 f->path, type, flags & ~any);
486 flags = (flags & any) & ~supported;
487 if (flags) {
488 const char* strv[3];
489 unsigned n = 0;
490 _cleanup_free_ char *t = NULL;
491
492 if (compatible && (flags & HEADER_COMPATIBLE_SEALED))
493 strv[n++] = "sealed";
494 if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ))
495 strv[n++] = "xz-compressed";
496 if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4))
497 strv[n++] = "lz4-compressed";
498 strv[n] = NULL;
499 assert(n < ELEMENTSOF(strv));
500
501 t = strv_join((char**) strv, ", ");
502 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
503 f->path, type, n > 1 ? "flags" : "flag", strnull(t));
504 }
505 return true;
506 }
507
508 return false;
509 }
510
511 static int journal_file_verify_header(JournalFile *f) {
512 uint64_t arena_size, header_size;
513
514 assert(f);
515 assert(f->header);
516
517 if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
518 return -EBADMSG;
519
520 /* In both read and write mode we refuse to open files with incompatible
521 * flags we don't know. */
522 if (warn_wrong_flags(f, false))
523 return -EPROTONOSUPPORT;
524
525         /* When opening for writing we also refuse files with unknown compatible flags. */
526 if (f->writable && warn_wrong_flags(f, true))
527 return -EPROTONOSUPPORT;
528
529 if (f->header->state >= _STATE_MAX)
530 return -EBADMSG;
531
532 header_size = le64toh(f->header->header_size);
533
534 /* The first addition was n_data, so check that we are at least this large */
535 if (header_size < HEADER_SIZE_MIN)
536 return -EBADMSG;
537
538 if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
539 return -EBADMSG;
540
541 arena_size = le64toh(f->header->arena_size);
542
543 if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
544 return -ENODATA;
545
546 if (le64toh(f->header->tail_object_offset) > header_size + arena_size)
547 return -ENODATA;
548
549 if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
550 !VALID64(le64toh(f->header->field_hash_table_offset)) ||
551 !VALID64(le64toh(f->header->tail_object_offset)) ||
552 !VALID64(le64toh(f->header->entry_array_offset)))
553 return -ENODATA;
554
555 if (f->writable) {
556 sd_id128_t machine_id;
557 uint8_t state;
558 int r;
559
560 r = sd_id128_get_machine(&machine_id);
561 if (r < 0)
562 return r;
563
564 if (!sd_id128_equal(machine_id, f->header->machine_id))
565 return -EHOSTDOWN;
566
567 state = f->header->state;
568
569 if (state == STATE_ARCHIVED)
570 return -ESHUTDOWN; /* Already archived */
571 else if (state == STATE_ONLINE) {
572 log_debug("Journal file %s is already online. Assuming unclean closing.", f->path);
573 return -EBUSY;
574 } else if (state != STATE_OFFLINE) {
575 log_debug("Journal file %s has unknown state %i.", f->path, state);
576 return -EBUSY;
577 }
578
579 if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
580 return -EBADMSG;
581
582                 /* Don't permit appending to files from the future, since otherwise the realtime timestamps wouldn't
583                  * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
584                  * bisection. */
585 if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME)) {
586 log_debug("Journal file %s is from the future, refusing to append new data to it that'd be older.", f->path);
587 return -ETXTBSY;
588 }
589 }
590
591 f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
592 f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);
593
594 f->seal = JOURNAL_HEADER_SEALED(f->header);
595
596 return 0;
597 }
598
599 static int journal_file_fstat(JournalFile *f) {
600 int r;
601
602 assert(f);
603 assert(f->fd >= 0);
604
605 if (fstat(f->fd, &f->last_stat) < 0)
606 return -errno;
607
608 f->last_stat_usec = now(CLOCK_MONOTONIC);
609
610         /* Refuse dealing with files that aren't regular */
611 r = stat_verify_regular(&f->last_stat);
612 if (r < 0)
613 return r;
614
615 /* Refuse appending to files that are already deleted */
616 if (f->last_stat.st_nlink <= 0)
617 return -EIDRM;
618
619 return 0;
620 }
621
622 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
623 uint64_t old_size, new_size;
624 int r;
625
626 assert(f);
627 assert(f->header);
628
629 /* We assume that this file is not sparse, and we know that
630 * for sure, since we always call posix_fallocate()
631 * ourselves */
632
633 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
634 return -EIO;
635
636 old_size =
637 le64toh(f->header->header_size) +
638 le64toh(f->header->arena_size);
639
640 new_size = PAGE_ALIGN(offset + size);
641 if (new_size < le64toh(f->header->header_size))
642 new_size = le64toh(f->header->header_size);
643
644 if (new_size <= old_size) {
645
646                 /* We already pre-allocated enough space, but before
647                  * we write to it, let's check with fstat() if the
648                  * file got deleted, in order to make sure we don't throw
649                  * away the data immediately. Don't check fstat() for
650                  * all writes though, but only once every 5s (LAST_STAT_REFRESH_USEC). */
651
652 if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
653 return 0;
654
655 return journal_file_fstat(f);
656 }
657
658 /* Allocate more space. */
659
660 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
661 return -E2BIG;
662
663 if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
664 struct statvfs svfs;
665
666 if (fstatvfs(f->fd, &svfs) >= 0) {
667 uint64_t available;
668
669 available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);
670
671 if (new_size - old_size > available)
672 return -E2BIG;
673 }
674 }
675
676 /* Increase by larger blocks at once */
677 new_size = DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
678 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
679 new_size = f->metrics.max_size;
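        /* For example (illustrative): a write that would grow the file to 9.5 MiB is rounded up
         * to 16 MiB here, since FILE_SIZE_INCREASE is 8 MiB, subject to the max_size clamp right
         * above. */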
680
681         /* Note that the glibc posix_fallocate() fallback is very
682            inefficient, hence we try to minimize the allocation area
683            as much as we can. */
684 r = posix_fallocate(f->fd, old_size, new_size - old_size);
685 if (r != 0)
686 return -r;
687
688 f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));
689
690 return journal_file_fstat(f);
691 }
692
693 static unsigned type_to_context(ObjectType type) {
694 /* One context for each type, plus one catch-all for the rest */
695 assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
696 assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
697 return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
698 }
699
700 static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret, size_t *ret_size) {
701 int r;
702
703 assert(f);
704 assert(ret);
705
706 if (size <= 0)
707 return -EINVAL;
708
709 /* Avoid SIGBUS on invalid accesses */
710 if (offset + size > (uint64_t) f->last_stat.st_size) {
711 /* Hmm, out of range? Let's refresh the fstat() data
712 * first, before we trust that check. */
713
714 r = journal_file_fstat(f);
715 if (r < 0)
716 return r;
717
718 if (offset + size > (uint64_t) f->last_stat.st_size)
719 return -EADDRNOTAVAIL;
720 }
721
722 return mmap_cache_get(f->mmap, f->cache_fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret, ret_size);
723 }
724
725 static uint64_t minimum_header_size(Object *o) {
726
727 static const uint64_t table[] = {
728 [OBJECT_DATA] = sizeof(DataObject),
729 [OBJECT_FIELD] = sizeof(FieldObject),
730 [OBJECT_ENTRY] = sizeof(EntryObject),
731 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
732 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
733 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
734 [OBJECT_TAG] = sizeof(TagObject),
735 };
736
737 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
738 return sizeof(ObjectHeader);
739
740 return table[o->object.type];
741 }
742
743 /* Lightweight object checks. We want this to be fast, so that we won't
744  * slow down every journal_file_move_to_object() call too much. */
745 static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) {
746 assert(f);
747 assert(o);
748
749 switch (o->object.type) {
750
751 case OBJECT_DATA: {
752 if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0)) {
753 log_debug("Bad n_entries: %"PRIu64": %"PRIu64,
754 le64toh(o->data.n_entries), offset);
755 return -EBADMSG;
756 }
757
758 if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0) {
759 log_debug("Bad object size (<= %zu): %"PRIu64": %"PRIu64,
760 offsetof(DataObject, payload),
761 le64toh(o->object.size),
762 offset);
763 return -EBADMSG;
764 }
765
766 if (!VALID64(le64toh(o->data.next_hash_offset)) ||
767 !VALID64(le64toh(o->data.next_field_offset)) ||
768 !VALID64(le64toh(o->data.entry_offset)) ||
769 !VALID64(le64toh(o->data.entry_array_offset))) {
770 log_debug("Invalid offset, next_hash_offset="OFSfmt", next_field_offset="OFSfmt
771 ", entry_offset="OFSfmt", entry_array_offset="OFSfmt": %"PRIu64,
772 le64toh(o->data.next_hash_offset),
773 le64toh(o->data.next_field_offset),
774 le64toh(o->data.entry_offset),
775 le64toh(o->data.entry_array_offset),
776 offset);
777 return -EBADMSG;
778 }
779
780 break;
781 }
782
783 case OBJECT_FIELD:
784 if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0) {
785 log_debug(
786 "Bad field size (<= %zu): %"PRIu64": %"PRIu64,
787 offsetof(FieldObject, payload),
788 le64toh(o->object.size),
789 offset);
790 return -EBADMSG;
791 }
792
793 if (!VALID64(le64toh(o->field.next_hash_offset)) ||
794 !VALID64(le64toh(o->field.head_data_offset))) {
795 log_debug(
796 "Invalid offset, next_hash_offset="OFSfmt
797 ", head_data_offset="OFSfmt": %"PRIu64,
798 le64toh(o->field.next_hash_offset),
799 le64toh(o->field.head_data_offset),
800 offset);
801 return -EBADMSG;
802 }
803 break;
804
805 case OBJECT_ENTRY:
806 if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) {
807                         log_debug(
808                                 "Bad entry size (not a multiple of %zu): %"PRIu64": %"PRIu64,
809                                 sizeof(EntryItem),
810                                 le64toh(o->object.size),
811                                 offset);
812 return -EBADMSG;
813 }
814
815 if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) {
816 log_debug(
817 "Invalid number items in entry: %"PRIu64": %"PRIu64,
818 (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem),
819 offset);
820 return -EBADMSG;
821 }
822
823 if (le64toh(o->entry.seqnum) <= 0) {
824 log_debug(
825 "Invalid entry seqnum: %"PRIx64": %"PRIu64,
826 le64toh(o->entry.seqnum),
827 offset);
828 return -EBADMSG;
829 }
830
831 if (!VALID_REALTIME(le64toh(o->entry.realtime))) {
832 log_debug(
833 "Invalid entry realtime timestamp: %"PRIu64": %"PRIu64,
834 le64toh(o->entry.realtime),
835 offset);
836 return -EBADMSG;
837 }
838
839 if (!VALID_MONOTONIC(le64toh(o->entry.monotonic))) {
840 log_debug(
841 "Invalid entry monotonic timestamp: %"PRIu64": %"PRIu64,
842 le64toh(o->entry.monotonic),
843 offset);
844 return -EBADMSG;
845 }
846
847 break;
848
849 case OBJECT_DATA_HASH_TABLE:
850 case OBJECT_FIELD_HASH_TABLE:
851 if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
852 (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) {
853 log_debug(
854 "Invalid %s hash table size: %"PRIu64": %"PRIu64,
855 o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
856 le64toh(o->object.size),
857 offset);
858 return -EBADMSG;
859 }
860
861 break;
862
863 case OBJECT_ENTRY_ARRAY:
864 if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
865 (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) {
866 log_debug(
867 "Invalid object entry array size: %"PRIu64": %"PRIu64,
868 le64toh(o->object.size),
869 offset);
870 return -EBADMSG;
871 }
872
873 if (!VALID64(le64toh(o->entry_array.next_entry_array_offset))) {
874 log_debug(
875 "Invalid object entry array next_entry_array_offset: "OFSfmt": %"PRIu64,
876 le64toh(o->entry_array.next_entry_array_offset),
877 offset);
878 return -EBADMSG;
879 }
880
881 break;
882
883 case OBJECT_TAG:
884 if (le64toh(o->object.size) != sizeof(TagObject)) {
885 log_debug(
886 "Invalid object tag size: %"PRIu64": %"PRIu64,
887 le64toh(o->object.size),
888 offset);
889 return -EBADMSG;
890 }
891
892 if (!VALID_EPOCH(le64toh(o->tag.epoch))) {
893 log_debug(
894 "Invalid object tag epoch: %"PRIu64": %"PRIu64,
895 le64toh(o->tag.epoch),
896 offset);
897 return -EBADMSG;
898 }
899
900 break;
901 }
902
903 return 0;
904 }
905
906 int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
907 int r;
908 void *t;
909 size_t tsize;
910 Object *o;
911 uint64_t s;
912
913 assert(f);
914 assert(ret);
915
916         /* Objects may only be located at multiples of 64 bits */
917 if (!VALID64(offset)) {
918 log_debug("Attempt to move to object at non-64bit boundary: %" PRIu64, offset);
919 return -EBADMSG;
920 }
921
922 /* Object may not be located in the file header */
923 if (offset < le64toh(f->header->header_size)) {
924 log_debug("Attempt to move to object located in file header: %" PRIu64, offset);
925 return -EBADMSG;
926 }
927
928 r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t, &tsize);
929 if (r < 0)
930 return r;
931
932 o = (Object*) t;
933 s = le64toh(o->object.size);
934
935 if (s == 0) {
936 log_debug("Attempt to move to uninitialized object: %" PRIu64, offset);
937 return -EBADMSG;
938 }
939 if (s < sizeof(ObjectHeader)) {
940 log_debug("Attempt to move to overly short object: %" PRIu64, offset);
941 return -EBADMSG;
942 }
943
944 if (o->object.type <= OBJECT_UNUSED) {
945 log_debug("Attempt to move to object with invalid type: %" PRIu64, offset);
946 return -EBADMSG;
947 }
948
949 if (s < minimum_header_size(o)) {
950 log_debug("Attempt to move to truncated object: %" PRIu64, offset);
951 return -EBADMSG;
952 }
953
954 if (type > OBJECT_UNUSED && o->object.type != type) {
955 log_debug("Attempt to move to object of unexpected type: %" PRIu64, offset);
956 return -EBADMSG;
957 }
958
959 if (s > tsize) {
960 r = journal_file_move_to(f, type, false, offset, s, &t, NULL);
961 if (r < 0)
962 return r;
963
964 o = (Object*) t;
965 }
966
967 r = journal_file_check_object(f, offset, o);
968 if (r < 0)
969 return r;
970
971 *ret = o;
972 return 0;
973 }
974
975 static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {
976 uint64_t r;
977
978 assert(f);
979 assert(f->header);
980
981 r = le64toh(f->header->tail_entry_seqnum) + 1;
982
983 if (seqnum) {
984                 /* If an external seqnum counter was passed, we update
985                  * both the local and the external one, and set both to
986                  * the maximum of the two */
987
988 if (*seqnum + 1 > r)
989 r = *seqnum + 1;
990
991 *seqnum = r;
992 }
993
994 f->header->tail_entry_seqnum = htole64(r);
995
996 if (f->header->head_entry_seqnum == 0)
997 f->header->head_entry_seqnum = htole64(r);
998
999 return r;
1000 }
1001
1002 int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset) {
1003 int r;
1004 uint64_t p;
1005 Object *tail, *o;
1006 void *t;
1007
1008 assert(f);
1009 assert(f->header);
1010 assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
1011 assert(size >= sizeof(ObjectHeader));
1012 assert(offset);
1013 assert(ret);
1014
1015 r = journal_file_set_online(f);
1016 if (r < 0)
1017 return r;
1018
1019 p = le64toh(f->header->tail_object_offset);
1020 if (p == 0)
1021 p = le64toh(f->header->header_size);
1022 else {
1023 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
1024 if (r < 0)
1025 return r;
1026
1027 p += ALIGN64(le64toh(tail->object.size));
1028 }
1029
1030 r = journal_file_allocate(f, p, size);
1031 if (r < 0)
1032 return r;
1033
1034 r = journal_file_move_to(f, type, false, p, size, &t, NULL);
1035 if (r < 0)
1036 return r;
1037
1038 o = (Object*) t;
1039
1040 zero(o->object);
1041 o->object.type = type;
1042 o->object.size = htole64(size);
1043
1044 f->header->tail_object_offset = htole64(p);
1045 f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
1046
1047 *ret = o;
1048 *offset = p;
1049
1050 return 0;
1051 }
1052
1053 static int journal_file_setup_data_hash_table(JournalFile *f) {
1054 uint64_t s, p;
1055 Object *o;
1056 int r;
1057
1058 assert(f);
1059 assert(f->header);
1060
1061 /* We estimate that we need 1 hash table entry per 768 bytes
1062 of journal file and we want to make sure we never get
1063 beyond 75% fill level. Calculate the hash table size for
1064 the maximum file size based on these metrics. */
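        /* Worked example of the estimate above (illustrative arithmetic; sizeof(HashItem) is
         * assumed to be 16 bytes, i.e. two le64_t offsets per bucket): with max_size = 128 MiB
         * we expect roughly 128 MiB / 768 = ~175k data objects; staying below a 75% fill level
         * then means ~175k * 4/3 = ~233k buckets, i.e. s = ~233k * 16 = ~3.6 MiB of hash table.
         * Smaller limits fall back to DEFAULT_DATA_HASH_TABLE_SIZE (2047 buckets) below. */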
1065
1066 s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
1067 if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
1068 s = DEFAULT_DATA_HASH_TABLE_SIZE;
1069
1070 log_debug("Reserving %"PRIu64" entries in hash table.", s / sizeof(HashItem));
1071
1072 r = journal_file_append_object(f,
1073 OBJECT_DATA_HASH_TABLE,
1074 offsetof(Object, hash_table.items) + s,
1075 &o, &p);
1076 if (r < 0)
1077 return r;
1078
1079 memzero(o->hash_table.items, s);
1080
1081 f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1082 f->header->data_hash_table_size = htole64(s);
1083
1084 return 0;
1085 }
1086
1087 static int journal_file_setup_field_hash_table(JournalFile *f) {
1088 uint64_t s, p;
1089 Object *o;
1090 int r;
1091
1092 assert(f);
1093 assert(f->header);
1094
1095         /* We use a fixed size hash table for the fields as their
1096          * number should only grow very slowly */
1097
1098 s = DEFAULT_FIELD_HASH_TABLE_SIZE;
1099 r = journal_file_append_object(f,
1100 OBJECT_FIELD_HASH_TABLE,
1101 offsetof(Object, hash_table.items) + s,
1102 &o, &p);
1103 if (r < 0)
1104 return r;
1105
1106 memzero(o->hash_table.items, s);
1107
1108 f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1109 f->header->field_hash_table_size = htole64(s);
1110
1111 return 0;
1112 }
1113
1114 int journal_file_map_data_hash_table(JournalFile *f) {
1115 uint64_t s, p;
1116 void *t;
1117 int r;
1118
1119 assert(f);
1120 assert(f->header);
1121
1122 if (f->data_hash_table)
1123 return 0;
1124
1125 p = le64toh(f->header->data_hash_table_offset);
1126 s = le64toh(f->header->data_hash_table_size);
1127
1128 r = journal_file_move_to(f,
1129 OBJECT_DATA_HASH_TABLE,
1130 true,
1131 p, s,
1132 &t, NULL);
1133 if (r < 0)
1134 return r;
1135
1136 f->data_hash_table = t;
1137 return 0;
1138 }
1139
1140 int journal_file_map_field_hash_table(JournalFile *f) {
1141 uint64_t s, p;
1142 void *t;
1143 int r;
1144
1145 assert(f);
1146 assert(f->header);
1147
1148 if (f->field_hash_table)
1149 return 0;
1150
1151 p = le64toh(f->header->field_hash_table_offset);
1152 s = le64toh(f->header->field_hash_table_size);
1153
1154 r = journal_file_move_to(f,
1155 OBJECT_FIELD_HASH_TABLE,
1156 true,
1157 p, s,
1158 &t, NULL);
1159 if (r < 0)
1160 return r;
1161
1162 f->field_hash_table = t;
1163 return 0;
1164 }
1165
1166 static int journal_file_link_field(
1167 JournalFile *f,
1168 Object *o,
1169 uint64_t offset,
1170 uint64_t hash) {
1171
1172 uint64_t p, h, m;
1173 int r;
1174
1175 assert(f);
1176 assert(f->header);
1177 assert(f->field_hash_table);
1178 assert(o);
1179 assert(offset > 0);
1180
1181 if (o->object.type != OBJECT_FIELD)
1182 return -EINVAL;
1183
1184 m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
1185 if (m <= 0)
1186 return -EBADMSG;
1187
1188 /* This might alter the window we are looking at */
1189 o->field.next_hash_offset = o->field.head_data_offset = 0;
1190
1191 h = hash % m;
1192 p = le64toh(f->field_hash_table[h].tail_hash_offset);
1193 if (p == 0)
1194 f->field_hash_table[h].head_hash_offset = htole64(offset);
1195 else {
1196 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1197 if (r < 0)
1198 return r;
1199
1200 o->field.next_hash_offset = htole64(offset);
1201 }
1202
1203 f->field_hash_table[h].tail_hash_offset = htole64(offset);
1204
1205 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
1206 f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
1207
1208 return 0;
1209 }
1210
1211 static int journal_file_link_data(
1212 JournalFile *f,
1213 Object *o,
1214 uint64_t offset,
1215 uint64_t hash) {
1216
1217 uint64_t p, h, m;
1218 int r;
1219
1220 assert(f);
1221 assert(f->header);
1222 assert(f->data_hash_table);
1223 assert(o);
1224 assert(offset > 0);
1225
1226 if (o->object.type != OBJECT_DATA)
1227 return -EINVAL;
1228
1229 m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
1230 if (m <= 0)
1231 return -EBADMSG;
1232
1233 /* This might alter the window we are looking at */
1234 o->data.next_hash_offset = o->data.next_field_offset = 0;
1235 o->data.entry_offset = o->data.entry_array_offset = 0;
1236 o->data.n_entries = 0;
1237
1238 h = hash % m;
1239 p = le64toh(f->data_hash_table[h].tail_hash_offset);
1240 if (p == 0)
1241 /* Only entry in the hash table is easy */
1242 f->data_hash_table[h].head_hash_offset = htole64(offset);
1243 else {
1244                 /* Move back to the previous data object, to patch in
1245                  * the pointer */
1246
1247 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1248 if (r < 0)
1249 return r;
1250
1251 o->data.next_hash_offset = htole64(offset);
1252 }
1253
1254 f->data_hash_table[h].tail_hash_offset = htole64(offset);
1255
1256 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
1257 f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
1258
1259 return 0;
1260 }
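/* Illustrative picture of a data hash bucket after two collisions (offsets made up; derived
 * from the linking logic above):
 *
 *     data_hash_table[h].head_hash_offset -> DATA@0x1000 -> DATA@0x2300 -> DATA@0x5800
 *                                                (chained via data.next_hash_offset)
 *     data_hash_table[h].tail_hash_offset ------------------------------> DATA@0x5800
 *
 * New objects are always appended at the tail of a bucket's chain, so the find functions below
 * walk it from the head. */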
1261
1262 int journal_file_find_field_object_with_hash(
1263 JournalFile *f,
1264 const void *field, uint64_t size, uint64_t hash,
1265 Object **ret, uint64_t *offset) {
1266
1267 uint64_t p, osize, h, m;
1268 int r;
1269
1270 assert(f);
1271 assert(f->header);
1272 assert(field && size > 0);
1273
1274 /* If the field hash table is empty, we can't find anything */
1275 if (le64toh(f->header->field_hash_table_size) <= 0)
1276 return 0;
1277
1278 /* Map the field hash table, if it isn't mapped yet. */
1279 r = journal_file_map_field_hash_table(f);
1280 if (r < 0)
1281 return r;
1282
1283 osize = offsetof(Object, field.payload) + size;
1284
1285 m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
1286 if (m <= 0)
1287 return -EBADMSG;
1288
1289 h = hash % m;
1290 p = le64toh(f->field_hash_table[h].head_hash_offset);
1291
1292 while (p > 0) {
1293 Object *o;
1294
1295 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1296 if (r < 0)
1297 return r;
1298
1299 if (le64toh(o->field.hash) == hash &&
1300 le64toh(o->object.size) == osize &&
1301 memcmp(o->field.payload, field, size) == 0) {
1302
1303 if (ret)
1304 *ret = o;
1305 if (offset)
1306 *offset = p;
1307
1308 return 1;
1309 }
1310
1311 p = le64toh(o->field.next_hash_offset);
1312 }
1313
1314 return 0;
1315 }
1316
1317 int journal_file_find_field_object(
1318 JournalFile *f,
1319 const void *field, uint64_t size,
1320 Object **ret, uint64_t *offset) {
1321
1322 uint64_t hash;
1323
1324 assert(f);
1325 assert(field && size > 0);
1326
1327 hash = hash64(field, size);
1328
1329 return journal_file_find_field_object_with_hash(f,
1330 field, size, hash,
1331 ret, offset);
1332 }
1333
1334 int journal_file_find_data_object_with_hash(
1335 JournalFile *f,
1336 const void *data, uint64_t size, uint64_t hash,
1337 Object **ret, uint64_t *offset) {
1338
1339 uint64_t p, osize, h, m;
1340 int r;
1341
1342 assert(f);
1343 assert(f->header);
1344 assert(data || size == 0);
1345
1346 /* If there's no data hash table, then there's no entry. */
1347 if (le64toh(f->header->data_hash_table_size) <= 0)
1348 return 0;
1349
1350 /* Map the data hash table, if it isn't mapped yet. */
1351 r = journal_file_map_data_hash_table(f);
1352 if (r < 0)
1353 return r;
1354
1355 osize = offsetof(Object, data.payload) + size;
1356
1357 m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
1358 if (m <= 0)
1359 return -EBADMSG;
1360
1361 h = hash % m;
1362 p = le64toh(f->data_hash_table[h].head_hash_offset);
1363
1364 while (p > 0) {
1365 Object *o;
1366
1367 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1368 if (r < 0)
1369 return r;
1370
1371 if (le64toh(o->data.hash) != hash)
1372 goto next;
1373
1374 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
1375 #if HAVE_XZ || HAVE_LZ4
1376 uint64_t l;
1377 size_t rsize = 0;
1378
1379 l = le64toh(o->object.size);
1380 if (l <= offsetof(Object, data.payload))
1381 return -EBADMSG;
1382
1383 l -= offsetof(Object, data.payload);
1384
1385 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
1386 o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);
1387 if (r < 0)
1388 return r;
1389
1390 if (rsize == size &&
1391 memcmp(f->compress_buffer, data, size) == 0) {
1392
1393 if (ret)
1394 *ret = o;
1395
1396 if (offset)
1397 *offset = p;
1398
1399 return 1;
1400 }
1401 #else
1402 return -EPROTONOSUPPORT;
1403 #endif
1404 } else if (le64toh(o->object.size) == osize &&
1405 memcmp(o->data.payload, data, size) == 0) {
1406
1407 if (ret)
1408 *ret = o;
1409
1410 if (offset)
1411 *offset = p;
1412
1413 return 1;
1414 }
1415
1416 next:
1417 p = le64toh(o->data.next_hash_offset);
1418 }
1419
1420 return 0;
1421 }
1422
1423 int journal_file_find_data_object(
1424 JournalFile *f,
1425 const void *data, uint64_t size,
1426 Object **ret, uint64_t *offset) {
1427
1428 uint64_t hash;
1429
1430 assert(f);
1431 assert(data || size == 0);
1432
1433 hash = hash64(data, size);
1434
1435 return journal_file_find_data_object_with_hash(f,
1436 data, size, hash,
1437 ret, offset);
1438 }
1439
1440 static int journal_file_append_field(
1441 JournalFile *f,
1442 const void *field, uint64_t size,
1443 Object **ret, uint64_t *offset) {
1444
1445 uint64_t hash, p;
1446 uint64_t osize;
1447 Object *o;
1448 int r;
1449
1450 assert(f);
1451 assert(field && size > 0);
1452
1453 hash = hash64(field, size);
1454
1455 r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
1456 if (r < 0)
1457 return r;
1458 else if (r > 0) {
1459
1460 if (ret)
1461 *ret = o;
1462
1463 if (offset)
1464 *offset = p;
1465
1466 return 0;
1467 }
1468
1469 osize = offsetof(Object, field.payload) + size;
1470 r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
1471 if (r < 0)
1472 return r;
1473
1474 o->field.hash = htole64(hash);
1475 memcpy(o->field.payload, field, size);
1476
1477 r = journal_file_link_field(f, o, p, hash);
1478 if (r < 0)
1479 return r;
1480
1481 /* The linking might have altered the window, so let's
1482 * refresh our pointer */
1483 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1484 if (r < 0)
1485 return r;
1486
1487 #if HAVE_GCRYPT
1488 r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
1489 if (r < 0)
1490 return r;
1491 #endif
1492
1493 if (ret)
1494 *ret = o;
1495
1496 if (offset)
1497 *offset = p;
1498
1499 return 0;
1500 }
1501
1502 static int journal_file_append_data(
1503 JournalFile *f,
1504 const void *data, uint64_t size,
1505 Object **ret, uint64_t *offset) {
1506
1507 uint64_t hash, p;
1508 uint64_t osize;
1509 Object *o;
1510 int r, compression = 0;
1511 const void *eq;
1512
1513 assert(f);
1514 assert(data || size == 0);
1515
1516 hash = hash64(data, size);
1517
1518 r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);
1519 if (r < 0)
1520 return r;
1521 if (r > 0) {
1522
1523 if (ret)
1524 *ret = o;
1525
1526 if (offset)
1527 *offset = p;
1528
1529 return 0;
1530 }
1531
1532 osize = offsetof(Object, data.payload) + size;
1533 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1534 if (r < 0)
1535 return r;
1536
1537 o->data.hash = htole64(hash);
1538
1539 #if HAVE_XZ || HAVE_LZ4
1540 if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
1541 size_t rsize = 0;
1542
1543 compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
1544
1545 if (compression >= 0) {
1546 o->object.size = htole64(offsetof(Object, data.payload) + rsize);
1547 o->object.flags |= compression;
1548
1549 log_debug("Compressed data object %"PRIu64" -> %zu using %s",
1550 size, rsize, object_compressed_to_string(compression));
1551 } else
1552 /* Compression didn't work, we don't really care why, let's continue without compression */
1553 compression = 0;
1554 }
1555 #endif
1556
1557 if (compression == 0)
1558 memcpy_safe(o->data.payload, data, size);
1559
1560 r = journal_file_link_data(f, o, p, hash);
1561 if (r < 0)
1562 return r;
1563
1564 #if HAVE_GCRYPT
1565 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1566 if (r < 0)
1567 return r;
1568 #endif
1569
1570 /* The linking might have altered the window, so let's
1571 * refresh our pointer */
1572 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1573 if (r < 0)
1574 return r;
1575
1576 if (!data)
1577 eq = NULL;
1578 else
1579 eq = memchr(data, '=', size);
1580 if (eq && eq > data) {
1581 Object *fo = NULL;
1582 uint64_t fp;
1583
1584 /* Create field object ... */
1585 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
1586 if (r < 0)
1587 return r;
1588
1589 /* ... and link it in. */
1590 o->data.next_field_offset = fo->field.head_data_offset;
1591                 fo->field.head_data_offset = htole64(p);
1592 }
1593
1594 if (ret)
1595 *ret = o;
1596
1597 if (offset)
1598 *offset = p;
1599
1600 return 0;
1601 }
1602
1603 uint64_t journal_file_entry_n_items(Object *o) {
1604 assert(o);
1605
1606 if (o->object.type != OBJECT_ENTRY)
1607 return 0;
1608
1609 return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem);
1610 }
1611
1612 uint64_t journal_file_entry_array_n_items(Object *o) {
1613 assert(o);
1614
1615 if (o->object.type != OBJECT_ENTRY_ARRAY)
1616 return 0;
1617
1618 return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
1619 }
1620
1621 uint64_t journal_file_hash_table_n_items(Object *o) {
1622 assert(o);
1623
1624 if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
1625 return 0;
1626
1627 return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem);
1628 }
1629
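/* The entries referencing a given data object (and the global entry list in the header) are
 * kept in a singly-linked chain of OBJECT_ENTRY_ARRAY objects. When the chain runs out of
 * room, the function below appends a new array with roughly twice as many slots as there are
 * entries linked so far (minimum 4), so capacities grow geometrically and only O(log n) arrays
 * are needed for n entries. (Summary of the code below, for orientation.) */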
1630 static int link_entry_into_array(JournalFile *f,
1631 le64_t *first,
1632 le64_t *idx,
1633 uint64_t p) {
1634 int r;
1635 uint64_t n = 0, ap = 0, q, i, a, hidx;
1636 Object *o;
1637
1638 assert(f);
1639 assert(f->header);
1640 assert(first);
1641 assert(idx);
1642 assert(p > 0);
1643
1644 a = le64toh(*first);
1645 i = hidx = le64toh(*idx);
1646 while (a > 0) {
1647
1648 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
1649 if (r < 0)
1650 return r;
1651
1652 n = journal_file_entry_array_n_items(o);
1653 if (i < n) {
1654 o->entry_array.items[i] = htole64(p);
1655 *idx = htole64(hidx + 1);
1656 return 0;
1657 }
1658
1659 i -= n;
1660 ap = a;
1661 a = le64toh(o->entry_array.next_entry_array_offset);
1662 }
1663
1664 if (hidx > n)
1665 n = (hidx+1) * 2;
1666 else
1667 n = n * 2;
1668
1669 if (n < 4)
1670 n = 4;
1671
1672 r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
1673 offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
1674 &o, &q);
1675 if (r < 0)
1676 return r;
1677
1678 #if HAVE_GCRYPT
1679 r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
1680 if (r < 0)
1681 return r;
1682 #endif
1683
1684 o->entry_array.items[i] = htole64(p);
1685
1686 if (ap == 0)
1687 *first = htole64(q);
1688 else {
1689 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
1690 if (r < 0)
1691 return r;
1692
1693 o->entry_array.next_entry_array_offset = htole64(q);
1694 }
1695
1696 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
1697 f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
1698
1699 *idx = htole64(hidx + 1);
1700
1701 return 0;
1702 }
1703
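/* "Plus one" indexing, as used for data objects below: the first referencing entry is stored
 * inline in the object itself (the "extra" field, e.g. data.entry_offset), and only the second
 * and later entries go into the entry array chain. For example, with n_entries == 3, entry #1
 * sits in data.entry_offset while entries #2 and #3 sit in the chain hanging off
 * data.entry_array_offset. */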
1704 static int link_entry_into_array_plus_one(JournalFile *f,
1705 le64_t *extra,
1706 le64_t *first,
1707 le64_t *idx,
1708 uint64_t p) {
1709
1710 int r;
1711
1712 assert(f);
1713 assert(extra);
1714 assert(first);
1715 assert(idx);
1716 assert(p > 0);
1717
1718 if (*idx == 0)
1719 *extra = htole64(p);
1720 else {
1721 le64_t i;
1722
1723 i = htole64(le64toh(*idx) - 1);
1724 r = link_entry_into_array(f, first, &i, p);
1725 if (r < 0)
1726 return r;
1727 }
1728
1729 *idx = htole64(le64toh(*idx) + 1);
1730 return 0;
1731 }
1732
1733 static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
1734 uint64_t p;
1735 int r;
1736 assert(f);
1737 assert(o);
1738 assert(offset > 0);
1739
1740 p = le64toh(o->entry.items[i].object_offset);
1741 if (p == 0)
1742 return -EINVAL;
1743
1744 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1745 if (r < 0)
1746 return r;
1747
1748 return link_entry_into_array_plus_one(f,
1749 &o->data.entry_offset,
1750 &o->data.entry_array_offset,
1751 &o->data.n_entries,
1752 offset);
1753 }
1754
1755 static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
1756 uint64_t n, i;
1757 int r;
1758
1759 assert(f);
1760 assert(f->header);
1761 assert(o);
1762 assert(offset > 0);
1763
1764 if (o->object.type != OBJECT_ENTRY)
1765 return -EINVAL;
1766
1767 __sync_synchronize();
1768
1769 /* Link up the entry itself */
1770 r = link_entry_into_array(f,
1771 &f->header->entry_array_offset,
1772 &f->header->n_entries,
1773 offset);
1774 if (r < 0)
1775 return r;
1776
1777 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1778
1779 if (f->header->head_entry_realtime == 0)
1780 f->header->head_entry_realtime = o->entry.realtime;
1781
1782 f->header->tail_entry_realtime = o->entry.realtime;
1783 f->header->tail_entry_monotonic = o->entry.monotonic;
1784
1785 /* Link up the items */
1786 n = journal_file_entry_n_items(o);
1787 for (i = 0; i < n; i++) {
1788 r = journal_file_link_entry_item(f, o, offset, i);
1789 if (r < 0)
1790 return r;
1791 }
1792
1793 return 0;
1794 }
1795
1796 static int journal_file_append_entry_internal(
1797 JournalFile *f,
1798 const dual_timestamp *ts,
1799 uint64_t xor_hash,
1800 const EntryItem items[], unsigned n_items,
1801 uint64_t *seqnum,
1802 Object **ret, uint64_t *offset) {
1803 uint64_t np;
1804 uint64_t osize;
1805 Object *o;
1806 int r;
1807
1808 assert(f);
1809 assert(f->header);
1810 assert(items || n_items == 0);
1811 assert(ts);
1812
1813 osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
1814
1815 r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
1816 if (r < 0)
1817 return r;
1818
1819 o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
1820 memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
1821 o->entry.realtime = htole64(ts->realtime);
1822 o->entry.monotonic = htole64(ts->monotonic);
1823 o->entry.xor_hash = htole64(xor_hash);
1824 o->entry.boot_id = f->header->boot_id;
1825
1826 #if HAVE_GCRYPT
1827 r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
1828 if (r < 0)
1829 return r;
1830 #endif
1831
1832 r = journal_file_link_entry(f, o, np);
1833 if (r < 0)
1834 return r;
1835
1836 if (ret)
1837 *ret = o;
1838
1839 if (offset)
1840 *offset = np;
1841
1842 return 0;
1843 }
1844
1845 void journal_file_post_change(JournalFile *f) {
1846 assert(f);
1847
1848         /* inotify() does not receive IN_MODIFY events from file
1849          * accesses done via mmap(). Hence, after each access we
1850          * trigger IN_MODIFY ourselves by truncating the journal
1851          * file to its current size. */
1852
1853 __sync_synchronize();
1854
1855 if (ftruncate(f->fd, f->last_stat.st_size) < 0)
1856 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
1857 }
1858
1859 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
1860 assert(userdata);
1861
1862 journal_file_post_change(userdata);
1863
1864 return 1;
1865 }
1866
1867 static void schedule_post_change(JournalFile *f) {
1868 sd_event_source *timer;
1869 int enabled, r;
1870 uint64_t now;
1871
1872 assert(f);
1873 assert(f->post_change_timer);
1874
1875 timer = f->post_change_timer;
1876
1877 r = sd_event_source_get_enabled(timer, &enabled);
1878 if (r < 0) {
1879 log_debug_errno(r, "Failed to get ftruncate timer state: %m");
1880 goto fail;
1881 }
1882
1883 if (enabled == SD_EVENT_ONESHOT)
1884 return;
1885
1886 r = sd_event_now(sd_event_source_get_event(timer), CLOCK_MONOTONIC, &now);
1887 if (r < 0) {
1888 log_debug_errno(r, "Failed to get clock's now for scheduling ftruncate: %m");
1889 goto fail;
1890 }
1891
1892 r = sd_event_source_set_time(timer, now+f->post_change_timer_period);
1893 if (r < 0) {
1894 log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
1895 goto fail;
1896 }
1897
1898 r = sd_event_source_set_enabled(timer, SD_EVENT_ONESHOT);
1899 if (r < 0) {
1900 log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
1901 goto fail;
1902 }
1903
1904 return;
1905
1906 fail:
1907 /* On failure, let's simply post the change immediately. */
1908 journal_file_post_change(f);
1909 }
1910
1911 /* Enable coalesced change posting in a timer on the provided sd_event instance */
1912 int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
1913 _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
1914 int r;
1915
1916 assert(f);
1917 assert_return(!f->post_change_timer, -EINVAL);
1918 assert(e);
1919 assert(t);
1920
1921 r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
1922 if (r < 0)
1923 return r;
1924
1925 r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
1926 if (r < 0)
1927 return r;
1928
1929 f->post_change_timer = TAKE_PTR(timer);
1930 f->post_change_timer_period = t;
1931
1932 return r;
1933 }
1934
1935 static int entry_item_cmp(const void *_a, const void *_b) {
1936 const EntryItem *a = _a, *b = _b;
1937
1938 if (le64toh(a->object_offset) < le64toh(b->object_offset))
1939 return -1;
1940 if (le64toh(a->object_offset) > le64toh(b->object_offset))
1941 return 1;
1942 return 0;
1943 }
1944
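/* Illustrative call (not part of this file): appending one entry with two fields to an open,
 * writable journal file f, letting the function pick the current timestamps itself:
 *
 *     struct iovec iovec[2] = {
 *             { .iov_base = (char*) "MESSAGE=hello world", .iov_len = strlen("MESSAGE=hello world") },
 *             { .iov_base = (char*) "PRIORITY=6",          .iov_len = strlen("PRIORITY=6") },
 *     };
 *     uint64_t seqnum = 0;
 *
 *     r = journal_file_append_entry(f, NULL, iovec, 2, &seqnum, NULL, NULL);
 *     if (r < 0)
 *             return r;
 */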
1945 int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const struct iovec iovec[], unsigned n_iovec, uint64_t *seqnum, Object **ret, uint64_t *offset) {
1946 unsigned i;
1947 EntryItem *items;
1948 int r;
1949 uint64_t xor_hash = 0;
1950 struct dual_timestamp _ts;
1951
1952 assert(f);
1953 assert(f->header);
1954 assert(iovec || n_iovec == 0);
1955
1956 if (!ts) {
1957 dual_timestamp_get(&_ts);
1958 ts = &_ts;
1959 }
1960
1961 #if HAVE_GCRYPT
1962 r = journal_file_maybe_append_tag(f, ts->realtime);
1963 if (r < 0)
1964 return r;
1965 #endif
1966
1967 /* alloca() can't take 0, hence let's allocate at least one */
1968 items = newa(EntryItem, MAX(1u, n_iovec));
1969
1970 for (i = 0; i < n_iovec; i++) {
1971 uint64_t p;
1972 Object *o;
1973
1974 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
1975 if (r < 0)
1976 return r;
1977
1978 xor_hash ^= le64toh(o->data.hash);
1979 items[i].object_offset = htole64(p);
1980 items[i].hash = o->data.hash;
1981 }
1982
1983 /* Order by the position on disk, in order to improve seek
1984 * times for rotating media. */
1985 qsort_safe(items, n_iovec, sizeof(EntryItem), entry_item_cmp);
1986
1987 r = journal_file_append_entry_internal(f, ts, xor_hash, items, n_iovec, seqnum, ret, offset);
1988
1989 /* If the memory mapping triggered a SIGBUS then we return an
1990 * IO error and ignore the error code passed down to us, since
1991 * it is very likely just an effect of a nullified replacement
1992 * mapping page */
1993
1994 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
1995 r = -EIO;
1996
1997 if (f->post_change_timer)
1998 schedule_post_change(f);
1999 else
2000 journal_file_post_change(f);
2001
2002 return r;
2003 }
2004
2005 typedef struct ChainCacheItem {
2006 uint64_t first; /* the array at the beginning of the chain */
2007 uint64_t array; /* the cached array */
2008 uint64_t begin; /* the first item in the cached array */
2009 uint64_t total; /* the total number of items in all arrays before this one in the chain */
2010 uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
2011 } ChainCacheItem;
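/* Rough idea of how the chain cache is used (see chain_cache_put() and the generic_array_*()
 * helpers below): the cache is keyed by "first", the offset of the first entry array of a
 * chain. If a previous lookup ended up in the array at offset "array" with "total" items in
 * all arrays before it, a later lookup that needs to go past those items can jump straight to
 * that array instead of walking the chain from the start. At most CHAIN_CACHE_MAX (20) chains
 * are cached per file. */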
2012
2013 static void chain_cache_put(
2014 OrderedHashmap *h,
2015 ChainCacheItem *ci,
2016 uint64_t first,
2017 uint64_t array,
2018 uint64_t begin,
2019 uint64_t total,
2020 uint64_t last_index) {
2021
2022 if (!ci) {
2023                 /* If the array to cache for this chain is the
2024                  * first one in the chain, it's not worth caching anything */
2025 if (array == first)
2026 return;
2027
2028 if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
2029 ci = ordered_hashmap_steal_first(h);
2030 assert(ci);
2031 } else {
2032 ci = new(ChainCacheItem, 1);
2033 if (!ci)
2034 return;
2035 }
2036
2037 ci->first = first;
2038
2039 if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
2040 free(ci);
2041 return;
2042 }
2043 } else
2044 assert(ci->first == first);
2045
2046 ci->array = array;
2047 ci->begin = begin;
2048 ci->total = total;
2049 ci->last_index = last_index;
2050 }
2051
2052 static int generic_array_get(
2053 JournalFile *f,
2054 uint64_t first,
2055 uint64_t i,
2056 Object **ret, uint64_t *offset) {
2057
2058 Object *o;
2059 uint64_t p = 0, a, t = 0;
2060 int r;
2061 ChainCacheItem *ci;
2062
2063 assert(f);
2064
2065 a = first;
2066
2067 /* Try the chain cache first */
2068 ci = ordered_hashmap_get(f->chain_cache, &first);
2069 if (ci && i > ci->total) {
2070 a = ci->array;
2071 i -= ci->total;
2072 t = ci->total;
2073 }
2074
2075 while (a > 0) {
2076 uint64_t k;
2077
2078 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
2079 if (r < 0)
2080 return r;
2081
2082 k = journal_file_entry_array_n_items(o);
2083 if (i < k) {
2084 p = le64toh(o->entry_array.items[i]);
2085 goto found;
2086 }
2087
2088 i -= k;
2089 t += k;
2090 a = le64toh(o->entry_array.next_entry_array_offset);
2091 }
2092
2093 return 0;
2094
2095 found:
2096 /* Let's cache this item for the next invocation */
2097 chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
2098
2099 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2100 if (r < 0)
2101 return r;
2102
2103 if (ret)
2104 *ret = o;
2105
2106 if (offset)
2107 *offset = p;
2108
2109 return 1;
2110 }
2111
2112 static int generic_array_get_plus_one(
2113 JournalFile *f,
2114 uint64_t extra,
2115 uint64_t first,
2116 uint64_t i,
2117 Object **ret, uint64_t *offset) {
2118
2119 Object *o;
2120
2121 assert(f);
2122
2123 if (i == 0) {
2124 int r;
2125
2126 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2127 if (r < 0)
2128 return r;
2129
2130 if (ret)
2131 *ret = o;
2132
2133 if (offset)
2134 *offset = extra;
2135
2136 return 1;
2137 }
2138
2139 return generic_array_get(f, first, i-1, ret, offset);
2140 }
2141
2142 enum {
2143 TEST_FOUND,
2144 TEST_LEFT,
2145 TEST_RIGHT
2146 };
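/* Meaning of these values as inferred from generic_array_bisect() below: TEST_FOUND means the
 * object at the tested offset matches the needle, TEST_LEFT means it sorts before the needle
 * (the match, if any, lies further to the right), and TEST_RIGHT means it sorts at or after
 * the needle (the bisection should continue to the left). */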
2147
2148 static int generic_array_bisect(
2149 JournalFile *f,
2150 uint64_t first,
2151 uint64_t n,
2152 uint64_t needle,
2153 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2154 direction_t direction,
2155 Object **ret,
2156 uint64_t *offset,
2157 uint64_t *idx) {
2158
2159 uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
2160 bool subtract_one = false;
2161 Object *o, *array = NULL;
2162 int r;
2163 ChainCacheItem *ci;
2164
2165 assert(f);
2166 assert(test_object);
2167
2168 /* Start with the first array in the chain */
2169 a = first;
2170
2171 ci = ordered_hashmap_get(f->chain_cache, &first);
2172 if (ci && n > ci->total && ci->begin != 0) {
2173 /* Ah, we have iterated this bisection array chain
2174 * previously! Let's see if we can skip ahead in the
2175 * chain, as far as the last time. But we can't jump
2176 * backwards in the chain, so let's check that
2177 * first. */
2178
2179 r = test_object(f, ci->begin, needle);
2180 if (r < 0)
2181 return r;
2182
2183 if (r == TEST_LEFT) {
2184 /* OK, what we are looking for is to the right of
2185 * the beginning of this EntryArray, so let's jump
2186 * straight to the previously cached array in the
2187 * chain */
2188
2189 a = ci->array;
2190 n -= ci->total;
2191 t = ci->total;
2192 last_index = ci->last_index;
2193 }
2194 }
2195
2196 while (a > 0) {
2197 uint64_t left, right, k, lp;
2198
2199 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
2200 if (r < 0)
2201 return r;
2202
2203 k = journal_file_entry_array_n_items(array);
2204 right = MIN(k, n);
2205 if (right <= 0)
2206 return 0;
2207
2208 i = right - 1;
2209 lp = p = le64toh(array->entry_array.items[i]);
2210 if (p <= 0)
2211 r = -EBADMSG;
2212 else
2213 r = test_object(f, p, needle);
2214 if (r == -EBADMSG) {
2215 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2216 n = i;
2217 continue;
2218 }
2219 if (r < 0)
2220 return r;
2221
2222 if (r == TEST_FOUND)
2223 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2224
2225 if (r == TEST_RIGHT) {
2226 left = 0;
2227 right -= 1;
2228
2229 if (last_index != (uint64_t) -1) {
2230 assert(last_index <= right);
2231
2232 /* If we cached the last index we
2233 * looked at, let's try not to jump
2234 * around too wildly and see if we can
2235 * limit the range to look at early to
2236 * the immediate neighbors of the last
2237 * index we looked at. */
2238
2239 if (last_index > 0) {
2240 uint64_t x = last_index - 1;
2241
2242 p = le64toh(array->entry_array.items[x]);
2243 if (p <= 0)
2244 return -EBADMSG;
2245
2246 r = test_object(f, p, needle);
2247 if (r < 0)
2248 return r;
2249
2250 if (r == TEST_FOUND)
2251 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2252
2253 if (r == TEST_RIGHT)
2254 right = x;
2255 else
2256 left = x + 1;
2257 }
2258
2259 if (last_index < right) {
2260 uint64_t y = last_index + 1;
2261
2262 p = le64toh(array->entry_array.items[y]);
2263 if (p <= 0)
2264 return -EBADMSG;
2265
2266 r = test_object(f, p, needle);
2267 if (r < 0)
2268 return r;
2269
2270 if (r == TEST_FOUND)
2271 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2272
2273 if (r == TEST_RIGHT)
2274 right = y;
2275 else
2276 left = y + 1;
2277 }
2278 }
2279
2280 for (;;) {
2281 if (left == right) {
2282 if (direction == DIRECTION_UP)
2283 subtract_one = true;
2284
2285 i = left;
2286 goto found;
2287 }
2288
2289 assert(left < right);
2290 i = (left + right) / 2;
2291
2292 p = le64toh(array->entry_array.items[i]);
2293 if (p <= 0)
2294 r = -EBADMSG;
2295 else
2296 r = test_object(f, p, needle);
2297 if (r == -EBADMSG) {
2298 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2299 right = n = i;
2300 continue;
2301 }
2302 if (r < 0)
2303 return r;
2304
2305 if (r == TEST_FOUND)
2306 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2307
2308 if (r == TEST_RIGHT)
2309 right = i;
2310 else
2311 left = i + 1;
2312 }
2313 }
2314
2315 if (k >= n) {
2316 if (direction == DIRECTION_UP) {
2317 i = n;
2318 subtract_one = true;
2319 goto found;
2320 }
2321
2322 return 0;
2323 }
2324
2325 last_p = lp;
2326
2327 n -= k;
2328 t += k;
2329 last_index = (uint64_t) -1;
2330 a = le64toh(array->entry_array.next_entry_array_offset);
2331 }
2332
2333 return 0;
2334
2335 found:
2336 if (subtract_one && t == 0 && i == 0)
2337 return 0;
2338
2339 /* Let's cache this item for the next invocation */
2340 chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);
2341
2342 if (subtract_one && i == 0)
2343 p = last_p;
2344 else if (subtract_one)
2345 p = le64toh(array->entry_array.items[i-1]);
2346 else
2347 p = le64toh(array->entry_array.items[i]);
2348
2349 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2350 if (r < 0)
2351 return r;
2352
2353 if (ret)
2354 *ret = o;
2355
2356 if (offset)
2357 *offset = p;
2358
2359 if (idx)
2360 *idx = t + i + (subtract_one ? -1 : 0);
2361
2362 return 1;
2363 }
2364
2365 static int generic_array_bisect_plus_one(
2366 JournalFile *f,
2367 uint64_t extra,
2368 uint64_t first,
2369 uint64_t n,
2370 uint64_t needle,
2371 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2372 direction_t direction,
2373 Object **ret,
2374 uint64_t *offset,
2375 uint64_t *idx) {
2376
2377 int r;
2378 bool step_back = false;
2379 Object *o;
2380
2381 assert(f);
2382 assert(test_object);
2383
2384 if (n <= 0)
2385 return 0;
2386
2387 /* This bisects the array in object 'first', but first checks
2388 * the extra entry at offset 'extra'. */
2389 r = test_object(f, extra, needle);
2390 if (r < 0)
2391 return r;
2392
2393 if (r == TEST_FOUND)
2394 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2395
2396 /* If we are looking with DIRECTION_UP then we need to check
2397 first whether there is a matching entry in the actual array,
2398 and return the last of those. But if there isn't any, we need
2399 to return this extra one instead. Hence remember this, and
2400 return it below. */
2401 if (r == TEST_LEFT)
2402 step_back = direction == DIRECTION_UP;
2403
2404 if (r == TEST_RIGHT) {
2405 if (direction == DIRECTION_DOWN)
2406 goto found;
2407 else
2408 return 0;
2409 }
2410
2411 r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, offset, idx);
2412
2413 if (r == 0 && step_back)
2414 goto found;
2415
2416 if (r > 0 && idx)
2417 (*idx)++;
2418
2419 return r;
2420
2421 found:
2422 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2423 if (r < 0)
2424 return r;
2425
2426 if (ret)
2427 *ret = o;
2428
2429 if (offset)
2430 *offset = extra;
2431
2432 if (idx)
2433 *idx = 0;
2434
2435 return 1;
2436 }
2437
2438 _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
2439 assert(f);
2440 assert(p > 0);
2441
2442 if (p == needle)
2443 return TEST_FOUND;
2444 else if (p < needle)
2445 return TEST_LEFT;
2446 else
2447 return TEST_RIGHT;
2448 }
2449
2450 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
2451 Object *o;
2452 int r;
2453
2454 assert(f);
2455 assert(p > 0);
2456
2457 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2458 if (r < 0)
2459 return r;
2460
2461 if (le64toh(o->entry.seqnum) == needle)
2462 return TEST_FOUND;
2463 else if (le64toh(o->entry.seqnum) < needle)
2464 return TEST_LEFT;
2465 else
2466 return TEST_RIGHT;
2467 }
2468
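/* A minimal usage sketch (not part of this file). 'f' is assumed to be an open
 * JournalFile and 'needle' a uint64_t sequence number:
 *
 *     Object *o;
 *     uint64_t p;
 *     int r;
 *
 *     r = journal_file_move_to_entry_by_seqnum(f, needle, DIRECTION_DOWN, &o, &p);
 *     if (r < 0)
 *             return r;        // lookup failed
 *     if (r == 0)
 *             return -ENOENT;  // no entry with a seqnum >= needle exists
 *     // on success, 'o' is the first OBJECT_ENTRY with seqnum >= needle, at offset 'p'
 */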
2469 int journal_file_move_to_entry_by_seqnum(
2470 JournalFile *f,
2471 uint64_t seqnum,
2472 direction_t direction,
2473 Object **ret,
2474 uint64_t *offset) {
2475 assert(f);
2476 assert(f->header);
2477
2478 return generic_array_bisect(f,
2479 le64toh(f->header->entry_array_offset),
2480 le64toh(f->header->n_entries),
2481 seqnum,
2482 test_object_seqnum,
2483 direction,
2484 ret, offset, NULL);
2485 }
2486
2487 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
2488 Object *o;
2489 int r;
2490
2491 assert(f);
2492 assert(p > 0);
2493
2494 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2495 if (r < 0)
2496 return r;
2497
2498 if (le64toh(o->entry.realtime) == needle)
2499 return TEST_FOUND;
2500 else if (le64toh(o->entry.realtime) < needle)
2501 return TEST_LEFT;
2502 else
2503 return TEST_RIGHT;
2504 }
2505
2506 int journal_file_move_to_entry_by_realtime(
2507 JournalFile *f,
2508 uint64_t realtime,
2509 direction_t direction,
2510 Object **ret,
2511 uint64_t *offset) {
2512 assert(f);
2513 assert(f->header);
2514
2515 return generic_array_bisect(f,
2516 le64toh(f->header->entry_array_offset),
2517 le64toh(f->header->n_entries),
2518 realtime,
2519 test_object_realtime,
2520 direction,
2521 ret, offset, NULL);
2522 }
2523
2524 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
2525 Object *o;
2526 int r;
2527
2528 assert(f);
2529 assert(p > 0);
2530
2531 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2532 if (r < 0)
2533 return r;
2534
2535 if (le64toh(o->entry.monotonic) == needle)
2536 return TEST_FOUND;
2537 else if (le64toh(o->entry.monotonic) < needle)
2538 return TEST_LEFT;
2539 else
2540 return TEST_RIGHT;
2541 }
2542
2543 static int find_data_object_by_boot_id(
2544 JournalFile *f,
2545 sd_id128_t boot_id,
2546 Object **o,
2547 uint64_t *b) {
2548
2549 char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
2550
2551 sd_id128_to_string(boot_id, t + 9);
2552 return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);
2553 }
2554
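/* Monotonic timestamps are only meaningful within a single boot, hence seeking by
 * monotonic time first resolves the _BOOT_ID= data object for the given boot and then
 * bisects the list of entries referencing that object. */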
2555 int journal_file_move_to_entry_by_monotonic(
2556 JournalFile *f,
2557 sd_id128_t boot_id,
2558 uint64_t monotonic,
2559 direction_t direction,
2560 Object **ret,
2561 uint64_t *offset) {
2562
2563 Object *o;
2564 int r;
2565
2566 assert(f);
2567
2568 r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
2569 if (r < 0)
2570 return r;
2571 if (r == 0)
2572 return -ENOENT;
2573
2574 return generic_array_bisect_plus_one(f,
2575 le64toh(o->data.entry_offset),
2576 le64toh(o->data.entry_array_offset),
2577 le64toh(o->data.n_entries),
2578 monotonic,
2579 test_object_monotonic,
2580 direction,
2581 ret, offset, NULL);
2582 }
2583
2584 void journal_file_reset_location(JournalFile *f) {
2585 f->location_type = LOCATION_HEAD;
2586 f->current_offset = 0;
2587 f->current_seqnum = 0;
2588 f->current_realtime = 0;
2589 f->current_monotonic = 0;
2590 zero(f->current_boot_id);
2591 f->current_xor_hash = 0;
2592 }
2593
2594 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
2595 f->location_type = LOCATION_SEEK;
2596 f->current_offset = offset;
2597 f->current_seqnum = le64toh(o->entry.seqnum);
2598 f->current_realtime = le64toh(o->entry.realtime);
2599 f->current_monotonic = le64toh(o->entry.monotonic);
2600 f->current_boot_id = o->entry.boot_id;
2601 f->current_xor_hash = le64toh(o->entry.xor_hash);
2602 }
2603
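/* Orders the current locations of two journal files: returns a negative value, 0 or a
 * positive value if af's current entry sorts before, equal to or after bf's, which is
 * useful e.g. when interleaving entries from multiple journal files into one stream. */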
2604 int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
2605 assert(af);
2606 assert(af->header);
2607 assert(bf);
2608 assert(bf->header);
2609 assert(af->location_type == LOCATION_SEEK);
2610 assert(bf->location_type == LOCATION_SEEK);
2611
2612 /* If contents and timestamps match, these entries are
2613 * identical, even if the seqnum does not match */
2614 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
2615 af->current_monotonic == bf->current_monotonic &&
2616 af->current_realtime == bf->current_realtime &&
2617 af->current_xor_hash == bf->current_xor_hash)
2618 return 0;
2619
2620 if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
2621
2622 /* If this is from the same seqnum source, compare
2623 * seqnums */
2624 if (af->current_seqnum < bf->current_seqnum)
2625 return -1;
2626 if (af->current_seqnum > bf->current_seqnum)
2627 return 1;
2628
2629 /* Wow! This is weird, different data but the same
2630 * seqnums? Something is borked, but let's make the
2631 * best of it and compare by time. */
2632 }
2633
2634 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {
2635
2636 /* If the boot id matches, compare monotonic time */
2637 if (af->current_monotonic < bf->current_monotonic)
2638 return -1;
2639 if (af->current_monotonic > bf->current_monotonic)
2640 return 1;
2641 }
2642
2643 /* Otherwise, compare UTC time */
2644 if (af->current_realtime < bf->current_realtime)
2645 return -1;
2646 if (af->current_realtime > bf->current_realtime)
2647 return 1;
2648
2649 /* Finally, compare by contents */
2650 if (af->current_xor_hash < bf->current_xor_hash)
2651 return -1;
2652 if (af->current_xor_hash > bf->current_xor_hash)
2653 return 1;
2654
2655 return 0;
2656 }
2657
2658 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2659
2660 /* Increase or decrease the specified index, in the right direction. */
2661
2662 if (direction == DIRECTION_DOWN) {
2663 if (*i >= n - 1)
2664 return 0;
2665
2666 (*i) ++;
2667 } else {
2668 if (*i <= 0)
2669 return 0;
2670
2671 (*i) --;
2672 }
2673
2674 return 1;
2675 }
2676
2677 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
2678
2679 /* Consider it an error if either of the two offsets is uninitialized */
2680 if (old_offset == 0 || new_offset == 0)
2681 return false;
2682
2683 /* If we go down, the new offset must be larger than the old one. */
2684 return direction == DIRECTION_DOWN ?
2685 new_offset > old_offset :
2686 new_offset < old_offset;
2687 }
2688
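/* Returns the entry following (DIRECTION_DOWN) or preceding (DIRECTION_UP) the entry at
 * offset p in the global entry array; if p is 0, iteration starts at the very beginning
 * or end. Entries that fail validation are skipped, and the array ordering is verified. */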
2689 int journal_file_next_entry(
2690 JournalFile *f,
2691 uint64_t p,
2692 direction_t direction,
2693 Object **ret, uint64_t *offset) {
2694
2695 uint64_t i, n, ofs;
2696 int r;
2697
2698 assert(f);
2699 assert(f->header);
2700
2701 n = le64toh(f->header->n_entries);
2702 if (n <= 0)
2703 return 0;
2704
2705 if (p == 0)
2706 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2707 else {
2708 r = generic_array_bisect(f,
2709 le64toh(f->header->entry_array_offset),
2710 le64toh(f->header->n_entries),
2711 p,
2712 test_object_offset,
2713 DIRECTION_DOWN,
2714 NULL, NULL,
2715 &i);
2716 if (r <= 0)
2717 return r;
2718
2719 r = bump_array_index(&i, direction, n);
2720 if (r <= 0)
2721 return r;
2722 }
2723
2724 /* And jump to it */
2725 for (;;) {
2726 r = generic_array_get(f,
2727 le64toh(f->header->entry_array_offset),
2728 i,
2729 ret, &ofs);
2730 if (r > 0)
2731 break;
2732 if (r != -EBADMSG)
2733 return r;
2734
2735 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2736 * the next one might work for us instead. */
2737 log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
2738
2739 r = bump_array_index(&i, direction, n);
2740 if (r <= 0)
2741 return r;
2742 }
2743
2744 /* Ensure our array is properly ordered. */
2745 if (p > 0 && !check_properly_ordered(ofs, p, direction)) {
2746 log_debug("%s: entry array not properly ordered at entry %" PRIu64, f->path, i);
2747 return -EBADMSG;
2748 }
2749
2750 if (offset)
2751 *offset = ofs;
2752
2753 return 1;
2754 }
2755
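/* Like journal_file_next_entry(), but iterates only over the entries referencing the
 * data object at data_offset, i.e. only over entries carrying that particular
 * FIELD=value item. */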
2756 int journal_file_next_entry_for_data(
2757 JournalFile *f,
2758 Object *o, uint64_t p,
2759 uint64_t data_offset,
2760 direction_t direction,
2761 Object **ret, uint64_t *offset) {
2762
2763 uint64_t i, n, ofs;
2764 Object *d;
2765 int r;
2766
2767 assert(f);
2768 assert(p > 0 || !o);
2769
2770 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2771 if (r < 0)
2772 return r;
2773
2774 n = le64toh(d->data.n_entries);
2775 if (n <= 0)
2776 return n;
2777
2778 if (!o)
2779 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2780 else {
2781 if (o->object.type != OBJECT_ENTRY)
2782 return -EINVAL;
2783
2784 r = generic_array_bisect_plus_one(f,
2785 le64toh(d->data.entry_offset),
2786 le64toh(d->data.entry_array_offset),
2787 le64toh(d->data.n_entries),
2788 p,
2789 test_object_offset,
2790 DIRECTION_DOWN,
2791 NULL, NULL,
2792 &i);
2793
2794 if (r <= 0)
2795 return r;
2796
2797 r = bump_array_index(&i, direction, n);
2798 if (r <= 0)
2799 return r;
2800 }
2801
2802 for (;;) {
2803 r = generic_array_get_plus_one(f,
2804 le64toh(d->data.entry_offset),
2805 le64toh(d->data.entry_array_offset),
2806 i,
2807 ret, &ofs);
2808 if (r > 0)
2809 break;
2810 if (r != -EBADMSG)
2811 return r;
2812
2813 log_debug_errno(r, "Data entry item %" PRIu64 " is bad, skipping over it.", i);
2814
2815 r = bump_array_index(&i, direction, n);
2816 if (r <= 0)
2817 return r;
2818 }
2819
2820 /* Ensure our array is properly ordered. */
2821 if (p > 0 && !check_properly_ordered(ofs, p, direction)) {
2822 log_debug("%s: data entry array not properly ordered at entry %" PRIu64, f->path, i);
2823 return -EBADMSG;
2824 }
2825
2826 if (offset)
2827 *offset = ofs;
2828
2829 return 1;
2830 }
2831
2832 int journal_file_move_to_entry_by_offset_for_data(
2833 JournalFile *f,
2834 uint64_t data_offset,
2835 uint64_t p,
2836 direction_t direction,
2837 Object **ret, uint64_t *offset) {
2838
2839 int r;
2840 Object *d;
2841
2842 assert(f);
2843
2844 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2845 if (r < 0)
2846 return r;
2847
2848 return generic_array_bisect_plus_one(f,
2849 le64toh(d->data.entry_offset),
2850 le64toh(d->data.entry_array_offset),
2851 le64toh(d->data.n_entries),
2852 p,
2853 test_object_offset,
2854 direction,
2855 ret, offset, NULL);
2856 }
2857
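/* Seeks to an entry contained in both entry lists: that of the _BOOT_ID= data object
 * matching boot_id (which orders entries by the monotonic clock of that boot) and that
 * of the data object at data_offset. The loop below alternates bisections between the
 * two lists until they converge on the same entry offset. */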
2858 int journal_file_move_to_entry_by_monotonic_for_data(
2859 JournalFile *f,
2860 uint64_t data_offset,
2861 sd_id128_t boot_id,
2862 uint64_t monotonic,
2863 direction_t direction,
2864 Object **ret, uint64_t *offset) {
2865
2866 Object *o, *d;
2867 int r;
2868 uint64_t b, z;
2869
2870 assert(f);
2871
2872 /* First, seek by time */
2873 r = find_data_object_by_boot_id(f, boot_id, &o, &b);
2874 if (r < 0)
2875 return r;
2876 if (r == 0)
2877 return -ENOENT;
2878
2879 r = generic_array_bisect_plus_one(f,
2880 le64toh(o->data.entry_offset),
2881 le64toh(o->data.entry_array_offset),
2882 le64toh(o->data.n_entries),
2883 monotonic,
2884 test_object_monotonic,
2885 direction,
2886 NULL, &z, NULL);
2887 if (r <= 0)
2888 return r;
2889
2890 /* And now, continue seeking until we find an entry that
2891 * exists in both bisection arrays */
2892
2893 for (;;) {
2894 Object *qo;
2895 uint64_t p, q;
2896
2897 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2898 if (r < 0)
2899 return r;
2900
2901 r = generic_array_bisect_plus_one(f,
2902 le64toh(d->data.entry_offset),
2903 le64toh(d->data.entry_array_offset),
2904 le64toh(d->data.n_entries),
2905 z,
2906 test_object_offset,
2907 direction,
2908 NULL, &p, NULL);
2909 if (r <= 0)
2910 return r;
2911
2912 r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
2913 if (r < 0)
2914 return r;
2915
2916 r = generic_array_bisect_plus_one(f,
2917 le64toh(o->data.entry_offset),
2918 le64toh(o->data.entry_array_offset),
2919 le64toh(o->data.n_entries),
2920 p,
2921 test_object_offset,
2922 direction,
2923 &qo, &q, NULL);
2924
2925 if (r <= 0)
2926 return r;
2927
2928 if (p == q) {
2929 if (ret)
2930 *ret = qo;
2931 if (offset)
2932 *offset = q;
2933
2934 return 1;
2935 }
2936
2937 z = q;
2938 }
2939 }
2940
2941 int journal_file_move_to_entry_by_seqnum_for_data(
2942 JournalFile *f,
2943 uint64_t data_offset,
2944 uint64_t seqnum,
2945 direction_t direction,
2946 Object **ret, uint64_t *offset) {
2947
2948 Object *d;
2949 int r;
2950
2951 assert(f);
2952
2953 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2954 if (r < 0)
2955 return r;
2956
2957 return generic_array_bisect_plus_one(f,
2958 le64toh(d->data.entry_offset),
2959 le64toh(d->data.entry_array_offset),
2960 le64toh(d->data.n_entries),
2961 seqnum,
2962 test_object_seqnum,
2963 direction,
2964 ret, offset, NULL);
2965 }
2966
2967 int journal_file_move_to_entry_by_realtime_for_data(
2968 JournalFile *f,
2969 uint64_t data_offset,
2970 uint64_t realtime,
2971 direction_t direction,
2972 Object **ret, uint64_t *offset) {
2973
2974 Object *d;
2975 int r;
2976
2977 assert(f);
2978
2979 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2980 if (r < 0)
2981 return r;
2982
2983 return generic_array_bisect_plus_one(f,
2984 le64toh(d->data.entry_offset),
2985 le64toh(d->data.entry_array_offset),
2986 le64toh(d->data.n_entries),
2987 realtime,
2988 test_object_realtime,
2989 direction,
2990 ret, offset, NULL);
2991 }
2992
2993 void journal_file_dump(JournalFile *f) {
2994 Object *o;
2995 int r;
2996 uint64_t p;
2997
2998 assert(f);
2999 assert(f->header);
3000
3001 journal_file_print_header(f);
3002
3003 p = le64toh(f->header->header_size);
3004 while (p != 0) {
3005 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
3006 if (r < 0)
3007 goto fail;
3008
3009 switch (o->object.type) {
3010
3011 case OBJECT_UNUSED:
3012 printf("Type: OBJECT_UNUSED\n");
3013 break;
3014
3015 case OBJECT_DATA:
3016 printf("Type: OBJECT_DATA\n");
3017 break;
3018
3019 case OBJECT_FIELD:
3020 printf("Type: OBJECT_FIELD\n");
3021 break;
3022
3023 case OBJECT_ENTRY:
3024 printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
3025 le64toh(o->entry.seqnum),
3026 le64toh(o->entry.monotonic),
3027 le64toh(o->entry.realtime));
3028 break;
3029
3030 case OBJECT_FIELD_HASH_TABLE:
3031 printf("Type: OBJECT_FIELD_HASH_TABLE\n");
3032 break;
3033
3034 case OBJECT_DATA_HASH_TABLE:
3035 printf("Type: OBJECT_DATA_HASH_TABLE\n");
3036 break;
3037
3038 case OBJECT_ENTRY_ARRAY:
3039 printf("Type: OBJECT_ENTRY_ARRAY\n");
3040 break;
3041
3042 case OBJECT_TAG:
3043 printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
3044 le64toh(o->tag.seqnum),
3045 le64toh(o->tag.epoch));
3046 break;
3047
3048 default:
3049 printf("Type: unknown (%i)\n", o->object.type);
3050 break;
3051 }
3052
3053 if (o->object.flags & OBJECT_COMPRESSION_MASK)
3054 printf("Flags: %s\n",
3055 object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));
3056
3057 if (p == le64toh(f->header->tail_object_offset))
3058 p = 0;
3059 else
3060 p = p + ALIGN64(le64toh(o->object.size));
3061 }
3062
3063 return;
3064 fail:
3065 log_error("File corrupt");
3066 }
3067
3068 static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {
3069 const char *x;
3070
3071 x = format_timestamp(buf, l, t);
3072 if (x)
3073 return x;
3074 return " --- ";
3075 }
3076
3077 void journal_file_print_header(JournalFile *f) {
3078 char a[33], b[33], c[33], d[33];
3079 char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
3080 struct stat st;
3081 char bytes[FORMAT_BYTES_MAX];
3082
3083 assert(f);
3084 assert(f->header);
3085
3086 printf("File Path: %s\n"
3087 "File ID: %s\n"
3088 "Machine ID: %s\n"
3089 "Boot ID: %s\n"
3090 "Sequential Number ID: %s\n"
3091 "State: %s\n"
3092 "Compatible Flags:%s%s\n"
3093 "Incompatible Flags:%s%s%s\n"
3094 "Header size: %"PRIu64"\n"
3095 "Arena size: %"PRIu64"\n"
3096 "Data Hash Table Size: %"PRIu64"\n"
3097 "Field Hash Table Size: %"PRIu64"\n"
3098 "Rotate Suggested: %s\n"
3099 "Head Sequential Number: %"PRIu64" (%"PRIx64")\n"
3100 "Tail Sequential Number: %"PRIu64" (%"PRIx64")\n"
3101 "Head Realtime Timestamp: %s (%"PRIx64")\n"
3102 "Tail Realtime Timestamp: %s (%"PRIx64")\n"
3103 "Tail Monotonic Timestamp: %s (%"PRIx64")\n"
3104 "Objects: %"PRIu64"\n"
3105 "Entry Objects: %"PRIu64"\n",
3106 f->path,
3107 sd_id128_to_string(f->header->file_id, a),
3108 sd_id128_to_string(f->header->machine_id, b),
3109 sd_id128_to_string(f->header->boot_id, c),
3110 sd_id128_to_string(f->header->seqnum_id, d),
3111 f->header->state == STATE_OFFLINE ? "OFFLINE" :
3112 f->header->state == STATE_ONLINE ? "ONLINE" :
3113 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
3114 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
3115 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
3116 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
3117 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
3118 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
3119 le64toh(f->header->header_size),
3120 le64toh(f->header->arena_size),
3121 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3122 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
3123 yes_no(journal_file_rotate_suggested(f, 0)),
3124 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
3125 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
3126 format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
3127 format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
3128 format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
3129 le64toh(f->header->n_objects),
3130 le64toh(f->header->n_entries));
3131
3132 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3133 printf("Data Objects: %"PRIu64"\n"
3134 "Data Hash Table Fill: %.1f%%\n",
3135 le64toh(f->header->n_data),
3136 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
3137
3138 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3139 printf("Field Objects: %"PRIu64"\n"
3140 "Field Hash Table Fill: %.1f%%\n",
3141 le64toh(f->header->n_fields),
3142 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
3143
3144 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
3145 printf("Tag Objects: %"PRIu64"\n",
3146 le64toh(f->header->n_tags));
3147 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
3148 printf("Entry Array Objects: %"PRIu64"\n",
3149 le64toh(f->header->n_entry_arrays));
3150
3151 if (fstat(f->fd, &st) >= 0)
3152 printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
3153 }
3154
3155 static int journal_file_warn_btrfs(JournalFile *f) {
3156 unsigned attrs;
3157 int r;
3158
3159 assert(f);
3160
3161 /* Before we write anything, check whether the COW logic is
3162 * turned off on btrfs. Given that our write pattern is quite
3163 * unfriendly to COW file systems, turning COW off should greatly
3164 * improve performance on btrfs, at the expense of data integrity
3165 * features (which shouldn't be too bad, given that we do our own
3166 * checksumming). */
3167
3168 r = btrfs_is_filesystem(f->fd);
3169 if (r < 0)
3170 return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
3171 if (!r)
3172 return 0;
3173
3174 r = read_attr_fd(f->fd, &attrs);
3175 if (r < 0)
3176 return log_warning_errno(r, "Failed to read file attributes: %m");
3177
3178 if (attrs & FS_NOCOW_FL) {
3179 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3180 return 0;
3181 }
3182
3183 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3184 "This is likely to slow down journal access substantially; please consider turning "
3185 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
3186
3187 return 1;
3188 }
3189
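/* A minimal usage sketch (not part of this file). 'path' is assumed to name an existing
 * .journal file; it is opened read-only, without compression, sealing, explicit metrics,
 * a shared mmap cache, a deferred-close set or a template:
 *
 *     JournalFile *f;
 *     int r;
 *
 *     r = journal_file_open(-1, path, O_RDONLY, 0640, false, (uint64_t) -1, false,
 *                           NULL, NULL, NULL, NULL, &f);
 *     if (r < 0)
 *             return r;
 *     ...
 *     (void) journal_file_close(f);
 */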
3190 int journal_file_open(
3191 int fd,
3192 const char *fname,
3193 int flags,
3194 mode_t mode,
3195 bool compress,
3196 uint64_t compress_threshold_bytes,
3197 bool seal,
3198 JournalMetrics *metrics,
3199 MMapCache *mmap_cache,
3200 Set *deferred_closes,
3201 JournalFile *template,
3202 JournalFile **ret) {
3203
3204 bool newly_created = false;
3205 JournalFile *f;
3206 void *h;
3207 int r;
3208 char bytes[FORMAT_BYTES_MAX];
3209
3210 assert(ret);
3211 assert(fd >= 0 || fname);
3212
3213 if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))
3214 return -EINVAL;
3215
3216 if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))
3217 return -EINVAL;
3218
3219 f = new0(JournalFile, 1);
3220 if (!f)
3221 return -ENOMEM;
3222
3223 f->fd = fd;
3224 f->mode = mode;
3225
3226 f->flags = flags;
3227 f->prot = prot_from_flags(flags);
3228 f->writable = (flags & O_ACCMODE) != O_RDONLY;
3229 #if HAVE_LZ4
3230 f->compress_lz4 = compress;
3231 #elif HAVE_XZ
3232 f->compress_xz = compress;
3233 #endif
3234
3235 if (compress_threshold_bytes == (uint64_t) -1)
3236 f->compress_threshold_bytes = DEFAULT_COMPRESS_THRESHOLD;
3237 else
3238 f->compress_threshold_bytes = MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes);
3239
3240 #if HAVE_GCRYPT
3241 f->seal = seal;
3242 #endif
3243
3244 log_debug("Journal effective settings seal=%s compress=%s compress_threshold_bytes=%s",
3245 yes_no(f->seal), yes_no(JOURNAL_FILE_COMPRESS(f)),
3246 format_bytes(bytes, sizeof(bytes), f->compress_threshold_bytes));
3247
3248 if (mmap_cache)
3249 f->mmap = mmap_cache_ref(mmap_cache);
3250 else {
3251 f->mmap = mmap_cache_new();
3252 if (!f->mmap) {
3253 r = -ENOMEM;
3254 goto fail;
3255 }
3256 }
3257
3258 if (fname) {
3259 f->path = strdup(fname);
3260 if (!f->path) {
3261 r = -ENOMEM;
3262 goto fail;
3263 }
3264 } else {
3265 assert(fd >= 0);
3266
3267 /* If we don't know the path, fill in something explanatory and vaguely useful */
3268 if (asprintf(&f->path, "/proc/self/fd/%i", fd) < 0) {
3269 r = -ENOMEM;
3270 goto fail;
3271 }
3272 }
3273
3274 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
3275 if (!f->chain_cache) {
3276 r = -ENOMEM;
3277 goto fail;
3278 }
3279
3280 if (f->fd < 0) {
3281 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
3282 * or so, we fail quickly rather than block for long. For regular files O_NONBLOCK has no effect, hence
3283 * it doesn't hurt in that case. */
3284
3285 f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);
3286 if (f->fd < 0) {
3287 r = -errno;
3288 goto fail;
3289 }
3290
3291 /* fds we opened here ourselves should also be closed by us. */
3292 f->close_fd = true;
3293
3294 r = fd_nonblock(f->fd, false);
3295 if (r < 0)
3296 goto fail;
3297 }
3298
3299 f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd);
3300 if (!f->cache_fd) {
3301 r = -ENOMEM;
3302 goto fail;
3303 }
3304
3305 r = journal_file_fstat(f);
3306 if (r < 0)
3307 goto fail;
3308
3309 if (f->last_stat.st_size == 0 && f->writable) {
3310
3311 (void) journal_file_warn_btrfs(f);
3312
3313 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
3314 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
3315 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
3316 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
3317 * solely on mtime/atime/ctime of the file. */
3318 (void) fd_setcrtime(f->fd, 0);
3319
3320 #if HAVE_GCRYPT
3321 /* Try to load the FSPRG state, and if we can't, then
3322 * just don't do sealing */
3323 if (f->seal) {
3324 r = journal_file_fss_load(f);
3325 if (r < 0)
3326 f->seal = false;
3327 }
3328 #endif
3329
3330 r = journal_file_init_header(f, template);
3331 if (r < 0)
3332 goto fail;
3333
3334 r = journal_file_fstat(f);
3335 if (r < 0)
3336 goto fail;
3337
3338 newly_created = true;
3339 }
3340
3341 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
3342 r = -ENODATA;
3343 goto fail;
3344 }
3345
3346 r = mmap_cache_get(f->mmap, f->cache_fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h, NULL);
3347 if (r < 0)
3348 goto fail;
3349
3350 f->header = h;
3351
3352 if (!newly_created) {
3353 set_clear_with_destructor(deferred_closes, journal_file_close);
3354
3355 r = journal_file_verify_header(f);
3356 if (r < 0)
3357 goto fail;
3358 }
3359
3360 #if HAVE_GCRYPT
3361 if (!newly_created && f->writable) {
3362 r = journal_file_fss_load(f);
3363 if (r < 0)
3364 goto fail;
3365 }
3366 #endif
3367
3368 if (f->writable) {
3369 if (metrics) {
3370 journal_default_metrics(metrics, f->fd);
3371 f->metrics = *metrics;
3372 } else if (template)
3373 f->metrics = template->metrics;
3374
3375 r = journal_file_refresh_header(f);
3376 if (r < 0)
3377 goto fail;
3378 }
3379
3380 #if HAVE_GCRYPT
3381 r = journal_file_hmac_setup(f);
3382 if (r < 0)
3383 goto fail;
3384 #endif
3385
3386 if (newly_created) {
3387 r = journal_file_setup_field_hash_table(f);
3388 if (r < 0)
3389 goto fail;
3390
3391 r = journal_file_setup_data_hash_table(f);
3392 if (r < 0)
3393 goto fail;
3394
3395 #if HAVE_GCRYPT
3396 r = journal_file_append_first_tag(f);
3397 if (r < 0)
3398 goto fail;
3399 #endif
3400 }
3401
3402 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) {
3403 r = -EIO;
3404 goto fail;
3405 }
3406
3407 if (template && template->post_change_timer) {
3408 r = journal_file_enable_post_change_timer(
3409 f,
3410 sd_event_source_get_event(template->post_change_timer),
3411 template->post_change_timer_period);
3412
3413 if (r < 0)
3414 goto fail;
3415 }
3416
3417 /* The file is now opened successfully, thus we take possession of any passed-in fd. */
3418 f->close_fd = true;
3419
3420 *ret = f;
3421 return 0;
3422
3423 fail:
3424 if (f->cache_fd && mmap_cache_got_sigbus(f->mmap, f->cache_fd))
3425 r = -EIO;
3426
3427 (void) journal_file_close(f);
3428
3429 return r;
3430 }
3431
3432 int journal_file_rotate(JournalFile **f, bool compress, uint64_t compress_threshold_bytes, bool seal, Set *deferred_closes) {
3433 _cleanup_free_ char *p = NULL;
3434 size_t l;
3435 JournalFile *old_file, *new_file = NULL;
3436 int r;
3437
3438 assert(f);
3439 assert(*f);
3440
3441 old_file = *f;
3442
3443 if (!old_file->writable)
3444 return -EINVAL;
3445
3446 /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3447 * rotation, since we don't know the actual path and hence couldn't rename the file. */
3448 if (path_startswith(old_file->path, "/proc/self/fd"))
3449 return -EINVAL;
3450
3451 if (!endswith(old_file->path, ".journal"))
3452 return -EINVAL;
3453
3454 l = strlen(old_file->path);
3455 r = asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
3456 (int) l - 8, old_file->path,
3457 SD_ID128_FORMAT_VAL(old_file->header->seqnum_id),
3458 le64toh((*f)->header->head_entry_seqnum),
3459 le64toh((*f)->header->head_entry_realtime));
3460 if (r < 0)
3461 return -ENOMEM;
3462
3463 /* Try to rename the file to the archived version. If the file
3464 * already was deleted, we'll get ENOENT, let's ignore that
3465 * case. */
3466 r = rename(old_file->path, p);
3467 if (r < 0 && errno != ENOENT)
3468 return -errno;
3469
3470 /* Sync the rename to disk */
3471 (void) fsync_directory_of_file(old_file->fd);
3472
3473 /* Set as archive so offlining commits w/state=STATE_ARCHIVED.
3474 * Previously we would set old_file->header->state to STATE_ARCHIVED directly here,
3475 * but journal_file_set_offline() short-circuits when state != STATE_ONLINE, which
3476 * would result in the rotated journal never getting fsync() called before closing.
3477 * Now we simply queue the archive state by setting an archive bit, leaving the state
3478 * as STATE_ONLINE so proper offlining occurs. */
3479 old_file->archive = true;
3480
3481 /* Currently, btrfs is not very good with our write patterns
3482 * and fragments heavily. Let's defrag our journal files when
3483 * we archive them. */
3484 old_file->defrag_on_close = true;
3485
3486 r = journal_file_open(-1, old_file->path, old_file->flags, old_file->mode, compress,
3487 compress_threshold_bytes, seal, NULL, old_file->mmap, deferred_closes,
3488 old_file, &new_file);
3489
3490 if (deferred_closes &&
3491 set_put(deferred_closes, old_file) >= 0)
3492 (void) journal_file_set_offline(old_file, false);
3493 else
3494 (void) journal_file_close(old_file);
3495
3496 *f = new_file;
3497 return r;
3498 }
3499
3500 int journal_file_open_reliably(
3501 const char *fname,
3502 int flags,
3503 mode_t mode,
3504 bool compress,
3505 uint64_t compress_threshold_bytes,
3506 bool seal,
3507 JournalMetrics *metrics,
3508 MMapCache *mmap_cache,
3509 Set *deferred_closes,
3510 JournalFile *template,
3511 JournalFile **ret) {
3512
3513 int r;
3514 size_t l;
3515 _cleanup_free_ char *p = NULL;
3516
3517 r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3518 deferred_closes, template, ret);
3519 if (!IN_SET(r,
3520 -EBADMSG, /* Corrupted */
3521 -ENODATA, /* Truncated */
3522 -EHOSTDOWN, /* Other machine */
3523 -EPROTONOSUPPORT, /* Incompatible feature */
3524 -EBUSY, /* Unclean shutdown */
3525 -ESHUTDOWN, /* Already archived */
3526 -EIO, /* IO error, including SIGBUS on mmap */
3527 -EIDRM, /* File has been deleted */
3528 -ETXTBSY)) /* File is from the future */
3529 return r;
3530
3531 if ((flags & O_ACCMODE) == O_RDONLY)
3532 return r;
3533
3534 if (!(flags & O_CREAT))
3535 return r;
3536
3537 if (!endswith(fname, ".journal"))
3538 return r;
3539
3540 /* The file is corrupted. Rotate it away and try it again (but only once) */
3541
3542 l = strlen(fname);
3543 if (asprintf(&p, "%.*s@%016"PRIx64 "-%016"PRIx64 ".journal~",
3544 (int) l - 8, fname,
3545 now(CLOCK_REALTIME),
3546 random_u64()) < 0)
3547 return -ENOMEM;
3548
3549 if (rename(fname, p) < 0)
3550 return -errno;
3551
3552 /* btrfs doesn't cope well with our write pattern and
3553 * fragments heavily. Let's defrag all files we rotate */
3554
3555 (void) chattr_path(p, 0, FS_NOCOW_FL);
3556 (void) btrfs_defrag(p);
3557
3558 log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
3559
3560 return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3561 deferred_closes, template, ret);
3562 }
3563
3564 int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, Object **ret, uint64_t *offset) {
3565 uint64_t i, n;
3566 uint64_t q, xor_hash = 0;
3567 int r;
3568 EntryItem *items;
3569 dual_timestamp ts;
3570
3571 assert(from);
3572 assert(to);
3573 assert(o);
3574 assert(p);
3575
3576 if (!to->writable)
3577 return -EPERM;
3578
3579 ts.monotonic = le64toh(o->entry.monotonic);
3580 ts.realtime = le64toh(o->entry.realtime);
3581
3582 n = journal_file_entry_n_items(o);
3583 /* alloca() can't take 0, hence let's allocate at least one */
3584 items = newa(EntryItem, MAX(1u, n));
3585
3586 for (i = 0; i < n; i++) {
3587 uint64_t l, h;
3588 le64_t le_hash;
3589 size_t t;
3590 void *data;
3591 Object *u;
3592
3593 q = le64toh(o->entry.items[i].object_offset);
3594 le_hash = o->entry.items[i].hash;
3595
3596 r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
3597 if (r < 0)
3598 return r;
3599
3600 if (le_hash != o->data.hash)
3601 return -EBADMSG;
3602
3603 l = le64toh(o->object.size) - offsetof(Object, data.payload);
3604 t = (size_t) l;
3605
3606 /* We hit the limit on 32bit machines */
3607 if ((uint64_t) t != l)
3608 return -E2BIG;
3609
3610 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
3611 #if HAVE_XZ || HAVE_LZ4
3612 size_t rsize = 0;
3613
3614 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
3615 o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);
3616 if (r < 0)
3617 return r;
3618
3619 data = from->compress_buffer;
3620 l = rsize;
3621 #else
3622 return -EPROTONOSUPPORT;
3623 #endif
3624 } else
3625 data = o->data.payload;
3626
3627 r = journal_file_append_data(to, data, l, &u, &h);
3628 if (r < 0)
3629 return r;
3630
3631 xor_hash ^= le64toh(u->data.hash);
3632 items[i].object_offset = htole64(h);
3633 items[i].hash = u->data.hash;
3634
3635 r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
3636 if (r < 0)
3637 return r;
3638 }
3639
3640 r = journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset);
3641
3642 if (mmap_cache_got_sigbus(to->mmap, to->cache_fd))
3643 return -EIO;
3644
3645 return r;
3646 }
3647
3648 void journal_reset_metrics(JournalMetrics *m) {
3649 assert(m);
3650
3651 /* Set everything to "pick automatic values". */
3652
3653 *m = (JournalMetrics) {
3654 .min_use = (uint64_t) -1,
3655 .max_use = (uint64_t) -1,
3656 .min_size = (uint64_t) -1,
3657 .max_size = (uint64_t) -1,
3658 .keep_free = (uint64_t) -1,
3659 .n_max_files = (uint64_t) -1,
3660 };
3661 }
3662
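/* Replaces every metric still set to "pick automatically" ((uint64_t) -1) with a value
 * derived from the size of the backing file system: max_use defaults to 10% of the file
 * system size (within fixed lower and upper bounds), max_size to one eighth of max_use,
 * keep_free to 15% of the file system size, and n_max_files to a fixed default. */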
3663 void journal_default_metrics(JournalMetrics *m, int fd) {
3664 char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
3665 struct statvfs ss;
3666 uint64_t fs_size;
3667
3668 assert(m);
3669 assert(fd >= 0);
3670
3671 if (fstatvfs(fd, &ss) >= 0)
3672 fs_size = ss.f_frsize * ss.f_blocks;
3673 else {
3674 log_debug_errno(errno, "Failed to determine disk size: %m");
3675 fs_size = 0;
3676 }
3677
3678 if (m->max_use == (uint64_t) -1) {
3679
3680 if (fs_size > 0) {
3681 m->max_use = PAGE_ALIGN(fs_size / 10); /* 10% of file system size */
3682
3683 if (m->max_use > DEFAULT_MAX_USE_UPPER)
3684 m->max_use = DEFAULT_MAX_USE_UPPER;
3685
3686 if (m->max_use < DEFAULT_MAX_USE_LOWER)
3687 m->max_use = DEFAULT_MAX_USE_LOWER;
3688 } else
3689 m->max_use = DEFAULT_MAX_USE_LOWER;
3690 } else {
3691 m->max_use = PAGE_ALIGN(m->max_use);
3692
3693 if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
3694 m->max_use = JOURNAL_FILE_SIZE_MIN*2;
3695 }
3696
3697 if (m->min_use == (uint64_t) -1)
3698 m->min_use = DEFAULT_MIN_USE;
3699
3700 if (m->min_use > m->max_use)
3701 m->min_use = m->max_use;
3702
3703 if (m->max_size == (uint64_t) -1) {
3704 m->max_size = PAGE_ALIGN(m->max_use / 8); /* 8 chunks */
3705
3706 if (m->max_size > DEFAULT_MAX_SIZE_UPPER)
3707 m->max_size = DEFAULT_MAX_SIZE_UPPER;
3708 } else
3709 m->max_size = PAGE_ALIGN(m->max_size);
3710
3711 if (m->max_size != 0) {
3712 if (m->max_size < JOURNAL_FILE_SIZE_MIN)
3713 m->max_size = JOURNAL_FILE_SIZE_MIN;
3714
3715 if (m->max_use != 0 && m->max_size*2 > m->max_use)
3716 m->max_use = m->max_size*2;
3717 }
3718
3719 if (m->min_size == (uint64_t) -1)
3720 m->min_size = JOURNAL_FILE_SIZE_MIN;
3721 else {
3722 m->min_size = PAGE_ALIGN(m->min_size);
3723
3724 if (m->min_size < JOURNAL_FILE_SIZE_MIN)
3725 m->min_size = JOURNAL_FILE_SIZE_MIN;
3726
3727 if (m->max_size != 0 && m->min_size > m->max_size)
3728 m->max_size = m->min_size;
3729 }
3730
3731 if (m->keep_free == (uint64_t) -1) {
3732
3733 if (fs_size > 0) {
3734 m->keep_free = PAGE_ALIGN(fs_size * 3 / 20); /* 15% of file system size */
3735
3736 if (m->keep_free > DEFAULT_KEEP_FREE_UPPER)
3737 m->keep_free = DEFAULT_KEEP_FREE_UPPER;
3738
3739 } else
3740 m->keep_free = DEFAULT_KEEP_FREE;
3741 }
3742
3743 if (m->n_max_files == (uint64_t) -1)
3744 m->n_max_files = DEFAULT_N_MAX_FILES;
3745
3746 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
3747 format_bytes(a, sizeof(a), m->min_use),
3748 format_bytes(b, sizeof(b), m->max_use),
3749 format_bytes(c, sizeof(c), m->max_size),
3750 format_bytes(d, sizeof(d), m->min_size),
3751 format_bytes(e, sizeof(e), m->keep_free),
3752 m->n_max_files);
3753 }
3754
3755 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {
3756 assert(f);
3757 assert(f->header);
3758 assert(from || to);
3759
3760 if (from) {
3761 if (f->header->head_entry_realtime == 0)
3762 return -ENOENT;
3763
3764 *from = le64toh(f->header->head_entry_realtime);
3765 }
3766
3767 if (to) {
3768 if (f->header->tail_entry_realtime == 0)
3769 return -ENOENT;
3770
3771 *to = le64toh(f->header->tail_entry_realtime);
3772 }
3773
3774 return 1;
3775 }
3776
3777 int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
3778 Object *o;
3779 uint64_t p;
3780 int r;
3781
3782 assert(f);
3783 assert(from || to);
3784
3785 r = find_data_object_by_boot_id(f, boot_id, &o, &p);
3786 if (r <= 0)
3787 return r;
3788
3789 if (le64toh(o->data.n_entries) <= 0)
3790 return 0;
3791
3792 if (from) {
3793 r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
3794 if (r < 0)
3795 return r;
3796
3797 *from = le64toh(o->entry.monotonic);
3798 }
3799
3800 if (to) {
3801 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
3802 if (r < 0)
3803 return r;
3804
3805 r = generic_array_get_plus_one(f,
3806 le64toh(o->data.entry_offset),
3807 le64toh(o->data.entry_array_offset),
3808 le64toh(o->data.n_entries)-1,
3809 &o, NULL);
3810 if (r <= 0)
3811 return r;
3812
3813 *to = le64toh(o->entry.monotonic);
3814 }
3815
3816 return 1;
3817 }
3818
3819 bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {
3820 assert(f);
3821 assert(f->header);
3822
3823 /* If we gained new header fields we gained new features,
3824 * hence suggest a rotation */
3825 if (le64toh(f->header->header_size) < sizeof(Header)) {
3826 log_debug("%s uses an outdated header, suggesting rotation.", f->path);
3827 return true;
3828 }
3829
3830 /* Let's check if the hash tables grew over a certain fill
3831 * level (75%, borrowing this value from Java's hash table
3832 * implementation), and if so suggest a rotation. To calculate
3833 * the fill level we need the n_data field, which only exists
3834 * in newer versions. */
3835
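/* "Fill level > 75%" expressed in integer arithmetic:
 * n_data / n_buckets > 3/4  <=>  n_data * 4 > n_buckets * 3 */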
3836 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3837 if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3838 log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
3839 f->path,
3840 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
3841 le64toh(f->header->n_data),
3842 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3843 (unsigned long long) f->last_stat.st_size,
3844 f->last_stat.st_size / le64toh(f->header->n_data));
3845 return true;
3846 }
3847
3848 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3849 if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3850 log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
3851 f->path,
3852 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
3853 le64toh(f->header->n_fields),
3854 le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
3855 return true;
3856 }
3857
3858 /* Are the data objects properly indexed by field objects? */
3859 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
3860 JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
3861 le64toh(f->header->n_data) > 0 &&
3862 le64toh(f->header->n_fields) == 0)
3863 return true;
3864
3865 if (max_file_usec > 0) {
3866 usec_t t, h;
3867
3868 h = le64toh(f->header->head_entry_realtime);
3869 t = now(CLOCK_REALTIME);
3870
3871 if (h > 0 && t > h + max_file_usec)
3872 return true;
3873 }
3874
3875 return false;
3876 }