src/journal/journal-file.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2 /***
3 Copyright 2011 Lennart Poettering
4 ***/
5
6 #include <errno.h>
7 #include <fcntl.h>
8 #include <linux/fs.h>
9 #include <pthread.h>
10 #include <stddef.h>
11 #include <sys/mman.h>
12 #include <sys/statvfs.h>
13 #include <sys/uio.h>
14 #include <unistd.h>
15
16 #include "alloc-util.h"
17 #include "btrfs-util.h"
18 #include "chattr-util.h"
19 #include "compress.h"
20 #include "fd-util.h"
21 #include "fs-util.h"
22 #include "journal-authenticate.h"
23 #include "journal-def.h"
24 #include "journal-file.h"
25 #include "lookup3.h"
26 #include "parse-util.h"
27 #include "path-util.h"
28 #include "random-util.h"
29 #include "sd-event.h"
30 #include "set.h"
31 #include "stat-util.h"
32 #include "string-util.h"
33 #include "strv.h"
34 #include "xattr-util.h"
35
36 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
37 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
38
39 #define DEFAULT_COMPRESS_THRESHOLD (512ULL)
40 #define MIN_COMPRESS_THRESHOLD (8ULL)
41
42 /* This is the minimum journal file size */
43 #define JOURNAL_FILE_SIZE_MIN (512ULL*1024ULL) /* 512 KiB */
44
45 /* These are the lower and upper bounds if we deduce the max_use value
46 * from the file system size */
47 #define DEFAULT_MAX_USE_LOWER (1ULL*1024ULL*1024ULL) /* 1 MiB */
48 #define DEFAULT_MAX_USE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
49
50 /* This is the default minimal use limit, how much we'll use even if keep_free suggests otherwise. */
51 #define DEFAULT_MIN_USE (1ULL*1024ULL*1024ULL) /* 1 MiB */
52
53 /* This is the upper bound if we deduce max_size from max_use */
54 #define DEFAULT_MAX_SIZE_UPPER (128ULL*1024ULL*1024ULL) /* 128 MiB */
55
56 /* This is the upper bound if we deduce the keep_free value from the
57 * file system size */
58 #define DEFAULT_KEEP_FREE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
59
60 /* This is the keep_free value when we can't determine the system
61 * size */
62 #define DEFAULT_KEEP_FREE (1024ULL*1024ULL) /* 1 MiB */
63
64 /* This is the default maximum number of journal files to keep around. */
65 #define DEFAULT_N_MAX_FILES (100)
66
67 /* n_data was the first entry we added after the initial file format design */
68 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
69
70 /* How many entries to keep in the entry array chain cache at max */
71 #define CHAIN_CACHE_MAX 20
72
73 /* How much to increase the journal file size at once each time we allocate something new. */
74 #define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL) /* 8 MiB */
75
76 /* Reread fstat() of the file for detecting deletions at least this often */
77 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
78
79 /* The mmap context to use for the header: we pick the one right above the last defined object type */
80 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
81
82 #ifdef __clang__
83 # pragma GCC diagnostic ignored "-Waddress-of-packed-member"
84 #endif
85
86 /* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
87 * As a result we use atomic operations on f->offline_state for inter-thread communications with
88 * journal_file_set_offline() and journal_file_set_online(). */
89 static void journal_file_set_offline_internal(JournalFile *f) {
90 assert(f);
91 assert(f->fd >= 0);
92 assert(f->header);
93
94 for (;;) {
95 switch (f->offline_state) {
96 case OFFLINE_CANCEL:
97 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
98 continue;
99 return;
100
101 case OFFLINE_AGAIN_FROM_SYNCING:
102 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
103 continue;
104 break;
105
106 case OFFLINE_AGAIN_FROM_OFFLINING:
107 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
108 continue;
109 break;
110
111 case OFFLINE_SYNCING:
112 (void) fsync(f->fd);
113
114 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
115 continue;
116
117 f->header->state = f->archive ? STATE_ARCHIVED : STATE_OFFLINE;
118 (void) fsync(f->fd);
119 break;
120
121 case OFFLINE_OFFLINING:
122 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
123 continue;
124 _fallthrough_;
125 case OFFLINE_DONE:
126 return;
127
128 case OFFLINE_JOINED:
129 log_debug("OFFLINE_JOINED unexpected offline state for journal_file_set_offline_internal()");
130 return;
131 }
132 }
133 }
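/* A rough illustrative sketch of the offline state machine driven here and by
 * journal_file_set_offline()/journal_file_set_online(), derived from the transitions in this file
 * (not an authoritative diagram):
 *
 *   JOINED -> SYNCING -> OFFLINING -> DONE -> JOINED (after pthread_join())
 *
 * CANCEL short-circuits straight to DONE, while AGAIN_FROM_SYNCING and AGAIN_FROM_OFFLINING rewind
 * an in-flight offline back to SYNCING so that the fsync() and header state update are redone. */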
134
135 static void * journal_file_set_offline_thread(void *arg) {
136 JournalFile *f = arg;
137
138 (void) pthread_setname_np(pthread_self(), "journal-offline");
139
140 journal_file_set_offline_internal(f);
141
142 return NULL;
143 }
144
145 static int journal_file_set_offline_thread_join(JournalFile *f) {
146 int r;
147
148 assert(f);
149
150 if (f->offline_state == OFFLINE_JOINED)
151 return 0;
152
153 r = pthread_join(f->offline_thread, NULL);
154 if (r)
155 return -r;
156
157 f->offline_state = OFFLINE_JOINED;
158
159 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
160 return -EIO;
161
162 return 0;
163 }
164
165 /* Trigger a restart if the offline thread is mid-flight in a restartable state. */
166 static bool journal_file_set_offline_try_restart(JournalFile *f) {
167 for (;;) {
168 switch (f->offline_state) {
169 case OFFLINE_AGAIN_FROM_SYNCING:
170 case OFFLINE_AGAIN_FROM_OFFLINING:
171 return true;
172
173 case OFFLINE_CANCEL:
174 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
175 continue;
176 return true;
177
178 case OFFLINE_SYNCING:
179 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
180 continue;
181 return true;
182
183 case OFFLINE_OFFLINING:
184 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
185 continue;
186 return true;
187
188 default:
189 return false;
190 }
191 }
192 }
193
194 /* Sets a journal offline.
195 *
196 * If wait is false then an offline is dispatched in a separate thread for a
197 * subsequent journal_file_set_offline() or journal_file_set_online() of the
198 * same journal to synchronize with.
199 *
200 * If wait is true, then either an existing offline thread will be restarted
201 * and joined, or if none exists the offline is simply performed in this
202 * context without involving another thread.
203 */
204 int journal_file_set_offline(JournalFile *f, bool wait) {
205 bool restarted;
206 int r;
207
208 assert(f);
209
210 if (!f->writable)
211 return -EPERM;
212
213 if (f->fd < 0 || !f->header)
214 return -EINVAL;
215
216 /* An offlining journal is implicitly online and may modify f->header->state, so
217 * we must also join any potentially lingering offline thread when not online. */
218 if (!journal_file_is_offlining(f) && f->header->state != STATE_ONLINE)
219 return journal_file_set_offline_thread_join(f);
220
221 /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
222 restarted = journal_file_set_offline_try_restart(f);
223 if ((restarted && wait) || !restarted) {
224 r = journal_file_set_offline_thread_join(f);
225 if (r < 0)
226 return r;
227 }
228
229 if (restarted)
230 return 0;
231
232 /* Initiate a new offline. */
233 f->offline_state = OFFLINE_SYNCING;
234
235 if (wait) /* Without using a thread if waiting. */
236 journal_file_set_offline_internal(f);
237 else {
238 sigset_t ss, saved_ss;
239 int k;
240
241 if (sigfillset(&ss) < 0)
242 return -errno;
243
244 r = pthread_sigmask(SIG_BLOCK, &ss, &saved_ss);
245 if (r > 0)
246 return -r;
247
248 r = pthread_create(&f->offline_thread, NULL, journal_file_set_offline_thread, f);
249
250 k = pthread_sigmask(SIG_SETMASK, &saved_ss, NULL);
251 if (r > 0) {
252 f->offline_state = OFFLINE_JOINED;
253 return -r;
254 }
255 if (k > 0)
256 return -k;
257 }
258
259 return 0;
260 }
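/* Illustrative usage sketch (not from the original sources) of the two modes described above:
 *
 *     journal_file_set_offline(f, false);  // async: fsync() + header state change run in a worker thread
 *     ...                                  // caller continues; a later set_online() may cancel or join it
 *     journal_file_set_offline(f, true);   // sync: restart a live offline thread if needed, then wait
 */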
261
262 static int journal_file_set_online(JournalFile *f) {
263 bool wait = true;
264
265 assert(f);
266
267 if (!f->writable)
268 return -EPERM;
269
270 if (f->fd < 0 || !f->header)
271 return -EINVAL;
272
273 while (wait) {
274 switch (f->offline_state) {
275 case OFFLINE_JOINED:
276 /* No offline thread, no need to wait. */
277 wait = false;
278 break;
279
280 case OFFLINE_SYNCING:
281 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
282 continue;
283 /* Canceled syncing prior to offlining, no need to wait. */
284 wait = false;
285 break;
286
287 case OFFLINE_AGAIN_FROM_SYNCING:
288 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
289 continue;
290 /* Canceled restart from syncing, no need to wait. */
291 wait = false;
292 break;
293
294 case OFFLINE_AGAIN_FROM_OFFLINING:
295 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
296 continue;
297 /* Canceled restart from offlining, must wait for offlining to complete however. */
298 _fallthrough_;
299 default: {
300 int r;
301
302 r = journal_file_set_offline_thread_join(f);
303 if (r < 0)
304 return r;
305
306 wait = false;
307 break;
308 }
309 }
310 }
311
312 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
313 return -EIO;
314
315 switch (f->header->state) {
316 case STATE_ONLINE:
317 return 0;
318
319 case STATE_OFFLINE:
320 f->header->state = STATE_ONLINE;
321 (void) fsync(f->fd);
322 return 0;
323
324 default:
325 return -EINVAL;
326 }
327 }
328
329 bool journal_file_is_offlining(JournalFile *f) {
330 assert(f);
331
332 __sync_synchronize();
333
334 if (IN_SET(f->offline_state, OFFLINE_DONE, OFFLINE_JOINED))
335 return false;
336
337 return true;
338 }
339
340 JournalFile* journal_file_close(JournalFile *f) {
341 assert(f);
342
343 #if HAVE_GCRYPT
344 /* Write the final tag */
345 if (f->seal && f->writable) {
346 int r;
347
348 r = journal_file_append_tag(f);
349 if (r < 0)
350 log_error_errno(r, "Failed to append tag when closing journal: %m");
351 }
352 #endif
353
354 if (f->post_change_timer) {
355 int enabled;
356
357 if (sd_event_source_get_enabled(f->post_change_timer, &enabled) >= 0)
358 if (enabled == SD_EVENT_ONESHOT)
359 journal_file_post_change(f);
360
361 (void) sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_OFF);
362 sd_event_source_unref(f->post_change_timer);
363 }
364
365 journal_file_set_offline(f, true);
366
367 if (f->mmap && f->cache_fd)
368 mmap_cache_free_fd(f->mmap, f->cache_fd);
369
370 if (f->fd >= 0 && f->defrag_on_close) {
371
372 /* Be friendly to btrfs: turn COW back on again now,
373 * and defragment the file. We won't write to the file
374 * ever again, hence remove all fragmentation, and
375 * reenable all the good bits COW usually provides
376 * (such as data checksumming). */
377
378 (void) chattr_fd(f->fd, 0, FS_NOCOW_FL);
379 (void) btrfs_defrag_fd(f->fd);
380 }
381
382 if (f->close_fd)
383 safe_close(f->fd);
384 free(f->path);
385
386 mmap_cache_unref(f->mmap);
387
388 ordered_hashmap_free_free(f->chain_cache);
389
390 #if HAVE_XZ || HAVE_LZ4
391 free(f->compress_buffer);
392 #endif
393
394 #if HAVE_GCRYPT
395 if (f->fss_file)
396 munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
397 else
398 free(f->fsprg_state);
399
400 free(f->fsprg_seed);
401
402 if (f->hmac)
403 gcry_md_close(f->hmac);
404 #endif
405
406 return mfree(f);
407 }
408
409 static int journal_file_init_header(JournalFile *f, JournalFile *template) {
410 Header h = {};
411 ssize_t k;
412 int r;
413
414 assert(f);
415
416 memcpy(h.signature, HEADER_SIGNATURE, 8);
417 h.header_size = htole64(ALIGN64(sizeof(h)));
418
419 h.incompatible_flags |= htole32(
420 f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
421 f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4);
422
423 h.compatible_flags = htole32(
424 f->seal * HEADER_COMPATIBLE_SEALED);
425
426 r = sd_id128_randomize(&h.file_id);
427 if (r < 0)
428 return r;
429
430 if (template) {
431 h.seqnum_id = template->header->seqnum_id;
432 h.tail_entry_seqnum = template->header->tail_entry_seqnum;
433 } else
434 h.seqnum_id = h.file_id;
435
436 k = pwrite(f->fd, &h, sizeof(h), 0);
437 if (k < 0)
438 return -errno;
439
440 if (k != sizeof(h))
441 return -EIO;
442
443 return 0;
444 }
445
446 static int journal_file_refresh_header(JournalFile *f) {
447 sd_id128_t boot_id;
448 int r;
449
450 assert(f);
451 assert(f->header);
452
453 r = sd_id128_get_machine(&f->header->machine_id);
454 if (IN_SET(r, -ENOENT, -ENOMEDIUM))
455 /* We don't have a machine-id, let's continue without */
456 zero(f->header->machine_id);
457 else if (r < 0)
458 return r;
459
460 r = sd_id128_get_boot(&boot_id);
461 if (r < 0)
462 return r;
463
464 f->header->boot_id = boot_id;
465
466 r = journal_file_set_online(f);
467
468 /* Sync the online state to disk */
469 (void) fsync(f->fd);
470
471 /* We likely just created a new file, also sync the directory this file is located in. */
472 (void) fsync_directory_of_file(f->fd);
473
474 return r;
475 }
476
477 static bool warn_wrong_flags(const JournalFile *f, bool compatible) {
478 const uint32_t any = compatible ? HEADER_COMPATIBLE_ANY : HEADER_INCOMPATIBLE_ANY,
479 supported = compatible ? HEADER_COMPATIBLE_SUPPORTED : HEADER_INCOMPATIBLE_SUPPORTED;
480 const char *type = compatible ? "compatible" : "incompatible";
481 uint32_t flags;
482
483 flags = le32toh(compatible ? f->header->compatible_flags : f->header->incompatible_flags);
484
485 if (flags & ~supported) {
486 if (flags & ~any)
487 log_debug("Journal file %s has unknown %s flags 0x%"PRIx32,
488 f->path, type, flags & ~any);
489 flags = (flags & any) & ~supported;
490 if (flags) {
491 const char* strv[3];
492 unsigned n = 0;
493 _cleanup_free_ char *t = NULL;
494
495 if (compatible && (flags & HEADER_COMPATIBLE_SEALED))
496 strv[n++] = "sealed";
497 if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_XZ))
498 strv[n++] = "xz-compressed";
499 if (!compatible && (flags & HEADER_INCOMPATIBLE_COMPRESSED_LZ4))
500 strv[n++] = "lz4-compressed";
501 strv[n] = NULL;
502 assert(n < ELEMENTSOF(strv));
503
504 t = strv_join((char**) strv, ", ");
505 log_debug("Journal file %s uses %s %s %s disabled at compilation time.",
506 f->path, type, n > 1 ? "flags" : "flag", strnull(t));
507 }
508 return true;
509 }
510
511 return false;
512 }
513
514 static int journal_file_verify_header(JournalFile *f) {
515 uint64_t arena_size, header_size;
516
517 assert(f);
518 assert(f->header);
519
520 if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
521 return -EBADMSG;
522
523 /* In both read and write mode we refuse to open files with incompatible
524 * flags we don't know. */
525 if (warn_wrong_flags(f, false))
526 return -EPROTONOSUPPORT;
527
528 /* When opened for writing we refuse to open files with unsupported compatible flags, too. */
529 if (f->writable && warn_wrong_flags(f, true))
530 return -EPROTONOSUPPORT;
531
532 if (f->header->state >= _STATE_MAX)
533 return -EBADMSG;
534
535 header_size = le64toh(f->header->header_size);
536
537 /* The first addition was n_data, so check that we are at least this large */
538 if (header_size < HEADER_SIZE_MIN)
539 return -EBADMSG;
540
541 if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
542 return -EBADMSG;
543
544 arena_size = le64toh(f->header->arena_size);
545
546 if (UINT64_MAX - header_size < arena_size || header_size + arena_size > (uint64_t) f->last_stat.st_size)
547 return -ENODATA;
548
549 if (le64toh(f->header->tail_object_offset) > header_size + arena_size)
550 return -ENODATA;
551
552 if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
553 !VALID64(le64toh(f->header->field_hash_table_offset)) ||
554 !VALID64(le64toh(f->header->tail_object_offset)) ||
555 !VALID64(le64toh(f->header->entry_array_offset)))
556 return -ENODATA;
557
558 if (f->writable) {
559 sd_id128_t machine_id;
560 uint8_t state;
561 int r;
562
563 r = sd_id128_get_machine(&machine_id);
564 if (r < 0)
565 return r;
566
567 if (!sd_id128_equal(machine_id, f->header->machine_id))
568 return -EHOSTDOWN;
569
570 state = f->header->state;
571
572 if (state == STATE_ARCHIVED)
573 return -ESHUTDOWN; /* Already archived */
574 else if (state == STATE_ONLINE) {
575 log_debug("Journal file %s is already online. Assuming unclean closing.", f->path);
576 return -EBUSY;
577 } else if (state != STATE_OFFLINE) {
578 log_debug("Journal file %s has unknown state %i.", f->path, state);
579 return -EBUSY;
580 }
581
582 if (f->header->field_hash_table_size == 0 || f->header->data_hash_table_size == 0)
583 return -EBADMSG;
584
585 /* Don't permit appending to files from the future, since otherwise the realtime timestamps wouldn't
586 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
587 * bisection. */
588 if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME)) {
589 log_debug("Journal file %s is from the future, refusing to append new data to it that'd be older.", f->path);
590 return -ETXTBSY;
591 }
592 }
593
594 f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
595 f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);
596
597 f->seal = JOURNAL_HEADER_SEALED(f->header);
598
599 return 0;
600 }
601
602 static int journal_file_fstat(JournalFile *f) {
603 int r;
604
605 assert(f);
606 assert(f->fd >= 0);
607
608 if (fstat(f->fd, &f->last_stat) < 0)
609 return -errno;
610
611 f->last_stat_usec = now(CLOCK_MONOTONIC);
612
613 /* Refuse dealing with files that aren't regular */
614 r = stat_verify_regular(&f->last_stat);
615 if (r < 0)
616 return r;
617
618 /* Refuse appending to files that are already deleted */
619 if (f->last_stat.st_nlink <= 0)
620 return -EIDRM;
621
622 return 0;
623 }
624
625 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
626 uint64_t old_size, new_size;
627 int r;
628
629 assert(f);
630 assert(f->header);
631
632 /* We assume that this file is not sparse, and we know that
633 * for sure, since we always call posix_fallocate()
634 * ourselves */
635
636 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
637 return -EIO;
638
639 old_size =
640 le64toh(f->header->header_size) +
641 le64toh(f->header->arena_size);
642
643 new_size = PAGE_ALIGN(offset + size);
644 if (new_size < le64toh(f->header->header_size))
645 new_size = le64toh(f->header->header_size);
646
647 if (new_size <= old_size) {
648
649 /* We already pre-allocated enough space, but before
650 * we write to it, let's check with fstat() if the
651 * file got deleted, in order to make sure we don't throw
652 * away the data immediately. Don't check fstat() for
653 * all writes though, but only once every 5s (LAST_STAT_REFRESH_USEC). */
654
655 if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
656 return 0;
657
658 return journal_file_fstat(f);
659 }
660
661 /* Allocate more space. */
662
663 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
664 return -E2BIG;
665
666 if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
667 struct statvfs svfs;
668
669 if (fstatvfs(f->fd, &svfs) >= 0) {
670 uint64_t available;
671
672 available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);
673
674 if (new_size - old_size > available)
675 return -E2BIG;
676 }
677 }
678
679 /* Increase by larger blocks at once */
680 new_size = DIV_ROUND_UP(new_size, FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
681 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
682 new_size = f->metrics.max_size;
683
684 /* Note that the glibc fallocate() fallback is very
685 inefficient, hence we try to minimize the allocation area
686 as much as we can. */
687 r = posix_fallocate(f->fd, old_size, new_size - old_size);
688 if (r != 0)
689 return -r;
690
691 f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));
692
693 return journal_file_fstat(f);
694 }
695
696 static unsigned type_to_context(ObjectType type) {
697 /* One context for each type, plus one catch-all for the rest */
698 assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
699 assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
700 return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
701 }
702
703 static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret, size_t *ret_size) {
704 int r;
705
706 assert(f);
707 assert(ret);
708
709 if (size <= 0)
710 return -EINVAL;
711
712 /* Avoid SIGBUS on invalid accesses */
713 if (offset + size > (uint64_t) f->last_stat.st_size) {
714 /* Hmm, out of range? Let's refresh the fstat() data
715 * first, before we trust that check. */
716
717 r = journal_file_fstat(f);
718 if (r < 0)
719 return r;
720
721 if (offset + size > (uint64_t) f->last_stat.st_size)
722 return -EADDRNOTAVAIL;
723 }
724
725 return mmap_cache_get(f->mmap, f->cache_fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret, ret_size);
726 }
727
728 static uint64_t minimum_header_size(Object *o) {
729
730 static const uint64_t table[] = {
731 [OBJECT_DATA] = sizeof(DataObject),
732 [OBJECT_FIELD] = sizeof(FieldObject),
733 [OBJECT_ENTRY] = sizeof(EntryObject),
734 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
735 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
736 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
737 [OBJECT_TAG] = sizeof(TagObject),
738 };
739
740 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
741 return sizeof(ObjectHeader);
742
743 return table[o->object.type];
744 }
745
746 /* Lightweight object checks. We want this to be fast, so that we won't
747 * slow down every journal_file_move_to_object() call too much. */
748 static int journal_file_check_object(JournalFile *f, uint64_t offset, Object *o) {
749 assert(f);
750 assert(o);
751
752 switch (o->object.type) {
753
754 case OBJECT_DATA: {
755 if ((le64toh(o->data.entry_offset) == 0) ^ (le64toh(o->data.n_entries) == 0)) {
756 log_debug("Bad n_entries: %"PRIu64": %"PRIu64,
757 le64toh(o->data.n_entries), offset);
758 return -EBADMSG;
759 }
760
761 if (le64toh(o->object.size) - offsetof(DataObject, payload) <= 0) {
762 log_debug("Bad object size (<= %zu): %"PRIu64": %"PRIu64,
763 offsetof(DataObject, payload),
764 le64toh(o->object.size),
765 offset);
766 return -EBADMSG;
767 }
768
769 if (!VALID64(le64toh(o->data.next_hash_offset)) ||
770 !VALID64(le64toh(o->data.next_field_offset)) ||
771 !VALID64(le64toh(o->data.entry_offset)) ||
772 !VALID64(le64toh(o->data.entry_array_offset))) {
773 log_debug("Invalid offset, next_hash_offset="OFSfmt", next_field_offset="OFSfmt
774 ", entry_offset="OFSfmt", entry_array_offset="OFSfmt": %"PRIu64,
775 le64toh(o->data.next_hash_offset),
776 le64toh(o->data.next_field_offset),
777 le64toh(o->data.entry_offset),
778 le64toh(o->data.entry_array_offset),
779 offset);
780 return -EBADMSG;
781 }
782
783 break;
784 }
785
786 case OBJECT_FIELD:
787 if (le64toh(o->object.size) - offsetof(FieldObject, payload) <= 0) {
788 log_debug(
789 "Bad field size (<= %zu): %"PRIu64": %"PRIu64,
790 offsetof(FieldObject, payload),
791 le64toh(o->object.size),
792 offset);
793 return -EBADMSG;
794 }
795
796 if (!VALID64(le64toh(o->field.next_hash_offset)) ||
797 !VALID64(le64toh(o->field.head_data_offset))) {
798 log_debug(
799 "Invalid offset, next_hash_offset="OFSfmt
800 ", head_data_offset="OFSfmt": %"PRIu64,
801 le64toh(o->field.next_hash_offset),
802 le64toh(o->field.head_data_offset),
803 offset);
804 return -EBADMSG;
805 }
806 break;
807
808 case OBJECT_ENTRY:
809 if ((le64toh(o->object.size) - offsetof(EntryObject, items)) % sizeof(EntryItem) != 0) {
810 log_debug(
811 "Bad entry size (<= %zu): %"PRIu64": %"PRIu64,
812 offsetof(EntryObject, items),
813 le64toh(o->object.size),
814 offset);
815 return -EBADMSG;
816 }
817
818 if ((le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem) <= 0) {
819 log_debug(
820 "Invalid number of items in entry: %"PRIu64": %"PRIu64,
821 (le64toh(o->object.size) - offsetof(EntryObject, items)) / sizeof(EntryItem),
822 offset);
823 return -EBADMSG;
824 }
825
826 if (le64toh(o->entry.seqnum) <= 0) {
827 log_debug(
828 "Invalid entry seqnum: %"PRIx64": %"PRIu64,
829 le64toh(o->entry.seqnum),
830 offset);
831 return -EBADMSG;
832 }
833
834 if (!VALID_REALTIME(le64toh(o->entry.realtime))) {
835 log_debug(
836 "Invalid entry realtime timestamp: %"PRIu64": %"PRIu64,
837 le64toh(o->entry.realtime),
838 offset);
839 return -EBADMSG;
840 }
841
842 if (!VALID_MONOTONIC(le64toh(o->entry.monotonic))) {
843 log_debug(
844 "Invalid entry monotonic timestamp: %"PRIu64": %"PRIu64,
845 le64toh(o->entry.monotonic),
846 offset);
847 return -EBADMSG;
848 }
849
850 break;
851
852 case OBJECT_DATA_HASH_TABLE:
853 case OBJECT_FIELD_HASH_TABLE:
854 if ((le64toh(o->object.size) - offsetof(HashTableObject, items)) % sizeof(HashItem) != 0 ||
855 (le64toh(o->object.size) - offsetof(HashTableObject, items)) / sizeof(HashItem) <= 0) {
856 log_debug(
857 "Invalid %s hash table size: %"PRIu64": %"PRIu64,
858 o->object.type == OBJECT_DATA_HASH_TABLE ? "data" : "field",
859 le64toh(o->object.size),
860 offset);
861 return -EBADMSG;
862 }
863
864 break;
865
866 case OBJECT_ENTRY_ARRAY:
867 if ((le64toh(o->object.size) - offsetof(EntryArrayObject, items)) % sizeof(le64_t) != 0 ||
868 (le64toh(o->object.size) - offsetof(EntryArrayObject, items)) / sizeof(le64_t) <= 0) {
869 log_debug(
870 "Invalid object entry array size: %"PRIu64": %"PRIu64,
871 le64toh(o->object.size),
872 offset);
873 return -EBADMSG;
874 }
875
876 if (!VALID64(le64toh(o->entry_array.next_entry_array_offset))) {
877 log_debug(
878 "Invalid object entry array next_entry_array_offset: "OFSfmt": %"PRIu64,
879 le64toh(o->entry_array.next_entry_array_offset),
880 offset);
881 return -EBADMSG;
882 }
883
884 break;
885
886 case OBJECT_TAG:
887 if (le64toh(o->object.size) != sizeof(TagObject)) {
888 log_debug(
889 "Invalid object tag size: %"PRIu64": %"PRIu64,
890 le64toh(o->object.size),
891 offset);
892 return -EBADMSG;
893 }
894
895 if (!VALID_EPOCH(le64toh(o->tag.epoch))) {
896 log_debug(
897 "Invalid object tag epoch: %"PRIu64": %"PRIu64,
898 le64toh(o->tag.epoch),
899 offset);
900 return -EBADMSG;
901 }
902
903 break;
904 }
905
906 return 0;
907 }
908
909 int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
910 int r;
911 void *t;
912 size_t tsize;
913 Object *o;
914 uint64_t s;
915
916 assert(f);
917 assert(ret);
918
919 /* Objects may only be located at multiples of 64 bits */
920 if (!VALID64(offset)) {
921 log_debug("Attempt to move to object at non-64bit boundary: %" PRIu64, offset);
922 return -EBADMSG;
923 }
924
925 /* Objects may not be located inside the file header */
926 if (offset < le64toh(f->header->header_size)) {
927 log_debug("Attempt to move to object located in file header: %" PRIu64, offset);
928 return -EBADMSG;
929 }
930
931 r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t, &tsize);
932 if (r < 0)
933 return r;
934
935 o = (Object*) t;
936 s = le64toh(o->object.size);
937
938 if (s == 0) {
939 log_debug("Attempt to move to uninitialized object: %" PRIu64, offset);
940 return -EBADMSG;
941 }
942 if (s < sizeof(ObjectHeader)) {
943 log_debug("Attempt to move to overly short object: %" PRIu64, offset);
944 return -EBADMSG;
945 }
946
947 if (o->object.type <= OBJECT_UNUSED) {
948 log_debug("Attempt to move to object with invalid type: %" PRIu64, offset);
949 return -EBADMSG;
950 }
951
952 if (s < minimum_header_size(o)) {
953 log_debug("Attempt to move to truncated object: %" PRIu64, offset);
954 return -EBADMSG;
955 }
956
957 if (type > OBJECT_UNUSED && o->object.type != type) {
958 log_debug("Attempt to move to object of unexpected type: %" PRIu64, offset);
959 return -EBADMSG;
960 }
961
962 if (s > tsize) {
963 r = journal_file_move_to(f, type, false, offset, s, &t, NULL);
964 if (r < 0)
965 return r;
966
967 o = (Object*) t;
968 }
969
970 r = journal_file_check_object(f, offset, o);
971 if (r < 0)
972 return r;
973
974 *ret = o;
975 return 0;
976 }
977
978 static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {
979 uint64_t r;
980
981 assert(f);
982 assert(f->header);
983
984 r = le64toh(f->header->tail_entry_seqnum) + 1;
985
986 if (seqnum) {
987 /* If an external seqnum counter was passed, we update
988 * both the local and the external one, and set both to
989 * the maximum of the two */
990
991 if (*seqnum + 1 > r)
992 r = *seqnum + 1;
993
994 *seqnum = r;
995 }
996
997 f->header->tail_entry_seqnum = htole64(r);
998
999 if (f->header->head_entry_seqnum == 0)
1000 f->header->head_entry_seqnum = htole64(r);
1001
1002 return r;
1003 }
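/* Illustrative worked example (numbers chosen purely for illustration): if the file's
 * tail_entry_seqnum is 37 and a caller passes an external counter *seqnum == 41, the new entry is
 * assigned seqnum 42 (max(37, 41) + 1) and both the header tail and *seqnum are advanced to 42,
 * keeping per-file and cross-file sequence numbers monotonic. */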
1004
1005 int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset) {
1006 int r;
1007 uint64_t p;
1008 Object *tail, *o;
1009 void *t;
1010
1011 assert(f);
1012 assert(f->header);
1013 assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
1014 assert(size >= sizeof(ObjectHeader));
1015 assert(offset);
1016 assert(ret);
1017
1018 r = journal_file_set_online(f);
1019 if (r < 0)
1020 return r;
1021
1022 p = le64toh(f->header->tail_object_offset);
1023 if (p == 0)
1024 p = le64toh(f->header->header_size);
1025 else {
1026 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
1027 if (r < 0)
1028 return r;
1029
1030 p += ALIGN64(le64toh(tail->object.size));
1031 }
1032
1033 r = journal_file_allocate(f, p, size);
1034 if (r < 0)
1035 return r;
1036
1037 r = journal_file_move_to(f, type, false, p, size, &t, NULL);
1038 if (r < 0)
1039 return r;
1040
1041 o = (Object*) t;
1042
1043 zero(o->object);
1044 o->object.type = type;
1045 o->object.size = htole64(size);
1046
1047 f->header->tail_object_offset = htole64(p);
1048 f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
1049
1050 *ret = o;
1051 *offset = p;
1052
1053 return 0;
1054 }
1055
1056 static int journal_file_setup_data_hash_table(JournalFile *f) {
1057 uint64_t s, p;
1058 Object *o;
1059 int r;
1060
1061 assert(f);
1062 assert(f->header);
1063
1064 /* We estimate that we need 1 hash table entry per 768 bytes
1065 of journal file and we want to make sure we never get
1066 beyond 75% fill level. Calculate the hash table size for
1067 the maximum file size based on these metrics. */
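/* Illustrative worked example (assuming the usual 16-byte HashItem from journal-def.h): with the
 * default 128 MiB size cap this yields 128 MiB * 4 / 768 / 3 = ~233k buckets, i.e. roughly 3.6 MiB
 * of hash table, so the expected ~175k data objects (one per 768 bytes) fill at most ~75% of it. */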
1068
1069 s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
1070 if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
1071 s = DEFAULT_DATA_HASH_TABLE_SIZE;
1072
1073 log_debug("Reserving %"PRIu64" entries in hash table.", s / sizeof(HashItem));
1074
1075 r = journal_file_append_object(f,
1076 OBJECT_DATA_HASH_TABLE,
1077 offsetof(Object, hash_table.items) + s,
1078 &o, &p);
1079 if (r < 0)
1080 return r;
1081
1082 memzero(o->hash_table.items, s);
1083
1084 f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1085 f->header->data_hash_table_size = htole64(s);
1086
1087 return 0;
1088 }
1089
1090 static int journal_file_setup_field_hash_table(JournalFile *f) {
1091 uint64_t s, p;
1092 Object *o;
1093 int r;
1094
1095 assert(f);
1096 assert(f->header);
1097
1098 /* We use a fixed-size hash table for the fields, as their
1099 * number should grow only very slowly */
1100
1101 s = DEFAULT_FIELD_HASH_TABLE_SIZE;
1102 r = journal_file_append_object(f,
1103 OBJECT_FIELD_HASH_TABLE,
1104 offsetof(Object, hash_table.items) + s,
1105 &o, &p);
1106 if (r < 0)
1107 return r;
1108
1109 memzero(o->hash_table.items, s);
1110
1111 f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
1112 f->header->field_hash_table_size = htole64(s);
1113
1114 return 0;
1115 }
1116
1117 int journal_file_map_data_hash_table(JournalFile *f) {
1118 uint64_t s, p;
1119 void *t;
1120 int r;
1121
1122 assert(f);
1123 assert(f->header);
1124
1125 if (f->data_hash_table)
1126 return 0;
1127
1128 p = le64toh(f->header->data_hash_table_offset);
1129 s = le64toh(f->header->data_hash_table_size);
1130
1131 r = journal_file_move_to(f,
1132 OBJECT_DATA_HASH_TABLE,
1133 true,
1134 p, s,
1135 &t, NULL);
1136 if (r < 0)
1137 return r;
1138
1139 f->data_hash_table = t;
1140 return 0;
1141 }
1142
1143 int journal_file_map_field_hash_table(JournalFile *f) {
1144 uint64_t s, p;
1145 void *t;
1146 int r;
1147
1148 assert(f);
1149 assert(f->header);
1150
1151 if (f->field_hash_table)
1152 return 0;
1153
1154 p = le64toh(f->header->field_hash_table_offset);
1155 s = le64toh(f->header->field_hash_table_size);
1156
1157 r = journal_file_move_to(f,
1158 OBJECT_FIELD_HASH_TABLE,
1159 true,
1160 p, s,
1161 &t, NULL);
1162 if (r < 0)
1163 return r;
1164
1165 f->field_hash_table = t;
1166 return 0;
1167 }
1168
1169 static int journal_file_link_field(
1170 JournalFile *f,
1171 Object *o,
1172 uint64_t offset,
1173 uint64_t hash) {
1174
1175 uint64_t p, h, m;
1176 int r;
1177
1178 assert(f);
1179 assert(f->header);
1180 assert(f->field_hash_table);
1181 assert(o);
1182 assert(offset > 0);
1183
1184 if (o->object.type != OBJECT_FIELD)
1185 return -EINVAL;
1186
1187 m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
1188 if (m <= 0)
1189 return -EBADMSG;
1190
1191 /* This might alter the window we are looking at */
1192 o->field.next_hash_offset = o->field.head_data_offset = 0;
1193
1194 h = hash % m;
1195 p = le64toh(f->field_hash_table[h].tail_hash_offset);
1196 if (p == 0)
1197 f->field_hash_table[h].head_hash_offset = htole64(offset);
1198 else {
1199 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1200 if (r < 0)
1201 return r;
1202
1203 o->field.next_hash_offset = htole64(offset);
1204 }
1205
1206 f->field_hash_table[h].tail_hash_offset = htole64(offset);
1207
1208 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
1209 f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
1210
1211 return 0;
1212 }
1213
1214 static int journal_file_link_data(
1215 JournalFile *f,
1216 Object *o,
1217 uint64_t offset,
1218 uint64_t hash) {
1219
1220 uint64_t p, h, m;
1221 int r;
1222
1223 assert(f);
1224 assert(f->header);
1225 assert(f->data_hash_table);
1226 assert(o);
1227 assert(offset > 0);
1228
1229 if (o->object.type != OBJECT_DATA)
1230 return -EINVAL;
1231
1232 m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
1233 if (m <= 0)
1234 return -EBADMSG;
1235
1236 /* This might alter the window we are looking at */
1237 o->data.next_hash_offset = o->data.next_field_offset = 0;
1238 o->data.entry_offset = o->data.entry_array_offset = 0;
1239 o->data.n_entries = 0;
1240
1241 h = hash % m;
1242 p = le64toh(f->data_hash_table[h].tail_hash_offset);
1243 if (p == 0)
1244 /* Only entry in the hash table is easy */
1245 f->data_hash_table[h].head_hash_offset = htole64(offset);
1246 else {
1247 /* Move back to the previous data object, to patch in
1248 * the pointer */
1249
1250 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1251 if (r < 0)
1252 return r;
1253
1254 o->data.next_hash_offset = htole64(offset);
1255 }
1256
1257 f->data_hash_table[h].tail_hash_offset = htole64(offset);
1258
1259 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
1260 f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
1261
1262 return 0;
1263 }
1264
1265 int journal_file_find_field_object_with_hash(
1266 JournalFile *f,
1267 const void *field, uint64_t size, uint64_t hash,
1268 Object **ret, uint64_t *offset) {
1269
1270 uint64_t p, osize, h, m;
1271 int r;
1272
1273 assert(f);
1274 assert(f->header);
1275 assert(field && size > 0);
1276
1277 /* If the field hash table is empty, we can't find anything */
1278 if (le64toh(f->header->field_hash_table_size) <= 0)
1279 return 0;
1280
1281 /* Map the field hash table, if it isn't mapped yet. */
1282 r = journal_file_map_field_hash_table(f);
1283 if (r < 0)
1284 return r;
1285
1286 osize = offsetof(Object, field.payload) + size;
1287
1288 m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
1289 if (m <= 0)
1290 return -EBADMSG;
1291
1292 h = hash % m;
1293 p = le64toh(f->field_hash_table[h].head_hash_offset);
1294
1295 while (p > 0) {
1296 Object *o;
1297
1298 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1299 if (r < 0)
1300 return r;
1301
1302 if (le64toh(o->field.hash) == hash &&
1303 le64toh(o->object.size) == osize &&
1304 memcmp(o->field.payload, field, size) == 0) {
1305
1306 if (ret)
1307 *ret = o;
1308 if (offset)
1309 *offset = p;
1310
1311 return 1;
1312 }
1313
1314 p = le64toh(o->field.next_hash_offset);
1315 }
1316
1317 return 0;
1318 }
1319
1320 int journal_file_find_field_object(
1321 JournalFile *f,
1322 const void *field, uint64_t size,
1323 Object **ret, uint64_t *offset) {
1324
1325 uint64_t hash;
1326
1327 assert(f);
1328 assert(field && size > 0);
1329
1330 hash = hash64(field, size);
1331
1332 return journal_file_find_field_object_with_hash(f,
1333 field, size, hash,
1334 ret, offset);
1335 }
1336
1337 int journal_file_find_data_object_with_hash(
1338 JournalFile *f,
1339 const void *data, uint64_t size, uint64_t hash,
1340 Object **ret, uint64_t *offset) {
1341
1342 uint64_t p, osize, h, m;
1343 int r;
1344
1345 assert(f);
1346 assert(f->header);
1347 assert(data || size == 0);
1348
1349 /* If there's no data hash table, then there's no entry. */
1350 if (le64toh(f->header->data_hash_table_size) <= 0)
1351 return 0;
1352
1353 /* Map the data hash table, if it isn't mapped yet. */
1354 r = journal_file_map_data_hash_table(f);
1355 if (r < 0)
1356 return r;
1357
1358 osize = offsetof(Object, data.payload) + size;
1359
1360 m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
1361 if (m <= 0)
1362 return -EBADMSG;
1363
1364 h = hash % m;
1365 p = le64toh(f->data_hash_table[h].head_hash_offset);
1366
1367 while (p > 0) {
1368 Object *o;
1369
1370 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1371 if (r < 0)
1372 return r;
1373
1374 if (le64toh(o->data.hash) != hash)
1375 goto next;
1376
1377 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
1378 #if HAVE_XZ || HAVE_LZ4
1379 uint64_t l;
1380 size_t rsize = 0;
1381
1382 l = le64toh(o->object.size);
1383 if (l <= offsetof(Object, data.payload))
1384 return -EBADMSG;
1385
1386 l -= offsetof(Object, data.payload);
1387
1388 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
1389 o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);
1390 if (r < 0)
1391 return r;
1392
1393 if (rsize == size &&
1394 memcmp(f->compress_buffer, data, size) == 0) {
1395
1396 if (ret)
1397 *ret = o;
1398
1399 if (offset)
1400 *offset = p;
1401
1402 return 1;
1403 }
1404 #else
1405 return -EPROTONOSUPPORT;
1406 #endif
1407 } else if (le64toh(o->object.size) == osize &&
1408 memcmp(o->data.payload, data, size) == 0) {
1409
1410 if (ret)
1411 *ret = o;
1412
1413 if (offset)
1414 *offset = p;
1415
1416 return 1;
1417 }
1418
1419 next:
1420 p = le64toh(o->data.next_hash_offset);
1421 }
1422
1423 return 0;
1424 }
1425
1426 int journal_file_find_data_object(
1427 JournalFile *f,
1428 const void *data, uint64_t size,
1429 Object **ret, uint64_t *offset) {
1430
1431 uint64_t hash;
1432
1433 assert(f);
1434 assert(data || size == 0);
1435
1436 hash = hash64(data, size);
1437
1438 return journal_file_find_data_object_with_hash(f,
1439 data, size, hash,
1440 ret, offset);
1441 }
1442
1443 static int journal_file_append_field(
1444 JournalFile *f,
1445 const void *field, uint64_t size,
1446 Object **ret, uint64_t *offset) {
1447
1448 uint64_t hash, p;
1449 uint64_t osize;
1450 Object *o;
1451 int r;
1452
1453 assert(f);
1454 assert(field && size > 0);
1455
1456 hash = hash64(field, size);
1457
1458 r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
1459 if (r < 0)
1460 return r;
1461 else if (r > 0) {
1462
1463 if (ret)
1464 *ret = o;
1465
1466 if (offset)
1467 *offset = p;
1468
1469 return 0;
1470 }
1471
1472 osize = offsetof(Object, field.payload) + size;
1473 r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
1474 if (r < 0)
1475 return r;
1476
1477 o->field.hash = htole64(hash);
1478 memcpy(o->field.payload, field, size);
1479
1480 r = journal_file_link_field(f, o, p, hash);
1481 if (r < 0)
1482 return r;
1483
1484 /* The linking might have altered the window, so let's
1485 * refresh our pointer */
1486 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1487 if (r < 0)
1488 return r;
1489
1490 #if HAVE_GCRYPT
1491 r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
1492 if (r < 0)
1493 return r;
1494 #endif
1495
1496 if (ret)
1497 *ret = o;
1498
1499 if (offset)
1500 *offset = p;
1501
1502 return 0;
1503 }
1504
1505 static int journal_file_append_data(
1506 JournalFile *f,
1507 const void *data, uint64_t size,
1508 Object **ret, uint64_t *offset) {
1509
1510 uint64_t hash, p;
1511 uint64_t osize;
1512 Object *o;
1513 int r, compression = 0;
1514 const void *eq;
1515
1516 assert(f);
1517 assert(data || size == 0);
1518
1519 hash = hash64(data, size);
1520
1521 r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);
1522 if (r < 0)
1523 return r;
1524 if (r > 0) {
1525
1526 if (ret)
1527 *ret = o;
1528
1529 if (offset)
1530 *offset = p;
1531
1532 return 0;
1533 }
1534
1535 osize = offsetof(Object, data.payload) + size;
1536 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1537 if (r < 0)
1538 return r;
1539
1540 o->data.hash = htole64(hash);
1541
1542 #if HAVE_XZ || HAVE_LZ4
1543 if (JOURNAL_FILE_COMPRESS(f) && size >= f->compress_threshold_bytes) {
1544 size_t rsize = 0;
1545
1546 compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
1547
1548 if (compression >= 0) {
1549 o->object.size = htole64(offsetof(Object, data.payload) + rsize);
1550 o->object.flags |= compression;
1551
1552 log_debug("Compressed data object %"PRIu64" -> %zu using %s",
1553 size, rsize, object_compressed_to_string(compression));
1554 } else
1555 /* Compression didn't work, we don't really care why, let's continue without compression */
1556 compression = 0;
1557 }
1558 #endif
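/* Note on the block above: compress_blob() is only offered size - 1 bytes of output room, so a
 * compressed result is kept only when it actually shrinks the payload; on any failure, or when
 * compression doesn't help, the data is copied verbatim below. */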
1559
1560 if (compression == 0)
1561 memcpy_safe(o->data.payload, data, size);
1562
1563 r = journal_file_link_data(f, o, p, hash);
1564 if (r < 0)
1565 return r;
1566
1567 #if HAVE_GCRYPT
1568 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1569 if (r < 0)
1570 return r;
1571 #endif
1572
1573 /* The linking might have altered the window, so let's
1574 * refresh our pointer */
1575 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1576 if (r < 0)
1577 return r;
1578
1579 if (!data)
1580 eq = NULL;
1581 else
1582 eq = memchr(data, '=', size);
1583 if (eq && eq > data) {
1584 Object *fo = NULL;
1585 uint64_t fp;
1586
1587 /* Create field object ... */
1588 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
1589 if (r < 0)
1590 return r;
1591
1592 /* ... and link it in. */
1593 o->data.next_field_offset = fo->field.head_data_offset;
1594 fo->field.head_data_offset = le64toh(p);
1595 }
1596
1597 if (ret)
1598 *ret = o;
1599
1600 if (offset)
1601 *offset = p;
1602
1603 return 0;
1604 }
1605
1606 uint64_t journal_file_entry_n_items(Object *o) {
1607 assert(o);
1608
1609 if (o->object.type != OBJECT_ENTRY)
1610 return 0;
1611
1612 return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem);
1613 }
1614
1615 uint64_t journal_file_entry_array_n_items(Object *o) {
1616 assert(o);
1617
1618 if (o->object.type != OBJECT_ENTRY_ARRAY)
1619 return 0;
1620
1621 return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
1622 }
1623
1624 uint64_t journal_file_hash_table_n_items(Object *o) {
1625 assert(o);
1626
1627 if (!IN_SET(o->object.type, OBJECT_DATA_HASH_TABLE, OBJECT_FIELD_HASH_TABLE))
1628 return 0;
1629
1630 return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem);
1631 }
1632
1633 static int link_entry_into_array(JournalFile *f,
1634 le64_t *first,
1635 le64_t *idx,
1636 uint64_t p) {
1637 int r;
1638 uint64_t n = 0, ap = 0, q, i, a, hidx;
1639 Object *o;
1640
1641 assert(f);
1642 assert(f->header);
1643 assert(first);
1644 assert(idx);
1645 assert(p > 0);
1646
1647 a = le64toh(*first);
1648 i = hidx = le64toh(*idx);
1649 while (a > 0) {
1650
1651 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
1652 if (r < 0)
1653 return r;
1654
1655 n = journal_file_entry_array_n_items(o);
1656 if (i < n) {
1657 o->entry_array.items[i] = htole64(p);
1658 *idx = htole64(hidx + 1);
1659 return 0;
1660 }
1661
1662 i -= n;
1663 ap = a;
1664 a = le64toh(o->entry_array.next_entry_array_offset);
1665 }
1666
1667 if (hidx > n)
1668 n = (hidx+1) * 2;
1669 else
1670 n = n * 2;
1671
1672 if (n < 4)
1673 n = 4;
1674
1675 r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
1676 offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
1677 &o, &q);
1678 if (r < 0)
1679 return r;
1680
1681 #if HAVE_GCRYPT
1682 r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
1683 if (r < 0)
1684 return r;
1685 #endif
1686
1687 o->entry_array.items[i] = htole64(p);
1688
1689 if (ap == 0)
1690 *first = htole64(q);
1691 else {
1692 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
1693 if (r < 0)
1694 return r;
1695
1696 o->entry_array.next_entry_array_offset = htole64(q);
1697 }
1698
1699 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
1700 f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
1701
1702 *idx = htole64(hidx + 1);
1703
1704 return 0;
1705 }
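/* Illustrative note on the growth policy above: each entry array appended to a chain is sized at
 * roughly twice the number of entries already linked (and never fewer than 4 items), so per-array
 * capacity grows geometrically and a chain holding n entries needs only O(log n) array objects to
 * walk or bisect. */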
1706
1707 static int link_entry_into_array_plus_one(JournalFile *f,
1708 le64_t *extra,
1709 le64_t *first,
1710 le64_t *idx,
1711 uint64_t p) {
1712
1713 int r;
1714
1715 assert(f);
1716 assert(extra);
1717 assert(first);
1718 assert(idx);
1719 assert(p > 0);
1720
1721 if (*idx == 0)
1722 *extra = htole64(p);
1723 else {
1724 le64_t i;
1725
1726 i = htole64(le64toh(*idx) - 1);
1727 r = link_entry_into_array(f, first, &i, p);
1728 if (r < 0)
1729 return r;
1730 }
1731
1732 *idx = htole64(le64toh(*idx) + 1);
1733 return 0;
1734 }
1735
1736 static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
1737 uint64_t p;
1738 int r;
1739 assert(f);
1740 assert(o);
1741 assert(offset > 0);
1742
1743 p = le64toh(o->entry.items[i].object_offset);
1744 if (p == 0)
1745 return -EINVAL;
1746
1747 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1748 if (r < 0)
1749 return r;
1750
1751 return link_entry_into_array_plus_one(f,
1752 &o->data.entry_offset,
1753 &o->data.entry_array_offset,
1754 &o->data.n_entries,
1755 offset);
1756 }
1757
1758 static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
1759 uint64_t n, i;
1760 int r;
1761
1762 assert(f);
1763 assert(f->header);
1764 assert(o);
1765 assert(offset > 0);
1766
1767 if (o->object.type != OBJECT_ENTRY)
1768 return -EINVAL;
1769
1770 __sync_synchronize();
1771
1772 /* Link up the entry itself */
1773 r = link_entry_into_array(f,
1774 &f->header->entry_array_offset,
1775 &f->header->n_entries,
1776 offset);
1777 if (r < 0)
1778 return r;
1779
1780 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1781
1782 if (f->header->head_entry_realtime == 0)
1783 f->header->head_entry_realtime = o->entry.realtime;
1784
1785 f->header->tail_entry_realtime = o->entry.realtime;
1786 f->header->tail_entry_monotonic = o->entry.monotonic;
1787
1788 /* Link up the items */
1789 n = journal_file_entry_n_items(o);
1790 for (i = 0; i < n; i++) {
1791 r = journal_file_link_entry_item(f, o, offset, i);
1792 if (r < 0)
1793 return r;
1794 }
1795
1796 return 0;
1797 }
1798
1799 static int journal_file_append_entry_internal(
1800 JournalFile *f,
1801 const dual_timestamp *ts,
1802 const sd_id128_t *boot_id,
1803 uint64_t xor_hash,
1804 const EntryItem items[], unsigned n_items,
1805 uint64_t *seqnum,
1806 Object **ret, uint64_t *offset) {
1807 uint64_t np;
1808 uint64_t osize;
1809 Object *o;
1810 int r;
1811
1812 assert(f);
1813 assert(f->header);
1814 assert(items || n_items == 0);
1815 assert(ts);
1816
1817 osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
1818
1819 r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
1820 if (r < 0)
1821 return r;
1822
1823 o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
1824 memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
1825 o->entry.realtime = htole64(ts->realtime);
1826 o->entry.monotonic = htole64(ts->monotonic);
1827 o->entry.xor_hash = htole64(xor_hash);
1828 o->entry.boot_id = boot_id ? *boot_id : f->header->boot_id;
1829
1830 #if HAVE_GCRYPT
1831 r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
1832 if (r < 0)
1833 return r;
1834 #endif
1835
1836 r = journal_file_link_entry(f, o, np);
1837 if (r < 0)
1838 return r;
1839
1840 if (ret)
1841 *ret = o;
1842
1843 if (offset)
1844 *offset = np;
1845
1846 return 0;
1847 }
1848
1849 void journal_file_post_change(JournalFile *f) {
1850 assert(f);
1851
1852 /* inotify() does not receive IN_MODIFY events from file
1853 * accesses done via mmap(). After each access we hence
1854 * trigger IN_MODIFY by truncating the journal file to its
1855 * current size. */
1856
1857 __sync_synchronize();
1858
1859 if (ftruncate(f->fd, f->last_stat.st_size) < 0)
1860 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
1861 }
1862
1863 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
1864 assert(userdata);
1865
1866 journal_file_post_change(userdata);
1867
1868 return 1;
1869 }
1870
1871 static void schedule_post_change(JournalFile *f) {
1872 sd_event_source *timer;
1873 int enabled, r;
1874 uint64_t now;
1875
1876 assert(f);
1877 assert(f->post_change_timer);
1878
1879 timer = f->post_change_timer;
1880
1881 r = sd_event_source_get_enabled(timer, &enabled);
1882 if (r < 0) {
1883 log_debug_errno(r, "Failed to get ftruncate timer state: %m");
1884 goto fail;
1885 }
1886
1887 if (enabled == SD_EVENT_ONESHOT)
1888 return;
1889
1890 r = sd_event_now(sd_event_source_get_event(timer), CLOCK_MONOTONIC, &now);
1891 if (r < 0) {
1892 log_debug_errno(r, "Failed to get clock's now for scheduling ftruncate: %m");
1893 goto fail;
1894 }
1895
1896 r = sd_event_source_set_time(timer, now+f->post_change_timer_period);
1897 if (r < 0) {
1898 log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
1899 goto fail;
1900 }
1901
1902 r = sd_event_source_set_enabled(timer, SD_EVENT_ONESHOT);
1903 if (r < 0) {
1904 log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
1905 goto fail;
1906 }
1907
1908 return;
1909
1910 fail:
1911 /* On failure, let's simply post the change immediately. */
1912 journal_file_post_change(f);
1913 }
1914
1915 /* Enable coalesced change posting in a timer on the provided sd_event instance */
1916 int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
1917 _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
1918 int r;
1919
1920 assert(f);
1921 assert_return(!f->post_change_timer, -EINVAL);
1922 assert(e);
1923 assert(t);
1924
1925 r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
1926 if (r < 0)
1927 return r;
1928
1929 r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
1930 if (r < 0)
1931 return r;
1932
1933 f->post_change_timer = TAKE_PTR(timer);
1934 f->post_change_timer_period = t;
1935
1936 return r;
1937 }
1938
1939 static int entry_item_cmp(const void *_a, const void *_b) {
1940 const EntryItem *a = _a, *b = _b;
1941
1942 if (le64toh(a->object_offset) < le64toh(b->object_offset))
1943 return -1;
1944 if (le64toh(a->object_offset) > le64toh(b->object_offset))
1945 return 1;
1946 return 0;
1947 }
1948
1949 int journal_file_append_entry(
1950 JournalFile *f,
1951 const dual_timestamp *ts,
1952 const sd_id128_t *boot_id,
1953 const struct iovec iovec[], unsigned n_iovec,
1954 uint64_t *seqnum,
1955 Object **ret, uint64_t *offset) {
1956
1957 unsigned i;
1958 EntryItem *items;
1959 int r;
1960 uint64_t xor_hash = 0;
1961 struct dual_timestamp _ts;
1962
1963 assert(f);
1964 assert(f->header);
1965 assert(iovec || n_iovec == 0);
1966
1967 if (ts) {
1968 if (!VALID_REALTIME(ts->realtime)) {
1969 log_debug("Invalid realtime timestamp %"PRIu64", refusing entry.", ts->realtime);
1970 return -EBADMSG;
1971 }
1972 if (!VALID_MONOTONIC(ts->monotonic)) {
1973 log_debug("Invalid monotonic timestamp %"PRIu64", refusing entry.", ts->monotonic);
1974 return -EBADMSG;
1975 }
1976 } else {
1977 dual_timestamp_get(&_ts);
1978 ts = &_ts;
1979 }
1980
1981 #if HAVE_GCRYPT
1982 r = journal_file_maybe_append_tag(f, ts->realtime);
1983 if (r < 0)
1984 return r;
1985 #endif
1986
1987 /* alloca() can't take 0, hence let's allocate at least one */
1988 items = newa(EntryItem, MAX(1u, n_iovec));
1989
1990 for (i = 0; i < n_iovec; i++) {
1991 uint64_t p;
1992 Object *o;
1993
1994 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
1995 if (r < 0)
1996 return r;
1997
1998 xor_hash ^= le64toh(o->data.hash);
1999 items[i].object_offset = htole64(p);
2000 items[i].hash = o->data.hash;
2001 }
2002
2003 /* Order by the position on disk, in order to improve seek
2004 * times for rotating media. */
2005 qsort_safe(items, n_iovec, sizeof(EntryItem), entry_item_cmp);
2006
2007 r = journal_file_append_entry_internal(f, ts, boot_id, xor_hash, items, n_iovec, seqnum, ret, offset);
2008
2009 /* If the memory mapping triggered a SIGBUS then we return an
2010 * IO error and ignore the error code passed down to us, since
2011 * it is very likely just an effect of a nullified replacement
2012 * mapping page */
2013
2014 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd))
2015 r = -EIO;
2016
2017 if (f->post_change_timer)
2018 schedule_post_change(f);
2019 else
2020 journal_file_post_change(f);
2021
2022 return r;
2023 }
2024
2025 typedef struct ChainCacheItem {
2026 uint64_t first; /* the array at the beginning of the chain */
2027 uint64_t array; /* the cached array */
2028 uint64_t begin; /* the first item in the cached array */
2029 uint64_t total; /* the total number of items in all arrays before this one in the chain */
2030 uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
2031 } ChainCacheItem;
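/* Illustrative example of how this cache is used below: if a previous walk of a chain ended in the
 * array at offset ci->array with ci->total items stored in earlier arrays, a later request for
 * index i > ci->total starts directly at that array with index i - ci->total, instead of
 * re-walking the chain from its first array. */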
2032
2033 static void chain_cache_put(
2034 OrderedHashmap *h,
2035 ChainCacheItem *ci,
2036 uint64_t first,
2037 uint64_t array,
2038 uint64_t begin,
2039 uint64_t total,
2040 uint64_t last_index) {
2041
2042 if (!ci) {
2043 /* If the chain item to cache for this chain is the
2044 * first one it's not worth caching anything */
2045 if (array == first)
2046 return;
2047
2048 if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
2049 ci = ordered_hashmap_steal_first(h);
2050 assert(ci);
2051 } else {
2052 ci = new(ChainCacheItem, 1);
2053 if (!ci)
2054 return;
2055 }
2056
2057 ci->first = first;
2058
2059 if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
2060 free(ci);
2061 return;
2062 }
2063 } else
2064 assert(ci->first == first);
2065
2066 ci->array = array;
2067 ci->begin = begin;
2068 ci->total = total;
2069 ci->last_index = last_index;
2070 }
2071
2072 static int generic_array_get(
2073 JournalFile *f,
2074 uint64_t first,
2075 uint64_t i,
2076 Object **ret, uint64_t *offset) {
2077
2078 Object *o;
2079 uint64_t p = 0, a, t = 0;
2080 int r;
2081 ChainCacheItem *ci;
2082
2083 assert(f);
2084
2085 a = first;
2086
2087 /* Try the chain cache first */
2088 ci = ordered_hashmap_get(f->chain_cache, &first);
2089 if (ci && i > ci->total) {
2090 a = ci->array;
2091 i -= ci->total;
2092 t = ci->total;
2093 }
2094
2095 while (a > 0) {
2096 uint64_t k;
2097
2098 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
2099 if (r < 0)
2100 return r;
2101
2102 k = journal_file_entry_array_n_items(o);
2103 if (i < k) {
2104 p = le64toh(o->entry_array.items[i]);
2105 goto found;
2106 }
2107
2108 i -= k;
2109 t += k;
2110 a = le64toh(o->entry_array.next_entry_array_offset);
2111 }
2112
2113 return 0;
2114
2115 found:
2116 /* Let's cache this item for the next invocation */
2117 chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
2118
2119 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2120 if (r < 0)
2121 return r;
2122
2123 if (ret)
2124 *ret = o;
2125
2126 if (offset)
2127 *offset = p;
2128
2129 return 1;
2130 }
2131
2132 static int generic_array_get_plus_one(
2133 JournalFile *f,
2134 uint64_t extra,
2135 uint64_t first,
2136 uint64_t i,
2137 Object **ret, uint64_t *offset) {
2138
2139 Object *o;
2140
2141 assert(f);
2142
2143 if (i == 0) {
2144 int r;
2145
2146 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2147 if (r < 0)
2148 return r;
2149
2150 if (ret)
2151 *ret = o;
2152
2153 if (offset)
2154 *offset = extra;
2155
2156 return 1;
2157 }
2158
2159 return generic_array_get(f, first, i-1, ret, offset);
2160 }
2161
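/* A minimal illustrative sketch of the "plus one" layout: a data object stores the offset
 * of its very first entry inline (data.entry_offset) and keeps any further entries in a
 * chained entry array (data.entry_array_offset), hence index 0 maps to the inline "extra"
 * entry and index i > 0 maps to array item i-1. The helper below is hypothetical and
 * simply visits every entry referencing the data object 'd'. */
static int example_visit_entries_of_data(JournalFile *f, Object *d) {
        uint64_t i, n, extra, first;
        int r;

        assert(f);
        assert(d);

        /* Copy the offsets out first, since moving to other objects below may invalidate
         * the mapping 'd' points into. */
        n = le64toh(d->data.n_entries);
        extra = le64toh(d->data.entry_offset);
        first = le64toh(d->data.entry_array_offset);

        for (i = 0; i < n; i++) {
                Object *o;
                uint64_t p;

                r = generic_array_get_plus_one(f, extra, first, i, &o, &p);
                if (r <= 0)
                        return r;

                /* ... inspect the entry object 'o' at offset 'p' here ... */
        }

        return 1;
}
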
2162 enum {
2163 TEST_FOUND,
2164 TEST_LEFT,
2165 TEST_RIGHT
2166 };
2167
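/* These are the values the test_object() callbacks further down return to
 * generic_array_bisect(): TEST_FOUND means the entry at the tested offset matches the
 * needle, TEST_LEFT means the entry sorts before the needle (so a match can only lie
 * further to the right), TEST_RIGHT means the entry sorts after the needle (a match can
 * only lie further to the left). Negative errno values indicate errors. */
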
2168 static int generic_array_bisect(
2169 JournalFile *f,
2170 uint64_t first,
2171 uint64_t n,
2172 uint64_t needle,
2173 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2174 direction_t direction,
2175 Object **ret,
2176 uint64_t *offset,
2177 uint64_t *idx) {
2178
2179 uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
2180 bool subtract_one = false;
2181 Object *o, *array = NULL;
2182 int r;
2183 ChainCacheItem *ci;
2184
2185 assert(f);
2186 assert(test_object);
2187
2188 /* Start with the first array in the chain */
2189 a = first;
2190
2191 ci = ordered_hashmap_get(f->chain_cache, &first);
2192 if (ci && n > ci->total && ci->begin != 0) {
2193 /* Ah, we have iterated this bisection array chain
2194 * previously! Let's see if we can skip ahead in the
2195 * chain, as far as the last time. But we can't jump
2196 * backwards in the chain, so let's check that
2197 * first. */
2198
2199 r = test_object(f, ci->begin, needle);
2200 if (r < 0)
2201 return r;
2202
2203 if (r == TEST_LEFT) {
2204                         /* OK, what we are looking for is to the right of
2205                          * the beginning of this EntryArray, so let's jump
2206                          * straight to the previously cached array in the
2207                          * chain */
2208
2209 a = ci->array;
2210 n -= ci->total;
2211 t = ci->total;
2212 last_index = ci->last_index;
2213 }
2214 }
2215
2216 while (a > 0) {
2217 uint64_t left, right, k, lp;
2218
2219 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
2220 if (r < 0)
2221 return r;
2222
2223 k = journal_file_entry_array_n_items(array);
2224 right = MIN(k, n);
2225 if (right <= 0)
2226 return 0;
2227
2228 i = right - 1;
2229 lp = p = le64toh(array->entry_array.items[i]);
2230 if (p <= 0)
2231 r = -EBADMSG;
2232 else
2233 r = test_object(f, p, needle);
2234 if (r == -EBADMSG) {
2235 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2236 n = i;
2237 continue;
2238 }
2239 if (r < 0)
2240 return r;
2241
2242 if (r == TEST_FOUND)
2243 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2244
2245 if (r == TEST_RIGHT) {
2246 left = 0;
2247 right -= 1;
2248
2249 if (last_index != (uint64_t) -1) {
2250 assert(last_index <= right);
2251
2252                                 /* If we cached the last index we
2253                                  * looked at, let's try not to jump
2254                                  * around too wildly and see if we can
2255                                  * limit the range to look at early on
2256                                  * to the immediate neighbors of the
2257                                  * last index we looked at. */
2258
2259 if (last_index > 0) {
2260 uint64_t x = last_index - 1;
2261
2262 p = le64toh(array->entry_array.items[x]);
2263 if (p <= 0)
2264 return -EBADMSG;
2265
2266 r = test_object(f, p, needle);
2267 if (r < 0)
2268 return r;
2269
2270 if (r == TEST_FOUND)
2271 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2272
2273 if (r == TEST_RIGHT)
2274 right = x;
2275 else
2276 left = x + 1;
2277 }
2278
2279 if (last_index < right) {
2280 uint64_t y = last_index + 1;
2281
2282 p = le64toh(array->entry_array.items[y]);
2283 if (p <= 0)
2284 return -EBADMSG;
2285
2286 r = test_object(f, p, needle);
2287 if (r < 0)
2288 return r;
2289
2290 if (r == TEST_FOUND)
2291 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2292
2293 if (r == TEST_RIGHT)
2294 right = y;
2295 else
2296 left = y + 1;
2297 }
2298 }
2299
2300 for (;;) {
2301 if (left == right) {
2302 if (direction == DIRECTION_UP)
2303 subtract_one = true;
2304
2305 i = left;
2306 goto found;
2307 }
2308
2309 assert(left < right);
2310 i = (left + right) / 2;
2311
2312 p = le64toh(array->entry_array.items[i]);
2313 if (p <= 0)
2314 r = -EBADMSG;
2315 else
2316 r = test_object(f, p, needle);
2317 if (r == -EBADMSG) {
2318 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2319 right = n = i;
2320 continue;
2321 }
2322 if (r < 0)
2323 return r;
2324
2325 if (r == TEST_FOUND)
2326 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2327
2328 if (r == TEST_RIGHT)
2329 right = i;
2330 else
2331 left = i + 1;
2332 }
2333 }
2334
2335 if (k >= n) {
2336 if (direction == DIRECTION_UP) {
2337 i = n;
2338 subtract_one = true;
2339 goto found;
2340 }
2341
2342 return 0;
2343 }
2344
2345 last_p = lp;
2346
2347 n -= k;
2348 t += k;
2349 last_index = (uint64_t) -1;
2350 a = le64toh(array->entry_array.next_entry_array_offset);
2351 }
2352
2353 return 0;
2354
2355 found:
2356 if (subtract_one && t == 0 && i == 0)
2357 return 0;
2358
2359 /* Let's cache this item for the next invocation */
2360 chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);
2361
2362 if (subtract_one && i == 0)
2363 p = last_p;
2364 else if (subtract_one)
2365 p = le64toh(array->entry_array.items[i-1]);
2366 else
2367 p = le64toh(array->entry_array.items[i]);
2368
2369 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2370 if (r < 0)
2371 return r;
2372
2373 if (ret)
2374 *ret = o;
2375
2376 if (offset)
2377 *offset = p;
2378
2379 if (idx)
2380 *idx = t + i + (subtract_one ? -1 : 0);
2381
2382 return 1;
2383 }
2384
2385 static int generic_array_bisect_plus_one(
2386 JournalFile *f,
2387 uint64_t extra,
2388 uint64_t first,
2389 uint64_t n,
2390 uint64_t needle,
2391 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2392 direction_t direction,
2393 Object **ret,
2394 uint64_t *offset,
2395 uint64_t *idx) {
2396
2397 int r;
2398 bool step_back = false;
2399 Object *o;
2400
2401 assert(f);
2402 assert(test_object);
2403
2404 if (n <= 0)
2405 return 0;
2406
2407         /* This bisects the array chain starting at 'first', but before
2408          * doing so checks the extra entry 'extra'. */
2409 r = test_object(f, extra, needle);
2410 if (r < 0)
2411 return r;
2412
2413 if (r == TEST_FOUND)
2414 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2415
2416         /* If we are looking in DIRECTION_UP then we first need to
2417            check whether the actual array contains a matching entry,
2418            and return the last of those. But if there isn't any we
2419            need to return this one, the extra entry. Hence remember
2420            this, and return it below. */
2421 if (r == TEST_LEFT)
2422 step_back = direction == DIRECTION_UP;
2423
2424 if (r == TEST_RIGHT) {
2425 if (direction == DIRECTION_DOWN)
2426 goto found;
2427 else
2428 return 0;
2429 }
2430
2431 r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, offset, idx);
2432
2433 if (r == 0 && step_back)
2434 goto found;
2435
2436 if (r > 0 && idx)
2437 (*idx)++;
2438
2439 return r;
2440
2441 found:
2442 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2443 if (r < 0)
2444 return r;
2445
2446 if (ret)
2447 *ret = o;
2448
2449 if (offset)
2450 *offset = extra;
2451
2452 if (idx)
2453 *idx = 0;
2454
2455 return 1;
2456 }
2457
2458 _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
2459 assert(f);
2460 assert(p > 0);
2461
2462 if (p == needle)
2463 return TEST_FOUND;
2464 else if (p < needle)
2465 return TEST_LEFT;
2466 else
2467 return TEST_RIGHT;
2468 }
2469
2470 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
2471 Object *o;
2472 int r;
2473
2474 assert(f);
2475 assert(p > 0);
2476
2477 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2478 if (r < 0)
2479 return r;
2480
2481 if (le64toh(o->entry.seqnum) == needle)
2482 return TEST_FOUND;
2483 else if (le64toh(o->entry.seqnum) < needle)
2484 return TEST_LEFT;
2485 else
2486 return TEST_RIGHT;
2487 }
2488
2489 int journal_file_move_to_entry_by_seqnum(
2490 JournalFile *f,
2491 uint64_t seqnum,
2492 direction_t direction,
2493 Object **ret,
2494 uint64_t *offset) {
2495 assert(f);
2496 assert(f->header);
2497
2498 return generic_array_bisect(f,
2499 le64toh(f->header->entry_array_offset),
2500 le64toh(f->header->n_entries),
2501 seqnum,
2502 test_object_seqnum,
2503 direction,
2504 ret, offset, NULL);
2505 }
2506
2507 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
2508 Object *o;
2509 int r;
2510
2511 assert(f);
2512 assert(p > 0);
2513
2514 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2515 if (r < 0)
2516 return r;
2517
2518 if (le64toh(o->entry.realtime) == needle)
2519 return TEST_FOUND;
2520 else if (le64toh(o->entry.realtime) < needle)
2521 return TEST_LEFT;
2522 else
2523 return TEST_RIGHT;
2524 }
2525
2526 int journal_file_move_to_entry_by_realtime(
2527 JournalFile *f,
2528 uint64_t realtime,
2529 direction_t direction,
2530 Object **ret,
2531 uint64_t *offset) {
2532 assert(f);
2533 assert(f->header);
2534
2535 return generic_array_bisect(f,
2536 le64toh(f->header->entry_array_offset),
2537 le64toh(f->header->n_entries),
2538 realtime,
2539 test_object_realtime,
2540 direction,
2541 ret, offset, NULL);
2542 }
2543
2544 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
2545 Object *o;
2546 int r;
2547
2548 assert(f);
2549 assert(p > 0);
2550
2551 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2552 if (r < 0)
2553 return r;
2554
2555 if (le64toh(o->entry.monotonic) == needle)
2556 return TEST_FOUND;
2557 else if (le64toh(o->entry.monotonic) < needle)
2558 return TEST_LEFT;
2559 else
2560 return TEST_RIGHT;
2561 }
2562
2563 static int find_data_object_by_boot_id(
2564 JournalFile *f,
2565 sd_id128_t boot_id,
2566 Object **o,
2567 uint64_t *b) {
2568
2569 char t[STRLEN("_BOOT_ID=") + 32 + 1] = "_BOOT_ID=";
2570
2571 sd_id128_to_string(boot_id, t + 9);
2572 return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);
2573 }
2574
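/* The lookup key constructed above is the literal journal field "_BOOT_ID=" followed by
 * the 32 lowercase hex characters of the 128 bit boot ID, e.g.
 * "_BOOT_ID=6887a6a53ce04173b4b46e45b4a52d1c" (a made-up example ID). That's 9 + 32 = 41
 * characters, which is why the buffer is sized STRLEN("_BOOT_ID=") + 32 + 1 and the data
 * object is looked up with a length of sizeof(t) - 1 = 41 bytes. */
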
2575 int journal_file_move_to_entry_by_monotonic(
2576 JournalFile *f,
2577 sd_id128_t boot_id,
2578 uint64_t monotonic,
2579 direction_t direction,
2580 Object **ret,
2581 uint64_t *offset) {
2582
2583 Object *o;
2584 int r;
2585
2586 assert(f);
2587
2588 r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
2589 if (r < 0)
2590 return r;
2591 if (r == 0)
2592 return -ENOENT;
2593
2594 return generic_array_bisect_plus_one(f,
2595 le64toh(o->data.entry_offset),
2596 le64toh(o->data.entry_array_offset),
2597 le64toh(o->data.n_entries),
2598 monotonic,
2599 test_object_monotonic,
2600 direction,
2601 ret, offset, NULL);
2602 }
2603
2604 void journal_file_reset_location(JournalFile *f) {
2605 f->location_type = LOCATION_HEAD;
2606 f->current_offset = 0;
2607 f->current_seqnum = 0;
2608 f->current_realtime = 0;
2609 f->current_monotonic = 0;
2610 zero(f->current_boot_id);
2611 f->current_xor_hash = 0;
2612 }
2613
2614 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
2615 f->location_type = LOCATION_SEEK;
2616 f->current_offset = offset;
2617 f->current_seqnum = le64toh(o->entry.seqnum);
2618 f->current_realtime = le64toh(o->entry.realtime);
2619 f->current_monotonic = le64toh(o->entry.monotonic);
2620 f->current_boot_id = o->entry.boot_id;
2621 f->current_xor_hash = le64toh(o->entry.xor_hash);
2622 }
2623
2624 int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
2625 assert(af);
2626 assert(af->header);
2627 assert(bf);
2628 assert(bf->header);
2629 assert(af->location_type == LOCATION_SEEK);
2630 assert(bf->location_type == LOCATION_SEEK);
2631
2632 /* If contents and timestamps match, these entries are
2633          * identical, even if their seqnums do not match */
2634 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
2635 af->current_monotonic == bf->current_monotonic &&
2636 af->current_realtime == bf->current_realtime &&
2637 af->current_xor_hash == bf->current_xor_hash)
2638 return 0;
2639
2640 if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
2641
2642 /* If this is from the same seqnum source, compare
2643 * seqnums */
2644 if (af->current_seqnum < bf->current_seqnum)
2645 return -1;
2646 if (af->current_seqnum > bf->current_seqnum)
2647 return 1;
2648
2649 /* Wow! This is weird, different data but the same
2650 * seqnums? Something is borked, but let's make the
2651 * best of it and compare by time. */
2652 }
2653
2654 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {
2655
2656 /* If the boot id matches, compare monotonic time */
2657 if (af->current_monotonic < bf->current_monotonic)
2658 return -1;
2659 if (af->current_monotonic > bf->current_monotonic)
2660 return 1;
2661 }
2662
2663 /* Otherwise, compare UTC time */
2664 if (af->current_realtime < bf->current_realtime)
2665 return -1;
2666 if (af->current_realtime > bf->current_realtime)
2667 return 1;
2668
2669 /* Finally, compare by contents */
2670 if (af->current_xor_hash < bf->current_xor_hash)
2671 return -1;
2672 if (af->current_xor_hash > bf->current_xor_hash)
2673 return 1;
2674
2675 return 0;
2676 }
2677
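/* A minimal illustrative sketch of how the comparison above is meant to be used: when
 * interleaving entries from several journal files, each file is first seeked to a
 * candidate entry and journal_file_save_location() is called on it; the file whose saved
 * location compares smallest then holds the globally next entry. The helper below is
 * hypothetical and just picks that file from an array of candidates. */
static JournalFile* example_earliest_file(JournalFile *files[], size_t n_files) {
        JournalFile *best = NULL;
        size_t i;

        for (i = 0; i < n_files; i++) {
                if (!files[i])
                        continue;

                if (!best || journal_file_compare_locations(files[i], best) < 0)
                        best = files[i];
        }

        return best;
}
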
2678 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2679
2680 /* Increase or decrease the specified index, in the right direction. */
2681
2682 if (direction == DIRECTION_DOWN) {
2683 if (*i >= n - 1)
2684 return 0;
2685
2686 (*i) ++;
2687 } else {
2688 if (*i <= 0)
2689 return 0;
2690
2691 (*i) --;
2692 }
2693
2694 return 1;
2695 }
2696
2697 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
2698
2699         /* Consider it an error if either of the two offsets is uninitialized */
2700 if (old_offset == 0 || new_offset == 0)
2701 return false;
2702
2703 /* If we go down, the new offset must be larger than the old one. */
2704 return direction == DIRECTION_DOWN ?
2705 new_offset > old_offset :
2706 new_offset < old_offset;
2707 }
2708
2709 int journal_file_next_entry(
2710 JournalFile *f,
2711 uint64_t p,
2712 direction_t direction,
2713 Object **ret, uint64_t *offset) {
2714
2715 uint64_t i, n, ofs;
2716 int r;
2717
2718 assert(f);
2719 assert(f->header);
2720
2721 n = le64toh(f->header->n_entries);
2722 if (n <= 0)
2723 return 0;
2724
2725 if (p == 0)
2726 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2727 else {
2728 r = generic_array_bisect(f,
2729 le64toh(f->header->entry_array_offset),
2730 le64toh(f->header->n_entries),
2731 p,
2732 test_object_offset,
2733 DIRECTION_DOWN,
2734 NULL, NULL,
2735 &i);
2736 if (r <= 0)
2737 return r;
2738
2739 r = bump_array_index(&i, direction, n);
2740 if (r <= 0)
2741 return r;
2742 }
2743
2744 /* And jump to it */
2745 for (;;) {
2746 r = generic_array_get(f,
2747 le64toh(f->header->entry_array_offset),
2748 i,
2749 ret, &ofs);
2750 if (r > 0)
2751 break;
2752 if (r != -EBADMSG)
2753 return r;
2754
2755 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2756 * the next one might work for us instead. */
2757 log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
2758
2759 r = bump_array_index(&i, direction, n);
2760 if (r <= 0)
2761 return r;
2762 }
2763
2764 /* Ensure our array is properly ordered. */
2765 if (p > 0 && !check_properly_ordered(ofs, p, direction)) {
2766 log_debug("%s: entry array not properly ordered at entry %" PRIu64, f->path, i);
2767 return -EBADMSG;
2768 }
2769
2770 if (offset)
2771 *offset = ofs;
2772
2773 return 1;
2774 }
2775
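/* A minimal illustrative sketch of iterating a whole file with the function above:
 * passing p == 0 returns the first (or, for DIRECTION_UP, the last) entry, and passing
 * the offset of the entry returned last time yields its successor. The helper below is
 * hypothetical. */
static int example_iterate_entries(JournalFile *f) {
        uint64_t p = 0;
        int r;

        for (;;) {
                Object *o;

                r = journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p);
                if (r < 0)
                        return r;
                if (r == 0)
                        return 0; /* reached the end of the file */

                /* ... consume the entry object 'o' at offset 'p' here ... */
        }
}
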
2776 int journal_file_next_entry_for_data(
2777 JournalFile *f,
2778 Object *o, uint64_t p,
2779 uint64_t data_offset,
2780 direction_t direction,
2781 Object **ret, uint64_t *offset) {
2782
2783 uint64_t i, n, ofs;
2784 Object *d;
2785 int r;
2786
2787 assert(f);
2788 assert(p > 0 || !o);
2789
2790 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2791 if (r < 0)
2792 return r;
2793
2794 n = le64toh(d->data.n_entries);
2795 if (n <= 0)
2796 return n;
2797
2798 if (!o)
2799 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2800 else {
2801 if (o->object.type != OBJECT_ENTRY)
2802 return -EINVAL;
2803
2804 r = generic_array_bisect_plus_one(f,
2805 le64toh(d->data.entry_offset),
2806 le64toh(d->data.entry_array_offset),
2807 le64toh(d->data.n_entries),
2808 p,
2809 test_object_offset,
2810 DIRECTION_DOWN,
2811 NULL, NULL,
2812 &i);
2813
2814 if (r <= 0)
2815 return r;
2816
2817 r = bump_array_index(&i, direction, n);
2818 if (r <= 0)
2819 return r;
2820 }
2821
2822 for (;;) {
2823 r = generic_array_get_plus_one(f,
2824 le64toh(d->data.entry_offset),
2825 le64toh(d->data.entry_array_offset),
2826 i,
2827 ret, &ofs);
2828 if (r > 0)
2829 break;
2830 if (r != -EBADMSG)
2831 return r;
2832
2833 log_debug_errno(r, "Data entry item %" PRIu64 " is bad, skipping over it.", i);
2834
2835 r = bump_array_index(&i, direction, n);
2836 if (r <= 0)
2837 return r;
2838 }
2839
2840 /* Ensure our array is properly ordered. */
2841         if (p > 0 && !check_properly_ordered(ofs, p, direction)) {
2842 log_debug("%s data entry array not properly ordered at entry %" PRIu64, f->path, i);
2843 return -EBADMSG;
2844 }
2845
2846 if (offset)
2847 *offset = ofs;
2848
2849 return 1;
2850 }
2851
2852 int journal_file_move_to_entry_by_offset_for_data(
2853 JournalFile *f,
2854 uint64_t data_offset,
2855 uint64_t p,
2856 direction_t direction,
2857 Object **ret, uint64_t *offset) {
2858
2859 int r;
2860 Object *d;
2861
2862 assert(f);
2863
2864 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2865 if (r < 0)
2866 return r;
2867
2868 return generic_array_bisect_plus_one(f,
2869 le64toh(d->data.entry_offset),
2870 le64toh(d->data.entry_array_offset),
2871 le64toh(d->data.n_entries),
2872 p,
2873 test_object_offset,
2874 direction,
2875 ret, offset, NULL);
2876 }
2877
2878 int journal_file_move_to_entry_by_monotonic_for_data(
2879 JournalFile *f,
2880 uint64_t data_offset,
2881 sd_id128_t boot_id,
2882 uint64_t monotonic,
2883 direction_t direction,
2884 Object **ret, uint64_t *offset) {
2885
2886 Object *o, *d;
2887 int r;
2888 uint64_t b, z;
2889
2890 assert(f);
2891
2892 /* First, seek by time */
2893 r = find_data_object_by_boot_id(f, boot_id, &o, &b);
2894 if (r < 0)
2895 return r;
2896 if (r == 0)
2897 return -ENOENT;
2898
2899 r = generic_array_bisect_plus_one(f,
2900 le64toh(o->data.entry_offset),
2901 le64toh(o->data.entry_array_offset),
2902 le64toh(o->data.n_entries),
2903 monotonic,
2904 test_object_monotonic,
2905 direction,
2906 NULL, &z, NULL);
2907 if (r <= 0)
2908 return r;
2909
2910 /* And now, continue seeking until we find an entry that
2911 * exists in both bisection arrays */
2912
2913 for (;;) {
2914 Object *qo;
2915 uint64_t p, q;
2916
2917 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2918 if (r < 0)
2919 return r;
2920
2921 r = generic_array_bisect_plus_one(f,
2922 le64toh(d->data.entry_offset),
2923 le64toh(d->data.entry_array_offset),
2924 le64toh(d->data.n_entries),
2925 z,
2926 test_object_offset,
2927 direction,
2928 NULL, &p, NULL);
2929 if (r <= 0)
2930 return r;
2931
2932 r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
2933 if (r < 0)
2934 return r;
2935
2936 r = generic_array_bisect_plus_one(f,
2937 le64toh(o->data.entry_offset),
2938 le64toh(o->data.entry_array_offset),
2939 le64toh(o->data.n_entries),
2940 p,
2941 test_object_offset,
2942 direction,
2943 &qo, &q, NULL);
2944
2945 if (r <= 0)
2946 return r;
2947
2948 if (p == q) {
2949 if (ret)
2950 *ret = qo;
2951 if (offset)
2952 *offset = q;
2953
2954 return 1;
2955 }
2956
2957 z = q;
2958 }
2959 }
2960
2961 int journal_file_move_to_entry_by_seqnum_for_data(
2962 JournalFile *f,
2963 uint64_t data_offset,
2964 uint64_t seqnum,
2965 direction_t direction,
2966 Object **ret, uint64_t *offset) {
2967
2968 Object *d;
2969 int r;
2970
2971 assert(f);
2972
2973 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2974 if (r < 0)
2975 return r;
2976
2977 return generic_array_bisect_plus_one(f,
2978 le64toh(d->data.entry_offset),
2979 le64toh(d->data.entry_array_offset),
2980 le64toh(d->data.n_entries),
2981 seqnum,
2982 test_object_seqnum,
2983 direction,
2984 ret, offset, NULL);
2985 }
2986
2987 int journal_file_move_to_entry_by_realtime_for_data(
2988 JournalFile *f,
2989 uint64_t data_offset,
2990 uint64_t realtime,
2991 direction_t direction,
2992 Object **ret, uint64_t *offset) {
2993
2994 Object *d;
2995 int r;
2996
2997 assert(f);
2998
2999 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
3000 if (r < 0)
3001 return r;
3002
3003 return generic_array_bisect_plus_one(f,
3004 le64toh(d->data.entry_offset),
3005 le64toh(d->data.entry_array_offset),
3006 le64toh(d->data.n_entries),
3007 realtime,
3008 test_object_realtime,
3009 direction,
3010 ret, offset, NULL);
3011 }
3012
3013 void journal_file_dump(JournalFile *f) {
3014 Object *o;
3015 int r;
3016 uint64_t p;
3017
3018 assert(f);
3019 assert(f->header);
3020
3021 journal_file_print_header(f);
3022
3023 p = le64toh(f->header->header_size);
3024 while (p != 0) {
3025 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
3026 if (r < 0)
3027 goto fail;
3028
3029 switch (o->object.type) {
3030
3031 case OBJECT_UNUSED:
3032 printf("Type: OBJECT_UNUSED\n");
3033 break;
3034
3035 case OBJECT_DATA:
3036 printf("Type: OBJECT_DATA\n");
3037 break;
3038
3039 case OBJECT_FIELD:
3040 printf("Type: OBJECT_FIELD\n");
3041 break;
3042
3043 case OBJECT_ENTRY:
3044 printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
3045 le64toh(o->entry.seqnum),
3046 le64toh(o->entry.monotonic),
3047 le64toh(o->entry.realtime));
3048 break;
3049
3050 case OBJECT_FIELD_HASH_TABLE:
3051 printf("Type: OBJECT_FIELD_HASH_TABLE\n");
3052 break;
3053
3054 case OBJECT_DATA_HASH_TABLE:
3055 printf("Type: OBJECT_DATA_HASH_TABLE\n");
3056 break;
3057
3058 case OBJECT_ENTRY_ARRAY:
3059 printf("Type: OBJECT_ENTRY_ARRAY\n");
3060 break;
3061
3062 case OBJECT_TAG:
3063 printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
3064 le64toh(o->tag.seqnum),
3065 le64toh(o->tag.epoch));
3066 break;
3067
3068 default:
3069 printf("Type: unknown (%i)\n", o->object.type);
3070 break;
3071 }
3072
3073 if (o->object.flags & OBJECT_COMPRESSION_MASK)
3074 printf("Flags: %s\n",
3075 object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));
3076
3077 if (p == le64toh(f->header->tail_object_offset))
3078 p = 0;
3079 else
3080 p = p + ALIGN64(le64toh(o->object.size));
3081 }
3082
3083 return;
3084 fail:
3085 log_error("File corrupt");
3086 }
3087
3088 static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {
3089 const char *x;
3090
3091 x = format_timestamp(buf, l, t);
3092 if (x)
3093 return x;
3094 return " --- ";
3095 }
3096
3097 void journal_file_print_header(JournalFile *f) {
3098 char a[33], b[33], c[33], d[33];
3099 char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
3100 struct stat st;
3101 char bytes[FORMAT_BYTES_MAX];
3102
3103 assert(f);
3104 assert(f->header);
3105
3106 printf("File Path: %s\n"
3107 "File ID: %s\n"
3108 "Machine ID: %s\n"
3109 "Boot ID: %s\n"
3110 "Sequential Number ID: %s\n"
3111 "State: %s\n"
3112 "Compatible Flags:%s%s\n"
3113 "Incompatible Flags:%s%s%s\n"
3114 "Header size: %"PRIu64"\n"
3115 "Arena size: %"PRIu64"\n"
3116 "Data Hash Table Size: %"PRIu64"\n"
3117 "Field Hash Table Size: %"PRIu64"\n"
3118 "Rotate Suggested: %s\n"
3119 "Head Sequential Number: %"PRIu64" (%"PRIx64")\n"
3120 "Tail Sequential Number: %"PRIu64" (%"PRIx64")\n"
3121 "Head Realtime Timestamp: %s (%"PRIx64")\n"
3122 "Tail Realtime Timestamp: %s (%"PRIx64")\n"
3123 "Tail Monotonic Timestamp: %s (%"PRIx64")\n"
3124 "Objects: %"PRIu64"\n"
3125 "Entry Objects: %"PRIu64"\n",
3126 f->path,
3127 sd_id128_to_string(f->header->file_id, a),
3128 sd_id128_to_string(f->header->machine_id, b),
3129 sd_id128_to_string(f->header->boot_id, c),
3130 sd_id128_to_string(f->header->seqnum_id, d),
3131 f->header->state == STATE_OFFLINE ? "OFFLINE" :
3132 f->header->state == STATE_ONLINE ? "ONLINE" :
3133 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
3134 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
3135 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
3136 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
3137 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
3138 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
3139 le64toh(f->header->header_size),
3140 le64toh(f->header->arena_size),
3141 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3142 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
3143 yes_no(journal_file_rotate_suggested(f, 0)),
3144 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
3145 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
3146 format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
3147 format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
3148 format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
3149 le64toh(f->header->n_objects),
3150 le64toh(f->header->n_entries));
3151
3152 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3153 printf("Data Objects: %"PRIu64"\n"
3154 "Data Hash Table Fill: %.1f%%\n",
3155 le64toh(f->header->n_data),
3156 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
3157
3158 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3159 printf("Field Objects: %"PRIu64"\n"
3160 "Field Hash Table Fill: %.1f%%\n",
3161 le64toh(f->header->n_fields),
3162 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
3163
3164 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
3165 printf("Tag Objects: %"PRIu64"\n",
3166 le64toh(f->header->n_tags));
3167 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
3168 printf("Entry Array Objects: %"PRIu64"\n",
3169 le64toh(f->header->n_entry_arrays));
3170
3171 if (fstat(f->fd, &st) >= 0)
3172 printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
3173 }
3174
3175 static int journal_file_warn_btrfs(JournalFile *f) {
3176 unsigned attrs;
3177 int r;
3178
3179 assert(f);
3180
3181         /* Before we write anything, check if the COW logic is turned
3182          * off on btrfs. Given that our write pattern is quite unfriendly
3183          * to COW file systems, turning COW off should greatly improve
3184          * performance on such file systems, e.g. btrfs, at the expense
3185          * of data integrity features (which shouldn't be too bad, given
3186          * that we do our own checksumming). */
3187
3188 r = btrfs_is_filesystem(f->fd);
3189 if (r < 0)
3190 return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
3191 if (!r)
3192 return 0;
3193
3194 r = read_attr_fd(f->fd, &attrs);
3195 if (r < 0)
3196 return log_warning_errno(r, "Failed to read file attributes: %m");
3197
3198 if (attrs & FS_NOCOW_FL) {
3199 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3200 return 0;
3201 }
3202
3203 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3204 "This is likely to slow down journal access substantially, please consider turning "
3205 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
3206
3207 return 1;
3208 }
3209
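/* A minimal illustrative sketch of the remedy the notice above suggests: marking the
 * journal directory NOCOW (the equivalent of "chattr +C") before any journal files are
 * created in it, so that newly created files inherit the attribute. The directory path
 * is up to the caller and this helper is hypothetical; chattr_path() comes from
 * chattr-util.h, which is already included above. */
static int example_mark_journal_dir_nocow(const char *dir) {
        /* Set FS_NOCOW_FL within the FS_NOCOW_FL mask, i.e. turn copy-on-write off.
         * On file systems that don't know the flag this may fail and can be ignored. */
        return chattr_path(dir, FS_NOCOW_FL, FS_NOCOW_FL);
}
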
3210 int journal_file_open(
3211 int fd,
3212 const char *fname,
3213 int flags,
3214 mode_t mode,
3215 bool compress,
3216 uint64_t compress_threshold_bytes,
3217 bool seal,
3218 JournalMetrics *metrics,
3219 MMapCache *mmap_cache,
3220 Set *deferred_closes,
3221 JournalFile *template,
3222 JournalFile **ret) {
3223
3224 bool newly_created = false;
3225 JournalFile *f;
3226 void *h;
3227 int r;
3228 char bytes[FORMAT_BYTES_MAX];
3229
3230 assert(ret);
3231 assert(fd >= 0 || fname);
3232
3233 if (!IN_SET((flags & O_ACCMODE), O_RDONLY, O_RDWR))
3234 return -EINVAL;
3235
3236 if (fname && (flags & O_CREAT) && !endswith(fname, ".journal"))
3237 return -EINVAL;
3238
3239 f = new0(JournalFile, 1);
3240 if (!f)
3241 return -ENOMEM;
3242
3243 f->fd = fd;
3244 f->mode = mode;
3245
3246 f->flags = flags;
3247 f->prot = prot_from_flags(flags);
3248 f->writable = (flags & O_ACCMODE) != O_RDONLY;
3249 #if HAVE_LZ4
3250 f->compress_lz4 = compress;
3251 #elif HAVE_XZ
3252 f->compress_xz = compress;
3253 #endif
3254
3255 if (compress_threshold_bytes == (uint64_t) -1)
3256 f->compress_threshold_bytes = DEFAULT_COMPRESS_THRESHOLD;
3257 else
3258 f->compress_threshold_bytes = MAX(MIN_COMPRESS_THRESHOLD, compress_threshold_bytes);
3259
3260 #if HAVE_GCRYPT
3261 f->seal = seal;
3262 #endif
3263
3264 log_debug("Journal effective settings seal=%s compress=%s compress_threshold_bytes=%s",
3265 yes_no(f->seal), yes_no(JOURNAL_FILE_COMPRESS(f)),
3266 format_bytes(bytes, sizeof(bytes), f->compress_threshold_bytes));
3267
3268 if (mmap_cache)
3269 f->mmap = mmap_cache_ref(mmap_cache);
3270 else {
3271 f->mmap = mmap_cache_new();
3272 if (!f->mmap) {
3273 r = -ENOMEM;
3274 goto fail;
3275 }
3276 }
3277
3278 if (fname) {
3279 f->path = strdup(fname);
3280 if (!f->path) {
3281 r = -ENOMEM;
3282 goto fail;
3283 }
3284 } else {
3285 assert(fd >= 0);
3286
3287 /* If we don't know the path, fill in something explanatory and vaguely useful */
3288                 if (asprintf(&f->path, "/proc/self/fd/%i", fd) < 0) {
3289 r = -ENOMEM;
3290 goto fail;
3291 }
3292 }
3293
3294 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
3295 if (!f->chain_cache) {
3296 r = -ENOMEM;
3297 goto fail;
3298 }
3299
3300 if (f->fd < 0) {
3301                 /* We pass O_NONBLOCK here, so that in case somebody pointed us to some character device node or FIFO
3302                  * or so, we will likely fail quickly rather than block for long. For regular files O_NONBLOCK has no
3303                  * effect, hence it doesn't hurt in that case. */
3304
3305 f->fd = open(f->path, f->flags|O_CLOEXEC|O_NONBLOCK, f->mode);
3306 if (f->fd < 0) {
3307 r = -errno;
3308 goto fail;
3309 }
3310
3311                 /* fds we opened ourselves should also be closed by us. */
3312 f->close_fd = true;
3313
3314 r = fd_nonblock(f->fd, false);
3315 if (r < 0)
3316 goto fail;
3317 }
3318
3319 f->cache_fd = mmap_cache_add_fd(f->mmap, f->fd);
3320 if (!f->cache_fd) {
3321 r = -ENOMEM;
3322 goto fail;
3323 }
3324
3325 r = journal_file_fstat(f);
3326 if (r < 0)
3327 goto fail;
3328
3329 if (f->last_stat.st_size == 0 && f->writable) {
3330
3331 (void) journal_file_warn_btrfs(f);
3332
3333 /* Let's attach the creation time to the journal file, so that the vacuuming code knows the age of this
3334 * file even if the file might end up corrupted one day... Ideally we'd just use the creation time many
3335 * file systems maintain for each file, but the API to query this is very new, hence let's emulate this
3336 * via extended attributes. If extended attributes are not supported we'll just skip this, and rely
3337 * solely on mtime/atime/ctime of the file. */
3338 (void) fd_setcrtime(f->fd, 0);
3339
3340 #if HAVE_GCRYPT
3341 /* Try to load the FSPRG state, and if we can't, then
3342 * just don't do sealing */
3343 if (f->seal) {
3344 r = journal_file_fss_load(f);
3345 if (r < 0)
3346 f->seal = false;
3347 }
3348 #endif
3349
3350 r = journal_file_init_header(f, template);
3351 if (r < 0)
3352 goto fail;
3353
3354 r = journal_file_fstat(f);
3355 if (r < 0)
3356 goto fail;
3357
3358 newly_created = true;
3359 }
3360
3361 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
3362 r = -ENODATA;
3363 goto fail;
3364 }
3365
3366 r = mmap_cache_get(f->mmap, f->cache_fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h, NULL);
3367 if (r < 0)
3368 goto fail;
3369
3370 f->header = h;
3371
3372 if (!newly_created) {
3373 set_clear_with_destructor(deferred_closes, journal_file_close);
3374
3375 r = journal_file_verify_header(f);
3376 if (r < 0)
3377 goto fail;
3378 }
3379
3380 #if HAVE_GCRYPT
3381 if (!newly_created && f->writable) {
3382 r = journal_file_fss_load(f);
3383 if (r < 0)
3384 goto fail;
3385 }
3386 #endif
3387
3388 if (f->writable) {
3389 if (metrics) {
3390 journal_default_metrics(metrics, f->fd);
3391 f->metrics = *metrics;
3392 } else if (template)
3393 f->metrics = template->metrics;
3394
3395 r = journal_file_refresh_header(f);
3396 if (r < 0)
3397 goto fail;
3398 }
3399
3400 #if HAVE_GCRYPT
3401 r = journal_file_hmac_setup(f);
3402 if (r < 0)
3403 goto fail;
3404 #endif
3405
3406 if (newly_created) {
3407 r = journal_file_setup_field_hash_table(f);
3408 if (r < 0)
3409 goto fail;
3410
3411 r = journal_file_setup_data_hash_table(f);
3412 if (r < 0)
3413 goto fail;
3414
3415 #if HAVE_GCRYPT
3416 r = journal_file_append_first_tag(f);
3417 if (r < 0)
3418 goto fail;
3419 #endif
3420 }
3421
3422 if (mmap_cache_got_sigbus(f->mmap, f->cache_fd)) {
3423 r = -EIO;
3424 goto fail;
3425 }
3426
3427 if (template && template->post_change_timer) {
3428 r = journal_file_enable_post_change_timer(
3429 f,
3430 sd_event_source_get_event(template->post_change_timer),
3431 template->post_change_timer_period);
3432
3433 if (r < 0)
3434 goto fail;
3435 }
3436
3437         /* The file is now opened successfully, thus we take possession of any fd passed in. */
3438 f->close_fd = true;
3439
3440 *ret = f;
3441 return 0;
3442
3443 fail:
3444 if (f->cache_fd && mmap_cache_got_sigbus(f->mmap, f->cache_fd))
3445 r = -EIO;
3446
3447 (void) journal_file_close(f);
3448
3449 return r;
3450 }
3451
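/* A minimal illustrative sketch of a typical writable open via the function above,
 * roughly what a journal writer would do when creating a new journal file. The path,
 * mode and the choice to compress but not seal are illustrative assumptions, not
 * requirements of the API. */
static int example_open_for_writing(MMapCache *mmap_cache, JournalFile **ret) {
        return journal_file_open(
                        /* fd= */ -1,                          /* open by file name */
                        "/var/log/journal/example.journal",    /* must end in .journal when O_CREAT is used */
                        O_RDWR|O_CREAT,
                        0640,
                        /* compress= */ true,
                        /* compress_threshold_bytes= */ (uint64_t) -1, /* pick the built-in default */
                        /* seal= */ false,
                        /* metrics= */ NULL,
                        mmap_cache,
                        /* deferred_closes= */ NULL,
                        /* template= */ NULL,
                        ret);
}
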
3452 int journal_file_rotate(JournalFile **f, bool compress, uint64_t compress_threshold_bytes, bool seal, Set *deferred_closes) {
3453 _cleanup_free_ char *p = NULL;
3454 size_t l;
3455 JournalFile *old_file, *new_file = NULL;
3456 int r;
3457
3458 assert(f);
3459 assert(*f);
3460
3461 old_file = *f;
3462
3463 if (!old_file->writable)
3464 return -EINVAL;
3465
3466         /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3467          * rotation, since we don't know the actual path, and hence couldn't rename the file. */
3468 if (path_startswith(old_file->path, "/proc/self/fd"))
3469 return -EINVAL;
3470
3471 if (!endswith(old_file->path, ".journal"))
3472 return -EINVAL;
3473
3474 l = strlen(old_file->path);
3475 r = asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
3476 (int) l - 8, old_file->path,
3477 SD_ID128_FORMAT_VAL(old_file->header->seqnum_id),
3478 le64toh((*f)->header->head_entry_seqnum),
3479 le64toh((*f)->header->head_entry_realtime));
3480 if (r < 0)
3481 return -ENOMEM;
3482
3483 /* Try to rename the file to the archived version. If the file
3484          * was already deleted, we'll get ENOENT, let's ignore that
3485 * case. */
3486 r = rename(old_file->path, p);
3487 if (r < 0 && errno != ENOENT)
3488 return -errno;
3489
3490 /* Sync the rename to disk */
3491 (void) fsync_directory_of_file(old_file->fd);
3492
3493 /* Set as archive so offlining commits w/state=STATE_ARCHIVED.
3494 * Previously we would set old_file->header->state to STATE_ARCHIVED directly here,
3495 * but journal_file_set_offline() short-circuits when state != STATE_ONLINE, which
3496 * would result in the rotated journal never getting fsync() called before closing.
3497 * Now we simply queue the archive state by setting an archive bit, leaving the state
3498 * as STATE_ONLINE so proper offlining occurs. */
3499 old_file->archive = true;
3500
3501         /* Currently, btrfs is not very good with our write patterns
3502          * and fragments heavily. Let's defrag our journal files when
3503          * we archive them. */
3504 old_file->defrag_on_close = true;
3505
3506 r = journal_file_open(-1, old_file->path, old_file->flags, old_file->mode, compress,
3507 compress_threshold_bytes, seal, NULL, old_file->mmap, deferred_closes,
3508 old_file, &new_file);
3509
3510 if (deferred_closes &&
3511 set_put(deferred_closes, old_file) >= 0)
3512 (void) journal_file_set_offline(old_file, false);
3513 else
3514 (void) journal_file_close(old_file);
3515
3516 *f = new_file;
3517 return r;
3518 }
3519
3520 int journal_file_open_reliably(
3521 const char *fname,
3522 int flags,
3523 mode_t mode,
3524 bool compress,
3525 uint64_t compress_threshold_bytes,
3526 bool seal,
3527 JournalMetrics *metrics,
3528 MMapCache *mmap_cache,
3529 Set *deferred_closes,
3530 JournalFile *template,
3531 JournalFile **ret) {
3532
3533 int r;
3534 size_t l;
3535 _cleanup_free_ char *p = NULL;
3536
3537 r = journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3538 deferred_closes, template, ret);
3539 if (!IN_SET(r,
3540 -EBADMSG, /* Corrupted */
3541 -ENODATA, /* Truncated */
3542 -EHOSTDOWN, /* Other machine */
3543 -EPROTONOSUPPORT, /* Incompatible feature */
3544 -EBUSY, /* Unclean shutdown */
3545 -ESHUTDOWN, /* Already archived */
3546 -EIO, /* IO error, including SIGBUS on mmap */
3547 -EIDRM, /* File has been deleted */
3548 -ETXTBSY)) /* File is from the future */
3549 return r;
3550
3551 if ((flags & O_ACCMODE) == O_RDONLY)
3552 return r;
3553
3554 if (!(flags & O_CREAT))
3555 return r;
3556
3557 if (!endswith(fname, ".journal"))
3558 return r;
3559
3560 /* The file is corrupted. Rotate it away and try it again (but only once) */
3561
3562 l = strlen(fname);
3563 if (asprintf(&p, "%.*s@%016"PRIx64 "-%016"PRIx64 ".journal~",
3564 (int) l - 8, fname,
3565 now(CLOCK_REALTIME),
3566 random_u64()) < 0)
3567 return -ENOMEM;
3568
3569 if (rename(fname, p) < 0)
3570 return -errno;
3571
3572 /* btrfs doesn't cope well with our write pattern and
3573 * fragments heavily. Let's defrag all files we rotate */
3574
3575 (void) chattr_path(p, 0, FS_NOCOW_FL);
3576 (void) btrfs_defrag(p);
3577
3578 log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
3579
3580 return journal_file_open(-1, fname, flags, mode, compress, compress_threshold_bytes, seal, metrics, mmap_cache,
3581 deferred_closes, template, ret);
3582 }
3583
3584 int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p) {
3585 uint64_t i, n;
3586 uint64_t q, xor_hash = 0;
3587 int r;
3588 EntryItem *items;
3589 dual_timestamp ts;
3590 const sd_id128_t *boot_id;
3591
3592 assert(from);
3593 assert(to);
3594 assert(o);
3595 assert(p);
3596
3597 if (!to->writable)
3598 return -EPERM;
3599
3600 ts.monotonic = le64toh(o->entry.monotonic);
3601 ts.realtime = le64toh(o->entry.realtime);
3602 boot_id = &o->entry.boot_id;
3603
3604 n = journal_file_entry_n_items(o);
3605 /* alloca() can't take 0, hence let's allocate at least one */
3606 items = newa(EntryItem, MAX(1u, n));
3607
3608 for (i = 0; i < n; i++) {
3609 uint64_t l, h;
3610 le64_t le_hash;
3611 size_t t;
3612 void *data;
3613 Object *u;
3614
3615 q = le64toh(o->entry.items[i].object_offset);
3616 le_hash = o->entry.items[i].hash;
3617
3618 r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
3619 if (r < 0)
3620 return r;
3621
3622 if (le_hash != o->data.hash)
3623 return -EBADMSG;
3624
3625 l = le64toh(o->object.size) - offsetof(Object, data.payload);
3626 t = (size_t) l;
3627
3628 /* We hit the limit on 32bit machines */
3629 if ((uint64_t) t != l)
3630 return -E2BIG;
3631
3632 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
3633 #if HAVE_XZ || HAVE_LZ4
3634 size_t rsize = 0;
3635
3636 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
3637 o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);
3638 if (r < 0)
3639 return r;
3640
3641 data = from->compress_buffer;
3642 l = rsize;
3643 #else
3644 return -EPROTONOSUPPORT;
3645 #endif
3646 } else
3647 data = o->data.payload;
3648
3649 r = journal_file_append_data(to, data, l, &u, &h);
3650 if (r < 0)
3651 return r;
3652
3653 xor_hash ^= le64toh(u->data.hash);
3654 items[i].object_offset = htole64(h);
3655 items[i].hash = u->data.hash;
3656
3657 r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
3658 if (r < 0)
3659 return r;
3660 }
3661
3662 r = journal_file_append_entry_internal(to, &ts, boot_id, xor_hash, items, n,
3663 NULL, NULL, NULL);
3664
3665 if (mmap_cache_got_sigbus(to->mmap, to->cache_fd))
3666 return -EIO;
3667
3668 return r;
3669 }
3670
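/* A minimal illustrative sketch combining the function above with journal_file_next_entry()
 * to copy every entry of one journal file into another, e.g. when flushing a volatile
 * journal into a persistent one. The helper below is hypothetical. */
static int example_copy_all_entries(JournalFile *from, JournalFile *to) {
        uint64_t p = 0;
        int r;

        for (;;) {
                Object *o;

                r = journal_file_next_entry(from, p, DIRECTION_DOWN, &o, &p);
                if (r < 0)
                        return r;
                if (r == 0)
                        return 0; /* all entries copied */

                r = journal_file_copy_entry(from, to, o, p);
                if (r < 0)
                        return r;
        }
}
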
3671 void journal_reset_metrics(JournalMetrics *m) {
3672 assert(m);
3673
3674 /* Set everything to "pick automatic values". */
3675
3676 *m = (JournalMetrics) {
3677 .min_use = (uint64_t) -1,
3678 .max_use = (uint64_t) -1,
3679 .min_size = (uint64_t) -1,
3680 .max_size = (uint64_t) -1,
3681 .keep_free = (uint64_t) -1,
3682 .n_max_files = (uint64_t) -1,
3683 };
3684 }
3685
3686 void journal_default_metrics(JournalMetrics *m, int fd) {
3687 char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
3688 struct statvfs ss;
3689 uint64_t fs_size;
3690
3691 assert(m);
3692 assert(fd >= 0);
3693
3694 if (fstatvfs(fd, &ss) >= 0)
3695 fs_size = ss.f_frsize * ss.f_blocks;
3696 else {
3697 log_debug_errno(errno, "Failed to determine disk size: %m");
3698 fs_size = 0;
3699 }
3700
3701 if (m->max_use == (uint64_t) -1) {
3702
3703 if (fs_size > 0) {
3704 m->max_use = PAGE_ALIGN(fs_size / 10); /* 10% of file system size */
3705
3706 if (m->max_use > DEFAULT_MAX_USE_UPPER)
3707 m->max_use = DEFAULT_MAX_USE_UPPER;
3708
3709 if (m->max_use < DEFAULT_MAX_USE_LOWER)
3710 m->max_use = DEFAULT_MAX_USE_LOWER;
3711 } else
3712 m->max_use = DEFAULT_MAX_USE_LOWER;
3713 } else {
3714 m->max_use = PAGE_ALIGN(m->max_use);
3715
3716 if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
3717 m->max_use = JOURNAL_FILE_SIZE_MIN*2;
3718 }
3719
3720 if (m->min_use == (uint64_t) -1)
3721 m->min_use = DEFAULT_MIN_USE;
3722
3723 if (m->min_use > m->max_use)
3724 m->min_use = m->max_use;
3725
3726 if (m->max_size == (uint64_t) -1) {
3727 m->max_size = PAGE_ALIGN(m->max_use / 8); /* 8 chunks */
3728
3729 if (m->max_size > DEFAULT_MAX_SIZE_UPPER)
3730 m->max_size = DEFAULT_MAX_SIZE_UPPER;
3731 } else
3732 m->max_size = PAGE_ALIGN(m->max_size);
3733
3734 if (m->max_size != 0) {
3735 if (m->max_size < JOURNAL_FILE_SIZE_MIN)
3736 m->max_size = JOURNAL_FILE_SIZE_MIN;
3737
3738 if (m->max_use != 0 && m->max_size*2 > m->max_use)
3739 m->max_use = m->max_size*2;
3740 }
3741
3742 if (m->min_size == (uint64_t) -1)
3743 m->min_size = JOURNAL_FILE_SIZE_MIN;
3744 else {
3745 m->min_size = PAGE_ALIGN(m->min_size);
3746
3747 if (m->min_size < JOURNAL_FILE_SIZE_MIN)
3748 m->min_size = JOURNAL_FILE_SIZE_MIN;
3749
3750 if (m->max_size != 0 && m->min_size > m->max_size)
3751 m->max_size = m->min_size;
3752 }
3753
3754 if (m->keep_free == (uint64_t) -1) {
3755
3756 if (fs_size > 0) {
3757 m->keep_free = PAGE_ALIGN(fs_size * 3 / 20); /* 15% of file system size */
3758
3759 if (m->keep_free > DEFAULT_KEEP_FREE_UPPER)
3760 m->keep_free = DEFAULT_KEEP_FREE_UPPER;
3761
3762 } else
3763 m->keep_free = DEFAULT_KEEP_FREE;
3764 }
3765
3766 if (m->n_max_files == (uint64_t) -1)
3767 m->n_max_files = DEFAULT_N_MAX_FILES;
3768
3769 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
3770 format_bytes(a, sizeof(a), m->min_use),
3771 format_bytes(b, sizeof(b), m->max_use),
3772 format_bytes(c, sizeof(c), m->max_size),
3773 format_bytes(d, sizeof(d), m->min_size),
3774 format_bytes(e, sizeof(e), m->keep_free),
3775 m->n_max_files);
3776 }
3777
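/* A worked example of the defaults computed above, assuming a hypothetical 20 GiB file
 * system and all metrics left at "pick automatic values": max_use becomes 10% of 20 GiB
 * = 2 GiB (within the 1 MiB .. 4 GiB bounds), max_size becomes max_use / 8 = 256 MiB and
 * is then capped to the 128 MiB upper bound, min_size stays at the 512 KiB minimum file
 * size, min_use at 1 MiB, keep_free becomes 15% of 20 GiB = 3 GiB (below the 4 GiB cap),
 * and n_max_files stays at 100. */
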
3778 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {
3779 assert(f);
3780 assert(f->header);
3781 assert(from || to);
3782
3783 if (from) {
3784 if (f->header->head_entry_realtime == 0)
3785 return -ENOENT;
3786
3787 *from = le64toh(f->header->head_entry_realtime);
3788 }
3789
3790 if (to) {
3791 if (f->header->tail_entry_realtime == 0)
3792 return -ENOENT;
3793
3794 *to = le64toh(f->header->tail_entry_realtime);
3795 }
3796
3797 return 1;
3798 }
3799
3800 int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
3801 Object *o;
3802 uint64_t p;
3803 int r;
3804
3805 assert(f);
3806 assert(from || to);
3807
3808 r = find_data_object_by_boot_id(f, boot_id, &o, &p);
3809 if (r <= 0)
3810 return r;
3811
3812 if (le64toh(o->data.n_entries) <= 0)
3813 return 0;
3814
3815 if (from) {
3816 r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
3817 if (r < 0)
3818 return r;
3819
3820 *from = le64toh(o->entry.monotonic);
3821 }
3822
3823 if (to) {
3824 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
3825 if (r < 0)
3826 return r;
3827
3828 r = generic_array_get_plus_one(f,
3829 le64toh(o->data.entry_offset),
3830 le64toh(o->data.entry_array_offset),
3831 le64toh(o->data.n_entries)-1,
3832 &o, NULL);
3833 if (r <= 0)
3834 return r;
3835
3836 *to = le64toh(o->entry.monotonic);
3837 }
3838
3839 return 1;
3840 }
3841
3842 bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {
3843 assert(f);
3844 assert(f->header);
3845
3846 /* If we gained new header fields we gained new features,
3847 * hence suggest a rotation */
3848 if (le64toh(f->header->header_size) < sizeof(Header)) {
3849 log_debug("%s uses an outdated header, suggesting rotation.", f->path);
3850 return true;
3851 }
3852
3853 /* Let's check if the hash tables grew over a certain fill
3854 * level (75%, borrowing this value from Java's hash table
3855 * implementation), and if so suggest a rotation. To calculate
3856 * the fill level we need the n_data field, which only exists
3857 * in newer versions. */
3858
3859 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3860 if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3861 log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
3862 f->path,
3863 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
3864 le64toh(f->header->n_data),
3865 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3866 (unsigned long long) f->last_stat.st_size,
3867 f->last_stat.st_size / le64toh(f->header->n_data));
3868 return true;
3869 }
3870
3871 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3872 if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3873 log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
3874 f->path,
3875 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
3876 le64toh(f->header->n_fields),
3877 le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
3878 return true;
3879 }
3880
3881 /* Are the data objects properly indexed by field objects? */
3882 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
3883 JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
3884 le64toh(f->header->n_data) > 0 &&
3885 le64toh(f->header->n_fields) == 0)
3886 return true;
3887
3888 if (max_file_usec > 0) {
3889 usec_t t, h;
3890
3891 h = le64toh(f->header->head_entry_realtime);
3892 t = now(CLOCK_REALTIME);
3893
3894 if (h > 0 && t > h + max_file_usec)
3895 return true;
3896 }
3897
3898 return false;
3899 }
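/* A worked example of the 75% fill level rule above, assuming the default data hash
 * table of 2047 buckets: once a file contains more than 2047 * 3 / 4 ~ 1535 data
 * objects, n_data * 4 exceeds the bucket count * 3 and rotation is suggested, since
 * hash chains would otherwise grow long and slow down lookups. */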