1 /***
2 This file is part of systemd.
3
4 Copyright 2011 Lennart Poettering
5
6 systemd is free software; you can redistribute it and/or modify it
7 under the terms of the GNU Lesser General Public License as published by
8 the Free Software Foundation; either version 2.1 of the License, or
9 (at your option) any later version.
10
11 systemd is distributed in the hope that it will be useful, but
12 WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 Lesser General Public License for more details.
15
16 You should have received a copy of the GNU Lesser General Public License
17 along with systemd; If not, see <http://www.gnu.org/licenses/>.
18 ***/
19
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <linux/fs.h>
23 #include <pthread.h>
24 #include <stddef.h>
25 #include <sys/mman.h>
26 #include <sys/statvfs.h>
27 #include <sys/uio.h>
28 #include <unistd.h>
29
30 #include "alloc-util.h"
31 #include "btrfs-util.h"
32 #include "chattr-util.h"
33 #include "compress.h"
34 #include "fd-util.h"
35 #include "journal-authenticate.h"
36 #include "journal-def.h"
37 #include "journal-file.h"
38 #include "lookup3.h"
39 #include "parse-util.h"
40 #include "path-util.h"
41 #include "random-util.h"
42 #include "sd-event.h"
43 #include "set.h"
44 #include "string-util.h"
45 #include "xattr-util.h"
46
47 #define DEFAULT_DATA_HASH_TABLE_SIZE (2047ULL*sizeof(HashItem))
48 #define DEFAULT_FIELD_HASH_TABLE_SIZE (333ULL*sizeof(HashItem))
49
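/* Data objects with a payload smaller than this many bytes are stored
 * uncompressed, since compressing them is unlikely to pay off. */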
50 #define COMPRESSION_SIZE_THRESHOLD (512ULL)
51
52 /* This is the minimum journal file size */
53 #define JOURNAL_FILE_SIZE_MIN (512ULL*1024ULL) /* 512 KiB */
54
55 /* These are the lower and upper bounds if we deduce the max_use value
56 * from the file system size */
57 #define DEFAULT_MAX_USE_LOWER (1ULL*1024ULL*1024ULL) /* 1 MiB */
58 #define DEFAULT_MAX_USE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
59
60 /* This is the default minimal use limit, how much we'll use even if keep_free suggests otherwise. */
61 #define DEFAULT_MIN_USE (1ULL*1024ULL*1024ULL) /* 1 MiB */
62
63 /* This is the upper bound if we deduce max_size from max_use */
64 #define DEFAULT_MAX_SIZE_UPPER (128ULL*1024ULL*1024ULL) /* 128 MiB */
65
66 /* This is the upper bound if we deduce the keep_free value from the
67 * file system size */
68 #define DEFAULT_KEEP_FREE_UPPER (4ULL*1024ULL*1024ULL*1024ULL) /* 4 GiB */
69
70 /* This is the keep_free value when we can't determine the system
71 * size */
72 #define DEFAULT_KEEP_FREE (1024ULL*1024ULL)                 /* 1 MiB */
73
74 /* This is the default maximum number of journal files to keep around. */
75 #define DEFAULT_N_MAX_FILES (100)
76
77 /* n_data was the first entry we added after the initial file format design */
78 #define HEADER_SIZE_MIN ALIGN64(offsetof(Header, n_data))
79
80 /* How many entries to keep in the entry array chain cache at max */
81 #define CHAIN_CACHE_MAX 20
82
83 /* How much to increase the journal file size at once each time we allocate something new. */
84 #define FILE_SIZE_INCREASE (8ULL*1024ULL*1024ULL) /* 8MB */
85
86 /* Re-run fstat() on the file at least this often, to detect deletions */
87 #define LAST_STAT_REFRESH_USEC (5*USEC_PER_SEC)
88
90 /* The mmap context to use for the header: we pick the one right above the last defined object type */
90 #define CONTEXT_HEADER _OBJECT_TYPE_MAX
91
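/* Offline state machine overview: journal_file_set_offline() starts a cycle at
 * OFFLINE_SYNCING; the worker then advances SYNCING -> OFFLINING -> DONE, and a
 * later join resets the state to JOINED. journal_file_set_online() may divert an
 * in-flight sync to CANCEL, and a repeated set_offline() may divert SYNCING or
 * OFFLINING to the matching AGAIN_FROM_* state to restart the cycle. */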
92 /* This may be called from a separate thread to prevent blocking the caller for the duration of fsync().
93 * As a result we use atomic operations on f->offline_state for inter-thread communications with
94 * journal_file_set_offline() and journal_file_set_online(). */
95 static void journal_file_set_offline_internal(JournalFile *f) {
96 assert(f);
97 assert(f->fd >= 0);
98 assert(f->header);
99
100 for (;;) {
101 switch (f->offline_state) {
102 case OFFLINE_CANCEL:
103 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_DONE))
104 continue;
105 return;
106
107 case OFFLINE_AGAIN_FROM_SYNCING:
108 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_SYNCING))
109 continue;
110 break;
111
112 case OFFLINE_AGAIN_FROM_OFFLINING:
113 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_SYNCING))
114 continue;
115 break;
116
117 case OFFLINE_SYNCING:
118 (void) fsync(f->fd);
119
120 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_OFFLINING))
121 continue;
122
123 f->header->state = f->archive ? STATE_ARCHIVED : STATE_OFFLINE;
124 (void) fsync(f->fd);
125 break;
126
127 case OFFLINE_OFFLINING:
128 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_DONE))
129 continue;
130 /* fall through */
131
132 case OFFLINE_DONE:
133 return;
134
135 case OFFLINE_JOINED:
136 log_debug("OFFLINE_JOINED unexpected offline state for journal_file_set_offline_internal()");
137 return;
138 }
139 }
140 }
141
142 static void * journal_file_set_offline_thread(void *arg) {
143 JournalFile *f = arg;
144
145 journal_file_set_offline_internal(f);
146
147 return NULL;
148 }
149
150 static int journal_file_set_offline_thread_join(JournalFile *f) {
151 int r;
152
153 assert(f);
154
155 if (f->offline_state == OFFLINE_JOINED)
156 return 0;
157
158 r = pthread_join(f->offline_thread, NULL);
159 if (r)
160 return -r;
161
162 f->offline_state = OFFLINE_JOINED;
163
164 if (mmap_cache_got_sigbus(f->mmap, f->fd))
165 return -EIO;
166
167 return 0;
168 }
169
170 /* Trigger a restart if the offline thread is mid-flight in a restartable state. */
171 static bool journal_file_set_offline_try_restart(JournalFile *f) {
172 for (;;) {
173 switch (f->offline_state) {
174 case OFFLINE_AGAIN_FROM_SYNCING:
175 case OFFLINE_AGAIN_FROM_OFFLINING:
176 return true;
177
178 case OFFLINE_CANCEL:
179 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_CANCEL, OFFLINE_AGAIN_FROM_SYNCING))
180 continue;
181 return true;
182
183 case OFFLINE_SYNCING:
184 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_AGAIN_FROM_SYNCING))
185 continue;
186 return true;
187
188 case OFFLINE_OFFLINING:
189 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_OFFLINING, OFFLINE_AGAIN_FROM_OFFLINING))
190 continue;
191 return true;
192
193 default:
194 return false;
195 }
196 }
197 }
198
199 /* Sets a journal offline.
200 *
201 * If wait is false then an offline is dispatched in a separate thread for a
202 * subsequent journal_file_set_offline() or journal_file_set_online() of the
203 * same journal to synchronize with.
204 *
205 * If wait is true, then either an existing offline thread will be restarted
206 * and joined, or if none exists the offline is simply performed in this
207 * context without involving another thread.
208 */
209 int journal_file_set_offline(JournalFile *f, bool wait) {
210 bool restarted;
211 int r;
212
213 assert(f);
214
215 if (!f->writable)
216 return -EPERM;
217
218 if (!(f->fd >= 0 && f->header))
219 return -EINVAL;
220
221         /* An offlining journal is implicitly online and may modify f->header->state;
222          * when not online we must also join any potentially lingering offline thread. */
223 if (!journal_file_is_offlining(f) && f->header->state != STATE_ONLINE)
224 return journal_file_set_offline_thread_join(f);
225
226 /* Restart an in-flight offline thread and wait if needed, or join a lingering done one. */
227 restarted = journal_file_set_offline_try_restart(f);
228 if ((restarted && wait) || !restarted) {
229 r = journal_file_set_offline_thread_join(f);
230 if (r < 0)
231 return r;
232 }
233
234 if (restarted)
235 return 0;
236
237 /* Initiate a new offline. */
238 f->offline_state = OFFLINE_SYNCING;
239
240 if (wait) /* Without using a thread if waiting. */
241 journal_file_set_offline_internal(f);
242 else {
243 r = pthread_create(&f->offline_thread, NULL, journal_file_set_offline_thread, f);
244 if (r > 0) {
245 f->offline_state = OFFLINE_JOINED;
246 return -r;
247 }
248 }
249
250 return 0;
251 }
252
253 static int journal_file_set_online(JournalFile *f) {
254 bool joined = false;
255
256 assert(f);
257
258 if (!f->writable)
259 return -EPERM;
260
261 if (!(f->fd >= 0 && f->header))
262 return -EINVAL;
263
264 while (!joined) {
265 switch (f->offline_state) {
266 case OFFLINE_JOINED:
267 /* No offline thread, no need to wait. */
268 joined = true;
269 break;
270
271 case OFFLINE_SYNCING:
272 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_SYNCING, OFFLINE_CANCEL))
273 continue;
274 /* Canceled syncing prior to offlining, no need to wait. */
275 break;
276
277 case OFFLINE_AGAIN_FROM_SYNCING:
278 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_SYNCING, OFFLINE_CANCEL))
279 continue;
280 /* Canceled restart from syncing, no need to wait. */
281 break;
282
283 case OFFLINE_AGAIN_FROM_OFFLINING:
284 if (!__sync_bool_compare_and_swap(&f->offline_state, OFFLINE_AGAIN_FROM_OFFLINING, OFFLINE_CANCEL))
285 continue;
286 /* Canceled restart from offlining, must wait for offlining to complete however. */
287
288 /* fall through to wait */
289 default: {
290 int r;
291
292 r = journal_file_set_offline_thread_join(f);
293 if (r < 0)
294 return r;
295
296 joined = true;
297 break;
298 }
299 }
300 }
301
302 if (mmap_cache_got_sigbus(f->mmap, f->fd))
303 return -EIO;
304
305 switch (f->header->state) {
306 case STATE_ONLINE:
307 return 0;
308
309 case STATE_OFFLINE:
310 f->header->state = STATE_ONLINE;
311 (void) fsync(f->fd);
312 return 0;
313
314 default:
315 return -EINVAL;
316 }
317 }
318
319 bool journal_file_is_offlining(JournalFile *f) {
320 assert(f);
321
322 __sync_synchronize();
323
324 if (f->offline_state == OFFLINE_DONE ||
325 f->offline_state == OFFLINE_JOINED)
326 return false;
327
328 return true;
329 }
330
331 JournalFile* journal_file_close(JournalFile *f) {
332 assert(f);
333
334 #ifdef HAVE_GCRYPT
335 /* Write the final tag */
336 if (f->seal && f->writable) {
337 int r;
338
339 r = journal_file_append_tag(f);
340 if (r < 0)
341 log_error_errno(r, "Failed to append tag when closing journal: %m");
342 }
343 #endif
344
345 if (f->post_change_timer) {
346 int enabled;
347
348 if (sd_event_source_get_enabled(f->post_change_timer, &enabled) >= 0)
349 if (enabled == SD_EVENT_ONESHOT)
350 journal_file_post_change(f);
351
352 (void) sd_event_source_set_enabled(f->post_change_timer, SD_EVENT_OFF);
353 sd_event_source_unref(f->post_change_timer);
354 }
355
356 journal_file_set_offline(f, true);
357
358 if (f->mmap && f->fd >= 0)
359 mmap_cache_close_fd(f->mmap, f->fd);
360
361 if (f->fd >= 0 && f->defrag_on_close) {
362
363 /* Be friendly to btrfs: turn COW back on again now,
364 * and defragment the file. We won't write to the file
365 * ever again, hence remove all fragmentation, and
366 * reenable all the good bits COW usually provides
367 * (such as data checksumming). */
368
369 (void) chattr_fd(f->fd, 0, FS_NOCOW_FL);
370 (void) btrfs_defrag_fd(f->fd);
371 }
372
373 if (f->close_fd)
374 safe_close(f->fd);
375 free(f->path);
376
377 mmap_cache_unref(f->mmap);
378
379 ordered_hashmap_free_free(f->chain_cache);
380
381 #if defined(HAVE_XZ) || defined(HAVE_LZ4)
382 free(f->compress_buffer);
383 #endif
384
385 #ifdef HAVE_GCRYPT
386 if (f->fss_file)
387 munmap(f->fss_file, PAGE_ALIGN(f->fss_file_size));
388 else
389 free(f->fsprg_state);
390
391 free(f->fsprg_seed);
392
393 if (f->hmac)
394 gcry_md_close(f->hmac);
395 #endif
396
397 return mfree(f);
398 }
399
400 void journal_file_close_set(Set *s) {
401 JournalFile *f;
402
403 assert(s);
404
405 while ((f = set_steal_first(s)))
406 (void) journal_file_close(f);
407 }
408
409 static int journal_file_init_header(JournalFile *f, JournalFile *template) {
410 Header h = {};
411 ssize_t k;
412 int r;
413
414 assert(f);
415
416 memcpy(h.signature, HEADER_SIGNATURE, 8);
417 h.header_size = htole64(ALIGN64(sizeof(h)));
418
419 h.incompatible_flags |= htole32(
420 f->compress_xz * HEADER_INCOMPATIBLE_COMPRESSED_XZ |
421 f->compress_lz4 * HEADER_INCOMPATIBLE_COMPRESSED_LZ4);
422
423 h.compatible_flags = htole32(
424 f->seal * HEADER_COMPATIBLE_SEALED);
425
426 r = sd_id128_randomize(&h.file_id);
427 if (r < 0)
428 return r;
429
430 if (template) {
431 h.seqnum_id = template->header->seqnum_id;
432 h.tail_entry_seqnum = template->header->tail_entry_seqnum;
433 } else
434 h.seqnum_id = h.file_id;
435
436 k = pwrite(f->fd, &h, sizeof(h), 0);
437 if (k < 0)
438 return -errno;
439
440 if (k != sizeof(h))
441 return -EIO;
442
443 return 0;
444 }
445
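/* fsync() on a file descriptor only persists the file's contents and inode; to
 * make a freshly created file durable, the directory entry pointing to it has to
 * be synced as well, which is what this helper does for the file's parent
 * directory. */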
446 static int fsync_directory_of_file(int fd) {
447 _cleanup_free_ char *path = NULL, *dn = NULL;
448 _cleanup_close_ int dfd = -1;
449 struct stat st;
450 int r;
451
452 if (fstat(fd, &st) < 0)
453 return -errno;
454
455 if (!S_ISREG(st.st_mode))
456 return -EBADFD;
457
458 r = fd_get_path(fd, &path);
459 if (r < 0)
460 return r;
461
462 if (!path_is_absolute(path))
463 return -EINVAL;
464
465 dn = dirname_malloc(path);
466 if (!dn)
467 return -ENOMEM;
468
469 dfd = open(dn, O_RDONLY|O_CLOEXEC|O_DIRECTORY);
470 if (dfd < 0)
471 return -errno;
472
473 if (fsync(dfd) < 0)
474 return -errno;
475
476 return 0;
477 }
478
479 static int journal_file_refresh_header(JournalFile *f) {
480 sd_id128_t boot_id;
481 int r;
482
483 assert(f);
484 assert(f->header);
485
486 r = sd_id128_get_machine(&f->header->machine_id);
487 if (r < 0)
488 return r;
489
490 r = sd_id128_get_boot(&boot_id);
491 if (r < 0)
492 return r;
493
494 if (sd_id128_equal(boot_id, f->header->boot_id))
495 f->tail_entry_monotonic_valid = true;
496
497 f->header->boot_id = boot_id;
498
499 r = journal_file_set_online(f);
500
501 /* Sync the online state to disk */
502 (void) fsync(f->fd);
503
504 /* We likely just created a new file, also sync the directory this file is located in. */
505 (void) fsync_directory_of_file(f->fd);
506
507 return r;
508 }
509
510 static int journal_file_verify_header(JournalFile *f) {
511 uint32_t flags;
512
513 assert(f);
514 assert(f->header);
515
516 if (memcmp(f->header->signature, HEADER_SIGNATURE, 8))
517 return -EBADMSG;
518
519         /* In both read and write mode we refuse to open files with
520          * incompatible flags we don't know about */
521 flags = le32toh(f->header->incompatible_flags);
522 if (flags & ~HEADER_INCOMPATIBLE_SUPPORTED) {
523 if (flags & ~HEADER_INCOMPATIBLE_ANY)
524 log_debug("Journal file %s has unknown incompatible flags %"PRIx32,
525 f->path, flags & ~HEADER_INCOMPATIBLE_ANY);
526 flags = (flags & HEADER_INCOMPATIBLE_ANY) & ~HEADER_INCOMPATIBLE_SUPPORTED;
527 if (flags)
528 log_debug("Journal file %s uses incompatible flags %"PRIx32
529 " disabled at compilation time.", f->path, flags);
530 return -EPROTONOSUPPORT;
531 }
532
533         /* When opening for writing we additionally refuse to open files with
534          * unknown compatible flags */
535 flags = le32toh(f->header->compatible_flags);
536 if (f->writable && (flags & ~HEADER_COMPATIBLE_SUPPORTED)) {
537 if (flags & ~HEADER_COMPATIBLE_ANY)
538 log_debug("Journal file %s has unknown compatible flags %"PRIx32,
539 f->path, flags & ~HEADER_COMPATIBLE_ANY);
540 flags = (flags & HEADER_COMPATIBLE_ANY) & ~HEADER_COMPATIBLE_SUPPORTED;
541 if (flags)
542 log_debug("Journal file %s uses compatible flags %"PRIx32
543 " disabled at compilation time.", f->path, flags);
544 return -EPROTONOSUPPORT;
545 }
546
547 if (f->header->state >= _STATE_MAX)
548 return -EBADMSG;
549
550 /* The first addition was n_data, so check that we are at least this large */
551 if (le64toh(f->header->header_size) < HEADER_SIZE_MIN)
552 return -EBADMSG;
553
554 if (JOURNAL_HEADER_SEALED(f->header) && !JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
555 return -EBADMSG;
556
557 if ((le64toh(f->header->header_size) + le64toh(f->header->arena_size)) > (uint64_t) f->last_stat.st_size)
558 return -ENODATA;
559
560 if (le64toh(f->header->tail_object_offset) > (le64toh(f->header->header_size) + le64toh(f->header->arena_size)))
561 return -ENODATA;
562
563 if (!VALID64(le64toh(f->header->data_hash_table_offset)) ||
564 !VALID64(le64toh(f->header->field_hash_table_offset)) ||
565 !VALID64(le64toh(f->header->tail_object_offset)) ||
566 !VALID64(le64toh(f->header->entry_array_offset)))
567 return -ENODATA;
568
569 if (f->writable) {
570 sd_id128_t machine_id;
571 uint8_t state;
572 int r;
573
574 r = sd_id128_get_machine(&machine_id);
575 if (r < 0)
576 return r;
577
578 if (!sd_id128_equal(machine_id, f->header->machine_id))
579 return -EHOSTDOWN;
580
581 state = f->header->state;
582
583 if (state == STATE_ONLINE) {
584 log_debug("Journal file %s is already online. Assuming unclean closing.", f->path);
585 return -EBUSY;
586 } else if (state == STATE_ARCHIVED)
587 return -ESHUTDOWN;
588 else if (state != STATE_OFFLINE) {
589 log_debug("Journal file %s has unknown state %i.", f->path, state);
590 return -EBUSY;
591 }
592
593 /* Don't permit appending to files from the future. Because otherwise the realtime timestamps wouldn't
594 * be strictly ordered in the entries in the file anymore, and we can't have that since it breaks
595 * bisection. */
596 if (le64toh(f->header->tail_entry_realtime) > now(CLOCK_REALTIME)) {
597 log_debug("Journal file %s is from the future, refusing to append new data to it that'd be older.", f->path);
598 return -ETXTBSY;
599 }
600 }
601
602 f->compress_xz = JOURNAL_HEADER_COMPRESSED_XZ(f->header);
603 f->compress_lz4 = JOURNAL_HEADER_COMPRESSED_LZ4(f->header);
604
605 f->seal = JOURNAL_HEADER_SEALED(f->header);
606
607 return 0;
608 }
609
610 static int journal_file_fstat(JournalFile *f) {
611 assert(f);
612 assert(f->fd >= 0);
613
614 if (fstat(f->fd, &f->last_stat) < 0)
615 return -errno;
616
617 f->last_stat_usec = now(CLOCK_MONOTONIC);
618
619 /* Refuse appending to files that are already deleted */
620 if (f->last_stat.st_nlink <= 0)
621 return -EIDRM;
622
623 return 0;
624 }
625
626 static int journal_file_allocate(JournalFile *f, uint64_t offset, uint64_t size) {
627 uint64_t old_size, new_size;
628 int r;
629
630 assert(f);
631 assert(f->header);
632
633 /* We assume that this file is not sparse, and we know that
634 * for sure, since we always call posix_fallocate()
635 * ourselves */
636
637 if (mmap_cache_got_sigbus(f->mmap, f->fd))
638 return -EIO;
639
640 old_size =
641 le64toh(f->header->header_size) +
642 le64toh(f->header->arena_size);
643
644 new_size = PAGE_ALIGN(offset + size);
645 if (new_size < le64toh(f->header->header_size))
646 new_size = le64toh(f->header->header_size);
647
648 if (new_size <= old_size) {
649
650                 /* We already pre-allocated enough space, but before
651                  * we write to it, let's check with fstat() whether the
652                  * file got deleted, in order to make sure we don't throw
653                  * away the data immediately. Don't check fstat() on
654                  * every write though, but only once every LAST_STAT_REFRESH_USEC. */
655
656 if (f->last_stat_usec + LAST_STAT_REFRESH_USEC > now(CLOCK_MONOTONIC))
657 return 0;
658
659 return journal_file_fstat(f);
660 }
661
662 /* Allocate more space. */
663
664 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
665 return -E2BIG;
666
667 if (new_size > f->metrics.min_size && f->metrics.keep_free > 0) {
668 struct statvfs svfs;
669
670 if (fstatvfs(f->fd, &svfs) >= 0) {
671 uint64_t available;
672
673 available = LESS_BY((uint64_t) svfs.f_bfree * (uint64_t) svfs.f_bsize, f->metrics.keep_free);
674
675 if (new_size - old_size > available)
676 return -E2BIG;
677 }
678 }
679
680 /* Increase by larger blocks at once */
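        /* For example (illustrative numbers): with FILE_SIZE_INCREASE at 8 MiB, a
         * request that page-aligns to 9 MiB is rounded up here to the next multiple,
         * i.e. 16 MiB, so that subsequent appends mostly hit already allocated space. */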
681 new_size = ((new_size+FILE_SIZE_INCREASE-1) / FILE_SIZE_INCREASE) * FILE_SIZE_INCREASE;
682 if (f->metrics.max_size > 0 && new_size > f->metrics.max_size)
683 new_size = f->metrics.max_size;
684
685         /* Note that the glibc fallocate() fallback is very
686            inefficient, hence we try to minimize the allocation area
687            as much as we can. */
688 r = posix_fallocate(f->fd, old_size, new_size - old_size);
689 if (r != 0)
690 return -r;
691
692 f->header->arena_size = htole64(new_size - le64toh(f->header->header_size));
693
694 return journal_file_fstat(f);
695 }
696
697 static unsigned type_to_context(ObjectType type) {
698 /* One context for each type, plus one catch-all for the rest */
699 assert_cc(_OBJECT_TYPE_MAX <= MMAP_CACHE_MAX_CONTEXTS);
700 assert_cc(CONTEXT_HEADER < MMAP_CACHE_MAX_CONTEXTS);
701 return type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX ? type : 0;
702 }
703
704 static int journal_file_move_to(JournalFile *f, ObjectType type, bool keep_always, uint64_t offset, uint64_t size, void **ret) {
705 int r;
706
707 assert(f);
708 assert(ret);
709
710 if (size <= 0)
711 return -EINVAL;
712
713 /* Avoid SIGBUS on invalid accesses */
714 if (offset + size > (uint64_t) f->last_stat.st_size) {
715 /* Hmm, out of range? Let's refresh the fstat() data
716 * first, before we trust that check. */
717
718 r = journal_file_fstat(f);
719 if (r < 0)
720 return r;
721
722 if (offset + size > (uint64_t) f->last_stat.st_size)
723 return -EADDRNOTAVAIL;
724 }
725
726 return mmap_cache_get(f->mmap, f->fd, f->prot, type_to_context(type), keep_always, offset, size, &f->last_stat, ret);
727 }
728
729 static uint64_t minimum_header_size(Object *o) {
730
731 static const uint64_t table[] = {
732 [OBJECT_DATA] = sizeof(DataObject),
733 [OBJECT_FIELD] = sizeof(FieldObject),
734 [OBJECT_ENTRY] = sizeof(EntryObject),
735 [OBJECT_DATA_HASH_TABLE] = sizeof(HashTableObject),
736 [OBJECT_FIELD_HASH_TABLE] = sizeof(HashTableObject),
737 [OBJECT_ENTRY_ARRAY] = sizeof(EntryArrayObject),
738 [OBJECT_TAG] = sizeof(TagObject),
739 };
740
741 if (o->object.type >= ELEMENTSOF(table) || table[o->object.type] <= 0)
742 return sizeof(ObjectHeader);
743
744 return table[o->object.type];
745 }
746
747 int journal_file_move_to_object(JournalFile *f, ObjectType type, uint64_t offset, Object **ret) {
748 int r;
749 void *t;
750 Object *o;
751 uint64_t s;
752
753 assert(f);
754 assert(ret);
755
756         /* Objects may only be located at multiples of 64 bit */
757 if (!VALID64(offset)) {
758 log_debug("Attempt to move to object at non-64bit boundary: %" PRIu64, offset);
759 return -EBADMSG;
760 }
761
762         /* Objects may not be located inside the file header */
763 if (offset < le64toh(f->header->header_size)) {
764 log_debug("Attempt to move to object located in file header: %" PRIu64, offset);
765 return -EBADMSG;
766 }
767
768 r = journal_file_move_to(f, type, false, offset, sizeof(ObjectHeader), &t);
769 if (r < 0)
770 return r;
771
772 o = (Object*) t;
773 s = le64toh(o->object.size);
774
775 if (s == 0) {
776 log_debug("Attempt to move to uninitialized object: %" PRIu64, offset);
777 return -EBADMSG;
778 }
779 if (s < sizeof(ObjectHeader)) {
780 log_debug("Attempt to move to overly short object: %" PRIu64, offset);
781 return -EBADMSG;
782 }
783
784 if (o->object.type <= OBJECT_UNUSED) {
785 log_debug("Attempt to move to object with invalid type: %" PRIu64, offset);
786 return -EBADMSG;
787 }
788
789 if (s < minimum_header_size(o)) {
790 log_debug("Attempt to move to truncated object: %" PRIu64, offset);
791 return -EBADMSG;
792 }
793
794 if (type > OBJECT_UNUSED && o->object.type != type) {
795 log_debug("Attempt to move to object of unexpected type: %" PRIu64, offset);
796 return -EBADMSG;
797 }
798
799 if (s > sizeof(ObjectHeader)) {
800 r = journal_file_move_to(f, type, false, offset, s, &t);
801 if (r < 0)
802 return r;
803
804 o = (Object*) t;
805 }
806
807 *ret = o;
808 return 0;
809 }
810
811 static uint64_t journal_file_entry_seqnum(JournalFile *f, uint64_t *seqnum) {
812 uint64_t r;
813
814 assert(f);
815 assert(f->header);
816
817 r = le64toh(f->header->tail_entry_seqnum) + 1;
818
819 if (seqnum) {
820 /* If an external seqnum counter was passed, we update
821 * both the local and the external one, and set it to
822 * the maximum of both */
823
824 if (*seqnum + 1 > r)
825 r = *seqnum + 1;
826
827 *seqnum = r;
828 }
829
830 f->header->tail_entry_seqnum = htole64(r);
831
832 if (f->header->head_entry_seqnum == 0)
833 f->header->head_entry_seqnum = htole64(r);
834
835 return r;
836 }
837
838 int journal_file_append_object(JournalFile *f, ObjectType type, uint64_t size, Object **ret, uint64_t *offset) {
839 int r;
840 uint64_t p;
841 Object *tail, *o;
842 void *t;
843
844 assert(f);
845 assert(f->header);
846 assert(type > OBJECT_UNUSED && type < _OBJECT_TYPE_MAX);
847 assert(size >= sizeof(ObjectHeader));
848 assert(offset);
849 assert(ret);
850
851 r = journal_file_set_online(f);
852 if (r < 0)
853 return r;
854
855 p = le64toh(f->header->tail_object_offset);
856 if (p == 0)
857 p = le64toh(f->header->header_size);
858 else {
859 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &tail);
860 if (r < 0)
861 return r;
862
863 p += ALIGN64(le64toh(tail->object.size));
864 }
865
866 r = journal_file_allocate(f, p, size);
867 if (r < 0)
868 return r;
869
870 r = journal_file_move_to(f, type, false, p, size, &t);
871 if (r < 0)
872 return r;
873
874 o = (Object*) t;
875
876 zero(o->object);
877 o->object.type = type;
878 o->object.size = htole64(size);
879
880 f->header->tail_object_offset = htole64(p);
881 f->header->n_objects = htole64(le64toh(f->header->n_objects) + 1);
882
883 *ret = o;
884 *offset = p;
885
886 return 0;
887 }
888
889 static int journal_file_setup_data_hash_table(JournalFile *f) {
890 uint64_t s, p;
891 Object *o;
892 int r;
893
894 assert(f);
895 assert(f->header);
896
897 /* We estimate that we need 1 hash table entry per 768 bytes
898 of journal file and we want to make sure we never get
899 beyond 75% fill level. Calculate the hash table size for
900 the maximum file size based on these metrics. */
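        /* For example, with metrics.max_size at 128 MiB this works out to roughly
         * 128 MiB * 4 / 768 / 3 ~= 233000 buckets, well above the
         * DEFAULT_DATA_HASH_TABLE_SIZE minimum applied below. */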
901
902 s = (f->metrics.max_size * 4 / 768 / 3) * sizeof(HashItem);
903 if (s < DEFAULT_DATA_HASH_TABLE_SIZE)
904 s = DEFAULT_DATA_HASH_TABLE_SIZE;
905
906 log_debug("Reserving %"PRIu64" entries in hash table.", s / sizeof(HashItem));
907
908 r = journal_file_append_object(f,
909 OBJECT_DATA_HASH_TABLE,
910 offsetof(Object, hash_table.items) + s,
911 &o, &p);
912 if (r < 0)
913 return r;
914
915 memzero(o->hash_table.items, s);
916
917 f->header->data_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
918 f->header->data_hash_table_size = htole64(s);
919
920 return 0;
921 }
922
923 static int journal_file_setup_field_hash_table(JournalFile *f) {
924 uint64_t s, p;
925 Object *o;
926 int r;
927
928 assert(f);
929 assert(f->header);
930
931         /* We use a fixed size hash table for the fields as their
932          * number should only grow very slowly */
933
934 s = DEFAULT_FIELD_HASH_TABLE_SIZE;
935 r = journal_file_append_object(f,
936 OBJECT_FIELD_HASH_TABLE,
937 offsetof(Object, hash_table.items) + s,
938 &o, &p);
939 if (r < 0)
940 return r;
941
942 memzero(o->hash_table.items, s);
943
944 f->header->field_hash_table_offset = htole64(p + offsetof(Object, hash_table.items));
945 f->header->field_hash_table_size = htole64(s);
946
947 return 0;
948 }
949
950 int journal_file_map_data_hash_table(JournalFile *f) {
951 uint64_t s, p;
952 void *t;
953 int r;
954
955 assert(f);
956 assert(f->header);
957
958 if (f->data_hash_table)
959 return 0;
960
961 p = le64toh(f->header->data_hash_table_offset);
962 s = le64toh(f->header->data_hash_table_size);
963
964 r = journal_file_move_to(f,
965 OBJECT_DATA_HASH_TABLE,
966 true,
967 p, s,
968 &t);
969 if (r < 0)
970 return r;
971
972 f->data_hash_table = t;
973 return 0;
974 }
975
976 int journal_file_map_field_hash_table(JournalFile *f) {
977 uint64_t s, p;
978 void *t;
979 int r;
980
981 assert(f);
982 assert(f->header);
983
984 if (f->field_hash_table)
985 return 0;
986
987 p = le64toh(f->header->field_hash_table_offset);
988 s = le64toh(f->header->field_hash_table_size);
989
990 r = journal_file_move_to(f,
991 OBJECT_FIELD_HASH_TABLE,
992 true,
993 p, s,
994 &t);
995 if (r < 0)
996 return r;
997
998 f->field_hash_table = t;
999 return 0;
1000 }
1001
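/* Both hash tables use per-bucket singly-linked chains: a bucket's
 * head_hash_offset points at the first object in the chain, tail_hash_offset at
 * the last one, and each object's next_hash_offset links to its successor.
 * Appending a new object therefore only touches the bucket item and the previous
 * chain tail, as implemented in the two link helpers below. */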
1002 static int journal_file_link_field(
1003 JournalFile *f,
1004 Object *o,
1005 uint64_t offset,
1006 uint64_t hash) {
1007
1008 uint64_t p, h, m;
1009 int r;
1010
1011 assert(f);
1012 assert(f->header);
1013 assert(f->field_hash_table);
1014 assert(o);
1015 assert(offset > 0);
1016
1017 if (o->object.type != OBJECT_FIELD)
1018 return -EINVAL;
1019
1020 m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
1021 if (m <= 0)
1022 return -EBADMSG;
1023
1024 /* This might alter the window we are looking at */
1025 o->field.next_hash_offset = o->field.head_data_offset = 0;
1026
1027 h = hash % m;
1028 p = le64toh(f->field_hash_table[h].tail_hash_offset);
1029 if (p == 0)
1030 f->field_hash_table[h].head_hash_offset = htole64(offset);
1031 else {
1032 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1033 if (r < 0)
1034 return r;
1035
1036 o->field.next_hash_offset = htole64(offset);
1037 }
1038
1039 f->field_hash_table[h].tail_hash_offset = htole64(offset);
1040
1041 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
1042 f->header->n_fields = htole64(le64toh(f->header->n_fields) + 1);
1043
1044 return 0;
1045 }
1046
1047 static int journal_file_link_data(
1048 JournalFile *f,
1049 Object *o,
1050 uint64_t offset,
1051 uint64_t hash) {
1052
1053 uint64_t p, h, m;
1054 int r;
1055
1056 assert(f);
1057 assert(f->header);
1058 assert(f->data_hash_table);
1059 assert(o);
1060 assert(offset > 0);
1061
1062 if (o->object.type != OBJECT_DATA)
1063 return -EINVAL;
1064
1065 m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
1066 if (m <= 0)
1067 return -EBADMSG;
1068
1069 /* This might alter the window we are looking at */
1070 o->data.next_hash_offset = o->data.next_field_offset = 0;
1071 o->data.entry_offset = o->data.entry_array_offset = 0;
1072 o->data.n_entries = 0;
1073
1074 h = hash % m;
1075 p = le64toh(f->data_hash_table[h].tail_hash_offset);
1076 if (p == 0)
1077                 /* First entry in this hash bucket, the easy case */
1078 f->data_hash_table[h].head_hash_offset = htole64(offset);
1079 else {
1080                 /* Move back to the previous data object, to patch in
1081                  * the pointer */
1082
1083 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1084 if (r < 0)
1085 return r;
1086
1087 o->data.next_hash_offset = htole64(offset);
1088 }
1089
1090 f->data_hash_table[h].tail_hash_offset = htole64(offset);
1091
1092 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
1093 f->header->n_data = htole64(le64toh(f->header->n_data) + 1);
1094
1095 return 0;
1096 }
1097
1098 int journal_file_find_field_object_with_hash(
1099 JournalFile *f,
1100 const void *field, uint64_t size, uint64_t hash,
1101 Object **ret, uint64_t *offset) {
1102
1103 uint64_t p, osize, h, m;
1104 int r;
1105
1106 assert(f);
1107 assert(f->header);
1108 assert(field && size > 0);
1109
1110 /* If the field hash table is empty, we can't find anything */
1111 if (le64toh(f->header->field_hash_table_size) <= 0)
1112 return 0;
1113
1114 /* Map the field hash table, if it isn't mapped yet. */
1115 r = journal_file_map_field_hash_table(f);
1116 if (r < 0)
1117 return r;
1118
1119 osize = offsetof(Object, field.payload) + size;
1120
1121 m = le64toh(f->header->field_hash_table_size) / sizeof(HashItem);
1122 if (m <= 0)
1123 return -EBADMSG;
1124
1125 h = hash % m;
1126 p = le64toh(f->field_hash_table[h].head_hash_offset);
1127
1128 while (p > 0) {
1129 Object *o;
1130
1131 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1132 if (r < 0)
1133 return r;
1134
1135 if (le64toh(o->field.hash) == hash &&
1136 le64toh(o->object.size) == osize &&
1137 memcmp(o->field.payload, field, size) == 0) {
1138
1139 if (ret)
1140 *ret = o;
1141 if (offset)
1142 *offset = p;
1143
1144 return 1;
1145 }
1146
1147 p = le64toh(o->field.next_hash_offset);
1148 }
1149
1150 return 0;
1151 }
1152
1153 int journal_file_find_field_object(
1154 JournalFile *f,
1155 const void *field, uint64_t size,
1156 Object **ret, uint64_t *offset) {
1157
1158 uint64_t hash;
1159
1160 assert(f);
1161 assert(field && size > 0);
1162
1163 hash = hash64(field, size);
1164
1165 return journal_file_find_field_object_with_hash(f,
1166 field, size, hash,
1167 ret, offset);
1168 }
1169
1170 int journal_file_find_data_object_with_hash(
1171 JournalFile *f,
1172 const void *data, uint64_t size, uint64_t hash,
1173 Object **ret, uint64_t *offset) {
1174
1175 uint64_t p, osize, h, m;
1176 int r;
1177
1178 assert(f);
1179 assert(f->header);
1180 assert(data || size == 0);
1181
1182 /* If there's no data hash table, then there's no entry. */
1183 if (le64toh(f->header->data_hash_table_size) <= 0)
1184 return 0;
1185
1186 /* Map the data hash table, if it isn't mapped yet. */
1187 r = journal_file_map_data_hash_table(f);
1188 if (r < 0)
1189 return r;
1190
1191 osize = offsetof(Object, data.payload) + size;
1192
1193 m = le64toh(f->header->data_hash_table_size) / sizeof(HashItem);
1194 if (m <= 0)
1195 return -EBADMSG;
1196
1197 h = hash % m;
1198 p = le64toh(f->data_hash_table[h].head_hash_offset);
1199
1200 while (p > 0) {
1201 Object *o;
1202
1203 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1204 if (r < 0)
1205 return r;
1206
1207 if (le64toh(o->data.hash) != hash)
1208 goto next;
1209
1210 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
1211 #if defined(HAVE_XZ) || defined(HAVE_LZ4)
1212 uint64_t l;
1213 size_t rsize = 0;
1214
1215 l = le64toh(o->object.size);
1216 if (l <= offsetof(Object, data.payload))
1217 return -EBADMSG;
1218
1219 l -= offsetof(Object, data.payload);
1220
1221 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
1222 o->data.payload, l, &f->compress_buffer, &f->compress_buffer_size, &rsize, 0);
1223 if (r < 0)
1224 return r;
1225
1226 if (rsize == size &&
1227 memcmp(f->compress_buffer, data, size) == 0) {
1228
1229 if (ret)
1230 *ret = o;
1231
1232 if (offset)
1233 *offset = p;
1234
1235 return 1;
1236 }
1237 #else
1238 return -EPROTONOSUPPORT;
1239 #endif
1240 } else if (le64toh(o->object.size) == osize &&
1241 memcmp(o->data.payload, data, size) == 0) {
1242
1243 if (ret)
1244 *ret = o;
1245
1246 if (offset)
1247 *offset = p;
1248
1249 return 1;
1250 }
1251
1252 next:
1253 p = le64toh(o->data.next_hash_offset);
1254 }
1255
1256 return 0;
1257 }
1258
1259 int journal_file_find_data_object(
1260 JournalFile *f,
1261 const void *data, uint64_t size,
1262 Object **ret, uint64_t *offset) {
1263
1264 uint64_t hash;
1265
1266 assert(f);
1267 assert(data || size == 0);
1268
1269 hash = hash64(data, size);
1270
1271 return journal_file_find_data_object_with_hash(f,
1272 data, size, hash,
1273 ret, offset);
1274 }
1275
1276 static int journal_file_append_field(
1277 JournalFile *f,
1278 const void *field, uint64_t size,
1279 Object **ret, uint64_t *offset) {
1280
1281 uint64_t hash, p;
1282 uint64_t osize;
1283 Object *o;
1284 int r;
1285
1286 assert(f);
1287 assert(field && size > 0);
1288
1289 hash = hash64(field, size);
1290
1291 r = journal_file_find_field_object_with_hash(f, field, size, hash, &o, &p);
1292 if (r < 0)
1293 return r;
1294 else if (r > 0) {
1295
1296 if (ret)
1297 *ret = o;
1298
1299 if (offset)
1300 *offset = p;
1301
1302 return 0;
1303 }
1304
1305 osize = offsetof(Object, field.payload) + size;
1306 r = journal_file_append_object(f, OBJECT_FIELD, osize, &o, &p);
1307 if (r < 0)
1308 return r;
1309
1310 o->field.hash = htole64(hash);
1311 memcpy(o->field.payload, field, size);
1312
1313 r = journal_file_link_field(f, o, p, hash);
1314 if (r < 0)
1315 return r;
1316
1317 /* The linking might have altered the window, so let's
1318 * refresh our pointer */
1319 r = journal_file_move_to_object(f, OBJECT_FIELD, p, &o);
1320 if (r < 0)
1321 return r;
1322
1323 #ifdef HAVE_GCRYPT
1324 r = journal_file_hmac_put_object(f, OBJECT_FIELD, o, p);
1325 if (r < 0)
1326 return r;
1327 #endif
1328
1329 if (ret)
1330 *ret = o;
1331
1332 if (offset)
1333 *offset = p;
1334
1335 return 0;
1336 }
1337
1338 static int journal_file_append_data(
1339 JournalFile *f,
1340 const void *data, uint64_t size,
1341 Object **ret, uint64_t *offset) {
1342
1343 uint64_t hash, p;
1344 uint64_t osize;
1345 Object *o;
1346 int r, compression = 0;
1347 const void *eq;
1348
1349 assert(f);
1350 assert(data || size == 0);
1351
1352 hash = hash64(data, size);
1353
1354 r = journal_file_find_data_object_with_hash(f, data, size, hash, &o, &p);
1355 if (r < 0)
1356 return r;
1357 if (r > 0) {
1358
1359 if (ret)
1360 *ret = o;
1361
1362 if (offset)
1363 *offset = p;
1364
1365 return 0;
1366 }
1367
1368 osize = offsetof(Object, data.payload) + size;
1369 r = journal_file_append_object(f, OBJECT_DATA, osize, &o, &p);
1370 if (r < 0)
1371 return r;
1372
1373 o->data.hash = htole64(hash);
1374
1375 #if defined(HAVE_XZ) || defined(HAVE_LZ4)
1376 if (JOURNAL_FILE_COMPRESS(f) && size >= COMPRESSION_SIZE_THRESHOLD) {
1377 size_t rsize = 0;
1378
1379 compression = compress_blob(data, size, o->data.payload, size - 1, &rsize);
1380
1381 if (compression >= 0) {
1382 o->object.size = htole64(offsetof(Object, data.payload) + rsize);
1383 o->object.flags |= compression;
1384
1385 log_debug("Compressed data object %"PRIu64" -> %zu using %s",
1386 size, rsize, object_compressed_to_string(compression));
1387 } else
1388 /* Compression didn't work, we don't really care why, let's continue without compression */
1389 compression = 0;
1390 }
1391 #endif
1392
1393 if (compression == 0)
1394 memcpy_safe(o->data.payload, data, size);
1395
1396 r = journal_file_link_data(f, o, p, hash);
1397 if (r < 0)
1398 return r;
1399
1400 #ifdef HAVE_GCRYPT
1401 r = journal_file_hmac_put_object(f, OBJECT_DATA, o, p);
1402 if (r < 0)
1403 return r;
1404 #endif
1405
1406 /* The linking might have altered the window, so let's
1407 * refresh our pointer */
1408 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1409 if (r < 0)
1410 return r;
1411
1412 if (!data)
1413 eq = NULL;
1414 else
1415 eq = memchr(data, '=', size);
1416 if (eq && eq > data) {
1417 Object *fo = NULL;
1418 uint64_t fp;
1419
1420 /* Create field object ... */
1421 r = journal_file_append_field(f, data, (uint8_t*) eq - (uint8_t*) data, &fo, &fp);
1422 if (r < 0)
1423 return r;
1424
1425 /* ... and link it in. */
1426 o->data.next_field_offset = fo->field.head_data_offset;
1427 fo->field.head_data_offset = le64toh(p);
1428 }
1429
1430 if (ret)
1431 *ret = o;
1432
1433 if (offset)
1434 *offset = p;
1435
1436 return 0;
1437 }
1438
1439 uint64_t journal_file_entry_n_items(Object *o) {
1440 assert(o);
1441
1442 if (o->object.type != OBJECT_ENTRY)
1443 return 0;
1444
1445 return (le64toh(o->object.size) - offsetof(Object, entry.items)) / sizeof(EntryItem);
1446 }
1447
1448 uint64_t journal_file_entry_array_n_items(Object *o) {
1449 assert(o);
1450
1451 if (o->object.type != OBJECT_ENTRY_ARRAY)
1452 return 0;
1453
1454 return (le64toh(o->object.size) - offsetof(Object, entry_array.items)) / sizeof(uint64_t);
1455 }
1456
1457 uint64_t journal_file_hash_table_n_items(Object *o) {
1458 assert(o);
1459
1460 if (o->object.type != OBJECT_DATA_HASH_TABLE &&
1461 o->object.type != OBJECT_FIELD_HASH_TABLE)
1462 return 0;
1463
1464 return (le64toh(o->object.size) - offsetof(Object, hash_table.items)) / sizeof(HashItem);
1465 }
1466
1467 static int link_entry_into_array(JournalFile *f,
1468 le64_t *first,
1469 le64_t *idx,
1470 uint64_t p) {
1471 int r;
1472 uint64_t n = 0, ap = 0, q, i, a, hidx;
1473 Object *o;
1474
1475 assert(f);
1476 assert(f->header);
1477 assert(first);
1478 assert(idx);
1479 assert(p > 0);
1480
1481 a = le64toh(*first);
1482 i = hidx = le64toh(*idx);
1483 while (a > 0) {
1484
1485 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
1486 if (r < 0)
1487 return r;
1488
1489 n = journal_file_entry_array_n_items(o);
1490 if (i < n) {
1491 o->entry_array.items[i] = htole64(p);
1492 *idx = htole64(hidx + 1);
1493 return 0;
1494 }
1495
1496 i -= n;
1497 ap = a;
1498 a = le64toh(o->entry_array.next_entry_array_offset);
1499 }
1500
1501 if (hidx > n)
1502 n = (hidx+1) * 2;
1503 else
1504 n = n * 2;
1505
1506 if (n < 4)
1507 n = 4;
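        /* The appended array is therefore at least 4 items long and roughly
         * doubles the size of the previous array in the chain (4, 8, 16, ...),
         * keeping the number of chained arrays logarithmic in the entry count. */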
1508
1509 r = journal_file_append_object(f, OBJECT_ENTRY_ARRAY,
1510 offsetof(Object, entry_array.items) + n * sizeof(uint64_t),
1511 &o, &q);
1512 if (r < 0)
1513 return r;
1514
1515 #ifdef HAVE_GCRYPT
1516 r = journal_file_hmac_put_object(f, OBJECT_ENTRY_ARRAY, o, q);
1517 if (r < 0)
1518 return r;
1519 #endif
1520
1521 o->entry_array.items[i] = htole64(p);
1522
1523 if (ap == 0)
1524 *first = htole64(q);
1525 else {
1526 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, ap, &o);
1527 if (r < 0)
1528 return r;
1529
1530 o->entry_array.next_entry_array_offset = htole64(q);
1531 }
1532
1533 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
1534 f->header->n_entry_arrays = htole64(le64toh(f->header->n_entry_arrays) + 1);
1535
1536 *idx = htole64(hidx + 1);
1537
1538 return 0;
1539 }
1540
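/* The "plus one" variant stores the very first entry reference inline in the
 * *extra field (e.g. data.entry_offset) and only spills subsequent references
 * into the entry array chain, so objects referenced by a single entry never
 * need an array object at all. */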
1541 static int link_entry_into_array_plus_one(JournalFile *f,
1542 le64_t *extra,
1543 le64_t *first,
1544 le64_t *idx,
1545 uint64_t p) {
1546
1547 int r;
1548
1549 assert(f);
1550 assert(extra);
1551 assert(first);
1552 assert(idx);
1553 assert(p > 0);
1554
1555 if (*idx == 0)
1556 *extra = htole64(p);
1557 else {
1558 le64_t i;
1559
1560 i = htole64(le64toh(*idx) - 1);
1561 r = link_entry_into_array(f, first, &i, p);
1562 if (r < 0)
1563 return r;
1564 }
1565
1566 *idx = htole64(le64toh(*idx) + 1);
1567 return 0;
1568 }
1569
1570 static int journal_file_link_entry_item(JournalFile *f, Object *o, uint64_t offset, uint64_t i) {
1571 uint64_t p;
1572 int r;
1573 assert(f);
1574 assert(o);
1575 assert(offset > 0);
1576
1577 p = le64toh(o->entry.items[i].object_offset);
1578 if (p == 0)
1579 return -EINVAL;
1580
1581 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
1582 if (r < 0)
1583 return r;
1584
1585 return link_entry_into_array_plus_one(f,
1586 &o->data.entry_offset,
1587 &o->data.entry_array_offset,
1588 &o->data.n_entries,
1589 offset);
1590 }
1591
1592 static int journal_file_link_entry(JournalFile *f, Object *o, uint64_t offset) {
1593 uint64_t n, i;
1594 int r;
1595
1596 assert(f);
1597 assert(f->header);
1598 assert(o);
1599 assert(offset > 0);
1600
1601 if (o->object.type != OBJECT_ENTRY)
1602 return -EINVAL;
1603
1604 __sync_synchronize();
1605
1606 /* Link up the entry itself */
1607 r = link_entry_into_array(f,
1608 &f->header->entry_array_offset,
1609 &f->header->n_entries,
1610 offset);
1611 if (r < 0)
1612 return r;
1613
1614 /* log_debug("=> %s seqnr=%"PRIu64" n_entries=%"PRIu64, f->path, o->entry.seqnum, f->header->n_entries); */
1615
1616 if (f->header->head_entry_realtime == 0)
1617 f->header->head_entry_realtime = o->entry.realtime;
1618
1619 f->header->tail_entry_realtime = o->entry.realtime;
1620 f->header->tail_entry_monotonic = o->entry.monotonic;
1621
1622 f->tail_entry_monotonic_valid = true;
1623
1624 /* Link up the items */
1625 n = journal_file_entry_n_items(o);
1626 for (i = 0; i < n; i++) {
1627 r = journal_file_link_entry_item(f, o, offset, i);
1628 if (r < 0)
1629 return r;
1630 }
1631
1632 return 0;
1633 }
1634
1635 static int journal_file_append_entry_internal(
1636 JournalFile *f,
1637 const dual_timestamp *ts,
1638 uint64_t xor_hash,
1639 const EntryItem items[], unsigned n_items,
1640 uint64_t *seqnum,
1641 Object **ret, uint64_t *offset) {
1642 uint64_t np;
1643 uint64_t osize;
1644 Object *o;
1645 int r;
1646
1647 assert(f);
1648 assert(f->header);
1649 assert(items || n_items == 0);
1650 assert(ts);
1651
1652 osize = offsetof(Object, entry.items) + (n_items * sizeof(EntryItem));
1653
1654 r = journal_file_append_object(f, OBJECT_ENTRY, osize, &o, &np);
1655 if (r < 0)
1656 return r;
1657
1658 o->entry.seqnum = htole64(journal_file_entry_seqnum(f, seqnum));
1659 memcpy_safe(o->entry.items, items, n_items * sizeof(EntryItem));
1660 o->entry.realtime = htole64(ts->realtime);
1661 o->entry.monotonic = htole64(ts->monotonic);
1662 o->entry.xor_hash = htole64(xor_hash);
1663 o->entry.boot_id = f->header->boot_id;
1664
1665 #ifdef HAVE_GCRYPT
1666 r = journal_file_hmac_put_object(f, OBJECT_ENTRY, o, np);
1667 if (r < 0)
1668 return r;
1669 #endif
1670
1671 r = journal_file_link_entry(f, o, np);
1672 if (r < 0)
1673 return r;
1674
1675 if (ret)
1676 *ret = o;
1677
1678 if (offset)
1679 *offset = np;
1680
1681 return 0;
1682 }
1683
1684 void journal_file_post_change(JournalFile *f) {
1685 assert(f);
1686
1687         /* inotify() does not receive IN_MODIFY events from file
1688          * accesses done via mmap(). Hence, after each access we
1689          * trigger IN_MODIFY ourselves by truncating the journal file
1690          * to its current size. */
1691
1692 __sync_synchronize();
1693
1694 if (ftruncate(f->fd, f->last_stat.st_size) < 0)
1695 log_debug_errno(errno, "Failed to truncate file to its own size: %m");
1696 }
1697
1698 static int post_change_thunk(sd_event_source *timer, uint64_t usec, void *userdata) {
1699 assert(userdata);
1700
1701 journal_file_post_change(userdata);
1702
1703 return 1;
1704 }
1705
1706 static void schedule_post_change(JournalFile *f) {
1707 sd_event_source *timer;
1708 int enabled, r;
1709 uint64_t now;
1710
1711 assert(f);
1712 assert(f->post_change_timer);
1713
1714 timer = f->post_change_timer;
1715
1716 r = sd_event_source_get_enabled(timer, &enabled);
1717 if (r < 0) {
1718 log_debug_errno(r, "Failed to get ftruncate timer state: %m");
1719 goto fail;
1720 }
1721
1722 if (enabled == SD_EVENT_ONESHOT)
1723 return;
1724
1725 r = sd_event_now(sd_event_source_get_event(timer), CLOCK_MONOTONIC, &now);
1726 if (r < 0) {
1727 log_debug_errno(r, "Failed to get clock's now for scheduling ftruncate: %m");
1728 goto fail;
1729 }
1730
1731 r = sd_event_source_set_time(timer, now+f->post_change_timer_period);
1732 if (r < 0) {
1733 log_debug_errno(r, "Failed to set time for scheduling ftruncate: %m");
1734 goto fail;
1735 }
1736
1737 r = sd_event_source_set_enabled(timer, SD_EVENT_ONESHOT);
1738 if (r < 0) {
1739 log_debug_errno(r, "Failed to enable scheduled ftruncate: %m");
1740 goto fail;
1741 }
1742
1743 return;
1744
1745 fail:
1746 /* On failure, let's simply post the change immediately. */
1747 journal_file_post_change(f);
1748 }
1749
1750 /* Enable coalesced change posting in a timer on the provided sd_event instance */
1751 int journal_file_enable_post_change_timer(JournalFile *f, sd_event *e, usec_t t) {
1752 _cleanup_(sd_event_source_unrefp) sd_event_source *timer = NULL;
1753 int r;
1754
1755 assert(f);
1756 assert_return(!f->post_change_timer, -EINVAL);
1757 assert(e);
1758 assert(t);
1759
1760 r = sd_event_add_time(e, &timer, CLOCK_MONOTONIC, 0, 0, post_change_thunk, f);
1761 if (r < 0)
1762 return r;
1763
1764 r = sd_event_source_set_enabled(timer, SD_EVENT_OFF);
1765 if (r < 0)
1766 return r;
1767
1768 f->post_change_timer = timer;
1769 timer = NULL;
1770 f->post_change_timer_period = t;
1771
1772 return r;
1773 }
1774
1775 static int entry_item_cmp(const void *_a, const void *_b) {
1776 const EntryItem *a = _a, *b = _b;
1777
1778 if (le64toh(a->object_offset) < le64toh(b->object_offset))
1779 return -1;
1780 if (le64toh(a->object_offset) > le64toh(b->object_offset))
1781 return 1;
1782 return 0;
1783 }
1784
1785 int journal_file_append_entry(JournalFile *f, const dual_timestamp *ts, const struct iovec iovec[], unsigned n_iovec, uint64_t *seqnum, Object **ret, uint64_t *offset) {
1786 unsigned i;
1787 EntryItem *items;
1788 int r;
1789 uint64_t xor_hash = 0;
1790 struct dual_timestamp _ts;
1791
1792 assert(f);
1793 assert(f->header);
1794 assert(iovec || n_iovec == 0);
1795
1796 if (!ts) {
1797 dual_timestamp_get(&_ts);
1798 ts = &_ts;
1799 }
1800
1801 #ifdef HAVE_GCRYPT
1802 r = journal_file_maybe_append_tag(f, ts->realtime);
1803 if (r < 0)
1804 return r;
1805 #endif
1806
1807 /* alloca() can't take 0, hence let's allocate at least one */
1808 items = alloca(sizeof(EntryItem) * MAX(1u, n_iovec));
1809
1810 for (i = 0; i < n_iovec; i++) {
1811 uint64_t p;
1812 Object *o;
1813
1814 r = journal_file_append_data(f, iovec[i].iov_base, iovec[i].iov_len, &o, &p);
1815 if (r < 0)
1816 return r;
1817
1818 xor_hash ^= le64toh(o->data.hash);
1819 items[i].object_offset = htole64(p);
1820 items[i].hash = o->data.hash;
1821 }
1822
1823 /* Order by the position on disk, in order to improve seek
1824 * times for rotating media. */
1825 qsort_safe(items, n_iovec, sizeof(EntryItem), entry_item_cmp);
1826
1827 r = journal_file_append_entry_internal(f, ts, xor_hash, items, n_iovec, seqnum, ret, offset);
1828
1829 /* If the memory mapping triggered a SIGBUS then we return an
1830 * IO error and ignore the error code passed down to us, since
1831 * it is very likely just an effect of a nullified replacement
1832 * mapping page */
1833
1834 if (mmap_cache_got_sigbus(f->mmap, f->fd))
1835 r = -EIO;
1836
1837 if (f->post_change_timer)
1838 schedule_post_change(f);
1839 else
1840 journal_file_post_change(f);
1841
1842 return r;
1843 }
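/* A minimal usage sketch (illustrative only, not part of the original file): a
 * writer builds one iovec per "FIELD=value" item and appends them as one entry;
 * passing NULL for the timestamp makes the call use the current time:
 *
 *     struct iovec iov[2] = {
 *             { .iov_base = (char*) "MESSAGE=hello", .iov_len = 13 },
 *             { .iov_base = (char*) "PRIORITY=6",    .iov_len = 10 },
 *     };
 *     r = journal_file_append_entry(f, NULL, iov, 2, &seqnum, NULL, NULL);
 */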
1844
1845 typedef struct ChainCacheItem {
1846 uint64_t first; /* the array at the beginning of the chain */
1847 uint64_t array; /* the cached array */
1848 uint64_t begin; /* the first item in the cached array */
1849 uint64_t total; /* the total number of items in all arrays before this one in the chain */
1850 uint64_t last_index; /* the last index we looked at, to optimize locality when bisecting */
1851 } ChainCacheItem;
1852
1853 static void chain_cache_put(
1854 OrderedHashmap *h,
1855 ChainCacheItem *ci,
1856 uint64_t first,
1857 uint64_t array,
1858 uint64_t begin,
1859 uint64_t total,
1860 uint64_t last_index) {
1861
1862 if (!ci) {
1863                 /* If the array to cache is the first one in the
1864                  * chain it's not worth caching anything */
1865 if (array == first)
1866 return;
1867
1868 if (ordered_hashmap_size(h) >= CHAIN_CACHE_MAX) {
1869 ci = ordered_hashmap_steal_first(h);
1870 assert(ci);
1871 } else {
1872 ci = new(ChainCacheItem, 1);
1873 if (!ci)
1874 return;
1875 }
1876
1877 ci->first = first;
1878
1879 if (ordered_hashmap_put(h, &ci->first, ci) < 0) {
1880 free(ci);
1881 return;
1882 }
1883 } else
1884 assert(ci->first == first);
1885
1886 ci->array = array;
1887 ci->begin = begin;
1888 ci->total = total;
1889 ci->last_index = last_index;
1890 }
1891
1892 static int generic_array_get(
1893 JournalFile *f,
1894 uint64_t first,
1895 uint64_t i,
1896 Object **ret, uint64_t *offset) {
1897
1898 Object *o;
1899 uint64_t p = 0, a, t = 0;
1900 int r;
1901 ChainCacheItem *ci;
1902
1903 assert(f);
1904
1905 a = first;
1906
1907 /* Try the chain cache first */
1908 ci = ordered_hashmap_get(f->chain_cache, &first);
1909 if (ci && i > ci->total) {
1910 a = ci->array;
1911 i -= ci->total;
1912 t = ci->total;
1913 }
1914
1915 while (a > 0) {
1916 uint64_t k;
1917
1918 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &o);
1919 if (r < 0)
1920 return r;
1921
1922 k = journal_file_entry_array_n_items(o);
1923 if (i < k) {
1924 p = le64toh(o->entry_array.items[i]);
1925 goto found;
1926 }
1927
1928 i -= k;
1929 t += k;
1930 a = le64toh(o->entry_array.next_entry_array_offset);
1931 }
1932
1933 return 0;
1934
1935 found:
1936 /* Let's cache this item for the next invocation */
1937 chain_cache_put(f->chain_cache, ci, first, a, le64toh(o->entry_array.items[0]), t, i);
1938
1939 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
1940 if (r < 0)
1941 return r;
1942
1943 if (ret)
1944 *ret = o;
1945
1946 if (offset)
1947 *offset = p;
1948
1949 return 1;
1950 }
1951
1952 static int generic_array_get_plus_one(
1953 JournalFile *f,
1954 uint64_t extra,
1955 uint64_t first,
1956 uint64_t i,
1957 Object **ret, uint64_t *offset) {
1958
1959 Object *o;
1960
1961 assert(f);
1962
1963 if (i == 0) {
1964 int r;
1965
1966 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
1967 if (r < 0)
1968 return r;
1969
1970 if (ret)
1971 *ret = o;
1972
1973 if (offset)
1974 *offset = extra;
1975
1976 return 1;
1977 }
1978
1979 return generic_array_get(f, first, i-1, ret, offset);
1980 }
1981
1982 enum {
1983 TEST_FOUND,
1984 TEST_LEFT,
1985 TEST_RIGHT
1986 };
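/* test_object() callbacks return TEST_FOUND if the object at the given offset
 * matches the needle, TEST_LEFT if it sorts before the needle (so the bisection
 * continues to the right), and TEST_RIGHT if it sorts after the needle (so the
 * bisection continues to the left). */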
1987
1988 static int generic_array_bisect(
1989 JournalFile *f,
1990 uint64_t first,
1991 uint64_t n,
1992 uint64_t needle,
1993 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
1994 direction_t direction,
1995 Object **ret,
1996 uint64_t *offset,
1997 uint64_t *idx) {
1998
1999 uint64_t a, p, t = 0, i = 0, last_p = 0, last_index = (uint64_t) -1;
2000 bool subtract_one = false;
2001 Object *o, *array = NULL;
2002 int r;
2003 ChainCacheItem *ci;
2004
2005 assert(f);
2006 assert(test_object);
2007
2008 /* Start with the first array in the chain */
2009 a = first;
2010
2011 ci = ordered_hashmap_get(f->chain_cache, &first);
2012 if (ci && n > ci->total) {
2013 /* Ah, we have iterated this bisection array chain
2014 * previously! Let's see if we can skip ahead in the
2015 * chain, as far as the last time. But we can't jump
2016 * backwards in the chain, so let's check that
2017 * first. */
2018
2019 r = test_object(f, ci->begin, needle);
2020 if (r < 0)
2021 return r;
2022
2023 if (r == TEST_LEFT) {
2024                 if (r == TEST_LEFT) {
2025                         /* OK, what we are looking for is to the right of the
2026                          * beginning of this EntryArray, so let's jump
2027                          * straight to the previously cached array in the
2028                          * chain */
2028
2029 a = ci->array;
2030 n -= ci->total;
2031 t = ci->total;
2032 last_index = ci->last_index;
2033 }
2034 }
2035
2036 while (a > 0) {
2037 uint64_t left, right, k, lp;
2038
2039 r = journal_file_move_to_object(f, OBJECT_ENTRY_ARRAY, a, &array);
2040 if (r < 0)
2041 return r;
2042
2043 k = journal_file_entry_array_n_items(array);
2044 right = MIN(k, n);
2045 if (right <= 0)
2046 return 0;
2047
2048 i = right - 1;
2049 lp = p = le64toh(array->entry_array.items[i]);
2050 if (p <= 0)
2051 r = -EBADMSG;
2052 else
2053 r = test_object(f, p, needle);
2054 if (r == -EBADMSG) {
2055 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (1)");
2056 n = i;
2057 continue;
2058 }
2059 if (r < 0)
2060 return r;
2061
2062 if (r == TEST_FOUND)
2063 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2064
2065 if (r == TEST_RIGHT) {
2066 left = 0;
2067 right -= 1;
2068
2069 if (last_index != (uint64_t) -1) {
2070 assert(last_index <= right);
2071
2072                                 /* If we cached the last index we
2073                                  * looked at, let's try not to jump
2074                                  * around too wildly, and see if we can
2075                                  * narrow the range early to the
2076                                  * immediate neighbors of that last
2077                                  * index. */
2078
2079 if (last_index > 0) {
2080 uint64_t x = last_index - 1;
2081
2082 p = le64toh(array->entry_array.items[x]);
2083 if (p <= 0)
2084 return -EBADMSG;
2085
2086 r = test_object(f, p, needle);
2087 if (r < 0)
2088 return r;
2089
2090 if (r == TEST_FOUND)
2091 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2092
2093 if (r == TEST_RIGHT)
2094 right = x;
2095 else
2096 left = x + 1;
2097 }
2098
2099 if (last_index < right) {
2100 uint64_t y = last_index + 1;
2101
2102 p = le64toh(array->entry_array.items[y]);
2103 if (p <= 0)
2104 return -EBADMSG;
2105
2106 r = test_object(f, p, needle);
2107 if (r < 0)
2108 return r;
2109
2110 if (r == TEST_FOUND)
2111 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2112
2113 if (r == TEST_RIGHT)
2114 right = y;
2115 else
2116 left = y + 1;
2117 }
2118 }
2119
2120 for (;;) {
2121 if (left == right) {
2122 if (direction == DIRECTION_UP)
2123 subtract_one = true;
2124
2125 i = left;
2126 goto found;
2127 }
2128
2129 assert(left < right);
2130 i = (left + right) / 2;
2131
2132 p = le64toh(array->entry_array.items[i]);
2133 if (p <= 0)
2134 r = -EBADMSG;
2135 else
2136 r = test_object(f, p, needle);
2137 if (r == -EBADMSG) {
2138 log_debug_errno(r, "Encountered invalid entry while bisecting, cutting algorithm short. (2)");
2139 right = n = i;
2140 continue;
2141 }
2142 if (r < 0)
2143 return r;
2144
2145 if (r == TEST_FOUND)
2146 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2147
2148 if (r == TEST_RIGHT)
2149 right = i;
2150 else
2151 left = i + 1;
2152 }
2153 }
2154
2155 if (k >= n) {
2156 if (direction == DIRECTION_UP) {
2157 i = n;
2158 subtract_one = true;
2159 goto found;
2160 }
2161
2162 return 0;
2163 }
2164
2165 last_p = lp;
2166
2167 n -= k;
2168 t += k;
2169 last_index = (uint64_t) -1;
2170 a = le64toh(array->entry_array.next_entry_array_offset);
2171 }
2172
2173 return 0;
2174
2175 found:
2176 if (subtract_one && t == 0 && i == 0)
2177 return 0;
2178
2179 /* Let's cache this item for the next invocation */
2180 chain_cache_put(f->chain_cache, ci, first, a, le64toh(array->entry_array.items[0]), t, subtract_one ? (i > 0 ? i-1 : (uint64_t) -1) : i);
2181
2182 if (subtract_one && i == 0)
2183 p = last_p;
2184 else if (subtract_one)
2185 p = le64toh(array->entry_array.items[i-1]);
2186 else
2187 p = le64toh(array->entry_array.items[i]);
2188
2189 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2190 if (r < 0)
2191 return r;
2192
2193 if (ret)
2194 *ret = o;
2195
2196 if (offset)
2197 *offset = p;
2198
2199 if (idx)
2200 *idx = t + i + (subtract_one ? -1 : 0);
2201
2202 return 1;
2203 }
2204
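/* Like generic_array_bisect(), but for the entry list hanging off a data object: such a list
 * consists of one entry offset stored inline in the data object (the 'extra' entry) plus a
 * chained entry array holding the remaining n-1 entries. Index 0 hence refers to the extra
 * entry, indices 1..n-1 to the entries of the array proper. */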
2205 static int generic_array_bisect_plus_one(
2206 JournalFile *f,
2207 uint64_t extra,
2208 uint64_t first,
2209 uint64_t n,
2210 uint64_t needle,
2211 int (*test_object)(JournalFile *f, uint64_t p, uint64_t needle),
2212 direction_t direction,
2213 Object **ret,
2214 uint64_t *offset,
2215 uint64_t *idx) {
2216
2217 int r;
2218 bool step_back = false;
2219 Object *o;
2220
2221 assert(f);
2222 assert(test_object);
2223
2224 if (n <= 0)
2225 return 0;
2226
2227         /* This bisects the array in object 'first', but first checks
2228          * the extra entry pointed to by 'extra'. */
2229 r = test_object(f, extra, needle);
2230 if (r < 0)
2231 return r;
2232
2233 if (r == TEST_FOUND)
2234 r = direction == DIRECTION_DOWN ? TEST_RIGHT : TEST_LEFT;
2235
2236         /* If we are looking with DIRECTION_UP then we need to first
2237            check whether the actual array contains a matching entry, and
2238            return the last of those. But if there isn't any we need to
2239            return the extra entry instead. Hence remember this, and
2240            return it below. */
2241 if (r == TEST_LEFT)
2242 step_back = direction == DIRECTION_UP;
2243
2244 if (r == TEST_RIGHT) {
2245 if (direction == DIRECTION_DOWN)
2246 goto found;
2247 else
2248 return 0;
2249 }
2250
2251 r = generic_array_bisect(f, first, n-1, needle, test_object, direction, ret, offset, idx);
2252
2253 if (r == 0 && step_back)
2254 goto found;
2255
2256 if (r > 0 && idx)
2257 (*idx)++;
2258
2259 return r;
2260
2261 found:
2262 r = journal_file_move_to_object(f, OBJECT_ENTRY, extra, &o);
2263 if (r < 0)
2264 return r;
2265
2266 if (ret)
2267 *ret = o;
2268
2269 if (offset)
2270 *offset = extra;
2271
2272 if (idx)
2273 *idx = 0;
2274
2275 return 1;
2276 }
2277
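/* The test_object_*() helpers below are the comparison callbacks used by the bisection functions:
 * they return TEST_FOUND if the object at offset p is the one searched for, TEST_LEFT if it sorts
 * before the needle (i.e. the search continues towards higher array indices), and TEST_RIGHT if it
 * sorts after it (the search continues towards lower indices). */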
2278 _pure_ static int test_object_offset(JournalFile *f, uint64_t p, uint64_t needle) {
2279 assert(f);
2280 assert(p > 0);
2281
2282 if (p == needle)
2283 return TEST_FOUND;
2284 else if (p < needle)
2285 return TEST_LEFT;
2286 else
2287 return TEST_RIGHT;
2288 }
2289
2290 static int test_object_seqnum(JournalFile *f, uint64_t p, uint64_t needle) {
2291 Object *o;
2292 int r;
2293
2294 assert(f);
2295 assert(p > 0);
2296
2297 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2298 if (r < 0)
2299 return r;
2300
2301 if (le64toh(o->entry.seqnum) == needle)
2302 return TEST_FOUND;
2303 else if (le64toh(o->entry.seqnum) < needle)
2304 return TEST_LEFT;
2305 else
2306 return TEST_RIGHT;
2307 }
2308
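/* Bisects the global entry array for the entry with the specified sequence number. Roughly
 * speaking, DIRECTION_DOWN yields the first entry at or after the needle, DIRECTION_UP the last
 * entry at or before it. A minimal caller sketch (illustrative only, variable names are
 * hypothetical):
 *
 *         Object *o;
 *         uint64_t p;
 *
 *         r = journal_file_move_to_entry_by_seqnum(f, seqnum, DIRECTION_DOWN, &o, &p);
 *         if (r > 0)
 *                 ... o points to the entry object, p is its file offset ...
 */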
2309 int journal_file_move_to_entry_by_seqnum(
2310 JournalFile *f,
2311 uint64_t seqnum,
2312 direction_t direction,
2313 Object **ret,
2314 uint64_t *offset) {
2315 assert(f);
2316 assert(f->header);
2317
2318 return generic_array_bisect(f,
2319 le64toh(f->header->entry_array_offset),
2320 le64toh(f->header->n_entries),
2321 seqnum,
2322 test_object_seqnum,
2323 direction,
2324 ret, offset, NULL);
2325 }
2326
2327 static int test_object_realtime(JournalFile *f, uint64_t p, uint64_t needle) {
2328 Object *o;
2329 int r;
2330
2331 assert(f);
2332 assert(p > 0);
2333
2334 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2335 if (r < 0)
2336 return r;
2337
2338 if (le64toh(o->entry.realtime) == needle)
2339 return TEST_FOUND;
2340 else if (le64toh(o->entry.realtime) < needle)
2341 return TEST_LEFT;
2342 else
2343 return TEST_RIGHT;
2344 }
2345
2346 int journal_file_move_to_entry_by_realtime(
2347 JournalFile *f,
2348 uint64_t realtime,
2349 direction_t direction,
2350 Object **ret,
2351 uint64_t *offset) {
2352 assert(f);
2353 assert(f->header);
2354
2355 return generic_array_bisect(f,
2356 le64toh(f->header->entry_array_offset),
2357 le64toh(f->header->n_entries),
2358 realtime,
2359 test_object_realtime,
2360 direction,
2361 ret, offset, NULL);
2362 }
2363
2364 static int test_object_monotonic(JournalFile *f, uint64_t p, uint64_t needle) {
2365 Object *o;
2366 int r;
2367
2368 assert(f);
2369 assert(p > 0);
2370
2371 r = journal_file_move_to_object(f, OBJECT_ENTRY, p, &o);
2372 if (r < 0)
2373 return r;
2374
2375 if (le64toh(o->entry.monotonic) == needle)
2376 return TEST_FOUND;
2377 else if (le64toh(o->entry.monotonic) < needle)
2378 return TEST_LEFT;
2379 else
2380 return TEST_RIGHT;
2381 }
2382
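/* Looks up the data object for the "_BOOT_ID=" field of the specified boot: the 128-bit id is
 * formatted as 32 hex characters right after the 9 character prefix, and the resulting string is
 * then resolved with journal_file_find_data_object(). */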
2383 static int find_data_object_by_boot_id(
2384 JournalFile *f,
2385 sd_id128_t boot_id,
2386 Object **o,
2387 uint64_t *b) {
2388
2389 char t[sizeof("_BOOT_ID=")-1 + 32 + 1] = "_BOOT_ID=";
2390
2391 sd_id128_to_string(boot_id, t + 9);
2392 return journal_file_find_data_object(f, t, sizeof(t) - 1, o, b);
2393 }
2394
2395 int journal_file_move_to_entry_by_monotonic(
2396 JournalFile *f,
2397 sd_id128_t boot_id,
2398 uint64_t monotonic,
2399 direction_t direction,
2400 Object **ret,
2401 uint64_t *offset) {
2402
2403 Object *o;
2404 int r;
2405
2406 assert(f);
2407
2408 r = find_data_object_by_boot_id(f, boot_id, &o, NULL);
2409 if (r < 0)
2410 return r;
2411 if (r == 0)
2412 return -ENOENT;
2413
2414 return generic_array_bisect_plus_one(f,
2415 le64toh(o->data.entry_offset),
2416 le64toh(o->data.entry_array_offset),
2417 le64toh(o->data.n_entries),
2418 monotonic,
2419 test_object_monotonic,
2420 direction,
2421 ret, offset, NULL);
2422 }
2423
2424 void journal_file_reset_location(JournalFile *f) {
2425 f->location_type = LOCATION_HEAD;
2426 f->current_offset = 0;
2427 f->current_seqnum = 0;
2428 f->current_realtime = 0;
2429 f->current_monotonic = 0;
2430 zero(f->current_boot_id);
2431 f->current_xor_hash = 0;
2432 }
2433
2434 void journal_file_save_location(JournalFile *f, Object *o, uint64_t offset) {
2435 f->location_type = LOCATION_SEEK;
2436 f->current_offset = offset;
2437 f->current_seqnum = le64toh(o->entry.seqnum);
2438 f->current_realtime = le64toh(o->entry.realtime);
2439 f->current_monotonic = le64toh(o->entry.monotonic);
2440 f->current_boot_id = o->entry.boot_id;
2441 f->current_xor_hash = le64toh(o->entry.xor_hash);
2442 }
2443
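/* Orders the current read positions of two journal files, presumably so that entries from several
 * files can be interleaved consistently: entries with identical boot id, timestamps and xor hash
 * compare equal; otherwise the seqnums decide (if both files share a seqnum source), then the
 * monotonic timestamps (if they share a boot id), then the realtime timestamps, and finally the
 * xor hashes. */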
2444 int journal_file_compare_locations(JournalFile *af, JournalFile *bf) {
2445 assert(af);
2446 assert(af->header);
2447 assert(bf);
2448 assert(bf->header);
2449 assert(af->location_type == LOCATION_SEEK);
2450 assert(bf->location_type == LOCATION_SEEK);
2451
2452         /* If contents and timestamps match, these entries are
2453          * identical, even if their seqnums do not match */
2454 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id) &&
2455 af->current_monotonic == bf->current_monotonic &&
2456 af->current_realtime == bf->current_realtime &&
2457 af->current_xor_hash == bf->current_xor_hash)
2458 return 0;
2459
2460 if (sd_id128_equal(af->header->seqnum_id, bf->header->seqnum_id)) {
2461
2462 /* If this is from the same seqnum source, compare
2463 * seqnums */
2464 if (af->current_seqnum < bf->current_seqnum)
2465 return -1;
2466 if (af->current_seqnum > bf->current_seqnum)
2467 return 1;
2468
2469 /* Wow! This is weird, different data but the same
2470 * seqnums? Something is borked, but let's make the
2471 * best of it and compare by time. */
2472 }
2473
2474 if (sd_id128_equal(af->current_boot_id, bf->current_boot_id)) {
2475
2476 /* If the boot id matches, compare monotonic time */
2477 if (af->current_monotonic < bf->current_monotonic)
2478 return -1;
2479 if (af->current_monotonic > bf->current_monotonic)
2480 return 1;
2481 }
2482
2483 /* Otherwise, compare UTC time */
2484 if (af->current_realtime < bf->current_realtime)
2485 return -1;
2486 if (af->current_realtime > bf->current_realtime)
2487 return 1;
2488
2489 /* Finally, compare by contents */
2490 if (af->current_xor_hash < bf->current_xor_hash)
2491 return -1;
2492 if (af->current_xor_hash > bf->current_xor_hash)
2493 return 1;
2494
2495 return 0;
2496 }
2497
2498 static int bump_array_index(uint64_t *i, direction_t direction, uint64_t n) {
2499
2500 /* Increase or decrease the specified index, in the right direction. */
2501
2502 if (direction == DIRECTION_DOWN) {
2503 if (*i >= n - 1)
2504 return 0;
2505
2506 (*i) ++;
2507 } else {
2508 if (*i <= 0)
2509 return 0;
2510
2511 (*i) --;
2512 }
2513
2514 return 1;
2515 }
2516
2517 static bool check_properly_ordered(uint64_t new_offset, uint64_t old_offset, direction_t direction) {
2518
2519         /* Consider it an error if either of the two offsets is uninitialized */
2520 if (old_offset == 0 || new_offset == 0)
2521 return false;
2522
2523 /* If we go down, the new offset must be larger than the old one. */
2524 return direction == DIRECTION_DOWN ?
2525 new_offset > old_offset :
2526 new_offset < old_offset;
2527 }
2528
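/* Moves to the next (DIRECTION_DOWN) or previous (DIRECTION_UP) entry relative to the entry at
 * offset p, or to the very first/last entry if p is 0. A typical iteration sketch (illustrative
 * only, not taken from this file):
 *
 *         Object *o;
 *         uint64_t p = 0;
 *
 *         while (journal_file_next_entry(f, p, DIRECTION_DOWN, &o, &p) > 0) {
 *                 ... consume the entry object o at offset p ...
 *         }
 */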
2529 int journal_file_next_entry(
2530 JournalFile *f,
2531 uint64_t p,
2532 direction_t direction,
2533 Object **ret, uint64_t *offset) {
2534
2535 uint64_t i, n, ofs;
2536 int r;
2537
2538 assert(f);
2539 assert(f->header);
2540
2541 n = le64toh(f->header->n_entries);
2542 if (n <= 0)
2543 return 0;
2544
2545 if (p == 0)
2546 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2547 else {
2548 r = generic_array_bisect(f,
2549 le64toh(f->header->entry_array_offset),
2550 le64toh(f->header->n_entries),
2551 p,
2552 test_object_offset,
2553 DIRECTION_DOWN,
2554 NULL, NULL,
2555 &i);
2556 if (r <= 0)
2557 return r;
2558
2559 r = bump_array_index(&i, direction, n);
2560 if (r <= 0)
2561 return r;
2562 }
2563
2564 /* And jump to it */
2565 for (;;) {
2566 r = generic_array_get(f,
2567 le64toh(f->header->entry_array_offset),
2568 i,
2569 ret, &ofs);
2570 if (r > 0)
2571 break;
2572 if (r != -EBADMSG)
2573 return r;
2574
2575 /* OK, so this entry is borked. Most likely some entry didn't get synced to disk properly, let's see if
2576 * the next one might work for us instead. */
2577 log_debug_errno(r, "Entry item %" PRIu64 " is bad, skipping over it.", i);
2578
2579 r = bump_array_index(&i, direction, n);
2580 if (r <= 0)
2581 return r;
2582 }
2583
2584 /* Ensure our array is properly ordered. */
2585 if (p > 0 && !check_properly_ordered(ofs, p, direction)) {
2586 log_debug("%s: entry array not properly ordered at entry %" PRIu64, f->path, i);
2587 return -EBADMSG;
2588 }
2589
2590 if (offset)
2591 *offset = ofs;
2592
2593 return 1;
2594 }
2595
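/* Like journal_file_next_entry(), but iterates only through the entries referencing the data
 * object at data_offset. */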
2596 int journal_file_next_entry_for_data(
2597 JournalFile *f,
2598 Object *o, uint64_t p,
2599 uint64_t data_offset,
2600 direction_t direction,
2601 Object **ret, uint64_t *offset) {
2602
2603 uint64_t i, n, ofs;
2604 Object *d;
2605 int r;
2606
2607 assert(f);
2608 assert(p > 0 || !o);
2609
2610 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2611 if (r < 0)
2612 return r;
2613
2614 n = le64toh(d->data.n_entries);
2615 if (n <= 0)
2616 return n;
2617
2618 if (!o)
2619 i = direction == DIRECTION_DOWN ? 0 : n - 1;
2620 else {
2621 if (o->object.type != OBJECT_ENTRY)
2622 return -EINVAL;
2623
2624 r = generic_array_bisect_plus_one(f,
2625 le64toh(d->data.entry_offset),
2626 le64toh(d->data.entry_array_offset),
2627 le64toh(d->data.n_entries),
2628 p,
2629 test_object_offset,
2630 DIRECTION_DOWN,
2631 NULL, NULL,
2632 &i);
2633
2634 if (r <= 0)
2635 return r;
2636
2637 r = bump_array_index(&i, direction, n);
2638 if (r <= 0)
2639 return r;
2640 }
2641
2642 for (;;) {
2643 r = generic_array_get_plus_one(f,
2644 le64toh(d->data.entry_offset),
2645 le64toh(d->data.entry_array_offset),
2646 i,
2647 ret, &ofs);
2648 if (r > 0)
2649 break;
2650 if (r != -EBADMSG)
2651 return r;
2652
2653 log_debug_errno(r, "Data entry item %" PRIu64 " is bad, skipping over it.", i);
2654
2655 r = bump_array_index(&i, direction, n);
2656 if (r <= 0)
2657 return r;
2658 }
2659
2660 /* Ensure our array is properly ordered. */
2661         if (p > 0 && !check_properly_ordered(ofs, p, direction)) {
2662                 log_debug("%s: data entry array not properly ordered at entry %" PRIu64, f->path, i);
2663 return -EBADMSG;
2664 }
2665
2666 if (offset)
2667 *offset = ofs;
2668
2669 return 1;
2670 }
2671
2672 int journal_file_move_to_entry_by_offset_for_data(
2673 JournalFile *f,
2674 uint64_t data_offset,
2675 uint64_t p,
2676 direction_t direction,
2677 Object **ret, uint64_t *offset) {
2678
2679 int r;
2680 Object *d;
2681
2682 assert(f);
2683
2684 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2685 if (r < 0)
2686 return r;
2687
2688 return generic_array_bisect_plus_one(f,
2689 le64toh(d->data.entry_offset),
2690 le64toh(d->data.entry_array_offset),
2691 le64toh(d->data.n_entries),
2692 p,
2693 test_object_offset,
2694 direction,
2695 ret, offset, NULL);
2696 }
2697
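/* Seeks within the entries of the data object at data_offset to the entry closest to the given
 * monotonic timestamp of the given boot. This requires two bisections: first the "_BOOT_ID=" entry
 * list is bisected by monotonic time, then the loop below alternates between that list and the
 * entry list of the data object, each time bisecting for the offset the other list produced, until
 * both agree on the same entry. */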
2698 int journal_file_move_to_entry_by_monotonic_for_data(
2699 JournalFile *f,
2700 uint64_t data_offset,
2701 sd_id128_t boot_id,
2702 uint64_t monotonic,
2703 direction_t direction,
2704 Object **ret, uint64_t *offset) {
2705
2706 Object *o, *d;
2707 int r;
2708 uint64_t b, z;
2709
2710 assert(f);
2711
2712 /* First, seek by time */
2713 r = find_data_object_by_boot_id(f, boot_id, &o, &b);
2714 if (r < 0)
2715 return r;
2716 if (r == 0)
2717 return -ENOENT;
2718
2719 r = generic_array_bisect_plus_one(f,
2720 le64toh(o->data.entry_offset),
2721 le64toh(o->data.entry_array_offset),
2722 le64toh(o->data.n_entries),
2723 monotonic,
2724 test_object_monotonic,
2725 direction,
2726 NULL, &z, NULL);
2727 if (r <= 0)
2728 return r;
2729
2730 /* And now, continue seeking until we find an entry that
2731 * exists in both bisection arrays */
2732
2733 for (;;) {
2734 Object *qo;
2735 uint64_t p, q;
2736
2737 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2738 if (r < 0)
2739 return r;
2740
2741 r = generic_array_bisect_plus_one(f,
2742 le64toh(d->data.entry_offset),
2743 le64toh(d->data.entry_array_offset),
2744 le64toh(d->data.n_entries),
2745 z,
2746 test_object_offset,
2747 direction,
2748 NULL, &p, NULL);
2749 if (r <= 0)
2750 return r;
2751
2752 r = journal_file_move_to_object(f, OBJECT_DATA, b, &o);
2753 if (r < 0)
2754 return r;
2755
2756 r = generic_array_bisect_plus_one(f,
2757 le64toh(o->data.entry_offset),
2758 le64toh(o->data.entry_array_offset),
2759 le64toh(o->data.n_entries),
2760 p,
2761 test_object_offset,
2762 direction,
2763 &qo, &q, NULL);
2764
2765 if (r <= 0)
2766 return r;
2767
2768 if (p == q) {
2769 if (ret)
2770 *ret = qo;
2771 if (offset)
2772 *offset = q;
2773
2774 return 1;
2775 }
2776
2777 z = q;
2778 }
2779 }
2780
2781 int journal_file_move_to_entry_by_seqnum_for_data(
2782 JournalFile *f,
2783 uint64_t data_offset,
2784 uint64_t seqnum,
2785 direction_t direction,
2786 Object **ret, uint64_t *offset) {
2787
2788 Object *d;
2789 int r;
2790
2791 assert(f);
2792
2793 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2794 if (r < 0)
2795 return r;
2796
2797 return generic_array_bisect_plus_one(f,
2798 le64toh(d->data.entry_offset),
2799 le64toh(d->data.entry_array_offset),
2800 le64toh(d->data.n_entries),
2801 seqnum,
2802 test_object_seqnum,
2803 direction,
2804 ret, offset, NULL);
2805 }
2806
2807 int journal_file_move_to_entry_by_realtime_for_data(
2808 JournalFile *f,
2809 uint64_t data_offset,
2810 uint64_t realtime,
2811 direction_t direction,
2812 Object **ret, uint64_t *offset) {
2813
2814 Object *d;
2815 int r;
2816
2817 assert(f);
2818
2819 r = journal_file_move_to_object(f, OBJECT_DATA, data_offset, &d);
2820 if (r < 0)
2821 return r;
2822
2823 return generic_array_bisect_plus_one(f,
2824 le64toh(d->data.entry_offset),
2825 le64toh(d->data.entry_array_offset),
2826 le64toh(d->data.n_entries),
2827 realtime,
2828 test_object_realtime,
2829 direction,
2830 ret, offset, NULL);
2831 }
2832
2833 void journal_file_dump(JournalFile *f) {
2834 Object *o;
2835 int r;
2836 uint64_t p;
2837
2838 assert(f);
2839 assert(f->header);
2840
2841 journal_file_print_header(f);
2842
2843 p = le64toh(f->header->header_size);
2844 while (p != 0) {
2845 r = journal_file_move_to_object(f, OBJECT_UNUSED, p, &o);
2846 if (r < 0)
2847 goto fail;
2848
2849 switch (o->object.type) {
2850
2851 case OBJECT_UNUSED:
2852 printf("Type: OBJECT_UNUSED\n");
2853 break;
2854
2855 case OBJECT_DATA:
2856 printf("Type: OBJECT_DATA\n");
2857 break;
2858
2859 case OBJECT_FIELD:
2860 printf("Type: OBJECT_FIELD\n");
2861 break;
2862
2863 case OBJECT_ENTRY:
2864 printf("Type: OBJECT_ENTRY seqnum=%"PRIu64" monotonic=%"PRIu64" realtime=%"PRIu64"\n",
2865 le64toh(o->entry.seqnum),
2866 le64toh(o->entry.monotonic),
2867 le64toh(o->entry.realtime));
2868 break;
2869
2870 case OBJECT_FIELD_HASH_TABLE:
2871 printf("Type: OBJECT_FIELD_HASH_TABLE\n");
2872 break;
2873
2874 case OBJECT_DATA_HASH_TABLE:
2875 printf("Type: OBJECT_DATA_HASH_TABLE\n");
2876 break;
2877
2878 case OBJECT_ENTRY_ARRAY:
2879 printf("Type: OBJECT_ENTRY_ARRAY\n");
2880 break;
2881
2882 case OBJECT_TAG:
2883 printf("Type: OBJECT_TAG seqnum=%"PRIu64" epoch=%"PRIu64"\n",
2884 le64toh(o->tag.seqnum),
2885 le64toh(o->tag.epoch));
2886 break;
2887
2888 default:
2889 printf("Type: unknown (%i)\n", o->object.type);
2890 break;
2891 }
2892
2893 if (o->object.flags & OBJECT_COMPRESSION_MASK)
2894 printf("Flags: %s\n",
2895 object_compressed_to_string(o->object.flags & OBJECT_COMPRESSION_MASK));
2896
2897 if (p == le64toh(f->header->tail_object_offset))
2898 p = 0;
2899 else
2900 p = p + ALIGN64(le64toh(o->object.size));
2901 }
2902
2903 return;
2904 fail:
2905 log_error("File corrupt");
2906 }
2907
2908 static const char* format_timestamp_safe(char *buf, size_t l, usec_t t) {
2909 const char *x;
2910
2911 x = format_timestamp(buf, l, t);
2912 if (x)
2913 return x;
2914 return " --- ";
2915 }
2916
2917 void journal_file_print_header(JournalFile *f) {
2918 char a[33], b[33], c[33], d[33];
2919 char x[FORMAT_TIMESTAMP_MAX], y[FORMAT_TIMESTAMP_MAX], z[FORMAT_TIMESTAMP_MAX];
2920 struct stat st;
2921 char bytes[FORMAT_BYTES_MAX];
2922
2923 assert(f);
2924 assert(f->header);
2925
2926 printf("File Path: %s\n"
2927 "File ID: %s\n"
2928 "Machine ID: %s\n"
2929 "Boot ID: %s\n"
2930 "Sequential Number ID: %s\n"
2931 "State: %s\n"
2932 "Compatible Flags:%s%s\n"
2933 "Incompatible Flags:%s%s%s\n"
2934 "Header size: %"PRIu64"\n"
2935 "Arena size: %"PRIu64"\n"
2936 "Data Hash Table Size: %"PRIu64"\n"
2937 "Field Hash Table Size: %"PRIu64"\n"
2938 "Rotate Suggested: %s\n"
2939 "Head Sequential Number: %"PRIu64" (%"PRIx64")\n"
2940 "Tail Sequential Number: %"PRIu64" (%"PRIx64")\n"
2941 "Head Realtime Timestamp: %s (%"PRIx64")\n"
2942 "Tail Realtime Timestamp: %s (%"PRIx64")\n"
2943 "Tail Monotonic Timestamp: %s (%"PRIx64")\n"
2944 "Objects: %"PRIu64"\n"
2945 "Entry Objects: %"PRIu64"\n",
2946 f->path,
2947 sd_id128_to_string(f->header->file_id, a),
2948 sd_id128_to_string(f->header->machine_id, b),
2949 sd_id128_to_string(f->header->boot_id, c),
2950 sd_id128_to_string(f->header->seqnum_id, d),
2951 f->header->state == STATE_OFFLINE ? "OFFLINE" :
2952 f->header->state == STATE_ONLINE ? "ONLINE" :
2953 f->header->state == STATE_ARCHIVED ? "ARCHIVED" : "UNKNOWN",
2954 JOURNAL_HEADER_SEALED(f->header) ? " SEALED" : "",
2955 (le32toh(f->header->compatible_flags) & ~HEADER_COMPATIBLE_ANY) ? " ???" : "",
2956 JOURNAL_HEADER_COMPRESSED_XZ(f->header) ? " COMPRESSED-XZ" : "",
2957 JOURNAL_HEADER_COMPRESSED_LZ4(f->header) ? " COMPRESSED-LZ4" : "",
2958 (le32toh(f->header->incompatible_flags) & ~HEADER_INCOMPATIBLE_ANY) ? " ???" : "",
2959 le64toh(f->header->header_size),
2960 le64toh(f->header->arena_size),
2961 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
2962 le64toh(f->header->field_hash_table_size) / sizeof(HashItem),
2963 yes_no(journal_file_rotate_suggested(f, 0)),
2964 le64toh(f->header->head_entry_seqnum), le64toh(f->header->head_entry_seqnum),
2965 le64toh(f->header->tail_entry_seqnum), le64toh(f->header->tail_entry_seqnum),
2966 format_timestamp_safe(x, sizeof(x), le64toh(f->header->head_entry_realtime)), le64toh(f->header->head_entry_realtime),
2967 format_timestamp_safe(y, sizeof(y), le64toh(f->header->tail_entry_realtime)), le64toh(f->header->tail_entry_realtime),
2968 format_timespan(z, sizeof(z), le64toh(f->header->tail_entry_monotonic), USEC_PER_MSEC), le64toh(f->header->tail_entry_monotonic),
2969 le64toh(f->header->n_objects),
2970 le64toh(f->header->n_entries));
2971
2972 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
2973 printf("Data Objects: %"PRIu64"\n"
2974 "Data Hash Table Fill: %.1f%%\n",
2975 le64toh(f->header->n_data),
2976 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))));
2977
2978 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
2979 printf("Field Objects: %"PRIu64"\n"
2980 "Field Hash Table Fill: %.1f%%\n",
2981 le64toh(f->header->n_fields),
2982 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))));
2983
2984 if (JOURNAL_HEADER_CONTAINS(f->header, n_tags))
2985 printf("Tag Objects: %"PRIu64"\n",
2986 le64toh(f->header->n_tags));
2987 if (JOURNAL_HEADER_CONTAINS(f->header, n_entry_arrays))
2988 printf("Entry Array Objects: %"PRIu64"\n",
2989 le64toh(f->header->n_entry_arrays));
2990
2991 if (fstat(f->fd, &st) >= 0)
2992 printf("Disk usage: %s\n", format_bytes(bytes, sizeof(bytes), (uint64_t) st.st_blocks * 512ULL));
2993 }
2994
2995 static int journal_file_warn_btrfs(JournalFile *f) {
2996 unsigned attrs;
2997 int r;
2998
2999 assert(f);
3000
3001         /* Before we write anything, check if the COW logic is turned
3002          * off on btrfs. Given that our write pattern is quite unfriendly
3003          * to COW file systems, this should greatly improve performance
3004          * on such file systems, in particular btrfs, at the expense of
3005          * data integrity features (which shouldn't be too bad, given
3006          * that we do our own checksumming). */
3007
3008 r = btrfs_is_filesystem(f->fd);
3009 if (r < 0)
3010 return log_warning_errno(r, "Failed to determine if journal is on btrfs: %m");
3011 if (!r)
3012 return 0;
3013
3014 r = read_attr_fd(f->fd, &attrs);
3015 if (r < 0)
3016 return log_warning_errno(r, "Failed to read file attributes: %m");
3017
3018 if (attrs & FS_NOCOW_FL) {
3019 log_debug("Detected btrfs file system with copy-on-write disabled, all is good.");
3020 return 0;
3021 }
3022
3023 log_notice("Creating journal file %s on a btrfs file system, and copy-on-write is enabled. "
3024 "This is likely to slow down journal access substantially, please consider turning "
3025 "off the copy-on-write file attribute on the journal directory, using chattr +C.", f->path);
3026
3027 return 1;
3028 }
3029
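/* Opens a journal file, either by path or by an already open file descriptor (pass fd < 0 to open
 * by name). If the file is writable and still empty it is initialized as a fresh journal file. A
 * minimal caller sketch (illustrative only, the path is hypothetical):
 *
 *         JournalFile *f;
 *
 *         r = journal_file_open(-1, "/var/log/journal/test.journal",
 *                               O_RDWR|O_CREAT, 0640,
 *                               true, false, NULL, NULL, NULL, NULL, &f);
 *         if (r < 0)
 *                 return r;
 */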
3030 int journal_file_open(
3031 int fd,
3032 const char *fname,
3033 int flags,
3034 mode_t mode,
3035 bool compress,
3036 bool seal,
3037 JournalMetrics *metrics,
3038 MMapCache *mmap_cache,
3039 Set *deferred_closes,
3040 JournalFile *template,
3041 JournalFile **ret) {
3042
3043 bool newly_created = false;
3044 JournalFile *f;
3045 void *h;
3046 int r;
3047
3048 assert(ret);
3049 assert(fd >= 0 || fname);
3050
3051 if ((flags & O_ACCMODE) != O_RDONLY &&
3052 (flags & O_ACCMODE) != O_RDWR)
3053 return -EINVAL;
3054
3055 if (fname) {
3056 if (!endswith(fname, ".journal") &&
3057 !endswith(fname, ".journal~"))
3058 return -EINVAL;
3059 }
3060
3061 f = new0(JournalFile, 1);
3062 if (!f)
3063 return -ENOMEM;
3064
3065 f->fd = fd;
3066 f->mode = mode;
3067
3068 f->flags = flags;
3069 f->prot = prot_from_flags(flags);
3070 f->writable = (flags & O_ACCMODE) != O_RDONLY;
3071 #if defined(HAVE_LZ4)
3072 f->compress_lz4 = compress;
3073 #elif defined(HAVE_XZ)
3074 f->compress_xz = compress;
3075 #endif
3076 #ifdef HAVE_GCRYPT
3077 f->seal = seal;
3078 #endif
3079
3080 if (mmap_cache)
3081 f->mmap = mmap_cache_ref(mmap_cache);
3082 else {
3083 f->mmap = mmap_cache_new();
3084 if (!f->mmap) {
3085 r = -ENOMEM;
3086 goto fail;
3087 }
3088 }
3089
3090 if (fname)
3091 f->path = strdup(fname);
3092 else /* If we don't know the path, fill in something explanatory and vaguely useful */
3093                 asprintf(&f->path, "/proc/self/fd/%i", fd);
3094 if (!f->path) {
3095 r = -ENOMEM;
3096 goto fail;
3097 }
3098
3099 f->chain_cache = ordered_hashmap_new(&uint64_hash_ops);
3100 if (!f->chain_cache) {
3101 r = -ENOMEM;
3102 goto fail;
3103 }
3104
3105 if (f->fd < 0) {
3106 f->fd = open(f->path, f->flags|O_CLOEXEC, f->mode);
3107 if (f->fd < 0) {
3108 r = -errno;
3109 goto fail;
3110 }
3111
3112                 /* fds we opened here should also be closed by us. */
3113 f->close_fd = true;
3114 }
3115
3116 r = journal_file_fstat(f);
3117 if (r < 0)
3118 goto fail;
3119
3120 if (f->last_stat.st_size == 0 && f->writable) {
3121
3122 (void) journal_file_warn_btrfs(f);
3123
3124 /* Let's attach the creation time to the journal file,
3125 * so that the vacuuming code knows the age of this
3126 * file even if the file might end up corrupted one
3127 * day... Ideally we'd just use the creation time many
3128 * file systems maintain for each file, but there is
3129 * currently no usable API to query this, hence let's
3130 * emulate this via extended attributes. If extended
3131 * attributes are not supported we'll just skip this,
3132 * and rely solely on mtime/atime/ctime of the file. */
3133
3134 fd_setcrtime(f->fd, 0);
3135
3136 #ifdef HAVE_GCRYPT
3137 /* Try to load the FSPRG state, and if we can't, then
3138 * just don't do sealing */
3139 if (f->seal) {
3140 r = journal_file_fss_load(f);
3141 if (r < 0)
3142 f->seal = false;
3143 }
3144 #endif
3145
3146 r = journal_file_init_header(f, template);
3147 if (r < 0)
3148 goto fail;
3149
3150 r = journal_file_fstat(f);
3151 if (r < 0)
3152 goto fail;
3153
3154 newly_created = true;
3155 }
3156
3157 if (f->last_stat.st_size < (off_t) HEADER_SIZE_MIN) {
3158 r = -ENODATA;
3159 goto fail;
3160 }
3161
3162 r = mmap_cache_get(f->mmap, f->fd, f->prot, CONTEXT_HEADER, true, 0, PAGE_ALIGN(sizeof(Header)), &f->last_stat, &h);
3163 if (r < 0)
3164 goto fail;
3165
3166 f->header = h;
3167
3168 if (!newly_created) {
3169 if (deferred_closes)
3170 journal_file_close_set(deferred_closes);
3171
3172 r = journal_file_verify_header(f);
3173 if (r < 0)
3174 goto fail;
3175 }
3176
3177 #ifdef HAVE_GCRYPT
3178 if (!newly_created && f->writable) {
3179 r = journal_file_fss_load(f);
3180 if (r < 0)
3181 goto fail;
3182 }
3183 #endif
3184
3185 if (f->writable) {
3186 if (metrics) {
3187 journal_default_metrics(metrics, f->fd);
3188 f->metrics = *metrics;
3189 } else if (template)
3190 f->metrics = template->metrics;
3191
3192 r = journal_file_refresh_header(f);
3193 if (r < 0)
3194 goto fail;
3195 }
3196
3197 #ifdef HAVE_GCRYPT
3198 r = journal_file_hmac_setup(f);
3199 if (r < 0)
3200 goto fail;
3201 #endif
3202
3203 if (newly_created) {
3204 r = journal_file_setup_field_hash_table(f);
3205 if (r < 0)
3206 goto fail;
3207
3208 r = journal_file_setup_data_hash_table(f);
3209 if (r < 0)
3210 goto fail;
3211
3212 #ifdef HAVE_GCRYPT
3213 r = journal_file_append_first_tag(f);
3214 if (r < 0)
3215 goto fail;
3216 #endif
3217 }
3218
3219 if (mmap_cache_got_sigbus(f->mmap, f->fd)) {
3220 r = -EIO;
3221 goto fail;
3222 }
3223
3224 if (template && template->post_change_timer) {
3225 r = journal_file_enable_post_change_timer(
3226 f,
3227 sd_event_source_get_event(template->post_change_timer),
3228 template->post_change_timer_period);
3229
3230 if (r < 0)
3231 goto fail;
3232 }
3233
3234         /* The file is now opened successfully, thus we take possession of any passed-in fd. */
3235 f->close_fd = true;
3236
3237 *ret = f;
3238 return 0;
3239
3240 fail:
3241 if (f->fd >= 0 && mmap_cache_got_sigbus(f->mmap, f->fd))
3242 r = -EIO;
3243
3244 (void) journal_file_close(f);
3245
3246 return r;
3247 }
3248
3249 int journal_file_rotate(JournalFile **f, bool compress, bool seal, Set *deferred_closes) {
3250 _cleanup_free_ char *p = NULL;
3251 size_t l;
3252 JournalFile *old_file, *new_file = NULL;
3253 int r;
3254
3255 assert(f);
3256 assert(*f);
3257
3258 old_file = *f;
3259
3260 if (!old_file->writable)
3261 return -EINVAL;
3262
3263         /* Is this a journal file that was passed to us as fd? If so, we synthesized a path name for it, and we refuse
3264          * rotation, since we don't know the actual path and hence couldn't rename the file. */
3265 if (path_startswith(old_file->path, "/proc/self/fd"))
3266 return -EINVAL;
3267
3268 if (!endswith(old_file->path, ".journal"))
3269 return -EINVAL;
3270
3271 l = strlen(old_file->path);
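        /* The archived file name has the form
         * "<original name minus .journal>@<seqnum id>-<head seqnum, hex>-<head realtime, hex>.journal". */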
3272 r = asprintf(&p, "%.*s@" SD_ID128_FORMAT_STR "-%016"PRIx64"-%016"PRIx64".journal",
3273 (int) l - 8, old_file->path,
3274 SD_ID128_FORMAT_VAL(old_file->header->seqnum_id),
3275 le64toh((*f)->header->head_entry_seqnum),
3276 le64toh((*f)->header->head_entry_realtime));
3277 if (r < 0)
3278 return -ENOMEM;
3279
3280 /* Try to rename the file to the archived version. If the file
3281 * already was deleted, we'll get ENOENT, let's ignore that
3282 * case. */
3283 r = rename(old_file->path, p);
3284 if (r < 0 && errno != ENOENT)
3285 return -errno;
3286
3287 /* Sync the rename to disk */
3288 (void) fsync_directory_of_file(old_file->fd);
3289
3290 /* Set as archive so offlining commits w/state=STATE_ARCHIVED.
3291 * Previously we would set old_file->header->state to STATE_ARCHIVED directly here,
3292 * but journal_file_set_offline() short-circuits when state != STATE_ONLINE, which
3293 * would result in the rotated journal never getting fsync() called before closing.
3294 * Now we simply queue the archive state by setting an archive bit, leaving the state
3295 * as STATE_ONLINE so proper offlining occurs. */
3296 old_file->archive = true;
3297
3298         /* Currently, btrfs is not very good with our write patterns
3299          * and fragments heavily. Let's defrag our journal files when
3300          * we archive them. */
3301 old_file->defrag_on_close = true;
3302
3303 r = journal_file_open(-1, old_file->path, old_file->flags, old_file->mode, compress, seal, NULL, old_file->mmap, deferred_closes, old_file, &new_file);
3304
3305 if (deferred_closes &&
3306 set_put(deferred_closes, old_file) >= 0)
3307 (void) journal_file_set_offline(old_file, false);
3308 else
3309 (void) journal_file_close(old_file);
3310
3311 *f = new_file;
3312 return r;
3313 }
3314
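/* Like journal_file_open(), but if the existing file turns out to be corrupted or unclean in one
 * of the ways listed below, and we are allowed to create a new one (O_CREAT, writable, *.journal),
 * the broken file is renamed to "<name minus .journal>@<realtime, hex>-<random, hex>.journal~" and
 * the open is retried once with a fresh file. */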
3315 int journal_file_open_reliably(
3316 const char *fname,
3317 int flags,
3318 mode_t mode,
3319 bool compress,
3320 bool seal,
3321 JournalMetrics *metrics,
3322 MMapCache *mmap_cache,
3323 Set *deferred_closes,
3324 JournalFile *template,
3325 JournalFile **ret) {
3326
3327 int r;
3328 size_t l;
3329 _cleanup_free_ char *p = NULL;
3330
3331 r = journal_file_open(-1, fname, flags, mode, compress, seal, metrics, mmap_cache, deferred_closes, template, ret);
3332 if (!IN_SET(r,
3333 -EBADMSG, /* corrupted */
3334 -ENODATA, /* truncated */
3335 -EHOSTDOWN, /* other machine */
3336 -EPROTONOSUPPORT, /* incompatible feature */
3337 -EBUSY, /* unclean shutdown */
3338 -ESHUTDOWN, /* already archived */
3339 -EIO, /* IO error, including SIGBUS on mmap */
3340 -EIDRM, /* File has been deleted */
3341 -ETXTBSY)) /* File is from the future */
3342 return r;
3343
3344 if ((flags & O_ACCMODE) == O_RDONLY)
3345 return r;
3346
3347 if (!(flags & O_CREAT))
3348 return r;
3349
3350 if (!endswith(fname, ".journal"))
3351 return r;
3352
3353 /* The file is corrupted. Rotate it away and try it again (but only once) */
3354
3355 l = strlen(fname);
3356 if (asprintf(&p, "%.*s@%016"PRIx64 "-%016"PRIx64 ".journal~",
3357 (int) l - 8, fname,
3358 now(CLOCK_REALTIME),
3359 random_u64()) < 0)
3360 return -ENOMEM;
3361
3362 if (rename(fname, p) < 0)
3363 return -errno;
3364
3365 /* btrfs doesn't cope well with our write pattern and
3366 * fragments heavily. Let's defrag all files we rotate */
3367
3368 (void) chattr_path(p, 0, FS_NOCOW_FL);
3369 (void) btrfs_defrag(p);
3370
3371 log_warning_errno(r, "File %s corrupted or uncleanly shut down, renaming and replacing.", fname);
3372
3373 return journal_file_open(-1, fname, flags, mode, compress, seal, metrics, mmap_cache, deferred_closes, template, ret);
3374 }
3375
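/* Copies the entry object o (located at offset p in 'from') into the journal file 'to': each data
 * item is looked up in the source, decompressed if necessary, appended to the destination, and the
 * entry is then recreated there with a freshly computed xor hash. */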
3376 int journal_file_copy_entry(JournalFile *from, JournalFile *to, Object *o, uint64_t p, uint64_t *seqnum, Object **ret, uint64_t *offset) {
3377 uint64_t i, n;
3378 uint64_t q, xor_hash = 0;
3379 int r;
3380 EntryItem *items;
3381 dual_timestamp ts;
3382
3383 assert(from);
3384 assert(to);
3385 assert(o);
3386 assert(p);
3387
3388 if (!to->writable)
3389 return -EPERM;
3390
3391 ts.monotonic = le64toh(o->entry.monotonic);
3392 ts.realtime = le64toh(o->entry.realtime);
3393
3394 n = journal_file_entry_n_items(o);
3395 /* alloca() can't take 0, hence let's allocate at least one */
3396 items = alloca(sizeof(EntryItem) * MAX(1u, n));
3397
3398 for (i = 0; i < n; i++) {
3399 uint64_t l, h;
3400 le64_t le_hash;
3401 size_t t;
3402 void *data;
3403 Object *u;
3404
3405 q = le64toh(o->entry.items[i].object_offset);
3406 le_hash = o->entry.items[i].hash;
3407
3408 r = journal_file_move_to_object(from, OBJECT_DATA, q, &o);
3409 if (r < 0)
3410 return r;
3411
3412 if (le_hash != o->data.hash)
3413 return -EBADMSG;
3414
3415 l = le64toh(o->object.size) - offsetof(Object, data.payload);
3416 t = (size_t) l;
3417
3418 /* We hit the limit on 32bit machines */
3419 if ((uint64_t) t != l)
3420 return -E2BIG;
3421
3422 if (o->object.flags & OBJECT_COMPRESSION_MASK) {
3423 #if defined(HAVE_XZ) || defined(HAVE_LZ4)
3424 size_t rsize = 0;
3425
3426 r = decompress_blob(o->object.flags & OBJECT_COMPRESSION_MASK,
3427 o->data.payload, l, &from->compress_buffer, &from->compress_buffer_size, &rsize, 0);
3428 if (r < 0)
3429 return r;
3430
3431 data = from->compress_buffer;
3432 l = rsize;
3433 #else
3434 return -EPROTONOSUPPORT;
3435 #endif
3436 } else
3437 data = o->data.payload;
3438
3439 r = journal_file_append_data(to, data, l, &u, &h);
3440 if (r < 0)
3441 return r;
3442
3443 xor_hash ^= le64toh(u->data.hash);
3444 items[i].object_offset = htole64(h);
3445 items[i].hash = u->data.hash;
3446
3447 r = journal_file_move_to_object(from, OBJECT_ENTRY, p, &o);
3448 if (r < 0)
3449 return r;
3450 }
3451
3452 r = journal_file_append_entry_internal(to, &ts, xor_hash, items, n, seqnum, ret, offset);
3453
3454 if (mmap_cache_got_sigbus(to->mmap, to->fd))
3455 return -EIO;
3456
3457 return r;
3458 }
3459
3460 void journal_reset_metrics(JournalMetrics *m) {
3461 assert(m);
3462
3463 /* Set everything to "pick automatic values". */
3464
3465 *m = (JournalMetrics) {
3466 .min_use = (uint64_t) -1,
3467 .max_use = (uint64_t) -1,
3468 .min_size = (uint64_t) -1,
3469 .max_size = (uint64_t) -1,
3470 .keep_free = (uint64_t) -1,
3471 .n_max_files = (uint64_t) -1,
3472 };
3473 }
3474
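/* Replaces all metrics still set to "automatic" ((uint64_t) -1) with concrete values derived from
 * the size of the backing file system. As an illustrative example (hypothetical numbers, before
 * page alignment and the compile-time upper/lower bounds are applied): on a 20 GiB file system
 * max_use would start out as 2 GiB (10%), max_size as max_use/8 = 256 MiB, and keep_free as
 * 3 GiB (15%). */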
3475 void journal_default_metrics(JournalMetrics *m, int fd) {
3476 char a[FORMAT_BYTES_MAX], b[FORMAT_BYTES_MAX], c[FORMAT_BYTES_MAX], d[FORMAT_BYTES_MAX], e[FORMAT_BYTES_MAX];
3477 struct statvfs ss;
3478 uint64_t fs_size;
3479
3480 assert(m);
3481 assert(fd >= 0);
3482
3483 if (fstatvfs(fd, &ss) >= 0)
3484 fs_size = ss.f_frsize * ss.f_blocks;
3485 else {
3486                 log_debug_errno(errno, "Failed to determine disk size: %m");
3487 fs_size = 0;
3488 }
3489
3490 if (m->max_use == (uint64_t) -1) {
3491
3492 if (fs_size > 0) {
3493 m->max_use = PAGE_ALIGN(fs_size / 10); /* 10% of file system size */
3494
3495 if (m->max_use > DEFAULT_MAX_USE_UPPER)
3496 m->max_use = DEFAULT_MAX_USE_UPPER;
3497
3498 if (m->max_use < DEFAULT_MAX_USE_LOWER)
3499 m->max_use = DEFAULT_MAX_USE_LOWER;
3500 } else
3501 m->max_use = DEFAULT_MAX_USE_LOWER;
3502 } else {
3503 m->max_use = PAGE_ALIGN(m->max_use);
3504
3505 if (m->max_use != 0 && m->max_use < JOURNAL_FILE_SIZE_MIN*2)
3506 m->max_use = JOURNAL_FILE_SIZE_MIN*2;
3507 }
3508
3509 if (m->min_use == (uint64_t) -1)
3510 m->min_use = DEFAULT_MIN_USE;
3511
3512 if (m->min_use > m->max_use)
3513 m->min_use = m->max_use;
3514
3515 if (m->max_size == (uint64_t) -1) {
3516 m->max_size = PAGE_ALIGN(m->max_use / 8); /* 8 chunks */
3517
3518 if (m->max_size > DEFAULT_MAX_SIZE_UPPER)
3519 m->max_size = DEFAULT_MAX_SIZE_UPPER;
3520 } else
3521 m->max_size = PAGE_ALIGN(m->max_size);
3522
3523 if (m->max_size != 0) {
3524 if (m->max_size < JOURNAL_FILE_SIZE_MIN)
3525 m->max_size = JOURNAL_FILE_SIZE_MIN;
3526
3527 if (m->max_use != 0 && m->max_size*2 > m->max_use)
3528 m->max_use = m->max_size*2;
3529 }
3530
3531 if (m->min_size == (uint64_t) -1)
3532 m->min_size = JOURNAL_FILE_SIZE_MIN;
3533 else {
3534 m->min_size = PAGE_ALIGN(m->min_size);
3535
3536 if (m->min_size < JOURNAL_FILE_SIZE_MIN)
3537 m->min_size = JOURNAL_FILE_SIZE_MIN;
3538
3539 if (m->max_size != 0 && m->min_size > m->max_size)
3540 m->max_size = m->min_size;
3541 }
3542
3543 if (m->keep_free == (uint64_t) -1) {
3544
3545 if (fs_size > 0) {
3546 m->keep_free = PAGE_ALIGN(fs_size * 3 / 20); /* 15% of file system size */
3547
3548 if (m->keep_free > DEFAULT_KEEP_FREE_UPPER)
3549 m->keep_free = DEFAULT_KEEP_FREE_UPPER;
3550
3551 } else
3552 m->keep_free = DEFAULT_KEEP_FREE;
3553 }
3554
3555 if (m->n_max_files == (uint64_t) -1)
3556 m->n_max_files = DEFAULT_N_MAX_FILES;
3557
3558 log_debug("Fixed min_use=%s max_use=%s max_size=%s min_size=%s keep_free=%s n_max_files=%" PRIu64,
3559 format_bytes(a, sizeof(a), m->min_use),
3560 format_bytes(b, sizeof(b), m->max_use),
3561 format_bytes(c, sizeof(c), m->max_size),
3562 format_bytes(d, sizeof(d), m->min_size),
3563 format_bytes(e, sizeof(e), m->keep_free),
3564 m->n_max_files);
3565 }
3566
3567 int journal_file_get_cutoff_realtime_usec(JournalFile *f, usec_t *from, usec_t *to) {
3568 assert(f);
3569 assert(f->header);
3570 assert(from || to);
3571
3572 if (from) {
3573 if (f->header->head_entry_realtime == 0)
3574 return -ENOENT;
3575
3576 *from = le64toh(f->header->head_entry_realtime);
3577 }
3578
3579 if (to) {
3580 if (f->header->tail_entry_realtime == 0)
3581 return -ENOENT;
3582
3583 *to = le64toh(f->header->tail_entry_realtime);
3584 }
3585
3586 return 1;
3587 }
3588
3589 int journal_file_get_cutoff_monotonic_usec(JournalFile *f, sd_id128_t boot_id, usec_t *from, usec_t *to) {
3590 Object *o;
3591 uint64_t p;
3592 int r;
3593
3594 assert(f);
3595 assert(from || to);
3596
3597 r = find_data_object_by_boot_id(f, boot_id, &o, &p);
3598 if (r <= 0)
3599 return r;
3600
3601 if (le64toh(o->data.n_entries) <= 0)
3602 return 0;
3603
3604 if (from) {
3605 r = journal_file_move_to_object(f, OBJECT_ENTRY, le64toh(o->data.entry_offset), &o);
3606 if (r < 0)
3607 return r;
3608
3609 *from = le64toh(o->entry.monotonic);
3610 }
3611
3612 if (to) {
3613 r = journal_file_move_to_object(f, OBJECT_DATA, p, &o);
3614 if (r < 0)
3615 return r;
3616
3617 r = generic_array_get_plus_one(f,
3618 le64toh(o->data.entry_offset),
3619 le64toh(o->data.entry_array_offset),
3620 le64toh(o->data.n_entries)-1,
3621 &o, NULL);
3622 if (r <= 0)
3623 return r;
3624
3625 *to = le64toh(o->entry.monotonic);
3626 }
3627
3628 return 1;
3629 }
3630
3631 bool journal_file_rotate_suggested(JournalFile *f, usec_t max_file_usec) {
3632 assert(f);
3633 assert(f->header);
3634
3635 /* If we gained new header fields we gained new features,
3636 * hence suggest a rotation */
3637 if (le64toh(f->header->header_size) < sizeof(Header)) {
3638 log_debug("%s uses an outdated header, suggesting rotation.", f->path);
3639 return true;
3640 }
3641
3642 /* Let's check if the hash tables grew over a certain fill
3643 * level (75%, borrowing this value from Java's hash table
3644 * implementation), and if so suggest a rotation. To calculate
3645 * the fill level we need the n_data field, which only exists
3646 * in newer versions. */
3647
3648 if (JOURNAL_HEADER_CONTAINS(f->header, n_data))
3649 if (le64toh(f->header->n_data) * 4ULL > (le64toh(f->header->data_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3650 log_debug("Data hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items, %llu file size, %"PRIu64" bytes per hash table item), suggesting rotation.",
3651 f->path,
3652 100.0 * (double) le64toh(f->header->n_data) / ((double) (le64toh(f->header->data_hash_table_size) / sizeof(HashItem))),
3653 le64toh(f->header->n_data),
3654 le64toh(f->header->data_hash_table_size) / sizeof(HashItem),
3655 (unsigned long long) f->last_stat.st_size,
3656 f->last_stat.st_size / le64toh(f->header->n_data));
3657 return true;
3658 }
3659
3660 if (JOURNAL_HEADER_CONTAINS(f->header, n_fields))
3661 if (le64toh(f->header->n_fields) * 4ULL > (le64toh(f->header->field_hash_table_size) / sizeof(HashItem)) * 3ULL) {
3662 log_debug("Field hash table of %s has a fill level at %.1f (%"PRIu64" of %"PRIu64" items), suggesting rotation.",
3663 f->path,
3664 100.0 * (double) le64toh(f->header->n_fields) / ((double) (le64toh(f->header->field_hash_table_size) / sizeof(HashItem))),
3665 le64toh(f->header->n_fields),
3666 le64toh(f->header->field_hash_table_size) / sizeof(HashItem));
3667 return true;
3668 }
3669
3670 /* Are the data objects properly indexed by field objects? */
3671 if (JOURNAL_HEADER_CONTAINS(f->header, n_data) &&
3672 JOURNAL_HEADER_CONTAINS(f->header, n_fields) &&
3673 le64toh(f->header->n_data) > 0 &&
3674 le64toh(f->header->n_fields) == 0)
3675 return true;
3676
3677 if (max_file_usec > 0) {
3678 usec_t t, h;
3679
3680 h = le64toh(f->header->head_entry_realtime);
3681 t = now(CLOCK_REALTIME);
3682
3683 if (h > 0 && t > h + max_file_usec)
3684 return true;
3685 }
3686
3687 return false;
3688 }