1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #if HAVE_SELINUX
4 #include <selinux/selinux.h>
5 #endif
6 #include <sys/ioctl.h>
7 #include <sys/mman.h>
8 #include <sys/signalfd.h>
9 #include <sys/statvfs.h>
10 #include <linux/sockios.h>
11
12 #include "sd-daemon.h"
13 #include "sd-journal.h"
14 #include "sd-messages.h"
15
16 #include "acl-util.h"
17 #include "alloc-util.h"
18 #include "audit-util.h"
19 #include "cgroup-util.h"
20 #include "conf-parser.h"
21 #include "dirent-util.h"
22 #include "extract-word.h"
23 #include "fd-util.h"
24 #include "fileio.h"
25 #include "format-util.h"
26 #include "fs-util.h"
27 #include "hashmap.h"
28 #include "hostname-util.h"
29 #include "id128-util.h"
30 #include "initrd-util.h"
31 #include "io-util.h"
32 #include "journal-authenticate.h"
33 #include "journal-internal.h"
34 #include "journal-vacuum.h"
35 #include "journald-audit.h"
36 #include "journald-context.h"
37 #include "journald-kmsg.h"
38 #include "journald-native.h"
39 #include "journald-rate-limit.h"
40 #include "journald-server.h"
41 #include "journald-stream.h"
42 #include "journald-syslog.h"
43 #include "log.h"
44 #include "missing_audit.h"
45 #include "mkdir.h"
46 #include "parse-util.h"
47 #include "path-util.h"
48 #include "proc-cmdline.h"
49 #include "process-util.h"
50 #include "rm-rf.h"
51 #include "selinux-util.h"
52 #include "signal-util.h"
53 #include "socket-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
57 #include "syslog-util.h"
58 #include "uid-alloc-range.h"
59 #include "user-util.h"
60
61 #define USER_JOURNALS_MAX 1024
62
63 #define DEFAULT_SYNC_INTERVAL_USEC (5*USEC_PER_MINUTE)
64 #define DEFAULT_RATE_LIMIT_INTERVAL (30*USEC_PER_SEC)
65 #define DEFAULT_RATE_LIMIT_BURST 10000
66 #define DEFAULT_MAX_FILE_USEC USEC_PER_MONTH
67
68 #define DEFAULT_KMSG_OWN_INTERVAL (5 * USEC_PER_SEC)
69 #define DEFAULT_KMSG_OWN_BURST 50
70
71 #define RECHECK_SPACE_USEC (30*USEC_PER_SEC)
72
73 #define NOTIFY_SNDBUF_SIZE (8*1024*1024)
74
75 /* The period to insert between posting changes for coalescing */
76 #define POST_CHANGE_TIMER_INTERVAL_USEC (250*USEC_PER_MSEC)
77
78 /* Pick a good default that is likely to fit into AF_UNIX and AF_INET SOCK_DGRAM datagrams, and even leaves some room
79 * for a bit of additional metadata. */
80 #define DEFAULT_LINE_MAX (48*1024)
81
82 #define DEFERRED_CLOSES_MAX (4096)
83
84 #define IDLE_TIMEOUT_USEC (30*USEC_PER_SEC)
85
86 #define FAILED_TO_WRITE_ENTRY_RATELIMIT ((const RateLimit) { .interval = 1 * USEC_PER_SEC, .burst = 1 })
87
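/* Sum up the on-disk size of all *.journal and *.journal~ files below 'path' into *ret_used, and
 * report the file system's remaining free space in *ret_free. */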
88 static int server_determine_path_usage(
89 Server *s,
90 const char *path,
91 uint64_t *ret_used,
92 uint64_t *ret_free) {
93
94 _cleanup_closedir_ DIR *d = NULL;
95 struct statvfs ss;
96
97 assert(s);
98 assert(path);
99 assert(ret_used);
100 assert(ret_free);
101
102 d = opendir(path);
103 if (!d)
104 return log_ratelimit_full_errno(errno == ENOENT ? LOG_DEBUG : LOG_ERR,
105 errno, JOURNAL_LOG_RATELIMIT, "Failed to open %s: %m", path);
106
107 if (fstatvfs(dirfd(d), &ss) < 0)
108 return log_ratelimit_error_errno(errno, JOURNAL_LOG_RATELIMIT,
109 "Failed to fstatvfs(%s): %m", path);
110
111 *ret_free = ss.f_bsize * ss.f_bavail;
112 *ret_used = 0;
113 FOREACH_DIRENT_ALL(de, d, break) {
114 struct stat st;
115
116 if (!endswith(de->d_name, ".journal") &&
117 !endswith(de->d_name, ".journal~"))
118 continue;
119
120 if (fstatat(dirfd(d), de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) {
121 log_debug_errno(errno, "Failed to stat %s/%s, ignoring: %m", path, de->d_name);
122 continue;
123 }
124
125 if (!S_ISREG(st.st_mode))
126 continue;
127
128 *ret_used += (uint64_t) st.st_blocks * 512UL;
129 }
130
131 return 0;
132 }
133
134 static void cache_space_invalidate(JournalStorageSpace *space) {
135 zero(*space);
136 }
137
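/* Recompute the cached disk-space figures (vfs usage, limit, available) for the given storage. The
 * result is cached and only refreshed once RECHECK_SPACE_USEC has elapsed since the last refresh:
 * returns 0 if the cache was still fresh, 1 if it was refreshed, and a negative errno on failure. */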
138 static int cache_space_refresh(Server *s, JournalStorage *storage) {
139 JournalStorageSpace *space;
140 JournalMetrics *metrics;
141 uint64_t vfs_used, vfs_avail, avail;
142 usec_t ts;
143 int r;
144
145 assert(s);
146
147 metrics = &storage->metrics;
148 space = &storage->space;
149
150 ts = now(CLOCK_MONOTONIC);
151
152 if (space->timestamp != 0 && usec_add(space->timestamp, RECHECK_SPACE_USEC) > ts)
153 return 0;
154
155 r = server_determine_path_usage(s, storage->path, &vfs_used, &vfs_avail);
156 if (r < 0)
157 return r;
158
159 space->vfs_used = vfs_used;
160 space->vfs_available = vfs_avail;
161
162 avail = LESS_BY(vfs_avail, metrics->keep_free);
163
164 space->limit = CLAMP(vfs_used + avail, metrics->min_use, metrics->max_use);
165 space->available = LESS_BY(space->limit, vfs_used);
166 space->timestamp = ts;
167 return 1;
168 }
169
170 static void patch_min_use(JournalStorage *storage) {
171 assert(storage);
172
173 /* Let's bump the min_use limit to the current usage on disk. We do
174 * this when starting up and first opening the journal files. This way
175 * sudden spikes in disk usage will not cause journald to vacuum files
176 * without bounds. Note that this means that only a restart of journald
177 * will reset this value. */
178
179 storage->metrics.min_use = MAX(storage->metrics.min_use, storage->space.vfs_used);
180 }
181
182 static JournalStorage* server_current_storage(Server *s) {
183 assert(s);
184
185 return s->system_journal ? &s->system_storage : &s->runtime_storage;
186 }
187
188 static int server_determine_space(Server *s, uint64_t *available, uint64_t *limit) {
189 JournalStorage *js;
190 int r;
191
192 assert(s);
193
194 js = server_current_storage(s);
195
196 r = cache_space_refresh(s, js);
197 if (r >= 0) {
198 if (available)
199 *available = js->space.available;
200 if (limit)
201 *limit = js->space.limit;
202 }
203 return r;
204 }
205
206 void server_space_usage_message(Server *s, JournalStorage *storage) {
207 assert(s);
208
209 if (!storage)
210 storage = server_current_storage(s);
211
212 if (cache_space_refresh(s, storage) < 0)
213 return;
214
215 const JournalMetrics *metrics = &storage->metrics;
216
217 server_driver_message(s, 0,
218 "MESSAGE_ID=" SD_MESSAGE_JOURNAL_USAGE_STR,
219 LOG_MESSAGE("%s (%s) is %s, max %s, %s free.",
220 storage->name, storage->path,
221 FORMAT_BYTES(storage->space.vfs_used),
222 FORMAT_BYTES(storage->space.limit),
223 FORMAT_BYTES(storage->space.available)),
224 "JOURNAL_NAME=%s", storage->name,
225 "JOURNAL_PATH=%s", storage->path,
226 "CURRENT_USE=%"PRIu64, storage->space.vfs_used,
227 "CURRENT_USE_PRETTY=%s", FORMAT_BYTES(storage->space.vfs_used),
228 "MAX_USE=%"PRIu64, metrics->max_use,
229 "MAX_USE_PRETTY=%s", FORMAT_BYTES(metrics->max_use),
230 "DISK_KEEP_FREE=%"PRIu64, metrics->keep_free,
231 "DISK_KEEP_FREE_PRETTY=%s", FORMAT_BYTES(metrics->keep_free),
232 "DISK_AVAILABLE=%"PRIu64, storage->space.vfs_available,
233 "DISK_AVAILABLE_PRETTY=%s", FORMAT_BYTES(storage->space.vfs_available),
234 "LIMIT=%"PRIu64, storage->space.limit,
235 "LIMIT_PRETTY=%s", FORMAT_BYTES(storage->space.limit),
236 "AVAILABLE=%"PRIu64, storage->space.available,
237 "AVAILABLE_PRETTY=%s", FORMAT_BYTES(storage->space.available),
238 NULL);
239 }
240
241 static void server_add_acls(ManagedJournalFile *f, uid_t uid) {
242 assert(f);
243
244 #if HAVE_ACL
245 int r;
246
247 if (uid_for_system_journal(uid))
248 return;
249
250 r = fd_add_uid_acl_permission(f->file->fd, uid, ACL_READ);
251 if (r < 0)
252 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
253 "Failed to set ACL on %s, ignoring: %m", f->file->path);
254 #endif
255 }
256
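/* Open (or create) a single journal file with the server's compression and sealing settings, passing
 * the server's deferred-close set through to the managed_journal_file_* helpers, and arm the
 * post-change timer used to coalesce change notifications. */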
257 static int server_open_journal(
258 Server *s,
259 bool reliably,
260 const char *fname,
261 int open_flags,
262 bool seal,
263 JournalMetrics *metrics,
264 ManagedJournalFile **ret) {
265
266 _cleanup_(managed_journal_file_closep) ManagedJournalFile *f = NULL;
267 JournalFileFlags file_flags;
268 int r;
269
270 assert(s);
271 assert(fname);
272 assert(ret);
273
274 file_flags =
275 (s->compress.enabled ? JOURNAL_COMPRESS : 0) |
276 (seal ? JOURNAL_SEAL : 0) |
277 JOURNAL_STRICT_ORDER;
278
279 if (reliably)
280 r = managed_journal_file_open_reliably(
281 fname,
282 open_flags,
283 file_flags,
284 0640,
285 s->compress.threshold_bytes,
286 metrics,
287 s->mmap,
288 s->deferred_closes,
289 /* template= */ NULL,
290 &f);
291 else
292 r = managed_journal_file_open(
293 /* fd= */ -1,
294 fname,
295 open_flags,
296 file_flags,
297 0640,
298 s->compress.threshold_bytes,
299 metrics,
300 s->mmap,
301 s->deferred_closes,
302 /* template= */ NULL,
303 &f);
304 if (r < 0)
305 return r;
306
307 r = journal_file_enable_post_change_timer(f->file, s->event, POST_CHANGE_TIMER_INTERVAL_USEC);
308 if (r < 0)
309 return r;
310
311 *ret = TAKE_PTR(f);
312 return r;
313 }
314
315 static bool server_flushed_flag_is_set(Server *s) {
316 const char *fn;
317
318 assert(s);
319
320 /* We don't support the "flushing" concept for namespace instances; we assume they always have
321 * access to /var. */
322 if (s->namespace)
323 return true;
324
325 fn = strjoina(s->runtime_directory, "/flushed");
326 return access(fn, F_OK) >= 0;
327 }
328
329 static int server_system_journal_open(
330 Server *s,
331 bool flush_requested,
332 bool relinquish_requested) {
333
334 const char *fn;
335 int r = 0;
336
337 if (!s->system_journal &&
338 IN_SET(s->storage, STORAGE_PERSISTENT, STORAGE_AUTO) &&
339 (flush_requested || server_flushed_flag_is_set(s)) &&
340 !relinquish_requested) {
341
342 /* If in auto mode: first try to create the machine path, but not the prefix.
343 *
344 * If in persistent mode: create /var/log/journal and the machine path */
345
346 if (s->storage == STORAGE_PERSISTENT)
347 (void) mkdir_parents(s->system_storage.path, 0755);
348
349 (void) mkdir(s->system_storage.path, 0755);
350
351 fn = strjoina(s->system_storage.path, "/system.journal");
352 r = server_open_journal(
353 s,
354 /* reliably= */ true,
355 fn,
356 O_RDWR|O_CREAT,
357 s->seal,
358 &s->system_storage.metrics,
359 &s->system_journal);
360 if (r >= 0) {
361 server_add_acls(s->system_journal, 0);
362 (void) cache_space_refresh(s, &s->system_storage);
363 patch_min_use(&s->system_storage);
364 } else {
365 if (!IN_SET(r, -ENOENT, -EROFS))
366 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
367 "Failed to open system journal: %m");
368
369 r = 0;
370 }
371
372 /* If the runtime journal is open, and we're post-flush, we're recovering from a failed
373 * system journal rotate (ENOSPC) for which the runtime journal was reopened.
374 *
375 * Perform an implicit flush to var, leaving the runtime journal closed, now that the system
376 * journal is back.
377 */
378 if (!flush_requested)
379 (void) server_flush_to_var(s, true);
380 }
381
382 if (!s->runtime_journal &&
383 (s->storage != STORAGE_NONE)) {
384
385 fn = strjoina(s->runtime_storage.path, "/system.journal");
386
387 if (!s->system_journal || relinquish_requested) {
388
389 /* OK, we really need the runtime journal, so create it if necessary. */
390
391 (void) mkdir_parents(s->runtime_storage.path, 0755);
392 (void) mkdir(s->runtime_storage.path, 0750);
393
394 r = server_open_journal(
395 s,
396 /* reliably= */ true,
397 fn,
398 O_RDWR|O_CREAT,
399 /* seal= */ false,
400 &s->runtime_storage.metrics,
401 &s->runtime_journal);
402 if (r < 0)
403 return log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
404 "Failed to open runtime journal: %m");
405
406 } else if (!server_flushed_flag_is_set(s)) {
407 /* Try to open the runtime journal, but only if it already exists, so that we can
408 * flush it into the system journal */
409
410 r = server_open_journal(
411 s,
412 /* reliably= */ false,
413 fn,
414 O_RDWR,
415 /* seal= */ false,
416 &s->runtime_storage.metrics,
417 &s->runtime_journal);
418 if (r < 0) {
419 if (r != -ENOENT)
420 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
421 "Failed to open runtime journal: %m");
422
423 r = 0;
424 }
425 }
426
427 if (s->runtime_journal) {
428 server_add_acls(s->runtime_journal, 0);
429 (void) cache_space_refresh(s, &s->runtime_storage);
430 patch_min_use(&s->runtime_storage);
431 }
432 }
433
434 return r;
435 }
436
437 static int server_find_user_journal(Server *s, uid_t uid, ManagedJournalFile **ret) {
438 _cleanup_(managed_journal_file_closep) ManagedJournalFile *f = NULL;
439 _cleanup_free_ char *p = NULL;
440 int r;
441
442 assert(!uid_for_system_journal(uid));
443
444 f = ordered_hashmap_get(s->user_journals, UID_TO_PTR(uid));
445 if (f)
446 goto found;
447
448 if (asprintf(&p, "%s/user-" UID_FMT ".journal", s->system_storage.path, uid) < 0)
449 return log_oom();
450
451 /* Too many open? Then let's close one (or more) */
452 while (ordered_hashmap_size(s->user_journals) >= USER_JOURNALS_MAX) {
453 ManagedJournalFile *first;
454
455 assert_se(first = ordered_hashmap_steal_first(s->user_journals));
456 (void) managed_journal_file_close(first);
457 }
458
459 r = server_open_journal(
460 s,
461 /* reliably= */ true,
462 p,
463 O_RDWR|O_CREAT,
464 s->seal,
465 &s->system_storage.metrics,
466 &f);
467 if (r < 0)
468 return r;
469
470 r = ordered_hashmap_put(s->user_journals, UID_TO_PTR(uid), f);
471 if (r < 0)
472 return r;
473
474 server_add_acls(f, uid);
475
476 found:
477 *ret = TAKE_PTR(f);
478 return 0;
479 }
480
481 static ManagedJournalFile* server_find_journal(Server *s, uid_t uid) {
482 int r;
483
484 assert(s);
485
486 /* A rotate that fails to create the new journal (ENOSPC) leaves the rotated journal as NULL. Unless
487 * we revisit opening, even after space is made available we'll continue to return NULL indefinitely.
488 *
489 * server_system_journal_open() is a no-op if the journals are already open, so we can just call it here to
490 * recover from failed rotates (or anything else that's left the journals as NULL).
491 *
492 * Fixes https://github.com/systemd/systemd/issues/3968 */
493 (void) server_system_journal_open(s, /* flush_requested= */ false, /* relinquish_requested= */ false);
494
495 /* We split up user logs only on /var, not on /run. If the runtime file is open, we write to it
496 * exclusively, in order to guarantee proper order as soon as we flush /run to /var and close the
497 * runtime file. */
498
499 if (s->runtime_journal)
500 return s->runtime_journal;
501
502 /* If we are not in persistent mode, then we need to return NULL immediately rather than opening a
503 * persistent journal of any sort.
504 *
505 * Fixes https://github.com/systemd/systemd/issues/20390 */
506 if (!IN_SET(s->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
507 return NULL;
508
509 if (!uid_for_system_journal(uid)) {
510 ManagedJournalFile *f = NULL;
511
512 r = server_find_user_journal(s, uid, &f);
513 if (r >= 0)
514 return ASSERT_PTR(f);
515
516 log_warning_errno(r, "Failed to open user journal file, falling back to system journal: %m");
517 }
518
519 return s->system_journal;
520 }
521
522 static int server_do_rotate(
523 Server *s,
524 ManagedJournalFile **f,
525 const char* name,
526 bool seal,
527 uint32_t uid) {
528
529 JournalFileFlags file_flags;
530 int r;
531
532 assert(s);
533
534 if (!*f)
535 return -EINVAL;
536
537 file_flags =
538 (s->compress.enabled ? JOURNAL_COMPRESS : 0)|
539 (seal ? JOURNAL_SEAL : 0) |
540 JOURNAL_STRICT_ORDER;
541
542 r = managed_journal_file_rotate(f, s->mmap, file_flags, s->compress.threshold_bytes, s->deferred_closes);
543 if (r < 0) {
544 if (*f)
545 return log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT,
546 "Failed to rotate %s: %m", (*f)->file->path);
547 else
548 return log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT,
549 "Failed to create new %s journal: %m", name);
550 }
551
552 server_add_acls(*f, uid);
553 return r;
554 }
555
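/* Journal files that are still offlining asynchronously are parked in s->deferred_closes; close
 * those whose offlining has finished and leave the rest for a later pass. */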
556 static void server_process_deferred_closes(Server *s) {
557 ManagedJournalFile *f;
558
559 /* Perform any deferred closes which aren't still offlining. */
560 SET_FOREACH(f, s->deferred_closes) {
561 if (managed_journal_file_is_offlining(f))
562 continue;
563
564 (void) set_remove(s->deferred_closes, f);
565 (void) managed_journal_file_close(f);
566 }
567 }
568
569 static void server_vacuum_deferred_closes(Server *s) {
570 assert(s);
571
572 /* Make some room in the deferred closes list, so that it doesn't grow without bounds */
573 if (set_size(s->deferred_closes) < DEFERRED_CLOSES_MAX)
574 return;
575
576 /* Let's first remove all journal files that might already have completed closing */
577 server_process_deferred_closes(s);
578
579 /* And now, let's close some more until we reach the limit again. */
580 while (set_size(s->deferred_closes) >= DEFERRED_CLOSES_MAX) {
581 ManagedJournalFile *f;
582
583 assert_se(f = set_steal_first(s->deferred_closes));
584 managed_journal_file_close(f);
585 }
586 }
587
588 static int server_archive_offline_user_journals(Server *s) {
589 _cleanup_closedir_ DIR *d = NULL;
590 int r;
591
592 assert(s);
593
594 d = opendir(s->system_storage.path);
595 if (!d) {
596 if (errno == ENOENT)
597 return 0;
598
599 return log_ratelimit_error_errno(errno, JOURNAL_LOG_RATELIMIT,
600 "Failed to open %s: %m", s->system_storage.path);
601 }
602
603 for (;;) {
604 _cleanup_free_ char *full = NULL;
605 _cleanup_close_ int fd = -EBADF;
606 struct dirent *de;
607 ManagedJournalFile *f;
608 uid_t uid;
609
610 errno = 0;
611 de = readdir_no_dot(d);
612 if (!de) {
613 if (errno != 0)
614 log_ratelimit_warning_errno(errno, JOURNAL_LOG_RATELIMIT,
615 "Failed to enumerate %s, ignoring: %m",
616 s->system_storage.path);
617 break;
618 }
619
620 r = journal_file_parse_uid_from_filename(de->d_name, &uid);
621 if (r < 0) {
622 /* Don't warn if the file is not an online or offline user journal. */
623 if (r != -EREMOTE)
624 log_warning_errno(r, "Failed to parse UID from file name '%s', ignoring: %m", de->d_name);
625 continue;
626 }
627
628 /* Already rotated by the loop in server_rotate()? i.e. is it an open user journal? */
629 if (ordered_hashmap_contains(s->user_journals, UID_TO_PTR(uid)))
630 continue;
631
632 full = path_join(s->system_storage.path, de->d_name);
633 if (!full)
634 return log_oom();
635
636 fd = openat(dirfd(d), de->d_name, O_RDWR|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW|O_NONBLOCK);
637 if (fd < 0) {
638 log_ratelimit_full_errno(IN_SET(errno, ELOOP, ENOENT) ? LOG_DEBUG : LOG_WARNING,
639 errno, JOURNAL_LOG_RATELIMIT,
640 "Failed to open journal file '%s' for rotation: %m", full);
641 continue;
642 }
643
644 /* Make some room in the set of deferred close()s */
645 server_vacuum_deferred_closes(s);
646
647 /* Open the file briefly, so that we can archive it */
648 r = managed_journal_file_open(
649 fd,
650 full,
651 O_RDWR,
652 (s->compress.enabled ? JOURNAL_COMPRESS : 0) |
653 (s->seal ? JOURNAL_SEAL : 0), /* strict order does not matter here */
654 0640,
655 s->compress.threshold_bytes,
656 &s->system_storage.metrics,
657 s->mmap,
658 /* deferred_closes= */ NULL,
659 /* template= */ NULL,
660 &f);
661 if (r < 0) {
662 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
663 "Failed to read journal file %s for rotation, trying to move it out of the way: %m",
664 full);
665
666 r = journal_file_dispose(dirfd(d), de->d_name);
667 if (r < 0)
668 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
669 "Failed to move %s out of the way, ignoring: %m",
670 full);
671 else
672 log_debug("Successfully moved %s out of the way.", full);
673
674 continue;
675 }
676
677 TAKE_FD(fd); /* Donated to managed_journal_file_open() */
678
679 r = journal_file_archive(f->file, NULL);
680 if (r < 0)
681 log_debug_errno(r, "Failed to archive journal file '%s', ignoring: %m", full);
682
683 managed_journal_file_initiate_close(TAKE_PTR(f), s->deferred_closes);
684 }
685
686 return 0;
687 }
688
689 void server_rotate(Server *s) {
690 ManagedJournalFile *f;
691 void *k;
692 int r;
693
694 log_debug("Rotating...");
695
696 /* First, rotate the system journal (either in its runtime flavour or in its persistent flavour) */
697 (void) server_do_rotate(s, &s->runtime_journal, "runtime", /* seal= */ false, /* uid= */ 0);
698 (void) server_do_rotate(s, &s->system_journal, "system", s->seal, /* uid= */ 0);
699
700 /* Then, rotate all user journals we have open (keeping them open) */
701 ORDERED_HASHMAP_FOREACH_KEY(f, k, s->user_journals) {
702 r = server_do_rotate(s, &f, "user", s->seal, PTR_TO_UID(k));
703 if (r >= 0)
704 ordered_hashmap_replace(s->user_journals, k, f);
705 else if (!f)
706 /* Old file has been closed and deallocated */
707 ordered_hashmap_remove(s->user_journals, k);
708 }
709
710 /* Finally, also rotate all user journals we currently do not have open. (But do so only if we
711 * actually have access to /var, i.e. are not in the log-to-runtime-journal mode). */
712 if (!s->runtime_journal)
713 (void) server_archive_offline_user_journals(s);
714
715 server_process_deferred_closes(s);
716 }
717
718 void server_sync(Server *s) {
719 ManagedJournalFile *f;
720 int r;
721
722 if (s->system_journal) {
723 r = managed_journal_file_set_offline(s->system_journal, false);
724 if (r < 0)
725 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
726 "Failed to sync system journal, ignoring: %m");
727 }
728
729 ORDERED_HASHMAP_FOREACH(f, s->user_journals) {
730 r = managed_journal_file_set_offline(f, false);
731 if (r < 0)
732 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
733 "Failed to sync user journal, ignoring: %m");
734 }
735
736 if (s->sync_event_source) {
737 r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_OFF);
738 if (r < 0)
739 log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT,
740 "Failed to disable sync timer source: %m");
741 }
742
743 s->sync_scheduled = false;
744 }
745
746 static void server_do_vacuum(Server *s, JournalStorage *storage, bool verbose) {
747
748 int r;
749
750 assert(s);
751 assert(storage);
752
753 (void) cache_space_refresh(s, storage);
754
755 if (verbose)
756 server_space_usage_message(s, storage);
757
758 r = journal_directory_vacuum(storage->path, storage->space.limit,
759 storage->metrics.n_max_files, s->max_retention_usec,
760 &s->oldest_file_usec, verbose);
761 if (r < 0 && r != -ENOENT)
762 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
763 "Failed to vacuum %s, ignoring: %m", storage->path);
764
765 cache_space_invalidate(&storage->space);
766 }
767
768 void server_vacuum(Server *s, bool verbose) {
769 assert(s);
770
771 log_debug("Vacuuming...");
772
773 s->oldest_file_usec = 0;
774
775 if (s->system_journal)
776 server_do_vacuum(s, &s->system_storage, verbose);
777 if (s->runtime_journal)
778 server_do_vacuum(s, &s->runtime_storage, verbose);
779 }
780
781 static void server_cache_machine_id(Server *s) {
782 sd_id128_t id;
783 int r;
784
785 assert(s);
786
787 r = sd_id128_get_machine(&id);
788 if (r < 0)
789 return;
790
791 sd_id128_to_string(id, stpcpy(s->machine_id_field, "_MACHINE_ID="));
792 }
793
794 static void server_cache_boot_id(Server *s) {
795 sd_id128_t id;
796 int r;
797
798 assert(s);
799
800 r = sd_id128_get_boot(&id);
801 if (r < 0)
802 return;
803
804 sd_id128_to_string(id, stpcpy(s->boot_id_field, "_BOOT_ID="));
805 }
806
807 static void server_cache_hostname(Server *s) {
808 _cleanup_free_ char *t = NULL;
809 char *x;
810
811 assert(s);
812
813 t = gethostname_malloc();
814 if (!t)
815 return;
816
817 x = strjoin("_HOSTNAME=", t);
818 if (!x)
819 return;
820
821 free_and_replace(s->hostname_field, x);
822 }
823
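/* Decide, based on the error returned when appending an entry, whether rotating the journal file and
 * retrying the write is worthwhile (true) or the error is fatal for this entry (false). */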
824 static bool shall_try_append_again(JournalFile *f, int r) {
825 switch (r) {
826
827 case -E2BIG: /* Hit configured limit */
828 case -EFBIG: /* Hit fs limit */
829 case -EDQUOT: /* Quota limit hit */
830 case -ENOSPC: /* Disk full */
831 log_debug_errno(r, "%s: Allocation limit reached, rotating.", f->path);
832 return true;
833
834 case -EROFS: /* Read-only file system */
835 /* If appending an entry fails and shall_try_append_again() returns true, the journal is
836 * rotated. If the file system is read-only, rotation will fail and s->system_journal will be
837 * set to NULL. After that, when server_find_journal() tries to open the journal and finds
838 * s->system_journal NULL, it will open the runtime journal instead. */
839 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Read-only file system, rotating.", f->path);
840 return true;
841
842 case -EIO: /* I/O error of some kind (mmap) */
843 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT, "%s: IO error, rotating.", f->path);
844 return true;
845
846 case -EHOSTDOWN: /* Other machine */
847 log_ratelimit_info_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Journal file from other machine, rotating.", f->path);
848 return true;
849
850 case -EBUSY: /* Unclean shutdown */
851 log_ratelimit_info_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Unclean shutdown, rotating.", f->path);
852 return true;
853
854 case -EPROTONOSUPPORT: /* Unsupported feature */
855 log_ratelimit_info_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Unsupported feature, rotating.", f->path);
856 return true;
857
858 case -EBADMSG: /* Corrupted */
859 case -ENODATA: /* Truncated */
860 case -ESHUTDOWN: /* Already archived */
861 case -EADDRNOTAVAIL: /* Referenced object offset out of bounds */
862 log_ratelimit_info_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Journal file corrupted, rotating.", f->path);
863 return true;
864
865 case -EIDRM: /* Journal file has been deleted */
866 log_ratelimit_info_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Journal file has been deleted, rotating.", f->path);
867 return true;
868
869 case -EREMCHG: /* Wallclock time (CLOCK_REALTIME) jumped backwards relative to last journal entry */
870 log_ratelimit_info_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Realtime clock jumped backwards relative to last journal entry, rotating.", f->path);
871 return true;
872
873 case -ENOTNAM: /* Monotonic time (CLOCK_MONOTONIC) jumped backwards relative to last journal entry with the same boot ID */
874 log_ratelimit_info_errno(
875 r,
876 JOURNAL_LOG_RATELIMIT,
877 "%s: Monotonic clock jumped backwards relative to last journal entry with the same boot ID, rotating.",
878 f->path);
879 return true;
880
881 case -EILSEQ: /* seqnum ID last used in the file doesn't match the one we'd passed when writing an entry to it */
882 log_ratelimit_info_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Journal file uses a different sequence number ID, rotating.", f->path);
883 return true;
884
885 case -EAFNOSUPPORT:
886 log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Underlying file system does not support memory mapping or another required file system feature.", f->path);
887 return false;
888
889 default:
890 log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "%s: Unexpected error while writing to journal file: %m", f->path);
891 return false;
892 }
893 }
894
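/* Write one fully assembled entry: pick the target journal for the UID, rotate first if the clock
 * jumped backwards or the file's header suggests rotation, append the entry, and on a retriable
 * failure rotate and vacuum once and retry a single time. */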
895 static void server_write_to_journal(
896 Server *s,
897 uid_t uid,
898 const struct iovec *iovec,
899 size_t n,
900 int priority) {
901
902 bool vacuumed = false, rotate = false;
903 struct dual_timestamp ts;
904 ManagedJournalFile *f;
905 int r;
906
907 assert(s);
908 assert(iovec);
909 assert(n > 0);
910
911 /* Get the closest, linearized time we have for this log event from the event loop. (Note that we use neither
912 * the source time nor the time the event was originally seen, but simply the time we started
913 * processing it, as we want strictly linear ordering in what we write out.) */
914 assert_se(sd_event_now(s->event, CLOCK_REALTIME, &ts.realtime) >= 0);
915 assert_se(sd_event_now(s->event, CLOCK_MONOTONIC, &ts.monotonic) >= 0);
916
917 if (ts.realtime < s->last_realtime_clock) {
918 /* When the time jumps backwards, let's immediately rotate. Of course, this should not happen during
919 * regular operation. However, when it does happen, then we should make sure that we start fresh files
920 * to ensure that the entries in the journal files are strictly ordered by time, in order to ensure
921 * bisection works correctly. */
922
923 log_ratelimit_info(JOURNAL_LOG_RATELIMIT, "Time jumped backwards, rotating.");
924 rotate = true;
925 } else {
926
927 f = server_find_journal(s, uid);
928 if (!f)
929 return;
930
931 if (journal_file_rotate_suggested(f->file, s->max_file_usec, LOG_DEBUG)) {
932 log_debug("%s: Journal header limits reached or header out-of-date, rotating.",
933 f->file->path);
934 rotate = true;
935 }
936 }
937
938 if (rotate) {
939 server_rotate(s);
940 server_vacuum(s, false);
941 vacuumed = true;
942
943 f = server_find_journal(s, uid);
944 if (!f)
945 return;
946 }
947
948 s->last_realtime_clock = ts.realtime;
949
950 r = journal_file_append_entry(
951 f->file,
952 &ts,
953 /* boot_id= */ NULL,
954 iovec, n,
955 &s->seqnum->seqnum,
956 &s->seqnum->id,
957 /* ret_object= */ NULL,
958 /* ret_offset= */ NULL);
959 if (r >= 0) {
960 server_schedule_sync(s, priority);
961 return;
962 }
963
964 log_debug_errno(r, "Failed to write entry to %s (%zu items, %zu bytes): %m", f->file->path, n, IOVEC_TOTAL_SIZE(iovec, n));
965
966 if (!shall_try_append_again(f->file, r))
967 return;
968 if (vacuumed) {
969 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
970 "Suppressing rotation, as we already rotated immediately before write attempt. Giving up.");
971 return;
972 }
973
974 server_rotate(s);
975 server_vacuum(s, false);
976
977 f = server_find_journal(s, uid);
978 if (!f)
979 return;
980
981 log_debug_errno(r, "Retrying write.");
982 r = journal_file_append_entry(
983 f->file,
984 &ts,
985 /* boot_id= */ NULL,
986 iovec, n,
987 &s->seqnum->seqnum,
988 &s->seqnum->id,
989 /* ret_object= */ NULL,
990 /* ret_offset= */ NULL);
991 if (r < 0)
992 log_ratelimit_error_errno(r, FAILED_TO_WRITE_ENTRY_RATELIMIT,
993 "Failed to write entry to %s (%zu items, %zu bytes) despite vacuuming, ignoring: %m",
994 f->file->path, n, IOVEC_TOTAL_SIZE(iovec, n));
995 else
996 server_schedule_sync(s, priority);
997 }
998
999 #define IOVEC_ADD_NUMERIC_FIELD(iovec, n, value, type, isset, format, field) \
1000 if (isset(value)) { \
1001 char *k; \
1002 k = newa(char, STRLEN(field "=") + DECIMAL_STR_MAX(type) + 1); \
1003 sprintf(k, field "=" format, value); \
1004 iovec[n++] = IOVEC_MAKE_STRING(k); \
1005 }
1006
1007 #define IOVEC_ADD_STRING_FIELD(iovec, n, value, field) \
1008 if (!isempty(value)) { \
1009 char *k; \
1010 k = strjoina(field "=", value); \
1011 iovec[n++] = IOVEC_MAKE_STRING(k); \
1012 }
1013
1014 #define IOVEC_ADD_ID128_FIELD(iovec, n, value, field) \
1015 if (!sd_id128_is_null(value)) { \
1016 char *k; \
1017 k = newa(char, STRLEN(field "=") + SD_ID128_STRING_MAX); \
1018 sd_id128_to_string(value, stpcpy(k, field "=")); \
1019 iovec[n++] = IOVEC_MAKE_STRING(k); \
1020 }
1021
1022 #define IOVEC_ADD_SIZED_FIELD(iovec, n, value, value_size, field) \
1023 if (value_size > 0) { \
1024 char *k; \
1025 k = newa(char, STRLEN(field "=") + value_size + 1); \
1026 *((char*) mempcpy(stpcpy(k, field "="), value, value_size)) = 0; \
1027 iovec[n++] = IOVEC_MAKE_STRING(k); \
1028 } \
1029
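/* Append the implicit metadata fields (client credentials and context, optional OBJECT_* fields,
 * source timestamp, boot/machine/hostname/namespace IDs, runtime scope) to the caller-provided iovec,
 * pick the journal UID according to the configured split mode, and hand the result to
 * server_write_to_journal(). */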
1030 static void server_dispatch_message_real(
1031 Server *s,
1032 struct iovec *iovec, size_t n, size_t m,
1033 const ClientContext *c,
1034 const struct timeval *tv,
1035 int priority,
1036 pid_t object_pid) {
1037
1038 char source_time[sizeof("_SOURCE_REALTIME_TIMESTAMP=") + DECIMAL_STR_MAX(usec_t)];
1039 _unused_ _cleanup_free_ char *cmdline1 = NULL, *cmdline2 = NULL;
1040 uid_t journal_uid;
1041 ClientContext *o;
1042
1043 assert(s);
1044 assert(iovec);
1045 assert(n > 0);
1046 assert(n +
1047 N_IOVEC_META_FIELDS +
1048 (pid_is_valid(object_pid) ? N_IOVEC_OBJECT_FIELDS : 0) +
1049 client_context_extra_fields_n_iovec(c) <= m);
1050
1051 if (c) {
1052 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->pid, pid_t, pid_is_valid, PID_FMT, "_PID");
1053 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->uid, uid_t, uid_is_valid, UID_FMT, "_UID");
1054 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->gid, gid_t, gid_is_valid, GID_FMT, "_GID");
1055
1056 IOVEC_ADD_STRING_FIELD(iovec, n, c->comm, "_COMM"); /* At most TASK_COMM_LENGTH (16 bytes) */
1057 IOVEC_ADD_STRING_FIELD(iovec, n, c->exe, "_EXE"); /* A path, so at most PATH_MAX (4096 bytes) */
1058
1059 if (c->cmdline)
1060 /* At most _SC_ARG_MAX (2MB usually), which is too much to put on stack.
1061 * Let's use a heap allocation for this one. */
1062 cmdline1 = set_iovec_string_field(iovec, &n, "_CMDLINE=", c->cmdline);
1063
1064 IOVEC_ADD_STRING_FIELD(iovec, n, c->capeff, "_CAP_EFFECTIVE"); /* Read from /proc/.../status */
1065 IOVEC_ADD_SIZED_FIELD(iovec, n, c->label, c->label_size, "_SELINUX_CONTEXT");
1066 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->auditid, uint32_t, audit_session_is_valid, "%" PRIu32, "_AUDIT_SESSION");
1067 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->loginuid, uid_t, uid_is_valid, UID_FMT, "_AUDIT_LOGINUID");
1068
1069 IOVEC_ADD_STRING_FIELD(iovec, n, c->cgroup, "_SYSTEMD_CGROUP"); /* A path */
1070 IOVEC_ADD_STRING_FIELD(iovec, n, c->session, "_SYSTEMD_SESSION");
1071 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->owner_uid, uid_t, uid_is_valid, UID_FMT, "_SYSTEMD_OWNER_UID");
1072 IOVEC_ADD_STRING_FIELD(iovec, n, c->unit, "_SYSTEMD_UNIT"); /* Unit names are bounded by UNIT_NAME_MAX */
1073 IOVEC_ADD_STRING_FIELD(iovec, n, c->user_unit, "_SYSTEMD_USER_UNIT");
1074 IOVEC_ADD_STRING_FIELD(iovec, n, c->slice, "_SYSTEMD_SLICE");
1075 IOVEC_ADD_STRING_FIELD(iovec, n, c->user_slice, "_SYSTEMD_USER_SLICE");
1076
1077 IOVEC_ADD_ID128_FIELD(iovec, n, c->invocation_id, "_SYSTEMD_INVOCATION_ID");
1078
1079 if (c->extra_fields_n_iovec > 0) {
1080 memcpy(iovec + n, c->extra_fields_iovec, c->extra_fields_n_iovec * sizeof(struct iovec));
1081 n += c->extra_fields_n_iovec;
1082 }
1083 }
1084
1085 assert(n <= m);
1086
1087 if (pid_is_valid(object_pid) && client_context_get(s, object_pid, NULL, NULL, 0, NULL, &o) >= 0) {
1088
1089 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->pid, pid_t, pid_is_valid, PID_FMT, "OBJECT_PID");
1090 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->uid, uid_t, uid_is_valid, UID_FMT, "OBJECT_UID");
1091 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->gid, gid_t, gid_is_valid, GID_FMT, "OBJECT_GID");
1092
1093 /* See above for size limits, only ->cmdline may be large, so use a heap allocation for it. */
1094 IOVEC_ADD_STRING_FIELD(iovec, n, o->comm, "OBJECT_COMM");
1095 IOVEC_ADD_STRING_FIELD(iovec, n, o->exe, "OBJECT_EXE");
1096 if (o->cmdline)
1097 cmdline2 = set_iovec_string_field(iovec, &n, "OBJECT_CMDLINE=", o->cmdline);
1098
1099 IOVEC_ADD_STRING_FIELD(iovec, n, o->capeff, "OBJECT_CAP_EFFECTIVE");
1100 IOVEC_ADD_SIZED_FIELD(iovec, n, o->label, o->label_size, "OBJECT_SELINUX_CONTEXT");
1101 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->auditid, uint32_t, audit_session_is_valid, "%" PRIu32, "OBJECT_AUDIT_SESSION");
1102 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->loginuid, uid_t, uid_is_valid, UID_FMT, "OBJECT_AUDIT_LOGINUID");
1103
1104 IOVEC_ADD_STRING_FIELD(iovec, n, o->cgroup, "OBJECT_SYSTEMD_CGROUP");
1105 IOVEC_ADD_STRING_FIELD(iovec, n, o->session, "OBJECT_SYSTEMD_SESSION");
1106 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->owner_uid, uid_t, uid_is_valid, UID_FMT, "OBJECT_SYSTEMD_OWNER_UID");
1107 IOVEC_ADD_STRING_FIELD(iovec, n, o->unit, "OBJECT_SYSTEMD_UNIT");
1108 IOVEC_ADD_STRING_FIELD(iovec, n, o->user_unit, "OBJECT_SYSTEMD_USER_UNIT");
1109 IOVEC_ADD_STRING_FIELD(iovec, n, o->slice, "OBJECT_SYSTEMD_SLICE");
1110 IOVEC_ADD_STRING_FIELD(iovec, n, o->user_slice, "OBJECT_SYSTEMD_USER_SLICE");
1111
1112 IOVEC_ADD_ID128_FIELD(iovec, n, o->invocation_id, "OBJECT_SYSTEMD_INVOCATION_ID");
1113 }
1114
1115 assert(n <= m);
1116
1117 if (tv) {
1118 sprintf(source_time, "_SOURCE_REALTIME_TIMESTAMP=" USEC_FMT, timeval_load(tv));
1119 iovec[n++] = IOVEC_MAKE_STRING(source_time);
1120 }
1121
1122 /* Note that strictly speaking storing the boot id here is
1123 * redundant since the entry includes this in-line
1124 * anyway. However, we need this indexed, too. */
1125 if (!isempty(s->boot_id_field))
1126 iovec[n++] = IOVEC_MAKE_STRING(s->boot_id_field);
1127
1128 if (!isempty(s->machine_id_field))
1129 iovec[n++] = IOVEC_MAKE_STRING(s->machine_id_field);
1130
1131 if (!isempty(s->hostname_field))
1132 iovec[n++] = IOVEC_MAKE_STRING(s->hostname_field);
1133
1134 if (!isempty(s->namespace_field))
1135 iovec[n++] = IOVEC_MAKE_STRING(s->namespace_field);
1136
1137 iovec[n++] = in_initrd() ? IOVEC_MAKE_STRING("_RUNTIME_SCOPE=initrd") : IOVEC_MAKE_STRING("_RUNTIME_SCOPE=system");
1138 assert(n <= m);
1139
1140 if (s->split_mode == SPLIT_UID && c && uid_is_valid(c->uid))
1141 /* Split up strictly by (non-root) UID */
1142 journal_uid = c->uid;
1143 else if (s->split_mode == SPLIT_LOGIN && c && c->uid > 0 && uid_is_valid(c->owner_uid))
1144 /* Split up by login UIDs. We do this only if the
1145 * realuid is not root, in order not to accidentally
1146 * leak privileged information to the user that is
1147 * logged by a privileged process that is part of an
1148 * unprivileged session. */
1149 journal_uid = c->owner_uid;
1150 else
1151 journal_uid = 0;
1152
1153 server_write_to_journal(s, journal_uid, iovec, n, priority);
1154 }
1155
1156 void server_driver_message(Server *s, pid_t object_pid, const char *message_id, const char *format, ...) {
1157
1158 struct iovec *iovec;
1159 size_t n = 0, k, m;
1160 va_list ap;
1161 int r;
1162
1163 assert(s);
1164 assert(format);
1165
1166 m = N_IOVEC_META_FIELDS + 5 + N_IOVEC_PAYLOAD_FIELDS + client_context_extra_fields_n_iovec(s->my_context) + N_IOVEC_OBJECT_FIELDS;
1167 iovec = newa(struct iovec, m);
1168
1169 assert_cc(3 == LOG_FAC(LOG_DAEMON));
1170 iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_FACILITY=3");
1171 iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_IDENTIFIER=systemd-journald");
1172
1173 iovec[n++] = IOVEC_MAKE_STRING("_TRANSPORT=driver");
1174 assert_cc(6 == LOG_INFO);
1175 iovec[n++] = IOVEC_MAKE_STRING("PRIORITY=6");
1176
1177 if (message_id)
1178 iovec[n++] = IOVEC_MAKE_STRING(message_id);
1179 k = n;
1180
1181 va_start(ap, format);
1182 r = log_format_iovec(iovec, m, &n, false, 0, format, ap);
1183 /* Error handling below */
1184 va_end(ap);
1185
1186 if (r >= 0)
1187 server_dispatch_message_real(s, iovec, n, m, s->my_context, /* tv= */ NULL, LOG_INFO, object_pid);
1188
1189 while (k < n)
1190 free(iovec[k++].iov_base);
1191
1192 if (r < 0) {
1193 /* We failed to format the message. Emit a warning instead. */
1194 char buf[LINE_MAX];
1195
1196 errno = -r;
1197 xsprintf(buf, "MESSAGE=Entry printing failed: %m");
1198
1199 n = 3;
1200 iovec[n++] = IOVEC_MAKE_STRING("PRIORITY=4");
1201 iovec[n++] = IOVEC_MAKE_STRING(buf);
1202 server_dispatch_message_real(s, iovec, n, m, s->my_context, /* tv= */ NULL, LOG_INFO, object_pid);
1203 }
1204 }
1205
1206 void server_dispatch_message(
1207 Server *s,
1208 struct iovec *iovec, size_t n, size_t m,
1209 ClientContext *c,
1210 const struct timeval *tv,
1211 int priority,
1212 pid_t object_pid) {
1213
1214 uint64_t available = 0;
1215 int rl;
1216
1217 assert(s);
1218 assert(iovec || n == 0);
1219
1220 if (n == 0)
1221 return;
1222
1223 if (LOG_PRI(priority) > s->max_level_store)
1224 return;
1225
1226 /* Stop early in case the information will not be stored
1227 * in a journal. */
1228 if (s->storage == STORAGE_NONE)
1229 return;
1230
1231 if (c && c->unit) {
1232 (void) server_determine_space(s, &available, /* limit= */ NULL);
1233
1234 rl = journal_ratelimit_test(s->ratelimit, c->unit, c->log_ratelimit_interval, c->log_ratelimit_burst, priority & LOG_PRIMASK, available);
1235 if (rl == 0)
1236 return;
1237
1238 /* Write a suppression message if we suppressed something */
1239 if (rl > 1)
1240 server_driver_message(s, c->pid,
1241 "MESSAGE_ID=" SD_MESSAGE_JOURNAL_DROPPED_STR,
1242 LOG_MESSAGE("Suppressed %i messages from %s", rl - 1, c->unit),
1243 "N_DROPPED=%i", rl - 1,
1244 NULL);
1245 }
1246
1247 server_dispatch_message_real(s, iovec, n, m, c, tv, priority, object_pid);
1248 }
1249
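/* Copy every entry from the runtime journal in /run into the system journal in /var, rotating and
 * retrying once on write failures. Afterwards the runtime journal is closed and removed, and the
 * "flushed" flag file in the runtime directory is touched to record that flushing happened. */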
1250 int server_flush_to_var(Server *s, bool require_flag_file) {
1251 sd_journal *j = NULL;
1252 const char *fn;
1253 unsigned n = 0;
1254 usec_t start;
1255 int r, k;
1256
1257 assert(s);
1258
1259 if (!IN_SET(s->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
1260 return 0;
1261
1262 if (s->namespace) /* Flushing concept does not exist for namespace instances */
1263 return 0;
1264
1265 if (!s->runtime_journal) /* Nothing to flush? */
1266 return 0;
1267
1268 if (require_flag_file && !server_flushed_flag_is_set(s))
1269 return 0;
1270
1271 (void) server_system_journal_open(s, /* flush_requested= */ true, /* relinquish_requested= */ false);
1272
1273 if (!s->system_journal)
1274 return 0;
1275
1276 log_debug("Flushing to %s...", s->system_storage.path);
1277
1278 start = now(CLOCK_MONOTONIC);
1279
1280 r = sd_journal_open(&j, SD_JOURNAL_RUNTIME_ONLY);
1281 if (r < 0)
1282 return log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT,
1283 "Failed to read runtime journal: %m");
1284
1285 sd_journal_set_data_threshold(j, 0);
1286
1287 SD_JOURNAL_FOREACH(j) {
1288 Object *o = NULL;
1289 JournalFile *f;
1290
1291 f = j->current_file;
1292 assert(f && f->current_offset > 0);
1293
1294 n++;
1295
1296 r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
1297 if (r < 0) {
1298 log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "Can't read entry: %m");
1299 goto finish;
1300 }
1301
1302 r = journal_file_copy_entry(
1303 f,
1304 s->system_journal->file,
1305 o,
1306 f->current_offset,
1307 &s->seqnum->seqnum,
1308 &s->seqnum->id);
1309 if (r >= 0)
1310 continue;
1311
1312 if (!shall_try_append_again(s->system_journal->file, r)) {
1313 log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "Can't write entry: %m");
1314 goto finish;
1315 }
1316
1317 log_ratelimit_info(JOURNAL_LOG_RATELIMIT, "Rotating system journal.");
1318
1319 server_rotate(s);
1320 server_vacuum(s, false);
1321
1322 if (!s->system_journal) {
1323 log_ratelimit_notice(JOURNAL_LOG_RATELIMIT,
1324 "Didn't flush runtime journal since rotation of system journal wasn't successful.");
1325 r = -EIO;
1326 goto finish;
1327 }
1328
1329 log_debug("Retrying write.");
1330 r = journal_file_copy_entry(
1331 f,
1332 s->system_journal->file,
1333 o,
1334 f->current_offset,
1335 &s->seqnum->seqnum,
1336 &s->seqnum->id);
1337 if (r < 0) {
1338 log_ratelimit_error_errno(r, JOURNAL_LOG_RATELIMIT, "Can't write entry: %m");
1339 goto finish;
1340 }
1341 }
1342
1343 r = 0;
1344
1345 finish:
1346 if (s->system_journal)
1347 journal_file_post_change(s->system_journal->file);
1348
1349 s->runtime_journal = managed_journal_file_close(s->runtime_journal);
1350
1351 if (r >= 0)
1352 (void) rm_rf(s->runtime_storage.path, REMOVE_ROOT);
1353
1354 sd_journal_close(j);
1355
1356 server_driver_message(s, 0, NULL,
1357 LOG_MESSAGE("Time spent on flushing to %s is %s for %u entries.",
1358 s->system_storage.path,
1359 FORMAT_TIMESPAN(usec_sub_unsigned(now(CLOCK_MONOTONIC), start), 0),
1360 n),
1361 NULL);
1362
1363 fn = strjoina(s->runtime_directory, "/flushed");
1364 k = touch(fn);
1365 if (k < 0)
1366 log_ratelimit_warning_errno(k, JOURNAL_LOG_RATELIMIT,
1367 "Failed to touch %s, ignoring: %m", fn);
1368
1369 server_refresh_idle_timer(s);
1370 return r;
1371 }
1372
1373 static int server_relinquish_var(Server *s) {
1374 const char *fn;
1375 assert(s);
1376
1377 if (s->storage == STORAGE_NONE)
1378 return 0;
1379
1380 if (s->namespace) /* Concept does not exist for namespaced instances */
1381 return -EOPNOTSUPP;
1382
1383 if (s->runtime_journal && !s->system_journal)
1384 return 0;
1385
1386 log_debug("Relinquishing %s...", s->system_storage.path);
1387
1388 (void) server_system_journal_open(s, /* flush_requested= */ false, /* relinquish_requested= */ true);
1389
1390 s->system_journal = managed_journal_file_close(s->system_journal);
1391 ordered_hashmap_clear_with_destructor(s->user_journals, managed_journal_file_close);
1392 set_clear_with_destructor(s->deferred_closes, managed_journal_file_close);
1393
1394 fn = strjoina(s->runtime_directory, "/flushed");
1395 if (unlink(fn) < 0 && errno != ENOENT)
1396 log_ratelimit_warning_errno(errno, JOURNAL_LOG_RATELIMIT,
1397 "Failed to unlink %s, ignoring: %m", fn);
1398
1399 server_refresh_idle_timer(s);
1400 return 0;
1401 }
1402
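/* Common receive handler for the native, syslog and audit datagram sockets: read one datagram
 * together with its control data (credentials, timestamp, SELinux label, passed fds) and dispatch it
 * to the protocol-specific processing function. */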
1403 int server_process_datagram(
1404 sd_event_source *es,
1405 int fd,
1406 uint32_t revents,
1407 void *userdata) {
1408
1409 size_t label_len = 0, m;
1410 Server *s = ASSERT_PTR(userdata);
1411 struct ucred *ucred = NULL;
1412 struct timeval tv_buf, *tv = NULL;
1413 struct cmsghdr *cmsg;
1414 char *label = NULL;
1415 struct iovec iovec;
1416 ssize_t n;
1417 int *fds = NULL, v = 0;
1418 size_t n_fds = 0;
1419
1420 /* We use NAME_MAX space for the SELinux label here. The kernel currently enforces no limit, but
1421 * according to suggestions from the SELinux people this will change and it will probably be
1422 * identical to NAME_MAX. For now we use that, but this should be updated one day when the final
1423 * limit is known.
1424 *
1425 * Here, we need to explicitly initialize the buffer with zero, as glibc has a bug in
1426 * __convert_scm_timestamps(), which assumes the buffer is initialized. See #20741. */
1427 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred)) +
1428 CMSG_SPACE_TIMEVAL +
1429 CMSG_SPACE(sizeof(int)) + /* fd */
1430 CMSG_SPACE(NAME_MAX) /* selinux label */) control = {};
1431
1432 union sockaddr_union sa = {};
1433
1434 struct msghdr msghdr = {
1435 .msg_iov = &iovec,
1436 .msg_iovlen = 1,
1437 .msg_control = &control,
1438 .msg_controllen = sizeof(control),
1439 .msg_name = &sa,
1440 .msg_namelen = sizeof(sa),
1441 };
1442
1443 assert(fd == s->native_fd || fd == s->syslog_fd || fd == s->audit_fd);
1444
1445 if (revents != EPOLLIN)
1446 return log_error_errno(SYNTHETIC_ERRNO(EIO),
1447 "Got invalid event from epoll for datagram fd: %" PRIx32,
1448 revents);
1449
1450 /* Try to get the right size, if we can. (Not all sockets support SIOCINQ, hence we just try, but don't rely on
1451 * it.) */
1452 (void) ioctl(fd, SIOCINQ, &v);
1453
1454 /* Fix it up, if it is too small. We use the same fixed value as auditd here. Awful! */
1455 m = PAGE_ALIGN(MAX3((size_t) v + 1,
1456 (size_t) LINE_MAX,
1457 ALIGN(sizeof(struct nlmsghdr)) + ALIGN((size_t) MAX_AUDIT_MESSAGE_LENGTH)) + 1);
1458
1459 if (!GREEDY_REALLOC(s->buffer, m))
1460 return log_oom();
1461
1462 iovec = IOVEC_MAKE(s->buffer, MALLOC_ELEMENTSOF(s->buffer) - 1); /* Leave room for trailing NUL we add later */
1463
1464 n = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC);
1465 if (n < 0) {
1466 if (ERRNO_IS_TRANSIENT(n))
1467 return 0;
1468 if (n == -EXFULL) {
1469 log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
1470 "Got message with truncated control data (too many fds sent?), ignoring.");
1471 return 0;
1472 }
1473 return log_ratelimit_error_errno(n, JOURNAL_LOG_RATELIMIT, "recvmsg() failed: %m");
1474 }
1475
1476 CMSG_FOREACH(cmsg, &msghdr)
1477 if (cmsg->cmsg_level == SOL_SOCKET &&
1478 cmsg->cmsg_type == SCM_CREDENTIALS &&
1479 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
1480 assert(!ucred);
1481 ucred = CMSG_TYPED_DATA(cmsg, struct ucred);
1482 } else if (cmsg->cmsg_level == SOL_SOCKET &&
1483 cmsg->cmsg_type == SCM_SECURITY) {
1484 assert(!label);
1485 label = CMSG_TYPED_DATA(cmsg, char);
1486 label_len = cmsg->cmsg_len - CMSG_LEN(0);
1487 } else if (cmsg->cmsg_level == SOL_SOCKET &&
1488 cmsg->cmsg_type == SCM_TIMESTAMP &&
1489 cmsg->cmsg_len == CMSG_LEN(sizeof(struct timeval))) {
1490 assert(!tv);
1491 tv = memcpy(&tv_buf, CMSG_DATA(cmsg), sizeof(struct timeval));
1492 } else if (cmsg->cmsg_level == SOL_SOCKET &&
1493 cmsg->cmsg_type == SCM_RIGHTS) {
1494 assert(!fds);
1495 fds = CMSG_TYPED_DATA(cmsg, int);
1496 n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
1497 }
1498
1499 /* And a trailing NUL, just in case */
1500 s->buffer[n] = 0;
1501
1502 if (fd == s->syslog_fd) {
1503 if (n > 0 && n_fds == 0)
1504 server_process_syslog_message(s, s->buffer, n, ucred, tv, label, label_len);
1505 else if (n_fds > 0)
1506 log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
1507 "Got file descriptors via syslog socket. Ignoring.");
1508
1509 } else if (fd == s->native_fd) {
1510 if (n > 0 && n_fds == 0)
1511 server_process_native_message(s, s->buffer, n, ucred, tv, label, label_len);
1512 else if (n == 0 && n_fds == 1)
1513 server_process_native_file(s, fds[0], ucred, tv, label, label_len);
1514 else if (n_fds > 0)
1515 log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
1516 "Got too many file descriptors via native socket. Ignoring.");
1517
1518 } else {
1519 assert(fd == s->audit_fd);
1520
1521 if (n > 0 && n_fds == 0)
1522 server_process_audit_message(s, s->buffer, n, ucred, &sa, msghdr.msg_namelen);
1523 else if (n_fds > 0)
1524 log_ratelimit_warning(JOURNAL_LOG_RATELIMIT,
1525 "Got file descriptors via audit socket. Ignoring.");
1526 }
1527
1528 close_many(fds, n_fds);
1529
1530 server_refresh_idle_timer(s);
1531 return 0;
1532 }
1533
1534 static void server_full_flush(Server *s) {
1535 assert(s);
1536
1537 (void) server_flush_to_var(s, false);
1538 server_sync(s);
1539 server_vacuum(s, false);
1540
1541 server_space_usage_message(s, NULL);
1542
1543 server_refresh_idle_timer(s);
1544 }
1545
1546 static int dispatch_sigusr1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1547 Server *s = ASSERT_PTR(userdata);
1548
1549 if (s->namespace) {
1550 log_error("Received SIGUSR1 signal from PID %u, but flushing runtime journals is not supported for namespaced instances.", si->ssi_pid);
1551 return 0;
1552 }
1553
1554 log_info("Received SIGUSR1 signal from PID %u, as request to flush runtime journal.", si->ssi_pid);
1555 server_full_flush(s);
1556
1557 return 0;
1558 }
1559
1560 static void server_full_rotate(Server *s) {
1561 const char *fn;
1562 int r;
1563
1564 assert(s);
1565
1566 server_rotate(s);
1567 server_vacuum(s, true);
1568
1569 if (s->system_journal)
1570 patch_min_use(&s->system_storage);
1571 if (s->runtime_journal)
1572 patch_min_use(&s->runtime_storage);
1573
1574 /* Let clients know when the most recent rotation happened. */
1575 fn = strjoina(s->runtime_directory, "/rotated");
1576 r = write_timestamp_file_atomic(fn, now(CLOCK_MONOTONIC));
1577 if (r < 0)
1578 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
1579 "Failed to write %s, ignoring: %m", fn);
1580 }
1581
1582 static int dispatch_sigusr2(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1583 Server *s = ASSERT_PTR(userdata);
1584
1585 log_info("Received SIGUSR2 signal from PID %u, as request to rotate journal, rotating.", si->ssi_pid);
1586 server_full_rotate(s);
1587
1588 return 0;
1589 }
1590
1591 static int dispatch_sigterm(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1592 _cleanup_(sd_event_source_disable_unrefp) sd_event_source *news = NULL;
1593 Server *s = ASSERT_PTR(userdata);
1594 int r;
1595
1596 log_received_signal(LOG_INFO, si);
1597
1598 (void) sd_event_source_set_enabled(es, SD_EVENT_OFF); /* Make sure this handler is called at most once */
1599
1600 /* So on one hand we want to ensure that SIGTERMs are definitely handled in appropriate, bounded
1601 * time. On the other hand we want that everything pending is first comprehensively processed and
1602 * written to disk. These goals are incompatible, hence we try to find a middle ground: we'll process
1603 * SIGTERM with high priority, but from the handler (this one right here) we'll install two new event
1604 * sources: one low priority idle one that will issue the exit once everything else is processed (and
1605 * which is hopefully the regular, clean codepath); and one high priority timer that acts as safety
1606 * net: if our idle handler isn't run within 10s, we'll exit anyway.
1607 *
1608 * TLDR: we'll exit either when everything is processed, or after 10s max, depending on what happens
1609 * first.
1610 *
1611 * Note that exiting before the idle event is hit doesn't typically mean that we lose any data, as
1612 * messages will remain queued in the sockets they came in from, and thus can be processed when we
1613 * start up next – unless we are going down for the final system shutdown, in which case everything
1614 * is lost. */
1615
1616 r = sd_event_add_defer(s->event, &news, NULL, NULL); /* NULL handler means → exit when triggered */
1617 if (r < 0) {
1618 log_error_errno(r, "Failed to allocate exit idle event handler: %m");
1619 goto fail;
1620 }
1621
1622 (void) sd_event_source_set_description(news, "exit-idle");
1623
1624 /* Run everything relevant before this. */
1625 r = sd_event_source_set_priority(news, SD_EVENT_PRIORITY_NORMAL+20);
1626 if (r < 0) {
1627 log_error_errno(r, "Failed to adjust priority of exit idle event handler: %m");
1628 goto fail;
1629 }
1630
1631 /* Give up ownership, so that this event source is freed automatically when the event loop is freed. */
1632 r = sd_event_source_set_floating(news, true);
1633 if (r < 0) {
1634 log_error_errno(r, "Failed to make exit idle event handler floating: %m");
1635 goto fail;
1636 }
1637
1638 news = sd_event_source_unref(news);
1639
1640 r = sd_event_add_time_relative(s->event, &news, CLOCK_MONOTONIC, 10 * USEC_PER_SEC, 0, NULL, NULL);
1641 if (r < 0) {
1642 log_error_errno(r, "Failed to allocate exit timeout event handler: %m");
1643 goto fail;
1644 }
1645
1646 (void) sd_event_source_set_description(news, "exit-timeout");
1647
1648 r = sd_event_source_set_priority(news, SD_EVENT_PRIORITY_IMPORTANT-20); /* This is a safety net, with highest priority */
1649 if (r < 0) {
1650 log_error_errno(r, "Failed to adjust priority of exit timeout event handler: %m");
1651 goto fail;
1652 }
1653
1654 r = sd_event_source_set_floating(news, true);
1655 if (r < 0) {
1656 log_error_errno(r, "Failed to make exit timeout event handler floating: %m");
1657 goto fail;
1658 }
1659
1660 news = sd_event_source_unref(news);
1661
1662 log_debug("Exit event sources are now pending.");
1663 return 0;
1664
1665 fail:
1666 sd_event_exit(s->event, 0);
1667 return 0;
1668 }
1669
1670 static void server_full_sync(Server *s) {
1671 const char *fn;
1672 int r;
1673
1674 assert(s);
1675
1676 server_sync(s);
1677
1678 /* Let clients know when the most recent sync happened. */
1679 fn = strjoina(s->runtime_directory, "/synced");
1680 r = write_timestamp_file_atomic(fn, now(CLOCK_MONOTONIC));
1681 if (r < 0)
1682 log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
1683 "Failed to write %s, ignoring: %m", fn);
1684
1685 return;
1686 }
1687
1688 static int dispatch_sigrtmin1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1689 Server *s = ASSERT_PTR(userdata);
1690
1691 log_debug("Received SIGRTMIN1 signal from PID %u, as request to sync.", si->ssi_pid);
1692 server_full_sync(s);
1693
1694 return 0;
1695 }
1696
1697 static int server_setup_signals(Server *s) {
1698 int r;
1699
1700 assert(s);
1701
1702 assert_se(sigprocmask_many(SIG_SETMASK, NULL, SIGINT, SIGTERM, SIGUSR1, SIGUSR2, SIGRTMIN+1, SIGRTMIN+18, -1) >= 0);
1703
1704 r = sd_event_add_signal(s->event, &s->sigusr1_event_source, SIGUSR1, dispatch_sigusr1, s);
1705 if (r < 0)
1706 return r;
1707
1708 r = sd_event_add_signal(s->event, &s->sigusr2_event_source, SIGUSR2, dispatch_sigusr2, s);
1709 if (r < 0)
1710 return r;
1711
1712 r = sd_event_add_signal(s->event, &s->sigterm_event_source, SIGTERM, dispatch_sigterm, s);
1713 if (r < 0)
1714 return r;
1715
1716 /* Let's process SIGTERM early, so that we definitely react to it */
1717 r = sd_event_source_set_priority(s->sigterm_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
1718 if (r < 0)
1719 return r;
1720
1721 /* When journald is invoked on the terminal (when debugging), it's useful if C-c is handled
1722 * equivalently to SIGTERM. */
1723 r = sd_event_add_signal(s->event, &s->sigint_event_source, SIGINT, dispatch_sigterm, s);
1724 if (r < 0)
1725 return r;
1726
1727 r = sd_event_source_set_priority(s->sigint_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
1728 if (r < 0)
1729 return r;
1730
1731 /* SIGRTMIN+1 causes an immediate sync. We process this very late, so that everything else queued at
1732 * this point is really written to disk. Clients can watch /run/systemd/journal/synced with inotify
1733 * until its mtime changes to see when a sync happened. */
1734 r = sd_event_add_signal(s->event, &s->sigrtmin1_event_source, SIGRTMIN+1, dispatch_sigrtmin1, s);
1735 if (r < 0)
1736 return r;
1737
1738 r = sd_event_source_set_priority(s->sigrtmin1_event_source, SD_EVENT_PRIORITY_NORMAL+15);
1739 if (r < 0)
1740 return r;
1741
1742 r = sd_event_add_signal(s->event, NULL, SIGRTMIN+18, sigrtmin18_handler, &s->sigrtmin18_info);
1743 if (r < 0)
1744 return r;
1745
1746 return 0;
1747 }
1748
1749 static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
1750 Server *s = ASSERT_PTR(data);
1751 int r;
1752
1753 if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_syslog")) {
1754
1755 r = value ? parse_boolean(value) : true;
1756 if (r < 0)
1757 log_warning("Failed to parse forward to syslog switch \"%s\". Ignoring.", value);
1758 else
1759 s->forward_to_syslog = r;
1760
1761 } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_kmsg")) {
1762
1763 r = value ? parse_boolean(value) : true;
1764 if (r < 0)
1765 log_warning("Failed to parse forward to kmsg switch \"%s\". Ignoring.", value);
1766 else
1767 s->forward_to_kmsg = r;
1768
1769 } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_console")) {
1770
1771 r = value ? parse_boolean(value) : true;
1772 if (r < 0)
1773 log_warning("Failed to parse forward to console switch \"%s\". Ignoring.", value);
1774 else
1775 s->forward_to_console = r;
1776
1777 } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_wall")) {
1778
1779 r = value ? parse_boolean(value) : true;
1780 if (r < 0)
1781 log_warning("Failed to parse forward to wall switch \"%s\". Ignoring.", value);
1782 else
1783 s->forward_to_wall = r;
1784
1785 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_console")) {
1786
1787 if (proc_cmdline_value_missing(key, value))
1788 return 0;
1789
1790 r = log_level_from_string(value);
1791 if (r < 0)
1792 log_warning("Failed to parse max level console value \"%s\". Ignoring.", value);
1793 else
1794 s->max_level_console = r;
1795
1796 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_store")) {
1797
1798 if (proc_cmdline_value_missing(key, value))
1799 return 0;
1800
1801 r = log_level_from_string(value);
1802 if (r < 0)
1803 log_warning("Failed to parse max level store value \"%s\". Ignoring.", value);
1804 else
1805 s->max_level_store = r;
1806
1807 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_syslog")) {
1808
1809 if (proc_cmdline_value_missing(key, value))
1810 return 0;
1811
1812 r = log_level_from_string(value);
1813 if (r < 0)
1814 log_warning("Failed to parse max level syslog value \"%s\". Ignoring.", value);
1815 else
1816 s->max_level_syslog = r;
1817
1818 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_kmsg")) {
1819
1820 if (proc_cmdline_value_missing(key, value))
1821 return 0;
1822
1823 r = log_level_from_string(value);
1824 if (r < 0)
1825 log_warning("Failed to parse max level kmsg value \"%s\". Ignoring.", value);
1826 else
1827 s->max_level_kmsg = r;
1828
1829 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_wall")) {
1830
1831 if (proc_cmdline_value_missing(key, value))
1832 return 0;
1833
1834 r = log_level_from_string(value);
1835 if (r < 0)
1836 log_warning("Failed to parse max level wall value \"%s\". Ignoring.", value);
1837 else
1838 s->max_level_wall = r;
1839
1840 } else if (startswith(key, "systemd.journald"))
1841 log_warning("Unknown journald kernel command line option \"%s\". Ignoring.", key);
1842
1843 /* do not warn about state here, since systemd probably already did */
1844 return 0;
1845 }
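
/* Example (editor's illustration): the keys handled above correspond to journald.conf settings and can
 * be set on the kernel command line, e.g.:
 *
 *     systemd.journald.forward_to_console=1 systemd.journald.max_level_console=debug
 *
 * Boolean values go through parse_boolean(), log levels through log_level_from_string(), exactly as in
 * the parser above; invalid values are logged and ignored. */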
1846
1847 static int server_parse_config_file(Server *s) {
1848 const char *conf_file = "journald.conf";
1849
1850 assert(s);
1851
1852 if (s->namespace)
1853 conf_file = strjoina("journald@", s->namespace, ".conf");
1854
1855 return config_parse_config_file(conf_file, "Journal\0",
1856 config_item_perf_lookup, journald_gperf_lookup,
1857 CONFIG_PARSE_WARN, s);
1858 }
1859
1860 static int server_dispatch_sync(sd_event_source *es, usec_t t, void *userdata) {
1861 Server *s = ASSERT_PTR(userdata);
1862
1863 server_sync(s);
1864 return 0;
1865 }
1866
1867 int server_schedule_sync(Server *s, int priority) {
1868 int r;
1869
1870 assert(s);
1871
1872 if (priority <= LOG_CRIT) {
1873 /* Immediately sync to disk when this is of priority CRIT, ALERT, EMERG */
1874 server_sync(s);
1875 return 0;
1876 }
1877
1878 if (s->sync_scheduled)
1879 return 0;
1880
1881 if (s->sync_interval_usec > 0) {
1882
1883 if (!s->sync_event_source) {
1884 r = sd_event_add_time_relative(
1885 s->event,
1886 &s->sync_event_source,
1887 CLOCK_MONOTONIC,
1888 s->sync_interval_usec, 0,
1889 server_dispatch_sync, s);
1890 if (r < 0)
1891 return r;
1892
1893 r = sd_event_source_set_priority(s->sync_event_source, SD_EVENT_PRIORITY_IMPORTANT);
1894 } else {
1895 r = sd_event_source_set_time_relative(s->sync_event_source, s->sync_interval_usec);
1896 if (r < 0)
1897 return r;
1898
1899 r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_ONESHOT);
1900 }
1901 if (r < 0)
1902 return r;
1903
1904 s->sync_scheduled = true;
1905 }
1906
1907 return 0;
1908 }
1909
1910 static int dispatch_hostname_change(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
1911 Server *s = ASSERT_PTR(userdata);
1912
1913 server_cache_hostname(s);
1914 return 0;
1915 }
1916
1917 static int server_open_hostname(Server *s) {
1918 int r;
1919
1920 assert(s);
1921
1922 s->hostname_fd = open("/proc/sys/kernel/hostname",
1923 O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
1924 if (s->hostname_fd < 0)
1925 return log_error_errno(errno, "Failed to open /proc/sys/kernel/hostname: %m");
1926
1927 r = sd_event_add_io(s->event, &s->hostname_event_source, s->hostname_fd, 0, dispatch_hostname_change, s);
1928 if (r < 0) {
1929 /* Kernels prior to 3.2 don't support polling this file. Ignore
1930 * the failure. */
1931 if (r == -EPERM) {
1932 log_warning_errno(r, "Failed to register hostname fd in event loop, ignoring: %m");
1933 s->hostname_fd = safe_close(s->hostname_fd);
1934 return 0;
1935 }
1936
1937 return log_error_errno(r, "Failed to register hostname fd in event loop: %m");
1938 }
1939
1940 r = sd_event_source_set_priority(s->hostname_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
1941 if (r < 0)
1942 return log_error_errno(r, "Failed to adjust priority of hostname event source: %m");
1943
1944 return 0;
1945 }
1946
1947 static int dispatch_notify_event(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
1948 Server *s = ASSERT_PTR(userdata);
1949 int r;
1950
1951 assert(s->notify_event_source == es);
1952 assert(s->notify_fd == fd);
1953
1954 /* The $NOTIFY_SOCKET is writable again, so send exactly one
1955 * message on it. Either it's the watchdog event, the initial
1956 * READY=1 event or a stdout stream event. If there's nothing
1957 * to write anymore, turn our event source off. The next time
1958 * there's something to send it will be turned on again. */
1959
1960 if (!s->sent_notify_ready) {
1961 static const char p[] = "READY=1\n"
1962 "STATUS=Processing requests...";
1963
1964 if (send(s->notify_fd, p, strlen(p), MSG_DONTWAIT) < 0) {
1965 if (errno == EAGAIN)
1966 return 0;
1967
1968 return log_error_errno(errno, "Failed to send READY=1 notification message: %m");
1969 }
1970
1971 s->sent_notify_ready = true;
1972 log_debug("Sent READY=1 notification.");
1973
1974 } else if (s->send_watchdog) {
1975 static const char p[] = "WATCHDOG=1";
1976
1977 if (send(s->notify_fd, p, strlen(p), MSG_DONTWAIT) < 0) {
1978 if (errno == EAGAIN)
1979 return 0;
1980
1981 return log_error_errno(errno, "Failed to send WATCHDOG=1 notification message: %m");
1982 }
1983
1984 s->send_watchdog = false;
1985 log_debug("Sent WATCHDOG=1 notification.");
1986
1987 } else if (s->stdout_streams_notify_queue)
1988 /* Dispatch one stream notification event */
1989 stdout_stream_send_notify(s->stdout_streams_notify_queue);
1990
1991 /* Leave us enabled if there's still more to do. */
1992 if (s->send_watchdog || s->stdout_streams_notify_queue)
1993 return 0;
1994
1995 /* There was nothing to do anymore, let's turn ourselves off. */
1996 r = sd_event_source_set_enabled(es, SD_EVENT_OFF);
1997 if (r < 0)
1998 return log_error_errno(r, "Failed to turn off notify event source: %m");
1999
2000 return 0;
2001 }
2002
2003 static int dispatch_watchdog(sd_event_source *es, uint64_t usec, void *userdata) {
2004 Server *s = ASSERT_PTR(userdata);
2005 int r;
2006
2007 s->send_watchdog = true;
2008
2009 r = sd_event_source_set_enabled(s->notify_event_source, SD_EVENT_ON);
2010 if (r < 0)
2011 log_warning_errno(r, "Failed to turn on notify event source: %m");
2012
2013 r = sd_event_source_set_time(s->watchdog_event_source, usec + s->watchdog_usec / 2);
2014 if (r < 0)
2015 return log_error_errno(r, "Failed to restart watchdog event source: %m");
2016
2017 r = sd_event_source_set_enabled(s->watchdog_event_source, SD_EVENT_ON);
2018 if (r < 0)
2019 return log_error_errno(r, "Failed to enable watchdog event source: %m");
2020
2021 return 0;
2022 }
2023
2024 static int server_connect_notify(Server *s) {
2025 union sockaddr_union sa;
2026 socklen_t sa_len;
2027 const char *e;
2028 int r;
2029
2030 assert(s);
2031 assert(s->notify_fd < 0);
2032 assert(!s->notify_event_source);
2033
2034 /*
2035 * So here's the problem: we'd like to send notification messages to PID 1, but we cannot do that via
2036 * sd_notify(), since that's synchronous, and we might end up blocking on it. Specifically: given
2037 * that PID 1 might block on dbus-daemon during IPC, and dbus-daemon is logging to us, and might
2038 * hence block on us, we might end up in a deadlock if we block on sending PID 1 notification
2039 * messages, by generating a full blocking cycle. To avoid this, let's create a non-blocking
2040 * socket, connect it to the notification socket, and then wait for POLLOUT before we send
2041 * anything. This should effectively avoid any deadlocks, as we'll never block on PID 1, hence PID 1
2042 * can safely block on dbus-daemon which can safely block on us again.
2043 *
2044 * Don't think that this issue is real? It is, see: https://github.com/systemd/systemd/issues/1505
2045 */
2046
2047 e = getenv("NOTIFY_SOCKET");
2048 if (!e)
2049 return 0;
2050
2051 r = sockaddr_un_set_path(&sa.un, e);
2052 if (r < 0)
2053 return log_error_errno(r, "NOTIFY_SOCKET set to invalid value '%s': %m", e);
2054 sa_len = r;
2055
2056 s->notify_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
2057 if (s->notify_fd < 0)
2058 return log_error_errno(errno, "Failed to create notify socket: %m");
2059
2060 (void) fd_inc_sndbuf(s->notify_fd, NOTIFY_SNDBUF_SIZE);
2061
2062 r = connect(s->notify_fd, &sa.sa, sa_len);
2063 if (r < 0)
2064 return log_error_errno(errno, "Failed to connect to notify socket: %m");
2065
2066 r = sd_event_add_io(s->event, &s->notify_event_source, s->notify_fd, EPOLLOUT, dispatch_notify_event, s);
2067 if (r < 0)
2068 return log_error_errno(r, "Failed to watch notification socket: %m");
2069
2070 if (sd_watchdog_enabled(false, &s->watchdog_usec) > 0) {
2071 s->send_watchdog = true;
2072
2073 r = sd_event_add_time_relative(s->event, &s->watchdog_event_source, CLOCK_MONOTONIC, s->watchdog_usec/2, s->watchdog_usec/4, dispatch_watchdog, s);
2074 if (r < 0)
2075 return log_error_errno(r, "Failed to add watchdog time event: %m");
2076 }
2077
2078 /* This should fire pretty soon, which we'll use to send the READY=1 event. */
2079
2080 return 0;
2081 }
2082
2083 static int synchronize_second_half(sd_event_source *event_source, void *userdata) {
2084 Varlink *link = ASSERT_PTR(userdata);
2085 Server *s;
2086 int r;
2087
2088 assert_se(s = varlink_get_userdata(link));
2089
2090 /* This is the "second half" of the Synchronize() varlink method. This function is called as a deferred
2091 * event source at a low priority to ensure the synchronization completes after all queued log
2092 * messages are processed. */
2093 server_full_sync(s);
2094
2095 /* Let's get rid of the event source now, by marking it as non-floating again. It then has no ref
2096 * anymore and is immediately destroyed after we return from this function, i.e. from this event
2097 * source handler at the end. */
2098 r = sd_event_source_set_floating(event_source, false);
2099 if (r < 0)
2100 return log_error_errno(r, "Failed to mark event source as non-floating: %m");
2101
2102 return varlink_reply(link, NULL);
2103 }
2104
2105 static void synchronize_destroy(void *userdata) {
2106 varlink_unref(userdata);
2107 }
2108
2109 static int vl_method_synchronize(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
2110 _cleanup_(sd_event_source_unrefp) sd_event_source *event_source = NULL;
2111 Server *s = ASSERT_PTR(userdata);
2112 int r;
2113
2114 assert(link);
2115
2116 if (json_variant_elements(parameters) > 0)
2117 return varlink_error_invalid_parameter(link, parameters);
2118
2119 log_info("Received client request to sync journal.");
2120
2121 /* We don't do the main work now, but instead enqueue a deferred event loop job which will do
2122 * it. That job is scheduled at low priority, so that we return from this method call only after all
2123 * queued but not yet processed log messages are written to disk, and so that the return of this
2124 * method call can be used as a nice synchronization point. */
2125 r = sd_event_add_defer(s->event, &event_source, synchronize_second_half, link);
2126 if (r < 0)
2127 return log_error_errno(r, "Failed to allocate defer event source: %m");
2128
2129 r = sd_event_source_set_destroy_callback(event_source, synchronize_destroy);
2130 if (r < 0)
2131 return log_error_errno(r, "Failed to set event source destroy callback: %m");
2132
2133 varlink_ref(link); /* The varlink object is now left to the destroy callback to unref */
2134
2135 r = sd_event_source_set_priority(event_source, SD_EVENT_PRIORITY_NORMAL+15);
2136 if (r < 0)
2137 return log_error_errno(r, "Failed to set defer event source priority: %m");
2138
2139 /* Give up ownership of this event source. It will now be destroyed along with the event loop itself,
2140 * unless it destroys itself earlier. */
2141 r = sd_event_source_set_floating(event_source, true);
2142 if (r < 0)
2143 return log_error_errno(r, "Failed to mark event source as floating: %m");
2144
2145 (void) sd_event_source_set_description(event_source, "deferred-sync");
2146
2147 return 0;
2148 }
2149
2150 static int vl_method_rotate(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
2151 Server *s = ASSERT_PTR(userdata);
2152
2153 assert(link);
2154
2155 if (json_variant_elements(parameters) > 0)
2156 return varlink_error_invalid_parameter(link, parameters);
2157
2158 log_info("Received client request to rotate journal, rotating.");
2159 server_full_rotate(s);
2160
2161 return varlink_reply(link, NULL);
2162 }
2163
2164 static int vl_method_flush_to_var(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
2165 Server *s = ASSERT_PTR(userdata);
2166
2167 assert(link);
2168
2169 if (json_variant_elements(parameters) > 0)
2170 return varlink_error_invalid_parameter(link, parameters);
2171 if (s->namespace)
2172 return varlink_error(link, "io.systemd.Journal.NotSupportedByNamespaces", NULL);
2173
2174 log_info("Received client request to flush runtime journal.");
2175 server_full_flush(s);
2176
2177 return varlink_reply(link, NULL);
2178 }
2179
2180 static int vl_method_relinquish_var(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
2181 Server *s = ASSERT_PTR(userdata);
2182
2183 assert(link);
2184
2185 if (json_variant_elements(parameters) > 0)
2186 return varlink_error_invalid_parameter(link, parameters);
2187 if (s->namespace)
2188 return varlink_error(link, "io.systemd.Journal.NotSupportedByNamespaces", NULL);
2189
2190 log_info("Received client request to relinquish %s access.", s->system_storage.path);
2191 server_relinquish_var(s);
2192
2193 return varlink_reply(link, NULL);
2194 }
2195
2196 static int vl_connect(VarlinkServer *server, Varlink *link, void *userdata) {
2197 Server *s = ASSERT_PTR(userdata);
2198
2199 assert(server);
2200 assert(link);
2201
2202 (void) server_start_or_stop_idle_timer(s); /* maybe we are no longer idle */
2203
2204 return 0;
2205 }
2206
2207 static void vl_disconnect(VarlinkServer *server, Varlink *link, void *userdata) {
2208 Server *s = ASSERT_PTR(userdata);
2209
2210 assert(server);
2211 assert(link);
2212
2213 (void) server_start_or_stop_idle_timer(s); /* maybe we are idle now */
2214 }
2215
2216 static int server_open_varlink(Server *s, const char *socket, int fd) {
2217 int r;
2218
2219 assert(s);
2220
2221 r = varlink_server_new(&s->varlink_server, VARLINK_SERVER_ROOT_ONLY|VARLINK_SERVER_INHERIT_USERDATA);
2222 if (r < 0)
2223 return r;
2224
2225 varlink_server_set_userdata(s->varlink_server, s);
2226
2227 r = varlink_server_bind_method_many(
2228 s->varlink_server,
2229 "io.systemd.Journal.Synchronize", vl_method_synchronize,
2230 "io.systemd.Journal.Rotate", vl_method_rotate,
2231 "io.systemd.Journal.FlushToVar", vl_method_flush_to_var,
2232 "io.systemd.Journal.RelinquishVar", vl_method_relinquish_var);
2233 if (r < 0)
2234 return r;
2235
2236 r = varlink_server_bind_connect(s->varlink_server, vl_connect);
2237 if (r < 0)
2238 return r;
2239
2240 r = varlink_server_bind_disconnect(s->varlink_server, vl_disconnect);
2241 if (r < 0)
2242 return r;
2243
2244 if (fd < 0)
2245 r = varlink_server_listen_address(s->varlink_server, socket, 0600);
2246 else
2247 r = varlink_server_listen_fd(s->varlink_server, fd);
2248 if (r < 0)
2249 return r;
2250
2251 r = varlink_server_attach_event(s->varlink_server, s->event, SD_EVENT_PRIORITY_NORMAL);
2252 if (r < 0)
2253 return r;
2254
2255 return 0;
2256 }
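
/* Example (editor's illustration): once this returns, the four methods bound above are reachable over
 * the AF_UNIX stream socket at <runtime_directory>/io.systemd.journal, i.e. for the main instance
 * typically /run/systemd/journal/io.systemd.journal. Assuming a varlink client such as varlinkctl is
 * available, a manual sync could look like:
 *
 *     varlinkctl call /run/systemd/journal/io.systemd.journal io.systemd.Journal.Synchronize '{}'
 *
 * On recent systemd versions "journalctl --sync", "--rotate" and "--flush" are the usual front-ends
 * for these methods. */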
2257
2258 int server_map_seqnum_file(
2259 Server *s,
2260 const char *fname,
2261 size_t size,
2262 void **ret) {
2263
2264 _cleanup_free_ char *fn = NULL;
2265 _cleanup_close_ int fd = -EBADF;
2266 uint64_t *p;
2267 int r;
2268
2269 assert(s);
2270 assert(fname);
2271 assert(size > 0);
2272 assert(ret);
2273
2274 fn = path_join(s->runtime_directory, fname);
2275 if (!fn)
2276 return -ENOMEM;
2277
2278 fd = open(fn, O_RDWR|O_CREAT|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW, 0644);
2279 if (fd < 0)
2280 return -errno;
2281
2282 r = posix_fallocate_loop(fd, 0, size);
2283 if (r < 0)
2284 return r;
2285
2286 p = mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
2287 if (p == MAP_FAILED)
2288 return -errno;
2289
2290 *ret = p;
2291 return 0;
2292 }
2293
2294 void server_unmap_seqnum_file(void *p, size_t size) {
2295 assert(size > 0);
2296
2297 if (!p)
2298 return;
2299
2300 assert_se(munmap(p, size) >= 0);
2301 }
2302
2303 static bool server_is_idle(Server *s) {
2304 assert(s);
2305
2306 /* The server for the main namespace is never idle */
2307 if (!s->namespace)
2308 return false;
2309
2310 /* If a retention maximum is set that is larger than the idle timeout, we need to stay running to
2311 * enforce it, hence turn off the idle logic. */
2312 if (s->max_retention_usec > IDLE_TIMEOUT_USEC)
2313 return false;
2314
2315 /* We aren't idle if we have a varlink client */
2316 if (varlink_server_current_connections(s->varlink_server) > 0)
2317 return false;
2318
2319 /* If we have stdout streams we aren't idle */
2320 if (s->n_stdout_streams > 0)
2321 return false;
2322
2323 return true;
2324 }
2325
2326 static int server_idle_handler(sd_event_source *source, uint64_t usec, void *userdata) {
2327 Server *s = ASSERT_PTR(userdata);
2328
2329 assert(source);
2330
2331 log_debug("Server is idle, exiting.");
2332 sd_event_exit(s->event, 0);
2333 return 0;
2334 }
2335
2336 int server_start_or_stop_idle_timer(Server *s) {
2337 _cleanup_(sd_event_source_unrefp) sd_event_source *source = NULL;
2338 int r;
2339
2340 assert(s);
2341
2342 if (!server_is_idle(s)) {
2343 s->idle_event_source = sd_event_source_disable_unref(s->idle_event_source);
2344 return 0;
2345 }
2346
2347 if (s->idle_event_source)
2348 return 1;
2349
2350 r = sd_event_add_time_relative(s->event, &source, CLOCK_MONOTONIC, IDLE_TIMEOUT_USEC, 0, server_idle_handler, s);
2351 if (r < 0)
2352 return log_error_errno(r, "Failed to allocate idle timer: %m");
2353
2354 r = sd_event_source_set_priority(source, SD_EVENT_PRIORITY_IDLE);
2355 if (r < 0)
2356 return log_error_errno(r, "Failed to set idle timer priority: %m");
2357
2358 (void) sd_event_source_set_description(source, "idle-timer");
2359
2360 s->idle_event_source = TAKE_PTR(source);
2361 return 1;
2362 }
2363
2364 int server_refresh_idle_timer(Server *s) {
2365 int r;
2366
2367 assert(s);
2368
2369 if (!s->idle_event_source)
2370 return 0;
2371
2372 r = sd_event_source_set_time_relative(s->idle_event_source, IDLE_TIMEOUT_USEC);
2373 if (r < 0)
2374 return log_error_errno(r, "Failed to refresh idle timer: %m");
2375
2376 return 1;
2377 }
2378
2379 static int server_set_namespace(Server *s, const char *namespace) {
2380 assert(s);
2381
2382 if (!namespace)
2383 return 0;
2384
2385 if (!log_namespace_name_valid(namespace))
2386 return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Specified namespace name not valid, refusing: %s", namespace);
2387
2388 s->namespace = strdup(namespace);
2389 if (!s->namespace)
2390 return log_oom();
2391
2392 s->namespace_field = strjoin("_NAMESPACE=", namespace);
2393 if (!s->namespace_field)
2394 return log_oom();
2395
2396 return 1;
2397 }
2398
2399 static int server_memory_pressure(sd_event_source *es, void *userdata) {
2400 Server *s = ASSERT_PTR(userdata);
2401
2402 log_info("Under memory pressure, flushing caches.");
2403
2404 /* Flush the cached info we might have about client processes */
2405 client_context_flush_regular(s);
2406
2407 /* Let's also close all user files (but keep the system/runtime one open) */
2408 for (;;) {
2409 ManagedJournalFile *first = ordered_hashmap_steal_first(s->user_journals);
2410
2411 if (!first)
2412 break;
2413
2414 (void) managed_journal_file_close(first);
2415 }
2416
2417 sd_event_trim_memory();
2418
2419 return 0;
2420 }
2421
2422 static int server_setup_memory_pressure(Server *s) {
2423 int r;
2424
2425 assert(s);
2426
2427 r = sd_event_add_memory_pressure(s->event, NULL, server_memory_pressure, s);
2428 if (r < 0)
2429 log_full_errno(ERRNO_IS_NOT_SUPPORTED(r) || ERRNO_IS_PRIVILEGE(r) || (r == -EHOSTDOWN) ? LOG_DEBUG : LOG_NOTICE, r,
2430 "Failed to install memory pressure event source, ignoring: %m");
2431
2432 return 0;
2433 }
2434
2435 int server_init(Server *s, const char *namespace) {
2436 const char *native_socket, *syslog_socket, *stdout_socket, *varlink_socket, *e;
2437 _cleanup_fdset_free_ FDSet *fds = NULL;
2438 int n, r, fd, varlink_fd = -EBADF;
2439 bool no_sockets;
2440
2441 assert(s);
2442
2443 *s = (Server) {
2444 .syslog_fd = -EBADF,
2445 .native_fd = -EBADF,
2446 .stdout_fd = -EBADF,
2447 .dev_kmsg_fd = -EBADF,
2448 .audit_fd = -EBADF,
2449 .hostname_fd = -EBADF,
2450 .notify_fd = -EBADF,
2451
2452 .compress.enabled = true,
2453 .compress.threshold_bytes = UINT64_MAX,
2454 .seal = true,
2455
2456 .set_audit = true,
2457
2458 .watchdog_usec = USEC_INFINITY,
2459
2460 .sync_interval_usec = DEFAULT_SYNC_INTERVAL_USEC,
2461 .sync_scheduled = false,
2462
2463 .ratelimit_interval = DEFAULT_RATE_LIMIT_INTERVAL,
2464 .ratelimit_burst = DEFAULT_RATE_LIMIT_BURST,
2465
2466 .forward_to_wall = true,
2467
2468 .max_file_usec = DEFAULT_MAX_FILE_USEC,
2469
2470 .max_level_store = LOG_DEBUG,
2471 .max_level_syslog = LOG_DEBUG,
2472 .max_level_kmsg = LOG_NOTICE,
2473 .max_level_console = LOG_INFO,
2474 .max_level_wall = LOG_EMERG,
2475
2476 .line_max = DEFAULT_LINE_MAX,
2477
2478 .runtime_storage.name = "Runtime Journal",
2479 .system_storage.name = "System Journal",
2480
2481 .kmsg_own_ratelimit = {
2482 .interval = DEFAULT_KMSG_OWN_INTERVAL,
2483 .burst = DEFAULT_KMSG_OWN_BURST,
2484 },
2485
2486 .sigrtmin18_info.memory_pressure_handler = server_memory_pressure,
2487 .sigrtmin18_info.memory_pressure_userdata = s,
2488 };
2489
2490 r = server_set_namespace(s, namespace);
2491 if (r < 0)
2492 return r;
2493
2494 /* By default, only read from /dev/kmsg if we are the main namespace */
2495 s->read_kmsg = !s->namespace;
2496 s->storage = s->namespace ? STORAGE_PERSISTENT : STORAGE_AUTO;
2497
2498 journal_reset_metrics(&s->system_storage.metrics);
2499 journal_reset_metrics(&s->runtime_storage.metrics);
2500
2501 server_parse_config_file(s);
2502
2503 if (!s->namespace) {
2504 /* Parse kernel command line, but only if we are not a namespace instance */
2505 r = proc_cmdline_parse(parse_proc_cmdline_item, s, PROC_CMDLINE_STRIP_RD_PREFIX);
2506 if (r < 0)
2507 log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");
2508 }
2509
2510 if (!!s->ratelimit_interval != !!s->ratelimit_burst) { /* One set to 0 and the other not? */
2511 log_debug("Setting both rate limit interval and burst from "USEC_FMT",%u to 0,0",
2512 s->ratelimit_interval, s->ratelimit_burst);
2513 s->ratelimit_interval = s->ratelimit_burst = 0;
2514 }
2515
2516 e = getenv("RUNTIME_DIRECTORY");
2517 if (e)
2518 s->runtime_directory = strdup(e);
2519 else if (s->namespace)
2520 s->runtime_directory = strjoin("/run/systemd/journal.", s->namespace);
2521 else
2522 s->runtime_directory = strdup("/run/systemd/journal");
2523 if (!s->runtime_directory)
2524 return log_oom();
2525
2526 (void) mkdir_p(s->runtime_directory, 0755);
2527
2528 s->user_journals = ordered_hashmap_new(NULL);
2529 if (!s->user_journals)
2530 return log_oom();
2531
2532 s->mmap = mmap_cache_new();
2533 if (!s->mmap)
2534 return log_oom();
2535
2536 s->deferred_closes = set_new(NULL);
2537 if (!s->deferred_closes)
2538 return log_oom();
2539
2540 r = sd_event_default(&s->event);
2541 if (r < 0)
2542 return log_error_errno(r, "Failed to create event loop: %m");
2543
2544 n = sd_listen_fds(true);
2545 if (n < 0)
2546 return log_error_errno(n, "Failed to read listening file descriptors from environment: %m");
2547
2548 native_socket = strjoina(s->runtime_directory, "/socket");
2549 stdout_socket = strjoina(s->runtime_directory, "/stdout");
2550 syslog_socket = strjoina(s->runtime_directory, "/dev-log");
2551 varlink_socket = strjoina(s->runtime_directory, "/io.systemd.journal");
2552
2553 for (fd = SD_LISTEN_FDS_START; fd < SD_LISTEN_FDS_START + n; fd++) {
2554
2555 if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, native_socket, 0) > 0) {
2556
2557 if (s->native_fd >= 0)
2558 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2559 "Too many native sockets passed.");
2560
2561 s->native_fd = fd;
2562
2563 } else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, stdout_socket, 0) > 0) {
2564
2565 if (s->stdout_fd >= 0)
2566 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2567 "Too many stdout sockets passed.");
2568
2569 s->stdout_fd = fd;
2570
2571 } else if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, syslog_socket, 0) > 0) {
2572
2573 if (s->syslog_fd >= 0)
2574 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2575 "Too many /dev/log sockets passed.");
2576
2577 s->syslog_fd = fd;
2578
2579 } else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, varlink_socket, 0) > 0) {
2580
2581 if (varlink_fd >= 0)
2582 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2583 "Too many varlink sockets passed.");
2584
2585 varlink_fd = fd;
2586 } else if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
2587
2588 if (s->audit_fd >= 0)
2589 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2590 "Too many audit sockets passed.");
2591
2592 s->audit_fd = fd;
2593
2594 } else {
2595
2596 if (!fds) {
2597 fds = fdset_new();
2598 if (!fds)
2599 return log_oom();
2600 }
2601
2602 r = fdset_put(fds, fd);
2603 if (r < 0)
2604 return log_oom();
2605 }
2606 }
2607
2608 /* Try to restore streams, but don't bother if this fails */
2609 (void) server_restore_streams(s, fds);
2610
2611 if (fdset_size(fds) > 0) {
2612 log_warning("%u unknown file descriptors passed, closing.", fdset_size(fds));
2613 fds = fdset_free(fds);
2614 }
2615
2616 no_sockets = s->native_fd < 0 && s->stdout_fd < 0 && s->syslog_fd < 0 && s->audit_fd < 0 && varlink_fd < 0;
2617
2618 /* always open stdout, syslog, native, and kmsg sockets */
2619
2620 /* systemd-journald.socket: /run/systemd/journal/stdout */
2621 r = server_open_stdout_socket(s, stdout_socket);
2622 if (r < 0)
2623 return r;
2624
2625 /* systemd-journald-dev-log.socket: /run/systemd/journal/dev-log */
2626 r = server_open_syslog_socket(s, syslog_socket);
2627 if (r < 0)
2628 return r;
2629
2630 /* systemd-journald.socket: /run/systemd/journal/socket */
2631 r = server_open_native_socket(s, native_socket);
2632 if (r < 0)
2633 return r;
2634
2635 /* /dev/kmsg */
2636 r = server_open_dev_kmsg(s);
2637 if (r < 0)
2638 return r;
2639
2640 /* Open the audit socket if one was passed in, or if we got no sockets at all */
2641 if (s->audit_fd >= 0 || no_sockets) {
2642 log_info("Collecting audit messages is enabled.");
2643
2644 r = server_open_audit(s);
2645 if (r < 0)
2646 return r;
2647 } else
2648 log_info("Collecting audit messages is disabled.");
2649
2650 r = server_open_varlink(s, varlink_socket, varlink_fd);
2651 if (r < 0)
2652 return r;
2653
2654 r = server_map_seqnum_file(s, "seqnum", sizeof(SeqnumData), (void**) &s->seqnum);
2655 if (r < 0)
2656 return log_error_errno(r, "Failed to map main seqnum file: %m");
2657
2658 r = server_open_kernel_seqnum(s);
2659 if (r < 0)
2660 return r;
2661
2662 r = server_open_hostname(s);
2663 if (r < 0)
2664 return r;
2665
2666 r = server_setup_signals(s);
2667 if (r < 0)
2668 return r;
2669
2670 r = server_setup_memory_pressure(s);
2671 if (r < 0)
2672 return r;
2673
2674 s->ratelimit = journal_ratelimit_new();
2675 if (!s->ratelimit)
2676 return log_oom();
2677
2678 r = cg_get_root_path(&s->cgroup_root);
2679 if (r < 0)
2680 return log_error_errno(r, "Failed to acquire cgroup root path: %m");
2681
2682 server_cache_hostname(s);
2683 server_cache_boot_id(s);
2684 server_cache_machine_id(s);
2685
2686 if (s->namespace)
2687 s->runtime_storage.path = strjoin("/run/log/journal/", SERVER_MACHINE_ID(s), ".", s->namespace);
2688 else
2689 s->runtime_storage.path = strjoin("/run/log/journal/", SERVER_MACHINE_ID(s));
2690 if (!s->runtime_storage.path)
2691 return log_oom();
2692
2693 e = getenv("LOGS_DIRECTORY");
2694 if (e)
2695 s->system_storage.path = strdup(e);
2696 else if (s->namespace)
2697 s->system_storage.path = strjoin("/var/log/journal/", SERVER_MACHINE_ID(s), ".", s->namespace);
2698 else
2699 s->system_storage.path = strjoin("/var/log/journal/", SERVER_MACHINE_ID(s));
2700 if (!s->system_storage.path)
2701 return log_oom();
2702
2703 (void) server_connect_notify(s);
2704
2705 (void) client_context_acquire_default(s);
2706
2707 r = server_system_journal_open(s, /* flush_requested= */ false, /* relinquish_requested= */ false);
2708 if (r < 0)
2709 return r;
2710
2711 server_start_or_stop_idle_timer(s);
2712 return 0;
2713 }
2714
2715 void server_maybe_append_tags(Server *s) {
2716 #if HAVE_GCRYPT
2717 ManagedJournalFile *f;
2718 usec_t n;
2719
2720 n = now(CLOCK_REALTIME);
2721
2722 if (s->system_journal)
2723 journal_file_maybe_append_tag(s->system_journal->file, n);
2724
2725 ORDERED_HASHMAP_FOREACH(f, s->user_journals)
2726 journal_file_maybe_append_tag(f->file, n);
2727 #endif
2728 }
2729
2730 void server_done(Server *s) {
2731 assert(s);
2732
2733 free(s->namespace);
2734 free(s->namespace_field);
2735
2736 set_free_with_destructor(s->deferred_closes, managed_journal_file_close);
2737
2738 while (s->stdout_streams)
2739 stdout_stream_free(s->stdout_streams);
2740
2741 client_context_flush_all(s);
2742
2743 (void) managed_journal_file_close(s->system_journal);
2744 (void) managed_journal_file_close(s->runtime_journal);
2745
2746 ordered_hashmap_free_with_destructor(s->user_journals, managed_journal_file_close);
2747
2748 varlink_server_unref(s->varlink_server);
2749
2750 sd_event_source_unref(s->syslog_event_source);
2751 sd_event_source_unref(s->native_event_source);
2752 sd_event_source_unref(s->stdout_event_source);
2753 sd_event_source_unref(s->dev_kmsg_event_source);
2754 sd_event_source_unref(s->audit_event_source);
2755 sd_event_source_unref(s->sync_event_source);
2756 sd_event_source_unref(s->sigusr1_event_source);
2757 sd_event_source_unref(s->sigusr2_event_source);
2758 sd_event_source_unref(s->sigterm_event_source);
2759 sd_event_source_unref(s->sigint_event_source);
2760 sd_event_source_unref(s->sigrtmin1_event_source);
2761 sd_event_source_unref(s->hostname_event_source);
2762 sd_event_source_unref(s->notify_event_source);
2763 sd_event_source_unref(s->watchdog_event_source);
2764 sd_event_source_unref(s->idle_event_source);
2765 sd_event_unref(s->event);
2766
2767 safe_close(s->syslog_fd);
2768 safe_close(s->native_fd);
2769 safe_close(s->stdout_fd);
2770 safe_close(s->dev_kmsg_fd);
2771 safe_close(s->audit_fd);
2772 safe_close(s->hostname_fd);
2773 safe_close(s->notify_fd);
2774
2775 if (s->ratelimit)
2776 journal_ratelimit_free(s->ratelimit);
2777
2778 server_unmap_seqnum_file(s->seqnum, sizeof(*s->seqnum));
2779 server_unmap_seqnum_file(s->kernel_seqnum, sizeof(*s->kernel_seqnum));
2780
2781 free(s->buffer);
2782 free(s->tty_path);
2783 free(s->cgroup_root);
2784 free(s->hostname_field);
2785 free(s->runtime_storage.path);
2786 free(s->system_storage.path);
2787 free(s->runtime_directory);
2788
2789 mmap_cache_unref(s->mmap);
2790 }
2791
2792 static const char* const storage_table[_STORAGE_MAX] = {
2793 [STORAGE_AUTO] = "auto",
2794 [STORAGE_VOLATILE] = "volatile",
2795 [STORAGE_PERSISTENT] = "persistent",
2796 [STORAGE_NONE] = "none"
2797 };
2798
2799 DEFINE_STRING_TABLE_LOOKUP(storage, Storage);
2800 DEFINE_CONFIG_PARSE_ENUM(config_parse_storage, storage, Storage, "Failed to parse storage setting");
2801
2802 static const char* const split_mode_table[_SPLIT_MAX] = {
2803 [SPLIT_LOGIN] = "login",
2804 [SPLIT_UID] = "uid",
2805 [SPLIT_NONE] = "none",
2806 };
2807
2808 DEFINE_STRING_TABLE_LOOKUP(split_mode, SplitMode);
2809 DEFINE_CONFIG_PARSE_ENUM(config_parse_split_mode, split_mode, SplitMode, "Failed to parse split mode setting");
2810
2811 int config_parse_line_max(
2812 const char* unit,
2813 const char *filename,
2814 unsigned line,
2815 const char *section,
2816 unsigned section_line,
2817 const char *lvalue,
2818 int ltype,
2819 const char *rvalue,
2820 void *data,
2821 void *userdata) {
2822
2823 size_t *sz = ASSERT_PTR(data);
2824 int r;
2825
2826 assert(filename);
2827 assert(lvalue);
2828 assert(rvalue);
2829
2830 if (isempty(rvalue))
2831 /* Empty assignment means default */
2832 *sz = DEFAULT_LINE_MAX;
2833 else {
2834 uint64_t v;
2835
2836 r = parse_size(rvalue, 1024, &v);
2837 if (r < 0) {
2838 log_syntax(unit, LOG_WARNING, filename, line, r, "Failed to parse LineMax= value, ignoring: %s", rvalue);
2839 return 0;
2840 }
2841
2842 if (v < 79) {
2843 /* Why specify 79 here as the minimum line length? Simply because the most common traditional
2844 * terminal size is 80ch, and it might make sense to break one character before the natural
2845 * line break would occur on that. */
2846 log_syntax(unit, LOG_WARNING, filename, line, 0, "LineMax= too small, clamping to 79: %s", rvalue);
2847 *sz = 79;
2848 } else if (v > (uint64_t) (SSIZE_MAX-1)) {
2849 /* So, why specify SSIZE_MAX-1 here? Because that's one below the largest size value read()
2850 * can return, and we need one extra byte for the trailing NUL byte. Of course, in practice such
2851 * large memory allocations will fail anyway, hence this limit is mostly theoretical, as we'll
2852 * fail much earlier for other reasons. */
2853 log_syntax(unit, LOG_WARNING, filename, line, 0, "LineMax= too large, clamping to %" PRIu64 ": %s", (uint64_t) (SSIZE_MAX-1), rvalue);
2854 *sz = SSIZE_MAX-1;
2855 } else
2856 *sz = (size_t) v;
2857 }
2858
2859 return 0;
2860 }
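
/* Example (editor's illustration): in journald.conf this setting takes a size and is clamped as above,
 * e.g.:
 *
 *     [Journal]
 *     LineMax=48K
 *
 * An empty assignment ("LineMax=") restores the built-in default of DEFAULT_LINE_MAX (48K). */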
2861
2862 int config_parse_compress(
2863 const char* unit,
2864 const char *filename,
2865 unsigned line,
2866 const char *section,
2867 unsigned section_line,
2868 const char *lvalue,
2869 int ltype,
2870 const char *rvalue,
2871 void *data,
2872 void *userdata) {
2873
2874 JournalCompressOptions* compress = data;
2875 int r;
2876
2877 if (isempty(rvalue)) {
2878 compress->enabled = true;
2879 compress->threshold_bytes = UINT64_MAX;
2880 } else if (streq(rvalue, "1")) {
2881 log_syntax(unit, LOG_WARNING, filename, line, 0,
2882 "Compress= ambiguously specified as 1, enabling compression with default threshold");
2883 compress->enabled = true;
2884 } else if (streq(rvalue, "0")) {
2885 log_syntax(unit, LOG_WARNING, filename, line, 0,
2886 "Compress= ambiguously specified as 0, disabling compression");
2887 compress->enabled = false;
2888 } else {
2889 r = parse_boolean(rvalue);
2890 if (r < 0) {
2891 r = parse_size(rvalue, 1024, &compress->threshold_bytes);
2892 if (r < 0)
2893 log_syntax(unit, LOG_WARNING, filename, line, r,
2894 "Failed to parse Compress= value, ignoring: %s", rvalue);
2895 else
2896 compress->enabled = true;
2897 } else
2898 compress->enabled = r;
2899 }
2900
2901 return 0;
2902 }
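
/* Example (editor's illustration): Compress= accepts either a boolean or a size threshold, e.g.:
 *
 *     [Journal]
 *     Compress=yes       (compress data objects, using the default threshold)
 *     Compress=8K        (compress only data objects larger than 8K)
 *     Compress=no        (disable compression)
 *
 * A bare "1" or "0" is accepted but warned about as ambiguous, as handled above. */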