src/journal/journald-server.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #if HAVE_SELINUX
4 #include <selinux/selinux.h>
5 #endif
6 #include <sys/ioctl.h>
7 #include <sys/mman.h>
8 #include <sys/signalfd.h>
9 #include <sys/statvfs.h>
10 #include <linux/sockios.h>
11
12 #include "sd-daemon.h"
13 #include "sd-journal.h"
14 #include "sd-messages.h"
15
16 #include "acl-util.h"
17 #include "alloc-util.h"
18 #include "audit-util.h"
19 #include "cgroup-util.h"
20 #include "conf-parser.h"
21 #include "dirent-util.h"
22 #include "extract-word.h"
23 #include "fd-util.h"
24 #include "fileio.h"
25 #include "format-util.h"
26 #include "fs-util.h"
27 #include "hashmap.h"
28 #include "hostname-util.h"
29 #include "id128-util.h"
30 #include "io-util.h"
31 #include "journal-authenticate.h"
32 #include "journal-file.h"
33 #include "journal-internal.h"
34 #include "journal-vacuum.h"
35 #include "journald-audit.h"
36 #include "journald-context.h"
37 #include "journald-kmsg.h"
38 #include "journald-native.h"
39 #include "journald-rate-limit.h"
40 #include "journald-server.h"
41 #include "journald-stream.h"
42 #include "journald-syslog.h"
43 #include "log.h"
44 #include "missing_audit.h"
45 #include "mkdir.h"
46 #include "parse-util.h"
47 #include "path-util.h"
48 #include "proc-cmdline.h"
49 #include "process-util.h"
50 #include "rm-rf.h"
51 #include "selinux-util.h"
52 #include "signal-util.h"
53 #include "socket-util.h"
54 #include "stdio-util.h"
55 #include "string-table.h"
56 #include "string-util.h"
57 #include "syslog-util.h"
58 #include "user-util.h"
59
60 #define USER_JOURNALS_MAX 1024
61
62 #define DEFAULT_SYNC_INTERVAL_USEC (5*USEC_PER_MINUTE)
63 #define DEFAULT_RATE_LIMIT_INTERVAL (30*USEC_PER_SEC)
64 #define DEFAULT_RATE_LIMIT_BURST 10000
65 #define DEFAULT_MAX_FILE_USEC USEC_PER_MONTH
66
67 #define RECHECK_SPACE_USEC (30*USEC_PER_SEC)
68
69 #define NOTIFY_SNDBUF_SIZE (8*1024*1024)
70
71 /* The period to insert between posting changes for coalescing */
72 #define POST_CHANGE_TIMER_INTERVAL_USEC (250*USEC_PER_MSEC)
73
74 /* Pick a good default that is likely to fit into AF_UNIX and AF_INET SOCK_DGRAM datagrams, and even leaves some room
75 * for a bit of additional metadata. */
76 #define DEFAULT_LINE_MAX (48*1024)
77
78 #define DEFERRED_CLOSES_MAX (4096)
79
80 #define IDLE_TIMEOUT_USEC (30*USEC_PER_SEC)
81
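/* Determine how much disk space the *.journal and *.journal~ files in the given directory currently occupy,
 * and how much space the backing file system still has available. */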
82 static int determine_path_usage(
83 Server *s,
84 const char *path,
85 uint64_t *ret_used,
86 uint64_t *ret_free) {
87
88 _cleanup_closedir_ DIR *d = NULL;
89 struct dirent *de;
90 struct statvfs ss;
91
92 assert(s);
93 assert(path);
94 assert(ret_used);
95 assert(ret_free);
96
97 d = opendir(path);
98 if (!d)
99 return log_full_errno(errno == ENOENT ? LOG_DEBUG : LOG_ERR,
100 errno, "Failed to open %s: %m", path);
101
102 if (fstatvfs(dirfd(d), &ss) < 0)
103 return log_error_errno(errno, "Failed to fstatvfs(%s): %m", path);
104
105 *ret_free = ss.f_bsize * ss.f_bavail;
106 *ret_used = 0;
107 FOREACH_DIRENT_ALL(de, d, break) {
108 struct stat st;
109
110 if (!endswith(de->d_name, ".journal") &&
111 !endswith(de->d_name, ".journal~"))
112 continue;
113
114 if (fstatat(dirfd(d), de->d_name, &st, AT_SYMLINK_NOFOLLOW) < 0) {
115 log_debug_errno(errno, "Failed to stat %s/%s, ignoring: %m", path, de->d_name);
116 continue;
117 }
118
119 if (!S_ISREG(st.st_mode))
120 continue;
121
122 *ret_used += (uint64_t) st.st_blocks * 512UL;
123 }
124
125 return 0;
126 }
127
128 static void cache_space_invalidate(JournalStorageSpace *space) {
129 zero(*space);
130 }
131
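/* Refresh the cached usage and limit figures for the given storage, unless the cache is younger than
 * RECHECK_SPACE_USEC. Returns 0 if the cached values were still fresh, 1 after a refresh, negative on error. */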
132 static int cache_space_refresh(Server *s, JournalStorage *storage) {
133 JournalStorageSpace *space;
134 JournalMetrics *metrics;
135 uint64_t vfs_used, vfs_avail, avail;
136 usec_t ts;
137 int r;
138
139 assert(s);
140
141 metrics = &storage->metrics;
142 space = &storage->space;
143
144 ts = now(CLOCK_MONOTONIC);
145
146 if (space->timestamp != 0 && space->timestamp + RECHECK_SPACE_USEC > ts)
147 return 0;
148
149 r = determine_path_usage(s, storage->path, &vfs_used, &vfs_avail);
150 if (r < 0)
151 return r;
152
153 space->vfs_used = vfs_used;
154 space->vfs_available = vfs_avail;
155
156 avail = LESS_BY(vfs_avail, metrics->keep_free);
157
158 space->limit = MIN(MAX(vfs_used + avail, metrics->min_use), metrics->max_use);
159 space->available = LESS_BY(space->limit, vfs_used);
160 space->timestamp = ts;
161 return 1;
162 }
163
164 static void patch_min_use(JournalStorage *storage) {
165 assert(storage);
166
167 /* Let's bump the min_use limit to the current usage on disk. We do
168 * this when starting up and first opening the journal files. This way
169 * sudden spikes in disk usage will not cause journald to vacuum files
170 * without bounds. Note that this means that only a restart of journald
171 * will make it reset this value. */
172
173 storage->metrics.min_use = MAX(storage->metrics.min_use, storage->space.vfs_used);
174 }
175
176 static JournalStorage* server_current_storage(Server *s) {
177 assert(s);
178
179 return s->system_journal ? &s->system_storage : &s->runtime_storage;
180 }
181
182 static int determine_space(Server *s, uint64_t *available, uint64_t *limit) {
183 JournalStorage *js;
184 int r;
185
186 assert(s);
187
188 js = server_current_storage(s);
189
190 r = cache_space_refresh(s, js);
191 if (r >= 0) {
192 if (available)
193 *available = js->space.available;
194 if (limit)
195 *limit = js->space.limit;
196 }
197 return r;
198 }
199
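/* Emit a structured log message describing current disk usage, configured limits and remaining space of the
 * given storage (or of the currently active storage if NULL is passed). */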
200 void server_space_usage_message(Server *s, JournalStorage *storage) {
201 char fb1[FORMAT_BYTES_MAX], fb2[FORMAT_BYTES_MAX], fb3[FORMAT_BYTES_MAX],
202 fb4[FORMAT_BYTES_MAX], fb5[FORMAT_BYTES_MAX], fb6[FORMAT_BYTES_MAX];
203 JournalMetrics *metrics;
204
205 assert(s);
206
207 if (!storage)
208 storage = server_current_storage(s);
209
210 if (cache_space_refresh(s, storage) < 0)
211 return;
212
213 metrics = &storage->metrics;
214 format_bytes(fb1, sizeof(fb1), storage->space.vfs_used);
215 format_bytes(fb2, sizeof(fb2), metrics->max_use);
216 format_bytes(fb3, sizeof(fb3), metrics->keep_free);
217 format_bytes(fb4, sizeof(fb4), storage->space.vfs_available);
218 format_bytes(fb5, sizeof(fb5), storage->space.limit);
219 format_bytes(fb6, sizeof(fb6), storage->space.available);
220
221 server_driver_message(s, 0,
222 "MESSAGE_ID=" SD_MESSAGE_JOURNAL_USAGE_STR,
223 LOG_MESSAGE("%s (%s) is %s, max %s, %s free.",
224 storage->name, storage->path, fb1, fb5, fb6),
225 "JOURNAL_NAME=%s", storage->name,
226 "JOURNAL_PATH=%s", storage->path,
227 "CURRENT_USE=%"PRIu64, storage->space.vfs_used,
228 "CURRENT_USE_PRETTY=%s", fb1,
229 "MAX_USE=%"PRIu64, metrics->max_use,
230 "MAX_USE_PRETTY=%s", fb2,
231 "DISK_KEEP_FREE=%"PRIu64, metrics->keep_free,
232 "DISK_KEEP_FREE_PRETTY=%s", fb3,
233 "DISK_AVAILABLE=%"PRIu64, storage->space.vfs_available,
234 "DISK_AVAILABLE_PRETTY=%s", fb4,
235 "LIMIT=%"PRIu64, storage->space.limit,
236 "LIMIT_PRETTY=%s", fb5,
237 "AVAILABLE=%"PRIu64, storage->space.available,
238 "AVAILABLE_PRETTY=%s", fb6,
239 NULL);
240 }
241
242 static bool uid_for_system_journal(uid_t uid) {
243
244 /* Returns true if the specified UID shall get its data stored in the system journal. */
245
246 return uid_is_system(uid) || uid_is_dynamic(uid) || uid == UID_NOBODY;
247 }
248
249 static void server_add_acls(JournalFile *f, uid_t uid) {
250 #if HAVE_ACL
251 int r;
252 #endif
253 assert(f);
254
255 #if HAVE_ACL
256 if (uid_for_system_journal(uid))
257 return;
258
259 r = add_acls_for_user(f->fd, uid);
260 if (r < 0)
261 log_warning_errno(r, "Failed to set ACL on %s, ignoring: %m", f->path);
262 #endif
263 }
264
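/* Open (or create) a journal file with the server's compression settings and the requested sealing, and hook
 * it up to the coalescing post-change timer so that change notifications are batched. */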
265 static int open_journal(
266 Server *s,
267 bool reliably,
268 const char *fname,
269 int flags,
270 bool seal,
271 JournalMetrics *metrics,
272 JournalFile **ret) {
273
274 _cleanup_(journal_file_closep) JournalFile *f = NULL;
275 int r;
276
277 assert(s);
278 assert(fname);
279 assert(ret);
280
281 if (reliably)
282 r = journal_file_open_reliably(fname, flags, 0640, s->compress.enabled, s->compress.threshold_bytes,
283 seal, metrics, s->mmap, s->deferred_closes, NULL, &f);
284 else
285 r = journal_file_open(-1, fname, flags, 0640, s->compress.enabled, s->compress.threshold_bytes, seal,
286 metrics, s->mmap, s->deferred_closes, NULL, &f);
287
288 if (r < 0)
289 return r;
290
291 r = journal_file_enable_post_change_timer(f, s->event, POST_CHANGE_TIMER_INTERVAL_USEC);
292 if (r < 0)
293 return r;
294
295 *ret = TAKE_PTR(f);
296 return r;
297 }
298
299 static bool flushed_flag_is_set(Server *s) {
300 const char *fn;
301
302 assert(s);
303
304 /* We don't support the "flushing" concept for namespace instances; we assume they always have
305 * access to /var */
306 if (s->namespace)
307 return true;
308
309 fn = strjoina(s->runtime_directory, "/flushed");
310 return access(fn, F_OK) >= 0;
311 }
312
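/* Open the persistent (/var) and/or volatile (/run) system journals, depending on the configured storage
 * mode, whether a flush was requested or has already happened, and whether /var has been relinquished. This
 * is a no-op for journals that are already open. */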
313 static int system_journal_open(Server *s, bool flush_requested, bool relinquish_requested) {
314 const char *fn;
315 int r = 0;
316
317 if (!s->system_journal &&
318 IN_SET(s->storage, STORAGE_PERSISTENT, STORAGE_AUTO) &&
319 (flush_requested || flushed_flag_is_set(s)) &&
320 !relinquish_requested) {
321
322 /* If in auto mode: first try to create the machine path, but not the prefix.
323 *
324 * If in persistent mode: create /var/log/journal and the machine path */
325
326 if (s->storage == STORAGE_PERSISTENT)
327 (void) mkdir_parents(s->system_storage.path, 0755);
328
329 (void) mkdir(s->system_storage.path, 0755);
330
331 fn = strjoina(s->system_storage.path, "/system.journal");
332 r = open_journal(s, true, fn, O_RDWR|O_CREAT, s->seal, &s->system_storage.metrics, &s->system_journal);
333 if (r >= 0) {
334 server_add_acls(s->system_journal, 0);
335 (void) cache_space_refresh(s, &s->system_storage);
336 patch_min_use(&s->system_storage);
337 } else {
338 if (!IN_SET(r, -ENOENT, -EROFS))
339 log_warning_errno(r, "Failed to open system journal: %m");
340
341 r = 0;
342 }
343
344 /* If the runtime journal is open, and we're post-flush, we're recovering from a failed
345 * system journal rotate (ENOSPC) for which the runtime journal was reopened.
346 *
347 * Perform an implicit flush to var, leaving the runtime journal closed, now that the system
348 * journal is back.
349 */
350 if (!flush_requested)
351 (void) server_flush_to_var(s, true);
352 }
353
354 if (!s->runtime_journal &&
355 (s->storage != STORAGE_NONE)) {
356
357 fn = strjoina(s->runtime_storage.path, "/system.journal");
358
359 if (s->system_journal && !relinquish_requested) {
360
361 /* Try to open the runtime journal, but only
362 * if it already exists, so that we can flush
363 * it into the system journal */
364
365 r = open_journal(s, false, fn, O_RDWR, false, &s->runtime_storage.metrics, &s->runtime_journal);
366 if (r < 0) {
367 if (r != -ENOENT)
368 log_warning_errno(r, "Failed to open runtime journal: %m");
369
370 r = 0;
371 }
372
373 } else {
374
375 /* OK, we really need the runtime journal, so create it if necessary. */
376
377 (void) mkdir_parents(s->runtime_storage.path, 0755);
378 (void) mkdir(s->runtime_storage.path, 0750);
379
380 r = open_journal(s, true, fn, O_RDWR|O_CREAT, false, &s->runtime_storage.metrics, &s->runtime_journal);
381 if (r < 0)
382 return log_error_errno(r, "Failed to open runtime journal: %m");
383 }
384
385 if (s->runtime_journal) {
386 server_add_acls(s->runtime_journal, 0);
387 (void) cache_space_refresh(s, &s->runtime_storage);
388 patch_min_use(&s->runtime_storage);
389 }
390 }
391
392 return r;
393 }
394
395 static JournalFile* find_journal(Server *s, uid_t uid) {
396 _cleanup_free_ char *p = NULL;
397 JournalFile *f;
398 int r;
399
400 assert(s);
401
402 /* A rotate that fails to create the new journal (ENOSPC) leaves the rotated journal as NULL. Unless
403 * we revisit opening, even after space is made available we'll continue to return NULL indefinitely.
404 *
405 * system_journal_open() is a noop if the journals are already open, so we can just call it here to
406 * recover from failed rotates (or anything else that's left the journals as NULL).
407 *
408 * Fixes https://github.com/systemd/systemd/issues/3968 */
409 (void) system_journal_open(s, false, false);
410
411 /* We split up user logs only on /var, not on /run. If the runtime file is open, we write to it
412 * exclusively, in order to guarantee proper order as soon as we flush /run to /var and close the
413 * runtime file. */
414
415 if (s->runtime_journal)
416 return s->runtime_journal;
417
418 if (uid_for_system_journal(uid))
419 return s->system_journal;
420
421 f = ordered_hashmap_get(s->user_journals, UID_TO_PTR(uid));
422 if (f)
423 return f;
424
425 if (asprintf(&p, "%s/user-" UID_FMT ".journal", s->system_storage.path, uid) < 0) {
426 log_oom();
427 return s->system_journal;
428 }
429
430 /* Too many open? Then let's close one (or more) */
431 while (ordered_hashmap_size(s->user_journals) >= USER_JOURNALS_MAX) {
432 assert_se(f = ordered_hashmap_steal_first(s->user_journals));
433 (void) journal_file_close(f);
434 }
435
436 r = open_journal(s, true, p, O_RDWR|O_CREAT, s->seal, &s->system_storage.metrics, &f);
437 if (r < 0)
438 return s->system_journal;
439
440 r = ordered_hashmap_put(s->user_journals, UID_TO_PTR(uid), f);
441 if (r < 0) {
442 (void) journal_file_close(f);
443 return s->system_journal;
444 }
445
446 server_add_acls(f, uid);
447 return f;
448 }
449
450 static int do_rotate(
451 Server *s,
452 JournalFile **f,
453 const char* name,
454 bool seal,
455 uint32_t uid) {
456
457 int r;
458 assert(s);
459
460 if (!*f)
461 return -EINVAL;
462
463 r = journal_file_rotate(f, s->compress.enabled, s->compress.threshold_bytes, seal, s->deferred_closes);
464 if (r < 0) {
465 if (*f)
466 return log_error_errno(r, "Failed to rotate %s: %m", (*f)->path);
467 else
468 return log_error_errno(r, "Failed to create new %s journal: %m", name);
469 }
470
471 server_add_acls(*f, uid);
472 return r;
473 }
474
475 static void server_process_deferred_closes(Server *s) {
476 JournalFile *f;
477 Iterator i;
478
479 /* Perform any deferred closes which aren't still offlining. */
480 SET_FOREACH(f, s->deferred_closes, i) {
481 if (journal_file_is_offlining(f))
482 continue;
483
484 (void) set_remove(s->deferred_closes, f);
485 (void) journal_file_close(f);
486 }
487 }
488
489 static void server_vacuum_deferred_closes(Server *s) {
490 assert(s);
491
492 /* Make some room in the deferred closes list, so that it doesn't grow without bounds */
493 if (set_size(s->deferred_closes) < DEFERRED_CLOSES_MAX)
494 return;
495
496 /* Let's first remove all journal files that might already have completed closing */
497 server_process_deferred_closes(s);
498
499 /* And now, let's close some more until we reach the limit again. */
500 while (set_size(s->deferred_closes) >= DEFERRED_CLOSES_MAX) {
501 JournalFile *f;
502
503 assert_se(f = set_steal_first(s->deferred_closes));
504 journal_file_close(f);
505 }
506 }
507
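/* Archive user journal files in the system storage directory that are not currently open: each file is
 * briefly opened, archived and handed to the deferred-close machinery; files that cannot be opened are moved
 * out of the way instead. */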
508 static int vacuum_offline_user_journals(Server *s) {
509 _cleanup_closedir_ DIR *d = NULL;
510 int r;
511
512 assert(s);
513
514 d = opendir(s->system_storage.path);
515 if (!d) {
516 if (errno == ENOENT)
517 return 0;
518
519 return log_error_errno(errno, "Failed to open %s: %m", s->system_storage.path);
520 }
521
522 for (;;) {
523 _cleanup_free_ char *u = NULL, *full = NULL;
524 _cleanup_close_ int fd = -1;
525 const char *a, *b;
526 struct dirent *de;
527 JournalFile *f;
528 uid_t uid;
529
530 errno = 0;
531 de = readdir_no_dot(d);
532 if (!de) {
533 if (errno != 0)
534 log_warning_errno(errno, "Failed to enumerate %s, ignoring: %m", s->system_storage.path);
535
536 break;
537 }
538
539 a = startswith(de->d_name, "user-");
540 if (!a)
541 continue;
542 b = endswith(de->d_name, ".journal");
543 if (!b)
544 continue;
545
546 u = strndup(a, b-a);
547 if (!u)
548 return log_oom();
549
550 r = parse_uid(u, &uid);
551 if (r < 0) {
552 log_debug_errno(r, "Failed to parse UID from file name '%s', ignoring: %m", de->d_name);
553 continue;
554 }
555
556 /* Is this an open user journal? Those were already rotated by the caller (server_rotate()), so skip it. */
557 if (ordered_hashmap_contains(s->user_journals, UID_TO_PTR(uid)))
558 continue;
559
560 full = path_join(s->system_storage.path, de->d_name);
561 if (!full)
562 return log_oom();
563
564 fd = openat(dirfd(d), de->d_name, O_RDWR|O_CLOEXEC|O_NOCTTY|O_NOFOLLOW|O_NONBLOCK);
565 if (fd < 0) {
566 log_full_errno(IN_SET(errno, ELOOP, ENOENT) ? LOG_DEBUG : LOG_WARNING, errno,
567 "Failed to open journal file '%s' for rotation: %m", full);
568 continue;
569 }
570
571 /* Make some room in the set of deferred close()s */
572 server_vacuum_deferred_closes(s);
573
574 /* Open the file briefly, so that we can archive it */
575 r = journal_file_open(fd,
576 full,
577 O_RDWR,
578 0640,
579 s->compress.enabled,
580 s->compress.threshold_bytes,
581 s->seal,
582 &s->system_storage.metrics,
583 s->mmap,
584 s->deferred_closes,
585 NULL,
586 &f);
587 if (r < 0) {
588 log_warning_errno(r, "Failed to read journal file %s for rotation, trying to move it out of the way: %m", full);
589
590 r = journal_file_dispose(dirfd(d), de->d_name);
591 if (r < 0)
592 log_warning_errno(r, "Failed to move %s out of the way, ignoring: %m", full);
593 else
594 log_debug("Successfully moved %s out of the way.", full);
595
596 continue;
597 }
598
599 TAKE_FD(fd); /* Donated to journal_file_open() */
600
601 r = journal_file_archive(f);
602 if (r < 0)
603 log_debug_errno(r, "Failed to archive journal file '%s', ignoring: %m", full);
604
605 f = journal_initiate_close(f, s->deferred_closes);
606 }
607
608 return 0;
609 }
610
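/* Rotate every journal file we write to: the runtime or system journal, all per-user journals we currently
 * have open, and (if /var is accessible) the offline user journals as well. */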
611 void server_rotate(Server *s) {
612 JournalFile *f;
613 Iterator i;
614 void *k;
615 int r;
616
617 log_debug("Rotating...");
618
619 /* First, rotate the system journal (either in its runtime flavour or in its system flavour) */
620 (void) do_rotate(s, &s->runtime_journal, "runtime", false, 0);
621 (void) do_rotate(s, &s->system_journal, "system", s->seal, 0);
622
623 /* Then, rotate all user journals we have open (keeping them open) */
624 ORDERED_HASHMAP_FOREACH_KEY(f, k, s->user_journals, i) {
625 r = do_rotate(s, &f, "user", s->seal, PTR_TO_UID(k));
626 if (r >= 0)
627 ordered_hashmap_replace(s->user_journals, k, f);
628 else if (!f)
629 /* Old file has been closed and deallocated */
630 ordered_hashmap_remove(s->user_journals, k);
631 }
632
633 /* Finally, also rotate all user journals we currently do not have open. (But do so only if we
634 * actually have access to /var, i.e. are not in the log-to-runtime-journal mode). */
635 if (!s->runtime_journal)
636 (void) vacuum_offline_user_journals(s);
637
638 server_process_deferred_closes(s);
639 }
640
641 void server_sync(Server *s) {
642 JournalFile *f;
643 Iterator i;
644 int r;
645
646 if (s->system_journal) {
647 r = journal_file_set_offline(s->system_journal, false);
648 if (r < 0)
649 log_warning_errno(r, "Failed to sync system journal, ignoring: %m");
650 }
651
652 ORDERED_HASHMAP_FOREACH(f, s->user_journals, i) {
653 r = journal_file_set_offline(f, false);
654 if (r < 0)
655 log_warning_errno(r, "Failed to sync user journal, ignoring: %m");
656 }
657
658 if (s->sync_event_source) {
659 r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_OFF);
660 if (r < 0)
661 log_error_errno(r, "Failed to disable sync timer source: %m");
662 }
663
664 s->sync_scheduled = false;
665 }
666
667 static void do_vacuum(Server *s, JournalStorage *storage, bool verbose) {
668
669 int r;
670
671 assert(s);
672 assert(storage);
673
674 (void) cache_space_refresh(s, storage);
675
676 if (verbose)
677 server_space_usage_message(s, storage);
678
679 r = journal_directory_vacuum(storage->path, storage->space.limit,
680 storage->metrics.n_max_files, s->max_retention_usec,
681 &s->oldest_file_usec, verbose);
682 if (r < 0 && r != -ENOENT)
683 log_warning_errno(r, "Failed to vacuum %s, ignoring: %m", storage->path);
684
685 cache_space_invalidate(&storage->space);
686 }
687
688 int server_vacuum(Server *s, bool verbose) {
689 assert(s);
690
691 log_debug("Vacuuming...");
692
693 s->oldest_file_usec = 0;
694
695 if (s->system_journal)
696 do_vacuum(s, &s->system_storage, verbose);
697 if (s->runtime_journal)
698 do_vacuum(s, &s->runtime_storage, verbose);
699
700 return 0;
701 }
702
703 static void server_cache_machine_id(Server *s) {
704 sd_id128_t id;
705 int r;
706
707 assert(s);
708
709 r = sd_id128_get_machine(&id);
710 if (r < 0)
711 return;
712
713 sd_id128_to_string(id, stpcpy(s->machine_id_field, "_MACHINE_ID="));
714 }
715
716 static void server_cache_boot_id(Server *s) {
717 sd_id128_t id;
718 int r;
719
720 assert(s);
721
722 r = sd_id128_get_boot(&id);
723 if (r < 0)
724 return;
725
726 sd_id128_to_string(id, stpcpy(s->boot_id_field, "_BOOT_ID="));
727 }
728
729 static void server_cache_hostname(Server *s) {
730 _cleanup_free_ char *t = NULL;
731 char *x;
732
733 assert(s);
734
735 t = gethostname_malloc();
736 if (!t)
737 return;
738
739 x = strjoin("_HOSTNAME=", t);
740 if (!x)
741 return;
742
743 free_and_replace(s->hostname_field, x);
744 }
745
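/* Given the error returned by a failed append, decide whether rotating to a fresh journal file is likely to
 * help, i.e. whether the write should be retried after a rotation. */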
746 static bool shall_try_append_again(JournalFile *f, int r) {
747 switch(r) {
748
749 case -E2BIG: /* Hit configured limit */
750 case -EFBIG: /* Hit fs limit */
751 case -EDQUOT: /* Quota limit hit */
752 case -ENOSPC: /* Disk full */
753 log_debug("%s: Allocation limit reached, rotating.", f->path);
754 return true;
755
756 case -EIO: /* I/O error of some kind (mmap) */
757 log_warning("%s: IO error, rotating.", f->path);
758 return true;
759
760 case -EHOSTDOWN: /* Other machine */
761 log_info("%s: Journal file from other machine, rotating.", f->path);
762 return true;
763
764 case -EBUSY: /* Unclean shutdown */
765 log_info("%s: Unclean shutdown, rotating.", f->path);
766 return true;
767
768 case -EPROTONOSUPPORT: /* Unsupported feature */
769 log_info("%s: Unsupported feature, rotating.", f->path);
770 return true;
771
772 case -EBADMSG: /* Corrupted */
773 case -ENODATA: /* Truncated */
774 case -ESHUTDOWN: /* Already archived */
775 log_warning("%s: Journal file corrupted, rotating.", f->path);
776 return true;
777
778 case -EIDRM: /* Journal file has been deleted */
779 log_warning("%s: Journal file has been deleted, rotating.", f->path);
780 return true;
781
782 case -ETXTBSY: /* Journal file is from the future */
783 log_warning("%s: Journal file is from the future, rotating.", f->path);
784 return true;
785
786 case -EAFNOSUPPORT:
787 log_warning("%s: underlying file system does not support memory mapping or another required file system feature.", f->path);
788 return false;
789
790 default:
791 return false;
792 }
793 }
794
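/* Append a fully assembled entry (an iovec array of "FIELD=value" strings) to the journal file appropriate
 * for the given UID, rotating and vacuuming first if necessary, and retrying the write once after a rotation
 * if the failure looks recoverable. */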
795 static void write_to_journal(Server *s, uid_t uid, struct iovec *iovec, size_t n, int priority) {
796 bool vacuumed = false, rotate = false;
797 struct dual_timestamp ts;
798 JournalFile *f;
799 int r;
800
801 assert(s);
802 assert(iovec);
803 assert(n > 0);
804
805 /* Get the closest, linearized time we have for this log event from the event loop. (Note that we use
806 * neither the source time nor the time the event was originally seen, but simply the time we started
807 * processing it, as we want strictly linear ordering in what we write out.) */
808 assert_se(sd_event_now(s->event, CLOCK_REALTIME, &ts.realtime) >= 0);
809 assert_se(sd_event_now(s->event, CLOCK_MONOTONIC, &ts.monotonic) >= 0);
810
811 if (ts.realtime < s->last_realtime_clock) {
812 /* When the time jumps backwards, let's immediately rotate. Of course, this should not happen during
813 * regular operation. However, when it does happen, then we should make sure that we start fresh files
814 * to ensure that the entries in the journal files are strictly ordered by time, in order to ensure
815 * bisection works correctly. */
816
817 log_debug("Time jumped backwards, rotating.");
818 rotate = true;
819 } else {
820
821 f = find_journal(s, uid);
822 if (!f)
823 return;
824
825 if (journal_file_rotate_suggested(f, s->max_file_usec)) {
826 log_debug("%s: Journal header limits reached or header out-of-date, rotating.", f->path);
827 rotate = true;
828 }
829 }
830
831 if (rotate) {
832 server_rotate(s);
833 server_vacuum(s, false);
834 vacuumed = true;
835
836 f = find_journal(s, uid);
837 if (!f)
838 return;
839 }
840
841 s->last_realtime_clock = ts.realtime;
842
843 r = journal_file_append_entry(f, &ts, NULL, iovec, n, &s->seqnum, NULL, NULL);
844 if (r >= 0) {
845 server_schedule_sync(s, priority);
846 return;
847 }
848
849 if (vacuumed || !shall_try_append_again(f, r)) {
850 log_error_errno(r, "Failed to write entry (%zu items, %zu bytes), ignoring: %m", n, IOVEC_TOTAL_SIZE(iovec, n));
851 return;
852 }
853
854 server_rotate(s);
855 server_vacuum(s, false);
856
857 f = find_journal(s, uid);
858 if (!f)
859 return;
860
861 log_debug("Retrying write.");
862 r = journal_file_append_entry(f, &ts, NULL, iovec, n, &s->seqnum, NULL, NULL);
863 if (r < 0)
864 log_error_errno(r, "Failed to write entry (%zu items, %zu bytes) despite vacuuming, ignoring: %m", n, IOVEC_TOTAL_SIZE(iovec, n));
865 else
866 server_schedule_sync(s, priority);
867 }
868
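/* Helpers for dispatch_message_real(): each macro formats a single "FIELD=value" string in stack memory and
 * appends it to the iovec array, silently skipping fields whose value is unset or empty. For example,
 * IOVEC_ADD_STRING_FIELD(iovec, n, c->comm, "_COMM") expands to roughly:
 *
 *     if (!isempty(c->comm))
 *             iovec[n++] = IOVEC_MAKE_STRING(strjoina("_COMM=", c->comm));
 */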
869 #define IOVEC_ADD_NUMERIC_FIELD(iovec, n, value, type, isset, format, field) \
870 if (isset(value)) { \
871 char *k; \
872 k = newa(char, STRLEN(field "=") + DECIMAL_STR_MAX(type) + 1); \
873 sprintf(k, field "=" format, value); \
874 iovec[n++] = IOVEC_MAKE_STRING(k); \
875 }
876
877 #define IOVEC_ADD_STRING_FIELD(iovec, n, value, field) \
878 if (!isempty(value)) { \
879 char *k; \
880 k = strjoina(field "=", value); \
881 iovec[n++] = IOVEC_MAKE_STRING(k); \
882 }
883
884 #define IOVEC_ADD_ID128_FIELD(iovec, n, value, field) \
885 if (!sd_id128_is_null(value)) { \
886 char *k; \
887 k = newa(char, STRLEN(field "=") + SD_ID128_STRING_MAX); \
888 sd_id128_to_string(value, stpcpy(k, field "=")); \
889 iovec[n++] = IOVEC_MAKE_STRING(k); \
890 }
891
892 #define IOVEC_ADD_SIZED_FIELD(iovec, n, value, value_size, field) \
893 if (value_size > 0) { \
894 char *k; \
895 k = newa(char, STRLEN(field "=") + value_size + 1); \
896 *((char*) mempcpy(stpcpy(k, field "="), value, value_size)) = 0; \
897 iovec[n++] = IOVEC_MAKE_STRING(k); \
898 }
899
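/* Attach all trusted metadata we can determine for this message (client credentials, cgroup/unit data,
 * object process fields, source timestamp, boot/machine/hostname/namespace IDs), pick the target journal
 * according to the configured SplitMode, and write the entry out. */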
900 static void dispatch_message_real(
901 Server *s,
902 struct iovec *iovec, size_t n, size_t m,
903 const ClientContext *c,
904 const struct timeval *tv,
905 int priority,
906 pid_t object_pid) {
907
908 char source_time[sizeof("_SOURCE_REALTIME_TIMESTAMP=") + DECIMAL_STR_MAX(usec_t)];
909 _cleanup_free_ char *cmdline1 = NULL, *cmdline2 = NULL;
910 uid_t journal_uid;
911 ClientContext *o;
912
913 assert(s);
914 assert(iovec);
915 assert(n > 0);
916 assert(n +
917 N_IOVEC_META_FIELDS +
918 (pid_is_valid(object_pid) ? N_IOVEC_OBJECT_FIELDS : 0) +
919 client_context_extra_fields_n_iovec(c) <= m);
920
921 if (c) {
922 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->pid, pid_t, pid_is_valid, PID_FMT, "_PID");
923 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->uid, uid_t, uid_is_valid, UID_FMT, "_UID");
924 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->gid, gid_t, gid_is_valid, GID_FMT, "_GID");
925
926 IOVEC_ADD_STRING_FIELD(iovec, n, c->comm, "_COMM"); /* At most TASK_COMM_LENGTH (16 bytes) */
927 IOVEC_ADD_STRING_FIELD(iovec, n, c->exe, "_EXE"); /* A path, so at most PATH_MAX (4096 bytes) */
928
929 if (c->cmdline)
930 /* At most _SC_ARG_MAX (2MB usually), which is too much to put on stack.
931 * Let's use a heap allocation for this one. */
932 cmdline1 = set_iovec_string_field(iovec, &n, "_CMDLINE=", c->cmdline);
933
934 IOVEC_ADD_STRING_FIELD(iovec, n, c->capeff, "_CAP_EFFECTIVE"); /* Read from /proc/.../status */
935 IOVEC_ADD_SIZED_FIELD(iovec, n, c->label, c->label_size, "_SELINUX_CONTEXT");
936 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->auditid, uint32_t, audit_session_is_valid, "%" PRIu32, "_AUDIT_SESSION");
937 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->loginuid, uid_t, uid_is_valid, UID_FMT, "_AUDIT_LOGINUID");
938
939 IOVEC_ADD_STRING_FIELD(iovec, n, c->cgroup, "_SYSTEMD_CGROUP"); /* A path */
940 IOVEC_ADD_STRING_FIELD(iovec, n, c->session, "_SYSTEMD_SESSION");
941 IOVEC_ADD_NUMERIC_FIELD(iovec, n, c->owner_uid, uid_t, uid_is_valid, UID_FMT, "_SYSTEMD_OWNER_UID");
942 IOVEC_ADD_STRING_FIELD(iovec, n, c->unit, "_SYSTEMD_UNIT"); /* Unit names are bounded by UNIT_NAME_MAX */
943 IOVEC_ADD_STRING_FIELD(iovec, n, c->user_unit, "_SYSTEMD_USER_UNIT");
944 IOVEC_ADD_STRING_FIELD(iovec, n, c->slice, "_SYSTEMD_SLICE");
945 IOVEC_ADD_STRING_FIELD(iovec, n, c->user_slice, "_SYSTEMD_USER_SLICE");
946
947 IOVEC_ADD_ID128_FIELD(iovec, n, c->invocation_id, "_SYSTEMD_INVOCATION_ID");
948
949 if (c->extra_fields_n_iovec > 0) {
950 memcpy(iovec + n, c->extra_fields_iovec, c->extra_fields_n_iovec * sizeof(struct iovec));
951 n += c->extra_fields_n_iovec;
952 }
953 }
954
955 assert(n <= m);
956
957 if (pid_is_valid(object_pid) && client_context_get(s, object_pid, NULL, NULL, 0, NULL, &o) >= 0) {
958
959 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->pid, pid_t, pid_is_valid, PID_FMT, "OBJECT_PID");
960 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->uid, uid_t, uid_is_valid, UID_FMT, "OBJECT_UID");
961 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->gid, gid_t, gid_is_valid, GID_FMT, "OBJECT_GID");
962
963 /* See above for size limits, only ->cmdline may be large, so use a heap allocation for it. */
964 IOVEC_ADD_STRING_FIELD(iovec, n, o->comm, "OBJECT_COMM");
965 IOVEC_ADD_STRING_FIELD(iovec, n, o->exe, "OBJECT_EXE");
966 if (o->cmdline)
967 cmdline2 = set_iovec_string_field(iovec, &n, "OBJECT_CMDLINE=", o->cmdline);
968
969 IOVEC_ADD_STRING_FIELD(iovec, n, o->capeff, "OBJECT_CAP_EFFECTIVE");
970 IOVEC_ADD_SIZED_FIELD(iovec, n, o->label, o->label_size, "OBJECT_SELINUX_CONTEXT");
971 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->auditid, uint32_t, audit_session_is_valid, "%" PRIu32, "OBJECT_AUDIT_SESSION");
972 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->loginuid, uid_t, uid_is_valid, UID_FMT, "OBJECT_AUDIT_LOGINUID");
973
974 IOVEC_ADD_STRING_FIELD(iovec, n, o->cgroup, "OBJECT_SYSTEMD_CGROUP");
975 IOVEC_ADD_STRING_FIELD(iovec, n, o->session, "OBJECT_SYSTEMD_SESSION");
976 IOVEC_ADD_NUMERIC_FIELD(iovec, n, o->owner_uid, uid_t, uid_is_valid, UID_FMT, "OBJECT_SYSTEMD_OWNER_UID");
977 IOVEC_ADD_STRING_FIELD(iovec, n, o->unit, "OBJECT_SYSTEMD_UNIT");
978 IOVEC_ADD_STRING_FIELD(iovec, n, o->user_unit, "OBJECT_SYSTEMD_USER_UNIT");
979 IOVEC_ADD_STRING_FIELD(iovec, n, o->slice, "OBJECT_SYSTEMD_SLICE");
980 IOVEC_ADD_STRING_FIELD(iovec, n, o->user_slice, "OBJECT_SYSTEMD_USER_SLICE");
981
982 IOVEC_ADD_ID128_FIELD(iovec, n, o->invocation_id, "OBJECT_SYSTEMD_INVOCATION_ID"); /* the macro appends the "=" itself */
983 }
984
985 assert(n <= m);
986
987 if (tv) {
988 sprintf(source_time, "_SOURCE_REALTIME_TIMESTAMP=" USEC_FMT, timeval_load(tv));
989 iovec[n++] = IOVEC_MAKE_STRING(source_time);
990 }
991
992 /* Note that strictly speaking storing the boot id here is
993 * redundant since the entry includes this in-line
994 * anyway. However, we need this indexed, too. */
995 if (!isempty(s->boot_id_field))
996 iovec[n++] = IOVEC_MAKE_STRING(s->boot_id_field);
997
998 if (!isempty(s->machine_id_field))
999 iovec[n++] = IOVEC_MAKE_STRING(s->machine_id_field);
1000
1001 if (!isempty(s->hostname_field))
1002 iovec[n++] = IOVEC_MAKE_STRING(s->hostname_field);
1003
1004 if (!isempty(s->namespace_field))
1005 iovec[n++] = IOVEC_MAKE_STRING(s->namespace_field);
1006
1007 assert(n <= m);
1008
1009 if (s->split_mode == SPLIT_UID && c && uid_is_valid(c->uid))
1010 /* Split up strictly by (non-root) UID */
1011 journal_uid = c->uid;
1012 else if (s->split_mode == SPLIT_LOGIN && c && c->uid > 0 && uid_is_valid(c->owner_uid))
1013 /* Split up by login UIDs. We do this only if the
1014 * real UID is not root, in order not to accidentally
1015 * leak privileged information to the user that is
1016 * logged by a privileged process that is part of an
1017 * unprivileged session. */
1018 journal_uid = c->owner_uid;
1019 else
1020 journal_uid = 0;
1021
1022 write_to_journal(s, journal_uid, iovec, n, priority);
1023 }
1024
1025 void server_driver_message(Server *s, pid_t object_pid, const char *message_id, const char *format, ...) {
1026
1027 struct iovec *iovec;
1028 size_t n = 0, k, m;
1029 va_list ap;
1030 int r;
1031
1032 assert(s);
1033 assert(format);
1034
1035 m = N_IOVEC_META_FIELDS + 5 + N_IOVEC_PAYLOAD_FIELDS + client_context_extra_fields_n_iovec(s->my_context) + N_IOVEC_OBJECT_FIELDS;
1036 iovec = newa(struct iovec, m);
1037
1038 assert_cc(3 == LOG_FAC(LOG_DAEMON));
1039 iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_FACILITY=3");
1040 iovec[n++] = IOVEC_MAKE_STRING("SYSLOG_IDENTIFIER=systemd-journald");
1041
1042 iovec[n++] = IOVEC_MAKE_STRING("_TRANSPORT=driver");
1043 assert_cc(6 == LOG_INFO);
1044 iovec[n++] = IOVEC_MAKE_STRING("PRIORITY=6");
1045
1046 if (message_id)
1047 iovec[n++] = IOVEC_MAKE_STRING(message_id);
1048 k = n;
1049
1050 va_start(ap, format);
1051 r = log_format_iovec(iovec, m, &n, false, 0, format, ap);
1052 /* Error handling below */
1053 va_end(ap);
1054
1055 if (r >= 0)
1056 dispatch_message_real(s, iovec, n, m, s->my_context, NULL, LOG_INFO, object_pid);
1057
1058 while (k < n)
1059 free(iovec[k++].iov_base);
1060
1061 if (r < 0) {
1062 /* We failed to format the message. Emit a warning instead. */
1063 char buf[LINE_MAX];
1064
1065 xsprintf(buf, "MESSAGE=Entry printing failed: %s", strerror_safe(r));
1066
1067 n = 3; /* Keep the static SYSLOG_FACILITY, SYSLOG_IDENTIFIER and _TRANSPORT fields, replace the rest */
1068 iovec[n++] = IOVEC_MAKE_STRING("PRIORITY=4");
1069 iovec[n++] = IOVEC_MAKE_STRING(buf);
1070 dispatch_message_real(s, iovec, n, m, s->my_context, NULL, LOG_INFO, object_pid);
1071 }
1072 }
1073
1074 void server_dispatch_message(
1075 Server *s,
1076 struct iovec *iovec, size_t n, size_t m,
1077 ClientContext *c,
1078 const struct timeval *tv,
1079 int priority,
1080 pid_t object_pid) {
1081
1082 uint64_t available = 0;
1083 int rl;
1084
1085 assert(s);
1086 assert(iovec || n == 0);
1087
1088 if (n == 0)
1089 return;
1090
1091 if (LOG_PRI(priority) > s->max_level_store)
1092 return;
1093
1094 /* Stop early in case the information will not be stored
1095 * in a journal. */
1096 if (s->storage == STORAGE_NONE)
1097 return;
1098
1099 if (c && c->unit) {
1100 (void) determine_space(s, &available, NULL);
1101
1102 rl = journal_ratelimit_test(s->ratelimit, c->unit, c->log_ratelimit_interval, c->log_ratelimit_burst, priority & LOG_PRIMASK, available);
1103 if (rl == 0)
1104 return;
1105
1106 /* Write a suppression message if we suppressed something */
1107 if (rl > 1)
1108 server_driver_message(s, c->pid,
1109 "MESSAGE_ID=" SD_MESSAGE_JOURNAL_DROPPED_STR,
1110 LOG_MESSAGE("Suppressed %i messages from %s", rl - 1, c->unit),
1111 "N_DROPPED=%i", rl - 1,
1112 NULL);
1113 }
1114
1115 dispatch_message_real(s, iovec, n, m, c, tv, priority, object_pid);
1116 }
1117
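/* Copy all entries accumulated in the volatile runtime journal into the persistent system journal, then
 * close the runtime journal, remove its directory and create the "flushed" flag file so that a restarted
 * journald knows the flush has already happened. */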
1118 int server_flush_to_var(Server *s, bool require_flag_file) {
1119 char ts[FORMAT_TIMESPAN_MAX];
1120 sd_journal *j = NULL;
1121 const char *fn;
1122 unsigned n = 0;
1123 usec_t start;
1124 int r, k;
1125
1126 assert(s);
1127
1128 if (!IN_SET(s->storage, STORAGE_AUTO, STORAGE_PERSISTENT))
1129 return 0;
1130
1131 if (s->namespace) /* Flushing concept does not exist for namespace instances */
1132 return 0;
1133
1134 if (!s->runtime_journal) /* Nothing to flush? */
1135 return 0;
1136
1137 if (require_flag_file && !flushed_flag_is_set(s))
1138 return 0;
1139
1140 (void) system_journal_open(s, true, false);
1141
1142 if (!s->system_journal)
1143 return 0;
1144
1145 log_debug("Flushing to %s...", s->system_storage.path);
1146
1147 start = now(CLOCK_MONOTONIC);
1148
1149 r = sd_journal_open(&j, SD_JOURNAL_RUNTIME_ONLY);
1150 if (r < 0)
1151 return log_error_errno(r, "Failed to read runtime journal: %m");
1152
1153 sd_journal_set_data_threshold(j, 0);
1154
1155 SD_JOURNAL_FOREACH(j) {
1156 Object *o = NULL;
1157 JournalFile *f;
1158
1159 f = j->current_file;
1160 assert(f && f->current_offset > 0);
1161
1162 n++;
1163
1164 r = journal_file_move_to_object(f, OBJECT_ENTRY, f->current_offset, &o);
1165 if (r < 0) {
1166 log_error_errno(r, "Can't read entry: %m");
1167 goto finish;
1168 }
1169
1170 r = journal_file_copy_entry(f, s->system_journal, o, f->current_offset);
1171 if (r >= 0)
1172 continue;
1173
1174 if (!shall_try_append_again(s->system_journal, r)) {
1175 log_error_errno(r, "Can't write entry: %m");
1176 goto finish;
1177 }
1178
1179 server_rotate(s);
1180 server_vacuum(s, false);
1181
1182 if (!s->system_journal) {
1183 log_notice("Didn't flush runtime journal since rotation of system journal wasn't successful.");
1184 r = -EIO;
1185 goto finish;
1186 }
1187
1188 log_debug("Retrying write.");
1189 r = journal_file_copy_entry(f, s->system_journal, o, f->current_offset);
1190 if (r < 0) {
1191 log_error_errno(r, "Can't write entry: %m");
1192 goto finish;
1193 }
1194 }
1195
1196 r = 0;
1197
1198 finish:
1199 if (s->system_journal)
1200 journal_file_post_change(s->system_journal);
1201
1202 s->runtime_journal = journal_file_close(s->runtime_journal);
1203
1204 if (r >= 0)
1205 (void) rm_rf(s->runtime_storage.path, REMOVE_ROOT);
1206
1207 sd_journal_close(j);
1208
1209 server_driver_message(s, 0, NULL,
1210 LOG_MESSAGE("Time spent on flushing to %s is %s for %u entries.",
1211 s->system_storage.path,
1212 format_timespan(ts, sizeof(ts), now(CLOCK_MONOTONIC) - start, 0),
1213 n),
1214 NULL);
1215
1216 fn = strjoina(s->runtime_directory, "/flushed");
1217 k = touch(fn);
1218 if (k < 0)
1219 log_warning_errno(k, "Failed to touch %s, ignoring: %m", fn);
1220
1221 server_refresh_idle_timer(s);
1222 return r;
1223 }
1224
1225 static int server_relinquish_var(Server *s) {
1226 const char *fn;
1227 assert(s);
1228
1229 if (s->storage == STORAGE_NONE)
1230 return 0;
1231
1232 if (s->namespace) /* Concept does not exist for namespaced instances */
1233 return -EOPNOTSUPP;
1234
1235 if (s->runtime_journal && !s->system_journal)
1236 return 0;
1237
1238 log_debug("Relinquishing %s...", s->system_storage.path);
1239
1240 (void) system_journal_open(s, false, true);
1241
1242 s->system_journal = journal_file_close(s->system_journal);
1243 ordered_hashmap_clear_with_destructor(s->user_journals, journal_file_close);
1244 set_clear_with_destructor(s->deferred_closes, journal_file_close);
1245
1246 fn = strjoina(s->runtime_directory, "/flushed");
1247 if (unlink(fn) < 0 && errno != ENOENT)
1248 log_warning_errno(errno, "Failed to unlink %s, ignoring: %m", fn);
1249
1250 server_refresh_idle_timer(s);
1251 return 0;
1252 }
1253
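/* Event loop callback for the native, syslog and audit datagram sockets: receive a single datagram together
 * with its control data (credentials, timestamp, SELinux label, passed fds) and dispatch it to the handler
 * for the protocol spoken on the socket it arrived on. */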
1254 int server_process_datagram(
1255 sd_event_source *es,
1256 int fd,
1257 uint32_t revents,
1258 void *userdata) {
1259
1260 Server *s = userdata;
1261 struct ucred *ucred = NULL;
1262 struct timeval *tv = NULL;
1263 struct cmsghdr *cmsg;
1264 char *label = NULL;
1265 size_t label_len = 0, m;
1266 struct iovec iovec;
1267 ssize_t n;
1268 int *fds = NULL, v = 0;
1269 size_t n_fds = 0;
1270
1271 /* We use NAME_MAX space for the SELinux label here. The kernel currently enforces no limit, but
1272 * according to suggestions from the SELinux people this will change and it will probably be
1273 * identical to NAME_MAX. For now we use that, but this should be updated one day when the final
1274 * limit is known. */
1275 CMSG_BUFFER_TYPE(CMSG_SPACE(sizeof(struct ucred)) +
1276 CMSG_SPACE(sizeof(struct timeval)) +
1277 CMSG_SPACE(sizeof(int)) + /* fd */
1278 CMSG_SPACE(NAME_MAX) /* selinux label */) control;
1279
1280 union sockaddr_union sa = {};
1281
1282 struct msghdr msghdr = {
1283 .msg_iov = &iovec,
1284 .msg_iovlen = 1,
1285 .msg_control = &control,
1286 .msg_controllen = sizeof(control),
1287 .msg_name = &sa,
1288 .msg_namelen = sizeof(sa),
1289 };
1290
1291 assert(s);
1292 assert(fd == s->native_fd || fd == s->syslog_fd || fd == s->audit_fd);
1293
1294 if (revents != EPOLLIN)
1295 return log_error_errno(SYNTHETIC_ERRNO(EIO),
1296 "Got invalid event from epoll for datagram fd: %" PRIx32,
1297 revents);
1298
1299 /* Try to get the right size, if we can. (Not all sockets support SIOCINQ, hence we just try, but don't rely on
1300 * it.) */
1301 (void) ioctl(fd, SIOCINQ, &v);
1302
1303 /* Fix it up, if it is too small. We use the same fixed value as auditd here. Awful! */
1304 m = PAGE_ALIGN(MAX3((size_t) v + 1,
1305 (size_t) LINE_MAX,
1306 ALIGN(sizeof(struct nlmsghdr)) + ALIGN((size_t) MAX_AUDIT_MESSAGE_LENGTH)) + 1);
1307
1308 if (!GREEDY_REALLOC(s->buffer, s->buffer_size, m))
1309 return log_oom();
1310
1311 iovec = IOVEC_MAKE(s->buffer, s->buffer_size - 1); /* Leave room for trailing NUL we add later */
1312
1313 n = recvmsg_safe(fd, &msghdr, MSG_DONTWAIT|MSG_CMSG_CLOEXEC);
1314 if (IN_SET(n, -EINTR, -EAGAIN))
1315 return 0;
1316 if (n == -EXFULL) {
1317 log_warning("Got message with truncated control data (too many fds sent?), ignoring.");
1318 return 0;
1319 }
1320 if (n < 0)
1321 return log_error_errno(n, "recvmsg() failed: %m");
1322
1323 CMSG_FOREACH(cmsg, &msghdr)
1324 if (cmsg->cmsg_level == SOL_SOCKET &&
1325 cmsg->cmsg_type == SCM_CREDENTIALS &&
1326 cmsg->cmsg_len == CMSG_LEN(sizeof(struct ucred))) {
1327 assert(!ucred);
1328 ucred = (struct ucred*) CMSG_DATA(cmsg);
1329 } else if (cmsg->cmsg_level == SOL_SOCKET &&
1330 cmsg->cmsg_type == SCM_SECURITY) {
1331 assert(!label);
1332 label = (char*) CMSG_DATA(cmsg);
1333 label_len = cmsg->cmsg_len - CMSG_LEN(0);
1334 } else if (cmsg->cmsg_level == SOL_SOCKET &&
1335 cmsg->cmsg_type == SO_TIMESTAMP &&
1336 cmsg->cmsg_len == CMSG_LEN(sizeof(struct timeval))) {
1337 assert(!tv);
1338 tv = (struct timeval*) CMSG_DATA(cmsg);
1339 } else if (cmsg->cmsg_level == SOL_SOCKET &&
1340 cmsg->cmsg_type == SCM_RIGHTS) {
1341 assert(!fds);
1342 fds = (int*) CMSG_DATA(cmsg);
1343 n_fds = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(int);
1344 }
1345
1346 /* And a trailing NUL, just in case */
1347 s->buffer[n] = 0;
1348
1349 if (fd == s->syslog_fd) {
1350 if (n > 0 && n_fds == 0)
1351 server_process_syslog_message(s, s->buffer, n, ucred, tv, label, label_len);
1352 else if (n_fds > 0)
1353 log_warning("Got file descriptors via syslog socket. Ignoring.");
1354
1355 } else if (fd == s->native_fd) {
1356 if (n > 0 && n_fds == 0)
1357 server_process_native_message(s, s->buffer, n, ucred, tv, label, label_len);
1358 else if (n == 0 && n_fds == 1)
1359 server_process_native_file(s, fds[0], ucred, tv, label, label_len);
1360 else if (n_fds > 0)
1361 log_warning("Got too many file descriptors via native socket. Ignoring.");
1362
1363 } else {
1364 assert(fd == s->audit_fd);
1365
1366 if (n > 0 && n_fds == 0)
1367 server_process_audit_message(s, s->buffer, n, ucred, &sa, msghdr.msg_namelen);
1368 else if (n_fds > 0)
1369 log_warning("Got file descriptors via audit socket. Ignoring.");
1370 }
1371
1372 close_many(fds, n_fds);
1373
1374 server_refresh_idle_timer(s);
1375 return 0;
1376 }
1377
1378 static void server_full_flush(Server *s) {
1379 assert(s);
1380
1381 (void) server_flush_to_var(s, false);
1382 server_sync(s);
1383 server_vacuum(s, false);
1384
1385 server_space_usage_message(s, NULL);
1386
1387 server_refresh_idle_timer(s);
1388 }
1389
1390 static int dispatch_sigusr1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1391 Server *s = userdata;
1392
1393 assert(s);
1394
1395 if (s->namespace) {
1396 log_error("Received SIGUSR1 signal from PID " PID_FMT ", but flushing runtime journals not supported for namespaced instances.", si->ssi_pid);
1397 return 0;
1398 }
1399
1400 log_info("Received SIGUSR1 signal from PID " PID_FMT ", as request to flush runtime journal.", si->ssi_pid);
1401 server_full_flush(s);
1402
1403 return 0;
1404 }
1405
1406 static void server_full_rotate(Server *s) {
1407 const char *fn;
1408 int r;
1409
1410 assert(s);
1411
1412 server_rotate(s);
1413 server_vacuum(s, true);
1414
1415 if (s->system_journal)
1416 patch_min_use(&s->system_storage);
1417 if (s->runtime_journal)
1418 patch_min_use(&s->runtime_storage);
1419
1420 /* Let clients know when the most recent rotation happened. */
1421 fn = strjoina(s->runtime_directory, "/rotated");
1422 r = write_timestamp_file_atomic(fn, now(CLOCK_MONOTONIC));
1423 if (r < 0)
1424 log_warning_errno(r, "Failed to write %s, ignoring: %m", fn);
1425 }
1426
1427 static int dispatch_sigusr2(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1428 Server *s = userdata;
1429
1430 assert(s);
1431
1432 log_info("Received SIGUSR2 signal from PID " PID_FMT ", as request to rotate journal.", si->ssi_pid);
1433 server_full_rotate(s);
1434
1435 return 0;
1436 }
1437
1438 static int dispatch_sigterm(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1439 Server *s = userdata;
1440
1441 assert(s);
1442
1443 log_received_signal(LOG_INFO, si);
1444
1445 sd_event_exit(s->event, 0);
1446 return 0;
1447 }
1448
1449 static void server_full_sync(Server *s) {
1450 const char *fn;
1451 int r;
1452
1453 assert(s);
1454
1455 server_sync(s);
1456
1457 /* Let clients know when the most recent sync happened. */
1458 fn = strjoina(s->runtime_directory, "/synced");
1459 r = write_timestamp_file_atomic(fn, now(CLOCK_MONOTONIC));
1460 if (r < 0)
1461 log_warning_errno(r, "Failed to write %s, ignoring: %m", fn);
1462
1463 return;
1464 }
1465
1466 static int dispatch_sigrtmin1(sd_event_source *es, const struct signalfd_siginfo *si, void *userdata) {
1467 Server *s = userdata;
1468
1469 assert(s);
1470
1471 log_debug("Received SIGRTMIN1 signal from PID " PID_FMT ", as request to sync.", si->ssi_pid );
1472 server_full_sync(s);
1473
1474 return 0;
1475 }
1476
1477 static int setup_signals(Server *s) {
1478 int r;
1479
1480 assert(s);
1481
1482 assert_se(sigprocmask_many(SIG_SETMASK, NULL, SIGINT, SIGTERM, SIGUSR1, SIGUSR2, SIGRTMIN+1, -1) >= 0);
1483
1484 r = sd_event_add_signal(s->event, &s->sigusr1_event_source, SIGUSR1, dispatch_sigusr1, s);
1485 if (r < 0)
1486 return r;
1487
1488 r = sd_event_add_signal(s->event, &s->sigusr2_event_source, SIGUSR2, dispatch_sigusr2, s);
1489 if (r < 0)
1490 return r;
1491
1492 r = sd_event_add_signal(s->event, &s->sigterm_event_source, SIGTERM, dispatch_sigterm, s);
1493 if (r < 0)
1494 return r;
1495
1496 /* Let's process SIGTERM late, so that we flush all queued messages to disk before we exit */
1497 r = sd_event_source_set_priority(s->sigterm_event_source, SD_EVENT_PRIORITY_NORMAL+20);
1498 if (r < 0)
1499 return r;
1500
1501 /* When journald is invoked on the terminal (when debugging), it's useful if C-c is handled
1502 * equivalently to SIGTERM. */
1503 r = sd_event_add_signal(s->event, &s->sigint_event_source, SIGINT, dispatch_sigterm, s);
1504 if (r < 0)
1505 return r;
1506
1507 r = sd_event_source_set_priority(s->sigint_event_source, SD_EVENT_PRIORITY_NORMAL+20);
1508 if (r < 0)
1509 return r;
1510
1511 /* SIGRTMIN+1 causes an immediate sync. We process this very late, so that everything else queued at
1512 * this point is really written to disk. Clients can watch /run/systemd/journal/synced with inotify
1513 * until its mtime changes to see when a sync happened. */
1514 r = sd_event_add_signal(s->event, &s->sigrtmin1_event_source, SIGRTMIN+1, dispatch_sigrtmin1, s);
1515 if (r < 0)
1516 return r;
1517
1518 r = sd_event_source_set_priority(s->sigrtmin1_event_source, SD_EVENT_PRIORITY_NORMAL+15);
1519 if (r < 0)
1520 return r;
1521
1522 return 0;
1523 }
1524
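/* Parse the systemd.journald.* switches from the kernel command line; unparsable values are logged and
 * ignored rather than treated as fatal. */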
1525 static int parse_proc_cmdline_item(const char *key, const char *value, void *data) {
1526 Server *s = data;
1527 int r;
1528
1529 assert(s);
1530
1531 if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_syslog")) {
1532
1533 r = value ? parse_boolean(value) : true;
1534 if (r < 0)
1535 log_warning("Failed to parse forward to syslog switch \"%s\". Ignoring.", value);
1536 else
1537 s->forward_to_syslog = r;
1538
1539 } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_kmsg")) {
1540
1541 r = value ? parse_boolean(value) : true;
1542 if (r < 0)
1543 log_warning("Failed to parse forward to kmsg switch \"%s\". Ignoring.", value);
1544 else
1545 s->forward_to_kmsg = r;
1546
1547 } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_console")) {
1548
1549 r = value ? parse_boolean(value) : true;
1550 if (r < 0)
1551 log_warning("Failed to parse forward to console switch \"%s\". Ignoring.", value);
1552 else
1553 s->forward_to_console = r;
1554
1555 } else if (proc_cmdline_key_streq(key, "systemd.journald.forward_to_wall")) {
1556
1557 r = value ? parse_boolean(value) : true;
1558 if (r < 0)
1559 log_warning("Failed to parse forward to wall switch \"%s\". Ignoring.", value);
1560 else
1561 s->forward_to_wall = r;
1562
1563 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_console")) {
1564
1565 if (proc_cmdline_value_missing(key, value))
1566 return 0;
1567
1568 r = log_level_from_string(value);
1569 if (r < 0)
1570 log_warning("Failed to parse max level console value \"%s\". Ignoring.", value);
1571 else
1572 s->max_level_console = r;
1573
1574 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_store")) {
1575
1576 if (proc_cmdline_value_missing(key, value))
1577 return 0;
1578
1579 r = log_level_from_string(value);
1580 if (r < 0)
1581 log_warning("Failed to parse max level store value \"%s\". Ignoring.", value);
1582 else
1583 s->max_level_store = r;
1584
1585 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_syslog")) {
1586
1587 if (proc_cmdline_value_missing(key, value))
1588 return 0;
1589
1590 r = log_level_from_string(value);
1591 if (r < 0)
1592 log_warning("Failed to parse max level syslog value \"%s\". Ignoring.", value);
1593 else
1594 s->max_level_syslog = r;
1595
1596 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_kmsg")) {
1597
1598 if (proc_cmdline_value_missing(key, value))
1599 return 0;
1600
1601 r = log_level_from_string(value);
1602 if (r < 0)
1603 log_warning("Failed to parse max level kmsg value \"%s\". Ignoring.", value);
1604 else
1605 s->max_level_kmsg = r;
1606
1607 } else if (proc_cmdline_key_streq(key, "systemd.journald.max_level_wall")) {
1608
1609 if (proc_cmdline_value_missing(key, value))
1610 return 0;
1611
1612 r = log_level_from_string(value);
1613 if (r < 0)
1614 log_warning("Failed to parse max level wall value \"%s\". Ignoring.", value);
1615 else
1616 s->max_level_wall = r;
1617
1618 } else if (startswith(key, "systemd.journald"))
1619 log_warning("Unknown journald kernel command line option \"%s\". Ignoring.", key);
1620
1621 /* do not warn about state here, since probably systemd already did */
1622 return 0;
1623 }
1624
1625 static int server_parse_config_file(Server *s) {
1626 int r;
1627
1628 assert(s);
1629
1630 if (s->namespace) {
1631 const char *namespaced;
1632
1633 /* If we are running in namespace mode, load the namespace specific configuration file, and nothing else */
1634 namespaced = strjoina(PKGSYSCONFDIR "/journald@", s->namespace, ".conf");
1635
1636 r = config_parse(NULL,
1637 namespaced, NULL,
1638 "Journal\0",
1639 config_item_perf_lookup, journald_gperf_lookup,
1640 CONFIG_PARSE_WARN, s,
1641 NULL);
1642 if (r < 0)
1643 return r;
1644
1645 return 0;
1646 }
1647
1648 return config_parse_many_nulstr(
1649 PKGSYSCONFDIR "/journald.conf",
1650 CONF_PATHS_NULSTR("systemd/journald.conf.d"),
1651 "Journal\0",
1652 config_item_perf_lookup, journald_gperf_lookup,
1653 CONFIG_PARSE_WARN, s, NULL);
1654 }
1655
1656 static int server_dispatch_sync(sd_event_source *es, usec_t t, void *userdata) {
1657 Server *s = userdata;
1658
1659 assert(s);
1660
1661 server_sync(s);
1662 return 0;
1663 }
1664
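/* Make sure the journal gets synced to disk soon: immediately for messages of priority LOG_CRIT or higher,
 * otherwise by (re)arming the one-shot sync timer. */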
1665 int server_schedule_sync(Server *s, int priority) {
1666 int r;
1667
1668 assert(s);
1669
1670 if (priority <= LOG_CRIT) {
1671 /* Immediately sync to disk when this is of priority CRIT, ALERT, EMERG */
1672 server_sync(s);
1673 return 0;
1674 }
1675
1676 if (s->sync_scheduled)
1677 return 0;
1678
1679 if (s->sync_interval_usec > 0) {
1680
1681 if (!s->sync_event_source) {
1682 r = sd_event_add_time_relative(
1683 s->event,
1684 &s->sync_event_source,
1685 CLOCK_MONOTONIC,
1686 s->sync_interval_usec, 0,
1687 server_dispatch_sync, s);
1688 if (r < 0)
1689 return r;
1690
1691 r = sd_event_source_set_priority(s->sync_event_source, SD_EVENT_PRIORITY_IMPORTANT);
1692 } else {
1693 r = sd_event_source_set_time_relative(s->sync_event_source, s->sync_interval_usec);
1694 if (r < 0)
1695 return r;
1696
1697 r = sd_event_source_set_enabled(s->sync_event_source, SD_EVENT_ONESHOT);
1698 }
1699 if (r < 0)
1700 return r;
1701
1702 s->sync_scheduled = true;
1703 }
1704
1705 return 0;
1706 }
1707
1708 static int dispatch_hostname_change(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
1709 Server *s = userdata;
1710
1711 assert(s);
1712
1713 server_cache_hostname(s);
1714 return 0;
1715 }
1716
1717 static int server_open_hostname(Server *s) {
1718 int r;
1719
1720 assert(s);
1721
1722 s->hostname_fd = open("/proc/sys/kernel/hostname",
1723 O_RDONLY|O_CLOEXEC|O_NONBLOCK|O_NOCTTY);
1724 if (s->hostname_fd < 0)
1725 return log_error_errno(errno, "Failed to open /proc/sys/kernel/hostname: %m");
1726
1727 r = sd_event_add_io(s->event, &s->hostname_event_source, s->hostname_fd, 0, dispatch_hostname_change, s);
1728 if (r < 0) {
1729 /* kernels prior to 3.2 don't support polling this file. Ignore
1730 * the failure. */
1731 if (r == -EPERM) {
1732 log_warning_errno(r, "Failed to register hostname fd in event loop, ignoring: %m");
1733 s->hostname_fd = safe_close(s->hostname_fd);
1734 return 0;
1735 }
1736
1737 return log_error_errno(r, "Failed to register hostname fd in event loop: %m");
1738 }
1739
1740 r = sd_event_source_set_priority(s->hostname_event_source, SD_EVENT_PRIORITY_IMPORTANT-10);
1741 if (r < 0)
1742 return log_error_errno(r, "Failed to adjust priority of hostname event source: %m");
1743
1744 return 0;
1745 }
1746
1747 static int dispatch_notify_event(sd_event_source *es, int fd, uint32_t revents, void *userdata) {
1748 Server *s = userdata;
1749 int r;
1750
1751 assert(s);
1752 assert(s->notify_event_source == es);
1753 assert(s->notify_fd == fd);
1754
1755 /* The $NOTIFY_SOCKET is writable again, now send exactly one
1756 * message on it. Either it's the watchdog event, the initial
1757 * READY=1 event or a stdout stream event. If there's nothing
1758 * to write anymore, turn our event source off. The next time
1759 * there's something to send it will be turned on again. */
1760
1761 if (!s->sent_notify_ready) {
1762 static const char p[] =
1763 "READY=1\n"
1764 "STATUS=Processing requests...";
1765 ssize_t l;
1766
1767 l = send(s->notify_fd, p, strlen(p), MSG_DONTWAIT);
1768 if (l < 0) {
1769 if (errno == EAGAIN)
1770 return 0;
1771
1772 return log_error_errno(errno, "Failed to send READY=1 notification message: %m");
1773 }
1774
1775 s->sent_notify_ready = true;
1776 log_debug("Sent READY=1 notification.");
1777
1778 } else if (s->send_watchdog) {
1779
1780 static const char p[] =
1781 "WATCHDOG=1";
1782
1783 ssize_t l;
1784
1785 l = send(s->notify_fd, p, strlen(p), MSG_DONTWAIT);
1786 if (l < 0) {
1787 if (errno == EAGAIN)
1788 return 0;
1789
1790 return log_error_errno(errno, "Failed to send WATCHDOG=1 notification message: %m");
1791 }
1792
1793 s->send_watchdog = false;
1794 log_debug("Sent WATCHDOG=1 notification.");
1795
1796 } else if (s->stdout_streams_notify_queue)
1797 /* Dispatch one stream notification event */
1798 stdout_stream_send_notify(s->stdout_streams_notify_queue);
1799
1800 /* Leave us enabled if there's still more to do. */
1801 if (s->send_watchdog || s->stdout_streams_notify_queue)
1802 return 0;
1803
1804 /* There was nothing to do anymore, let's turn ourselves off. */
1805 r = sd_event_source_set_enabled(es, SD_EVENT_OFF);
1806 if (r < 0)
1807 return log_error_errno(r, "Failed to turn off notify event source: %m");
1808
1809 return 0;
1810 }
1811
1812 static int dispatch_watchdog(sd_event_source *es, uint64_t usec, void *userdata) {
1813 Server *s = userdata;
1814 int r;
1815
1816 assert(s);
1817
1818 s->send_watchdog = true;
1819
1820 r = sd_event_source_set_enabled(s->notify_event_source, SD_EVENT_ON);
1821 if (r < 0)
1822 log_warning_errno(r, "Failed to turn on notify event source: %m");
1823
1824 r = sd_event_source_set_time(s->watchdog_event_source, usec + s->watchdog_usec / 2);
1825 if (r < 0)
1826 return log_error_errno(r, "Failed to restart watchdog event source: %m");
1827
1828 r = sd_event_source_set_enabled(s->watchdog_event_source, SD_EVENT_ON);
1829 if (r < 0)
1830 return log_error_errno(r, "Failed to enable watchdog event source: %m");
1831
1832 return 0;
1833 }
1834
1835 static int server_connect_notify(Server *s) {
1836 union sockaddr_union sa;
1837 socklen_t sa_len;
1838 const char *e;
1839 int r;
1840
1841 assert(s);
1842 assert(s->notify_fd < 0);
1843 assert(!s->notify_event_source);
1844
1845 /*
1846 * So here's the problem: we'd like to send notification messages to PID 1, but we cannot do that via
1847 * sd_notify(), since that's synchronous, and we might end up blocking on it. Specifically: given
1848 * that PID 1 might block on dbus-daemon during IPC, and dbus-daemon is logging to us, and might
1849 * hence block on us, we might end up in a deadlock if we block on sending PID 1 notification
1850 * messages, thus creating a full blocking cycle. To avoid this, let's create a non-blocking
1851 * socket, connect it to the notification socket, and then wait for POLLOUT before we send
1852 * anything. This should effectively avoid any deadlocks, as we'll never block on PID 1, hence PID 1
1853 * can safely block on dbus-daemon which can safely block on us again.
1854 *
1855 * Don't think that this issue is real? It is, see: https://github.com/systemd/systemd/issues/1505
1856 */
1857
1858 e = getenv("NOTIFY_SOCKET");
1859 if (!e)
1860 return 0;
1861
1862 r = sockaddr_un_set_path(&sa.un, e);
1863 if (r < 0)
1864 return log_error_errno(r, "NOTIFY_SOCKET set to invalid value '%s': %m", e);
1865 sa_len = r;
1866
1867 s->notify_fd = socket(AF_UNIX, SOCK_DGRAM|SOCK_CLOEXEC|SOCK_NONBLOCK, 0);
1868 if (s->notify_fd < 0)
1869 return log_error_errno(errno, "Failed to create notify socket: %m");
1870
1871 (void) fd_inc_sndbuf(s->notify_fd, NOTIFY_SNDBUF_SIZE);
1872
1873 r = connect(s->notify_fd, &sa.sa, sa_len);
1874 if (r < 0)
1875 return log_error_errno(errno, "Failed to connect to notify socket: %m");
1876
1877 r = sd_event_add_io(s->event, &s->notify_event_source, s->notify_fd, EPOLLOUT, dispatch_notify_event, s);
1878 if (r < 0)
1879 return log_error_errno(r, "Failed to watch notification socket: %m");
1880
1881 if (sd_watchdog_enabled(false, &s->watchdog_usec) > 0) {
1882 s->send_watchdog = true;
1883
1884 r = sd_event_add_time_relative(s->event, &s->watchdog_event_source, CLOCK_MONOTONIC, s->watchdog_usec/2, s->watchdog_usec/4, dispatch_watchdog, s);
1885 if (r < 0)
1886 return log_error_errno(r, "Failed to add watchdog time event: %m");
1887 }
1888
1889 /* The notify event source should fire pretty soon, and we'll use that to send the READY=1 event. */
1890
1891 return 0;
1892 }
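/* For reference: PID 1 passes $NOTIFY_SOCKET to services that are expected to send notifications
 * (e.g. Type=notify and/or WatchdogSec= set), and the address is typically an AF_UNIX path such as
 * /run/systemd/notify. The code above deliberately avoids sd_notify(3): it opens its own SOCK_DGRAM
 * socket with SOCK_NONBLOCK, bumps the send buffer, and only writes from dispatch_notify_event()
 * once EPOLLOUT indicates the socket is writable, so that journald never blocks on PID 1. */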
1893
1894 static int synchronize_second_half(sd_event_source *event_source, void *userdata) {
1895 Varlink *link = userdata;
1896 Server *s;
1897 int r;
1898
1899 assert(link);
1900 assert_se(s = varlink_get_userdata(link));
1901
1902 /* This is the "second half" of the Synchronize() varlink method. This function is called as a deferred
1903 * event source at a low priority to ensure the synchronization completes after all queued log
1904 * messages are processed. */
1905 server_full_sync(s);
1906
1907 /* Let's get rid of the event source now, by marking it as non-floating again. It then holds no
1908 * reference anymore and is destroyed immediately after we return from this function, i.e. from
1909 * this event source handler. */
1910 r = sd_event_source_set_floating(event_source, false);
1911 if (r < 0)
1912 return log_error_errno(r, "Failed to mark event source as non-floating: %m");
1913
1914 return varlink_reply(link, NULL);
1915 }
1916
1917 static void synchronize_destroy(void *userdata) {
1918 varlink_unref(userdata);
1919 }
1920
1921 static int vl_method_synchronize(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
1922 _cleanup_(sd_event_source_unrefp) sd_event_source *event_source = NULL;
1923 Server *s = userdata;
1924 int r;
1925
1926 assert(link);
1927 assert(s);
1928
1929 if (json_variant_elements(parameters) > 0)
1930 return varlink_error_invalid_parameter(link, parameters);
1931
1932 log_info("Received client request to rotate journal.");
1933
1934 /* We don't do the main work now, but instead enqueue a deferred event loop job which will do
1935 * it. That job is scheduled at low priority, so that we return from this method call only after
1936 * all queued but not yet processed log messages are written to disk, which makes the return of
1937 * this method call usable as a nice synchronization point. */
1938 r = sd_event_add_defer(s->event, &event_source, synchronize_second_half, link);
1939 if (r < 0)
1940 return log_error_errno(r, "Failed to allocate defer event source: %m");
1941
1942 r = sd_event_source_set_destroy_callback(event_source, synchronize_destroy);
1943 if (r < 0)
1944 return log_error_errno(r, "Failed to set event source destroy callback: %m");
1945
1946 varlink_ref(link); /* The varlink object is now left to the destroy callback to unref */
1947
1948 r = sd_event_source_set_priority(event_source, SD_EVENT_PRIORITY_NORMAL+15);
1949 if (r < 0)
1950 return log_error_errno(r, "Failed to set defer event source priority: %m");
1951
1952 /* Give up ownership of this event source. It will now be destroyed along with the event loop itself,
1953 * unless it destroys itself earlier. */
1954 r = sd_event_source_set_floating(event_source, true);
1955 if (r < 0)
1956 return log_error_errno(r, "Failed to mark event source as floating: %m");
1957
1958 (void) sd_event_source_set_description(event_source, "deferred-sync");
1959
1960 return 0;
1961 }
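/* Net effect: a client calling io.systemd.Journal.Synchronize gets its reply only after every log
 * message queued before the call has been processed and flushed to disk by server_full_sync(). This
 * is presumably what tools like "journalctl --sync" rely on when they need a hard synchronization
 * point. */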
1962
1963 static int vl_method_rotate(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
1964 Server *s = userdata;
1965
1966 assert(link);
1967 assert(s);
1968
1969 if (json_variant_elements(parameters) > 0)
1970 return varlink_error_invalid_parameter(link, parameters);
1971
1972 log_info("Received client request to rotate journal.");
1973 server_full_rotate(s);
1974
1975 return varlink_reply(link, NULL);
1976 }
1977
1978 static int vl_method_flush_to_var(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
1979 Server *s = userdata;
1980
1981 assert(link);
1982 assert(s);
1983
1984 if (json_variant_elements(parameters) > 0)
1985 return varlink_error_invalid_parameter(link, parameters);
1986 if (s->namespace)
1987 return varlink_error(link, "io.systemd.Journal.NotSupportedByNamespaces", NULL);
1988
1989 log_info("Received client request to flush runtime journal.");
1990 server_full_flush(s);
1991
1992 return varlink_reply(link, NULL);
1993 }
1994
1995 static int vl_method_relinquish_var(Varlink *link, JsonVariant *parameters, VarlinkMethodFlags flags, void *userdata) {
1996 Server *s = userdata;
1997
1998 assert(link);
1999 assert(s);
2000
2001 if (json_variant_elements(parameters) > 0)
2002 return varlink_error_invalid_parameter(link, parameters);
2003 if (s->namespace)
2004 return varlink_error(link, "io.systemd.Journal.NotSupportedByNamespaces", NULL);
2005
2006 log_info("Received client request to relinquish %s access.", s->system_storage.path);
2007 server_relinquish_var(s);
2008
2009 return varlink_reply(link, NULL);
2010 }
2011
2012 static int vl_connect(VarlinkServer *server, Varlink *link, void *userdata) {
2013 Server *s = userdata;
2014
2015 assert(server);
2016 assert(link);
2017 assert(s);
2018
2019 (void) server_start_or_stop_idle_timer(s); /* maybe we are no longer idle */
2020
2021 return 0;
2022 }
2023
2024 static void vl_disconnect(VarlinkServer *server, Varlink *link, void *userdata) {
2025 Server *s = userdata;
2026
2027 assert(server);
2028 assert(link);
2029 assert(s);
2030
2031 (void) server_start_or_stop_idle_timer(s); /* maybe we are idle now */
2032 }
2033
2034 static int server_open_varlink(Server *s, const char *socket, int fd) {
2035 int r;
2036
2037 assert(s);
2038
2039 r = varlink_server_new(&s->varlink_server, VARLINK_SERVER_ROOT_ONLY);
2040 if (r < 0)
2041 return r;
2042
2043 varlink_server_set_userdata(s->varlink_server, s);
2044
2045 r = varlink_server_bind_method_many(
2046 s->varlink_server,
2047 "io.systemd.Journal.Synchronize", vl_method_synchronize,
2048 "io.systemd.Journal.Rotate", vl_method_rotate,
2049 "io.systemd.Journal.FlushToVar", vl_method_flush_to_var,
2050 "io.systemd.Journal.RelinquishVar", vl_method_relinquish_var);
2051 if (r < 0)
2052 return r;
2053
2054 r = varlink_server_bind_connect(s->varlink_server, vl_connect);
2055 if (r < 0)
2056 return r;
2057
2058 r = varlink_server_bind_disconnect(s->varlink_server, vl_disconnect);
2059 if (r < 0)
2060 return r;
2061
2062 if (fd < 0)
2063 r = varlink_server_listen_address(s->varlink_server, socket, 0600);
2064 else
2065 r = varlink_server_listen_fd(s->varlink_server, fd);
2066 if (r < 0)
2067 return r;
2068
2069 r = varlink_server_attach_event(s->varlink_server, s->event, SD_EVENT_PRIORITY_NORMAL);
2070 if (r < 0)
2071 return r;
2072
2073 return 0;
2074 }
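/* Illustrative client sketch (not part of this file): the varlink protocol is plain JSON over a
 * stream socket, with every message terminated by a single NUL byte. Assuming the default socket
 * path constructed in server_init() below, a minimal Synchronize() call could look roughly like:
 *
 *     int fd = socket(AF_UNIX, SOCK_STREAM|SOCK_CLOEXEC, 0);
 *     union sockaddr_union sa = { .un.sun_family = AF_UNIX,
 *                                 .un.sun_path = "/run/systemd/journal/io.systemd.journal" };
 *     (void) connect(fd, &sa.sa, SOCKADDR_UN_LEN(sa.un));
 *     const char request[] = "{\"method\":\"io.systemd.Journal.Synchronize\"}";
 *     (void) write(fd, request, sizeof(request));
 *
 * Note that sizeof(request) deliberately includes the terminating NUL byte, which is what delimits
 * varlink messages. The reply is again a NUL-terminated JSON object; one without an "error" field
 * indicates success. Since the server is created with VARLINK_SERVER_ROOT_ONLY, only privileged
 * clients are accepted. */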
2075
2076 static bool server_is_idle(Server *s) {
2077 assert(s);
2078
2079 /* The server for the main namespace is never idle */
2080 if (!s->namespace)
2081 return false;
2082
2083 /* If a retention maximum larger than the idle timeout is configured, we need to stay running to
2084 * enforce it, hence turn off the idle logic. */
2085 if (s->max_retention_usec > IDLE_TIMEOUT_USEC)
2086 return false;
2087
2088 /* We aren't idle if we have a varlink client */
2089 if (varlink_server_current_connections(s->varlink_server) > 0)
2090 return false;
2091
2092 /* If we have stdout streams we aren't idle */
2093 if (s->n_stdout_streams > 0)
2094 return false;
2095
2096 return true;
2097 }
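/* In other words: only namespaced instances, which are presumably socket-activated and can be
 * started again on demand, ever consider themselves idle, and even then only if no varlink client
 * is connected, no stdout stream is open, and no retention deadline forces us to stay around. */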
2098
2099 static int server_idle_handler(sd_event_source *source, uint64_t usec, void *userdata) {
2100 Server *s = userdata;
2101
2102 assert(source);
2103 assert(s);
2104
2105 log_debug("Server is idle, exiting.");
2106 sd_event_exit(s->event, 0);
2107 return 0;
2108 }
2109
2110 int server_start_or_stop_idle_timer(Server *s) {
2111 _cleanup_(sd_event_source_unrefp) sd_event_source *source = NULL;
2112 int r;
2113
2114 assert(s);
2115
2116 if (!server_is_idle(s)) {
2117 s->idle_event_source = sd_event_source_disable_unref(s->idle_event_source);
2118 return 0;
2119 }
2120
2121 if (s->idle_event_source)
2122 return 1;
2123
2124 r = sd_event_add_time_relative(s->event, &source, CLOCK_MONOTONIC, IDLE_TIMEOUT_USEC, 0, server_idle_handler, s);
2125 if (r < 0)
2126 return log_error_errno(r, "Failed to allocate idle timer: %m");
2127
2128 r = sd_event_source_set_priority(source, SD_EVENT_PRIORITY_IDLE);
2129 if (r < 0)
2130 return log_error_errno(r, "Failed to set idle timer priority: %m");
2131
2132 (void) sd_event_source_set_description(source, "idle-timer");
2133
2134 s->idle_event_source = TAKE_PTR(source);
2135 return 1;
2136 }
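/* Side note: sd_event_add_time_relative() is a convenience wrapper around the absolute-time API. A
 * roughly equivalent open-coded version (sketch only) would be:
 *
 *     uint64_t now_usec;
 *     (void) sd_event_now(s->event, CLOCK_MONOTONIC, &now_usec);
 *     r = sd_event_add_time(s->event, &source, CLOCK_MONOTONIC,
 *                           usec_add(now_usec, IDLE_TIMEOUT_USEC), 0,
 *                           server_idle_handler, s);
 *
 * The relative variant merely saves the sd_event_now() boilerplate and takes care of the
 * overflow-safe addition itself. */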
2137
2138 int server_refresh_idle_timer(Server *s) {
2139 int r;
2140
2141 assert(s);
2142
2143 if (!s->idle_event_source)
2144 return 0;
2145
2146 r = sd_event_source_set_time_relative(s->idle_event_source, IDLE_TIMEOUT_USEC);
2147 if (r < 0)
2148 return log_error_errno(r, "Failed to refresh idle timer: %m");
2149
2150 return 1;
2151 }
2152
2153 static int set_namespace(Server *s, const char *namespace) {
2154 assert(s);
2155
2156 if (!namespace)
2157 return 0;
2158
2159 if (!log_namespace_name_valid(namespace))
2160 return log_error_errno(SYNTHETIC_ERRNO(EINVAL), "Specified namespace name not valid, refusing: %s", namespace);
2161
2162 s->namespace = strdup(namespace);
2163 if (!s->namespace)
2164 return log_oom();
2165
2166 s->namespace_field = strjoin("_NAMESPACE=", namespace);
2167 if (!s->namespace_field)
2168 return log_oom();
2169
2170 return 1;
2171 }
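/* Example (illustrative): a unit configured with LogNamespace=foo makes PID 1 start a separate
 * systemd-journald@foo.service instance, which ends up here with namespace="foo". Entries logged to
 * that instance carry an additional _NAMESPACE=foo field (s->namespace_field above), and
 * server_init() below derives per-namespace locations such as /run/systemd/journal.foo and
 * /var/log/journal/<machine-id>.foo from the same string. */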
2172
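/* server_init() brings up a Server object from scratch: it fills in compile-time defaults, applies
 * journald.conf and (for the main instance only) kernel command line settings, adopts any sockets
 * handed over via socket activation, opens the remaining sockets and /dev/kmsg itself, connects the
 * non-blocking notification socket, and finally opens the journal files and arms the idle timer. */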
2173 int server_init(Server *s, const char *namespace) {
2174 const char *native_socket, *syslog_socket, *stdout_socket, *varlink_socket, *e;
2175 _cleanup_fdset_free_ FDSet *fds = NULL;
2176 int n, r, fd, varlink_fd = -1;
2177 bool no_sockets;
2178
2179 assert(s);
2180
2181 *s = (Server) {
2182 .syslog_fd = -1,
2183 .native_fd = -1,
2184 .stdout_fd = -1,
2185 .dev_kmsg_fd = -1,
2186 .audit_fd = -1,
2187 .hostname_fd = -1,
2188 .notify_fd = -1,
2189
2190 .compress.enabled = true,
2191 .compress.threshold_bytes = (uint64_t) -1,
2192 .seal = true,
2193
2194 .set_audit = true,
2195
2196 .watchdog_usec = USEC_INFINITY,
2197
2198 .sync_interval_usec = DEFAULT_SYNC_INTERVAL_USEC,
2199 .sync_scheduled = false,
2200
2201 .ratelimit_interval = DEFAULT_RATE_LIMIT_INTERVAL,
2202 .ratelimit_burst = DEFAULT_RATE_LIMIT_BURST,
2203
2204 .forward_to_wall = true,
2205
2206 .max_file_usec = DEFAULT_MAX_FILE_USEC,
2207
2208 .max_level_store = LOG_DEBUG,
2209 .max_level_syslog = LOG_DEBUG,
2210 .max_level_kmsg = LOG_NOTICE,
2211 .max_level_console = LOG_INFO,
2212 .max_level_wall = LOG_EMERG,
2213
2214 .line_max = DEFAULT_LINE_MAX,
2215
2216 .runtime_storage.name = "Runtime Journal",
2217 .system_storage.name = "System Journal",
2218 };
2219
2220 r = set_namespace(s, namespace);
2221 if (r < 0)
2222 return r;
2223
2224 /* By default, only read from /dev/kmsg if we are the main namespace */
2225 s->read_kmsg = !s->namespace;
2226 s->storage = s->namespace ? STORAGE_PERSISTENT : STORAGE_AUTO;
2227
2228 journal_reset_metrics(&s->system_storage.metrics);
2229 journal_reset_metrics(&s->runtime_storage.metrics);
2230
2231 server_parse_config_file(s);
2232
2233 if (!s->namespace) {
2234 /* Parse kernel command line, but only if we are not a namespace instance */
2235 r = proc_cmdline_parse(parse_proc_cmdline_item, s, PROC_CMDLINE_STRIP_RD_PREFIX);
2236 if (r < 0)
2237 log_warning_errno(r, "Failed to parse kernel command line, ignoring: %m");
2238 }
2239
2240 if (!!s->ratelimit_interval != !!s->ratelimit_burst) { /* One set to 0 and the other not? */
2241 log_debug("Setting both rate limit interval and burst from "USEC_FMT",%u to 0,0",
2242 s->ratelimit_interval, s->ratelimit_burst);
2243 s->ratelimit_interval = s->ratelimit_burst = 0;
2244 }
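/* Example: with the defaults assigned above this means a given unit may log up to 10000
 * messages within any 30s window before further messages are suppressed (and accounted for).
 * Setting either RateLimitIntervalSec= or RateLimitBurst= to 0 disables rate limiting
 * entirely, which is why a half-configured pair is normalized to 0,0 here. */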
2245
2246 e = getenv("RUNTIME_DIRECTORY");
2247 if (e)
2248 s->runtime_directory = strdup(e);
2249 else if (s->namespace)
2250 s->runtime_directory = strjoin("/run/systemd/journal.", s->namespace);
2251 else
2252 s->runtime_directory = strdup("/run/systemd/journal");
2253 if (!s->runtime_directory)
2254 return log_oom();
2255
2256 (void) mkdir_p(s->runtime_directory, 0755);
2257
2258 s->user_journals = ordered_hashmap_new(NULL);
2259 if (!s->user_journals)
2260 return log_oom();
2261
2262 s->mmap = mmap_cache_new();
2263 if (!s->mmap)
2264 return log_oom();
2265
2266 s->deferred_closes = set_new(NULL);
2267 if (!s->deferred_closes)
2268 return log_oom();
2269
2270 r = sd_event_default(&s->event);
2271 if (r < 0)
2272 return log_error_errno(r, "Failed to create event loop: %m");
2273
2274 n = sd_listen_fds(true);
2275 if (n < 0)
2276 return log_error_errno(n, "Failed to read listening file descriptors from environment: %m");
2277
2278 native_socket = strjoina(s->runtime_directory, "/socket");
2279 stdout_socket = strjoina(s->runtime_directory, "/stdout");
2280 syslog_socket = strjoina(s->runtime_directory, "/dev-log");
2281 varlink_socket = strjoina(s->runtime_directory, "/io.systemd.journal");
2282
2283 for (fd = SD_LISTEN_FDS_START; fd < SD_LISTEN_FDS_START + n; fd++) {
2284
2285 if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, native_socket, 0) > 0) {
2286
2287 if (s->native_fd >= 0)
2288 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2289 "Too many native sockets passed.");
2290
2291 s->native_fd = fd;
2292
2293 } else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, stdout_socket, 0) > 0) {
2294
2295 if (s->stdout_fd >= 0)
2296 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2297 "Too many stdout sockets passed.");
2298
2299 s->stdout_fd = fd;
2300
2301 } else if (sd_is_socket_unix(fd, SOCK_DGRAM, -1, syslog_socket, 0) > 0) {
2302
2303 if (s->syslog_fd >= 0)
2304 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2305 "Too many /dev/log sockets passed.");
2306
2307 s->syslog_fd = fd;
2308
2309 } else if (sd_is_socket_unix(fd, SOCK_STREAM, 1, varlink_socket, 0) > 0) {
2310
2311 if (varlink_fd >= 0)
2312 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2313 "Too many varlink sockets passed.");
2314
2315 varlink_fd = fd;
2316 } else if (sd_is_socket(fd, AF_NETLINK, SOCK_RAW, -1) > 0) {
2317
2318 if (s->audit_fd >= 0)
2319 return log_error_errno(SYNTHETIC_ERRNO(EINVAL),
2320 "Too many audit sockets passed.");
2321
2322 s->audit_fd = fd;
2323
2324 } else {
2325
2326 if (!fds) {
2327 fds = fdset_new();
2328 if (!fds)
2329 return log_oom();
2330 }
2331
2332 r = fdset_put(fds, fd);
2333 if (r < 0)
2334 return log_oom();
2335 }
2336 }
2337
2338 /* Try to restore streams, but don't bother if this fails */
2339 (void) server_restore_streams(s, fds);
2340
2341 if (fdset_size(fds) > 0) {
2342 log_warning("%u unknown file descriptors passed, closing.", fdset_size(fds));
2343 fds = fdset_free(fds);
2344 }
2345
2346 no_sockets = s->native_fd < 0 && s->stdout_fd < 0 && s->syslog_fd < 0 && s->audit_fd < 0 && varlink_fd < 0;
2347
2348 /* always open stdout, syslog, native, and kmsg sockets */
2349
2350 /* systemd-journald.socket: /run/systemd/journal/stdout */
2351 r = server_open_stdout_socket(s, stdout_socket);
2352 if (r < 0)
2353 return r;
2354
2355 /* systemd-journald-dev-log.socket: /run/systemd/journal/dev-log */
2356 r = server_open_syslog_socket(s, syslog_socket);
2357 if (r < 0)
2358 return r;
2359
2360 /* systemd-journald.socket: /run/systemd/journal/socket */
2361 r = server_open_native_socket(s, native_socket);
2362 if (r < 0)
2363 return r;
2364
2365 /* /dev/kmsg */
2366 r = server_open_dev_kmsg(s);
2367 if (r < 0)
2368 return r;
2369
2370 /* Open the audit socket if one was passed in, or if we got no sockets at all */
2371 if (s->audit_fd >= 0 || no_sockets) {
2372 r = server_open_audit(s);
2373 if (r < 0)
2374 return r;
2375 }
2376
2377 r = server_open_varlink(s, varlink_socket, varlink_fd);
2378 if (r < 0)
2379 return r;
2380
2381 r = server_open_kernel_seqnum(s);
2382 if (r < 0)
2383 return r;
2384
2385 r = server_open_hostname(s);
2386 if (r < 0)
2387 return r;
2388
2389 r = setup_signals(s);
2390 if (r < 0)
2391 return r;
2392
2393 s->ratelimit = journal_ratelimit_new();
2394 if (!s->ratelimit)
2395 return log_oom();
2396
2397 r = cg_get_root_path(&s->cgroup_root);
2398 if (r < 0)
2399 return log_error_errno(r, "Failed to acquire cgroup root path: %m");
2400
2401 server_cache_hostname(s);
2402 server_cache_boot_id(s);
2403 server_cache_machine_id(s);
2404
2405 if (s->namespace)
2406 s->runtime_storage.path = strjoin("/run/log/journal/", SERVER_MACHINE_ID(s), ".", s->namespace);
2407 else
2408 s->runtime_storage.path = strjoin("/run/log/journal/", SERVER_MACHINE_ID(s));
2409 if (!s->runtime_storage.path)
2410 return log_oom();
2411
2412 e = getenv("LOGS_DIRECTORY");
2413 if (e)
2414 s->system_storage.path = strdup(e);
2415 else if (s->namespace)
2416 s->system_storage.path = strjoin("/var/log/journal/", SERVER_MACHINE_ID(s), ".", s->namespace);
2417 else
2418 s->system_storage.path = strjoin("/var/log/journal/", SERVER_MACHINE_ID(s));
2419 if (!s->system_storage.path)
2420 return log_oom();
2421
2422 (void) server_connect_notify(s);
2423
2424 (void) client_context_acquire_default(s);
2425
2426 r = system_journal_open(s, false, false);
2427 if (r < 0)
2428 return r;
2429
2430 server_start_or_stop_idle_timer(s);
2431 return 0;
2432 }
2433
2434 void server_maybe_append_tags(Server *s) {
2435 #if HAVE_GCRYPT
2436 JournalFile *f;
2437 Iterator i;
2438 usec_t n;
2439
2440 n = now(CLOCK_REALTIME);
2441
2442 if (s->system_journal)
2443 journal_file_maybe_append_tag(s->system_journal, n);
2444
2445 ORDERED_HASHMAP_FOREACH(f, s->user_journals, i)
2446 journal_file_maybe_append_tag(f, n);
2447 #endif
2448 }
2449
2450 void server_done(Server *s) {
2451 assert(s);
2452
2453 free(s->namespace);
2454 free(s->namespace_field);
2455
2456 set_free_with_destructor(s->deferred_closes, journal_file_close);
2457
2458 while (s->stdout_streams)
2459 stdout_stream_free(s->stdout_streams);
2460
2461 client_context_flush_all(s);
2462
2463 (void) journal_file_close(s->system_journal);
2464 (void) journal_file_close(s->runtime_journal);
2465
2466 ordered_hashmap_free_with_destructor(s->user_journals, journal_file_close);
2467
2468 varlink_server_unref(s->varlink_server);
2469
2470 sd_event_source_unref(s->syslog_event_source);
2471 sd_event_source_unref(s->native_event_source);
2472 sd_event_source_unref(s->stdout_event_source);
2473 sd_event_source_unref(s->dev_kmsg_event_source);
2474 sd_event_source_unref(s->audit_event_source);
2475 sd_event_source_unref(s->sync_event_source);
2476 sd_event_source_unref(s->sigusr1_event_source);
2477 sd_event_source_unref(s->sigusr2_event_source);
2478 sd_event_source_unref(s->sigterm_event_source);
2479 sd_event_source_unref(s->sigint_event_source);
2480 sd_event_source_unref(s->sigrtmin1_event_source);
2481 sd_event_source_unref(s->hostname_event_source);
2482 sd_event_source_unref(s->notify_event_source);
2483 sd_event_source_unref(s->watchdog_event_source);
2484 sd_event_source_unref(s->idle_event_source);
2485 sd_event_unref(s->event);
2486
2487 safe_close(s->syslog_fd);
2488 safe_close(s->native_fd);
2489 safe_close(s->stdout_fd);
2490 safe_close(s->dev_kmsg_fd);
2491 safe_close(s->audit_fd);
2492 safe_close(s->hostname_fd);
2493 safe_close(s->notify_fd);
2494
2495 if (s->ratelimit)
2496 journal_ratelimit_free(s->ratelimit);
2497
2498 if (s->kernel_seqnum)
2499 munmap(s->kernel_seqnum, sizeof(uint64_t));
2500
2501 free(s->buffer);
2502 free(s->tty_path);
2503 free(s->cgroup_root);
2504 free(s->hostname_field);
2505 free(s->runtime_storage.path);
2506 free(s->system_storage.path);
2507 free(s->runtime_directory);
2508
2509 mmap_cache_unref(s->mmap);
2510 }
2511
2512 static const char* const storage_table[_STORAGE_MAX] = {
2513 [STORAGE_AUTO] = "auto",
2514 [STORAGE_VOLATILE] = "volatile",
2515 [STORAGE_PERSISTENT] = "persistent",
2516 [STORAGE_NONE] = "none"
2517 };
2518
2519 DEFINE_STRING_TABLE_LOOKUP(storage, Storage);
2520 DEFINE_CONFIG_PARSE_ENUM(config_parse_storage, storage, Storage, "Failed to parse storage setting");
2521
2522 static const char* const split_mode_table[_SPLIT_MAX] = {
2523 [SPLIT_LOGIN] = "login",
2524 [SPLIT_UID] = "uid",
2525 [SPLIT_NONE] = "none",
2526 };
2527
2528 DEFINE_STRING_TABLE_LOOKUP(split_mode, SplitMode);
2529 DEFINE_CONFIG_PARSE_ENUM(config_parse_split_mode, split_mode, SplitMode, "Failed to parse split mode setting");
2530
2531 int config_parse_line_max(
2532 const char* unit,
2533 const char *filename,
2534 unsigned line,
2535 const char *section,
2536 unsigned section_line,
2537 const char *lvalue,
2538 int ltype,
2539 const char *rvalue,
2540 void *data,
2541 void *userdata) {
2542
2543 size_t *sz = data;
2544 int r;
2545
2546 assert(filename);
2547 assert(lvalue);
2548 assert(rvalue);
2549 assert(data);
2550
2551 if (isempty(rvalue))
2552 /* Empty assignment means default */
2553 *sz = DEFAULT_LINE_MAX;
2554 else {
2555 uint64_t v;
2556
2557 r = parse_size(rvalue, 1024, &v);
2558 if (r < 0) {
2559 log_syntax(unit, LOG_ERR, filename, line, r, "Failed to parse LineMax= value, ignoring: %s", rvalue);
2560 return 0;
2561 }
2562
2563 if (v < 79) {
2564 /* Why specify 79 here as the minimum line length? Simply because the most common traditional
2565 * terminal width is 80 characters, and it makes sense to break one character before the
2566 * terminal's natural line wrap. */
2567 log_syntax(unit, LOG_WARNING, filename, line, 0, "LineMax= too small, clamping to 79: %s", rvalue);
2568 *sz = 79;
2569 } else if (v > (uint64_t) (SSIZE_MAX-1)) {
2570 /* So, why specify SSIZE_MAX-1 here? Because that's one below the largest size value read()
2571 * can return, and we need one extra byte for the trailing NUL byte. Of course, in real life such
2572 * large memory allocations will fail long before we hit this limit, so it is mostly
2573 * theoretical. */
2574 log_syntax(unit, LOG_WARNING, filename, line, 0, "LineMax= too large, clamping to %" PRIu64 ": %s", (uint64_t) (SSIZE_MAX-1), rvalue);
2575 *sz = SSIZE_MAX-1;
2576 } else
2577 *sz = (size_t) v;
2578 }
2579
2580 return 0;
2581 }
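/* Example journald.conf fragment (illustrative):
 *
 *     [Journal]
 *     LineMax=48K
 *
 * parse_size() accepts the usual suffixes (K, M, G, ...), so the value above resolves to 48*1024
 * bytes, i.e. the built-in default. A tiny value such as "LineMax=16" is clamped up to 79, and
 * anything beyond SSIZE_MAX-1 is clamped down, as implemented above. */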
2582
2583 int config_parse_compress(
2584 const char* unit,
2585 const char *filename,
2586 unsigned line,
2587 const char *section,
2588 unsigned section_line,
2589 const char *lvalue,
2590 int ltype,
2591 const char *rvalue,
2592 void *data,
2593 void *userdata) {
2594
2595 JournalCompressOptions* compress = data;
2596 int r;
2597
2598 if (isempty(rvalue)) {
2599 compress->enabled = true;
2600 compress->threshold_bytes = (uint64_t) -1;
2601 } else if (streq(rvalue, "1")) {
2602 log_syntax(unit, LOG_WARNING, filename, line, 0,
2603 "Compress= ambiguously specified as 1, enabling compression with default threshold");
2604 compress->enabled = true;
2605 } else if (streq(rvalue, "0")) {
2606 log_syntax(unit, LOG_WARNING, filename, line, 0,
2607 "Compress= ambiguously specified as 0, disabling compression");
2608 compress->enabled = false;
2609 } else {
2610 r = parse_boolean(rvalue);
2611 if (r < 0) {
2612 r = parse_size(rvalue, 1024, &compress->threshold_bytes);
2613 if (r < 0)
2614 log_syntax(unit, LOG_ERR, filename, line, r,
2615 "Failed to parse Compress= value, ignoring: %s", rvalue);
2616 else
2617 compress->enabled = true;
2618 } else
2619 compress->enabled = r;
2620 }
2621
2622 return 0;
2623 }
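/* Example Compress= assignments (illustrative), as handled above:
 *
 *     Compress=yes      compression enabled, default threshold
 *     Compress=no       compression disabled
 *     Compress=4K       compression enabled for objects larger than 4096 bytes
 *     Compress=         reset to the built-in default (enabled, default threshold)
 *
 * "1" and "0" are accepted too, but warned about, since they could be read either as booleans or as
 * (absurdly small) size thresholds. */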