/* src/journal/journald-context.c */
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #if HAVE_SELINUX
4 #include <selinux/selinux.h>
5 #endif
6
7 #include "alloc-util.h"
8 #include "audit-util.h"
9 #include "cgroup-util.h"
10 #include "fd-util.h"
11 #include "fileio.h"
12 #include "fs-util.h"
13 #include "io-util.h"
14 #include "journal-util.h"
15 #include "journald-context.h"
16 #include "parse-util.h"
17 #include "path-util.h"
18 #include "process-util.h"
19 #include "string-util.h"
20 #include "syslog-util.h"
21 #include "unaligned.h"
22 #include "user-util.h"
23
24 /* This implements a metadata cache for clients, which are identified by their PID. Requesting metadata through /proc
25 * is expensive, hence let's cache the data if we can. Note that this means the metadata might be out-of-date when we
26 * store it, but it might already be anyway, as we request the data asynchronously from /proc at a different time the
27 * log entry was originally created. We hence just increase the "window of inaccuracy" a bit.
28 *
29 * The cache is indexed by the PID. Entries may be "pinned" in the cache, in which case the entries are not removed
30 * until they are unpinned. Unpinned entries are kept around until cache pressure is seen. Cache entries older than 5s
31 * are never used (a sad attempt to deal with the UNIX weakness of PIDs reuse), cache entries older than 1s are
32 * refreshed in an incremental way (meaning: data is reread from /proc, but any old data we can't refresh is not
33 * flushed out). Data newer than 1s is used immediately without refresh.
34 *
35 * Log stream clients (i.e. all clients using the AF_UNIX/SOCK_STREAM stdout/stderr transport) will pin a cache entry
36 * as long as their socket is connected. Note that cache entries are shared between different transports. That means a
37 * cache entry pinned for the stream connection logic may be reused for the syslog or native protocols.
38 *
39 * Caching metadata like this has two major benefits:
40 *
41 * 1. Reading metadata is expensive, and we can thus substantially speed up log processing under flood.
42 *
43 * 2. Because metadata caching is shared between stream and datagram transports and stream connections pin a cache
44 * entry there's a good chance we can properly map a substantial set of datagram log messages to their originating
45 * service, as all services (unless explicitly configured otherwise) will have their stdout/stderr connected to a
46 * stream connection. This should improve cases where a service process logs immediately before exiting and we
47 * previously had trouble associating the log message with the service.
48 *
49 * NB: With and without the metadata cache: the implicitly added entry metadata in the journal (with the exception of
50 * UID/PID/GID and SELinux label) must be understood as possibly slightly out of sync (i.e. sometimes slightly older
51 * and sometimes slightly newer than what was current at the log event).
52 */
53
54 /* We refresh every 1s */
55 #define REFRESH_USEC (1*USEC_PER_SEC)
56
57 /* Data older than 5s we flush out */
58 #define MAX_USEC (5*USEC_PER_SEC)
59
60 /* Keep at most 16K entries in the cache. (Note though that this limit may be violated if enough streams pin entries in
61 * the cache, in which case we *do* permit this limit to be breached. That's safe however, as the number of stream
62 * clients itself is limited.) */
63 #define CACHE_MAX (16*1024)
64
65 static int client_context_compare(const void *a, const void *b) {
66 const ClientContext *x = a, *y = b;
67 int r;
68
69 r = CMP(x->timestamp, y->timestamp);
70 if (r != 0)
71 return r;
72
73 return CMP(x->pid, y->pid);
74 }
75
76 static int client_context_new(Server *s, pid_t pid, ClientContext **ret) {
77 ClientContext *c;
78 int r;
79
80 assert(s);
81 assert(pid_is_valid(pid));
82 assert(ret);
83
84 r = hashmap_ensure_allocated(&s->client_contexts, NULL);
85 if (r < 0)
86 return r;
87
88 r = prioq_ensure_allocated(&s->client_contexts_lru, client_context_compare);
89 if (r < 0)
90 return r;
91
92 c = new0(ClientContext, 1);
93 if (!c)
94 return -ENOMEM;
95
96 c->pid = pid;
97
98 c->uid = UID_INVALID;
99 c->gid = GID_INVALID;
100 c->auditid = AUDIT_SESSION_INVALID;
101 c->loginuid = UID_INVALID;
102 c->owner_uid = UID_INVALID;
103 c->lru_index = PRIOQ_IDX_NULL;
104 c->timestamp = USEC_INFINITY;
105 c->extra_fields_mtime = NSEC_INFINITY;
106 c->log_level_max = -1;
107 c->log_rate_limit_interval = s->rate_limit_interval;
108 c->log_rate_limit_burst = s->rate_limit_burst;
109
110 r = hashmap_put(s->client_contexts, PID_TO_PTR(pid), c);
111 if (r < 0) {
112 free(c);
113 return r;
114 }
115
116 *ret = c;
117 return 0;
118 }
119
120 static void client_context_reset(Server *s, ClientContext *c) {
121 assert(s);
122 assert(c);
123
124 c->timestamp = USEC_INFINITY;
125
126 c->uid = UID_INVALID;
127 c->gid = GID_INVALID;
128
129 c->comm = mfree(c->comm);
130 c->exe = mfree(c->exe);
131 c->cmdline = mfree(c->cmdline);
132 c->capeff = mfree(c->capeff);
133
134 c->auditid = AUDIT_SESSION_INVALID;
135 c->loginuid = UID_INVALID;
136
137 c->cgroup = mfree(c->cgroup);
138 c->session = mfree(c->session);
139 c->owner_uid = UID_INVALID;
140 c->unit = mfree(c->unit);
141 c->user_unit = mfree(c->user_unit);
142 c->slice = mfree(c->slice);
143 c->user_slice = mfree(c->user_slice);
144
145 c->invocation_id = SD_ID128_NULL;
146
147 c->label = mfree(c->label);
148 c->label_size = 0;
149
150 c->extra_fields_iovec = mfree(c->extra_fields_iovec);
151 c->extra_fields_n_iovec = 0;
152 c->extra_fields_data = mfree(c->extra_fields_data);
153 c->extra_fields_mtime = NSEC_INFINITY;
154
155 c->log_level_max = -1;
156
157 c->log_rate_limit_interval = s->rate_limit_interval;
158 c->log_rate_limit_burst = s->rate_limit_burst;
159 }
160
161 static ClientContext* client_context_free(Server *s, ClientContext *c) {
162 assert(s);
163
164 if (!c)
165 return NULL;
166
167 assert_se(hashmap_remove(s->client_contexts, PID_TO_PTR(c->pid)) == c);
168
169 if (c->in_lru)
170 assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
171
172 client_context_reset(s, c);
173
174 return mfree(c);
175 }
176
177 static void client_context_read_uid_gid(ClientContext *c, const struct ucred *ucred) {
178 assert(c);
179 assert(pid_is_valid(c->pid));
180
181 /* The ucred data passed in is always the most current and accurate, if we have any. Use it. */
182 if (ucred && uid_is_valid(ucred->uid))
183 c->uid = ucred->uid;
184 else
185 (void) get_process_uid(c->pid, &c->uid);
186
187 if (ucred && gid_is_valid(ucred->gid))
188 c->gid = ucred->gid;
189 else
190 (void) get_process_gid(c->pid, &c->gid);
191 }
192
193 static void client_context_read_basic(ClientContext *c) {
194 char *t;
195
196 assert(c);
197 assert(pid_is_valid(c->pid));
198
199 if (get_process_comm(c->pid, &t) >= 0)
200 free_and_replace(c->comm, t);
201
202 if (get_process_exe(c->pid, &t) >= 0)
203 free_and_replace(c->exe, t);
204
205 if (get_process_cmdline(c->pid, 0, false, &t) >= 0)
206 free_and_replace(c->cmdline, t);
207
208 if (get_process_capeff(c->pid, &t) >= 0)
209 free_and_replace(c->capeff, t);
210 }
211
/* Stores the client's security label on the entry. A label passed in by the caller (e.g. received
 * alongside the message) always wins; only when none was supplied do we query the kernel directly
 * (SELinux builds only). Returns 0 on success or when no label could be determined, -ENOMEM on
 * allocation failure. */
static int client_context_read_label(
                ClientContext *c,
                const char *label, size_t label_size) {

        assert(c);
        assert(pid_is_valid(c->pid));
        assert(label_size == 0 || label);

        if (label_size > 0) {
                char *l;

                /* If we got an SELinux label passed in it counts. */

                /* Copy with a terminating NUL appended, since the input need not be NUL-terminated. */
                l = newdup_suffix0(char, label, label_size);
                if (!l)
                        return -ENOMEM;

                free_and_replace(c->label, l);
                c->label_size = label_size;
        }
#if HAVE_SELINUX
        else {
                char *con;

                /* If we got no SELinux label passed in, let's try to acquire one */

                if (getpidcon(c->pid, &con) >= 0) {
                        free_and_replace(c->label, con);
                        c->label_size = strlen(c->label);
                }
        }
#endif

        return 0;
}
247
248 static int client_context_read_cgroup(Server *s, ClientContext *c, const char *unit_id) {
249 char *t = NULL;
250 int r;
251
252 assert(c);
253
254 /* Try to acquire the current cgroup path */
255 r = cg_pid_get_path_shifted(c->pid, s->cgroup_root, &t);
256 if (r < 0 || empty_or_root(t)) {
257
258 /* We use the unit ID passed in as fallback if we have nothing cached yet and cg_pid_get_path_shifted()
259 * failed or process is running in a root cgroup. Zombie processes are automatically migrated to root cgroup
260 * on cgroupsv1 and we want to be able to map log messages from them too. */
261 if (unit_id && !c->unit) {
262 c->unit = strdup(unit_id);
263 if (c->unit)
264 return 0;
265 }
266
267 return r;
268 }
269
270 /* Let's shortcut this if the cgroup path didn't change */
271 if (streq_ptr(c->cgroup, t)) {
272 free(t);
273 return 0;
274 }
275
276 free_and_replace(c->cgroup, t);
277
278 (void) cg_path_get_session(c->cgroup, &t);
279 free_and_replace(c->session, t);
280
281 if (cg_path_get_owner_uid(c->cgroup, &c->owner_uid) < 0)
282 c->owner_uid = UID_INVALID;
283
284 (void) cg_path_get_unit(c->cgroup, &t);
285 free_and_replace(c->unit, t);
286
287 (void) cg_path_get_user_unit(c->cgroup, &t);
288 free_and_replace(c->user_unit, t);
289
290 (void) cg_path_get_slice(c->cgroup, &t);
291 free_and_replace(c->slice, t);
292
293 (void) cg_path_get_user_slice(c->cgroup, &t);
294 free_and_replace(c->user_slice, t);
295
296 return 0;
297 }
298
299 static int client_context_read_invocation_id(
300 Server *s,
301 ClientContext *c) {
302
303 _cleanup_free_ char *value = NULL;
304 const char *p;
305 int r;
306
307 assert(s);
308 assert(c);
309
310 /* Read the invocation ID of a unit off a unit. PID 1 stores it in a per-unit symlink in /run/systemd/units/ */
311
312 if (!c->unit)
313 return 0;
314
315 p = strjoina("/run/systemd/units/invocation:", c->unit);
316 r = readlink_malloc(p, &value);
317 if (r < 0)
318 return r;
319
320 return sd_id128_from_string(value, &c->invocation_id);
321 }
322
323 static int client_context_read_log_level_max(
324 Server *s,
325 ClientContext *c) {
326
327 _cleanup_free_ char *value = NULL;
328 const char *p;
329 int r, ll;
330
331 if (!c->unit)
332 return 0;
333
334 p = strjoina("/run/systemd/units/log-level-max:", c->unit);
335 r = readlink_malloc(p, &value);
336 if (r < 0)
337 return r;
338
339 ll = log_level_from_string(value);
340 if (ll < 0)
341 return -EINVAL;
342
343 c->log_level_max = ll;
344 return 0;
345 }
346
/* Reads the serialized LogExtraFields= blob that PID 1 publishes for the client's unit under
 * /run/systemd/units/, validates it, and caches it on the entry as an iovec array pointing into a
 * single contiguous buffer. Uses the file's mtime to skip re-parsing an unchanged file. Returns 0
 * on success, when no unit is known, or when the file doesn't exist; -EBADMSG on malformed data,
 * -ENOMEM/-errno on resource errors. */
static int client_context_read_extra_fields(
                Server *s,
                ClientContext *c) {

        size_t size = 0, n_iovec = 0, n_allocated = 0, left;
        _cleanup_free_ struct iovec *iovec = NULL;
        _cleanup_free_ void *data = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        struct stat st;
        const char *p;
        uint8_t *q;
        int r;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-extra-fields:", c->unit);

        /* Already cached something? Then check the mtime first and bail out early if unchanged. */
        if (c->extra_fields_mtime != NSEC_INFINITY) {
                if (stat(p, &st) < 0) {
                        if (errno == ENOENT)
                                return 0;

                        return -errno;
                }

                if (timespec_load_nsec(&st.st_mtim) == c->extra_fields_mtime)
                        return 0;
        }

        f = fopen(p, "re");
        if (!f) {
                if (errno == ENOENT)
                        return 0;

                return -errno;
        }

        if (fstat(fileno(f), &st) < 0) /* The file might have been replaced since the stat() above, let's get a new
                                        * one, that matches the stuff we are reading */
                return -errno;

        r = read_full_stream(f, (char**) &data, &size);
        if (r < 0)
                return r;

        /* The format is a sequence of records, each a little-endian 64-bit length followed by that
         * many bytes of "FIELD=value" payload. Walk and validate the whole buffer. */
        q = data, left = size;
        while (left > 0) {
                uint8_t *field, *eq;
                uint64_t v, n;

                if (left < sizeof(uint64_t))
                        return -EBADMSG;

                v = unaligned_read_le64(q);
                if (v < 2) /* shortest valid payload is two bytes, e.g. "X=" */
                        return -EBADMSG;

                n = sizeof(uint64_t) + v;
                if (left < n)
                        return -EBADMSG;

                field = q + sizeof(uint64_t);

                eq = memchr(field, '=', v);
                if (!eq)
                        return -EBADMSG;

                if (!journal_field_valid((const char *) field, eq - field, false))
                        return -EBADMSG;

                if (!GREEDY_REALLOC(iovec, n_allocated, n_iovec+1))
                        return -ENOMEM;

                /* The iovec points into 'data' directly; both are kept alive together below. */
                iovec[n_iovec++] = IOVEC_MAKE(field, v);

                left -= n, q += n;
        }

        /* Everything parsed fine — replace the previously cached fields wholesale. */
        free(c->extra_fields_iovec);
        free(c->extra_fields_data);

        c->extra_fields_iovec = TAKE_PTR(iovec);
        c->extra_fields_n_iovec = n_iovec;
        c->extra_fields_data = TAKE_PTR(data);
        c->extra_fields_mtime = timespec_load_nsec(&st.st_mtim);

        return 0;
}
436
437 static int client_context_read_log_rate_limit_interval(ClientContext *c) {
438 _cleanup_free_ char *value = NULL;
439 const char *p;
440 int r;
441
442 assert(c);
443
444 if (!c->unit)
445 return 0;
446
447 p = strjoina("/run/systemd/units/log-rate-limit-interval:", c->unit);
448 r = readlink_malloc(p, &value);
449 if (r < 0)
450 return r;
451
452 return safe_atou64(value, &c->log_rate_limit_interval);
453 }
454
455 static int client_context_read_log_rate_limit_burst(ClientContext *c) {
456 _cleanup_free_ char *value = NULL;
457 const char *p;
458 int r;
459
460 assert(c);
461
462 if (!c->unit)
463 return 0;
464
465 p = strjoina("/run/systemd/units/log-rate-limit-burst:", c->unit);
466 r = readlink_malloc(p, &value);
467 if (r < 0)
468 return r;
469
470 return safe_atou(value, &c->log_rate_limit_burst);
471 }
472
/* Unconditionally re-reads all metadata of the entry and stamps it with the given timestamp (or
 * "now" if USEC_INFINITY was passed). Failures of the individual readers are ignored, so fields
 * that cannot be refreshed keep their previous values. */
static void client_context_really_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);
        assert(pid_is_valid(c->pid));

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        client_context_read_uid_gid(c, ucred);
        client_context_read_basic(c);
        (void) client_context_read_label(c, label, label_size);

        (void) audit_session_from_pid(c->pid, &c->auditid);
        (void) audit_loginuid_from_pid(c->pid, &c->loginuid);

        /* Order matters: the cgroup reader may set c->unit, which the following readers rely on. */
        (void) client_context_read_cgroup(s, c, unit_id);
        (void) client_context_read_invocation_id(s, c);
        (void) client_context_read_log_level_max(s, c);
        (void) client_context_read_extra_fields(s, c);
        (void) client_context_read_log_rate_limit_interval(c);
        (void) client_context_read_log_rate_limit_burst(c);

        c->timestamp = timestamp;

        /* The LRU queue is sorted by timestamp, which we just changed — reshuffle if queued. */
        if (c->in_lru) {
                assert(c->n_ref == 0);
                assert_se(prioq_reshuffle(s->client_contexts_lru, c, &c->lru_index) >= 0);
        }
}
509
/* Decides whether the cached metadata is still usable and refreshes it if not. Implements the
 * cache policy described at the top of this file: unset → full fill; unpinned and older than
 * MAX_USEC → flush and refill; older than REFRESH_USEC → incremental refresh; mismatching
 * credentials or label → refresh; otherwise the cached data is used as-is. */
void client_context_maybe_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        /* No cached data so far? Let's fill it up */
        if (c->timestamp == USEC_INFINITY)
                goto refresh;

        /* If the data isn't pinned and if the cached data is older than the upper limit, we flush it out
         * entirely. This follows the logic that as long as an entry is pinned the PID reuse is unlikely. */
        if (c->n_ref == 0 && c->timestamp + MAX_USEC < timestamp) {
                client_context_reset(s, c);
                goto refresh;
        }

        /* If the data is older than the lower limit, we refresh, but keep the old data for all we can't update */
        if (c->timestamp + REFRESH_USEC < timestamp)
                goto refresh;

        /* If the data passed along doesn't match the cached data we also do a refresh */
        if (ucred && uid_is_valid(ucred->uid) && c->uid != ucred->uid)
                goto refresh;

        if (ucred && gid_is_valid(ucred->gid) && c->gid != ucred->gid)
                goto refresh;

        if (label_size > 0 && (label_size != c->label_size || memcmp(label, c->label, label_size) != 0))
                goto refresh;

        return;

refresh:
        client_context_really_refresh(s, c, ucred, label, label_size, unit_id, timestamp);
}
554
555 static void client_context_try_shrink_to(Server *s, size_t limit) {
556 assert(s);
557
558 /* Bring the number of cache entries below the indicated limit, so that we can create a new entry without
559 * breaching the limit. Note that we only flush out entries that aren't pinned here. This means the number of
560 * cache entries may very well grow beyond the limit, if all entries stored remain pinned. */
561
562 while (hashmap_size(s->client_contexts) > limit) {
563 ClientContext *c;
564
565 c = prioq_pop(s->client_contexts_lru);
566 if (!c)
567 break; /* All remaining entries are pinned, give up */
568
569 assert(c->in_lru);
570 assert(c->n_ref == 0);
571
572 c->in_lru = false;
573
574 client_context_free(s, c);
575 }
576 }
577
578 void client_context_flush_all(Server *s) {
579 assert(s);
580
581 /* Flush out all remaining entries. This assumes all references are already dropped. */
582
583 s->my_context = client_context_release(s, s->my_context);
584 s->pid1_context = client_context_release(s, s->pid1_context);
585
586 client_context_try_shrink_to(s, 0);
587
588 assert(prioq_size(s->client_contexts_lru) == 0);
589 assert(hashmap_size(s->client_contexts) == 0);
590
591 s->client_contexts_lru = prioq_free(s->client_contexts_lru);
592 s->client_contexts = hashmap_free(s->client_contexts);
593 }
594
/* Common backend of client_context_get() and client_context_acquire(): looks up the cache entry
 * for 'pid', creating one if necessary, optionally takes a reference on it (pinning it, i.e.
 * removing it from the LRU queue), and refreshes its metadata. Returns 0 and the entry via *ret,
 * or a negative errno-style error. */
static int client_context_get_internal(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                bool add_ref,
                ClientContext **ret) {

        ClientContext *c;
        int r;

        assert(s);
        assert(ret);

        if (!pid_is_valid(pid))
                return -EINVAL;

        /* Fast path: the entry exists already. */
        c = hashmap_get(s->client_contexts, PID_TO_PTR(pid));
        if (c) {

                if (add_ref) {
                        if (c->in_lru) {
                                /* The entry wasn't pinned so far, let's remove it from the LRU list then */
                                assert(c->n_ref == 0);
                                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
                                c->in_lru = false;
                        }

                        c->n_ref++;
                }

                /* Only refresh if the cache policy says the data is too old or mismatching. */
                client_context_maybe_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

                *ret = c;
                return 0;
        }

        /* Slow path: make room for one more entry, then create it. */
        client_context_try_shrink_to(s, CACHE_MAX-1);

        r = client_context_new(s, pid, &c);
        if (r < 0)
                return r;

        if (add_ref)
                c->n_ref++;
        else {
                /* Unpinned entries live in the LRU queue so they can be reclaimed under pressure. */
                r = prioq_put(s->client_contexts_lru, c, &c->lru_index);
                if (r < 0) {
                        client_context_free(s, c);
                        return r;
                }

                c->in_lru = true;
        }

        /* Fresh entry: fill in all metadata unconditionally. */
        client_context_really_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

        *ret = c;
        return 0;
}
656
657 int client_context_get(
658 Server *s,
659 pid_t pid,
660 const struct ucred *ucred,
661 const char *label, size_t label_len,
662 const char *unit_id,
663 ClientContext **ret) {
664
665 return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, false, ret);
666 }
667
668 int client_context_acquire(
669 Server *s,
670 pid_t pid,
671 const struct ucred *ucred,
672 const char *label, size_t label_len,
673 const char *unit_id,
674 ClientContext **ret) {
675
676 return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, true, ret);
677 };
678
679 ClientContext *client_context_release(Server *s, ClientContext *c) {
680 assert(s);
681
682 if (!c)
683 return NULL;
684
685 assert(c->n_ref > 0);
686 assert(!c->in_lru);
687
688 c->n_ref--;
689 if (c->n_ref > 0)
690 return NULL;
691
692 /* The entry is not pinned anymore, let's add it to the LRU prioq if we can. If we can't we'll drop it
693 * right-away */
694
695 if (prioq_put(s->client_contexts_lru, c, &c->lru_index) < 0)
696 client_context_free(s, c);
697 else
698 c->in_lru = true;
699
700 return NULL;
701 }
702
703 void client_context_acquire_default(Server *s) {
704 int r;
705
706 assert(s);
707
708 /* Ensure that our own and PID1's contexts are always pinned. Our own context is particularly useful to
709 * generate driver messages. */
710
711 if (!s->my_context) {
712 struct ucred ucred = {
713 .pid = getpid_cached(),
714 .uid = getuid(),
715 .gid = getgid(),
716 };
717
718 r = client_context_acquire(s, ucred.pid, &ucred, NULL, 0, NULL, &s->my_context);
719 if (r < 0)
720 log_warning_errno(r, "Failed to acquire our own context, ignoring: %m");
721 }
722
723 if (!s->pid1_context) {
724
725 r = client_context_acquire(s, 1, NULL, NULL, 0, NULL, &s->pid1_context);
726 if (r < 0)
727 log_warning_errno(r, "Failed to acquire PID1's context, ignoring: %m");
728
729 }
730 }