/* SPDX-License-Identifier: LGPL-2.1+ */

#if HAVE_SELINUX
#include <selinux/selinux.h>
#endif

#include "alloc-util.h"
#include "audit-util.h"
#include "cgroup-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "io-util.h"
#include "journal-util.h"
#include "journald-context.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "string-util.h"
#include "syslog-util.h"
#include "unaligned.h"
#include "user-util.h"
/* This implements a metadata cache for clients, which are identified by their PID. Requesting metadata through /proc
 * is expensive, hence let's cache the data if we can. Note that this means the metadata might be out-of-date when we
 * store it, but it might already be anyway, as we request the data asynchronously from /proc at a different time than
 * when the log entry was originally created. We hence just increase the "window of inaccuracy" a bit.
 *
 * The cache is indexed by the PID. Entries may be "pinned" in the cache, in which case the entries are not removed
 * until they are unpinned. Unpinned entries are kept around until cache pressure is seen. Cache entries older than 5s
 * are never used (a sad attempt to deal with the UNIX weakness of PID reuse), cache entries older than 1s are
 * refreshed in an incremental way (meaning: data is reread from /proc, but any old data we can't refresh is not
 * flushed out). Data newer than 1s is used immediately without refresh.
 *
 * Log stream clients (i.e. all clients using the AF_UNIX/SOCK_STREAM stdout/stderr transport) will pin a cache entry
 * as long as their socket is connected. Note that cache entries are shared between different transports. That means a
 * cache entry pinned for the stream connection logic may be reused for the syslog or native protocols.
 *
 * Caching metadata like this has two major benefits:
 *
 * 1. Reading metadata is expensive, and we can thus substantially speed up log processing under flood.
 *
 * 2. Because metadata caching is shared between stream and datagram transports and stream connections pin a cache
 *    entry, there's a good chance we can properly map a substantial set of datagram log messages to their originating
 *    service, as all services (unless explicitly configured otherwise) will have their stdout/stderr connected to a
 *    stream connection. This should improve cases where a service process logs immediately before exiting and we
 *    previously had trouble associating the log message with the service.
 *
 * NB: With and without the metadata cache: the implicitly added entry metadata in the journal (with the exception of
 * UID/PID/GID and SELinux label) must be understood as possibly slightly out of sync (i.e. sometimes slightly older
 * and sometimes slightly newer than what was current at the log event). */
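/* Illustrative call pattern for the pin/unpin lifecycle described above (a sketch, not code from this
 * file): a stream connection pins an entry via client_context_acquire() when its socket is accepted and
 * drops the pin via client_context_release() when it closes; the syslog and native datagram transports
 * just call client_context_get() per message, which may hit the very entry the stream logic keeps pinned:
 *
 *     ClientContext *c;
 *     if (client_context_acquire(s, ucred->pid, ucred, label, label_len, unit_id, &c) >= 0) {
 *             ... use c for the lifetime of the connection ...
 *             c = client_context_release(s, c);
 *     }
 */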
/* We refresh every 1s */
#define REFRESH_USEC (1*USEC_PER_SEC)

/* Data older than 5s we flush out */
#define MAX_USEC (5*USEC_PER_SEC)

/* Keep at most 16K entries in the cache. (Note though that this limit may be violated if enough streams pin entries in
 * the cache, in which case we *do* permit this limit to be breached. That's safe however, as the number of stream
 * clients itself is limited.) */
#define CACHE_MAX_FALLBACK 128U
#define CACHE_MAX_MAX (16*1024U)
#define CACHE_MAX_MIN 64U
static size_t cache_max(void) {
        static size_t cached = -1;

        if (cached == (size_t) -1) {
                uint64_t mem_total;
                int r;

                r = procfs_memory_get(&mem_total, NULL);
                if (r < 0) {
                        log_warning_errno(r, "Cannot query /proc/meminfo for MemTotal: %m");
                        cached = CACHE_MAX_FALLBACK;
                } else
                        /* Cache entries are usually a few kB, but the process cmdline is controlled by the
                         * user and can be up to _SC_ARG_MAX, usually 2MB. Let's say that approximately up to
                         * 1/8th of memory may be used by the cache.
                         *
                         * In the common case, this formula gives 64 cache entries for each GB of RAM. */
                        cached = CLAMP(mem_total / 8 / sc_arg_max(), CACHE_MAX_MIN, CACHE_MAX_MAX);
        }

        return cached;
}
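/* Worked example of the sizing formula above (illustrative numbers, not computed by this file): with
 * mem_total = 4 GiB and sc_arg_max() = 2 MiB, we get 4 GiB / 8 / 2 MiB = 256 entries, which CLAMP()
 * leaves untouched since it lies between CACHE_MAX_MIN (64) and CACHE_MAX_MAX (16384). */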
static int client_context_compare(const void *a, const void *b) {
        const ClientContext *x = a, *y = b;
        int r;

        r = CMP(x->timestamp, y->timestamp);
        if (r != 0)
                return r;

        return CMP(x->pid, y->pid);
}
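/* Note on the ordering: the LRU prioq uses this comparator, so entries with the oldest refresh timestamp
 * sort first and prioq_pop() in client_context_try_shrink_to() below evicts the stalest unpinned entry
 * first; the PID comparison merely breaks ties between entries refreshed at the same instant. */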
static int client_context_new(Server *s, pid_t pid, ClientContext **ret) {
        ClientContext *c;
        int r;

        assert(s);
        assert(pid_is_valid(pid));
        assert(ret);

        r = hashmap_ensure_allocated(&s->client_contexts, NULL);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&s->client_contexts_lru, client_context_compare);
        if (r < 0)
                return r;

        c = new(ClientContext, 1);
        if (!c)
                return -ENOMEM;

        *c = (ClientContext) {
                .pid = pid,
                .uid = UID_INVALID,
                .gid = GID_INVALID,
                .auditid = AUDIT_SESSION_INVALID,
                .loginuid = UID_INVALID,
                .owner_uid = UID_INVALID,
                .lru_index = PRIOQ_IDX_NULL,
                .timestamp = USEC_INFINITY,
                .extra_fields_mtime = NSEC_INFINITY,
                .log_level_max = -1,
                .log_ratelimit_interval = s->ratelimit_interval,
                .log_ratelimit_burst = s->ratelimit_burst,
        };

        r = hashmap_put(s->client_contexts, PID_TO_PTR(pid), c);
        if (r < 0) {
                free(c);
                return r;
        }

        *ret = c;
        return 0;
}
static void client_context_reset(Server *s, ClientContext *c) {
        assert(s);
        assert(c);

        c->timestamp = USEC_INFINITY;

        c->uid = UID_INVALID;
        c->gid = GID_INVALID;

        c->comm = mfree(c->comm);
        c->exe = mfree(c->exe);
        c->cmdline = mfree(c->cmdline);
        c->capeff = mfree(c->capeff);

        c->auditid = AUDIT_SESSION_INVALID;
        c->loginuid = UID_INVALID;

        c->cgroup = mfree(c->cgroup);
        c->session = mfree(c->session);
        c->owner_uid = UID_INVALID;
        c->unit = mfree(c->unit);
        c->user_unit = mfree(c->user_unit);
        c->slice = mfree(c->slice);
        c->user_slice = mfree(c->user_slice);

        c->invocation_id = SD_ID128_NULL;

        c->label = mfree(c->label);
        c->label_size = 0;

        c->extra_fields_iovec = mfree(c->extra_fields_iovec);
        c->extra_fields_n_iovec = 0;
        c->extra_fields_data = mfree(c->extra_fields_data);
        c->extra_fields_mtime = NSEC_INFINITY;

        c->log_level_max = -1;

        c->log_ratelimit_interval = s->ratelimit_interval;
        c->log_ratelimit_burst = s->ratelimit_burst;
}
static ClientContext* client_context_free(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert_se(hashmap_remove(s->client_contexts, PID_TO_PTR(c->pid)) == c);

        if (c->in_lru)
                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);

        client_context_reset(s, c);

        return mfree(c);
}
static void client_context_read_uid_gid(ClientContext *c, const struct ucred *ucred) {
        assert(c);
        assert(pid_is_valid(c->pid));

        /* The ucred data passed in is always the most current and accurate, if we have any. Use it. */
        if (ucred && uid_is_valid(ucred->uid))
                c->uid = ucred->uid;
        else
                (void) get_process_uid(c->pid, &c->uid);

        if (ucred && gid_is_valid(ucred->gid))
                c->gid = ucred->gid;
        else
                (void) get_process_gid(c->pid, &c->gid);
}
static void client_context_read_basic(ClientContext *c) {
        char *t;

        assert(c);
        assert(pid_is_valid(c->pid));

        if (get_process_comm(c->pid, &t) >= 0)
                free_and_replace(c->comm, t);

        if (get_process_exe(c->pid, &t) >= 0)
                free_and_replace(c->exe, t);

        if (get_process_cmdline(c->pid, SIZE_MAX, 0, &t) >= 0)
                free_and_replace(c->cmdline, t);

        if (get_process_capeff(c->pid, &t) >= 0)
                free_and_replace(c->capeff, t);
}
static int client_context_read_label(
                ClientContext *c,
                const char *label, size_t label_size) {

        assert(c);
        assert(pid_is_valid(c->pid));
        assert(label_size == 0 || label);

        if (label_size > 0) {
                char *l;

                /* If we got an SELinux label passed in it counts. */

                l = newdup_suffix0(char, label, label_size);
                if (!l)
                        return -ENOMEM;

                free_and_replace(c->label, l);
                c->label_size = label_size;
        }
#if HAVE_SELINUX
        else {
                char *con;

                /* If we got no SELinux label passed in, let's try to acquire one */

                if (getpidcon(c->pid, &con) >= 0) {
                        free_and_replace(c->label, con);
                        c->label_size = strlen(c->label);
                }
        }
#endif

        return 0;
}
static int client_context_read_cgroup(Server *s, ClientContext *c, const char *unit_id) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(c);

        /* Try to acquire the current cgroup path */
        r = cg_pid_get_path_shifted(c->pid, s->cgroup_root, &t);
        if (r < 0 || empty_or_root(t)) {
                /* We use the unit ID passed in as fallback if we have nothing cached yet and cg_pid_get_path_shifted()
                 * failed or the process is running in the root cgroup. Zombie processes are automatically migrated to
                 * the root cgroup on cgroup v1 and we want to be able to map log messages from them too. */
                if (unit_id && !c->unit) {
                        c->unit = strdup(unit_id);
                        if (c->unit)
                                return 0;
                }

                return r;
        }

        /* Let's shortcut this if the cgroup path didn't change */
        if (streq_ptr(c->cgroup, t))
                return 0;

        free_and_replace(c->cgroup, t);

        (void) cg_path_get_session(c->cgroup, &t);
        free_and_replace(c->session, t);

        if (cg_path_get_owner_uid(c->cgroup, &c->owner_uid) < 0)
                c->owner_uid = UID_INVALID;

        (void) cg_path_get_unit(c->cgroup, &t);
        free_and_replace(c->unit, t);

        (void) cg_path_get_user_unit(c->cgroup, &t);
        free_and_replace(c->user_unit, t);

        (void) cg_path_get_slice(c->cgroup, &t);
        free_and_replace(c->slice, t);

        (void) cg_path_get_user_slice(c->cgroup, &t);
        free_and_replace(c->user_slice, t);

        return 0;
}
static int client_context_read_invocation_id(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *p = NULL, *value = NULL;
        int r;

        assert(s);
        assert(c);

        /* Read the invocation ID of a unit off a unit.
         * PID 1 stores it in a per-unit symlink in /run/systemd/units/
         * User managers store it in a per-unit symlink under /run/user/<uid>/systemd/units/ */

        if (!c->unit)
                return 0;

        if (c->user_unit) {
                r = asprintf(&p, "/run/user/" UID_FMT "/systemd/units/invocation:%s", c->owner_uid, c->user_unit);
                if (r < 0)
                        return -ENOMEM;
        } else {
                p = strjoin("/run/systemd/units/invocation:", c->unit);
                if (!p)
                        return -ENOMEM;
        }

        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return sd_id128_from_string(value, &c->invocation_id);
}
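/* For illustration (hypothetical unit name and ID): for a system service "foo.service" the service
 * manager maintains a dangling symlink whose *target* encodes the invocation ID, e.g.:
 *
 *     /run/systemd/units/invocation:foo.service -> 4a1f2e3d4c5b6a798877665544332211
 *
 * readlink_malloc() above fetches that target without following it, and sd_id128_from_string() parses
 * the 128-bit hex string. */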
static int client_context_read_log_level_max(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *value = NULL;
        const char *p;
        int r, ll;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-level-max:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        ll = log_level_from_string(value);
        if (ll < 0)
                return -EINVAL;

        c->log_level_max = ll;
        return 0;
}
static int client_context_read_extra_fields(
                Server *s,
                ClientContext *c) {

        size_t size = 0, n_iovec = 0, n_allocated = 0, left;
        _cleanup_free_ struct iovec *iovec = NULL;
        _cleanup_free_ void *data = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        struct stat st;
        const char *p;
        uint8_t *q;
        int r;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-extra-fields:", c->unit);

        if (c->extra_fields_mtime != NSEC_INFINITY) {
                if (stat(p, &st) < 0) {
                        if (errno == ENOENT)
                                return 0;

                        return -errno;
                }

                if (timespec_load_nsec(&st.st_mtim) == c->extra_fields_mtime)
                        return 0;
        }

        f = fopen(p, "re");
        if (!f) {
                if (errno == ENOENT)
                        return 0;

                return -errno;
        }

        if (fstat(fileno(f), &st) < 0) /* The file might have been replaced since the stat() above, let's get a new
                                        * one, that matches the stuff we are reading */
                return -errno;

        r = read_full_stream(f, (char**) &data, &size);
        if (r < 0)
                return r;

        q = data, left = size;
        while (left > 0) {
                uint8_t *field, *eq;
                uint64_t v, n;

                if (left < sizeof(uint64_t))
                        return -EBADMSG;

                v = unaligned_read_le64(q);
                if (v < 2)
                        return -EBADMSG;

                n = sizeof(uint64_t) + v;
                if (left < n)
                        return -EBADMSG;

                field = q + sizeof(uint64_t);

                eq = memchr(field, '=', v);
                if (!eq)
                        return -EBADMSG;

                if (!journal_field_valid((const char *) field, eq - field, false))
                        return -EBADMSG;

                if (!GREEDY_REALLOC(iovec, n_allocated, n_iovec+1))
                        return -ENOMEM;

                iovec[n_iovec++] = IOVEC_MAKE(field, v);

                left -= n, q += n;
        }

        /* Found something? Then store it and be happy */

        free(c->extra_fields_iovec);
        free(c->extra_fields_data);

        c->extra_fields_iovec = TAKE_PTR(iovec);
        c->extra_fields_n_iovec = n_iovec;
        c->extra_fields_data = TAKE_PTR(data);
        c->extra_fields_mtime = timespec_load_nsec(&st.st_mtim);

        return 0;
}
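/* The on-disk format parsed above is a flat sequence of records, each a little-endian 64-bit length
 * followed by that many bytes of "FIELD=value" payload. A minimal writer sketch (illustrative only,
 * not part of journald; "FOO=bar" is an arbitrary example field):
 *
 *     const char payload[] = "FOO=bar";
 *     uint64_t l = htole64(strlen(payload));
 *     fwrite(&l, sizeof(l), 1, f);
 *     fwrite(payload, 1, strlen(payload), f);
 */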
static int client_context_read_log_ratelimit_interval(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou64(value, &c->log_ratelimit_interval);
}
static int client_context_read_log_ratelimit_burst(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou(value, &c->log_ratelimit_burst);
}
static void client_context_really_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);
        assert(pid_is_valid(c->pid));

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        client_context_read_uid_gid(c, ucred);
        client_context_read_basic(c);
        (void) client_context_read_label(c, label, label_size);

        (void) audit_session_from_pid(c->pid, &c->auditid);
        (void) audit_loginuid_from_pid(c->pid, &c->loginuid);

        (void) client_context_read_cgroup(s, c, unit_id);
        (void) client_context_read_invocation_id(s, c);
        (void) client_context_read_log_level_max(s, c);
        (void) client_context_read_extra_fields(s, c);
        (void) client_context_read_log_ratelimit_interval(c);
        (void) client_context_read_log_ratelimit_burst(c);

        c->timestamp = timestamp;

        if (c->in_lru) {
                assert(c->n_ref == 0);
                assert_se(prioq_reshuffle(s->client_contexts_lru, c, &c->lru_index) >= 0);
        }
}
void client_context_maybe_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        /* No cached data so far? Let's fill it up */
        if (c->timestamp == USEC_INFINITY)
                goto refresh;

        /* If the data isn't pinned and if the cached data is older than the upper limit, we flush it out
         * entirely. This follows the logic that as long as an entry is pinned the PID reuse is unlikely. */
        if (c->n_ref == 0 && c->timestamp + MAX_USEC < timestamp) {
                client_context_reset(s, c);
                goto refresh;
        }

        /* If the data is older than the lower limit, we refresh, but keep the old data for all we can't update */
        if (c->timestamp + REFRESH_USEC < timestamp)
                goto refresh;

        /* If the data passed along doesn't match the cached data we also do a refresh */
        if (ucred && uid_is_valid(ucred->uid) && c->uid != ucred->uid)
                goto refresh;

        if (ucred && gid_is_valid(ucred->gid) && c->gid != ucred->gid)
                goto refresh;

        if (label_size > 0 && (label_size != c->label_size || memcmp(label, c->label, label_size) != 0))
                goto refresh;

        return;

refresh:
        client_context_really_refresh(s, c, ucred, label, label_size, unit_id, timestamp);
}
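/* Illustrative timeline for the tiers above (using REFRESH_USEC = 1s and MAX_USEC = 5s): an entry
 * refreshed at t=0 is served as-is for messages arriving before t=1s, incrementally re-read from /proc
 * between t=1s and t=5s, and, if unpinned, reset and fully re-acquired once it is older than 5s. */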
static void client_context_try_shrink_to(Server *s, size_t limit) {
        ClientContext *c;
        usec_t t;

        assert(s);

        /* Flush any cache entries for PIDs that have already moved on. Don't do this
         * too often, since it's a slow process. */
        t = now(CLOCK_MONOTONIC);
        if (s->last_cache_pid_flush + MAX_USEC < t) {
                unsigned n = prioq_size(s->client_contexts_lru), idx = 0;

                /* We do a number of iterations based on the initial size of the prioq. When we remove an
                 * item, a new item is moved into its place, and items to the right might be reshuffled. */
                for (unsigned i = 0; i < n; i++) {
                        c = prioq_peek_by_index(s->client_contexts_lru, idx);
                        if (!c)
                                break;

                        assert(c->n_ref == 0);

                        if (!pid_is_unwaited(c->pid))
                                client_context_free(s, c);
                        else
                                idx++;
                }

                s->last_cache_pid_flush = t;
        }

        /* Bring the number of cache entries below the indicated limit, so that we can create a new entry without
         * breaching the limit. Note that we only flush out entries that aren't pinned here. This means the number of
         * cache entries may very well grow beyond the limit, if all entries stored remain pinned. */

        while (hashmap_size(s->client_contexts) > limit) {
                c = prioq_pop(s->client_contexts_lru);
                if (!c)
                        break; /* All remaining entries are pinned, give up */

                assert(c->in_lru);
                assert(c->n_ref == 0);

                c->in_lru = false;

                client_context_free(s, c);
        }
}
void client_context_flush_all(Server *s) {
        assert(s);

        /* Flush out all remaining entries. This assumes all references are already dropped. */

        s->my_context = client_context_release(s, s->my_context);
        s->pid1_context = client_context_release(s, s->pid1_context);

        client_context_try_shrink_to(s, 0);

        assert(prioq_size(s->client_contexts_lru) == 0);
        assert(hashmap_size(s->client_contexts) == 0);

        s->client_contexts_lru = prioq_free(s->client_contexts_lru);
        s->client_contexts = hashmap_free(s->client_contexts);
}
static int client_context_get_internal(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                bool add_ref,
                ClientContext **ret) {

        ClientContext *c;
        int r;

        assert(s);
        assert(ret);

        if (!pid_is_valid(pid))
                return -EINVAL;

        c = hashmap_get(s->client_contexts, PID_TO_PTR(pid));
        if (c) {

                if (add_ref) {
                        if (c->in_lru) {
                                /* The entry wasn't pinned so far, let's remove it from the LRU list then */
                                assert(c->n_ref == 0);
                                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
                                c->in_lru = false;
                        }

                        c->n_ref++;
                }

                client_context_maybe_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

                *ret = c;
                return 0;
        }

        client_context_try_shrink_to(s, cache_max()-1);

        r = client_context_new(s, pid, &c);
        if (r < 0)
                return r;

        if (add_ref)
                c->n_ref++;
        else {
                r = prioq_put(s->client_contexts_lru, c, &c->lru_index);
                if (r < 0) {
                        client_context_free(s, c);
                        return r;
                }

                c->in_lru = true;
        }

        client_context_really_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

        *ret = c;
        return 0;
}
int client_context_get(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, false, ret);
}

int client_context_acquire(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, true, ret);
}
ClientContext* client_context_release(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert(c->n_ref > 0);
        c->n_ref--;

        if (c->n_ref > 0)
                return NULL;

        /* The entry is not pinned anymore, let's add it to the LRU prioq if we can. If we can't we'll drop it
         * right away. */

        if (prioq_put(s->client_contexts_lru, c, &c->lru_index) < 0)
                client_context_free(s, c);
        else
                c->in_lru = true;

        return NULL;
}
void client_context_acquire_default(Server *s) {
        int r;

        assert(s);

        /* Ensure that our own and PID1's contexts are always pinned. Our own context is particularly useful to
         * generate driver messages. */

        if (!s->my_context) {
                struct ucred ucred = {
                        .pid = getpid_cached(),
                        .uid = getuid(),
                        .gid = getgid(),
                };

                r = client_context_acquire(s, ucred.pid, &ucred, NULL, 0, NULL, &s->my_context);
                if (r < 0)
                        log_warning_errno(r, "Failed to acquire our own context, ignoring: %m");
        }

        if (!s->namespace && !s->pid1_context) {
                /* Acquire PID1's context, but only if we are in non-namespaced mode, since PID 1 is only
                 * going to log to the non-namespaced journal instance. */

                r = client_context_acquire(s, 1, NULL, NULL, 0, NULL, &s->pid1_context);
                if (r < 0)
                        log_warning_errno(r, "Failed to acquire PID1's context, ignoring: %m");
        }
}