/* SPDX-License-Identifier: LGPL-2.1+ */

#if HAVE_SELINUX
#include <selinux/selinux.h>
#endif

#include "alloc-util.h"
#include "audit-util.h"
#include "cgroup-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "io-util.h"
#include "journal-util.h"
#include "journald-context.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "string-util.h"
#include "syslog-util.h"
#include "unaligned.h"
#include "user-util.h"
/* This implements a metadata cache for clients, which are identified by their PID. Requesting metadata through /proc
 * is expensive, hence let's cache the data if we can. Note that this means the metadata might be out-of-date when we
 * store it, but it might already be anyway, as we request the data asynchronously from /proc at a different time than
 * when the log entry was originally created. We hence just increase the "window of inaccuracy" a bit.
 *
 * The cache is indexed by the PID. Entries may be "pinned" in the cache, in which case the entries are not removed
 * until they are unpinned. Unpinned entries are kept around until cache pressure is seen. Cache entries older than 5s
 * are never used (a sad attempt to deal with the UNIX weakness of PID reuse), cache entries older than 1s are
 * refreshed in an incremental way (meaning: data is reread from /proc, but any old data we can't refresh is not
 * flushed out). Data newer than 1s is used immediately without refresh.
 *
 * Log stream clients (i.e. all clients using the AF_UNIX/SOCK_STREAM stdout/stderr transport) will pin a cache entry
 * as long as their socket is connected. Note that cache entries are shared between different transports. That means a
 * cache entry pinned for the stream connection logic may be reused for the syslog or native protocols.
 *
 * Caching metadata like this has two major benefits:
 *
 * 1. Reading metadata is expensive, and we can thus substantially speed up log processing under flood.
 *
 * 2. Because metadata caching is shared between stream and datagram transports and stream connections pin a cache
 *    entry, there's a good chance we can properly map a substantial set of datagram log messages to their originating
 *    service, as all services (unless explicitly configured otherwise) will have their stdout/stderr connected to a
 *    stream connection. This should improve cases where a service process logs immediately before exiting and we
 *    previously had trouble associating the log message with the service.
 *
 * NB: With and without the metadata cache: the implicitly added entry metadata in the journal (with the exception of
 * UID/PID/GID and SELinux label) must be understood as possibly slightly out of sync (i.e. sometimes slightly older
 * and sometimes slightly newer than what was current at the log event). */
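/* Roughly, the expected call pattern is: the syslog and native datagram transports look up an entry with
 * client_context_get() for each message, while the stdout/stderr stream transport calls client_context_acquire()
 * when a connection is opened and client_context_release() when it closes, which is what pins an entry for the
 * lifetime of the stream. */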
/* We refresh every 1s */
#define REFRESH_USEC (1*USEC_PER_SEC)

/* Data older than 5s we flush out */
#define MAX_USEC (5*USEC_PER_SEC)

/* Keep at most 16K entries in the cache. (Note though that this limit may be violated if enough streams pin entries in
 * the cache, in which case we *do* permit this limit to be breached. That's safe however, as the number of stream
 * clients itself is limited.) */
#define CACHE_MAX_FALLBACK 128U
#define CACHE_MAX_MAX (16*1024U)
#define CACHE_MAX_MIN 64U
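/* As a rough feel for the sizing below: with the typical _SC_ARG_MAX of 2 MiB, mem_total / 8 / sc_arg_max() works out
 * to 64 entries per GiB of RAM (e.g. 256 entries on a 4 GiB machine), clamped to the [CACHE_MAX_MIN, CACHE_MAX_MAX]
 * range of [64, 16384]. */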
static size_t cache_max(void) {
        static size_t cached = -1;

        if (cached == (size_t) -1) {
                uint64_t mem_total;
                int r;

                r = procfs_memory_get(&mem_total, NULL);
                if (r < 0) {
                        log_warning_errno(r, "Cannot query /proc/meminfo for MemTotal: %m");
                        cached = CACHE_MAX_FALLBACK;
                } else
                        /* Cache entries are usually a few kB, but the process cmdline is controlled by the
                         * user and can be up to _SC_ARG_MAX, usually 2MB. Let's say that approximately up to
                         * 1/8th of memory may be used by the cache.
                         *
                         * In the common case, this formula gives 64 cache entries for each GB of RAM.
                         */
                        cached = CLAMP(mem_total / 8 / sc_arg_max(), CACHE_MAX_MIN, CACHE_MAX_MAX);
        }

        return cached;
}
static int client_context_compare(const void *a, const void *b) {
        const ClientContext *x = a, *y = b;
        int r;

        r = CMP(x->timestamp, y->timestamp);
        if (r != 0)
                return r;

        return CMP(x->pid, y->pid);
}
static int client_context_new(Server *s, pid_t pid, ClientContext **ret) {
        ClientContext *c;
        int r;

        assert(s);
        assert(pid_is_valid(pid));
        assert(ret);

        r = hashmap_ensure_allocated(&s->client_contexts, NULL);
        if (r < 0)
                return r;

        r = prioq_ensure_allocated(&s->client_contexts_lru, client_context_compare);
        if (r < 0)
                return r;

        c = new0(ClientContext, 1);
        if (!c)
                return -ENOMEM;

        c->pid = pid;

        c->uid = UID_INVALID;
        c->gid = GID_INVALID;
        c->auditid = AUDIT_SESSION_INVALID;
        c->loginuid = UID_INVALID;
        c->owner_uid = UID_INVALID;
        c->lru_index = PRIOQ_IDX_NULL;
        c->timestamp = USEC_INFINITY;
        c->extra_fields_mtime = NSEC_INFINITY;
        c->log_level_max = -1;
        c->log_rate_limit_interval = s->rate_limit_interval;
        c->log_rate_limit_burst = s->rate_limit_burst;

        r = hashmap_put(s->client_contexts, PID_TO_PTR(pid), c);
        if (r < 0) {
                free(c);
                return r;
        }

        *ret = c;
        return 0;
}
static void client_context_reset(Server *s, ClientContext *c) {
        assert(s);
        assert(c);

        c->timestamp = USEC_INFINITY;

        c->uid = UID_INVALID;
        c->gid = GID_INVALID;

        c->comm = mfree(c->comm);
        c->exe = mfree(c->exe);
        c->cmdline = mfree(c->cmdline);
        c->capeff = mfree(c->capeff);

        c->auditid = AUDIT_SESSION_INVALID;
        c->loginuid = UID_INVALID;

        c->cgroup = mfree(c->cgroup);
        c->session = mfree(c->session);
        c->owner_uid = UID_INVALID;
        c->unit = mfree(c->unit);
        c->user_unit = mfree(c->user_unit);
        c->slice = mfree(c->slice);
        c->user_slice = mfree(c->user_slice);

        c->invocation_id = SD_ID128_NULL;

        c->label = mfree(c->label);
        c->label_size = 0;

        c->extra_fields_iovec = mfree(c->extra_fields_iovec);
        c->extra_fields_n_iovec = 0;
        c->extra_fields_data = mfree(c->extra_fields_data);
        c->extra_fields_mtime = NSEC_INFINITY;

        c->log_level_max = -1;

        c->log_rate_limit_interval = s->rate_limit_interval;
        c->log_rate_limit_burst = s->rate_limit_burst;
}
static ClientContext* client_context_free(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert_se(hashmap_remove(s->client_contexts, PID_TO_PTR(c->pid)) == c);

        if (c->in_lru)
                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);

        client_context_reset(s, c);

        return mfree(c);
}
static void client_context_read_uid_gid(ClientContext *c, const struct ucred *ucred) {
        assert(c);
        assert(pid_is_valid(c->pid));

        /* The ucred data passed in is always the most current and accurate, if we have any. Use it. */
        if (ucred && uid_is_valid(ucred->uid))
                c->uid = ucred->uid;
        else
                (void) get_process_uid(c->pid, &c->uid);

        if (ucred && gid_is_valid(ucred->gid))
                c->gid = ucred->gid;
        else
                (void) get_process_gid(c->pid, &c->gid);
}
static void client_context_read_basic(ClientContext *c) {
        char *t;

        assert(c);
        assert(pid_is_valid(c->pid));

        if (get_process_comm(c->pid, &t) >= 0)
                free_and_replace(c->comm, t);

        if (get_process_exe(c->pid, &t) >= 0)
                free_and_replace(c->exe, t);

        if (get_process_cmdline(c->pid, 0, false, &t) >= 0)
                free_and_replace(c->cmdline, t);

        if (get_process_capeff(c->pid, &t) >= 0)
                free_and_replace(c->capeff, t);
}
static int client_context_read_label(
                ClientContext *c,
                const char *label, size_t label_size) {

        assert(c);
        assert(pid_is_valid(c->pid));
        assert(label_size == 0 || label);

        if (label_size > 0) {
                char *l;

                /* If we got an SELinux label passed in it counts. */

                l = newdup_suffix0(char, label, label_size);
                if (!l)
                        return -ENOMEM;

                free_and_replace(c->label, l);
                c->label_size = label_size;
        }
#if HAVE_SELINUX
        else {
                char *con;

                /* If we got no SELinux label passed in, let's try to acquire one */

                if (getpidcon(c->pid, &con) >= 0) {
                        free_and_replace(c->label, con);
                        c->label_size = strlen(c->label);
                }
        }
#endif

        return 0;
}
static int client_context_read_cgroup(Server *s, ClientContext *c, const char *unit_id) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(c);

        /* Try to acquire the current cgroup path */
        r = cg_pid_get_path_shifted(c->pid, s->cgroup_root, &t);
        if (r < 0 || empty_or_root(t)) {

                /* We use the unit ID passed in as fallback if we have nothing cached yet and cg_pid_get_path_shifted()
                 * failed or process is running in a root cgroup. Zombie processes are automatically migrated to root cgroup
                 * on cgroup v1 and we want to be able to map log messages from them too. */
                if (unit_id && !c->unit) {
                        c->unit = strdup(unit_id);
                        if (c->unit)
                                return 0;
                }

                return r;
        }

        /* Let's shortcut this if the cgroup path didn't change */
        if (streq_ptr(c->cgroup, t))
                return 0;

        free_and_replace(c->cgroup, t);

        (void) cg_path_get_session(c->cgroup, &t);
        free_and_replace(c->session, t);

        if (cg_path_get_owner_uid(c->cgroup, &c->owner_uid) < 0)
                c->owner_uid = UID_INVALID;

        (void) cg_path_get_unit(c->cgroup, &t);
        free_and_replace(c->unit, t);

        (void) cg_path_get_user_unit(c->cgroup, &t);
        free_and_replace(c->user_unit, t);

        (void) cg_path_get_slice(c->cgroup, &t);
        free_and_replace(c->slice, t);

        (void) cg_path_get_user_slice(c->cgroup, &t);
        free_and_replace(c->user_slice, t);

        return 0;
}
static int client_context_read_invocation_id(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(s);
        assert(c);

        /* Read the invocation ID of a unit. PID 1 stores it in a per-unit symlink in /run/systemd/units/ */

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/invocation:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return sd_id128_from_string(value, &c->invocation_id);
}
static int client_context_read_log_level_max(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *value = NULL;
        const char *p;
        int r, ll;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-level-max:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        ll = log_level_from_string(value);
        if (ll < 0)
                return -EINVAL;

        c->log_level_max = ll;
        return 0;
}
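/* As far as the parser below is concerned, the log-extra-fields file written by PID 1 is a plain sequence of records,
 * each a little-endian 64-bit length followed by that many bytes of "FIELD=value" data. */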
static int client_context_read_extra_fields(
                Server *s,
                ClientContext *c) {

        size_t size = 0, n_iovec = 0, n_allocated = 0, left;
        _cleanup_free_ struct iovec *iovec = NULL;
        _cleanup_free_ void *data = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        struct stat st;
        const char *p;
        uint8_t *q;
        int r;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-extra-fields:", c->unit);

        if (c->extra_fields_mtime != NSEC_INFINITY) {
                if (stat(p, &st) < 0) {
                        if (errno == ENOENT)
                                return 0;

                        return -errno;
                }

                if (timespec_load_nsec(&st.st_mtim) == c->extra_fields_mtime)
                        return 0;
        }

        f = fopen(p, "re");
        if (!f) {
                if (errno == ENOENT)
                        return 0;

                return -errno;
        }

        if (fstat(fileno(f), &st) < 0) /* The file might have been replaced since the stat() above, let's get a new
                                        * one, that matches the stuff we are reading */
                return -errno;

        r = read_full_stream(f, (char**) &data, &size);
        if (r < 0)
                return r;

        q = data, left = size;
        while (left > 0) {
                uint8_t *field, *eq;
                uint64_t v, n;

                if (left < sizeof(uint64_t))
                        return -EBADMSG;

                v = unaligned_read_le64(q);
                if (v < 2)
                        return -EBADMSG;

                n = sizeof(uint64_t) + v;
                if (left < n)
                        return -EBADMSG;

                field = q + sizeof(uint64_t);

                eq = memchr(field, '=', v);
                if (!eq)
                        return -EBADMSG;

                if (!journal_field_valid((const char *) field, eq - field, false))
                        return -EBADMSG;

                if (!GREEDY_REALLOC(iovec, n_allocated, n_iovec+1))
                        return -ENOMEM;

                iovec[n_iovec++] = IOVEC_MAKE(field, v);

                left -= n, q += n;
        }

        /* Accept the new data, drop the old */
        free(c->extra_fields_iovec);
        free(c->extra_fields_data);

        c->extra_fields_iovec = TAKE_PTR(iovec);
        c->extra_fields_n_iovec = n_iovec;
        c->extra_fields_data = TAKE_PTR(data);
        c->extra_fields_mtime = timespec_load_nsec(&st.st_mtim);

        return 0;
}
static int client_context_read_log_rate_limit_interval(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou64(value, &c->log_rate_limit_interval);
}
static int client_context_read_log_rate_limit_burst(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou(value, &c->log_rate_limit_burst);
}
static void client_context_really_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);
        assert(pid_is_valid(c->pid));

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        client_context_read_uid_gid(c, ucred);
        client_context_read_basic(c);
        (void) client_context_read_label(c, label, label_size);

        (void) audit_session_from_pid(c->pid, &c->auditid);
        (void) audit_loginuid_from_pid(c->pid, &c->loginuid);

        (void) client_context_read_cgroup(s, c, unit_id);
        (void) client_context_read_invocation_id(s, c);
        (void) client_context_read_log_level_max(s, c);
        (void) client_context_read_extra_fields(s, c);
        (void) client_context_read_log_rate_limit_interval(c);
        (void) client_context_read_log_rate_limit_burst(c);

        c->timestamp = timestamp;

        if (c->in_lru) {
                assert(c->n_ref == 0);
                assert_se(prioq_reshuffle(s->client_contexts_lru, c, &c->lru_index) >= 0);
        }
}
void client_context_maybe_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        /* No cached data so far? Let's fill it up */
        if (c->timestamp == USEC_INFINITY)
                goto refresh;

        /* If the data isn't pinned and if the cached data is older than the upper limit, we flush it out
         * entirely. This follows the logic that as long as an entry is pinned the PID reuse is unlikely. */
        if (c->n_ref == 0 && c->timestamp + MAX_USEC < timestamp) {
                client_context_reset(s, c);
                goto refresh;
        }

        /* If the data is older than the lower limit, we refresh, but keep the old data for all we can't update */
        if (c->timestamp + REFRESH_USEC < timestamp)
                goto refresh;

        /* If the data passed along doesn't match the cached data we also do a refresh */
        if (ucred && uid_is_valid(ucred->uid) && c->uid != ucred->uid)
                goto refresh;

        if (ucred && gid_is_valid(ucred->gid) && c->gid != ucred->gid)
                goto refresh;

        if (label_size > 0 && (label_size != c->label_size || memcmp(label, c->label, label_size) != 0))
                goto refresh;

        return;

refresh:
        client_context_really_refresh(s, c, ucred, label, label_size, unit_id, timestamp);
}
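/* To give a concrete feel for the refresh windows above: an unpinned entry that is 0.5s old is used as-is, a 3s old
 * one is re-read from /proc while keeping whatever can't be refreshed, and a 6s old one is reset and re-acquired from
 * scratch. */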
static void client_context_try_shrink_to(Server *s, size_t limit) {
        ClientContext *c;
        usec_t t;

        assert(s);

        /* Flush any cache entries for PIDs that have already moved on. Don't do this
         * too often, since it's a slow process. */
        t = now(CLOCK_MONOTONIC);
        if (s->last_cache_pid_flush + MAX_USEC < t) {
                unsigned n = prioq_size(s->client_contexts_lru), idx = 0;

                /* We do a number of iterations based on the initial size of the prioq. When we remove an
                 * item, a new item is moved into its place, and items to the right might be reshuffled. */
                for (unsigned i = 0; i < n; i++) {
                        c = prioq_peek_by_index(s->client_contexts_lru, idx);
                        if (!c)
                                break;

                        assert(c->n_ref == 0);

                        if (!pid_is_unwaited(c->pid))
                                client_context_free(s, c);
                        else
                                idx++;
                }

                s->last_cache_pid_flush = t;
        }

        /* Bring the number of cache entries below the indicated limit, so that we can create a new entry without
         * breaching the limit. Note that we only flush out entries that aren't pinned here. This means the number of
         * cache entries may very well grow beyond the limit, if all entries stored remain pinned. */

        while (hashmap_size(s->client_contexts) > limit) {
                c = prioq_pop(s->client_contexts_lru);
                if (!c)
                        break; /* All remaining entries are pinned, give up */

                assert(c->in_lru);
                assert(c->n_ref == 0);

                c->in_lru = false;

                client_context_free(s, c);
        }
}
void client_context_flush_all(Server *s) {
        assert(s);

        /* Flush out all remaining entries. This assumes all references are already dropped. */

        s->my_context = client_context_release(s, s->my_context);
        s->pid1_context = client_context_release(s, s->pid1_context);

        client_context_try_shrink_to(s, 0);

        assert(prioq_size(s->client_contexts_lru) == 0);
        assert(hashmap_size(s->client_contexts) == 0);

        s->client_contexts_lru = prioq_free(s->client_contexts_lru);
        s->client_contexts = hashmap_free(s->client_contexts);
}
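/* Shared implementation of client_context_get() and client_context_acquire(): with add_ref set the returned entry is
 * pinned, i.e. its reference count is bumped and it is taken off the LRU prioq; without it the entry stays on (or is
 * put onto) the LRU prioq and remains subject to eviction by client_context_try_shrink_to(). */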
static int client_context_get_internal(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                bool add_ref,
                ClientContext **ret) {

        ClientContext *c;
        int r;

        assert(s);
        assert(ret);

        if (!pid_is_valid(pid))
                return -EINVAL;

        c = hashmap_get(s->client_contexts, PID_TO_PTR(pid));
        if (c) {

                if (add_ref) {
                        if (c->in_lru) {
                                /* The entry wasn't pinned so far, let's remove it from the LRU list then */
                                assert(c->n_ref == 0);
                                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
                                c->in_lru = false;
                        }

                        c->n_ref++;
                }

                client_context_maybe_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

                *ret = c;
                return 0;
        }

        client_context_try_shrink_to(s, cache_max()-1);

        r = client_context_new(s, pid, &c);
        if (r < 0)
                return r;

        if (add_ref)
                c->n_ref++;
        else {
                r = prioq_put(s->client_contexts_lru, c, &c->lru_index);
                if (r < 0) {
                        client_context_free(s, c);
                        return r;
                }

                c->in_lru = true;
        }

        client_context_really_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

        *ret = c;
        return 0;
}
int client_context_get(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, false, ret);
}
int client_context_acquire(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, true, ret);
}
ClientContext* client_context_release(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert(c->n_ref > 0);
        c->n_ref--;

        if (c->n_ref > 0)
                return NULL;

        /* The entry is not pinned anymore, let's add it to the LRU prioq if we can. If we can't we'll drop it
         * right away. */

        if (prioq_put(s->client_contexts_lru, c, &c->lru_index) < 0)
                client_context_free(s, c);
        else
                c->in_lru = true;

        return NULL;
}
void client_context_acquire_default(Server *s) {
        int r;

        assert(s);

        /* Ensure that our own and PID1's contexts are always pinned. Our own context is particularly useful to
         * generate driver messages. */

        if (!s->my_context) {
                struct ucred ucred = {
                        .pid = getpid_cached(),
                        .uid = getuid(),
                        .gid = getgid(),
                };

                r = client_context_acquire(s, ucred.pid, &ucred, NULL, 0, NULL, &s->my_context);
                if (r < 0)
                        log_warning_errno(r, "Failed to acquire our own context, ignoring: %m");
        }

        if (!s->pid1_context) {
                r = client_context_acquire(s, 1, NULL, NULL, 0, NULL, &s->pid1_context);
                if (r < 0)
                        log_warning_errno(r, "Failed to acquire PID1's context, ignoring: %m");
        }
}