/* SPDX-License-Identifier: LGPL-2.1-or-later */

#if HAVE_SELINUX
#include <selinux/selinux.h>
#endif

#include "alloc-util.h"
#include "audit-util.h"
#include "cgroup-util.h"
#include "env-util.h"
#include "fd-util.h"
#include "fileio.h"
#include "fs-util.h"
#include "iovec-util.h"
#include "journal-internal.h"
#include "journal-util.h"
#include "journald-client.h"
#include "journald-context.h"
#include "parse-util.h"
#include "path-util.h"
#include "process-util.h"
#include "procfs-util.h"
#include "string-util.h"
#include "syslog-util.h"
#include "unaligned.h"
#include "user-util.h"

/* This implements a metadata cache for clients, which are identified by their PID. Requesting metadata through /proc
 * is expensive, hence let's cache the data if we can. Note that this means the metadata might be out-of-date when we
 * store it, but it might already be anyway, as we request the data asynchronously from /proc at a different time than
 * when the log entry was originally created. We hence just increase the "window of inaccuracy" a bit.
 *
 * The cache is indexed by the PID. Entries may be "pinned" in the cache, in which case the entries are not removed
 * until they are unpinned. Unpinned entries are kept around until cache pressure is seen. Cache entries older than 5s
 * are never used (a sad attempt to deal with the UNIX weakness of PID reuse), cache entries older than 1s are
 * refreshed in an incremental way (meaning: data is reread from /proc, but any old data we can't refresh is not
 * flushed out). Data newer than 1s is used immediately without refresh.
 *
 * Log stream clients (i.e. all clients using the AF_UNIX/SOCK_STREAM stdout/stderr transport) will pin a cache entry
 * as long as their socket is connected. Note that cache entries are shared between different transports. That means a
 * cache entry pinned for the stream connection logic may be reused for the syslog or native protocols.
 *
 * Caching metadata like this has two major benefits:
 *
 * 1. Reading metadata is expensive, and we can thus substantially speed up log processing under flood.
 *
 * 2. Because metadata caching is shared between stream and datagram transports, and stream connections pin a cache
 *    entry, there's a good chance we can properly map a substantial set of datagram log messages to their originating
 *    service, as all services (unless explicitly configured otherwise) will have their stdout/stderr connected to a
 *    stream connection. This should improve cases where a service process logs immediately before exiting and we
 *    previously had trouble associating the log message with the service.
 *
 * NB: With and without the metadata cache: the implicitly added entry metadata in the journal (with the exception of
 * UID/PID/GID and SELinux label) must be understood as possibly slightly out of sync (i.e. sometimes slightly older
 * and sometimes slightly newer than what was current at the log event).
 */

/* We refresh every 1s */
#define REFRESH_USEC (1*USEC_PER_SEC)

/* Data older than 5s we flush out */
#define MAX_USEC (5*USEC_PER_SEC)

/* Keep at most 16K entries in the cache. (Note though that this limit may be violated if enough streams pin entries in
 * the cache, in which case we *do* permit this limit to be breached. That's safe however, as the number of stream
 * clients itself is limited.) */
#define CACHE_MAX_FALLBACK 128U
#define CACHE_MAX_MAX (16*1024U)
#define CACHE_MAX_MIN 64U

static size_t cache_max(void) {
        static size_t cached = SIZE_MAX;

        if (cached == SIZE_MAX) {
                uint64_t mem_total;
                int r;

                r = procfs_memory_get(&mem_total, NULL);
                if (r < 0) {
                        log_warning_errno(r, "Cannot query /proc/meminfo for MemTotal: %m");
                        cached = CACHE_MAX_FALLBACK;
                } else
                        /* Cache entries are usually a few kB, but the process cmdline is controlled by the
                         * user and can be up to _SC_ARG_MAX, usually 2MB. Let's say that approximately up to
                         * 1/8th of memory may be used by the cache.
                         *
                         * In the common case, this formula gives 64 cache entries for each GB of RAM. */
                        cached = CLAMP(mem_total / 8 / sc_arg_max(), CACHE_MAX_MIN, CACHE_MAX_MAX);
        }

        return cached;
}

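/* Worked example for the formula above, with assumed values: mem_total = 4 GiB and sc_arg_max() = 2 MiB
 * yield 4294967296 / 8 / 2097152 = 256 cache entries, which falls inside the [CACHE_MAX_MIN, CACHE_MAX_MAX]
 * clamp of [64, 16384]. */
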
static int client_context_compare(const void *a, const void *b) {
        const ClientContext *x = a, *y = b;
        int r;

        r = CMP(x->timestamp, y->timestamp);
        if (r != 0)
                return r;

        return CMP(x->pid, y->pid);
}

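/* This comparator orders the LRU prioq by timestamp, oldest first, with the PID as tie-breaker, so that
 * prioq_pop() in client_context_try_shrink_to() below always evicts the entry refreshed longest ago. */
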
static int client_context_new(Server *s, pid_t pid, ClientContext **ret) {
        _cleanup_free_ ClientContext *c = NULL;
        int r;

        assert(s);
        assert(pid_is_valid(pid));
        assert(ret);

        r = prioq_ensure_allocated(&s->client_contexts_lru, client_context_compare);
        if (r < 0)
                return r;

        c = new(ClientContext, 1);
        if (!c)
                return -ENOMEM;

        *c = (ClientContext) {
                .pid = pid,
                .uid = UID_INVALID,
                .gid = GID_INVALID,
                .auditid = AUDIT_SESSION_INVALID,
                .loginuid = UID_INVALID,
                .owner_uid = UID_INVALID,
                .lru_index = PRIOQ_IDX_NULL,
                .timestamp = USEC_INFINITY,
                .extra_fields_mtime = NSEC_INFINITY,
                .log_level_max = -1,
                .log_ratelimit_interval = s->ratelimit_interval,
                .log_ratelimit_burst = s->ratelimit_burst,
        };

        r = hashmap_ensure_put(&s->client_contexts, NULL, PID_TO_PTR(pid), c);
        if (r < 0)
                return r;

        *ret = TAKE_PTR(c);
        return 0;
}

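/* Note that a freshly allocated context starts out with timestamp == USEC_INFINITY, i.e. fully invalidated,
 * so the first client_context_maybe_refresh() call on it unconditionally populates it from /proc. */
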
static void client_context_reset(Server *s, ClientContext *c) {
        assert(s);
        assert(c);

        c->timestamp = USEC_INFINITY;

        c->uid = UID_INVALID;
        c->gid = GID_INVALID;

        c->comm = mfree(c->comm);
        c->exe = mfree(c->exe);
        c->cmdline = mfree(c->cmdline);
        c->capeff = mfree(c->capeff);

        c->auditid = AUDIT_SESSION_INVALID;
        c->loginuid = UID_INVALID;

        c->cgroup = mfree(c->cgroup);
        c->session = mfree(c->session);
        c->owner_uid = UID_INVALID;
        c->unit = mfree(c->unit);
        c->user_unit = mfree(c->user_unit);
        c->slice = mfree(c->slice);
        c->user_slice = mfree(c->user_slice);

        c->invocation_id = SD_ID128_NULL;

        c->label = mfree(c->label);
        c->label_size = 0;

        c->extra_fields_iovec = mfree(c->extra_fields_iovec);
        c->extra_fields_n_iovec = 0;
        c->extra_fields_data = mfree(c->extra_fields_data);
        c->extra_fields_mtime = NSEC_INFINITY;

        c->log_level_max = -1;

        c->log_ratelimit_interval = s->ratelimit_interval;
        c->log_ratelimit_burst = s->ratelimit_burst;

        c->log_filter_allowed_patterns = set_free_free(c->log_filter_allowed_patterns);
        c->log_filter_denied_patterns = set_free_free(c->log_filter_denied_patterns);
}

static ClientContext* client_context_free(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert_se(hashmap_remove(s->client_contexts, PID_TO_PTR(c->pid)) == c);

        if (c->in_lru)
                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);

        client_context_reset(s, c);

        return mfree(c);
}

static void client_context_read_uid_gid(ClientContext *c, const struct ucred *ucred) {
        assert(c);
        assert(pid_is_valid(c->pid));

        /* The ucred data passed in is always the most current and accurate, if we have any. Use it. */
        if (ucred && uid_is_valid(ucred->uid))
                c->uid = ucred->uid;
        else
                (void) pid_get_uid(c->pid, &c->uid);

        if (ucred && gid_is_valid(ucred->gid))
                c->gid = ucred->gid;
        else
                (void) get_process_gid(c->pid, &c->gid);
}

static void client_context_read_basic(ClientContext *c) {
        char *t;

        assert(c);
        assert(pid_is_valid(c->pid));

        if (pid_get_comm(c->pid, &t) >= 0)
                free_and_replace(c->comm, t);

        if (get_process_exe(c->pid, &t) >= 0)
                free_and_replace(c->exe, t);

        if (pid_get_cmdline(c->pid, SIZE_MAX, PROCESS_CMDLINE_QUOTE, &t) >= 0)
                free_and_replace(c->cmdline, t);

        if (get_process_capeff(c->pid, &t) >= 0)
                free_and_replace(c->capeff, t);
}

static int client_context_read_label(
                ClientContext *c,
                const char *label, size_t label_size) {

        assert(c);
        assert(pid_is_valid(c->pid));
        assert(label_size == 0 || label);

        if (label_size > 0) {
                char *l;

                /* If we got an SELinux label passed in it counts. */

                l = newdup_suffix0(char, label, label_size);
                if (!l)
                        return -ENOMEM;

                free_and_replace(c->label, l);
                c->label_size = label_size;
        }
#if HAVE_SELINUX
        else {
                char *con;

                /* If we got no SELinux label passed in, let's try to acquire one */

                if (getpidcon(c->pid, &con) >= 0 && con) {
                        free_and_replace(c->label, con);
                        c->label_size = strlen(c->label);
                }
        }
#endif

        return 0;
}

static int client_context_read_cgroup(Server *s, ClientContext *c, const char *unit_id) {
        _cleanup_free_ char *t = NULL;
        int r;

        assert(c);

        /* Try to acquire the current cgroup path */
        r = cg_pid_get_path_shifted(c->pid, s->cgroup_root, &t);
        if (r < 0 || empty_or_root(t)) {
                /* We use the unit ID passed in as fallback if we have nothing cached yet and cg_pid_get_path_shifted()
                 * failed or the process is running in the root cgroup. Zombie processes are automatically migrated to
                 * the root cgroup on cgroup v1, and we want to be able to map log messages from them too. */
                if (unit_id && !c->unit) {
                        c->unit = strdup(unit_id);
                        if (c->unit)
                                return 0;
                }

                return r;
        }

        (void) client_context_read_log_filter_patterns(c, t);

        /* Let's shortcut this if the cgroup path didn't change */
        if (streq_ptr(c->cgroup, t))
                return 0;

        free_and_replace(c->cgroup, t);

        (void) cg_path_get_session(c->cgroup, &t);
        free_and_replace(c->session, t);

        if (cg_path_get_owner_uid(c->cgroup, &c->owner_uid) < 0)
                c->owner_uid = UID_INVALID;

        (void) cg_path_get_unit(c->cgroup, &t);
        free_and_replace(c->unit, t);

        (void) cg_path_get_user_unit(c->cgroup, &t);
        free_and_replace(c->user_unit, t);

        (void) cg_path_get_slice(c->cgroup, &t);
        free_and_replace(c->slice, t);

        (void) cg_path_get_user_slice(c->cgroup, &t);
        free_and_replace(c->user_slice, t);

        return 0;
}

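/* For illustration, a hypothetical cgroup path like
 *     /user.slice/user-1000.slice/user@1000.service/app.slice/foo.service
 * would typically be split up by the cg_path_get_*() calls above into owner_uid=1000,
 * slice=user-1000.slice, unit=user@1000.service, user_slice=app.slice and user_unit=foo.service
 * (with no session scope present, c->session would stay NULL). */
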
static int client_context_read_invocation_id(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *p = NULL, *value = NULL;
        int r;

        assert(s);
        assert(c);

        /* Read the invocation ID of the unit the client belongs to.
         * PID 1 stores it in a per-unit symlink in /run/systemd/units/.
         * User managers store it in a per-unit symlink under /run/user/<uid>/systemd/units/. */

        if (!c->unit)
                return 0;

        if (c->user_unit) {
                r = asprintf(&p, "/run/user/" UID_FMT "/systemd/units/invocation:%s", c->owner_uid, c->user_unit);
                if (r < 0)
                        return r;
        } else {
                p = strjoin("/run/systemd/units/invocation:", c->unit);
                if (!p)
                        return -ENOMEM;
        }

        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return sd_id128_from_string(value, &c->invocation_id);
}

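/* E.g. for a hypothetical system unit "foo.service", the code above reads the symlink
 * /run/systemd/units/invocation:foo.service, whose target is the 128-bit invocation ID rendered as a hex
 * string, which sd_id128_from_string() then parses into c->invocation_id. */
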
static int client_context_read_log_level_max(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ char *value = NULL;
        const char *p;
        int r, ll;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-level-max:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        ll = log_level_from_string(value);
        if (ll < 0)
                return ll;

        c->log_level_max = ll;
        return 0;
}

static int client_context_read_extra_fields(
                Server *s,
                ClientContext *c) {

        _cleanup_free_ struct iovec *iovec = NULL;
        size_t size = 0, n_iovec = 0, left;
        _cleanup_free_ void *data = NULL;
        _cleanup_fclose_ FILE *f = NULL;
        struct stat st;
        const char *p;
        uint8_t *q;
        int r;

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-extra-fields:", c->unit);

        if (c->extra_fields_mtime != NSEC_INFINITY) {
                if (stat(p, &st) < 0) {
                        if (errno == ENOENT)
                                return 0;

                        return -errno;
                }

                if (timespec_load_nsec(&st.st_mtim) == c->extra_fields_mtime)
                        return 0;
        }

        f = fopen(p, "re");
        if (!f) {
                if (errno == ENOENT)
                        return 0;

                return -errno;
        }

        if (fstat(fileno(f), &st) < 0) /* The file might have been replaced since the stat() above, let's get a new
                                        * one, that matches the stuff we are reading */
                return -errno;

        r = read_full_stream(f, (char**) &data, &size);
        if (r < 0)
                return r;

        q = data, left = size;
        while (left > 0) {
                uint8_t *field, *eq;
                uint64_t v, n;

                if (left < sizeof(uint64_t))
                        return -EBADMSG;

                v = unaligned_read_le64(q);
                if (v < 2)
                        return -EBADMSG;

                n = sizeof(uint64_t) + v;
                if (left < n)
                        return -EBADMSG;

                field = q + sizeof(uint64_t);

                eq = memchr(field, '=', v);
                if (!eq)
                        return -EBADMSG;

                if (!journal_field_valid((const char *) field, eq - field, false))
                        return -EBADMSG;

                if (!GREEDY_REALLOC(iovec, n_iovec+1))
                        return -ENOMEM;

                iovec[n_iovec++] = IOVEC_MAKE(field, v);

                left -= n, q += n;
        }

        free(c->extra_fields_iovec);
        free(c->extra_fields_data);

        c->extra_fields_iovec = TAKE_PTR(iovec);
        c->extra_fields_n_iovec = n_iovec;
        c->extra_fields_data = TAKE_PTR(data);
        c->extra_fields_mtime = timespec_load_nsec(&st.st_mtim);

        return 0;
}

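/* The loop above parses a simple framing: the file is a sequence of [64-bit little-endian length][payload]
 * records, each payload being a "FIELD=value" string. As an illustration, a single record carrying the
 * hypothetical field "FOO=bar" would be the eight bytes 07 00 00 00 00 00 00 00 (length 7) followed by the
 * seven bytes "FOO=bar". */
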
static int client_context_read_log_ratelimit_interval(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-interval:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou64(value, &c->log_ratelimit_interval);
}

static int client_context_read_log_ratelimit_burst(ClientContext *c) {
        _cleanup_free_ char *value = NULL;
        const char *p;
        int r;

        assert(c);

        if (!c->unit)
                return 0;

        p = strjoina("/run/systemd/units/log-rate-limit-burst:", c->unit);
        r = readlink_malloc(p, &value);
        if (r < 0)
                return r;

        return safe_atou(value, &c->log_ratelimit_burst);
}

static void client_context_really_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);
        assert(pid_is_valid(c->pid));

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        client_context_read_uid_gid(c, ucred);
        client_context_read_basic(c);
        (void) client_context_read_label(c, label, label_size);

        (void) audit_session_from_pid(c->pid, &c->auditid);
        (void) audit_loginuid_from_pid(c->pid, &c->loginuid);

        (void) client_context_read_cgroup(s, c, unit_id);
        (void) client_context_read_invocation_id(s, c);
        (void) client_context_read_log_level_max(s, c);
        (void) client_context_read_extra_fields(s, c);
        (void) client_context_read_log_ratelimit_interval(c);
        (void) client_context_read_log_ratelimit_burst(c);

        c->timestamp = timestamp;

        if (c->in_lru) {
                assert(c->n_ref == 0);
                prioq_reshuffle(s->client_contexts_lru, c, &c->lru_index);
        }
}

void client_context_maybe_refresh(
                Server *s,
                ClientContext *c,
                const struct ucred *ucred,
                const char *label, size_t label_size,
                const char *unit_id,
                usec_t timestamp) {

        assert(s);
        assert(c);

        if (timestamp == USEC_INFINITY)
                timestamp = now(CLOCK_MONOTONIC);

        /* No cached data so far? Let's fill it up */
        if (c->timestamp == USEC_INFINITY)
                goto refresh;

        /* If the data isn't pinned and if the cached data is older than the upper limit, we flush it out
         * entirely. This follows the logic that as long as an entry is pinned the PID reuse is unlikely. */
        if (c->n_ref == 0 && c->timestamp + MAX_USEC < timestamp) {
                client_context_reset(s, c);
                goto refresh;
        }

        /* If the data is older than the lower limit, we refresh, but keep the old data for all we can't update */
        if (c->timestamp + REFRESH_USEC < timestamp)
                goto refresh;

        /* If the data passed along doesn't match the cached data we also do a refresh */
        if (ucred && uid_is_valid(ucred->uid) && c->uid != ucred->uid)
                goto refresh;

        if (ucred && gid_is_valid(ucred->gid) && c->gid != ucred->gid)
                goto refresh;

        if (label_size > 0 && (label_size != c->label_size || memcmp(label, c->label, label_size) != 0))
                goto refresh;

        return;

refresh:
        client_context_really_refresh(s, c, ucred, label, label_size, unit_id, timestamp);
}

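/* To summarize the decision above for an unpinned entry of age a: for a <= REFRESH_USEC (1s) the cached
 * data is used as-is (unless the passed-in ucred or label contradicts it), for REFRESH_USEC < a <= MAX_USEC
 * (5s) the entry is refreshed incrementally, and for a > MAX_USEC it is reset first and then refreshed from
 * scratch. */
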
static void client_context_try_shrink_to(Server *s, size_t limit) {
        ClientContext *c;
        usec_t t;

        assert(s);

        /* Flush any cache entries for PIDs that have already moved on. Don't do this
         * too often, since it's a slow process. */
        t = now(CLOCK_MONOTONIC);
        if (s->last_cache_pid_flush + MAX_USEC < t) {
                unsigned n = prioq_size(s->client_contexts_lru), idx = 0;

                /* We do a number of iterations based on the initial size of the prioq. When we remove an
                 * item, a new item is moved into its place, and items to the right might be reshuffled. */
                for (unsigned i = 0; i < n; i++) {
                        c = prioq_peek_by_index(s->client_contexts_lru, idx);

                        assert(c->n_ref == 0);

                        if (pid_is_unwaited(c->pid) == 0)
                                client_context_free(s, c);
                        else
                                idx++;
                }

                s->last_cache_pid_flush = t;
        }

        /* Bring the number of cache entries below the indicated limit, so that we can create a new entry without
         * breaching the limit. Note that we only flush out entries that aren't pinned here. This means the number of
         * cache entries may very well grow beyond the limit, if all entries stored remain pinned. */

        while (hashmap_size(s->client_contexts) > limit) {
                c = prioq_pop(s->client_contexts_lru);
                if (!c)
                        break; /* All remaining entries are pinned, give up */

                assert(c->in_lru);
                assert(c->n_ref == 0);

                c->in_lru = false;

                client_context_free(s, c);
        }
}

void client_context_flush_regular(Server *s) {
        client_context_try_shrink_to(s, 0);
}

void client_context_flush_all(Server *s) {
        assert(s);

        /* Flush out all remaining entries. This assumes all references are already dropped. */

        s->my_context = client_context_release(s, s->my_context);
        s->pid1_context = client_context_release(s, s->pid1_context);

        client_context_flush_regular(s);

        assert(prioq_isempty(s->client_contexts_lru));
        assert(hashmap_isempty(s->client_contexts));

        s->client_contexts_lru = prioq_free(s->client_contexts_lru);
        s->client_contexts = hashmap_free(s->client_contexts);
}

static int client_context_get_internal(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                bool add_ref,
                ClientContext **ret) {

        ClientContext *c;
        int r;

        assert(s);
        assert(ret);

        if (!pid_is_valid(pid))
                return -EINVAL;

        c = hashmap_get(s->client_contexts, PID_TO_PTR(pid));
        if (c) {

                if (add_ref) {
                        if (c->in_lru) {
                                /* The entry wasn't pinned so far, let's remove it from the LRU list then */
                                assert(c->n_ref == 0);
                                assert_se(prioq_remove(s->client_contexts_lru, c, &c->lru_index) >= 0);
                                c->in_lru = false;
                        }

                        c->n_ref++;
                }

                client_context_maybe_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

                *ret = c;
                return 0;
        }

        client_context_try_shrink_to(s, cache_max()-1);

        r = client_context_new(s, pid, &c);
        if (r < 0)
                return r;

        if (add_ref)
                c->n_ref++;
        else {
                r = prioq_put(s->client_contexts_lru, c, &c->lru_index);
                if (r < 0) {
                        client_context_free(s, c);
                        return r;
                }

                c->in_lru = true;
        }

        client_context_really_refresh(s, c, ucred, label, label_len, unit_id, USEC_INFINITY);

        *ret = c;
        return 0;
}

int client_context_get(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, false, ret);
}

int client_context_acquire(
                Server *s,
                pid_t pid,
                const struct ucred *ucred,
                const char *label, size_t label_len,
                const char *unit_id,
                ClientContext **ret) {

        return client_context_get_internal(s, pid, ucred, label, label_len, unit_id, true, ret);
}

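/* A minimal usage sketch for the pinned variant, assuming a valid Server *s plus a client PID and ucred
 * obtained from the connection (variable names hypothetical):
 *
 *         ClientContext *cc = NULL;
 *         if (client_context_acquire(s, pid, &ucred, NULL, 0, NULL, &cc) >= 0) {
 *                 ... use cc->unit, cc->cgroup, cc->comm, etc. while the entry stays pinned ...
 *                 cc = client_context_release(s, cc);
 *         }
 */
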
ClientContext *client_context_release(Server *s, ClientContext *c) {
        assert(s);

        if (!c)
                return NULL;

        assert(c->n_ref > 0);
        assert(!c->in_lru);

        c->n_ref--;
        if (c->n_ref > 0)
                return NULL;

        /* The entry is not pinned anymore, let's add it to the LRU prioq if we can. If we can't, we'll drop
         * it right away. */

        if (prioq_put(s->client_contexts_lru, c, &c->lru_index) < 0)
                client_context_free(s, c);
        else
                c->in_lru = true;

        return NULL;
}


void client_context_acquire_default(Server *s) {
        int r;

        assert(s);

        /* Ensure that our own and PID1's contexts are always pinned. Our own context is particularly useful
         * to generate driver messages. */

        if (!s->my_context) {
                struct ucred ucred = {
                        .pid = getpid_cached(),
                        .uid = getuid(),
                        .gid = getgid(),
                };

                r = client_context_acquire(s, ucred.pid, &ucred, NULL, 0, NULL, &s->my_context);
                if (r < 0)
                        log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
                                                    "Failed to acquire our own context, ignoring: %m");
        }

        if (!s->namespace && !s->pid1_context) {
                /* Acquire PID1's context, but only if we are in non-namespaced mode, since PID 1 is only
                 * going to log to the non-namespaced journal instance. */

                r = client_context_acquire(s, 1, NULL, NULL, 0, NULL, &s->pid1_context);
                if (r < 0)
                        log_ratelimit_warning_errno(r, JOURNAL_LOG_RATELIMIT,
                                                    "Failed to acquire PID1's context, ignoring: %m");
        }
}
799 }