1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <ftw.h>
5 #include <limits.h>
6 #include <signal.h>
7 #include <stddef.h>
8 #include <stdlib.h>
9 #include <sys/types.h>
10 #include <sys/utsname.h>
11 #include <sys/xattr.h>
12 #include <unistd.h>
13
14 #include "alloc-util.h"
15 #include "cgroup-util.h"
16 #include "def.h"
17 #include "dirent-util.h"
18 #include "extract-word.h"
19 #include "fd-util.h"
20 #include "fileio.h"
21 #include "format-util.h"
22 #include "fs-util.h"
23 #include "log.h"
24 #include "login-util.h"
25 #include "macro.h"
26 #include "missing_magic.h"
27 #include "mkdir.h"
28 #include "parse-util.h"
29 #include "path-util.h"
30 #include "process-util.h"
31 #include "set.h"
32 #include "special.h"
33 #include "stat-util.h"
34 #include "stdio-util.h"
35 #include "string-table.h"
36 #include "string-util.h"
37 #include "strv.h"
38 #include "unit-name.h"
39 #include "user-util.h"
40 #include "xattr-util.h"
41
42 static int cg_enumerate_items(const char *controller, const char *path, FILE **_f, const char *item) {
43 _cleanup_free_ char *fs = NULL;
44 FILE *f;
45 int r;
46
47 assert(_f);
48
49 r = cg_get_path(controller, path, item, &fs);
50 if (r < 0)
51 return r;
52
53 f = fopen(fs, "re");
54 if (!f)
55 return -errno;
56
57 *_f = f;
58 return 0;
59 }
60
61 int cg_enumerate_processes(const char *controller, const char *path, FILE **_f) {
62 return cg_enumerate_items(controller, path, _f, "cgroup.procs");
63 }
64
65 int cg_read_pid(FILE *f, pid_t *_pid) {
66 unsigned long ul;
67
68 /* Note that the cgroup.procs file might contain duplicates! See
69 * cgroups.txt for details. */
70
71 assert(f);
72 assert(_pid);
73
74 errno = 0;
75 if (fscanf(f, "%lu", &ul) != 1) {
76
77 if (feof(f))
78 return 0;
79
80 return errno_or_else(EIO);
81 }
82
83 if (ul <= 0)
84 return -EIO;
85
86 *_pid = (pid_t) ul;
87 return 1;
88 }
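/* For illustration: a typical caller combines cg_enumerate_processes() with
 * cg_read_pid(), stopping once cg_read_pid() returns 0 at EOF. The controller,
 * path and handle_pid() helper below are made-up examples, not part of this file:
 *
 *     _cleanup_fclose_ FILE *f = NULL;
 *     pid_t pid;
 *
 *     if (cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, "/foo.slice", &f) >= 0)
 *             while (cg_read_pid(f, &pid) > 0)
 *                     handle_pid(pid);
 *
 * As noted above, the same PID may be reported more than once. */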
89
90 int cg_read_event(
91 const char *controller,
92 const char *path,
93 const char *event,
94 char **ret) {
95
96 _cleanup_free_ char *events = NULL, *content = NULL;
97 int r;
98
99 r = cg_get_path(controller, path, "cgroup.events", &events);
100 if (r < 0)
101 return r;
102
103 r = read_full_virtual_file(events, &content, NULL);
104 if (r < 0)
105 return r;
106
107 for (const char *p = content;;) {
108 _cleanup_free_ char *line = NULL, *key = NULL, *val = NULL;
109 const char *q;
110
111 r = extract_first_word(&p, &line, "\n", 0);
112 if (r < 0)
113 return r;
114 if (r == 0)
115 return -ENOENT;
116
117 q = line;
118 r = extract_first_word(&q, &key, " ", 0);
119 if (r < 0)
120 return r;
121 if (r == 0)
122 return -EINVAL;
123
124 if (!streq(key, event))
125 continue;
126
127 val = strdup(q);
128 if (!val)
129 return -ENOMEM;
130
131 *ret = TAKE_PTR(val);
132 return 0;
133 }
134 }
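/* For illustration: on cgroup v2 the cgroup.events file consists of "key value"
 * lines, e.g.
 *
 *     populated 1
 *     frozen 0
 *
 * so for the example content above cg_read_event(controller, path, "populated", &t)
 * would set t to "1". */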
135
136 bool cg_ns_supported(void) {
137 static thread_local int enabled = -1;
138
139 if (enabled >= 0)
140 return enabled;
141
142 if (access("/proc/self/ns/cgroup", F_OK) < 0) {
143 if (errno != ENOENT)
144 log_debug_errno(errno, "Failed to check whether /proc/self/ns/cgroup is available, assuming not: %m");
145 enabled = false;
146 } else
147 enabled = true;
148
149 return enabled;
150 }
151
152 bool cg_freezer_supported(void) {
153 static thread_local int supported = -1;
154
155 if (supported >= 0)
156 return supported;
157
158 supported = cg_all_unified() > 0 && access("/sys/fs/cgroup/init.scope/cgroup.freeze", F_OK) == 0;
159
160 return supported;
161 }
162
163 int cg_enumerate_subgroups(const char *controller, const char *path, DIR **_d) {
164 _cleanup_free_ char *fs = NULL;
165 int r;
166 DIR *d;
167
168 assert(_d);
169
170 /* This is not recursive! */
171
172 r = cg_get_path(controller, path, NULL, &fs);
173 if (r < 0)
174 return r;
175
176 d = opendir(fs);
177 if (!d)
178 return -errno;
179
180 *_d = d;
181 return 0;
182 }
183
184 int cg_read_subgroup(DIR *d, char **fn) {
185 struct dirent *de;
186
187 assert(d);
188 assert(fn);
189
190 FOREACH_DIRENT_ALL(de, d, return -errno) {
191 char *b;
192
193 if (de->d_type != DT_DIR)
194 continue;
195
196 if (dot_or_dot_dot(de->d_name))
197 continue;
198
199 b = strdup(de->d_name);
200 if (!b)
201 return -ENOMEM;
202
203 *fn = b;
204 return 1;
205 }
206
207 return 0;
208 }
209
210 int cg_rmdir(const char *controller, const char *path) {
211 _cleanup_free_ char *p = NULL;
212 int r;
213
214 r = cg_get_path(controller, path, NULL, &p);
215 if (r < 0)
216 return r;
217
218 r = rmdir(p);
219 if (r < 0 && errno != ENOENT)
220 return -errno;
221
222 r = cg_hybrid_unified();
223 if (r <= 0)
224 return r;
225
226 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
227 r = cg_rmdir(SYSTEMD_CGROUP_CONTROLLER_LEGACY, path);
228 if (r < 0)
229 log_warning_errno(r, "Failed to remove compat systemd cgroup %s: %m", path);
230 }
231
232 return 0;
233 }
234
235 static int cg_kill_items(
236 const char *controller,
237 const char *path,
238 int sig,
239 CGroupFlags flags,
240 Set *s,
241 cg_kill_log_func_t log_kill,
242 void *userdata,
243 const char *item) {
244
245 _cleanup_set_free_ Set *allocated_set = NULL;
246 bool done = false;
247 int r, ret = 0, ret_log_kill = 0;
248 pid_t my_pid;
249
250 assert(sig >= 0);
251
252 /* Don't send SIGCONT twice. Also, SIGKILL always works even when the process is suspended, hence don't send
253 * SIGCONT on SIGKILL. */
254 if (IN_SET(sig, SIGCONT, SIGKILL))
255 flags &= ~CGROUP_SIGCONT;
256
257 /* This goes through the tasks list and kills them all. This
258 * is repeated until no further processes are added to the
259 * tasks list, to properly handle forking processes */
260
261 if (!s) {
262 s = allocated_set = set_new(NULL);
263 if (!s)
264 return -ENOMEM;
265 }
266
267 my_pid = getpid_cached();
268
269 do {
270 _cleanup_fclose_ FILE *f = NULL;
271 pid_t pid = 0;
272 done = true;
273
274 r = cg_enumerate_items(controller, path, &f, item);
275 if (r < 0) {
276 if (ret >= 0 && r != -ENOENT)
277 return r;
278
279 return ret;
280 }
281
282 while ((r = cg_read_pid(f, &pid)) > 0) {
283
284 if ((flags & CGROUP_IGNORE_SELF) && pid == my_pid)
285 continue;
286
287 if (set_get(s, PID_TO_PTR(pid)) == PID_TO_PTR(pid))
288 continue;
289
290 if (log_kill)
291 ret_log_kill = log_kill(pid, sig, userdata);
292
293 /* If we haven't killed this process yet, kill
294 * it */
295 if (kill(pid, sig) < 0) {
296 if (ret >= 0 && errno != ESRCH)
297 ret = -errno;
298 } else {
299 if (flags & CGROUP_SIGCONT)
300 (void) kill(pid, SIGCONT);
301
302 if (ret == 0) {
303 if (log_kill)
304 ret = ret_log_kill;
305 else
306 ret = 1;
307 }
308 }
309
310 done = false;
311
312 r = set_put(s, PID_TO_PTR(pid));
313 if (r < 0) {
314 if (ret >= 0)
315 return r;
316
317 return ret;
318 }
319 }
320
321 if (r < 0) {
322 if (ret >= 0)
323 return r;
324
325 return ret;
326 }
327
328 /* To avoid racing against processes which fork
329 * quicker than we can kill them, we repeat this until
330 * no new pids need to be killed. */
331
332 } while (!done);
333
334 return ret;
335 }
336
337 int cg_kill(
338 const char *controller,
339 const char *path,
340 int sig,
341 CGroupFlags flags,
342 Set *s,
343 cg_kill_log_func_t log_kill,
344 void *userdata) {
345 int r;
346
347 r = cg_kill_items(controller, path, sig, flags, s, log_kill, userdata, "cgroup.procs");
348 if (r < 0 || sig != SIGKILL)
349 return r;
350
351 /* Only when killing with SIGKILL and when using cgroup v2, kill the remaining threads manually as
352 a workaround for a kernel bug. It was fixed in 5.2-rc5 (c03cd7738a83), backported to 4.19.66
353 (4340d175b898) and 4.14.138 (feb6b123b7dd). */
354 r = cg_unified_controller(controller);
355 if (r <= 0)
356 return r;
357
358 return cg_kill_items(controller, path, sig, flags, s, log_kill, userdata, "cgroup.threads");
359 }
360
361 int cg_kill_recursive(
362 const char *controller,
363 const char *path,
364 int sig,
365 CGroupFlags flags,
366 Set *s,
367 cg_kill_log_func_t log_kill,
368 void *userdata) {
369
370 _cleanup_set_free_ Set *allocated_set = NULL;
371 _cleanup_closedir_ DIR *d = NULL;
372 int r, ret;
373 char *fn;
374
375 assert(path);
376 assert(sig >= 0);
377
378 if (!s) {
379 s = allocated_set = set_new(NULL);
380 if (!s)
381 return -ENOMEM;
382 }
383
384 ret = cg_kill(controller, path, sig, flags, s, log_kill, userdata);
385
386 r = cg_enumerate_subgroups(controller, path, &d);
387 if (r < 0) {
388 if (ret >= 0 && r != -ENOENT)
389 return r;
390
391 return ret;
392 }
393
394 while ((r = cg_read_subgroup(d, &fn)) > 0) {
395 _cleanup_free_ char *p = NULL;
396
397 p = path_join(empty_to_root(path), fn);
398 free(fn);
399 if (!p)
400 return -ENOMEM;
401
402 r = cg_kill_recursive(controller, p, sig, flags, s, log_kill, userdata);
403 if (r != 0 && ret >= 0)
404 ret = r;
405 }
406 if (ret >= 0 && r < 0)
407 ret = r;
408
409 if (flags & CGROUP_REMOVE) {
410 r = cg_rmdir(controller, path);
411 if (r < 0 && ret >= 0 && !IN_SET(r, -ENOENT, -EBUSY))
412 return r;
413 }
414
415 return ret;
416 }
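/* For illustration: killing a whole subtree. The unit path below is a made-up
 * example; the Set and the log callback may both be NULL as shown:
 *
 *     r = cg_kill_recursive(SYSTEMD_CGROUP_CONTROLLER, "/foo.slice/bar.service",
 *                           SIGKILL, CGROUP_IGNORE_SELF|CGROUP_REMOVE,
 *                           NULL, NULL, NULL);
 *
 * With CGROUP_REMOVE the emptied cgroups are removed once their processes have
 * been killed. */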
417
418 static const char *controller_to_dirname(const char *controller) {
419 const char *e;
420
421 assert(controller);
422
423 /* Converts a controller name to the directory name below
424 * /sys/fs/cgroup/ we want to mount it to. Effectively, this
425 * just cuts off the "name=" prefix used for named
426 * hierarchies, if it is specified. */
427
428 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
429 if (cg_hybrid_unified() > 0)
430 controller = SYSTEMD_CGROUP_CONTROLLER_HYBRID;
431 else
432 controller = SYSTEMD_CGROUP_CONTROLLER_LEGACY;
433 }
434
435 e = startswith(controller, "name=");
436 if (e)
437 return e;
438
439 return controller;
440 }
441
442 static int join_path_legacy(const char *controller, const char *path, const char *suffix, char **fs) {
443 const char *dn;
444 char *t = NULL;
445
446 assert(fs);
447 assert(controller);
448
449 dn = controller_to_dirname(controller);
450
451 if (isempty(path) && isempty(suffix))
452 t = path_join("/sys/fs/cgroup", dn);
453 else if (isempty(path))
454 t = path_join("/sys/fs/cgroup", dn, suffix);
455 else if (isempty(suffix))
456 t = path_join("/sys/fs/cgroup", dn, path);
457 else
458 t = path_join("/sys/fs/cgroup", dn, path, suffix);
459 if (!t)
460 return -ENOMEM;
461
462 *fs = t;
463 return 0;
464 }
465
466 static int join_path_unified(const char *path, const char *suffix, char **fs) {
467 char *t;
468
469 assert(fs);
470
471 if (isempty(path) && isempty(suffix))
472 t = strdup("/sys/fs/cgroup");
473 else if (isempty(path))
474 t = path_join("/sys/fs/cgroup", suffix);
475 else if (isempty(suffix))
476 t = path_join("/sys/fs/cgroup", path);
477 else
478 t = path_join("/sys/fs/cgroup", path, suffix);
479 if (!t)
480 return -ENOMEM;
481
482 *fs = t;
483 return 0;
484 }
485
486 int cg_get_path(const char *controller, const char *path, const char *suffix, char **fs) {
487 int r;
488
489 assert(fs);
490
491 if (!controller) {
492 char *t;
493
494 /* If no controller is specified, we return the path
495 * *below* the controllers, without any prefix. */
496
497 if (!path && !suffix)
498 return -EINVAL;
499
500 if (!suffix)
501 t = strdup(path);
502 else if (!path)
503 t = strdup(suffix);
504 else
505 t = path_join(path, suffix);
506 if (!t)
507 return -ENOMEM;
508
509 *fs = path_simplify(t, false);
510 return 0;
511 }
512
513 if (!cg_controller_is_valid(controller))
514 return -EINVAL;
515
516 r = cg_all_unified();
517 if (r < 0)
518 return r;
519 if (r > 0)
520 r = join_path_unified(path, suffix, fs);
521 else
522 r = join_path_legacy(controller, path, suffix, fs);
523 if (r < 0)
524 return r;
525
526 path_simplify(*fs, false);
527 return 0;
528 }
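/* For illustration: how the arguments map to filesystem paths (example values):
 *
 *     cg_get_path("memory", "/foo.slice", "cgroup.procs", &fs)
 *         unified hierarchy: /sys/fs/cgroup/foo.slice/cgroup.procs
 *         legacy hierarchy:  /sys/fs/cgroup/memory/foo.slice/cgroup.procs
 *
 *     cg_get_path(NULL, "/foo.slice", "cgroup.procs", &fs)
 *         always:            /foo.slice/cgroup.procs   (no controller prefix) */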
529
530 static int controller_is_v1_accessible(const char *root, const char *controller) {
531 const char *cpath, *dn;
532
533 assert(controller);
534
535 dn = controller_to_dirname(controller);
536
537 /* If root is specified, we check that:
538 * - the possible subcgroup has been created at root,
539 * - we can modify the hierarchy. */
540
541 cpath = strjoina("/sys/fs/cgroup/", dn, root, root ? "/cgroup.procs" : NULL);
542 if (laccess(cpath, root ? W_OK : F_OK) < 0)
543 return -errno;
544
545 return 0;
546 }
547
548 int cg_get_path_and_check(const char *controller, const char *path, const char *suffix, char **fs) {
549 int r;
550
551 assert(controller);
552 assert(fs);
553
554 if (!cg_controller_is_valid(controller))
555 return -EINVAL;
556
557 r = cg_all_unified();
558 if (r < 0)
559 return r;
560 if (r > 0) {
561 /* In the unified hierarchy all controllers are considered accessible,
562 * except for the named hierarchies */
563 if (startswith(controller, "name="))
564 return -EOPNOTSUPP;
565 } else {
566 /* Check if the specified controller is actually accessible */
567 r = controller_is_v1_accessible(NULL, controller);
568 if (r < 0)
569 return r;
570 }
571
572 return cg_get_path(controller, path, suffix, fs);
573 }
574
575 int cg_set_xattr(const char *controller, const char *path, const char *name, const void *value, size_t size, int flags) {
576 _cleanup_free_ char *fs = NULL;
577 int r;
578
579 assert(path);
580 assert(name);
581 assert(value || size <= 0);
582
583 r = cg_get_path(controller, path, NULL, &fs);
584 if (r < 0)
585 return r;
586
587 if (setxattr(fs, name, value, size, flags) < 0)
588 return -errno;
589
590 return 0;
591 }
592
593 int cg_get_xattr(const char *controller, const char *path, const char *name, void *value, size_t size) {
594 _cleanup_free_ char *fs = NULL;
595 ssize_t n;
596 int r;
597
598 assert(path);
599 assert(name);
600
601 r = cg_get_path(controller, path, NULL, &fs);
602 if (r < 0)
603 return r;
604
605 n = getxattr(fs, name, value, size);
606 if (n < 0)
607 return -errno;
608
609 return (int) n;
610 }
611
612 int cg_get_xattr_malloc(const char *controller, const char *path, const char *name, char **ret) {
613 _cleanup_free_ char *fs = NULL;
614 int r;
615
616 assert(path);
617 assert(name);
618
619 r = cg_get_path(controller, path, NULL, &fs);
620 if (r < 0)
621 return r;
622
623 r = getxattr_malloc(fs, name, ret, false);
624 if (r < 0)
625 return r;
626
627 return r;
628 }
629
630 int cg_get_xattr_bool(const char *controller, const char *path, const char *name) {
631 _cleanup_free_ char *val = NULL;
632 int r;
633
634 assert(path);
635 assert(name);
636
637 r = cg_get_xattr_malloc(controller, path, name, &val);
638 if (r < 0)
639 return r;
640
641 return parse_boolean(val);
642 }
643
644 int cg_remove_xattr(const char *controller, const char *path, const char *name) {
645 _cleanup_free_ char *fs = NULL;
646 int r;
647
648 assert(path);
649 assert(name);
650
651 r = cg_get_path(controller, path, NULL, &fs);
652 if (r < 0)
653 return r;
654
655 if (removexattr(fs, name) < 0)
656 return -errno;
657
658 return 0;
659 }
660
661 int cg_pid_get_path(const char *controller, pid_t pid, char **ret_path) {
662 _cleanup_fclose_ FILE *f = NULL;
663 const char *fs, *controller_str;
664 int unified, r;
665
666 assert(pid >= 0);
667 assert(ret_path);
668
669 if (controller) {
670 if (!cg_controller_is_valid(controller))
671 return -EINVAL;
672 } else
673 controller = SYSTEMD_CGROUP_CONTROLLER;
674
675 unified = cg_unified_controller(controller);
676 if (unified < 0)
677 return unified;
678 if (unified == 0) {
679 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER))
680 controller_str = SYSTEMD_CGROUP_CONTROLLER_LEGACY;
681 else
682 controller_str = controller;
683 }
684
685 fs = procfs_file_alloca(pid, "cgroup");
686 r = fopen_unlocked(fs, "re", &f);
687 if (r == -ENOENT)
688 return -ESRCH;
689 if (r < 0)
690 return r;
691
692 for (;;) {
693 _cleanup_free_ char *line = NULL;
694 char *e;
695
696 r = read_line(f, LONG_LINE_MAX, &line);
697 if (r < 0)
698 return r;
699 if (r == 0)
700 return -ENODATA;
701
702 if (unified) {
703 e = startswith(line, "0:");
704 if (!e)
705 continue;
706
707 e = strchr(e, ':');
708 if (!e)
709 continue;
710 } else {
711 char *l;
712
713 l = strchr(line, ':');
714 if (!l)
715 continue;
716
717 l++;
718 e = strchr(l, ':');
719 if (!e)
720 continue;
721 *e = 0;
722
723 r = string_contains_word(l, ",", controller_str);
724 if (r < 0)
725 return r;
726 if (r == 0)
727 continue;
728 }
729
730 char *path = strdup(e + 1);
731 if (!path)
732 return -ENOMEM;
733
734 /* Truncate suffix indicating the process is a zombie */
735 e = endswith(path, " (deleted)");
736 if (e)
737 *e = 0;
738
739 *ret_path = path;
740 return 0;
741 }
742 }
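/* For illustration: /proc/$PID/cgroup lines look like this (the paths are
 * examples, and the hierarchy number on the legacy line varies):
 *
 *     0::/system.slice/foo.service                  (unified hierarchy)
 *     1:name=systemd:/system.slice/foo.service      (legacy name=systemd hierarchy)
 *
 * For either layout, cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &path) would
 * hence return "/system.slice/foo.service". */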
743
744 int cg_install_release_agent(const char *controller, const char *agent) {
745 _cleanup_free_ char *fs = NULL, *contents = NULL;
746 const char *sc;
747 int r;
748
749 assert(agent);
750
751 r = cg_unified_controller(controller);
752 if (r < 0)
753 return r;
754 if (r > 0) /* doesn't apply to unified hierarchy */
755 return -EOPNOTSUPP;
756
757 r = cg_get_path(controller, NULL, "release_agent", &fs);
758 if (r < 0)
759 return r;
760
761 r = read_one_line_file(fs, &contents);
762 if (r < 0)
763 return r;
764
765 sc = strstrip(contents);
766 if (isempty(sc)) {
767 r = write_string_file(fs, agent, WRITE_STRING_FILE_DISABLE_BUFFER);
768 if (r < 0)
769 return r;
770 } else if (!path_equal(sc, agent))
771 return -EEXIST;
772
773 fs = mfree(fs);
774 r = cg_get_path(controller, NULL, "notify_on_release", &fs);
775 if (r < 0)
776 return r;
777
778 contents = mfree(contents);
779 r = read_one_line_file(fs, &contents);
780 if (r < 0)
781 return r;
782
783 sc = strstrip(contents);
784 if (streq(sc, "0")) {
785 r = write_string_file(fs, "1", WRITE_STRING_FILE_DISABLE_BUFFER);
786 if (r < 0)
787 return r;
788
789 return 1;
790 }
791
792 if (!streq(sc, "1"))
793 return -EIO;
794
795 return 0;
796 }
797
798 int cg_uninstall_release_agent(const char *controller) {
799 _cleanup_free_ char *fs = NULL;
800 int r;
801
802 r = cg_unified_controller(controller);
803 if (r < 0)
804 return r;
805 if (r > 0) /* Doesn't apply to unified hierarchy */
806 return -EOPNOTSUPP;
807
808 r = cg_get_path(controller, NULL, "notify_on_release", &fs);
809 if (r < 0)
810 return r;
811
812 r = write_string_file(fs, "0", WRITE_STRING_FILE_DISABLE_BUFFER);
813 if (r < 0)
814 return r;
815
816 fs = mfree(fs);
817
818 r = cg_get_path(controller, NULL, "release_agent", &fs);
819 if (r < 0)
820 return r;
821
822 r = write_string_file(fs, "", WRITE_STRING_FILE_DISABLE_BUFFER);
823 if (r < 0)
824 return r;
825
826 return 0;
827 }
828
829 int cg_is_empty(const char *controller, const char *path) {
830 _cleanup_fclose_ FILE *f = NULL;
831 pid_t pid;
832 int r;
833
834 assert(path);
835
836 r = cg_enumerate_processes(controller, path, &f);
837 if (r == -ENOENT)
838 return true;
839 if (r < 0)
840 return r;
841
842 r = cg_read_pid(f, &pid);
843 if (r < 0)
844 return r;
845
846 return r == 0;
847 }
848
849 int cg_is_empty_recursive(const char *controller, const char *path) {
850 int r;
851
852 assert(path);
853
854 /* The root cgroup is always populated */
855 if (controller && empty_or_root(path))
856 return false;
857
858 r = cg_unified_controller(controller);
859 if (r < 0)
860 return r;
861 if (r > 0) {
862 _cleanup_free_ char *t = NULL;
863
864 /* On the unified hierarchy we can check empty state
865 * via the "populated" attribute of "cgroup.events". */
866
867 r = cg_read_event(controller, path, "populated", &t);
868 if (r == -ENOENT)
869 return true;
870 if (r < 0)
871 return r;
872
873 return streq(t, "0");
874 } else {
875 _cleanup_closedir_ DIR *d = NULL;
876 char *fn;
877
878 r = cg_is_empty(controller, path);
879 if (r <= 0)
880 return r;
881
882 r = cg_enumerate_subgroups(controller, path, &d);
883 if (r == -ENOENT)
884 return true;
885 if (r < 0)
886 return r;
887
888 while ((r = cg_read_subgroup(d, &fn)) > 0) {
889 _cleanup_free_ char *p = NULL;
890
891 p = path_join(path, fn);
892 free(fn);
893 if (!p)
894 return -ENOMEM;
895
896 r = cg_is_empty_recursive(controller, p);
897 if (r <= 0)
898 return r;
899 }
900 if (r < 0)
901 return r;
902
903 return true;
904 }
905 }
906
907 int cg_split_spec(const char *spec, char **ret_controller, char **ret_path) {
908 _cleanup_free_ char *controller = NULL, *path = NULL;
909
910 assert(spec);
911
912 if (*spec == '/') {
913 if (!path_is_normalized(spec))
914 return -EINVAL;
915
916 if (ret_path) {
917 path = strdup(spec);
918 if (!path)
919 return -ENOMEM;
920
921 path_simplify(path, false);
922 }
923
924 } else {
925 const char *e;
926
927 e = strchr(spec, ':');
928 if (e) {
929 controller = strndup(spec, e-spec);
930 if (!controller)
931 return -ENOMEM;
932 if (!cg_controller_is_valid(controller))
933 return -EINVAL;
934
935 if (!isempty(e + 1)) {
936 path = strdup(e+1);
937 if (!path)
938 return -ENOMEM;
939
940 if (!path_is_normalized(path) ||
941 !path_is_absolute(path))
942 return -EINVAL;
943
944 path_simplify(path, false);
945 }
946
947 } else {
948 if (!cg_controller_is_valid(spec))
949 return -EINVAL;
950
951 if (ret_controller) {
952 controller = strdup(spec);
953 if (!controller)
954 return -ENOMEM;
955 }
956 }
957 }
958
959 if (ret_controller)
960 *ret_controller = TAKE_PTR(controller);
961 if (ret_path)
962 *ret_path = TAKE_PTR(path);
963 return 0;
964 }
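/* For illustration: the accepted spec forms and how they are split (example values):
 *
 *     "/foo/bar"          -> controller: NULL,     path: "/foo/bar"
 *     "memory:/foo/bar"   -> controller: "memory", path: "/foo/bar"
 *     "memory"            -> controller: "memory", path: NULL */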
965
966 int cg_mangle_path(const char *path, char **result) {
967 _cleanup_free_ char *c = NULL, *p = NULL;
968 char *t;
969 int r;
970
971 assert(path);
972 assert(result);
973
974 /* First, check if it already is a filesystem path */
975 if (path_startswith(path, "/sys/fs/cgroup")) {
976
977 t = strdup(path);
978 if (!t)
979 return -ENOMEM;
980
981 *result = path_simplify(t, false);
982 return 0;
983 }
984
985 /* Otherwise, treat it as cg spec */
986 r = cg_split_spec(path, &c, &p);
987 if (r < 0)
988 return r;
989
990 return cg_get_path(c ?: SYSTEMD_CGROUP_CONTROLLER, p ?: "/", NULL, result);
991 }
992
993 int cg_get_root_path(char **path) {
994 char *p, *e;
995 int r;
996
997 assert(path);
998
999 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 1, &p);
1000 if (r < 0)
1001 return r;
1002
1003 e = endswith(p, "/" SPECIAL_INIT_SCOPE);
1004 if (!e)
1005 e = endswith(p, "/" SPECIAL_SYSTEM_SLICE); /* legacy */
1006 if (!e)
1007 e = endswith(p, "/system"); /* even more legacy */
1008 if (e)
1009 *e = 0;
1010
1011 *path = p;
1012 return 0;
1013 }
1014
1015 int cg_shift_path(const char *cgroup, const char *root, const char **shifted) {
1016 _cleanup_free_ char *rt = NULL;
1017 char *p;
1018 int r;
1019
1020 assert(cgroup);
1021 assert(shifted);
1022
1023 if (!root) {
1024 /* If the root was specified let's use that, otherwise
1025 * let's determine it from PID 1 */
1026
1027 r = cg_get_root_path(&rt);
1028 if (r < 0)
1029 return r;
1030
1031 root = rt;
1032 }
1033
1034 p = path_startswith(cgroup, root);
1035 if (p && p > cgroup)
1036 *shifted = p - 1;
1037 else
1038 *shifted = cgroup;
1039
1040 return 0;
1041 }
1042
1043 int cg_pid_get_path_shifted(pid_t pid, const char *root, char **cgroup) {
1044 _cleanup_free_ char *raw = NULL;
1045 const char *c;
1046 int r;
1047
1048 assert(pid >= 0);
1049 assert(cgroup);
1050
1051 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &raw);
1052 if (r < 0)
1053 return r;
1054
1055 r = cg_shift_path(raw, root, &c);
1056 if (r < 0)
1057 return r;
1058
1059 if (c == raw)
1060 *cgroup = TAKE_PTR(raw);
1061 else {
1062 char *n;
1063
1064 n = strdup(c);
1065 if (!n)
1066 return -ENOMEM;
1067
1068 *cgroup = n;
1069 }
1070
1071 return 0;
1072 }
1073
1074 int cg_path_decode_unit(const char *cgroup, char **unit) {
1075 char *c, *s;
1076 size_t n;
1077
1078 assert(cgroup);
1079 assert(unit);
1080
1081 n = strcspn(cgroup, "/");
1082 if (n < 3)
1083 return -ENXIO;
1084
1085 c = strndupa(cgroup, n);
1086 c = cg_unescape(c);
1087
1088 if (!unit_name_is_valid(c, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
1089 return -ENXIO;
1090
1091 s = strdup(c);
1092 if (!s)
1093 return -ENOMEM;
1094
1095 *unit = s;
1096 return 0;
1097 }
1098
1099 static bool valid_slice_name(const char *p, size_t n) {
1100
1101 if (!p)
1102 return false;
1103
1104 if (n < STRLEN("x.slice"))
1105 return false;
1106
1107 if (memcmp(p + n - 6, ".slice", 6) == 0) {
1108 char buf[n+1], *c;
1109
1110 memcpy(buf, p, n);
1111 buf[n] = 0;
1112
1113 c = cg_unescape(buf);
1114
1115 return unit_name_is_valid(c, UNIT_NAME_PLAIN);
1116 }
1117
1118 return false;
1119 }
1120
1121 static const char *skip_slices(const char *p) {
1122 assert(p);
1123
1124 /* Skips over all slice assignments */
1125
1126 for (;;) {
1127 size_t n;
1128
1129 p += strspn(p, "/");
1130
1131 n = strcspn(p, "/");
1132 if (!valid_slice_name(p, n))
1133 return p;
1134
1135 p += n;
1136 }
1137 }
1138
1139 int cg_path_get_unit(const char *path, char **ret) {
1140 _cleanup_free_ char *unit = NULL;
1141 const char *e;
1142 int r;
1143
1144 assert(path);
1145 assert(ret);
1146
1147 e = skip_slices(path);
1148
1149 r = cg_path_decode_unit(e, &unit);
1150 if (r < 0)
1151 return r;
1152
1153 /* We skipped over the slices, don't accept any now */
1154 if (endswith(unit, ".slice"))
1155 return -ENXIO;
1156
1157 *ret = TAKE_PTR(unit);
1158 return 0;
1159 }
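/* For illustration: cg_path_get_unit() skips the leading .slice components and
 * returns the first non-slice unit, e.g. for these example paths:
 *
 *     /system.slice/foo.service                    -> "foo.service"
 *     /user.slice/user-1000.slice/session-4.scope  -> "session-4.scope" */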
1160
1161 int cg_pid_get_unit(pid_t pid, char **unit) {
1162 _cleanup_free_ char *cgroup = NULL;
1163 int r;
1164
1165 assert(unit);
1166
1167 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1168 if (r < 0)
1169 return r;
1170
1171 return cg_path_get_unit(cgroup, unit);
1172 }
1173
1174 /**
1175 * Skip session-*.scope, but require it to be there.
1176 */
1177 static const char *skip_session(const char *p) {
1178 size_t n;
1179
1180 if (isempty(p))
1181 return NULL;
1182
1183 p += strspn(p, "/");
1184
1185 n = strcspn(p, "/");
1186 if (n < STRLEN("session-x.scope"))
1187 return NULL;
1188
1189 if (memcmp(p, "session-", 8) == 0 && memcmp(p + n - 6, ".scope", 6) == 0) {
1190 char buf[n - 8 - 6 + 1];
1191
1192 memcpy(buf, p + 8, n - 8 - 6);
1193 buf[n - 8 - 6] = 0;
1194
1195 /* Note that session scopes never need unescaping,
1196 * since they cannot conflict with the kernel's own
1197 * names, hence we don't need to call cg_unescape()
1198 * here. */
1199
1200 if (!session_id_valid(buf))
1201 return NULL;
1202
1203 p += n;
1204 p += strspn(p, "/");
1205 return p;
1206 }
1207
1208 return NULL;
1209 }
1210
1211 /**
1212 * Skip user@*.service, but require it to be there.
1213 */
1214 static const char *skip_user_manager(const char *p) {
1215 size_t n;
1216
1217 if (isempty(p))
1218 return NULL;
1219
1220 p += strspn(p, "/");
1221
1222 n = strcspn(p, "/");
1223 if (n < STRLEN("user@x.service"))
1224 return NULL;
1225
1226 if (memcmp(p, "user@", 5) == 0 && memcmp(p + n - 8, ".service", 8) == 0) {
1227 char buf[n - 5 - 8 + 1];
1228
1229 memcpy(buf, p + 5, n - 5 - 8);
1230 buf[n - 5 - 8] = 0;
1231
1232 /* Note that user manager services never need unescaping,
1233 * since they cannot conflict with the kernel's own
1234 * names, hence we don't need to call cg_unescape()
1235 * here. */
1236
1237 if (parse_uid(buf, NULL) < 0)
1238 return NULL;
1239
1240 p += n;
1241 p += strspn(p, "/");
1242
1243 return p;
1244 }
1245
1246 return NULL;
1247 }
1248
1249 static const char *skip_user_prefix(const char *path) {
1250 const char *e, *t;
1251
1252 assert(path);
1253
1254 /* Skip slices, if there are any */
1255 e = skip_slices(path);
1256
1257 /* Skip the user manager, if it's in the path now... */
1258 t = skip_user_manager(e);
1259 if (t)
1260 return t;
1261
1262 /* Alternatively skip the user session if it is in the path... */
1263 return skip_session(e);
1264 }
1265
1266 int cg_path_get_user_unit(const char *path, char **ret) {
1267 const char *t;
1268
1269 assert(path);
1270 assert(ret);
1271
1272 t = skip_user_prefix(path);
1273 if (!t)
1274 return -ENXIO;
1275
1276 /* And from here on it looks pretty much the same as for a system unit, hence let's use the same
1277 * parser. */
1278 return cg_path_get_unit(t, ret);
1279 }
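/* For illustration: the user prefix that is skipped consists of the slices plus
 * either a user@UID.service manager unit or a session-XX.scope unit, e.g. for
 * this example path:
 *
 *     /user.slice/user-1000.slice/user@1000.service/app.slice/foo.service
 *         -> "foo.service" */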
1280
1281 int cg_pid_get_user_unit(pid_t pid, char **unit) {
1282 _cleanup_free_ char *cgroup = NULL;
1283 int r;
1284
1285 assert(unit);
1286
1287 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1288 if (r < 0)
1289 return r;
1290
1291 return cg_path_get_user_unit(cgroup, unit);
1292 }
1293
1294 int cg_path_get_machine_name(const char *path, char **machine) {
1295 _cleanup_free_ char *u = NULL;
1296 const char *sl;
1297 int r;
1298
1299 r = cg_path_get_unit(path, &u);
1300 if (r < 0)
1301 return r;
1302
1303 sl = strjoina("/run/systemd/machines/unit:", u);
1304 return readlink_malloc(sl, machine);
1305 }
1306
1307 int cg_pid_get_machine_name(pid_t pid, char **machine) {
1308 _cleanup_free_ char *cgroup = NULL;
1309 int r;
1310
1311 assert(machine);
1312
1313 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1314 if (r < 0)
1315 return r;
1316
1317 return cg_path_get_machine_name(cgroup, machine);
1318 }
1319
1320 int cg_path_get_session(const char *path, char **session) {
1321 _cleanup_free_ char *unit = NULL;
1322 char *start, *end;
1323 int r;
1324
1325 assert(path);
1326
1327 r = cg_path_get_unit(path, &unit);
1328 if (r < 0)
1329 return r;
1330
1331 start = startswith(unit, "session-");
1332 if (!start)
1333 return -ENXIO;
1334 end = endswith(start, ".scope");
1335 if (!end)
1336 return -ENXIO;
1337
1338 *end = 0;
1339 if (!session_id_valid(start))
1340 return -ENXIO;
1341
1342 if (session) {
1343 char *rr;
1344
1345 rr = strdup(start);
1346 if (!rr)
1347 return -ENOMEM;
1348
1349 *session = rr;
1350 }
1351
1352 return 0;
1353 }
1354
1355 int cg_pid_get_session(pid_t pid, char **session) {
1356 _cleanup_free_ char *cgroup = NULL;
1357 int r;
1358
1359 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1360 if (r < 0)
1361 return r;
1362
1363 return cg_path_get_session(cgroup, session);
1364 }
1365
1366 int cg_path_get_owner_uid(const char *path, uid_t *uid) {
1367 _cleanup_free_ char *slice = NULL;
1368 char *start, *end;
1369 int r;
1370
1371 assert(path);
1372
1373 r = cg_path_get_slice(path, &slice);
1374 if (r < 0)
1375 return r;
1376
1377 start = startswith(slice, "user-");
1378 if (!start)
1379 return -ENXIO;
1380 end = endswith(start, ".slice");
1381 if (!end)
1382 return -ENXIO;
1383
1384 *end = 0;
1385 if (parse_uid(start, uid) < 0)
1386 return -ENXIO;
1387
1388 return 0;
1389 }
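/* For illustration: the owner UID is parsed out of the innermost user-UID.slice
 * component, e.g. "/user.slice/user-1000.slice/session-4.scope" -> 1000
 * (path and UID being examples). */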
1390
1391 int cg_pid_get_owner_uid(pid_t pid, uid_t *uid) {
1392 _cleanup_free_ char *cgroup = NULL;
1393 int r;
1394
1395 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1396 if (r < 0)
1397 return r;
1398
1399 return cg_path_get_owner_uid(cgroup, uid);
1400 }
1401
1402 int cg_path_get_slice(const char *p, char **slice) {
1403 const char *e = NULL;
1404
1405 assert(p);
1406 assert(slice);
1407
1408 /* Finds the right-most slice unit from the beginning, but
1409 * stops before we come to the first non-slice unit. */
1410
1411 for (;;) {
1412 size_t n;
1413
1414 p += strspn(p, "/");
1415
1416 n = strcspn(p, "/");
1417 if (!valid_slice_name(p, n)) {
1418
1419 if (!e) {
1420 char *s;
1421
1422 s = strdup(SPECIAL_ROOT_SLICE);
1423 if (!s)
1424 return -ENOMEM;
1425
1426 *slice = s;
1427 return 0;
1428 }
1429
1430 return cg_path_decode_unit(e, slice);
1431 }
1432
1433 e = p;
1434 p += n;
1435 }
1436 }
1437
1438 int cg_pid_get_slice(pid_t pid, char **slice) {
1439 _cleanup_free_ char *cgroup = NULL;
1440 int r;
1441
1442 assert(slice);
1443
1444 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1445 if (r < 0)
1446 return r;
1447
1448 return cg_path_get_slice(cgroup, slice);
1449 }
1450
1451 int cg_path_get_user_slice(const char *p, char **slice) {
1452 const char *t;
1453 assert(p);
1454 assert(slice);
1455
1456 t = skip_user_prefix(p);
1457 if (!t)
1458 return -ENXIO;
1459
1460 /* And now it looks pretty much the same as for a system
1461 * slice, so let's just use the same parser from here on. */
1462 return cg_path_get_slice(t, slice);
1463 }
1464
1465 int cg_pid_get_user_slice(pid_t pid, char **slice) {
1466 _cleanup_free_ char *cgroup = NULL;
1467 int r;
1468
1469 assert(slice);
1470
1471 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1472 if (r < 0)
1473 return r;
1474
1475 return cg_path_get_user_slice(cgroup, slice);
1476 }
1477
1478 char *cg_escape(const char *p) {
1479 bool need_prefix = false;
1480
1481 /* This implements very minimal escaping for names to be used
1482 * as file names in the cgroup tree: any name which might
1483 * conflict with a kernel name or is prefixed with '_' is
1484 * prefixed with a '_'. That way, when reading cgroup names it
1485 * is sufficient to remove a single prefixing underscore if
1486 * there is one. */
1487
1488 /* The return value of this function (unlike cg_unescape())
1489 * needs free()! */
1490
1491 if (IN_SET(p[0], 0, '_', '.') ||
1492 STR_IN_SET(p, "notify_on_release", "release_agent", "tasks") ||
1493 startswith(p, "cgroup."))
1494 need_prefix = true;
1495 else {
1496 const char *dot;
1497
1498 dot = strrchr(p, '.');
1499 if (dot) {
1500 CGroupController c;
1501 size_t l = dot - p;
1502
1503 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1504 const char *n;
1505
1506 n = cgroup_controller_to_string(c);
1507
1508 if (l != strlen(n))
1509 continue;
1510
1511 if (memcmp(p, n, l) != 0)
1512 continue;
1513
1514 need_prefix = true;
1515 break;
1516 }
1517 }
1518 }
1519
1520 if (need_prefix)
1521 return strjoin("_", p);
1522
1523 return strdup(p);
1524 }
1525
1526 char *cg_unescape(const char *p) {
1527 assert(p);
1528
1529 /* The return value of this function (unlike cg_escape())
1530 * doesn't need free()! */
1531
1532 if (p[0] == '_')
1533 return (char*) p+1;
1534
1535 return (char*) p;
1536 }
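/* For illustration: a few example escapings (the reverse direction simply strips
 * a single leading underscore):
 *
 *     cg_escape("foo.service")  -> "foo.service"    (no conflict, unchanged)
 *     cg_escape("cgroup.procs") -> "_cgroup.procs"  (kernel-reserved name)
 *     cg_escape("memory.limit") -> "_memory.limit"  (controller-prefixed name)
 *     cg_unescape("_tasks")     -> "tasks" */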
1537
1538 #define CONTROLLER_VALID \
1539 DIGITS LETTERS \
1540 "_"
1541
1542 bool cg_controller_is_valid(const char *p) {
1543 const char *t, *s;
1544
1545 if (!p)
1546 return false;
1547
1548 if (streq(p, SYSTEMD_CGROUP_CONTROLLER))
1549 return true;
1550
1551 s = startswith(p, "name=");
1552 if (s)
1553 p = s;
1554
1555 if (IN_SET(*p, 0, '_'))
1556 return false;
1557
1558 for (t = p; *t; t++)
1559 if (!strchr(CONTROLLER_VALID, *t))
1560 return false;
1561
1562 if (t - p > NAME_MAX)
1563 return false;
1564
1565 return true;
1566 }
1567
1568 int cg_slice_to_path(const char *unit, char **ret) {
1569 _cleanup_free_ char *p = NULL, *s = NULL, *e = NULL;
1570 const char *dash;
1571 int r;
1572
1573 assert(unit);
1574 assert(ret);
1575
1576 if (streq(unit, SPECIAL_ROOT_SLICE)) {
1577 char *x;
1578
1579 x = strdup("");
1580 if (!x)
1581 return -ENOMEM;
1582 *ret = x;
1583 return 0;
1584 }
1585
1586 if (!unit_name_is_valid(unit, UNIT_NAME_PLAIN))
1587 return -EINVAL;
1588
1589 if (!endswith(unit, ".slice"))
1590 return -EINVAL;
1591
1592 r = unit_name_to_prefix(unit, &p);
1593 if (r < 0)
1594 return r;
1595
1596 dash = strchr(p, '-');
1597
1598 /* Don't allow initial dashes */
1599 if (dash == p)
1600 return -EINVAL;
1601
1602 while (dash) {
1603 _cleanup_free_ char *escaped = NULL;
1604 char n[dash - p + sizeof(".slice")];
1605
1606 #if HAS_FEATURE_MEMORY_SANITIZER
1607 /* msan doesn't instrument stpncpy, so it thinks
1608 * n is later used uninitialized:
1609 * https://github.com/google/sanitizers/issues/926
1610 */
1611 zero(n);
1612 #endif
1613
1614 /* Don't allow trailing or double dashes */
1615 if (IN_SET(dash[1], 0, '-'))
1616 return -EINVAL;
1617
1618 strcpy(stpncpy(n, p, dash - p), ".slice");
1619 if (!unit_name_is_valid(n, UNIT_NAME_PLAIN))
1620 return -EINVAL;
1621
1622 escaped = cg_escape(n);
1623 if (!escaped)
1624 return -ENOMEM;
1625
1626 if (!strextend(&s, escaped, "/"))
1627 return -ENOMEM;
1628
1629 dash = strchr(dash+1, '-');
1630 }
1631
1632 e = cg_escape(unit);
1633 if (!e)
1634 return -ENOMEM;
1635
1636 if (!strextend(&s, e))
1637 return -ENOMEM;
1638
1639 *ret = TAKE_PTR(s);
1640
1641 return 0;
1642 }
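/* For illustration: slice unit names encode their parents with dashes, and the
 * resulting cgroup path nests them accordingly, e.g. for these example units:
 *
 *     cg_slice_to_path("foo-bar.slice", &p) -> "foo.slice/foo-bar.slice"
 *     cg_slice_to_path("-.slice", &p)       -> ""   (the root slice) */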
1643
1644 int cg_set_attribute(const char *controller, const char *path, const char *attribute, const char *value) {
1645 _cleanup_free_ char *p = NULL;
1646 int r;
1647
1648 r = cg_get_path(controller, path, attribute, &p);
1649 if (r < 0)
1650 return r;
1651
1652 return write_string_file(p, value, WRITE_STRING_FILE_DISABLE_BUFFER);
1653 }
1654
1655 int cg_get_attribute(const char *controller, const char *path, const char *attribute, char **ret) {
1656 _cleanup_free_ char *p = NULL;
1657 int r;
1658
1659 r = cg_get_path(controller, path, attribute, &p);
1660 if (r < 0)
1661 return r;
1662
1663 return read_one_line_file(p, ret);
1664 }
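/* For illustration: attribute names are simply file names inside the cgroup
 * directory, e.g. (example values, using the pids controller):
 *
 *     (void) cg_set_attribute("pids", "/foo.slice", "pids.max", "100");
 *     r = cg_get_attribute("pids", "/foo.slice", "pids.max", &value); */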
1665
1666 int cg_get_attribute_as_uint64(const char *controller, const char *path, const char *attribute, uint64_t *ret) {
1667 _cleanup_free_ char *value = NULL;
1668 uint64_t v;
1669 int r;
1670
1671 assert(ret);
1672
1673 r = cg_get_attribute(controller, path, attribute, &value);
1674 if (r == -ENOENT)
1675 return -ENODATA;
1676 if (r < 0)
1677 return r;
1678
1679 if (streq(value, "max")) {
1680 *ret = CGROUP_LIMIT_MAX;
1681 return 0;
1682 }
1683
1684 r = safe_atou64(value, &v);
1685 if (r < 0)
1686 return r;
1687
1688 *ret = v;
1689 return 0;
1690 }
1691
1692 int cg_get_attribute_as_bool(const char *controller, const char *path, const char *attribute, bool *ret) {
1693 _cleanup_free_ char *value = NULL;
1694 int r;
1695
1696 assert(ret);
1697
1698 r = cg_get_attribute(controller, path, attribute, &value);
1699 if (r == -ENOENT)
1700 return -ENODATA;
1701 if (r < 0)
1702 return r;
1703
1704 r = parse_boolean(value);
1705 if (r < 0)
1706 return r;
1707
1708 *ret = r;
1709 return 0;
1710 }
1711
1712 int cg_get_owner(const char *controller, const char *path, uid_t *ret_uid) {
1713 _cleanup_free_ char *f = NULL;
1714 struct stat stats;
1715 int r;
1716
1717 assert(ret_uid);
1718
1719 r = cg_get_path(controller, path, NULL, &f);
1720 if (r < 0)
1721 return r;
1722
1723 r = stat(f, &stats);
1724 if (r < 0)
1725 return -errno;
1726
1727 *ret_uid = stats.st_uid;
1728 return 0;
1729 }
1730
1731 int cg_get_keyed_attribute_full(
1732 const char *controller,
1733 const char *path,
1734 const char *attribute,
1735 char **keys,
1736 char **ret_values,
1737 CGroupKeyMode mode) {
1738
1739 _cleanup_free_ char *filename = NULL, *contents = NULL;
1740 const char *p;
1741 size_t n, i, n_done = 0;
1742 char **v;
1743 int r;
1744
1745 /* Reads one or more fields of a cgroup v2 keyed attribute file. The 'keys' parameter should be an strv with
1746 * all keys to retrieve. The 'ret_values' parameter should be an array of strings with the same number of
1747 * entries as 'keys'. On success each entry will be set to the value of the matching key.
1748 *
1749 * If the attribute file doesn't exist at all this returns -ENOENT; if any key is not found it returns -ENXIO. If mode
1750 * is set to CG_KEY_MODE_GRACEFUL we ignore missing keys and return those that were parsed successfully. */
1751
1752 r = cg_get_path(controller, path, attribute, &filename);
1753 if (r < 0)
1754 return r;
1755
1756 r = read_full_file(filename, &contents, NULL);
1757 if (r < 0)
1758 return r;
1759
1760 n = strv_length(keys);
1761 if (n == 0) /* No keys to retrieve? That's easy, we are done then */
1762 return 0;
1763
1764 /* Let's build this up in a temporary array for now in order not to clobber the return parameter on failure */
1765 v = newa0(char*, n);
1766
1767 for (p = contents; *p;) {
1768 const char *w = NULL;
1769
1770 for (i = 0; i < n; i++)
1771 if (!v[i]) {
1772 w = first_word(p, keys[i]);
1773 if (w)
1774 break;
1775 }
1776
1777 if (w) {
1778 size_t l;
1779
1780 l = strcspn(w, NEWLINE);
1781 v[i] = strndup(w, l);
1782 if (!v[i]) {
1783 r = -ENOMEM;
1784 goto fail;
1785 }
1786
1787 n_done++;
1788 if (n_done >= n)
1789 goto done;
1790
1791 p = w + l;
1792 } else
1793 p += strcspn(p, NEWLINE);
1794
1795 p += strspn(p, NEWLINE);
1796 }
1797
1798 if (mode & CG_KEY_MODE_GRACEFUL)
1799 goto done;
1800
1801 r = -ENXIO;
1802
1803 fail:
1804 for (i = 0; i < n; i++)
1805 free(v[i]);
1806
1807 return r;
1808
1809 done:
1810 memcpy(ret_values, v, sizeof(char*) * n);
1811 if (mode & CG_KEY_MODE_GRACEFUL)
1812 return n_done;
1813
1814 return 0;
1815 }
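/* For illustration: reading two keys out of cpu.stat on the unified hierarchy.
 * The path is a made-up example and error handling is omitted:
 *
 *     char *values[2] = {};
 *     r = cg_get_keyed_attribute_full("cpu", "/foo.slice", "cpu.stat",
 *                                     STRV_MAKE("usage_usec", "user_usec"),
 *                                     values, CG_KEY_MODE_GRACEFUL);
 *
 * In graceful mode r is the number of keys that were found; each entry filled in
 * values[] is newly allocated and must be freed by the caller. */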
1816
1817 int cg_mask_to_string(CGroupMask mask, char **ret) {
1818 _cleanup_free_ char *s = NULL;
1819 size_t n = 0, allocated = 0;
1820 bool space = false;
1821 CGroupController c;
1822
1823 assert(ret);
1824
1825 if (mask == 0) {
1826 *ret = NULL;
1827 return 0;
1828 }
1829
1830 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1831 const char *k;
1832 size_t l;
1833
1834 if (!FLAGS_SET(mask, CGROUP_CONTROLLER_TO_MASK(c)))
1835 continue;
1836
1837 k = cgroup_controller_to_string(c);
1838 l = strlen(k);
1839
1840 if (!GREEDY_REALLOC(s, allocated, n + space + l + 1))
1841 return -ENOMEM;
1842
1843 if (space)
1844 s[n] = ' ';
1845 memcpy(s + n + space, k, l);
1846 n += space + l;
1847
1848 space = true;
1849 }
1850
1851 assert(s);
1852
1853 s[n] = 0;
1854 *ret = TAKE_PTR(s);
1855
1856 return 0;
1857 }
1858
1859 int cg_mask_from_string(const char *value, CGroupMask *ret) {
1860 CGroupMask m = 0;
1861
1862 assert(ret);
1863 assert(value);
1864
1865 for (;;) {
1866 _cleanup_free_ char *n = NULL;
1867 CGroupController v;
1868 int r;
1869
1870 r = extract_first_word(&value, &n, NULL, 0);
1871 if (r < 0)
1872 return r;
1873 if (r == 0)
1874 break;
1875
1876 v = cgroup_controller_from_string(n);
1877 if (v < 0)
1878 continue;
1879
1880 m |= CGROUP_CONTROLLER_TO_MASK(v);
1881 }
1882
1883 *ret = m;
1884 return 0;
1885 }
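/* For illustration: these two helpers round-trip the space-separated controller
 * list format used by cgroup.controllers and cgroup.subtree_control, e.g.
 *
 *     "cpu io memory"  <->  CGROUP_MASK_CPU|CGROUP_MASK_IO|CGROUP_MASK_MEMORY
 *
 * Unknown controller names are silently skipped when parsing. */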
1886
1887 int cg_mask_supported_subtree(const char *root, CGroupMask *ret) {
1888 CGroupMask mask;
1889 int r;
1890
1891 /* Determines the mask of supported cgroup controllers. Only includes controllers we can make sense of and that
1892 * are actually accessible. Only covers real controllers, i.e. not the CGROUP_CONTROLLER_BPF_xyz
1893 * pseudo-controllers. */
1894
1895 r = cg_all_unified();
1896 if (r < 0)
1897 return r;
1898 if (r > 0) {
1899 _cleanup_free_ char *controllers = NULL, *path = NULL;
1900
1901 /* In the unified hierarchy we can read the supported and accessible controllers from
1902 * the top-level cgroup attribute */
1903
1904 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, root, "cgroup.controllers", &path);
1905 if (r < 0)
1906 return r;
1907
1908 r = read_one_line_file(path, &controllers);
1909 if (r < 0)
1910 return r;
1911
1912 r = cg_mask_from_string(controllers, &mask);
1913 if (r < 0)
1914 return r;
1915
1916 /* Mask controllers that are not supported in unified hierarchy. */
1917 mask &= CGROUP_MASK_V2;
1918
1919 } else {
1920 CGroupController c;
1921
1922 /* In the legacy hierarchy, we check which hierarchies are accessible. */
1923
1924 mask = 0;
1925 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1926 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
1927 const char *n;
1928
1929 if (!FLAGS_SET(CGROUP_MASK_V1, bit))
1930 continue;
1931
1932 n = cgroup_controller_to_string(c);
1933 if (controller_is_v1_accessible(root, n) >= 0)
1934 mask |= bit;
1935 }
1936 }
1937
1938 *ret = mask;
1939 return 0;
1940 }
1941
1942 int cg_mask_supported(CGroupMask *ret) {
1943 _cleanup_free_ char *root = NULL;
1944 int r;
1945
1946 r = cg_get_root_path(&root);
1947 if (r < 0)
1948 return r;
1949
1950 return cg_mask_supported_subtree(root, ret);
1951 }
1952
1953 int cg_kernel_controllers(Set **ret) {
1954 _cleanup_set_free_free_ Set *controllers = NULL;
1955 _cleanup_fclose_ FILE *f = NULL;
1956 int r;
1957
1958 assert(ret);
1959
1960 /* Determines the full list of kernel-known controllers. Might include controllers we don't actually support
1961 * and controllers that aren't currently accessible (because not mounted). This does not include "name="
1962 * pseudo-controllers. */
1963
1964 controllers = set_new(&string_hash_ops);
1965 if (!controllers)
1966 return -ENOMEM;
1967
1968 r = fopen_unlocked("/proc/cgroups", "re", &f);
1969 if (r == -ENOENT) {
1970 *ret = NULL;
1971 return 0;
1972 }
1973 if (r < 0)
1974 return r;
1975
1976 /* Ignore the header line */
1977 (void) read_line(f, SIZE_MAX, NULL);
1978
1979 for (;;) {
1980 char *controller;
1981 int enabled = 0;
1982
1983 errno = 0;
1984 if (fscanf(f, "%ms %*i %*i %i", &controller, &enabled) != 2) {
1985
1986 if (feof(f))
1987 break;
1988
1989 if (ferror(f))
1990 return errno_or_else(EIO);
1991
1992 return -EBADMSG;
1993 }
1994
1995 if (!enabled) {
1996 free(controller);
1997 continue;
1998 }
1999
2000 if (!cg_controller_is_valid(controller)) {
2001 free(controller);
2002 return -EBADMSG;
2003 }
2004
2005 r = set_consume(controllers, controller);
2006 if (r < 0)
2007 return r;
2008 }
2009
2010 *ret = TAKE_PTR(controllers);
2011
2012 return 0;
2013 }
2014
2015 /* The hybrid mode was initially implemented in v232 and simply mounted cgroup2 on
2016 * /sys/fs/cgroup/systemd. This unfortunately broke other tools (such as docker) which expected the v1
2017 * "name=systemd" hierarchy on /sys/fs/cgroup/systemd. From v233 and on, the hybrid mode mounts v2 on
2018 * /sys/fs/cgroup/unified and maintains "name=systemd" hierarchy on /sys/fs/cgroup/systemd for compatibility
2019 * with other tools.
2020 *
2021 * To keep live upgrade working, we detect and support v232 layout. When v232 layout is detected, to keep
2022 * cgroup v2 process management but disable the compat dual layout, we return true on
2023 * cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) and false on cg_hybrid_unified().
2024 */
2025 static thread_local bool unified_systemd_v232;
2026
2027 int cg_unified_cached(bool flush) {
2028 static thread_local CGroupUnified unified_cache = CGROUP_UNIFIED_UNKNOWN;
2029
2030 struct statfs fs;
2031
2032 /* Checks if we support the unified hierarchy. Returns an
2033 * error when the cgroup hierarchies aren't mounted yet or we
2034 * have any other trouble determining if the unified hierarchy
2035 * is supported. */
2036
2037 if (flush)
2038 unified_cache = CGROUP_UNIFIED_UNKNOWN;
2039 else if (unified_cache >= CGROUP_UNIFIED_NONE)
2040 return unified_cache;
2041
2042 if (statfs("/sys/fs/cgroup/", &fs) < 0)
2043 return log_debug_errno(errno, "statfs(\"/sys/fs/cgroup/\") failed: %m");
2044
2045 if (F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2046 log_debug("Found cgroup2 on /sys/fs/cgroup/, full unified hierarchy");
2047 unified_cache = CGROUP_UNIFIED_ALL;
2048 } else if (F_TYPE_EQUAL(fs.f_type, TMPFS_MAGIC)) {
2049 if (statfs("/sys/fs/cgroup/unified/", &fs) == 0 &&
2050 F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2051 log_debug("Found cgroup2 on /sys/fs/cgroup/unified, unified hierarchy for systemd controller");
2052 unified_cache = CGROUP_UNIFIED_SYSTEMD;
2053 unified_systemd_v232 = false;
2054 } else {
2055 if (statfs("/sys/fs/cgroup/systemd/", &fs) < 0) {
2056 if (errno == ENOENT) {
2057 /* Some other software may have set up /sys/fs/cgroup in a configuration we do not recognize. */
2058 log_debug_errno(errno, "Unsupported cgroupsv1 setup detected: name=systemd hierarchy not found.");
2059 return -ENOMEDIUM;
2060 }
2061 return log_debug_errno(errno, "statfs(\"/sys/fs/cgroup/systemd/\") failed: %m");
2062 }
2063
2064 if (F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2065 log_debug("Found cgroup2 on /sys/fs/cgroup/systemd, unified hierarchy for systemd controller (v232 variant)");
2066 unified_cache = CGROUP_UNIFIED_SYSTEMD;
2067 unified_systemd_v232 = true;
2068 } else if (F_TYPE_EQUAL(fs.f_type, CGROUP_SUPER_MAGIC)) {
2069 log_debug("Found cgroup on /sys/fs/cgroup/systemd, legacy hierarchy");
2070 unified_cache = CGROUP_UNIFIED_NONE;
2071 } else {
2072 log_debug("Unexpected filesystem type %llx mounted on /sys/fs/cgroup/systemd, assuming legacy hierarchy",
2073 (unsigned long long) fs.f_type);
2074 unified_cache = CGROUP_UNIFIED_NONE;
2075 }
2076 }
2077 } else if (F_TYPE_EQUAL(fs.f_type, SYSFS_MAGIC)) {
2078 return log_debug_errno(SYNTHETIC_ERRNO(ENOMEDIUM),
2079 "No filesystem is currently mounted on /sys/fs/cgroup.");
2080 } else
2081 return log_debug_errno(SYNTHETIC_ERRNO(ENOMEDIUM),
2082 "Unknown filesystem type %llx mounted on /sys/fs/cgroup.",
2083 (unsigned long long)fs.f_type);
2084
2085 return unified_cache;
2086 }
2087
2088 int cg_unified_controller(const char *controller) {
2089 int r;
2090
2091 r = cg_unified_cached(false);
2092 if (r < 0)
2093 return r;
2094
2095 if (r == CGROUP_UNIFIED_NONE)
2096 return false;
2097
2098 if (r >= CGROUP_UNIFIED_ALL)
2099 return true;
2100
2101 return streq_ptr(controller, SYSTEMD_CGROUP_CONTROLLER);
2102 }
2103
2104 int cg_all_unified(void) {
2105 int r;
2106
2107 r = cg_unified_cached(false);
2108 if (r < 0)
2109 return r;
2110
2111 return r >= CGROUP_UNIFIED_ALL;
2112 }
2113
2114 int cg_hybrid_unified(void) {
2115 int r;
2116
2117 r = cg_unified_cached(false);
2118 if (r < 0)
2119 return r;
2120
2121 return r == CGROUP_UNIFIED_SYSTEMD && !unified_systemd_v232;
2122 }
2123
2124 const uint64_t cgroup_io_limit_defaults[_CGROUP_IO_LIMIT_TYPE_MAX] = {
2125 [CGROUP_IO_RBPS_MAX] = CGROUP_LIMIT_MAX,
2126 [CGROUP_IO_WBPS_MAX] = CGROUP_LIMIT_MAX,
2127 [CGROUP_IO_RIOPS_MAX] = CGROUP_LIMIT_MAX,
2128 [CGROUP_IO_WIOPS_MAX] = CGROUP_LIMIT_MAX,
2129 };
2130
2131 static const char* const cgroup_io_limit_type_table[_CGROUP_IO_LIMIT_TYPE_MAX] = {
2132 [CGROUP_IO_RBPS_MAX] = "IOReadBandwidthMax",
2133 [CGROUP_IO_WBPS_MAX] = "IOWriteBandwidthMax",
2134 [CGROUP_IO_RIOPS_MAX] = "IOReadIOPSMax",
2135 [CGROUP_IO_WIOPS_MAX] = "IOWriteIOPSMax",
2136 };
2137
2138 DEFINE_STRING_TABLE_LOOKUP(cgroup_io_limit_type, CGroupIOLimitType);
2139
2140 bool is_cgroup_fs(const struct statfs *s) {
2141 return is_fs_type(s, CGROUP_SUPER_MAGIC) ||
2142 is_fs_type(s, CGROUP2_SUPER_MAGIC);
2143 }
2144
2145 bool fd_is_cgroup_fs(int fd) {
2146 struct statfs s;
2147
2148 if (fstatfs(fd, &s) < 0)
2149 return -errno;
2150
2151 return is_cgroup_fs(&s);
2152 }
2153
2154 static const char *const cgroup_controller_table[_CGROUP_CONTROLLER_MAX] = {
2155 [CGROUP_CONTROLLER_CPU] = "cpu",
2156 [CGROUP_CONTROLLER_CPUACCT] = "cpuacct",
2157 [CGROUP_CONTROLLER_CPUSET] = "cpuset",
2158 [CGROUP_CONTROLLER_IO] = "io",
2159 [CGROUP_CONTROLLER_BLKIO] = "blkio",
2160 [CGROUP_CONTROLLER_MEMORY] = "memory",
2161 [CGROUP_CONTROLLER_DEVICES] = "devices",
2162 [CGROUP_CONTROLLER_PIDS] = "pids",
2163 [CGROUP_CONTROLLER_BPF_FIREWALL] = "bpf-firewall",
2164 [CGROUP_CONTROLLER_BPF_DEVICES] = "bpf-devices",
2165 };
2166
2167 DEFINE_STRING_TABLE_LOOKUP(cgroup_controller, CGroupController);
2168
2169 CGroupMask get_cpu_accounting_mask(void) {
2170 static CGroupMask needed_mask = (CGroupMask) -1;
2171
2172 /* On kernel ≥4.15 with unified hierarchy, cpu.stat's usage_usec is
2173 * provided externally from the CPU controller, which means we don't
2174 * need to enable the CPU controller just to get metrics. This is good,
2175 * because enabling the CPU controller comes at a minor performance
2176 * hit, especially when it's propagated deep into large hierarchies.
2177 * There's also no separate CPU accounting controller available within
2178 * a unified hierarchy.
2179 *
2180 * This combination of factors results in the desired cgroup mask to
2181 * enable for CPU accounting varying as follows:
2182 *
2183 * ╔═════════════════════╤═════════════════════╗
2184 * ║ Linux ≥4.15 │ Linux <4.15 ║
2185 * ╔═══════════════╬═════════════════════╪═════════════════════╣
2186 * ║ Unified ║ nothing │ CGROUP_MASK_CPU ║
2187 * ╟───────────────╫─────────────────────┼─────────────────────╢
2188 * ║ Hybrid/Legacy ║ CGROUP_MASK_CPUACCT │ CGROUP_MASK_CPUACCT ║
2189 * ╚═══════════════╩═════════════════════╧═════════════════════╝
2190 *
2191 * We check kernel version here instead of manually checking whether
2192 * cpu.stat is present for every cgroup, as that check in itself would
2193 * already be fairly expensive.
2194 *
2195 * Kernels where this patch has been backported will therefore have the
2196 * CPU controller enabled unnecessarily. This is more expensive than
2197 * necessary, but harmless. ☺️
2198 */
2199
2200 if (needed_mask == (CGroupMask) -1) {
2201 if (cg_all_unified()) {
2202 struct utsname u;
2203 assert_se(uname(&u) >= 0);
2204
2205 if (strverscmp_improved(u.release, "4.15") < 0)
2206 needed_mask = CGROUP_MASK_CPU;
2207 else
2208 needed_mask = 0;
2209 } else
2210 needed_mask = CGROUP_MASK_CPUACCT;
2211 }
2212
2213 return needed_mask;
2214 }
2215
2216 bool cpu_accounting_is_cheap(void) {
2217 return get_cpu_accounting_mask() == 0;
2218 }
2219
2220 static const char* const managed_oom_mode_table[_MANAGED_OOM_MODE_MAX] = {
2221 [MANAGED_OOM_AUTO] = "auto",
2222 [MANAGED_OOM_KILL] = "kill",
2223 };
2224
2225 DEFINE_STRING_TABLE_LOOKUP(managed_oom_mode, ManagedOOMMode);
2226
2227 static const char* const managed_oom_preference_table[_MANAGED_OOM_PREFERENCE_MAX] = {
2228 [MANAGED_OOM_PREFERENCE_NONE] = "none",
2229 [MANAGED_OOM_PREFERENCE_AVOID] = "avoid",
2230 [MANAGED_OOM_PREFERENCE_OMIT] = "omit",
2231 };
2232
2233 DEFINE_STRING_TABLE_LOOKUP(managed_oom_preference, ManagedOOMPreference);