1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
2
3 #include <errno.h>
4 #include <ftw.h>
5 #include <limits.h>
6 #include <signal.h>
7 #include <stddef.h>
8 #include <stdlib.h>
9 #include <sys/types.h>
10 #include <sys/utsname.h>
11 #include <sys/xattr.h>
12 #include <unistd.h>
13
14 #include "alloc-util.h"
15 #include "cgroup-util.h"
16 #include "def.h"
17 #include "dirent-util.h"
18 #include "extract-word.h"
19 #include "fd-util.h"
20 #include "fileio.h"
21 #include "format-util.h"
22 #include "fs-util.h"
23 #include "log.h"
24 #include "login-util.h"
25 #include "macro.h"
26 #include "missing_magic.h"
27 #include "mkdir.h"
28 #include "parse-util.h"
29 #include "path-util.h"
30 #include "process-util.h"
31 #include "set.h"
32 #include "special.h"
33 #include "stat-util.h"
34 #include "stdio-util.h"
35 #include "string-table.h"
36 #include "string-util.h"
37 #include "strv.h"
38 #include "unit-name.h"
39 #include "user-util.h"
40 #include "xattr-util.h"
41
42 static int cg_enumerate_items(const char *controller, const char *path, FILE **_f, const char *item) {
43 _cleanup_free_ char *fs = NULL;
44 FILE *f;
45 int r;
46
47 assert(_f);
48
49 r = cg_get_path(controller, path, item, &fs);
50 if (r < 0)
51 return r;
52
53 f = fopen(fs, "re");
54 if (!f)
55 return -errno;
56
57 *_f = f;
58 return 0;
59 }
60
61 int cg_enumerate_processes(const char *controller, const char *path, FILE **_f) {
62 return cg_enumerate_items(controller, path, _f, "cgroup.procs");
63 }
64
65 int cg_read_pid(FILE *f, pid_t *_pid) {
66 unsigned long ul;
67
68 /* Note that the cgroup.procs file might contain duplicates! See
69 * cgroups.txt for details. */
70
71 assert(f);
72 assert(_pid);
73
74 errno = 0;
75 if (fscanf(f, "%lu", &ul) != 1) {
76
77 if (feof(f))
78 return 0;
79
80 return errno_or_else(EIO);
81 }
82
83 if (ul <= 0)
84 return -EIO;
85
86 *_pid = (pid_t) ul;
87 return 1;
88 }
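/* Illustrative usage sketch (the concrete cgroup path below is a placeholder): iterating over all
 * PIDs of a cgroup with the two helpers above. Since cgroup.procs may list a PID more than once,
 * callers needing uniqueness should deduplicate, e.g. via a Set.
 *
 *     _cleanup_fclose_ FILE *f = NULL;
 *     pid_t pid;
 *     int r;
 *
 *     r = cg_enumerate_processes(SYSTEMD_CGROUP_CONTROLLER, "/system.slice/foo.service", &f);
 *     if (r < 0)
 *             return r;
 *
 *     while ((r = cg_read_pid(f, &pid)) > 0)
 *             log_info("Found PID "PID_FMT, pid);
 *     if (r < 0)
 *             return r;
 */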
89
90 int cg_read_event(
91 const char *controller,
92 const char *path,
93 const char *event,
94 char **ret) {
95
96 _cleanup_free_ char *events = NULL, *content = NULL;
97 int r;
98
99 r = cg_get_path(controller, path, "cgroup.events", &events);
100 if (r < 0)
101 return r;
102
103 r = read_full_file(events, &content, NULL);
104 if (r < 0)
105 return r;
106
107 for (const char *p = content;;) {
108 _cleanup_free_ char *line = NULL, *key = NULL, *val = NULL;
109 const char *q;
110
111 r = extract_first_word(&p, &line, "\n", 0);
112 if (r < 0)
113 return r;
114 if (r == 0)
115 return -ENOENT;
116
117 q = line;
118 r = extract_first_word(&q, &key, " ", 0);
119 if (r < 0)
120 return r;
121 if (r == 0)
122 return -EINVAL;
123
124 if (!streq(key, event))
125 continue;
126
127 val = strdup(q);
128 if (!val)
129 return -ENOMEM;
130
131 *ret = TAKE_PTR(val);
132 return 0;
133 }
134 }
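/* Illustrative sketch: on the unified hierarchy "cgroup.events" is a flat "key value" list, typically
 * something like:
 *
 *     populated 1
 *     frozen 0
 *
 * Checking whether a cgroup subtree still contains processes hence boils down to (error handling
 * elided, the cgroup path is a placeholder):
 *
 *     _cleanup_free_ char *t = NULL;
 *     int r;
 *
 *     r = cg_read_event(SYSTEMD_CGROUP_CONTROLLER, "/system.slice/foo.service", "populated", &t);
 *     if (r >= 0 && streq(t, "0"))
 *             log_info("cgroup subtree is empty");
 */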
135
136 bool cg_ns_supported(void) {
137 static thread_local int enabled = -1;
138
139 if (enabled >= 0)
140 return enabled;
141
142 if (access("/proc/self/ns/cgroup", F_OK) < 0) {
143 if (errno != ENOENT)
144 log_debug_errno(errno, "Failed to check whether /proc/self/ns/cgroup is available, assuming not: %m");
145 enabled = false;
146 } else
147 enabled = true;
148
149 return enabled;
150 }
151
152 bool cg_freezer_supported(void) {
153 static thread_local int supported = -1;
154
155 if (supported >= 0)
156 return supported;
157
158 supported = cg_all_unified() > 0 && access("/sys/fs/cgroup/init.scope/cgroup.freeze", F_OK) == 0;
159
160 return supported;
161 }
162
163 int cg_enumerate_subgroups(const char *controller, const char *path, DIR **_d) {
164 _cleanup_free_ char *fs = NULL;
165 int r;
166 DIR *d;
167
168 assert(_d);
169
170 /* This is not recursive! */
171
172 r = cg_get_path(controller, path, NULL, &fs);
173 if (r < 0)
174 return r;
175
176 d = opendir(fs);
177 if (!d)
178 return -errno;
179
180 *_d = d;
181 return 0;
182 }
183
184 int cg_read_subgroup(DIR *d, char **fn) {
185 struct dirent *de;
186
187 assert(d);
188 assert(fn);
189
190 FOREACH_DIRENT_ALL(de, d, return -errno) {
191 char *b;
192
193 if (de->d_type != DT_DIR)
194 continue;
195
196 if (dot_or_dot_dot(de->d_name))
197 continue;
198
199 b = strdup(de->d_name);
200 if (!b)
201 return -ENOMEM;
202
203 *fn = b;
204 return 1;
205 }
206
207 return 0;
208 }
209
210 int cg_rmdir(const char *controller, const char *path) {
211 _cleanup_free_ char *p = NULL;
212 int r;
213
214 r = cg_get_path(controller, path, NULL, &p);
215 if (r < 0)
216 return r;
217
218 r = rmdir(p);
219 if (r < 0 && errno != ENOENT)
220 return -errno;
221
222 r = cg_hybrid_unified();
223 if (r <= 0)
224 return r;
225
226 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
227 r = cg_rmdir(SYSTEMD_CGROUP_CONTROLLER_LEGACY, path);
228 if (r < 0)
229 log_warning_errno(r, "Failed to remove compat systemd cgroup %s: %m", path);
230 }
231
232 return 0;
233 }
234
235 static int cg_kill_items(
236 const char *controller,
237 const char *path,
238 int sig,
239 CGroupFlags flags,
240 Set *s,
241 cg_kill_log_func_t log_kill,
242 void *userdata,
243 const char *item) {
244
245 _cleanup_set_free_ Set *allocated_set = NULL;
246 bool done = false;
247 int r, ret = 0, ret_log_kill = 0;
248 pid_t my_pid;
249
250 assert(sig >= 0);
251
252 /* Don't send SIGCONT twice. Also, SIGKILL always works even when process is suspended, hence don't send
253 * SIGCONT on SIGKILL. */
254 if (IN_SET(sig, SIGCONT, SIGKILL))
255 flags &= ~CGROUP_SIGCONT;
256
257 /* This goes through the tasks list and kills them all. This
258 * is repeated until no further processes are added to the
259 * tasks list, to properly handle forking processes */
260
261 if (!s) {
262 s = allocated_set = set_new(NULL);
263 if (!s)
264 return -ENOMEM;
265 }
266
267 my_pid = getpid_cached();
268
269 do {
270 _cleanup_fclose_ FILE *f = NULL;
271 pid_t pid = 0;
272 done = true;
273
274 r = cg_enumerate_items(controller, path, &f, item);
275 if (r < 0) {
276 if (ret >= 0 && r != -ENOENT)
277 return r;
278
279 return ret;
280 }
281
282 while ((r = cg_read_pid(f, &pid)) > 0) {
283
284 if ((flags & CGROUP_IGNORE_SELF) && pid == my_pid)
285 continue;
286
287 if (set_get(s, PID_TO_PTR(pid)) == PID_TO_PTR(pid))
288 continue;
289
290 if (log_kill)
291 ret_log_kill = log_kill(pid, sig, userdata);
292
293 /* If we haven't killed this process yet, kill
294 * it */
295 if (kill(pid, sig) < 0) {
296 if (ret >= 0 && errno != ESRCH)
297 ret = -errno;
298 } else {
299 if (flags & CGROUP_SIGCONT)
300 (void) kill(pid, SIGCONT);
301
302 if (ret == 0) {
303 if (log_kill)
304 ret = ret_log_kill;
305 else
306 ret = 1;
307 }
308 }
309
310 done = false;
311
312 r = set_put(s, PID_TO_PTR(pid));
313 if (r < 0) {
314 if (ret >= 0)
315 return r;
316
317 return ret;
318 }
319 }
320
321 if (r < 0) {
322 if (ret >= 0)
323 return r;
324
325 return ret;
326 }
327
328 /* To avoid racing against processes which fork
329 * quicker than we can kill them we repeat this until
330 * no new pids need to be killed. */
331
332 } while (!done);
333
334 return ret;
335 }
336
337 int cg_kill(
338 const char *controller,
339 const char *path,
340 int sig,
341 CGroupFlags flags,
342 Set *s,
343 cg_kill_log_func_t log_kill,
344 void *userdata) {
345 int r;
346
347 r = cg_kill_items(controller, path, sig, flags, s, log_kill, userdata, "cgroup.procs");
348 if (r < 0 || sig != SIGKILL)
349 return r;
350
351 /* Only in case of killing with SIGKILL and when using cgroup v2, kill remaining threads manually as
352 a workaround for a kernel bug. It was fixed in 5.2-rc5 (c03cd7738a83), backported to 4.19.66
353 (4340d175b898) and 4.14.138 (feb6b123b7dd). */
354 r = cg_unified_controller(controller);
355 if (r <= 0)
356 return r;
357
358 return cg_kill_items(controller, path, sig, flags, s, log_kill, userdata, "cgroup.threads");
359 }
360
361 int cg_kill_recursive(
362 const char *controller,
363 const char *path,
364 int sig,
365 CGroupFlags flags,
366 Set *s,
367 cg_kill_log_func_t log_kill,
368 void *userdata) {
369
370 _cleanup_set_free_ Set *allocated_set = NULL;
371 _cleanup_closedir_ DIR *d = NULL;
372 int r, ret;
373 char *fn;
374
375 assert(path);
376 assert(sig >= 0);
377
378 if (!s) {
379 s = allocated_set = set_new(NULL);
380 if (!s)
381 return -ENOMEM;
382 }
383
384 ret = cg_kill(controller, path, sig, flags, s, log_kill, userdata);
385
386 r = cg_enumerate_subgroups(controller, path, &d);
387 if (r < 0) {
388 if (ret >= 0 && r != -ENOENT)
389 return r;
390
391 return ret;
392 }
393
394 while ((r = cg_read_subgroup(d, &fn)) > 0) {
395 _cleanup_free_ char *p = NULL;
396
397 p = path_join(empty_to_root(path), fn);
398 free(fn);
399 if (!p)
400 return -ENOMEM;
401
402 r = cg_kill_recursive(controller, p, sig, flags, s, log_kill, userdata);
403 if (r != 0 && ret >= 0)
404 ret = r;
405 }
406 if (ret >= 0 && r < 0)
407 ret = r;
408
409 if (flags & CGROUP_REMOVE) {
410 r = cg_rmdir(controller, path);
411 if (r < 0 && ret >= 0 && !IN_SET(r, -ENOENT, -EBUSY))
412 return r;
413 }
414
415 return ret;
416 }
417
418 static const char *controller_to_dirname(const char *controller) {
419 const char *e;
420
421 assert(controller);
422
423 /* Converts a controller name to the directory name below
424 * /sys/fs/cgroup/ we want to mount it to. Effectively, this
425 * just cuts off the name= prefix used for named
426 * hierarchies, if it is specified. */
427
428 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
429 if (cg_hybrid_unified() > 0)
430 controller = SYSTEMD_CGROUP_CONTROLLER_HYBRID;
431 else
432 controller = SYSTEMD_CGROUP_CONTROLLER_LEGACY;
433 }
434
435 e = startswith(controller, "name=");
436 if (e)
437 return e;
438
439 return controller;
440 }
441
442 static int join_path_legacy(const char *controller, const char *path, const char *suffix, char **fs) {
443 const char *dn;
444 char *t = NULL;
445
446 assert(fs);
447 assert(controller);
448
449 dn = controller_to_dirname(controller);
450
451 if (isempty(path) && isempty(suffix))
452 t = path_join("/sys/fs/cgroup", dn);
453 else if (isempty(path))
454 t = path_join("/sys/fs/cgroup", dn, suffix);
455 else if (isempty(suffix))
456 t = path_join("/sys/fs/cgroup", dn, path);
457 else
458 t = path_join("/sys/fs/cgroup", dn, path, suffix);
459 if (!t)
460 return -ENOMEM;
461
462 *fs = t;
463 return 0;
464 }
465
466 static int join_path_unified(const char *path, const char *suffix, char **fs) {
467 char *t;
468
469 assert(fs);
470
471 if (isempty(path) && isempty(suffix))
472 t = strdup("/sys/fs/cgroup");
473 else if (isempty(path))
474 t = path_join("/sys/fs/cgroup", suffix);
475 else if (isempty(suffix))
476 t = path_join("/sys/fs/cgroup", path);
477 else
478 t = path_join("/sys/fs/cgroup", path, suffix);
479 if (!t)
480 return -ENOMEM;
481
482 *fs = t;
483 return 0;
484 }
485
486 int cg_get_path(const char *controller, const char *path, const char *suffix, char **fs) {
487 int r;
488
489 assert(fs);
490
491 if (!controller) {
492 char *t;
493
494 /* If no controller is specified, we return the path
495 * *below* the controllers, without any prefix. */
496
497 if (!path && !suffix)
498 return -EINVAL;
499
500 if (!suffix)
501 t = strdup(path);
502 else if (!path)
503 t = strdup(suffix);
504 else
505 t = path_join(path, suffix);
506 if (!t)
507 return -ENOMEM;
508
509 *fs = path_simplify(t, false);
510 return 0;
511 }
512
513 if (!cg_controller_is_valid(controller))
514 return -EINVAL;
515
516 r = cg_all_unified();
517 if (r < 0)
518 return r;
519 if (r > 0)
520 r = join_path_unified(path, suffix, fs);
521 else
522 r = join_path_legacy(controller, path, suffix, fs);
523 if (r < 0)
524 return r;
525
526 path_simplify(*fs, false);
527 return 0;
528 }
529
530 static int controller_is_v1_accessible(const char *root, const char *controller) {
531 const char *cpath, *dn;
532
533 assert(controller);
534
535 dn = controller_to_dirname(controller);
536 cpath = strjoina("/sys/fs/cgroup/", dn);
537 if (root)
538 /* Also check that:
539 * - the possible subcgroup has been created at root,
540 * - we can modify the hierarchy.
541 * ("Leaking" cpath on the stack is fine, strjoina() allocates with alloca().) */
542 cpath = strjoina(cpath, root, "/cgroup.procs");
543
544 if (laccess(cpath, root ? W_OK : F_OK) < 0)
545 return -errno;
546
547 return 0;
548 }
549
550 int cg_get_path_and_check(const char *controller, const char *path, const char *suffix, char **fs) {
551 int r;
552
553 assert(controller);
554 assert(fs);
555
556 if (!cg_controller_is_valid(controller))
557 return -EINVAL;
558
559 r = cg_all_unified();
560 if (r < 0)
561 return r;
562 if (r > 0) {
563 /* In the unified hierarchy all controllers are considered accessible,
564 * except for the named hierarchies */
565 if (startswith(controller, "name="))
566 return -EOPNOTSUPP;
567 } else {
568 /* Check if the specified controller is actually accessible */
569 r = controller_is_v1_accessible(NULL, controller);
570 if (r < 0)
571 return r;
572 }
573
574 return cg_get_path(controller, path, suffix, fs);
575 }
576
577 int cg_set_xattr(const char *controller, const char *path, const char *name, const void *value, size_t size, int flags) {
578 _cleanup_free_ char *fs = NULL;
579 int r;
580
581 assert(path);
582 assert(name);
583 assert(value || size <= 0);
584
585 r = cg_get_path(controller, path, NULL, &fs);
586 if (r < 0)
587 return r;
588
589 if (setxattr(fs, name, value, size, flags) < 0)
590 return -errno;
591
592 return 0;
593 }
594
595 int cg_get_xattr(const char *controller, const char *path, const char *name, void *value, size_t size) {
596 _cleanup_free_ char *fs = NULL;
597 ssize_t n;
598 int r;
599
600 assert(path);
601 assert(name);
602
603 r = cg_get_path(controller, path, NULL, &fs);
604 if (r < 0)
605 return r;
606
607 n = getxattr(fs, name, value, size);
608 if (n < 0)
609 return -errno;
610
611 return (int) n;
612 }
613
614 int cg_get_xattr_malloc(const char *controller, const char *path, const char *name, char **ret) {
615 _cleanup_free_ char *fs = NULL;
616 int r;
617
618 assert(path);
619 assert(name);
620
621 r = cg_get_path(controller, path, NULL, &fs);
622 if (r < 0)
623 return r;
624
625 r = getxattr_malloc(fs, name, ret, false);
626 if (r < 0)
627 return r;
628
629 return r;
630 }
631
632 int cg_get_xattr_bool(const char *controller, const char *path, const char *name) {
633 _cleanup_free_ char *val = NULL;
634 int r;
635
636 assert(path);
637 assert(name);
638
639 r = cg_get_xattr_malloc(controller, path, name, &val);
640 if (r < 0)
641 return r;
642
643 return parse_boolean(val);
644 }
645
646 int cg_remove_xattr(const char *controller, const char *path, const char *name) {
647 _cleanup_free_ char *fs = NULL;
648 int r;
649
650 assert(path);
651 assert(name);
652
653 r = cg_get_path(controller, path, NULL, &fs);
654 if (r < 0)
655 return r;
656
657 if (removexattr(fs, name) < 0)
658 return -errno;
659
660 return 0;
661 }
662
663 int cg_pid_get_path(const char *controller, pid_t pid, char **ret_path) {
664 _cleanup_fclose_ FILE *f = NULL;
665 const char *fs, *controller_str;
666 int unified, r;
667
668 assert(pid >= 0);
669 assert(ret_path);
670
671 if (controller) {
672 if (!cg_controller_is_valid(controller))
673 return -EINVAL;
674 } else
675 controller = SYSTEMD_CGROUP_CONTROLLER;
676
677 unified = cg_unified_controller(controller);
678 if (unified < 0)
679 return unified;
680 if (unified == 0) {
681 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER))
682 controller_str = SYSTEMD_CGROUP_CONTROLLER_LEGACY;
683 else
684 controller_str = controller;
685 }
686
687 fs = procfs_file_alloca(pid, "cgroup");
688 r = fopen_unlocked(fs, "re", &f);
689 if (r == -ENOENT)
690 return -ESRCH;
691 if (r < 0)
692 return r;
693
694 for (;;) {
695 _cleanup_free_ char *line = NULL;
696 char *e;
697
698 r = read_line(f, LONG_LINE_MAX, &line);
699 if (r < 0)
700 return r;
701 if (r == 0)
702 return -ENODATA;
703
704 if (unified) {
705 e = startswith(line, "0:");
706 if (!e)
707 continue;
708
709 e = strchr(e, ':');
710 if (!e)
711 continue;
712 } else {
713 char *l;
714
715 l = strchr(line, ':');
716 if (!l)
717 continue;
718
719 l++;
720 e = strchr(l, ':');
721 if (!e)
722 continue;
723 *e = 0;
724
725 r = string_contains_word(l, ",", controller_str);
726 if (r < 0)
727 return r;
728 if (r == 0)
729 continue;
730 }
731
732 char *path = strdup(e + 1);
733 if (!path)
734 return -ENOMEM;
735
736 /* Truncate suffix indicating the process is a zombie */
737 e = endswith(path, " (deleted)");
738 if (e)
739 *e = 0;
740
741 *ret_path = path;
742 return 0;
743 }
744 }
745
746 int cg_install_release_agent(const char *controller, const char *agent) {
747 _cleanup_free_ char *fs = NULL, *contents = NULL;
748 const char *sc;
749 int r;
750
751 assert(agent);
752
753 r = cg_unified_controller(controller);
754 if (r < 0)
755 return r;
756 if (r > 0) /* doesn't apply to unified hierarchy */
757 return -EOPNOTSUPP;
758
759 r = cg_get_path(controller, NULL, "release_agent", &fs);
760 if (r < 0)
761 return r;
762
763 r = read_one_line_file(fs, &contents);
764 if (r < 0)
765 return r;
766
767 sc = strstrip(contents);
768 if (isempty(sc)) {
769 r = write_string_file(fs, agent, WRITE_STRING_FILE_DISABLE_BUFFER);
770 if (r < 0)
771 return r;
772 } else if (!path_equal(sc, agent))
773 return -EEXIST;
774
775 fs = mfree(fs);
776 r = cg_get_path(controller, NULL, "notify_on_release", &fs);
777 if (r < 0)
778 return r;
779
780 contents = mfree(contents);
781 r = read_one_line_file(fs, &contents);
782 if (r < 0)
783 return r;
784
785 sc = strstrip(contents);
786 if (streq(sc, "0")) {
787 r = write_string_file(fs, "1", WRITE_STRING_FILE_DISABLE_BUFFER);
788 if (r < 0)
789 return r;
790
791 return 1;
792 }
793
794 if (!streq(sc, "1"))
795 return -EIO;
796
797 return 0;
798 }
799
800 int cg_uninstall_release_agent(const char *controller) {
801 _cleanup_free_ char *fs = NULL;
802 int r;
803
804 r = cg_unified_controller(controller);
805 if (r < 0)
806 return r;
807 if (r > 0) /* Doesn't apply to unified hierarchy */
808 return -EOPNOTSUPP;
809
810 r = cg_get_path(controller, NULL, "notify_on_release", &fs);
811 if (r < 0)
812 return r;
813
814 r = write_string_file(fs, "0", WRITE_STRING_FILE_DISABLE_BUFFER);
815 if (r < 0)
816 return r;
817
818 fs = mfree(fs);
819
820 r = cg_get_path(controller, NULL, "release_agent", &fs);
821 if (r < 0)
822 return r;
823
824 r = write_string_file(fs, "", WRITE_STRING_FILE_DISABLE_BUFFER);
825 if (r < 0)
826 return r;
827
828 return 0;
829 }
830
831 int cg_is_empty(const char *controller, const char *path) {
832 _cleanup_fclose_ FILE *f = NULL;
833 pid_t pid;
834 int r;
835
836 assert(path);
837
838 r = cg_enumerate_processes(controller, path, &f);
839 if (r == -ENOENT)
840 return true;
841 if (r < 0)
842 return r;
843
844 r = cg_read_pid(f, &pid);
845 if (r < 0)
846 return r;
847
848 return r == 0;
849 }
850
851 int cg_is_empty_recursive(const char *controller, const char *path) {
852 int r;
853
854 assert(path);
855
856 /* The root cgroup is always populated */
857 if (controller && empty_or_root(path))
858 return false;
859
860 r = cg_unified_controller(controller);
861 if (r < 0)
862 return r;
863 if (r > 0) {
864 _cleanup_free_ char *t = NULL;
865
866 /* On the unified hierarchy we can check empty state
867 * via the "populated" attribute of "cgroup.events". */
868
869 r = cg_read_event(controller, path, "populated", &t);
870 if (r == -ENOENT)
871 return true;
872 if (r < 0)
873 return r;
874
875 return streq(t, "0");
876 } else {
877 _cleanup_closedir_ DIR *d = NULL;
878 char *fn;
879
880 r = cg_is_empty(controller, path);
881 if (r <= 0)
882 return r;
883
884 r = cg_enumerate_subgroups(controller, path, &d);
885 if (r == -ENOENT)
886 return true;
887 if (r < 0)
888 return r;
889
890 while ((r = cg_read_subgroup(d, &fn)) > 0) {
891 _cleanup_free_ char *p = NULL;
892
893 p = path_join(path, fn);
894 free(fn);
895 if (!p)
896 return -ENOMEM;
897
898 r = cg_is_empty_recursive(controller, p);
899 if (r <= 0)
900 return r;
901 }
902 if (r < 0)
903 return r;
904
905 return true;
906 }
907 }
908
909 int cg_split_spec(const char *spec, char **ret_controller, char **ret_path) {
910 _cleanup_free_ char *controller = NULL, *path = NULL;
911
912 assert(spec);
913
914 if (*spec == '/') {
915 if (!path_is_normalized(spec))
916 return -EINVAL;
917
918 if (ret_path) {
919 path = strdup(spec);
920 if (!path)
921 return -ENOMEM;
922
923 path_simplify(path, false);
924 }
925
926 } else {
927 const char *e;
928
929 e = strchr(spec, ':');
930 if (e) {
931 controller = strndup(spec, e-spec);
932 if (!controller)
933 return -ENOMEM;
934 if (!cg_controller_is_valid(controller))
935 return -EINVAL;
936
937 if (!isempty(e + 1)) {
938 path = strdup(e+1);
939 if (!path)
940 return -ENOMEM;
941
942 if (!path_is_normalized(path) ||
943 !path_is_absolute(path))
944 return -EINVAL;
945
946 path_simplify(path, false);
947 }
948
949 } else {
950 if (!cg_controller_is_valid(spec))
951 return -EINVAL;
952
953 if (ret_controller) {
954 controller = strdup(spec);
955 if (!controller)
956 return -ENOMEM;
957 }
958 }
959 }
960
961 if (ret_controller)
962 *ret_controller = TAKE_PTR(controller);
963 if (ret_path)
964 *ret_path = TAKE_PTR(path);
965 return 0;
966 }
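/* Illustrative examples of the spec syntax parsed above, i.e. either an absolute cgroup path or
 * "controller:path" (where the path part may be omitted):
 *
 *     cg_split_spec("/system.slice", &c, &p)      ->  c = NULL,           p = "/system.slice"
 *     cg_split_spec("cpu:/system.slice", &c, &p)  ->  c = "cpu",          p = "/system.slice"
 *     cg_split_spec("name=systemd", &c, &p)       ->  c = "name=systemd", p = NULL
 */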
967
968 int cg_mangle_path(const char *path, char **result) {
969 _cleanup_free_ char *c = NULL, *p = NULL;
970 char *t;
971 int r;
972
973 assert(path);
974 assert(result);
975
976 /* First, check if it already is a filesystem path */
977 if (path_startswith(path, "/sys/fs/cgroup")) {
978
979 t = strdup(path);
980 if (!t)
981 return -ENOMEM;
982
983 *result = path_simplify(t, false);
984 return 0;
985 }
986
987 /* Otherwise, treat it as cg spec */
988 r = cg_split_spec(path, &c, &p);
989 if (r < 0)
990 return r;
991
992 return cg_get_path(c ?: SYSTEMD_CGROUP_CONTROLLER, p ?: "/", NULL, result);
993 }
994
995 int cg_get_root_path(char **path) {
996 char *p, *e;
997 int r;
998
999 assert(path);
1000
1001 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 1, &p);
1002 if (r < 0)
1003 return r;
1004
1005 e = endswith(p, "/" SPECIAL_INIT_SCOPE);
1006 if (!e)
1007 e = endswith(p, "/" SPECIAL_SYSTEM_SLICE); /* legacy */
1008 if (!e)
1009 e = endswith(p, "/system"); /* even more legacy */
1010 if (e)
1011 *e = 0;
1012
1013 *path = p;
1014 return 0;
1015 }
1016
1017 int cg_shift_path(const char *cgroup, const char *root, const char **shifted) {
1018 _cleanup_free_ char *rt = NULL;
1019 char *p;
1020 int r;
1021
1022 assert(cgroup);
1023 assert(shifted);
1024
1025 if (!root) {
1026 /* If the root was specified let's use that, otherwise
1027 * let's determine it from PID 1 */
1028
1029 r = cg_get_root_path(&rt);
1030 if (r < 0)
1031 return r;
1032
1033 root = rt;
1034 }
1035
1036 p = path_startswith(cgroup, root);
1037 if (p && p > cgroup)
1038 *shifted = p - 1;
1039 else
1040 *shifted = cgroup;
1041
1042 return 0;
1043 }
1044
1045 int cg_pid_get_path_shifted(pid_t pid, const char *root, char **cgroup) {
1046 _cleanup_free_ char *raw = NULL;
1047 const char *c;
1048 int r;
1049
1050 assert(pid >= 0);
1051 assert(cgroup);
1052
1053 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &raw);
1054 if (r < 0)
1055 return r;
1056
1057 r = cg_shift_path(raw, root, &c);
1058 if (r < 0)
1059 return r;
1060
1061 if (c == raw)
1062 *cgroup = TAKE_PTR(raw);
1063 else {
1064 char *n;
1065
1066 n = strdup(c);
1067 if (!n)
1068 return -ENOMEM;
1069
1070 *cgroup = n;
1071 }
1072
1073 return 0;
1074 }
1075
1076 int cg_path_decode_unit(const char *cgroup, char **unit) {
1077 char *c, *s;
1078 size_t n;
1079
1080 assert(cgroup);
1081 assert(unit);
1082
1083 n = strcspn(cgroup, "/");
1084 if (n < 3)
1085 return -ENXIO;
1086
1087 c = strndupa(cgroup, n);
1088 c = cg_unescape(c);
1089
1090 if (!unit_name_is_valid(c, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
1091 return -ENXIO;
1092
1093 s = strdup(c);
1094 if (!s)
1095 return -ENOMEM;
1096
1097 *unit = s;
1098 return 0;
1099 }
1100
1101 static bool valid_slice_name(const char *p, size_t n) {
1102
1103 if (!p)
1104 return false;
1105
1106 if (n < STRLEN("x.slice"))
1107 return false;
1108
1109 if (memcmp(p + n - 6, ".slice", 6) == 0) {
1110 char buf[n+1], *c;
1111
1112 memcpy(buf, p, n);
1113 buf[n] = 0;
1114
1115 c = cg_unescape(buf);
1116
1117 return unit_name_is_valid(c, UNIT_NAME_PLAIN);
1118 }
1119
1120 return false;
1121 }
1122
1123 static const char *skip_slices(const char *p) {
1124 assert(p);
1125
1126 /* Skips over all slice assignments */
1127
1128 for (;;) {
1129 size_t n;
1130
1131 p += strspn(p, "/");
1132
1133 n = strcspn(p, "/");
1134 if (!valid_slice_name(p, n))
1135 return p;
1136
1137 p += n;
1138 }
1139 }
1140
1141 int cg_path_get_unit(const char *path, char **ret) {
1142 _cleanup_free_ char *unit = NULL;
1143 const char *e;
1144 int r;
1145
1146 assert(path);
1147 assert(ret);
1148
1149 e = skip_slices(path);
1150
1151 r = cg_path_decode_unit(e, &unit);
1152 if (r < 0)
1153 return r;
1154
1155 /* We skipped over the slices, don't accept any now */
1156 if (endswith(unit, ".slice"))
1157 return -ENXIO;
1158
1159 *ret = TAKE_PTR(unit);
1160 return 0;
1161 }
1162
1163 int cg_pid_get_unit(pid_t pid, char **unit) {
1164 _cleanup_free_ char *cgroup = NULL;
1165 int r;
1166
1167 assert(unit);
1168
1169 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1170 if (r < 0)
1171 return r;
1172
1173 return cg_path_get_unit(cgroup, unit);
1174 }
1175
1176 /**
1177 * Skip session-*.scope, but require it to be there.
1178 */
1179 static const char *skip_session(const char *p) {
1180 size_t n;
1181
1182 if (isempty(p))
1183 return NULL;
1184
1185 p += strspn(p, "/");
1186
1187 n = strcspn(p, "/");
1188 if (n < STRLEN("session-x.scope"))
1189 return NULL;
1190
1191 if (memcmp(p, "session-", 8) == 0 && memcmp(p + n - 6, ".scope", 6) == 0) {
1192 char buf[n - 8 - 6 + 1];
1193
1194 memcpy(buf, p + 8, n - 8 - 6);
1195 buf[n - 8 - 6] = 0;
1196
1197 /* Note that session scopes never need unescaping,
1198 * since they cannot conflict with the kernel's own
1199 * names, hence we don't need to call cg_unescape()
1200 * here. */
1201
1202 if (!session_id_valid(buf))
1203 return NULL;
1204
1205 p += n;
1206 p += strspn(p, "/");
1207 return p;
1208 }
1209
1210 return NULL;
1211 }
1212
1213 /**
1214 * Skip user@*.service, but require it to be there.
1215 */
1216 static const char *skip_user_manager(const char *p) {
1217 size_t n;
1218
1219 if (isempty(p))
1220 return NULL;
1221
1222 p += strspn(p, "/");
1223
1224 n = strcspn(p, "/");
1225 if (n < STRLEN("user@x.service"))
1226 return NULL;
1227
1228 if (memcmp(p, "user@", 5) == 0 && memcmp(p + n - 8, ".service", 8) == 0) {
1229 char buf[n - 5 - 8 + 1];
1230
1231 memcpy(buf, p + 5, n - 5 - 8);
1232 buf[n - 5 - 8] = 0;
1233
1234 /* Note that user manager services never need unescaping,
1235 * since they cannot conflict with the kernel's own
1236 * names, hence we don't need to call cg_unescape()
1237 * here. */
1238
1239 if (parse_uid(buf, NULL) < 0)
1240 return NULL;
1241
1242 p += n;
1243 p += strspn(p, "/");
1244
1245 return p;
1246 }
1247
1248 return NULL;
1249 }
1250
1251 static const char *skip_user_prefix(const char *path) {
1252 const char *e, *t;
1253
1254 assert(path);
1255
1256 /* Skip slices, if there are any */
1257 e = skip_slices(path);
1258
1259 /* Skip the user manager, if it's in the path now... */
1260 t = skip_user_manager(e);
1261 if (t)
1262 return t;
1263
1264 /* Alternatively skip the user session if it is in the path... */
1265 return skip_session(e);
1266 }
1267
1268 int cg_path_get_user_unit(const char *path, char **ret) {
1269 const char *t;
1270
1271 assert(path);
1272 assert(ret);
1273
1274 t = skip_user_prefix(path);
1275 if (!t)
1276 return -ENXIO;
1277
1278 /* And from here on it looks pretty much the same as for a system unit, hence let's use the same
1279 * parser. */
1280 return cg_path_get_unit(t, ret);
1281 }
1282
1283 int cg_pid_get_user_unit(pid_t pid, char **unit) {
1284 _cleanup_free_ char *cgroup = NULL;
1285 int r;
1286
1287 assert(unit);
1288
1289 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1290 if (r < 0)
1291 return r;
1292
1293 return cg_path_get_user_unit(cgroup, unit);
1294 }
1295
1296 int cg_path_get_machine_name(const char *path, char **machine) {
1297 _cleanup_free_ char *u = NULL;
1298 const char *sl;
1299 int r;
1300
1301 r = cg_path_get_unit(path, &u);
1302 if (r < 0)
1303 return r;
1304
1305 sl = strjoina("/run/systemd/machines/unit:", u);
1306 return readlink_malloc(sl, machine);
1307 }
1308
1309 int cg_pid_get_machine_name(pid_t pid, char **machine) {
1310 _cleanup_free_ char *cgroup = NULL;
1311 int r;
1312
1313 assert(machine);
1314
1315 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1316 if (r < 0)
1317 return r;
1318
1319 return cg_path_get_machine_name(cgroup, machine);
1320 }
1321
1322 int cg_path_get_session(const char *path, char **session) {
1323 _cleanup_free_ char *unit = NULL;
1324 char *start, *end;
1325 int r;
1326
1327 assert(path);
1328
1329 r = cg_path_get_unit(path, &unit);
1330 if (r < 0)
1331 return r;
1332
1333 start = startswith(unit, "session-");
1334 if (!start)
1335 return -ENXIO;
1336 end = endswith(start, ".scope");
1337 if (!end)
1338 return -ENXIO;
1339
1340 *end = 0;
1341 if (!session_id_valid(start))
1342 return -ENXIO;
1343
1344 if (session) {
1345 char *rr;
1346
1347 rr = strdup(start);
1348 if (!rr)
1349 return -ENOMEM;
1350
1351 *session = rr;
1352 }
1353
1354 return 0;
1355 }
1356
1357 int cg_pid_get_session(pid_t pid, char **session) {
1358 _cleanup_free_ char *cgroup = NULL;
1359 int r;
1360
1361 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1362 if (r < 0)
1363 return r;
1364
1365 return cg_path_get_session(cgroup, session);
1366 }
1367
1368 int cg_path_get_owner_uid(const char *path, uid_t *uid) {
1369 _cleanup_free_ char *slice = NULL;
1370 char *start, *end;
1371 int r;
1372
1373 assert(path);
1374
1375 r = cg_path_get_slice(path, &slice);
1376 if (r < 0)
1377 return r;
1378
1379 start = startswith(slice, "user-");
1380 if (!start)
1381 return -ENXIO;
1382 end = endswith(start, ".slice");
1383 if (!end)
1384 return -ENXIO;
1385
1386 *end = 0;
1387 if (parse_uid(start, uid) < 0)
1388 return -ENXIO;
1389
1390 return 0;
1391 }
1392
1393 int cg_pid_get_owner_uid(pid_t pid, uid_t *uid) {
1394 _cleanup_free_ char *cgroup = NULL;
1395 int r;
1396
1397 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1398 if (r < 0)
1399 return r;
1400
1401 return cg_path_get_owner_uid(cgroup, uid);
1402 }
1403
1404 int cg_path_get_slice(const char *p, char **slice) {
1405 const char *e = NULL;
1406
1407 assert(p);
1408 assert(slice);
1409
1410 /* Finds the right-most slice unit from the beginning, but
1411 * stops before we come to the first non-slice unit. */
1412
1413 for (;;) {
1414 size_t n;
1415
1416 p += strspn(p, "/");
1417
1418 n = strcspn(p, "/");
1419 if (!valid_slice_name(p, n)) {
1420
1421 if (!e) {
1422 char *s;
1423
1424 s = strdup(SPECIAL_ROOT_SLICE);
1425 if (!s)
1426 return -ENOMEM;
1427
1428 *slice = s;
1429 return 0;
1430 }
1431
1432 return cg_path_decode_unit(e, slice);
1433 }
1434
1435 e = p;
1436 p += n;
1437 }
1438 }
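/* Illustrative examples: this yields the right-most slice before the first non-slice unit, falling
 * back to the root slice:
 *
 *     "/user.slice/user-1000.slice/session-2.scope"  ->  "user-1000.slice"
 *     "/system.slice/foo.service"                    ->  "system.slice"
 *     "/foo.scope"                                   ->  "-.slice" (SPECIAL_ROOT_SLICE)
 */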
1439
1440 int cg_pid_get_slice(pid_t pid, char **slice) {
1441 _cleanup_free_ char *cgroup = NULL;
1442 int r;
1443
1444 assert(slice);
1445
1446 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1447 if (r < 0)
1448 return r;
1449
1450 return cg_path_get_slice(cgroup, slice);
1451 }
1452
1453 int cg_path_get_user_slice(const char *p, char **slice) {
1454 const char *t;
1455 assert(p);
1456 assert(slice);
1457
1458 t = skip_user_prefix(p);
1459 if (!t)
1460 return -ENXIO;
1461
1462 /* And now it looks pretty much the same as for a system
1463 * slice, so let's just use the same parser from here on. */
1464 return cg_path_get_slice(t, slice);
1465 }
1466
1467 int cg_pid_get_user_slice(pid_t pid, char **slice) {
1468 _cleanup_free_ char *cgroup = NULL;
1469 int r;
1470
1471 assert(slice);
1472
1473 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1474 if (r < 0)
1475 return r;
1476
1477 return cg_path_get_user_slice(cgroup, slice);
1478 }
1479
1480 char *cg_escape(const char *p) {
1481 bool need_prefix = false;
1482
1483 /* This implements very minimal escaping for names to be used
1484 * as file names in the cgroup tree: any name which might
1485 * conflict with a kernel name or is prefixed with '_' is
1486 * prefixed with a '_'. That way, when reading cgroup names it
1487 * is sufficient to remove a single prefixing underscore if
1488 * there is one. */
1489
1490 /* The return value of this function (unlike cg_unescape())
1491 * needs free()! */
1492
1493 if (IN_SET(p[0], 0, '_', '.') ||
1494 STR_IN_SET(p, "notify_on_release", "release_agent", "tasks") ||
1495 startswith(p, "cgroup."))
1496 need_prefix = true;
1497 else {
1498 const char *dot;
1499
1500 dot = strrchr(p, '.');
1501 if (dot) {
1502 CGroupController c;
1503 size_t l = dot - p;
1504
1505 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1506 const char *n;
1507
1508 n = cgroup_controller_to_string(c);
1509
1510 if (l != strlen(n))
1511 continue;
1512
1513 if (memcmp(p, n, l) != 0)
1514 continue;
1515
1516 need_prefix = true;
1517 break;
1518 }
1519 }
1520 }
1521
1522 if (need_prefix)
1523 return strjoin("_", p);
1524
1525 return strdup(p);
1526 }
1527
1528 char *cg_unescape(const char *p) {
1529 assert(p);
1530
1531 /* The return value of this function (unlike cg_escape())
1532 * doesn't need free()! */
1533
1534 if (p[0] == '_')
1535 return (char*) p+1;
1536
1537 return (char*) p;
1538 }
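/* Illustrative examples: the escaping is a single optional "_" prefix, added whenever a name could
 * collide with a kernel-owned attribute file (or already starts with "_"):
 *
 *     cg_escape("foo.service")   ->  "foo.service"    (no collision, unchanged)
 *     cg_escape("cgroup.procs")  ->  "_cgroup.procs"  (collides with a cgroup attribute)
 *     cg_escape("_foo.service")  ->  "__foo.service"  (already prefixed, escaped again)
 *
 * cg_unescape() drops a single leading "_" again, so the round trip is lossless. Note that
 * cg_escape() returns newly allocated memory, while cg_unescape() returns a pointer into its input.
 */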
1539
1540 #define CONTROLLER_VALID \
1541 DIGITS LETTERS \
1542 "_"
1543
1544 bool cg_controller_is_valid(const char *p) {
1545 const char *t, *s;
1546
1547 if (!p)
1548 return false;
1549
1550 if (streq(p, SYSTEMD_CGROUP_CONTROLLER))
1551 return true;
1552
1553 s = startswith(p, "name=");
1554 if (s)
1555 p = s;
1556
1557 if (IN_SET(*p, 0, '_'))
1558 return false;
1559
1560 for (t = p; *t; t++)
1561 if (!strchr(CONTROLLER_VALID, *t))
1562 return false;
1563
1564 if (t - p > NAME_MAX)
1565 return false;
1566
1567 return true;
1568 }
1569
1570 int cg_slice_to_path(const char *unit, char **ret) {
1571 _cleanup_free_ char *p = NULL, *s = NULL, *e = NULL;
1572 const char *dash;
1573 int r;
1574
1575 assert(unit);
1576 assert(ret);
1577
1578 if (streq(unit, SPECIAL_ROOT_SLICE)) {
1579 char *x;
1580
1581 x = strdup("");
1582 if (!x)
1583 return -ENOMEM;
1584 *ret = x;
1585 return 0;
1586 }
1587
1588 if (!unit_name_is_valid(unit, UNIT_NAME_PLAIN))
1589 return -EINVAL;
1590
1591 if (!endswith(unit, ".slice"))
1592 return -EINVAL;
1593
1594 r = unit_name_to_prefix(unit, &p);
1595 if (r < 0)
1596 return r;
1597
1598 dash = strchr(p, '-');
1599
1600 /* Don't allow initial dashes */
1601 if (dash == p)
1602 return -EINVAL;
1603
1604 while (dash) {
1605 _cleanup_free_ char *escaped = NULL;
1606 char n[dash - p + sizeof(".slice")];
1607
1608 #if HAS_FEATURE_MEMORY_SANITIZER
1609 /* msan doesn't instrument stpncpy, so it thinks
1610 * n is later used uninitialized:
1611 * https://github.com/google/sanitizers/issues/926
1612 */
1613 zero(n);
1614 #endif
1615
1616 /* Don't allow trailing or double dashes */
1617 if (IN_SET(dash[1], 0, '-'))
1618 return -EINVAL;
1619
1620 strcpy(stpncpy(n, p, dash - p), ".slice");
1621 if (!unit_name_is_valid(n, UNIT_NAME_PLAIN))
1622 return -EINVAL;
1623
1624 escaped = cg_escape(n);
1625 if (!escaped)
1626 return -ENOMEM;
1627
1628 if (!strextend(&s, escaped, "/"))
1629 return -ENOMEM;
1630
1631 dash = strchr(dash+1, '-');
1632 }
1633
1634 e = cg_escape(unit);
1635 if (!e)
1636 return -ENOMEM;
1637
1638 if (!strextend(&s, e))
1639 return -ENOMEM;
1640
1641 *ret = TAKE_PTR(s);
1642
1643 return 0;
1644 }
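/* Illustrative examples: slice unit names encode their position in the slice tree via dashes, hence
 * each dash-separated prefix becomes a parent directory in the resulting path:
 *
 *     cg_slice_to_path("-.slice", &p)              ->  ""
 *     cg_slice_to_path("machine.slice", &p)        ->  "machine.slice"
 *     cg_slice_to_path("machine-waldo.slice", &p)  ->  "machine.slice/machine-waldo.slice"
 */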
1645
1646 int cg_set_attribute(const char *controller, const char *path, const char *attribute, const char *value) {
1647 _cleanup_free_ char *p = NULL;
1648 int r;
1649
1650 r = cg_get_path(controller, path, attribute, &p);
1651 if (r < 0)
1652 return r;
1653
1654 return write_string_file(p, value, WRITE_STRING_FILE_DISABLE_BUFFER);
1655 }
1656
1657 int cg_get_attribute(const char *controller, const char *path, const char *attribute, char **ret) {
1658 _cleanup_free_ char *p = NULL;
1659 int r;
1660
1661 r = cg_get_path(controller, path, attribute, &p);
1662 if (r < 0)
1663 return r;
1664
1665 return read_one_line_file(p, ret);
1666 }
1667
1668 int cg_get_attribute_as_uint64(const char *controller, const char *path, const char *attribute, uint64_t *ret) {
1669 _cleanup_free_ char *value = NULL;
1670 uint64_t v;
1671 int r;
1672
1673 assert(ret);
1674
1675 r = cg_get_attribute(controller, path, attribute, &value);
1676 if (r == -ENOENT)
1677 return -ENODATA;
1678 if (r < 0)
1679 return r;
1680
1681 if (streq(value, "max")) {
1682 *ret = CGROUP_LIMIT_MAX;
1683 return 0;
1684 }
1685
1686 r = safe_atou64(value, &v);
1687 if (r < 0)
1688 return r;
1689
1690 *ret = v;
1691 return 0;
1692 }
1693
1694 int cg_get_attribute_as_bool(const char *controller, const char *path, const char *attribute, bool *ret) {
1695 _cleanup_free_ char *value = NULL;
1696 int r;
1697
1698 assert(ret);
1699
1700 r = cg_get_attribute(controller, path, attribute, &value);
1701 if (r == -ENOENT)
1702 return -ENODATA;
1703 if (r < 0)
1704 return r;
1705
1706 r = parse_boolean(value);
1707 if (r < 0)
1708 return r;
1709
1710 *ret = r;
1711 return 0;
1712 }
1713
1714 int cg_get_owner(const char *controller, const char *path, uid_t *ret_uid) {
1715 _cleanup_free_ char *f = NULL;
1716 struct stat stats;
1717 int r;
1718
1719 assert(ret_uid);
1720
1721 r = cg_get_path(controller, path, NULL, &f);
1722 if (r < 0)
1723 return r;
1724
1725 r = stat(f, &stats);
1726 if (r < 0)
1727 return -errno;
1728
1729 *ret_uid = stats.st_uid;
1730 return 0;
1731 }
1732
1733 int cg_get_keyed_attribute_full(
1734 const char *controller,
1735 const char *path,
1736 const char *attribute,
1737 char **keys,
1738 char **ret_values,
1739 CGroupKeyMode mode) {
1740
1741 _cleanup_free_ char *filename = NULL, *contents = NULL;
1742 const char *p;
1743 size_t n, i, n_done = 0;
1744 char **v;
1745 int r;
1746
1747 /* Reads one or more fields of a cgroup v2 keyed attribute file. The 'keys' parameter should be an strv with
1748 * all keys to retrieve. The 'ret_values' parameter should be an array of strings with the same number of
1749 * entries as 'keys'. On success each entry will be set to the value of the matching key.
1750 *
1751 * If the attribute file doesn't exist at all this returns -ENOENT; if any key is not found it returns -ENXIO.
1752 * If mode is set to CG_KEY_MODE_GRACEFUL we ignore missing keys and return those that were parsed successfully. */
1753
1754 r = cg_get_path(controller, path, attribute, &filename);
1755 if (r < 0)
1756 return r;
1757
1758 r = read_full_file(filename, &contents, NULL);
1759 if (r < 0)
1760 return r;
1761
1762 n = strv_length(keys);
1763 if (n == 0) /* No keys to retrieve? That's easy, we are done then */
1764 return 0;
1765
1766 /* Let's build this up in a temporary array for now in order not to clobber the return parameter on failure */
1767 v = newa0(char*, n);
1768
1769 for (p = contents; *p;) {
1770 const char *w = NULL;
1771
1772 for (i = 0; i < n; i++)
1773 if (!v[i]) {
1774 w = first_word(p, keys[i]);
1775 if (w)
1776 break;
1777 }
1778
1779 if (w) {
1780 size_t l;
1781
1782 l = strcspn(w, NEWLINE);
1783 v[i] = strndup(w, l);
1784 if (!v[i]) {
1785 r = -ENOMEM;
1786 goto fail;
1787 }
1788
1789 n_done++;
1790 if (n_done >= n)
1791 goto done;
1792
1793 p = w + l;
1794 } else
1795 p += strcspn(p, NEWLINE);
1796
1797 p += strspn(p, NEWLINE);
1798 }
1799
1800 if (mode & CG_KEY_MODE_GRACEFUL)
1801 goto done;
1802
1803 r = -ENXIO;
1804
1805 fail:
1806 for (i = 0; i < n; i++)
1807 free(v[i]);
1808
1809 return r;
1810
1811 done:
1812 memcpy(ret_values, v, sizeof(char*) * n);
1813 if (mode & CG_KEY_MODE_GRACEFUL)
1814 return n_done;
1815
1816 return 0;
1817 }
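/* Illustrative sketch (error handling elided, the cgroup path is a placeholder): reading two fields
 * from a keyed attribute such as "cpu.stat", whose content looks like "usage_usec 4721",
 * "user_usec 3145", one key per line. Passing 0 as the mode (i.e. not CG_KEY_MODE_GRACEFUL) requires
 * all requested keys to be present:
 *
 *     char *values[2] = {};
 *     int r;
 *
 *     r = cg_get_keyed_attribute_full("cpu", "/system.slice/foo.service", "cpu.stat",
 *                                     STRV_MAKE("usage_usec", "user_usec"), values, 0);
 *     if (r >= 0) {
 *             log_info("usage=%s user=%s", values[0], values[1]);
 *             free(values[0]);
 *             free(values[1]);
 *     }
 */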
1818
1819 int cg_mask_to_string(CGroupMask mask, char **ret) {
1820 _cleanup_free_ char *s = NULL;
1821 size_t n = 0, allocated = 0;
1822 bool space = false;
1823 CGroupController c;
1824
1825 assert(ret);
1826
1827 if (mask == 0) {
1828 *ret = NULL;
1829 return 0;
1830 }
1831
1832 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1833 const char *k;
1834 size_t l;
1835
1836 if (!FLAGS_SET(mask, CGROUP_CONTROLLER_TO_MASK(c)))
1837 continue;
1838
1839 k = cgroup_controller_to_string(c);
1840 l = strlen(k);
1841
1842 if (!GREEDY_REALLOC(s, allocated, n + space + l + 1))
1843 return -ENOMEM;
1844
1845 if (space)
1846 s[n] = ' ';
1847 memcpy(s + n + space, k, l);
1848 n += space + l;
1849
1850 space = true;
1851 }
1852
1853 assert(s);
1854
1855 s[n] = 0;
1856 *ret = TAKE_PTR(s);
1857
1858 return 0;
1859 }
1860
1861 int cg_mask_from_string(const char *value, CGroupMask *ret) {
1862 CGroupMask m = 0;
1863
1864 assert(ret);
1865 assert(value);
1866
1867 for (;;) {
1868 _cleanup_free_ char *n = NULL;
1869 CGroupController v;
1870 int r;
1871
1872 r = extract_first_word(&value, &n, NULL, 0);
1873 if (r < 0)
1874 return r;
1875 if (r == 0)
1876 break;
1877
1878 v = cgroup_controller_from_string(n);
1879 if (v < 0)
1880 continue;
1881
1882 m |= CGROUP_CONTROLLER_TO_MASK(v);
1883 }
1884
1885 *ret = m;
1886 return 0;
1887 }
1888
1889 int cg_mask_supported_subtree(const char *root, CGroupMask *ret) {
1890 CGroupMask mask;
1891 int r;
1892
1893 /* Determines the mask of supported cgroup controllers. Only includes controllers we can make sense of and that
1894 * are actually accessible. Only covers real controllers, i.e. not the CGROUP_CONTROLLER_BPF_xyz
1895 * pseudo-controllers. */
1896
1897 r = cg_all_unified();
1898 if (r < 0)
1899 return r;
1900 if (r > 0) {
1901 _cleanup_free_ char *controllers = NULL, *path = NULL;
1902
1903 /* In the unified hierarchy we can read the supported and accessible controllers from
1904 * the top-level cgroup attribute */
1905
1906 r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, root, "cgroup.controllers", &path);
1907 if (r < 0)
1908 return r;
1909
1910 r = read_one_line_file(path, &controllers);
1911 if (r < 0)
1912 return r;
1913
1914 r = cg_mask_from_string(controllers, &mask);
1915 if (r < 0)
1916 return r;
1917
1918 /* Mask controllers that are not supported in unified hierarchy. */
1919 mask &= CGROUP_MASK_V2;
1920
1921 } else {
1922 CGroupController c;
1923
1924 /* In the legacy hierarchy, we check which hierarchies are accessible. */
1925
1926 mask = 0;
1927 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1928 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
1929 const char *n;
1930
1931 if (!FLAGS_SET(CGROUP_MASK_V1, bit))
1932 continue;
1933
1934 n = cgroup_controller_to_string(c);
1935 if (controller_is_v1_accessible(root, n) >= 0)
1936 mask |= bit;
1937 }
1938 }
1939
1940 *ret = mask;
1941 return 0;
1942 }
1943
1944 int cg_mask_supported(CGroupMask *ret) {
1945 _cleanup_free_ char *root = NULL;
1946 int r;
1947
1948 r = cg_get_root_path(&root);
1949 if (r < 0)
1950 return r;
1951
1952 return cg_mask_supported_subtree(root, ret);
1953 }
1954
1955 int cg_kernel_controllers(Set **ret) {
1956 _cleanup_set_free_free_ Set *controllers = NULL;
1957 _cleanup_fclose_ FILE *f = NULL;
1958 int r;
1959
1960 assert(ret);
1961
1962 /* Determines the full list of kernel-known controllers. Might include controllers we don't actually support
1963 * and controllers that aren't currently accessible (because not mounted). This does not include "name="
1964 * pseudo-controllers. */
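/* For reference, /proc/cgroups consists of a header line followed by one line per controller,
 * roughly (the numbers are illustrative):
 *
 *     #subsys_name    hierarchy       num_cgroups     enabled
 *     cpuset          3               1               1
 *     cpu             4               64              1
 *
 * which is what the fscanf() format below expects; the hierarchy and cgroup-count columns are
 * skipped. */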
1965
1966 controllers = set_new(&string_hash_ops);
1967 if (!controllers)
1968 return -ENOMEM;
1969
1970 r = fopen_unlocked("/proc/cgroups", "re", &f);
1971 if (r == -ENOENT) {
1972 *ret = NULL;
1973 return 0;
1974 }
1975 if (r < 0)
1976 return r;
1977
1978 /* Ignore the header line */
1979 (void) read_line(f, SIZE_MAX, NULL);
1980
1981 for (;;) {
1982 char *controller;
1983 int enabled = 0;
1984
1985 errno = 0;
1986 if (fscanf(f, "%ms %*i %*i %i", &controller, &enabled) != 2) {
1987
1988 if (feof(f))
1989 break;
1990
1991 if (ferror(f))
1992 return errno_or_else(EIO);
1993
1994 return -EBADMSG;
1995 }
1996
1997 if (!enabled) {
1998 free(controller);
1999 continue;
2000 }
2001
2002 if (!cg_controller_is_valid(controller)) {
2003 free(controller);
2004 return -EBADMSG;
2005 }
2006
2007 r = set_consume(controllers, controller);
2008 if (r < 0)
2009 return r;
2010 }
2011
2012 *ret = TAKE_PTR(controllers);
2013
2014 return 0;
2015 }
2016
2017 /* The hybrid mode was initially implemented in v232 and simply mounted cgroup2 on
2018 * /sys/fs/cgroup/systemd. This unfortunately broke other tools (such as docker) which expected the v1
2019 * "name=systemd" hierarchy on /sys/fs/cgroup/systemd. From v233 and on, the hybrid mode mounts v2 on
2020 * /sys/fs/cgroup/unified and maintains "name=systemd" hierarchy on /sys/fs/cgroup/systemd for compatibility
2021 * with other tools.
2022 *
2023 * To keep live upgrade working, we detect and support v232 layout. When v232 layout is detected, to keep
2024 * cgroup v2 process management but disable the compat dual layout, we return true on
2025 * cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) and false on cg_hybrid_unified().
2026 */
2027 static thread_local bool unified_systemd_v232;
2028
2029 int cg_unified_cached(bool flush) {
2030 static thread_local CGroupUnified unified_cache = CGROUP_UNIFIED_UNKNOWN;
2031
2032 struct statfs fs;
2033
2034 /* Checks if we support the unified hierarchy. Returns an
2035 * error when the cgroup hierarchies aren't mounted yet or we
2036 * have any other trouble determining if the unified hierarchy
2037 * is supported. */
2038
2039 if (flush)
2040 unified_cache = CGROUP_UNIFIED_UNKNOWN;
2041 else if (unified_cache >= CGROUP_UNIFIED_NONE)
2042 return unified_cache;
2043
2044 if (statfs("/sys/fs/cgroup/", &fs) < 0)
2045 return log_debug_errno(errno, "statfs(\"/sys/fs/cgroup/\") failed: %m");
2046
2047 if (F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2048 log_debug("Found cgroup2 on /sys/fs/cgroup/, full unified hierarchy");
2049 unified_cache = CGROUP_UNIFIED_ALL;
2050 } else if (F_TYPE_EQUAL(fs.f_type, TMPFS_MAGIC)) {
2051 if (statfs("/sys/fs/cgroup/unified/", &fs) == 0 &&
2052 F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2053 log_debug("Found cgroup2 on /sys/fs/cgroup/unified, unified hierarchy for systemd controller");
2054 unified_cache = CGROUP_UNIFIED_SYSTEMD;
2055 unified_systemd_v232 = false;
2056 } else {
2057 if (statfs("/sys/fs/cgroup/systemd/", &fs) < 0)
2058 return log_debug_errno(errno, "statfs(\"/sys/fs/cgroup/systemd/\") failed: %m");
2059
2060 if (F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2061 log_debug("Found cgroup2 on /sys/fs/cgroup/systemd, unified hierarchy for systemd controller (v232 variant)");
2062 unified_cache = CGROUP_UNIFIED_SYSTEMD;
2063 unified_systemd_v232 = true;
2064 } else if (F_TYPE_EQUAL(fs.f_type, CGROUP_SUPER_MAGIC)) {
2065 log_debug("Found cgroup on /sys/fs/cgroup/systemd, legacy hierarchy");
2066 unified_cache = CGROUP_UNIFIED_NONE;
2067 } else {
2068 log_debug("Unexpected filesystem type %llx mounted on /sys/fs/cgroup/systemd, assuming legacy hierarchy",
2069 (unsigned long long) fs.f_type);
2070 unified_cache = CGROUP_UNIFIED_NONE;
2071 }
2072 }
2073 } else if (F_TYPE_EQUAL(fs.f_type, SYSFS_MAGIC)) {
2074 return log_debug_errno(SYNTHETIC_ERRNO(ENOMEDIUM),
2075 "No filesystem is currently mounted on /sys/fs/cgroup.");
2076 } else
2077 return log_debug_errno(SYNTHETIC_ERRNO(ENOMEDIUM),
2078 "Unknown filesystem type %llx mounted on /sys/fs/cgroup.",
2079 (unsigned long long)fs.f_type);
2080
2081 return unified_cache;
2082 }
2083
2084 int cg_unified_controller(const char *controller) {
2085 int r;
2086
2087 r = cg_unified_cached(false);
2088 if (r < 0)
2089 return r;
2090
2091 if (r == CGROUP_UNIFIED_NONE)
2092 return false;
2093
2094 if (r >= CGROUP_UNIFIED_ALL)
2095 return true;
2096
2097 return streq_ptr(controller, SYSTEMD_CGROUP_CONTROLLER);
2098 }
2099
2100 int cg_all_unified(void) {
2101 int r;
2102
2103 r = cg_unified_cached(false);
2104 if (r < 0)
2105 return r;
2106
2107 return r >= CGROUP_UNIFIED_ALL;
2108 }
2109
2110 int cg_hybrid_unified(void) {
2111 int r;
2112
2113 r = cg_unified_cached(false);
2114 if (r < 0)
2115 return r;
2116
2117 return r == CGROUP_UNIFIED_SYSTEMD && !unified_systemd_v232;
2118 }
2119
2120 const uint64_t cgroup_io_limit_defaults[_CGROUP_IO_LIMIT_TYPE_MAX] = {
2121 [CGROUP_IO_RBPS_MAX] = CGROUP_LIMIT_MAX,
2122 [CGROUP_IO_WBPS_MAX] = CGROUP_LIMIT_MAX,
2123 [CGROUP_IO_RIOPS_MAX] = CGROUP_LIMIT_MAX,
2124 [CGROUP_IO_WIOPS_MAX] = CGROUP_LIMIT_MAX,
2125 };
2126
2127 static const char* const cgroup_io_limit_type_table[_CGROUP_IO_LIMIT_TYPE_MAX] = {
2128 [CGROUP_IO_RBPS_MAX] = "IOReadBandwidthMax",
2129 [CGROUP_IO_WBPS_MAX] = "IOWriteBandwidthMax",
2130 [CGROUP_IO_RIOPS_MAX] = "IOReadIOPSMax",
2131 [CGROUP_IO_WIOPS_MAX] = "IOWriteIOPSMax",
2132 };
2133
2134 DEFINE_STRING_TABLE_LOOKUP(cgroup_io_limit_type, CGroupIOLimitType);
2135
2136 bool is_cgroup_fs(const struct statfs *s) {
2137 return is_fs_type(s, CGROUP_SUPER_MAGIC) ||
2138 is_fs_type(s, CGROUP2_SUPER_MAGIC);
2139 }
2140
2141 bool fd_is_cgroup_fs(int fd) {
2142 struct statfs s;
2143
2144 if (fstatfs(fd, &s) < 0)
2145 return false; /* Returning -errno from a bool function would read as "true". */
2146
2147 return is_cgroup_fs(&s);
2148 }
2149
2150 static const char *const cgroup_controller_table[_CGROUP_CONTROLLER_MAX] = {
2151 [CGROUP_CONTROLLER_CPU] = "cpu",
2152 [CGROUP_CONTROLLER_CPUACCT] = "cpuacct",
2153 [CGROUP_CONTROLLER_CPUSET] = "cpuset",
2154 [CGROUP_CONTROLLER_IO] = "io",
2155 [CGROUP_CONTROLLER_BLKIO] = "blkio",
2156 [CGROUP_CONTROLLER_MEMORY] = "memory",
2157 [CGROUP_CONTROLLER_DEVICES] = "devices",
2158 [CGROUP_CONTROLLER_PIDS] = "pids",
2159 [CGROUP_CONTROLLER_BPF_FIREWALL] = "bpf-firewall",
2160 [CGROUP_CONTROLLER_BPF_DEVICES] = "bpf-devices",
2161 };
2162
2163 DEFINE_STRING_TABLE_LOOKUP(cgroup_controller, CGroupController);
2164
2165 CGroupMask get_cpu_accounting_mask(void) {
2166 static CGroupMask needed_mask = (CGroupMask) -1;
2167
2168 /* On kernel ≥4.15 with unified hierarchy, cpu.stat's usage_usec is
2169 * provided externally from the CPU controller, which means we don't
2170 * need to enable the CPU controller just to get metrics. This is good,
2171 * because enabling the CPU controller comes at a minor performance
2172 * hit, especially when it's propagated deep into large hierarchies.
2173 * There's also no separate CPU accounting controller available within
2174 * a unified hierarchy.
2175 *
2176 * This combination of factors results in the desired cgroup mask to
2177 * enable for CPU accounting varying as follows:
2178 *
2179 * ╔═════════════════════╤═════════════════════╗
2180 * ║ Linux ≥4.15 │ Linux <4.15 ║
2181 * ╔═══════════════╬═════════════════════╪═════════════════════╣
2182 * ║ Unified ║ nothing │ CGROUP_MASK_CPU ║
2183 * ╟───────────────╫─────────────────────┼─────────────────────╢
2184 * ║ Hybrid/Legacy ║ CGROUP_MASK_CPUACCT │ CGROUP_MASK_CPUACCT ║
2185 * ╚═══════════════╩═════════════════════╧═════════════════════╝
2186 *
2187 * We check kernel version here instead of manually checking whether
2188 * cpu.stat is present for every cgroup, as that check in itself would
2189 * already be fairly expensive.
2190 *
2191 * Kernels where this patch has been backported will therefore have the
2192 * CPU controller enabled unnecessarily. This is more expensive than
2193 * necessary, but harmless. ☺️
2194 */
2195
2196 if (needed_mask == (CGroupMask) -1) {
2197 if (cg_all_unified()) {
2198 struct utsname u;
2199 assert_se(uname(&u) >= 0);
2200
2201 if (strverscmp_improved(u.release, "4.15") < 0)
2202 needed_mask = CGROUP_MASK_CPU;
2203 else
2204 needed_mask = 0;
2205 } else
2206 needed_mask = CGROUP_MASK_CPUACCT;
2207 }
2208
2209 return needed_mask;
2210 }
2211
2212 bool cpu_accounting_is_cheap(void) {
2213 return get_cpu_accounting_mask() == 0;
2214 }
2215
2216 static const char* const managed_oom_mode_table[_MANAGED_OOM_MODE_MAX] = {
2217 [MANAGED_OOM_AUTO] = "auto",
2218 [MANAGED_OOM_KILL] = "kill",
2219 };
2220
2221 DEFINE_STRING_TABLE_LOOKUP(managed_oom_mode, ManagedOOMMode);
2222
2223 static const char* const managed_oom_preference_table[_MANAGED_OOM_PREFERENCE_MAX] = {
2224 [MANAGED_OOM_PREFERENCE_NONE] = "none",
2225 [MANAGED_OOM_PREFERENCE_AVOID] = "avoid",
2226 [MANAGED_OOM_PREFERENCE_OMIT] = "omit",
2227 };
2228
2229 DEFINE_STRING_TABLE_LOOKUP(managed_oom_preference, ManagedOOMPreference);