]> git.ipfire.org Git - thirdparty/systemd.git/blob - src/basic/cgroup-util.c
Add fopen_unlocked() wrapper
[thirdparty/systemd.git] / src / basic / cgroup-util.c
1 /* SPDX-License-Identifier: LGPL-2.1+ */
2
3 #include <dirent.h>
4 #include <errno.h>
5 #include <ftw.h>
6 #include <limits.h>
7 #include <signal.h>
8 #include <stddef.h>
9 #include <stdio_ext.h>
10 #include <stdlib.h>
11 #include <string.h>
12 #include <sys/stat.h>
13 #include <sys/statfs.h>
14 #include <sys/types.h>
15 #include <sys/utsname.h>
16 #include <sys/xattr.h>
17 #include <unistd.h>
18
19 #include "alloc-util.h"
20 #include "cgroup-util.h"
21 #include "def.h"
22 #include "dirent-util.h"
23 #include "extract-word.h"
24 #include "fd-util.h"
25 #include "fileio.h"
26 #include "format-util.h"
27 #include "fs-util.h"
28 #include "log.h"
29 #include "login-util.h"
30 #include "macro.h"
31 #include "missing.h"
32 #include "mkdir.h"
33 #include "parse-util.h"
34 #include "path-util.h"
35 #include "proc-cmdline.h"
36 #include "process-util.h"
37 #include "set.h"
38 #include "special.h"
39 #include "stat-util.h"
40 #include "stdio-util.h"
41 #include "string-table.h"
42 #include "string-util.h"
43 #include "strv.h"
44 #include "unit-name.h"
45 #include "user-util.h"
46
47 int cg_enumerate_processes(const char *controller, const char *path, FILE **_f) {
48 _cleanup_free_ char *fs = NULL;
49 FILE *f;
50 int r;
51
52 assert(_f);
53
54 r = cg_get_path(controller, path, "cgroup.procs", &fs);
55 if (r < 0)
56 return r;
57
58 f = fopen(fs, "re");
59 if (!f)
60 return -errno;
61
62 *_f = f;
63 return 0;
64 }
65
66 int cg_read_pid(FILE *f, pid_t *_pid) {
67 unsigned long ul;
68
69 /* Note that the cgroup.procs might contain duplicates! See
70 * cgroups.txt for details. */
71
72 assert(f);
73 assert(_pid);
74
75 errno = 0;
76 if (fscanf(f, "%lu", &ul) != 1) {
77
78 if (feof(f))
79 return 0;
80
81 return errno > 0 ? -errno : -EIO;
82 }
83
84 if (ul <= 0)
85 return -EIO;
86
87 *_pid = (pid_t) ul;
88 return 1;
89 }
90
91 int cg_read_event(
92 const char *controller,
93 const char *path,
94 const char *event,
95 char **val) {
96
97 _cleanup_free_ char *events = NULL, *content = NULL;
98 char *p, *line;
99 int r;
100
101 r = cg_get_path(controller, path, "cgroup.events", &events);
102 if (r < 0)
103 return r;
104
105 r = read_full_file(events, &content, NULL);
106 if (r < 0)
107 return r;
108
109 p = content;
110 while ((line = strsep(&p, "\n"))) {
111 char *key;
112
113 key = strsep(&line, " ");
114 if (!key || !line)
115 return -EINVAL;
116
117 if (strcmp(key, event))
118 continue;
119
120 *val = strdup(line);
121 return 0;
122 }
123
124 return -ENOENT;
125 }
126
127 bool cg_ns_supported(void) {
128 static thread_local int enabled = -1;
129
130 if (enabled >= 0)
131 return enabled;
132
133 if (access("/proc/self/ns/cgroup", F_OK) < 0) {
134 if (errno != ENOENT)
135 log_debug_errno(errno, "Failed to check whether /proc/self/ns/cgroup is available, assuming not: %m");
136 enabled = false;
137 } else
138 enabled = true;
139
140 return enabled;
141 }
142
143 int cg_enumerate_subgroups(const char *controller, const char *path, DIR **_d) {
144 _cleanup_free_ char *fs = NULL;
145 int r;
146 DIR *d;
147
148 assert(_d);
149
150 /* This is not recursive! */
151
152 r = cg_get_path(controller, path, NULL, &fs);
153 if (r < 0)
154 return r;
155
156 d = opendir(fs);
157 if (!d)
158 return -errno;
159
160 *_d = d;
161 return 0;
162 }
163
/* Iterates through an open cgroup directory (as returned by
 * cg_enumerate_subgroups()) and returns the name of the next child cgroup.
 *
 * Returns 1 and stores a newly allocated name in *fn (caller frees) when a
 * subgroup was found, 0 when the directory is exhausted, and a negative
 * errno-style error on failure. */
int cg_read_subgroup(DIR *d, char **fn) {
        struct dirent *de;

        assert(d);
        assert(fn);

        FOREACH_DIRENT_ALL(de, d, return -errno) {
                char *b;

                /* Only directories can be child cgroups */
                if (de->d_type != DT_DIR)
                        continue;

                /* Skip the "." and ".." entries */
                if (dot_or_dot_dot(de->d_name))
                        continue;

                b = strdup(de->d_name);
                if (!b)
                        return -ENOMEM;

                *fn = b;
                return 1;
        }

        return 0;
}
189
190 int cg_rmdir(const char *controller, const char *path) {
191 _cleanup_free_ char *p = NULL;
192 int r;
193
194 r = cg_get_path(controller, path, NULL, &p);
195 if (r < 0)
196 return r;
197
198 r = rmdir(p);
199 if (r < 0 && errno != ENOENT)
200 return -errno;
201
202 r = cg_hybrid_unified();
203 if (r <= 0)
204 return r;
205
206 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
207 r = cg_rmdir(SYSTEMD_CGROUP_CONTROLLER_LEGACY, path);
208 if (r < 0)
209 log_warning_errno(r, "Failed to remove compat systemd cgroup %s: %m", path);
210 }
211
212 return 0;
213 }
214
/* Sends the specified signal to all processes in the given cgroup (not
 * recursively). PIDs already contained in the set 's' are skipped, and every
 * PID that is processed is added to it, so repeated calls never signal the
 * same process twice; if 's' is NULL a temporary set is allocated internally.
 * If 'log_kill' is given, it is invoked once per PID before signalling, and
 * the value it returned for the first successfully signalled process becomes
 * this function's non-negative return value. Returns > 0 if at least one
 * process was signalled, 0 if none were, negative errno-style error on
 * failure. */
int cg_kill(
                const char *controller,
                const char *path,
                int sig,
                CGroupFlags flags,
                Set *s,
                cg_kill_log_func_t log_kill,
                void *userdata) {

        _cleanup_set_free_ Set *allocated_set = NULL;
        bool done = false;
        int r, ret = 0, ret_log_kill = 0;
        pid_t my_pid;

        assert(sig >= 0);

        /* Don't send SIGCONT twice. Also, SIGKILL always works even when process is suspended, hence don't send
         * SIGCONT on SIGKILL. */
        if (IN_SET(sig, SIGCONT, SIGKILL))
                flags &= ~CGROUP_SIGCONT;

        /* This goes through the tasks list and kills them all. This
         * is repeated until no further processes are added to the
         * tasks list, to properly handle forking processes */

        if (!s) {
                s = allocated_set = set_new(NULL);
                if (!s)
                        return -ENOMEM;
        }

        my_pid = getpid_cached();

        do {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid = 0;
                done = true;

                r = cg_enumerate_processes(controller, path, &f);
                if (r < 0) {
                        /* A cgroup that vanished counts as fully killed */
                        if (ret >= 0 && r != -ENOENT)
                                return r;

                        return ret;
                }

                while ((r = cg_read_pid(f, &pid)) > 0) {

                        if ((flags & CGROUP_IGNORE_SELF) && pid == my_pid)
                                continue;

                        /* Already processed in a previous iteration/call? */
                        if (set_get(s, PID_TO_PTR(pid)) == PID_TO_PTR(pid))
                                continue;

                        if (log_kill)
                                ret_log_kill = log_kill(pid, sig, userdata);

                        /* If we haven't killed this process yet, kill
                         * it */
                        if (kill(pid, sig) < 0) {
                                /* ESRCH means the process exited on its own — not an error */
                                if (ret >= 0 && errno != ESRCH)
                                        ret = -errno;
                        } else {
                                if (flags & CGROUP_SIGCONT)
                                        (void) kill(pid, SIGCONT);

                                if (ret == 0) {
                                        if (log_kill)
                                                ret = ret_log_kill;
                                        else
                                                ret = 1;
                                }
                        }

                        done = false;

                        r = set_put(s, PID_TO_PTR(pid));
                        if (r < 0) {
                                if (ret >= 0)
                                        return r;

                                return ret;
                        }
                }

                if (r < 0) {
                        if (ret >= 0)
                                return r;

                        return ret;
                }

                /* To avoid racing against processes which fork
                 * quicker than we can kill them we repeat this until
                 * no new pids need to be killed. */

        } while (!done);

        return ret;
}
315
/* Like cg_kill(), but also descends into all child cgroups of 'path'. With
 * CGROUP_REMOVE set, each visited cgroup directory is removed afterwards
 * (-ENOENT and -EBUSY from the removal are ignored). The set 's' serves the
 * same deduplication purpose as in cg_kill() and is shared across the whole
 * recursion. */
int cg_kill_recursive(
                const char *controller,
                const char *path,
                int sig,
                CGroupFlags flags,
                Set *s,
                cg_kill_log_func_t log_kill,
                void *userdata) {

        _cleanup_set_free_ Set *allocated_set = NULL;
        _cleanup_closedir_ DIR *d = NULL;
        int r, ret;
        char *fn;

        assert(path);
        assert(sig >= 0);

        if (!s) {
                s = allocated_set = set_new(NULL);
                if (!s)
                        return -ENOMEM;
        }

        /* Kill the processes of this cgroup first, then recurse */
        ret = cg_kill(controller, path, sig, flags, s, log_kill, userdata);

        r = cg_enumerate_subgroups(controller, path, &d);
        if (r < 0) {
                /* A vanished cgroup has nothing left to kill */
                if (ret >= 0 && r != -ENOENT)
                        return r;

                return ret;
        }

        while ((r = cg_read_subgroup(d, &fn)) > 0) {
                _cleanup_free_ char *p = NULL;

                p = strjoin(path, "/", fn);
                free(fn);
                if (!p)
                        return -ENOMEM;

                r = cg_kill_recursive(controller, p, sig, flags, s, log_kill, userdata);
                /* Keep the first error, or the first positive result */
                if (r != 0 && ret >= 0)
                        ret = r;
        }
        if (ret >= 0 && r < 0)
                ret = r;

        if (flags & CGROUP_REMOVE) {
                r = cg_rmdir(controller, path);
                if (r < 0 && ret >= 0 && !IN_SET(r, -ENOENT, -EBUSY))
                        return r;
        }

        return ret;
}
372
/* Moves all processes from cgroup 'pfrom' of controller 'cfrom' into cgroup
 * 'pto' of controller 'cto'. The enumeration is repeated until no unseen PID
 * shows up anymore, to properly catch processes that fork while we migrate.
 * Kernel threads in the root cgroup are left alone. Returns > 0 if at least
 * one process was moved, 0 if none were, negative errno-style error on
 * failure. */
int cg_migrate(
                const char *cfrom,
                const char *pfrom,
                const char *cto,
                const char *pto,
                CGroupFlags flags) {

        bool done = false;
        _cleanup_set_free_ Set *s = NULL;
        int r, ret = 0;
        pid_t my_pid;

        assert(cfrom);
        assert(pfrom);
        assert(cto);
        assert(pto);

        /* Set of PIDs we already attempted to move, to avoid reprocessing */
        s = set_new(NULL);
        if (!s)
                return -ENOMEM;

        my_pid = getpid_cached();

        do {
                _cleanup_fclose_ FILE *f = NULL;
                pid_t pid = 0;
                done = true;

                r = cg_enumerate_processes(cfrom, pfrom, &f);
                if (r < 0) {
                        /* A vanished source cgroup means nothing left to move */
                        if (ret >= 0 && r != -ENOENT)
                                return r;

                        return ret;
                }

                while ((r = cg_read_pid(f, &pid)) > 0) {

                        /* This might do weird stuff if we aren't a
                         * single-threaded program. However, we
                         * luckily know we are not */
                        if ((flags & CGROUP_IGNORE_SELF) && pid == my_pid)
                                continue;

                        if (set_get(s, PID_TO_PTR(pid)) == PID_TO_PTR(pid))
                                continue;

                        /* Ignore kernel threads. Since they can only
                         * exist in the root cgroup, we only check for
                         * them there. */
                        /* (Note: cfrom is asserted non-NULL above, so the
                         * cfrom check here is redundant but harmless.) */
                        if (cfrom &&
                            empty_or_root(pfrom) &&
                            is_kernel_thread(pid) > 0)
                                continue;

                        r = cg_attach(cto, pto, pid);
                        if (r < 0) {
                                /* -ESRCH: the process died meanwhile — fine */
                                if (ret >= 0 && r != -ESRCH)
                                        ret = r;
                        } else if (ret == 0)
                                ret = 1;

                        done = false;

                        r = set_put(s, PID_TO_PTR(pid));
                        if (r < 0) {
                                if (ret >= 0)
                                        return r;

                                return ret;
                        }
                }

                if (r < 0) {
                        if (ret >= 0)
                                return r;

                        return ret;
                }
        } while (!done);

        return ret;
}
456
/* Like cg_migrate(), but also migrates the processes of all child cgroups of
 * 'pfrom' (all of them end up flattened in 'pto'). With CGROUP_REMOVE set the
 * source cgroup directory is removed afterwards (-ENOENT and -EBUSY are
 * ignored). Returns > 0 if at least one process was moved. */
int cg_migrate_recursive(
                const char *cfrom,
                const char *pfrom,
                const char *cto,
                const char *pto,
                CGroupFlags flags) {

        _cleanup_closedir_ DIR *d = NULL;
        int r, ret = 0;
        char *fn;

        assert(cfrom);
        assert(pfrom);
        assert(cto);
        assert(pto);

        /* Migrate this cgroup's own processes first, then recurse */
        ret = cg_migrate(cfrom, pfrom, cto, pto, flags);

        r = cg_enumerate_subgroups(cfrom, pfrom, &d);
        if (r < 0) {
                if (ret >= 0 && r != -ENOENT)
                        return r;

                return ret;
        }

        while ((r = cg_read_subgroup(d, &fn)) > 0) {
                _cleanup_free_ char *p = NULL;

                p = strjoin(pfrom, "/", fn);
                free(fn);
                if (!p)
                        return -ENOMEM;

                r = cg_migrate_recursive(cfrom, p, cto, pto, flags);
                /* Keep the first error, or the first positive result */
                if (r != 0 && ret >= 0)
                        ret = r;
        }

        if (r < 0 && ret >= 0)
                ret = r;

        if (flags & CGROUP_REMOVE) {
                r = cg_rmdir(cfrom, pfrom);
                if (r < 0 && ret >= 0 && !IN_SET(r, -ENOENT, -EBUSY))
                        return r;
        }

        return ret;
}
507
/* Like cg_migrate_recursive(), but if migrating into 'pto' fails, retries
 * with every prefix of the destination path (longest first), settling for the
 * first one that works. Returns the result of the first successful attempt,
 * or the original error if none succeeded. */
int cg_migrate_recursive_fallback(
                const char *cfrom,
                const char *pfrom,
                const char *cto,
                const char *pto,
                CGroupFlags flags) {

        int r;

        assert(cfrom);
        assert(pfrom);
        assert(cto);
        assert(pto);

        r = cg_migrate_recursive(cfrom, pfrom, cto, pto, flags);
        if (r < 0) {
                char prefix[strlen(pto) + 1];

                /* This didn't work? Then let's try all prefixes of the destination */

                PATH_FOREACH_PREFIX(prefix, pto) {
                        int q;

                        q = cg_migrate_recursive(cfrom, pfrom, cto, prefix, flags);
                        if (q >= 0)
                                return q;
                }
        }

        return r;
}
539
540 static const char *controller_to_dirname(const char *controller) {
541 const char *e;
542
543 assert(controller);
544
545 /* Converts a controller name to the directory name below
546 * /sys/fs/cgroup/ we want to mount it to. Effectively, this
547 * just cuts off the name= prefixed used for named
548 * hierarchies, if it is specified. */
549
550 if (streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
551 if (cg_hybrid_unified() > 0)
552 controller = SYSTEMD_CGROUP_CONTROLLER_HYBRID;
553 else
554 controller = SYSTEMD_CGROUP_CONTROLLER_LEGACY;
555 }
556
557 e = startswith(controller, "name=");
558 if (e)
559 return e;
560
561 return controller;
562 }
563
/* Builds the cgroup v1 (legacy) filesystem path for the given controller,
 * cgroup path and attribute suffix, of the general form
 * "/sys/fs/cgroup/<controller-dir>/<path>/<suffix>". Empty path and/or suffix
 * components are simply omitted. Stores the allocated result in *fs. */
static int join_path_legacy(const char *controller, const char *path, const char *suffix, char **fs) {
        const char *dn;
        char *t = NULL;

        assert(fs);
        assert(controller);

        dn = controller_to_dirname(controller);

        if (isempty(path) && isempty(suffix))
                t = strappend("/sys/fs/cgroup/", dn);
        else if (isempty(path))
                t = strjoin("/sys/fs/cgroup/", dn, "/", suffix);
        else if (isempty(suffix))
                t = strjoin("/sys/fs/cgroup/", dn, "/", path);
        else
                t = strjoin("/sys/fs/cgroup/", dn, "/", path, "/", suffix);
        if (!t)
                return -ENOMEM;

        *fs = t;
        return 0;
}
587
/* Builds the cgroup v2 (unified) filesystem path for the given cgroup path
 * and attribute suffix, of the general form "/sys/fs/cgroup/<path>/<suffix>".
 * No controller directory exists on the unified hierarchy. Empty components
 * are omitted. Stores the allocated result in *fs. */
static int join_path_unified(const char *path, const char *suffix, char **fs) {
        char *t;

        assert(fs);

        if (isempty(path) && isempty(suffix))
                t = strdup("/sys/fs/cgroup");
        else if (isempty(path))
                t = strappend("/sys/fs/cgroup/", suffix);
        else if (isempty(suffix))
                t = strappend("/sys/fs/cgroup/", path);
        else
                t = strjoin("/sys/fs/cgroup/", path, "/", suffix);
        if (!t)
                return -ENOMEM;

        *fs = t;
        return 0;
}
607
/* Builds the filesystem path for the given controller, cgroup path, and
 * (optional) attribute suffix, picking the unified or legacy layout as
 * appropriate. If controller is NULL, simply joins path and suffix without
 * any /sys/fs/cgroup prefix. The (simplified) result is stored, newly
 * allocated, in *fs. */
int cg_get_path(const char *controller, const char *path, const char *suffix, char **fs) {
        int r;

        assert(fs);

        if (!controller) {
                char *t;

                /* If no controller is specified, we return the path
                 * *below* the controllers, without any prefix. */

                if (!path && !suffix)
                        return -EINVAL;

                if (!suffix)
                        t = strdup(path);
                else if (!path)
                        t = strdup(suffix);
                else
                        t = strjoin(path, "/", suffix);
                if (!t)
                        return -ENOMEM;

                *fs = path_simplify(t, false);
                return 0;
        }

        if (!cg_controller_is_valid(controller))
                return -EINVAL;

        /* Pick the layout matching the cgroup setup we are running on */
        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0)
                r = join_path_unified(path, suffix, fs);
        else
                r = join_path_legacy(controller, path, suffix, fs);
        if (r < 0)
                return r;

        path_simplify(*fs, false);
        return 0;
}
651
/* Checks whether a specific controller is accessible, i.e. its hierarchy
 * mounted. In the unified hierarchy all controllers are considered
 * accessible, except for the named hierarchies, which are unsupported there.
 * Returns 0 if accessible, a negative errno-style error otherwise. */
static int controller_is_accessible(const char *controller) {
        int r;

        assert(controller);

        if (!cg_controller_is_valid(controller))
                return -EINVAL;

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                /* We don't support named hierarchies if we are using
                 * the unified hierarchy. */

                if (streq(controller, SYSTEMD_CGROUP_CONTROLLER))
                        return 0;

                if (startswith(controller, "name="))
                        return -EOPNOTSUPP;

        } else {
                const char *cc, *dn;

                /* On legacy setups, probe for the controller's mount point */
                dn = controller_to_dirname(controller);
                cc = strjoina("/sys/fs/cgroup/", dn);

                if (laccess(cc, F_OK) < 0)
                        return -errno;
        }

        return 0;
}
690
/* Like cg_get_path(), but first verifies that the specified controller's
 * hierarchy is actually mounted and accessible. */
int cg_get_path_and_check(const char *controller, const char *path, const char *suffix, char **fs) {
        int q;

        assert(controller);
        assert(fs);

        q = controller_is_accessible(controller);
        if (q < 0)
                return q;

        return cg_get_path(controller, path, suffix, fs);
}
704
705 static int trim_cb(const char *path, const struct stat *sb, int typeflag, struct FTW *ftwbuf) {
706 assert(path);
707 assert(sb);
708 assert(ftwbuf);
709
710 if (typeflag != FTW_DP)
711 return 0;
712
713 if (ftwbuf->level < 1)
714 return 0;
715
716 (void) rmdir(path);
717 return 0;
718 }
719
/* Recursively removes all empty subgroups below the specified cgroup via a
 * depth-first post-order walk; with delete_root also removes the cgroup
 * itself. A missing hierarchy is not an error. On hybrid setups the compat
 * legacy systemd hierarchy is trimmed too (failures there only logged). */
int cg_trim(const char *controller, const char *path, bool delete_root) {
        _cleanup_free_ char *fs = NULL;
        int r = 0, q;

        assert(path);

        r = cg_get_path(controller, path, NULL, &fs);
        if (r < 0)
                return r;

        /* nftw() does not reliably set errno itself; clear it first so we can
         * distinguish a real errno from an unspecified failure (-EIO). */
        errno = 0;
        if (nftw(fs, trim_cb, 64, FTW_DEPTH|FTW_MOUNT|FTW_PHYS) != 0) {
                if (errno == ENOENT)
                        r = 0;
                else if (errno > 0)
                        r = -errno;
                else
                        r = -EIO;
        }

        if (delete_root) {
                if (rmdir(fs) < 0 && errno != ENOENT)
                        return -errno;
        }

        q = cg_hybrid_unified();
        if (q < 0)
                return q;
        if (q > 0 && streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
                q = cg_trim(SYSTEMD_CGROUP_CONTROLLER_LEGACY, path, delete_root);
                if (q < 0)
                        log_warning_errno(q, "Failed to trim compat systemd cgroup %s: %m", path);
        }

        return r;
}
756
/* Create a cgroup in the hierarchy of controller.
 * Returns 0 if the group already existed, 1 on success, negative otherwise.
 */
int cg_create(const char *controller, const char *path) {
        _cleanup_free_ char *fs = NULL;
        int r;

        r = cg_get_path_and_check(controller, path, NULL, &fs);
        if (r < 0)
                return r;

        /* Create any missing parent cgroups first */
        r = mkdir_parents(fs, 0755);
        if (r < 0)
                return r;

        r = mkdir_errno_wrapper(fs, 0755);
        if (r == -EEXIST)
                return 0;
        if (r < 0)
                return r;

        /* On hybrid setups, mirror the group into the legacy systemd
         * hierarchy; failures there are only logged. */
        r = cg_hybrid_unified();
        if (r < 0)
                return r;

        if (r > 0 && streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
                r = cg_create(SYSTEMD_CGROUP_CONTROLLER_LEGACY, path);
                if (r < 0)
                        log_warning_errno(r, "Failed to create compat systemd cgroup %s: %m", path);
        }

        return 1;
}
790
/* Creates the specified cgroup and moves the given process into it. On
 * success, returns the result of cg_create() (1 if newly created, 0 if it
 * existed already). Note that the cgroup is NOT removed again if attaching
 * the process fails. */
int cg_create_and_attach(const char *controller, const char *path, pid_t pid) {
        int created, r;

        assert(pid >= 0);

        created = cg_create(controller, path);
        if (created < 0)
                return created;

        r = cg_attach(controller, path, pid);
        if (r < 0)
                return r;

        return created;
}
807
/* Moves the specified process into the given cgroup by writing its PID into
 * the cgroup's "cgroup.procs" attribute. A pid of 0 means the calling
 * process. On hybrid setups the process is also attached to the compat legacy
 * systemd hierarchy; failures there are only logged. */
int cg_attach(const char *controller, const char *path, pid_t pid) {
        _cleanup_free_ char *fs = NULL;
        char c[DECIMAL_STR_MAX(pid_t) + 2];
        int r;

        assert(path);
        assert(pid >= 0);

        r = cg_get_path_and_check(controller, path, "cgroup.procs", &fs);
        if (r < 0)
                return r;

        if (pid == 0)
                pid = getpid_cached();

        xsprintf(c, PID_FMT "\n", pid);

        r = write_string_file(fs, c, WRITE_STRING_FILE_DISABLE_BUFFER);
        if (r < 0)
                return r;

        r = cg_hybrid_unified();
        if (r < 0)
                return r;

        if (r > 0 && streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
                r = cg_attach(SYSTEMD_CGROUP_CONTROLLER_LEGACY, path, pid);
                if (r < 0)
                        log_warning_errno(r, "Failed to attach "PID_FMT" to compat systemd cgroup %s: %m", pid, path);
        }

        return 0;
}
841
/* Like cg_attach(), but if attaching to 'path' fails, retries with every
 * prefix of the path (longest first), settling for the first that works.
 * Returns the result of the first successful attempt, or the original error
 * if none succeeded. */
int cg_attach_fallback(const char *controller, const char *path, pid_t pid) {
        int r;

        assert(controller);
        assert(path);
        assert(pid >= 0);

        r = cg_attach(controller, path, pid);
        if (r < 0) {
                char prefix[strlen(path) + 1];

                /* This didn't work? Then let's try all prefixes of
                 * the destination */

                PATH_FOREACH_PREFIX(prefix, path) {
                        int q;

                        q = cg_attach(controller, prefix, pid);
                        if (q >= 0)
                                return q;
                }
        }

        return r;
}
867
/* Changes ownership (and mode) of the specified cgroup directory and of its
 * delegation-relevant attribute files, so that the given user/gid may manage
 * it. Attributes marked non-fatal below may be missing depending on kernel
 * version; failures on those are only logged. Passing UID_INVALID and
 * GID_INVALID makes this a no-op. */
int cg_set_access(
                const char *controller,
                const char *path,
                uid_t uid,
                gid_t gid) {

        struct Attribute {
                const char *name;
                bool fatal;  /* if true, a chmod/chown failure aborts the call */
        };

        /* cgroup v1, aka legacy/non-unified */
        static const struct Attribute legacy_attributes[] = {
                { "cgroup.procs", true },
                { "tasks", false },
                { "cgroup.clone_children", false },
                {},
        };

        /* cgroup v2, aka unified */
        static const struct Attribute unified_attributes[] = {
                { "cgroup.procs", true },
                { "cgroup.subtree_control", true },
                { "cgroup.threads", false },
                {},
        };

        /* Indexed by the boolean result of cg_unified_controller() */
        static const struct Attribute* const attributes[] = {
                [false] = legacy_attributes,
                [true] = unified_attributes,
        };

        _cleanup_free_ char *fs = NULL;
        const struct Attribute *i;
        int r, unified;

        assert(path);

        if (uid == UID_INVALID && gid == GID_INVALID)
                return 0;

        unified = cg_unified_controller(controller);
        if (unified < 0)
                return unified;

        /* Configure access to the cgroup itself */
        r = cg_get_path(controller, path, NULL, &fs);
        if (r < 0)
                return r;

        r = chmod_and_chown(fs, 0755, uid, gid);
        if (r < 0)
                return r;

        /* Configure access to the cgroup's attributes */
        for (i = attributes[unified]; i->name; i++) {
                fs = mfree(fs);

                r = cg_get_path(controller, path, i->name, &fs);
                if (r < 0)
                        return r;

                r = chmod_and_chown(fs, 0644, uid, gid);
                if (r < 0) {
                        if (i->fatal)
                                return r;

                        log_debug_errno(r, "Failed to set access on cgroup %s, ignoring: %m", fs);
                }
        }

        if (streq(controller, SYSTEMD_CGROUP_CONTROLLER)) {
                r = cg_hybrid_unified();
                if (r < 0)
                        return r;
                if (r > 0) {
                        /* Always propagate access mode from unified to legacy controller */
                        r = cg_set_access(SYSTEMD_CGROUP_CONTROLLER_LEGACY, path, uid, gid);
                        if (r < 0)
                                log_debug_errno(r, "Failed to set access on compatibility systemd cgroup %s, ignoring: %m", path);
                }
        }

        return 0;
}
953
954 int cg_set_xattr(const char *controller, const char *path, const char *name, const void *value, size_t size, int flags) {
955 _cleanup_free_ char *fs = NULL;
956 int r;
957
958 assert(path);
959 assert(name);
960 assert(value || size <= 0);
961
962 r = cg_get_path(controller, path, NULL, &fs);
963 if (r < 0)
964 return r;
965
966 if (setxattr(fs, name, value, size, flags) < 0)
967 return -errno;
968
969 return 0;
970 }
971
972 int cg_get_xattr(const char *controller, const char *path, const char *name, void *value, size_t size) {
973 _cleanup_free_ char *fs = NULL;
974 ssize_t n;
975 int r;
976
977 assert(path);
978 assert(name);
979
980 r = cg_get_path(controller, path, NULL, &fs);
981 if (r < 0)
982 return r;
983
984 n = getxattr(fs, name, value, size);
985 if (n < 0)
986 return -errno;
987
988 return (int) n;
989 }
990
/* Determines the cgroup path of the specified process in the given
 * controller's hierarchy by parsing /proc/<pid>/cgroup. A NULL controller
 * means the systemd hierarchy. Stores the newly allocated path in *path.
 * Returns -ESRCH if the process does not exist, -ENODATA if no matching
 * hierarchy line was found. */
int cg_pid_get_path(const char *controller, pid_t pid, char **path) {
        _cleanup_fclose_ FILE *f = NULL;
        const char *fs, *controller_str;
        int unified, r;
        size_t cs = 0;

        assert(path);
        assert(pid >= 0);

        if (controller) {
                if (!cg_controller_is_valid(controller))
                        return -EINVAL;
        } else
                controller = SYSTEMD_CGROUP_CONTROLLER;

        unified = cg_unified_controller(controller);
        if (unified < 0)
                return unified;
        if (unified == 0) {
                /* On legacy setups the systemd hierarchy shows up under its
                 * legacy name in /proc/<pid>/cgroup */
                if (streq(controller, SYSTEMD_CGROUP_CONTROLLER))
                        controller_str = SYSTEMD_CGROUP_CONTROLLER_LEGACY;
                else
                        controller_str = controller;

                cs = strlen(controller_str);
        }

        fs = procfs_file_alloca(pid, "cgroup");
        r = fopen_unlocked(fs, "re", &f);
        if (r == -ENOENT)
                return -ESRCH;
        if (r < 0)
                return r;

        /* Each line is of the form "<id>:<controller-list>:<path>" */
        for (;;) {
                _cleanup_free_ char *line = NULL;
                char *e, *p;

                r = read_line(f, LONG_LINE_MAX, &line);
                if (r < 0)
                        return r;
                if (r == 0)
                        break;

                if (unified) {
                        /* The unified hierarchy always uses hierarchy ID 0 */
                        e = startswith(line, "0:");
                        if (!e)
                                continue;

                        e = strchr(e, ':');
                        if (!e)
                                continue;
                } else {
                        char *l;
                        size_t k;
                        const char *word, *state;
                        bool found = false;

                        l = strchr(line, ':');
                        if (!l)
                                continue;

                        l++;
                        e = strchr(l, ':');
                        if (!e)
                                continue;

                        /* Check whether our controller appears in the
                         * comma-separated controller list of this line */
                        *e = 0;
                        FOREACH_WORD_SEPARATOR(word, k, l, ",", state)
                                if (k == cs && memcmp(word, controller_str, cs) == 0) {
                                        found = true;
                                        break;
                                }
                        if (!found)
                                continue;
                }

                p = strdup(e + 1);
                if (!p)
                        return -ENOMEM;

                /* Truncate suffix indicating the process is a zombie */
                e = endswith(p, " (deleted)");
                if (e)
                        *e = 0;

                *path = p;
                return 0;
        }

        return -ENODATA;
}
1083
/* Registers the specified binary as release agent for the given controller's
 * hierarchy and enables release notifications. Not supported on the unified
 * hierarchy (-EOPNOTSUPP). Returns -EEXIST if a different agent is already
 * installed, 1 if notifications were newly enabled, 0 if everything was
 * already set up. */
int cg_install_release_agent(const char *controller, const char *agent) {
        _cleanup_free_ char *fs = NULL, *contents = NULL;
        const char *sc;
        int r;

        assert(agent);

        r = cg_unified_controller(controller);
        if (r < 0)
                return r;
        if (r > 0) /* doesn't apply to unified hierarchy */
                return -EOPNOTSUPP;

        r = cg_get_path(controller, NULL, "release_agent", &fs);
        if (r < 0)
                return r;

        r = read_one_line_file(fs, &contents);
        if (r < 0)
                return r;

        /* Install our agent, unless one (possibly ours) is registered already */
        sc = strstrip(contents);
        if (isempty(sc)) {
                r = write_string_file(fs, agent, WRITE_STRING_FILE_DISABLE_BUFFER);
                if (r < 0)
                        return r;
        } else if (!path_equal(sc, agent))
                return -EEXIST;

        fs = mfree(fs);
        r = cg_get_path(controller, NULL, "notify_on_release", &fs);
        if (r < 0)
                return r;

        contents = mfree(contents);
        r = read_one_line_file(fs, &contents);
        if (r < 0)
                return r;

        /* Turn on notifications if they were off */
        sc = strstrip(contents);
        if (streq(sc, "0")) {
                r = write_string_file(fs, "1", WRITE_STRING_FILE_DISABLE_BUFFER);
                if (r < 0)
                        return r;

                return 1;
        }

        /* Anything but "0" or "1" here is unexpected */
        if (!streq(sc, "1"))
                return -EIO;

        return 0;
}
1137
1138 int cg_uninstall_release_agent(const char *controller) {
1139 _cleanup_free_ char *fs = NULL;
1140 int r;
1141
1142 r = cg_unified_controller(controller);
1143 if (r < 0)
1144 return r;
1145 if (r > 0) /* Doesn't apply to unified hierarchy */
1146 return -EOPNOTSUPP;
1147
1148 r = cg_get_path(controller, NULL, "notify_on_release", &fs);
1149 if (r < 0)
1150 return r;
1151
1152 r = write_string_file(fs, "0", WRITE_STRING_FILE_DISABLE_BUFFER);
1153 if (r < 0)
1154 return r;
1155
1156 fs = mfree(fs);
1157
1158 r = cg_get_path(controller, NULL, "release_agent", &fs);
1159 if (r < 0)
1160 return r;
1161
1162 r = write_string_file(fs, "", WRITE_STRING_FILE_DISABLE_BUFFER);
1163 if (r < 0)
1164 return r;
1165
1166 return 0;
1167 }
1168
/* Checks whether the specified cgroup contains no processes (not looking at
 * child cgroups). Returns > 0 if empty — a non-existing cgroup counts as
 * empty —, 0 if not, and a negative errno-style error on failure. */
int cg_is_empty(const char *controller, const char *path) {
        _cleanup_fclose_ FILE *f = NULL;
        pid_t pid;
        int r;

        assert(path);

        r = cg_enumerate_processes(controller, path, &f);
        if (r == -ENOENT)
                return true;
        if (r < 0)
                return r;

        r = cg_read_pid(f, &pid);
        if (r < 0)
                return r;

        /* Empty iff not even a single PID could be read */
        return r == 0;
}
1188
/* Checks whether the specified cgroup and all of its child cgroups contain no
 * processes. Returns > 0 if empty (a non-existing cgroup counts as empty),
 * 0 if not, negative errno-style error on failure. */
int cg_is_empty_recursive(const char *controller, const char *path) {
        int r;

        assert(path);

        /* The root cgroup is always populated */
        if (controller && empty_or_root(path))
                return false;

        r = cg_unified_controller(controller);
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *t = NULL;

                /* On the unified hierarchy we can check empty state
                 * via the "populated" attribute of "cgroup.events". */

                r = cg_read_event(controller, path, "populated", &t);
                if (r == -ENOENT)
                        return true;
                if (r < 0)
                        return r;

                return streq(t, "0");
        } else {
                _cleanup_closedir_ DIR *d = NULL;
                char *fn;

                /* On legacy we have to walk the tree ourselves */
                r = cg_is_empty(controller, path);
                if (r <= 0)
                        return r;

                r = cg_enumerate_subgroups(controller, path, &d);
                if (r == -ENOENT)
                        return true;
                if (r < 0)
                        return r;

                while ((r = cg_read_subgroup(d, &fn)) > 0) {
                        _cleanup_free_ char *p = NULL;

                        p = strjoin(path, "/", fn);
                        free(fn);
                        if (!p)
                                return -ENOMEM;

                        /* Bail out as soon as one child is non-empty or errors */
                        r = cg_is_empty_recursive(controller, p);
                        if (r <= 0)
                                return r;
                }
                if (r < 0)
                        return r;

                return true;
        }
}
1246
1247 int cg_split_spec(const char *spec, char **controller, char **path) {
1248 char *t = NULL, *u = NULL;
1249 const char *e;
1250
1251 assert(spec);
1252
1253 if (*spec == '/') {
1254 if (!path_is_normalized(spec))
1255 return -EINVAL;
1256
1257 if (path) {
1258 t = strdup(spec);
1259 if (!t)
1260 return -ENOMEM;
1261
1262 *path = path_simplify(t, false);
1263 }
1264
1265 if (controller)
1266 *controller = NULL;
1267
1268 return 0;
1269 }
1270
1271 e = strchr(spec, ':');
1272 if (!e) {
1273 if (!cg_controller_is_valid(spec))
1274 return -EINVAL;
1275
1276 if (controller) {
1277 t = strdup(spec);
1278 if (!t)
1279 return -ENOMEM;
1280
1281 *controller = t;
1282 }
1283
1284 if (path)
1285 *path = NULL;
1286
1287 return 0;
1288 }
1289
1290 t = strndup(spec, e-spec);
1291 if (!t)
1292 return -ENOMEM;
1293 if (!cg_controller_is_valid(t)) {
1294 free(t);
1295 return -EINVAL;
1296 }
1297
1298 if (isempty(e+1))
1299 u = NULL;
1300 else {
1301 u = strdup(e+1);
1302 if (!u) {
1303 free(t);
1304 return -ENOMEM;
1305 }
1306
1307 if (!path_is_normalized(u) ||
1308 !path_is_absolute(u)) {
1309 free(t);
1310 free(u);
1311 return -EINVAL;
1312 }
1313
1314 path_simplify(u, false);
1315 }
1316
1317 if (controller)
1318 *controller = t;
1319 else
1320 free(t);
1321
1322 if (path)
1323 *path = u;
1324 else
1325 free(u);
1326
1327 return 0;
1328 }
1329
1330 int cg_mangle_path(const char *path, char **result) {
1331 _cleanup_free_ char *c = NULL, *p = NULL;
1332 char *t;
1333 int r;
1334
1335 assert(path);
1336 assert(result);
1337
1338 /* First, check if it already is a filesystem path */
1339 if (path_startswith(path, "/sys/fs/cgroup")) {
1340
1341 t = strdup(path);
1342 if (!t)
1343 return -ENOMEM;
1344
1345 *result = path_simplify(t, false);
1346 return 0;
1347 }
1348
1349 /* Otherwise, treat it as cg spec */
1350 r = cg_split_spec(path, &c, &p);
1351 if (r < 0)
1352 return r;
1353
1354 return cg_get_path(c ?: SYSTEMD_CGROUP_CONTROLLER, p ?: "/", NULL, result);
1355 }
1356
1357 int cg_get_root_path(char **path) {
1358 char *p, *e;
1359 int r;
1360
1361 assert(path);
1362
1363 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, 1, &p);
1364 if (r < 0)
1365 return r;
1366
1367 e = endswith(p, "/" SPECIAL_INIT_SCOPE);
1368 if (!e)
1369 e = endswith(p, "/" SPECIAL_SYSTEM_SLICE); /* legacy */
1370 if (!e)
1371 e = endswith(p, "/system"); /* even more legacy */
1372 if (e)
1373 *e = 0;
1374
1375 *path = p;
1376 return 0;
1377 }
1378
1379 int cg_shift_path(const char *cgroup, const char *root, const char **shifted) {
1380 _cleanup_free_ char *rt = NULL;
1381 char *p;
1382 int r;
1383
1384 assert(cgroup);
1385 assert(shifted);
1386
1387 if (!root) {
1388 /* If the root was specified let's use that, otherwise
1389 * let's determine it from PID 1 */
1390
1391 r = cg_get_root_path(&rt);
1392 if (r < 0)
1393 return r;
1394
1395 root = rt;
1396 }
1397
1398 p = path_startswith(cgroup, root);
1399 if (p && p > cgroup)
1400 *shifted = p - 1;
1401 else
1402 *shifted = cgroup;
1403
1404 return 0;
1405 }
1406
1407 int cg_pid_get_path_shifted(pid_t pid, const char *root, char **cgroup) {
1408 _cleanup_free_ char *raw = NULL;
1409 const char *c;
1410 int r;
1411
1412 assert(pid >= 0);
1413 assert(cgroup);
1414
1415 r = cg_pid_get_path(SYSTEMD_CGROUP_CONTROLLER, pid, &raw);
1416 if (r < 0)
1417 return r;
1418
1419 r = cg_shift_path(raw, root, &c);
1420 if (r < 0)
1421 return r;
1422
1423 if (c == raw)
1424 *cgroup = TAKE_PTR(raw);
1425 else {
1426 char *n;
1427
1428 n = strdup(c);
1429 if (!n)
1430 return -ENOMEM;
1431
1432 *cgroup = n;
1433 }
1434
1435 return 0;
1436 }
1437
/* Decodes the first component of the given cgroup path into a unit name,
 * unescaping cgroup escaping. Returns -ENXIO if the component is too short to
 * be a unit name or is not a valid plain/instance unit name. Stores the newly
 * allocated name in *unit. */
int cg_path_decode_unit(const char *cgroup, char **unit) {
        char *c, *s;
        size_t n;

        assert(cgroup);
        assert(unit);

        /* Length of the first path component; shorter than 3 characters can
         * never be a valid unit name (needs at least "x.y"-style) */
        n = strcspn(cgroup, "/");
        if (n < 3)
                return -ENXIO;

        c = strndupa(cgroup, n);
        c = cg_unescape(c);

        if (!unit_name_is_valid(c, UNIT_NAME_PLAIN|UNIT_NAME_INSTANCE))
                return -ENXIO;

        s = strdup(c);
        if (!s)
                return -ENOMEM;

        *unit = s;
        return 0;
}
1462
/* Checks whether the first n bytes of p form a valid slice unit name, i.e.
 * end in ".slice" and unescape to a valid plain unit name. p may be NULL. */
static bool valid_slice_name(const char *p, size_t n) {

        if (!p)
                return false;

        /* Shortest possible slice name is "x.slice"; this also guarantees
         * that the p + n - 6 access below stays in bounds */
        if (n < STRLEN("x.slice"))
                return false;

        if (memcmp(p + n - 6, ".slice", 6) == 0) {
                char buf[n+1], *c;

                memcpy(buf, p, n);
                buf[n] = 0;

                c = cg_unescape(buf);

                return unit_name_is_valid(c, UNIT_NAME_PLAIN);
        }

        return false;
}
1484
/* Skips over all leading slice components of a cgroup path, returning a
 * pointer to the first path component that is not a valid slice name (which
 * may be the empty string at the end of the path). */
static const char *skip_slices(const char *p) {
        assert(p);

        /* Skips over all slice assignments */

        for (;;) {
                size_t n;

                /* Skip any number of separating slashes */
                p += strspn(p, "/");

                /* Measure the next path component and stop at the first
                 * one that is not a slice */
                n = strcspn(p, "/");
                if (!valid_slice_name(p, n))
                        return p;

                p += n;
        }
}
1502
1503 int cg_path_get_unit(const char *path, char **ret) {
1504 const char *e;
1505 char *unit;
1506 int r;
1507
1508 assert(path);
1509 assert(ret);
1510
1511 e = skip_slices(path);
1512
1513 r = cg_path_decode_unit(e, &unit);
1514 if (r < 0)
1515 return r;
1516
1517 /* We skipped over the slices, don't accept any now */
1518 if (endswith(unit, ".slice")) {
1519 free(unit);
1520 return -ENXIO;
1521 }
1522
1523 *ret = unit;
1524 return 0;
1525 }
1526
1527 int cg_pid_get_unit(pid_t pid, char **unit) {
1528 _cleanup_free_ char *cgroup = NULL;
1529 int r;
1530
1531 assert(unit);
1532
1533 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1534 if (r < 0)
1535 return r;
1536
1537 return cg_path_get_unit(cgroup, unit);
1538 }
1539
1540 /**
1541 * Skip session-*.scope, but require it to be there.
1542 */
1543 static const char *skip_session(const char *p) {
1544 size_t n;
1545
1546 if (isempty(p))
1547 return NULL;
1548
1549 p += strspn(p, "/");
1550
1551 n = strcspn(p, "/");
1552 if (n < STRLEN("session-x.scope"))
1553 return NULL;
1554
1555 if (memcmp(p, "session-", 8) == 0 && memcmp(p + n - 6, ".scope", 6) == 0) {
1556 char buf[n - 8 - 6 + 1];
1557
1558 memcpy(buf, p + 8, n - 8 - 6);
1559 buf[n - 8 - 6] = 0;
1560
1561 /* Note that session scopes never need unescaping,
1562 * since they cannot conflict with the kernel's own
1563 * names, hence we don't need to call cg_unescape()
1564 * here. */
1565
1566 if (!session_id_valid(buf))
1567 return false;
1568
1569 p += n;
1570 p += strspn(p, "/");
1571 return p;
1572 }
1573
1574 return NULL;
1575 }
1576
1577 /**
1578 * Skip user@*.service, but require it to be there.
1579 */
1580 static const char *skip_user_manager(const char *p) {
1581 size_t n;
1582
1583 if (isempty(p))
1584 return NULL;
1585
1586 p += strspn(p, "/");
1587
1588 n = strcspn(p, "/");
1589 if (n < STRLEN("user@x.service"))
1590 return NULL;
1591
1592 if (memcmp(p, "user@", 5) == 0 && memcmp(p + n - 8, ".service", 8) == 0) {
1593 char buf[n - 5 - 8 + 1];
1594
1595 memcpy(buf, p + 5, n - 5 - 8);
1596 buf[n - 5 - 8] = 0;
1597
1598 /* Note that user manager services never need unescaping,
1599 * since they cannot conflict with the kernel's own
1600 * names, hence we don't need to call cg_unescape()
1601 * here. */
1602
1603 if (parse_uid(buf, NULL) < 0)
1604 return NULL;
1605
1606 p += n;
1607 p += strspn(p, "/");
1608
1609 return p;
1610 }
1611
1612 return NULL;
1613 }
1614
/* Skips the user-session prefix of a cgroup path: any slices, then either the
 * user manager service or the session scope. Returns NULL if neither is found. */
static const char *skip_user_prefix(const char *path) {
        const char *after_slices, *after_manager;

        assert(path);

        /* Skip slices, if there are any */
        after_slices = skip_slices(path);

        /* Prefer skipping the user manager, if it's in the path... */
        after_manager = skip_user_manager(after_slices);
        if (after_manager)
                return after_manager;

        /* ...otherwise try the session scope instead */
        return skip_session(after_slices);
}
1631
1632 int cg_path_get_user_unit(const char *path, char **ret) {
1633 const char *t;
1634
1635 assert(path);
1636 assert(ret);
1637
1638 t = skip_user_prefix(path);
1639 if (!t)
1640 return -ENXIO;
1641
1642 /* And from here on it looks pretty much the same as for a
1643 * system unit, hence let's use the same parser from here
1644 * on. */
1645 return cg_path_get_unit(t, ret);
1646 }
1647
1648 int cg_pid_get_user_unit(pid_t pid, char **unit) {
1649 _cleanup_free_ char *cgroup = NULL;
1650 int r;
1651
1652 assert(unit);
1653
1654 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1655 if (r < 0)
1656 return r;
1657
1658 return cg_path_get_user_unit(cgroup, unit);
1659 }
1660
1661 int cg_path_get_machine_name(const char *path, char **machine) {
1662 _cleanup_free_ char *u = NULL;
1663 const char *sl;
1664 int r;
1665
1666 r = cg_path_get_unit(path, &u);
1667 if (r < 0)
1668 return r;
1669
1670 sl = strjoina("/run/systemd/machines/unit:", u);
1671 return readlink_malloc(sl, machine);
1672 }
1673
1674 int cg_pid_get_machine_name(pid_t pid, char **machine) {
1675 _cleanup_free_ char *cgroup = NULL;
1676 int r;
1677
1678 assert(machine);
1679
1680 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1681 if (r < 0)
1682 return r;
1683
1684 return cg_path_get_machine_name(cgroup, machine);
1685 }
1686
1687 int cg_path_get_session(const char *path, char **session) {
1688 _cleanup_free_ char *unit = NULL;
1689 char *start, *end;
1690 int r;
1691
1692 assert(path);
1693
1694 r = cg_path_get_unit(path, &unit);
1695 if (r < 0)
1696 return r;
1697
1698 start = startswith(unit, "session-");
1699 if (!start)
1700 return -ENXIO;
1701 end = endswith(start, ".scope");
1702 if (!end)
1703 return -ENXIO;
1704
1705 *end = 0;
1706 if (!session_id_valid(start))
1707 return -ENXIO;
1708
1709 if (session) {
1710 char *rr;
1711
1712 rr = strdup(start);
1713 if (!rr)
1714 return -ENOMEM;
1715
1716 *session = rr;
1717 }
1718
1719 return 0;
1720 }
1721
1722 int cg_pid_get_session(pid_t pid, char **session) {
1723 _cleanup_free_ char *cgroup = NULL;
1724 int r;
1725
1726 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1727 if (r < 0)
1728 return r;
1729
1730 return cg_path_get_session(cgroup, session);
1731 }
1732
1733 int cg_path_get_owner_uid(const char *path, uid_t *uid) {
1734 _cleanup_free_ char *slice = NULL;
1735 char *start, *end;
1736 int r;
1737
1738 assert(path);
1739
1740 r = cg_path_get_slice(path, &slice);
1741 if (r < 0)
1742 return r;
1743
1744 start = startswith(slice, "user-");
1745 if (!start)
1746 return -ENXIO;
1747 end = endswith(start, ".slice");
1748 if (!end)
1749 return -ENXIO;
1750
1751 *end = 0;
1752 if (parse_uid(start, uid) < 0)
1753 return -ENXIO;
1754
1755 return 0;
1756 }
1757
1758 int cg_pid_get_owner_uid(pid_t pid, uid_t *uid) {
1759 _cleanup_free_ char *cgroup = NULL;
1760 int r;
1761
1762 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1763 if (r < 0)
1764 return r;
1765
1766 return cg_path_get_owner_uid(cgroup, uid);
1767 }
1768
1769 int cg_path_get_slice(const char *p, char **slice) {
1770 const char *e = NULL;
1771
1772 assert(p);
1773 assert(slice);
1774
1775 /* Finds the right-most slice unit from the beginning, but
1776 * stops before we come to the first non-slice unit. */
1777
1778 for (;;) {
1779 size_t n;
1780
1781 p += strspn(p, "/");
1782
1783 n = strcspn(p, "/");
1784 if (!valid_slice_name(p, n)) {
1785
1786 if (!e) {
1787 char *s;
1788
1789 s = strdup(SPECIAL_ROOT_SLICE);
1790 if (!s)
1791 return -ENOMEM;
1792
1793 *slice = s;
1794 return 0;
1795 }
1796
1797 return cg_path_decode_unit(e, slice);
1798 }
1799
1800 e = p;
1801 p += n;
1802 }
1803 }
1804
1805 int cg_pid_get_slice(pid_t pid, char **slice) {
1806 _cleanup_free_ char *cgroup = NULL;
1807 int r;
1808
1809 assert(slice);
1810
1811 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1812 if (r < 0)
1813 return r;
1814
1815 return cg_path_get_slice(cgroup, slice);
1816 }
1817
1818 int cg_path_get_user_slice(const char *p, char **slice) {
1819 const char *t;
1820 assert(p);
1821 assert(slice);
1822
1823 t = skip_user_prefix(p);
1824 if (!t)
1825 return -ENXIO;
1826
1827 /* And now it looks pretty much the same as for a system
1828 * slice, so let's just use the same parser from here on. */
1829 return cg_path_get_slice(t, slice);
1830 }
1831
1832 int cg_pid_get_user_slice(pid_t pid, char **slice) {
1833 _cleanup_free_ char *cgroup = NULL;
1834 int r;
1835
1836 assert(slice);
1837
1838 r = cg_pid_get_path_shifted(pid, NULL, &cgroup);
1839 if (r < 0)
1840 return r;
1841
1842 return cg_path_get_user_slice(cgroup, slice);
1843 }
1844
1845 char *cg_escape(const char *p) {
1846 bool need_prefix = false;
1847
1848 /* This implements very minimal escaping for names to be used
1849 * as file names in the cgroup tree: any name which might
1850 * conflict with a kernel name or is prefixed with '_' is
1851 * prefixed with a '_'. That way, when reading cgroup names it
1852 * is sufficient to remove a single prefixing underscore if
1853 * there is one. */
1854
1855 /* The return value of this function (unlike cg_unescape())
1856 * needs free()! */
1857
1858 if (IN_SET(p[0], 0, '_', '.') ||
1859 STR_IN_SET(p, "notify_on_release", "release_agent", "tasks") ||
1860 startswith(p, "cgroup."))
1861 need_prefix = true;
1862 else {
1863 const char *dot;
1864
1865 dot = strrchr(p, '.');
1866 if (dot) {
1867 CGroupController c;
1868 size_t l = dot - p;
1869
1870 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
1871 const char *n;
1872
1873 n = cgroup_controller_to_string(c);
1874
1875 if (l != strlen(n))
1876 continue;
1877
1878 if (memcmp(p, n, l) != 0)
1879 continue;
1880
1881 need_prefix = true;
1882 break;
1883 }
1884 }
1885 }
1886
1887 if (need_prefix)
1888 return strappend("_", p);
1889
1890 return strdup(p);
1891 }
1892
char *cg_unescape(const char *p) {
        assert(p);

        /* Strips at most one leading '_' added by cg_escape(). Unlike
         * cg_escape()'s result, the returned pointer aliases the input
         * and must NOT be free()d. */

        return (char*) (p[0] == '_' ? p + 1 : p);
}
1904
1905 #define CONTROLLER_VALID \
1906 DIGITS LETTERS \
1907 "_"
1908
1909 bool cg_controller_is_valid(const char *p) {
1910 const char *t, *s;
1911
1912 if (!p)
1913 return false;
1914
1915 if (streq(p, SYSTEMD_CGROUP_CONTROLLER))
1916 return true;
1917
1918 s = startswith(p, "name=");
1919 if (s)
1920 p = s;
1921
1922 if (IN_SET(*p, 0, '_'))
1923 return false;
1924
1925 for (t = p; *t; t++)
1926 if (!strchr(CONTROLLER_VALID, *t))
1927 return false;
1928
1929 if (t - p > FILENAME_MAX)
1930 return false;
1931
1932 return true;
1933 }
1934
/* Turns a slice unit name (e.g. "foo-bar.slice") into the cgroup path it maps
 * to (e.g. "foo.slice/foo_2dbar.slice"): each dash opens one level of nesting.
 * The root slice maps to the empty path. Returns -EINVAL for malformed names. */
int cg_slice_to_path(const char *unit, char **ret) {
        _cleanup_free_ char *p = NULL, *s = NULL, *e = NULL;
        const char *dash;
        int r;

        assert(unit);
        assert(ret);

        if (streq(unit, SPECIAL_ROOT_SLICE)) {
                char *x;

                /* The root slice lives at the very top of the tree, i.e. the empty path */
                x = strdup("");
                if (!x)
                        return -ENOMEM;
                *ret = x;
                return 0;
        }

        if (!unit_name_is_valid(unit, UNIT_NAME_PLAIN))
                return -EINVAL;

        if (!endswith(unit, ".slice"))
                return -EINVAL;

        r = unit_name_to_prefix(unit, &p);
        if (r < 0)
                return r;

        dash = strchr(p, '-');

        /* Don't allow initial dashes */
        if (dash == p)
                return -EINVAL;

        /* For every dash, emit one parent slice path component */
        while (dash) {
                _cleanup_free_ char *escaped = NULL;
                char n[dash - p + sizeof(".slice")];

#if HAS_FEATURE_MEMORY_SANITIZER
                /* msan doesn't instrument stpncpy, so it thinks
                 * n is later used unitialized:
                 * https://github.com/google/sanitizers/issues/926
                 */
                zero(n);
#endif

                /* Don't allow trailing or double dashes */
                if (IN_SET(dash[1], 0, '-'))
                        return -EINVAL;

                /* Build the parent slice name for this nesting level, e.g. "foo.slice" */
                strcpy(stpncpy(n, p, dash - p), ".slice");
                if (!unit_name_is_valid(n, UNIT_NAME_PLAIN))
                        return -EINVAL;

                escaped = cg_escape(n);
                if (!escaped)
                        return -ENOMEM;

                if (!strextend(&s, escaped, "/", NULL))
                        return -ENOMEM;

                dash = strchr(dash+1, '-');
        }

        /* Finally, append the escaped full unit name itself */
        e = cg_escape(unit);
        if (!e)
                return -ENOMEM;

        if (!strextend(&s, e, NULL))
                return -ENOMEM;

        *ret = TAKE_PTR(s);

        return 0;
}
2010
2011 int cg_set_attribute(const char *controller, const char *path, const char *attribute, const char *value) {
2012 _cleanup_free_ char *p = NULL;
2013 int r;
2014
2015 r = cg_get_path(controller, path, attribute, &p);
2016 if (r < 0)
2017 return r;
2018
2019 return write_string_file(p, value, WRITE_STRING_FILE_DISABLE_BUFFER);
2020 }
2021
2022 int cg_get_attribute(const char *controller, const char *path, const char *attribute, char **ret) {
2023 _cleanup_free_ char *p = NULL;
2024 int r;
2025
2026 r = cg_get_path(controller, path, attribute, &p);
2027 if (r < 0)
2028 return r;
2029
2030 return read_one_line_file(p, ret);
2031 }
2032
int cg_get_keyed_attribute(
                const char *controller,
                const char *path,
                const char *attribute,
                char **keys,
                char **ret_values) {

        _cleanup_free_ char *filename = NULL, *contents = NULL;
        const char *p;
        size_t n, i, n_done = 0;
        char **v;
        int r;

        /* Reads one or more fields of a cgroup v2 keyed attribute file. The 'keys' parameter should be an strv with
         * all keys to retrieve. The 'ret_values' parameter should be a pre-allocated char* array with the same number
         * of entries as 'keys'. On success each entry will be set to the (newly allocated) value of the matching key.
         *
         * If the attribute file doesn't exist at all returns ENOENT, if any key is not found returns ENXIO. */

        r = cg_get_path(controller, path, attribute, &filename);
        if (r < 0)
                return r;

        r = read_full_file(filename, &contents, NULL);
        if (r < 0)
                return r;

        n = strv_length(keys);
        if (n == 0) /* No keys to retrieve? That's easy, we are done then */
                return 0;

        /* Let's build this up in a temporary array for now in order not to clobber the return parameter on failure */
        v = newa0(char*, n);

        /* Scan the file line by line, matching each line's first word against the not-yet-found keys */
        for (p = contents; *p;) {
                const char *w = NULL;

                for (i = 0; i < n; i++)
                        if (!v[i]) {
                                w = first_word(p, keys[i]);
                                if (w)
                                        break;
                        }

                if (w) {
                        size_t l;

                        /* Capture the rest of the line (up to the newline) as this key's value */
                        l = strcspn(w, NEWLINE);
                        v[i] = strndup(w, l);
                        if (!v[i]) {
                                r = -ENOMEM;
                                goto fail;
                        }

                        n_done++;
                        if (n_done >= n)
                                goto done;

                        p = w + l;
                } else
                        p += strcspn(p, NEWLINE);

                p += strspn(p, NEWLINE);
        }

        /* Reached EOF with at least one key still unmatched */
        r = -ENXIO;

fail:
        /* Release whatever values were collected so far */
        for (i = 0; i < n; i++)
                free(v[i]);

        return r;

done:
        /* All keys found — transfer ownership of the values to the caller */
        memcpy(ret_values, v, sizeof(char*) * n);
        return 0;

}
2111
2112 int cg_create_everywhere(CGroupMask supported, CGroupMask mask, const char *path) {
2113 CGroupController c;
2114 CGroupMask done;
2115 bool created;
2116 int r;
2117
2118 /* This one will create a cgroup in our private tree, but also
2119 * duplicate it in the trees specified in mask, and remove it
2120 * in all others.
2121 *
2122 * Returns 0 if the group already existed in the systemd hierarchy,
2123 * 1 on success, negative otherwise.
2124 */
2125
2126 /* First create the cgroup in our own hierarchy. */
2127 r = cg_create(SYSTEMD_CGROUP_CONTROLLER, path);
2128 if (r < 0)
2129 return r;
2130 created = r;
2131
2132 /* If we are in the unified hierarchy, we are done now */
2133 r = cg_all_unified();
2134 if (r < 0)
2135 return r;
2136 if (r > 0)
2137 return created;
2138
2139 supported &= CGROUP_MASK_V1;
2140 mask = CGROUP_MASK_EXTEND_JOINED(mask);
2141 done = 0;
2142
2143 /* Otherwise, do the same in the other hierarchies */
2144 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
2145 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
2146 const char *n;
2147
2148 if (!FLAGS_SET(supported, bit))
2149 continue;
2150
2151 if (FLAGS_SET(done, bit))
2152 continue;
2153
2154 n = cgroup_controller_to_string(c);
2155 if (FLAGS_SET(mask, bit))
2156 (void) cg_create(n, path);
2157 else
2158 (void) cg_trim(n, path, true);
2159
2160 done |= CGROUP_MASK_EXTEND_JOINED(bit);
2161 }
2162
2163 return created;
2164 }
2165
2166 int cg_attach_everywhere(CGroupMask supported, const char *path, pid_t pid, cg_migrate_callback_t path_callback, void *userdata) {
2167 CGroupController c;
2168 CGroupMask done;
2169 int r;
2170
2171 r = cg_attach(SYSTEMD_CGROUP_CONTROLLER, path, pid);
2172 if (r < 0)
2173 return r;
2174
2175 r = cg_all_unified();
2176 if (r < 0)
2177 return r;
2178 if (r > 0)
2179 return 0;
2180
2181 supported &= CGROUP_MASK_V1;
2182 done = 0;
2183
2184 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
2185 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
2186 const char *p = NULL;
2187
2188 if (!FLAGS_SET(supported, bit))
2189 continue;
2190
2191 if (FLAGS_SET(done, bit))
2192 continue;
2193
2194 if (path_callback)
2195 p = path_callback(bit, userdata);
2196 if (!p)
2197 p = path;
2198
2199 (void) cg_attach_fallback(cgroup_controller_to_string(c), p, pid);
2200 done |= CGROUP_MASK_EXTEND_JOINED(bit);
2201 }
2202
2203 return 0;
2204 }
2205
2206 int cg_attach_many_everywhere(CGroupMask supported, const char *path, Set* pids, cg_migrate_callback_t path_callback, void *userdata) {
2207 Iterator i;
2208 void *pidp;
2209 int r = 0;
2210
2211 SET_FOREACH(pidp, pids, i) {
2212 pid_t pid = PTR_TO_PID(pidp);
2213 int q;
2214
2215 q = cg_attach_everywhere(supported, path, pid, path_callback, userdata);
2216 if (q < 0 && r >= 0)
2217 r = q;
2218 }
2219
2220 return r;
2221 }
2222
2223 int cg_migrate_everywhere(CGroupMask supported, const char *from, const char *to, cg_migrate_callback_t to_callback, void *userdata) {
2224 CGroupController c;
2225 CGroupMask done;
2226 int r = 0, q;
2227
2228 if (!path_equal(from, to)) {
2229 r = cg_migrate_recursive(SYSTEMD_CGROUP_CONTROLLER, from, SYSTEMD_CGROUP_CONTROLLER, to, CGROUP_REMOVE);
2230 if (r < 0)
2231 return r;
2232 }
2233
2234 q = cg_all_unified();
2235 if (q < 0)
2236 return q;
2237 if (q > 0)
2238 return r;
2239
2240 supported &= CGROUP_MASK_V1;
2241 done = 0;
2242
2243 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
2244 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
2245 const char *p = NULL;
2246
2247 if (!FLAGS_SET(supported, bit))
2248 continue;
2249
2250 if (FLAGS_SET(done, bit))
2251 continue;
2252
2253 if (to_callback)
2254 p = to_callback(bit, userdata);
2255 if (!p)
2256 p = to;
2257
2258 (void) cg_migrate_recursive_fallback(SYSTEMD_CGROUP_CONTROLLER, to, cgroup_controller_to_string(c), p, 0);
2259 done |= CGROUP_MASK_EXTEND_JOINED(bit);
2260 }
2261
2262 return r;
2263 }
2264
2265 int cg_trim_everywhere(CGroupMask supported, const char *path, bool delete_root) {
2266 CGroupController c;
2267 CGroupMask done;
2268 int r, q;
2269
2270 r = cg_trim(SYSTEMD_CGROUP_CONTROLLER, path, delete_root);
2271 if (r < 0)
2272 return r;
2273
2274 q = cg_all_unified();
2275 if (q < 0)
2276 return q;
2277 if (q > 0)
2278 return r;
2279
2280 supported &= CGROUP_MASK_V1;
2281 done = 0;
2282
2283 for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
2284 CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
2285
2286 if (!FLAGS_SET(supported, bit))
2287 continue;
2288
2289 if (FLAGS_SET(done, bit))
2290 continue;
2291
2292 (void) cg_trim(cgroup_controller_to_string(c), path, delete_root);
2293 done |= CGROUP_MASK_EXTEND_JOINED(bit);
2294 }
2295
2296 return r;
2297 }
2298
/* Renders a controller mask as a space-separated list of controller names.
 * For the empty mask, *ret is set to NULL. */
int cg_mask_to_string(CGroupMask mask, char **ret) {
        _cleanup_free_ char *s = NULL;
        size_t n = 0, allocated = 0;
        bool space = false;
        CGroupController c;

        assert(ret);

        if (mask == 0) {
                *ret = NULL;
                return 0;
        }

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                const char *k;
                size_t l;

                if (!FLAGS_SET(mask, CGROUP_CONTROLLER_TO_MASK(c)))
                        continue;

                k = cgroup_controller_to_string(c);
                l = strlen(k);

                /* Reserve room for a separating space (after the first entry), the name, and a NUL */
                if (!GREEDY_REALLOC(s, allocated, n + space + l + 1))
                        return -ENOMEM;

                if (space)
                        s[n] = ' ';
                memcpy(s + n + space, k, l);
                n += space + l;

                space = true;
        }

        /* mask != 0 guarantees at least one iteration appended something */
        assert(s);

        s[n] = 0;
        *ret = TAKE_PTR(s);

        return 0;
}
2340
2341 int cg_mask_from_string(const char *value, CGroupMask *ret) {
2342 CGroupMask m = 0;
2343
2344 assert(ret);
2345 assert(value);
2346
2347 for (;;) {
2348 _cleanup_free_ char *n = NULL;
2349 CGroupController v;
2350 int r;
2351
2352 r = extract_first_word(&value, &n, NULL, 0);
2353 if (r < 0)
2354 return r;
2355 if (r == 0)
2356 break;
2357
2358 v = cgroup_controller_from_string(n);
2359 if (v < 0)
2360 continue;
2361
2362 m |= CGROUP_CONTROLLER_TO_MASK(v);
2363 }
2364
2365 *ret = m;
2366 return 0;
2367 }
2368
int cg_mask_supported(CGroupMask *ret) {
        CGroupMask mask;
        int r;

        /* Determines the mask of supported cgroup controllers. Only includes controllers we can make sense of and that
         * are actually accessible. Only covers real controllers, i.e. not the CGROUP_CONTROLLER_BPF_xyz
         * pseudo-controllers. */

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r > 0) {
                _cleanup_free_ char *root = NULL, *controllers = NULL, *path = NULL;

                /* In the unified hierarchy we can read the supported
                 * and accessible controllers from the top-level
                 * cgroup attribute */

                r = cg_get_root_path(&root);
                if (r < 0)
                        return r;

                r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, root, "cgroup.controllers", &path);
                if (r < 0)
                        return r;

                r = read_one_line_file(path, &controllers);
                if (r < 0)
                        return r;

                r = cg_mask_from_string(controllers, &mask);
                if (r < 0)
                        return r;

                /* Currently, we support the cpu, memory, io and pids controller in the unified hierarchy, mask
                 * everything else off. */
                mask &= CGROUP_MASK_V2;

        } else {
                CGroupController c;

                /* In the legacy hierarchy, we check which hierarchies are mounted. */

                mask = 0;
                for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                        CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                        const char *n;

                        /* Skip controllers that have no v1 hierarchy */
                        if (!FLAGS_SET(CGROUP_MASK_V1, bit))
                                continue;

                        n = cgroup_controller_to_string(c);
                        if (controller_is_accessible(n) >= 0)
                                mask |= bit;
                }
        }

        *ret = mask;
        return 0;
}
2429
2430 int cg_kernel_controllers(Set **ret) {
2431 _cleanup_set_free_free_ Set *controllers = NULL;
2432 _cleanup_fclose_ FILE *f = NULL;
2433 int r;
2434
2435 assert(ret);
2436
2437 /* Determines the full list of kernel-known controllers. Might include controllers we don't actually support
2438 * and controllers that aren't currently accessible (because not mounted). This does not include "name="
2439 * pseudo-controllers. */
2440
2441 controllers = set_new(&string_hash_ops);
2442 if (!controllers)
2443 return -ENOMEM;
2444
2445 r = fopen_unlocked("/proc/cgroups", "re", &f);
2446 if (r == -ENOENT) {
2447 *ret = NULL;
2448 return 0;
2449 }
2450 if (r < 0)
2451 return r;
2452
2453 /* Ignore the header line */
2454 (void) read_line(f, (size_t) -1, NULL);
2455
2456 for (;;) {
2457 char *controller;
2458 int enabled = 0;
2459
2460 errno = 0;
2461 if (fscanf(f, "%ms %*i %*i %i", &controller, &enabled) != 2) {
2462
2463 if (feof(f))
2464 break;
2465
2466 if (ferror(f) && errno > 0)
2467 return -errno;
2468
2469 return -EBADMSG;
2470 }
2471
2472 if (!enabled) {
2473 free(controller);
2474 continue;
2475 }
2476
2477 if (!cg_controller_is_valid(controller)) {
2478 free(controller);
2479 return -EBADMSG;
2480 }
2481
2482 r = set_consume(controllers, controller);
2483 if (r < 0)
2484 return r;
2485 }
2486
2487 *ret = TAKE_PTR(controllers);
2488
2489 return 0;
2490 }
2491
/* Cached result of the cgroup hierarchy layout detection; thread-local, so no locking is needed. */
static thread_local CGroupUnified unified_cache = CGROUP_UNIFIED_UNKNOWN;

/* The hybrid mode was initially implemented in v232 and simply mounted cgroup2 on /sys/fs/cgroup/systemd. This
 * unfortunately broke other tools (such as docker) which expected the v1 "name=systemd" hierarchy on
 * /sys/fs/cgroup/systemd. From v233 and on, the hybrid mode mounts v2 on /sys/fs/cgroup/unified and maintains
 * "name=systemd" hierarchy on /sys/fs/cgroup/systemd for compatibility with other tools.
 *
 * To keep live upgrade working, we detect and support v232 layout. When v232 layout is detected, to keep cgroup v2
 * process management but disable the compat dual layout, we return %true on
 * cg_unified_controller(SYSTEMD_CGROUP_CONTROLLER) and %false on cg_hybrid_unified().
 */
static thread_local bool unified_systemd_v232;
2504
2505 static int cg_unified_update(void) {
2506
2507 struct statfs fs;
2508
2509 /* Checks if we support the unified hierarchy. Returns an
2510 * error when the cgroup hierarchies aren't mounted yet or we
2511 * have any other trouble determining if the unified hierarchy
2512 * is supported. */
2513
2514 if (unified_cache >= CGROUP_UNIFIED_NONE)
2515 return 0;
2516
2517 if (statfs("/sys/fs/cgroup/", &fs) < 0)
2518 return log_debug_errno(errno, "statfs(\"/sys/fs/cgroup/\") failed: %m");
2519
2520 if (F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2521 log_debug("Found cgroup2 on /sys/fs/cgroup/, full unified hierarchy");
2522 unified_cache = CGROUP_UNIFIED_ALL;
2523 } else if (F_TYPE_EQUAL(fs.f_type, TMPFS_MAGIC)) {
2524 if (statfs("/sys/fs/cgroup/unified/", &fs) == 0 &&
2525 F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2526 log_debug("Found cgroup2 on /sys/fs/cgroup/unified, unified hierarchy for systemd controller");
2527 unified_cache = CGROUP_UNIFIED_SYSTEMD;
2528 unified_systemd_v232 = false;
2529 } else {
2530 if (statfs("/sys/fs/cgroup/systemd/", &fs) < 0)
2531 return log_debug_errno(errno, "statfs(\"/sys/fs/cgroup/systemd\" failed: %m");
2532
2533 if (F_TYPE_EQUAL(fs.f_type, CGROUP2_SUPER_MAGIC)) {
2534 log_debug("Found cgroup2 on /sys/fs/cgroup/systemd, unified hierarchy for systemd controller (v232 variant)");
2535 unified_cache = CGROUP_UNIFIED_SYSTEMD;
2536 unified_systemd_v232 = true;
2537 } else if (F_TYPE_EQUAL(fs.f_type, CGROUP_SUPER_MAGIC)) {
2538 log_debug("Found cgroup on /sys/fs/cgroup/systemd, legacy hierarchy");
2539 unified_cache = CGROUP_UNIFIED_NONE;
2540 } else {
2541 log_debug("Unexpected filesystem type %llx mounted on /sys/fs/cgroup/systemd, assuming legacy hierarchy",
2542 (unsigned long long) fs.f_type);
2543 unified_cache = CGROUP_UNIFIED_NONE;
2544 }
2545 }
2546 } else
2547 return log_debug_errno(SYNTHETIC_ERRNO(ENOMEDIUM),
2548 "Unknown filesystem type %llx mounted on /sys/fs/cgroup.",
2549 (unsigned long long)fs.f_type);
2550
2551 return 0;
2552 }
2553
2554 int cg_unified_controller(const char *controller) {
2555 int r;
2556
2557 r = cg_unified_update();
2558 if (r < 0)
2559 return r;
2560
2561 if (unified_cache == CGROUP_UNIFIED_NONE)
2562 return false;
2563
2564 if (unified_cache >= CGROUP_UNIFIED_ALL)
2565 return true;
2566
2567 return streq_ptr(controller, SYSTEMD_CGROUP_CONTROLLER);
2568 }
2569
2570 int cg_all_unified(void) {
2571 int r;
2572
2573 r = cg_unified_update();
2574 if (r < 0)
2575 return r;
2576
2577 return unified_cache >= CGROUP_UNIFIED_ALL;
2578 }
2579
2580 int cg_hybrid_unified(void) {
2581 int r;
2582
2583 r = cg_unified_update();
2584 if (r < 0)
2585 return r;
2586
2587 return unified_cache == CGROUP_UNIFIED_SYSTEMD && !unified_systemd_v232;
2588 }
2589
2590 int cg_unified_flush(void) {
2591 unified_cache = CGROUP_UNIFIED_UNKNOWN;
2592
2593 return cg_unified_update();
2594 }
2595
/* Enables (or disables) the cgroup v2 controllers selected by 'mask' (out of
 * 'supported') for the children of cgroup 'p', by writing "+name"/"-name"
 * tokens into its cgroup.subtree_control file. On return *ret_result_mask
 * (if non-NULL) holds the set of controllers believed enabled afterwards. */
int cg_enable_everywhere(
                CGroupMask supported,
                CGroupMask mask,
                const char *p,
                CGroupMask *ret_result_mask) {

        _cleanup_fclose_ FILE *f = NULL;
        _cleanup_free_ char *fs = NULL;
        CGroupController c;
        CGroupMask ret = 0;
        int r;

        assert(p);

        if (supported == 0) {
                if (ret_result_mask)
                        *ret_result_mask = 0;
                return 0;
        }

        r = cg_all_unified();
        if (r < 0)
                return r;
        if (r == 0) {
                /* On the legacy hierarchy there's no concept of "enabling" controllers in cgroups defined. Let's claim
                 * complete success right away. (If you wonder why we return the full mask here, rather than zero: the
                 * caller tends to use the returned mask later on to compare if all controllers where properly joined,
                 * and if not requeues realization. This use is the primary purpose of the return value, hence let's
                 * minimize surprises here and reduce triggers for re-realization by always saying we fully
                 * succeeded.) */
                if (ret_result_mask)
                        *ret_result_mask = mask & supported & CGROUP_MASK_V2; /* If you wonder why we mask this with
                                                                               * CGROUP_MASK_V2: The 'supported' mask
                                                                               * might contain pure-V1 or BPF
                                                                               * controllers, and we never want to
                                                                               * claim that we could enable those with
                                                                               * cgroup.subtree_control */
                return 0;
        }

        r = cg_get_path(SYSTEMD_CGROUP_CONTROLLER, p, "cgroup.subtree_control", &fs);
        if (r < 0)
                return r;

        for (c = 0; c < _CGROUP_CONTROLLER_MAX; c++) {
                CGroupMask bit = CGROUP_CONTROLLER_TO_MASK(c);
                const char *n;

                if (!FLAGS_SET(CGROUP_MASK_V2, bit))
                        continue;

                if (!FLAGS_SET(supported, bit))
                        continue;

                n = cgroup_controller_to_string(c);
                {
                        /* Build "+name" or "-name", the syntax cgroup.subtree_control expects */
                        char s[1 + strlen(n) + 1];

                        s[0] = FLAGS_SET(mask, bit) ? '+' : '-';
                        strcpy(s + 1, n);

                        /* Open the file lazily, on the first controller we actually touch */
                        if (!f) {
                                f = fopen(fs, "we");
                                if (!f)
                                        return log_debug_errno(errno, "Failed to open cgroup.subtree_control file of %s: %m", p);
                        }

                        r = write_string_stream(f, s, WRITE_STRING_FILE_DISABLE_BUFFER);
                        if (r < 0) {
                                log_debug_errno(r, "Failed to %s controller %s for %s (%s): %m",
                                                FLAGS_SET(mask, bit) ? "enable" : "disable", n, p, fs);
                                clearerr(f);

                                /* If we can't turn off a controller, leave it on in the reported resulting mask. This
                                 * happens for example when we attempt to turn off a controller up in the tree that is
                                 * used down in the tree. */
                                if (!FLAGS_SET(mask, bit) && r == -EBUSY) /* You might wonder why we check for EBUSY
                                                                           * only here, and not follow the same logic
                                                                           * for other errors such as EINVAL or
                                                                           * EOPNOTSUPP or anything else. That's
                                                                           * because EBUSY indicates that the
                                                                           * controllers is currently enabled and
                                                                           * cannot be disabled because something down
                                                                           * the hierarchy is still using it. Any other
                                                                           * error most likely means something like "I
                                                                           * never heard of this controller" or
                                                                           * similar. In the former case it's hence
                                                                           * safe to assume the controller is still on
                                                                           * after the failed operation, while in the
                                                                           * latter case it's safer to assume the
                                                                           * controller is unknown and hence certainly
                                                                           * not enabled. */
                                        ret |= bit;
                        } else {
                                /* Otherwise, if we managed to turn on a controller, set the bit reflecting that. */
                                if (FLAGS_SET(mask, bit))
                                        ret |= bit;
                        }
                }
        }

        /* Let's return the precise set of controllers now enabled for the cgroup. */
        if (ret_result_mask)
                *ret_result_mask = ret;

        return 0;
}
2703
2704 bool cg_is_unified_wanted(void) {
2705 static thread_local int wanted = -1;
2706 int r;
2707 bool b;
2708 const bool is_default = DEFAULT_HIERARCHY == CGROUP_UNIFIED_ALL;
2709 _cleanup_free_ char *c = NULL;
2710
2711 /* If we have a cached value, return that. */
2712 if (wanted >= 0)
2713 return wanted;
2714
2715 /* If the hierarchy is already mounted, then follow whatever
2716 * was chosen for it. */
2717 if (cg_unified_flush() >= 0)
2718 return (wanted = unified_cache >= CGROUP_UNIFIED_ALL);
2719
2720 /* If we were explicitly passed systemd.unified_cgroup_hierarchy,
2721 * respect that. */
2722 r = proc_cmdline_get_bool("systemd.unified_cgroup_hierarchy", &b);
2723 if (r > 0)
2724 return (wanted = b);
2725
2726 /* If we passed cgroup_no_v1=all with no other instructions, it seems
2727 * highly unlikely that we want to use hybrid or legacy hierarchy. */
2728 r = proc_cmdline_get_key("cgroup_no_v1", 0, &c);
2729 if (r > 0 && streq_ptr(c, "all"))
2730 return (wanted = true);
2731
2732 return (wanted = is_default);
2733 }
2734
2735 bool cg_is_legacy_wanted(void) {
2736 static thread_local int wanted = -1;
2737
2738 /* If we have a cached value, return that. */
2739 if (wanted >= 0)
2740 return wanted;
2741
2742 /* Check if we have cgroup v2 already mounted. */
2743 if (cg_unified_flush() >= 0 &&
2744 unified_cache == CGROUP_UNIFIED_ALL)
2745 return (wanted = false);
2746
2747 /* Otherwise, assume that at least partial legacy is wanted,
2748 * since cgroup v2 should already be mounted at this point. */
2749 return (wanted = true);
2750 }
2751
/* Returns true if the hybrid cgroup layout (v2 mounted for systemd's own
 * tracking, v1 controllers for resource management) is desired. Result is
 * cached per thread. */
bool cg_is_hybrid_wanted(void) {
        static thread_local int wanted = -1;
        int r;
        bool b;
        const bool is_default = DEFAULT_HIERARCHY >= CGROUP_UNIFIED_SYSTEMD;
        /* We default to true if the default is "hybrid", obviously,
         * but also when the default is "unified", because if we get
         * called, it means that unified hierarchy was not mounted. */

        /* If we have a cached value, return that. */
        if (wanted >= 0)
                return wanted;

        /* If the hierarchy is already mounted, then follow whatever
         * was chosen for it. */
        if (cg_unified_flush() >= 0 &&
            unified_cache == CGROUP_UNIFIED_ALL)
                return (wanted = false);

        /* Otherwise, let's see what the kernel command line has to say.
         * Since checking is expensive, cache a non-error result. */
        r = proc_cmdline_get_bool("systemd.legacy_systemd_cgroup_controller", &b);

        /* NOTE(review): the comment above says a *non-error* result is
         * cached, but the assignment below caches the default even when
         * r < 0 (i.e. reading the command line failed) — confirm whether
         * caching on error is intended. */

        /* The meaning of the kernel option is reversed wrt. to the return value
         * of this function, hence the negation. */
        return (wanted = r > 0 ? !b : is_default);
}
2779
2780 int cg_weight_parse(const char *s, uint64_t *ret) {
2781 uint64_t u;
2782 int r;
2783
2784 if (isempty(s)) {
2785 *ret = CGROUP_WEIGHT_INVALID;
2786 return 0;
2787 }
2788
2789 r = safe_atou64(s, &u);
2790 if (r < 0)
2791 return r;
2792
2793 if (u < CGROUP_WEIGHT_MIN || u > CGROUP_WEIGHT_MAX)
2794 return -ERANGE;
2795
2796 *ret = u;
2797 return 0;
2798 }
2799
/* Default IO limits: every limit type starts out unrestricted. */
const uint64_t cgroup_io_limit_defaults[_CGROUP_IO_LIMIT_TYPE_MAX] = {
        [CGROUP_IO_RBPS_MAX] = CGROUP_LIMIT_MAX,
        [CGROUP_IO_WBPS_MAX] = CGROUP_LIMIT_MAX,
        [CGROUP_IO_RIOPS_MAX] = CGROUP_LIMIT_MAX,
        [CGROUP_IO_WIOPS_MAX] = CGROUP_LIMIT_MAX,
};
2806
/* Maps CGroupIOLimitType values to their unit-file property names; the
 * DEFINE_STRING_TABLE_LOOKUP macro below generates the to/from-string
 * conversion functions from this table. */
static const char* const cgroup_io_limit_type_table[_CGROUP_IO_LIMIT_TYPE_MAX] = {
        [CGROUP_IO_RBPS_MAX] = "IOReadBandwidthMax",
        [CGROUP_IO_WBPS_MAX] = "IOWriteBandwidthMax",
        [CGROUP_IO_RIOPS_MAX] = "IOReadIOPSMax",
        [CGROUP_IO_WIOPS_MAX] = "IOWriteIOPSMax",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_io_limit_type, CGroupIOLimitType);
2815
2816 int cg_cpu_shares_parse(const char *s, uint64_t *ret) {
2817 uint64_t u;
2818 int r;
2819
2820 if (isempty(s)) {
2821 *ret = CGROUP_CPU_SHARES_INVALID;
2822 return 0;
2823 }
2824
2825 r = safe_atou64(s, &u);
2826 if (r < 0)
2827 return r;
2828
2829 if (u < CGROUP_CPU_SHARES_MIN || u > CGROUP_CPU_SHARES_MAX)
2830 return -ERANGE;
2831
2832 *ret = u;
2833 return 0;
2834 }
2835
2836 int cg_blkio_weight_parse(const char *s, uint64_t *ret) {
2837 uint64_t u;
2838 int r;
2839
2840 if (isempty(s)) {
2841 *ret = CGROUP_BLKIO_WEIGHT_INVALID;
2842 return 0;
2843 }
2844
2845 r = safe_atou64(s, &u);
2846 if (r < 0)
2847 return r;
2848
2849 if (u < CGROUP_BLKIO_WEIGHT_MIN || u > CGROUP_BLKIO_WEIGHT_MAX)
2850 return -ERANGE;
2851
2852 *ret = u;
2853 return 0;
2854 }
2855
2856 bool is_cgroup_fs(const struct statfs *s) {
2857 return is_fs_type(s, CGROUP_SUPER_MAGIC) ||
2858 is_fs_type(s, CGROUP2_SUPER_MAGIC);
2859 }
2860
2861 bool fd_is_cgroup_fs(int fd) {
2862 struct statfs s;
2863
2864 if (fstatfs(fd, &s) < 0)
2865 return -errno;
2866
2867 return is_cgroup_fs(&s);
2868 }
2869
/* Maps CGroupController values to their kernel/internal names; the
 * DEFINE_STRING_TABLE_LOOKUP macro below generates the to/from-string
 * conversion functions from this table. The "bpf-*" entries are
 * pseudo-controllers implemented via BPF rather than kernel cgroup
 * controllers. */
static const char *const cgroup_controller_table[_CGROUP_CONTROLLER_MAX] = {
        [CGROUP_CONTROLLER_CPU] = "cpu",
        [CGROUP_CONTROLLER_CPUACCT] = "cpuacct",
        [CGROUP_CONTROLLER_IO] = "io",
        [CGROUP_CONTROLLER_BLKIO] = "blkio",
        [CGROUP_CONTROLLER_MEMORY] = "memory",
        [CGROUP_CONTROLLER_DEVICES] = "devices",
        [CGROUP_CONTROLLER_PIDS] = "pids",
        [CGROUP_CONTROLLER_BPF_FIREWALL] = "bpf-firewall",
        [CGROUP_CONTROLLER_BPF_DEVICES] = "bpf-devices",
};

DEFINE_STRING_TABLE_LOOKUP(cgroup_controller, CGroupController);
2883
2884 CGroupMask get_cpu_accounting_mask(void) {
2885 static CGroupMask needed_mask = (CGroupMask) -1;
2886
2887 /* On kernel ≥4.15 with unified hierarchy, cpu.stat's usage_usec is
2888 * provided externally from the CPU controller, which means we don't
2889 * need to enable the CPU controller just to get metrics. This is good,
2890 * because enabling the CPU controller comes at a minor performance
2891 * hit, especially when it's propagated deep into large hierarchies.
2892 * There's also no separate CPU accounting controller available within
2893 * a unified hierarchy.
2894 *
2895 * This combination of factors results in the desired cgroup mask to
2896 * enable for CPU accounting varying as follows:
2897 *
2898 * ╔═════════════════════╤═════════════════════╗
2899 * ║ Linux ≥4.15 │ Linux <4.15 ║
2900 * ╔═══════════════╬═════════════════════╪═════════════════════╣
2901 * ║ Unified ║ nothing │ CGROUP_MASK_CPU ║
2902 * ╟───────────────╫─────────────────────┼─────────────────────╢
2903 * ║ Hybrid/Legacy ║ CGROUP_MASK_CPUACCT │ CGROUP_MASK_CPUACCT ║
2904 * ╚═══════════════╩═════════════════════╧═════════════════════╝
2905 *
2906 * We check kernel version here instead of manually checking whether
2907 * cpu.stat is present for every cgroup, as that check in itself would
2908 * already be fairly expensive.
2909 *
2910 * Kernels where this patch has been backported will therefore have the
2911 * CPU controller enabled unnecessarily. This is more expensive than
2912 * necessary, but harmless. ☺️
2913 */
2914
2915 if (needed_mask == (CGroupMask) -1) {
2916 if (cg_all_unified()) {
2917 struct utsname u;
2918 assert_se(uname(&u) >= 0);
2919
2920 if (str_verscmp(u.release, "4.15") < 0)
2921 needed_mask = CGROUP_MASK_CPU;
2922 else
2923 needed_mask = 0;
2924 } else
2925 needed_mask = CGROUP_MASK_CPUACCT;
2926 }
2927
2928 return needed_mask;
2929 }
2930
2931 bool cpu_accounting_is_cheap(void) {
2932 return get_cpu_accounting_mask() == 0;
2933 }