1 /* SPDX-License-Identifier: LGPL-2.1-or-later */
5 #include "cgroup-setup.h"
6 #include "cgroup-util.h"
7 #include "errno-util.h"
11 #include "missing_threads.h"
13 #include "parse-util.h"
14 #include "path-util.h"
15 #include "proc-cmdline.h"
16 #include "process-util.h"
17 #include "recurse-dir.h"
18 #include "stdio-util.h"
19 #include "string-util.h"
20 #include "user-util.h"
/* Parses /proc/cgroups and reports whether any enabled kernel cgroup controller
 * we care about is currently attached to a cgroup v1 hierarchy (non-zero
 * hierarchy id). NOTE(review): this excerpt is fragmented and interior lines
 * are missing — only comments were added, code is untouched. */
23 static int cg_any_controller_used_for_v1(void) {
24 _cleanup_free_
char *buf
= NULL
;
25 _cleanup_strv_free_
char **lines
= NULL
;
/* Read the whole of /proc/cgroups into memory, then split it into lines. */
28 r
= read_full_virtual_file("/proc/cgroups", &buf
, NULL
);
30 return log_debug_errno(r
, "Could not read /proc/cgroups, ignoring: %m");
32 r
= strv_split_newlines_full(&lines
, buf
, 0);
36 /* The intention of this is to check if the fully unified cgroup tree setup is possible, meaning all
37 * enabled kernel cgroup controllers are currently not in use by cgroup1. For reference:
38 * https://systemd.io/CGROUP_DELEGATION/#three-different-tree-setups-
40 * Note that this is typically only useful to check inside a container where we don't know what
41 * cgroup tree setup is in use by the host; if the host is using legacy or hybrid, we can't use
42 * unified since some or all controllers would be missing. This is not the best way to detect this,
43 * as whatever container manager created our container should have mounted /sys/fs/cgroup
44 * appropriately, but in case that wasn't done, we try to detect if it's possible for us to use
46 STRV_FOREACH(line
, lines
) {
/* Per-line fields of /proc/cgroups: name, hierarchy id, num cgroups, enabled flag. */
47 _cleanup_free_
char *name
= NULL
, *hierarchy_id
= NULL
, *num
= NULL
, *enabled
= NULL
;
49 /* Skip header line */
50 if (startswith(*line
, "#"))
53 const char *p
= *line
;
54 r
= extract_many_words(&p
, NULL
, 0, &name
, &hierarchy_id
, &num
, &enabled
);
56 return log_debug_errno(r
, "Error parsing /proc/cgroups line, ignoring: %m");
58 log_debug("Invalid /proc/cgroups line, ignoring.");
62 /* Ignore disabled controllers. */
63 if (streq(enabled
, "0"))
66 /* Ignore controllers we don't care about. */
67 if (cgroup_controller_from_string(name
) < 0)
70 /* Since the unified cgroup doesn't use multiple hierarchies, if any controller has a
71 * non-zero hierarchy_id that means it's in use already in a legacy (or hybrid) cgroup v1
72 * hierarchy, and can't be used in a unified cgroup. */
73 if (!streq(hierarchy_id
, "0")) {
74 log_debug("Cgroup controller %s in use by legacy v1 hierarchy.", name
);
/* Decides (and caches per-thread in 'wanted') whether the fully unified cgroup
 * v2 hierarchy should be used: follows an already-mounted hierarchy, then the
 * systemd.unified_cgroup_hierarchy= and cgroup_no_v1= kernel cmdline options,
 * and finally falls back to probing /proc/cgroups. NOTE(review): fragmented
 * excerpt, interior lines missing — comments only. */
82 bool cg_is_unified_wanted(void) {
83 static thread_local
int wanted
= -1;
86 /* If we have a cached value, return that. */
90 /* If the hierarchy is already mounted, then follow whatever was chosen for it. */
91 r
= cg_unified_cached(true);
93 return (wanted
= r
>= CGROUP_UNIFIED_ALL
);
95 /* If we were explicitly passed systemd.unified_cgroup_hierarchy, respect that. */
97 r
= proc_cmdline_get_bool("systemd.unified_cgroup_hierarchy", /* flags = */ 0, &b
);
101 /* If we passed cgroup_no_v1=all with no other instructions, it seems highly unlikely that we want to
102 * use hybrid or legacy hierarchy. */
103 _cleanup_free_
char *c
= NULL
;
104 r
= proc_cmdline_get_key("cgroup_no_v1", 0, &c
);
105 if (r
> 0 && streq_ptr(c
, "all"))
106 return (wanted
= true);
108 /* If any controller is in use as v1, don't use unified. */
109 return (wanted
= (cg_any_controller_used_for_v1() <= 0));
/* Returns whether (at least partial) legacy cgroup v1 is wanted; cached
 * per-thread. If v2 is already fully mounted the answer is false, otherwise
 * true. NOTE(review): fragmented excerpt — comments only, code untouched. */
112 bool cg_is_legacy_wanted(void) {
113 static thread_local
int wanted
= -1;
115 /* If we have a cached value, return that. */
119 /* Check if we have cgroup v2 already mounted. */
120 if (cg_unified_cached(true) == CGROUP_UNIFIED_ALL
)
121 return (wanted
= false);
123 /* Otherwise, assume that at least partial legacy is wanted,
124 * since cgroup v2 should already be mounted at this point. */
125 return (wanted
= true);
/* Returns whether the hybrid cgroup setup is wanted; cached per-thread. False
 * when unified is fully mounted; otherwise consults the
 * systemd.legacy_systemd_cgroup_controller= cmdline option (note the inverted
 * meaning: option true => hybrid false). NOTE(review): fragmented excerpt —
 * comments only, code untouched. */
128 bool cg_is_hybrid_wanted(void) {
129 static thread_local
int wanted
= -1;
132 /* If we have a cached value, return that. */
136 /* If the hierarchy is already mounted, then follow whatever was chosen for it. */
137 if (cg_unified_cached(true) == CGROUP_UNIFIED_ALL
)
138 return (wanted
= false);
140 /* Otherwise, let's see what the kernel command line has to say. Since checking is expensive, cache
141 * a non-error result.
142 * The meaning of the kernel option is reversed wrt. to the return value of this function, hence the
145 r
= proc_cmdline_get_bool("systemd.legacy_systemd_cgroup_controller", /* flags = */ 0, &b
);
147 return (wanted
= !b
);
149 /* The default hierarchy is "unified". But if this is reached, it means that unified hierarchy was
150 * not mounted, so return true too. */
151 return (wanted
= true);
/* Returns whether legacy cgroup v1 is both wanted and force-enabled via the
 * SYSTEMD_CGROUP_ENABLE_LEGACY_FORCE cmdline switch; containers follow the
 * host's hierarchy instead. NOTE(review): most of this function's body
 * (returns, closing brace) is missing from this excerpt — comments only. */
154 bool cg_is_legacy_force_enabled(void) {
157 if (!cg_is_legacy_wanted())
160 /* If in container, we have to follow host's cgroup hierarchy. */
161 if (detect_container() > 0)
164 if (proc_cmdline_get_bool("SYSTEMD_CGROUP_ENABLE_LEGACY_FORCE", /* flags = */ 0, &force
) < 0)
/* Parses a cgroup weight string into *ret, rejecting values outside
 * [CGROUP_WEIGHT_MIN, CGROUP_WEIGHT_MAX]. *ret is first initialized to
 * CGROUP_WEIGHT_INVALID — presumably the "unset" handling for NULL/empty
 * input lives on the missing lines; TODO confirm. NOTE(review): fragmented
 * excerpt — comments only, code untouched. */
170 int cg_weight_parse(const char *s
, uint64_t *ret
) {
175 *ret
= CGROUP_WEIGHT_INVALID
;
179 r
= safe_atou64(s
, &u
);
183 if (u
< CGROUP_WEIGHT_MIN
|| u
> CGROUP_WEIGHT_MAX
)
/* Like cg_weight_parse(), but additionally accepts the special string "idle",
 * which maps to CGROUP_WEIGHT_IDLE. */
190 int cg_cpu_weight_parse(const char *s
, uint64_t *ret
) {
191 if (streq_ptr(s
, "idle"))
192 return *ret
= CGROUP_WEIGHT_IDLE
;
193 return cg_weight_parse(s
, ret
);
/* Parses a legacy cgroup v1 cpu.shares value into *ret, rejecting values
 * outside [CGROUP_CPU_SHARES_MIN, CGROUP_CPU_SHARES_MAX]; *ret starts as
 * CGROUP_CPU_SHARES_INVALID. NOTE(review): fragmented excerpt — comments
 * only, code untouched. */
196 int cg_cpu_shares_parse(const char *s
, uint64_t *ret
) {
201 *ret
= CGROUP_CPU_SHARES_INVALID
;
205 r
= safe_atou64(s
, &u
);
209 if (u
< CGROUP_CPU_SHARES_MIN
|| u
> CGROUP_CPU_SHARES_MAX
)
/* Parses a legacy cgroup v1 blkio.weight value into *ret, rejecting values
 * outside [CGROUP_BLKIO_WEIGHT_MIN, CGROUP_BLKIO_WEIGHT_MAX]; *ret starts as
 * CGROUP_BLKIO_WEIGHT_INVALID. NOTE(review): fragmented excerpt — comments
 * only, code untouched. */
216 int cg_blkio_weight_parse(const char *s
, uint64_t *ret
) {
221 *ret
= CGROUP_BLKIO_WEIGHT_INVALID
;
225 r
= safe_atou64(s
, &u
);
229 if (u
< CGROUP_BLKIO_WEIGHT_MIN
|| u
> CGROUP_BLKIO_WEIGHT_MAX
)
/* recurse_dir() callback used when trimming a cgroup subtree: on leaving a
 * directory it rmdir()s it via unlinkat(AT_REMOVEDIR), ignoring
 * ENOENT/ENOTEMPTY/EBUSY and debug-logging anything else; always continues
 * recursion. NOTE(review): the function's name/opening line is missing from
 * this excerpt (presumably a static trim callback — confirm against the full
 * file); comments only, code untouched. */
237 RecurseDirEvent event
,
241 const struct dirent
*de
,
242 const struct statx
*sx
,
245 /* Failures to delete inner cgroup we ignore (but debug log in case error code is unexpected) */
246 if (event
== RECURSE_DIR_LEAVE
&&
247 de
->d_type
== DT_DIR
&&
248 unlinkat(dir_fd
, de
->d_name
, AT_REMOVEDIR
) < 0 &&
249 !IN_SET(errno
, ENOENT
, ENOTEMPTY
, EBUSY
))
250 log_debug_errno(errno
, "Failed to trim inner cgroup %s, ignoring: %m", path
);
252 return RECURSE_DIR_CONTINUE
;
/* Removes the cgroup 'path' within 'controller': recurses through the subtree
 * (via recurse_dir with ENSURE_TYPE), then rmdir()s the top-level directory
 * itself if delete_root is set and path is not the root. In hybrid mode the
 * same trim is mirrored (best-effort) onto the legacy compat hierarchy.
 * NOTE(review): fragmented excerpt, interior lines missing — comments only. */
255 int cg_trim(const char *controller
, const char *path
, bool delete_root
) {
256 _cleanup_free_
char *fs
= NULL
;
/* Translate controller+path into an fs path below /sys/fs/cgroup. */
262 r
= cg_get_path(controller
, path
, NULL
, &fs
);
270 /* n_depth_max= */ UINT_MAX
,
271 RECURSE_DIR_ENSURE_TYPE
,
274 if (r
== -ENOENT
) /* non-existing is the ultimate trimming, hence no error */
277 log_debug_errno(r
, "Failed to iterate through cgroup %s: %m", path
);
279 /* If we shall delete the top-level cgroup, then propagate the failure to do so (except if it is
280 * already gone anyway). Also, let's debug log about this failure, except if the error code is an
282 if (delete_root
&& !empty_or_root(path
) &&
283 rmdir(fs
) < 0 && errno
!= ENOENT
) {
284 if (!IN_SET(errno
, ENOTEMPTY
, EBUSY
))
285 log_debug_errno(errno
, "Failed to trim cgroup %s: %m", path
);
/* In hybrid mode, also trim the legacy compat systemd hierarchy (best effort). */
290 q
= cg_hybrid_unified();
293 if (q
> 0 && streq(controller
, SYSTEMD_CGROUP_CONTROLLER
))
294 (void) cg_trim(SYSTEMD_CGROUP_CONTROLLER_LEGACY
, path
, delete_root
);
299 /* Create a cgroup in the hierarchy of controller.
300 * Returns 0 if the group already existed, 1 on success, negative otherwise.
/* Creates path (and parent directories, mode 0755) in the given controller's
 * hierarchy; in hybrid mode also creates the matching group in the legacy
 * compat hierarchy (failure there only warns). NOTE(review): fragmented
 * excerpt, interior lines missing — comments only. */
302 int cg_create(const char *controller
, const char *path
) {
303 _cleanup_free_
char *fs
= NULL
;
306 r
= cg_get_path_and_check(controller
, path
, NULL
, &fs
);
310 r
= mkdir_parents(fs
, 0755);
/* RET_NERRNO converts the mkdir() errno into a negative return value. */
314 r
= RET_NERRNO(mkdir(fs
, 0755));
320 r
= cg_hybrid_unified();
324 if (r
> 0 && streq(controller
, SYSTEMD_CGROUP_CONTROLLER
)) {
325 r
= cg_create(SYSTEMD_CGROUP_CONTROLLER_LEGACY
, path
);
327 log_warning_errno(r
, "Failed to create compat systemd cgroup %s: %m", path
);
/* Convenience wrapper: cg_create() the cgroup, then cg_attach() the given pid
 * to it. Per the visible comment, the cgroup is not removed if the attach
 * fails. NOTE(review): fragmented excerpt, interior lines missing — comments
 * only. */
333 int cg_create_and_attach(const char *controller
, const char *path
, pid_t pid
) {
338 r
= cg_create(controller
, path
);
342 q
= cg_attach(controller
, path
, pid
);
346 /* This does not remove the cgroup on failure */
/* Moves 'pid' (0 means the calling process, via getpid_cached()) into the
 * cgroup 'path' of 'controller' by writing the PID to its cgroup.procs file.
 * EOPNOTSUPP on a threaded cgroup is translated to a recognizable error; in
 * hybrid mode the attach is mirrored onto the legacy compat hierarchy
 * (failure there only warns). NOTE(review): fragmented excerpt, interior
 * lines missing — comments only. */
350 int cg_attach(const char *controller
, const char *path
, pid_t pid
) {
351 _cleanup_free_
char *fs
= NULL
;
/* Buffer big enough for a decimal pid_t plus "\n\0". */
352 char c
[DECIMAL_STR_MAX(pid_t
) + 2];
358 r
= cg_get_path_and_check(controller
, path
, "cgroup.procs", &fs
);
363 pid
= getpid_cached();
365 xsprintf(c
, PID_FMT
"\n", pid
);
367 r
= write_string_file(fs
, c
, WRITE_STRING_FILE_DISABLE_BUFFER
);
368 if (r
== -EOPNOTSUPP
&& cg_is_threaded(path
) > 0)
369 /* When the threaded mode is used, we cannot read/write the file. Let's return recognizable error. */
374 r
= cg_hybrid_unified();
378 if (r
> 0 && streq(controller
, SYSTEMD_CGROUP_CONTROLLER
)) {
379 r
= cg_attach(SYSTEMD_CGROUP_CONTROLLER_LEGACY
, path
, pid
);
381 log_warning_errno(r
, "Failed to attach "PID_FMT
" to compat systemd cgroup %s: %m", pid
, path
);
/* Like cg_attach(), but takes an already-open cgroup directory fd: writes the
 * PID (0 => calling process) to the "cgroup.procs" file relative to 'fd'.
 * NOTE(review): fragmented excerpt, interior lines missing — comments only. */
387 int cg_fd_attach(int fd
, pid_t pid
) {
388 char c
[DECIMAL_STR_MAX(pid_t
) + 2];
394 pid
= getpid_cached();
396 xsprintf(c
, PID_FMT
"\n", pid
);
398 return write_string_file_at(fd
, "cgroup.procs", c
, WRITE_STRING_FILE_DISABLE_BUFFER
);
/* Attaches 'pid' to 'path'; if that fails, walks all path prefixes (via
 * PATH_FOREACH_PREFIX over a stack copy of the path) and attempts to attach
 * to each ancestor instead. NOTE(review): fragmented excerpt, interior lines
 * missing — comments only. */
401 int cg_attach_fallback(const char *controller
, const char *path
, pid_t pid
) {
408 r
= cg_attach(controller
, path
, pid
);
/* VLA sized to hold any prefix of 'path'. */
410 char prefix
[strlen(path
) + 1];
412 /* This didn't work? Then let's try all prefixes of
415 PATH_FOREACH_PREFIX(prefix
, path
) {
418 q
= cg_attach(controller
, prefix
, pid
);
/* Adjusts ownership/permissions for delegating a cgroup: chowns/chmods the
 * cgroup directory itself (0755) and an allowlist of attribute files (0644),
 * the list differing between the v1 (legacy) and v2 (unified) hierarchy. In
 * hybrid mode the access mode is propagated to the legacy compat hierarchy.
 * NOTE(review): this excerpt is missing the function's opening line
 * (presumably cg_set_access — confirm against the full file) and many
 * interior lines; comments only, code untouched. */
428 const char *controller
,
438 /* cgroup v1, aka legacy/non-unified */
439 static const struct Attribute legacy_attributes
[] = {
440 { "cgroup.procs", true },
442 { "cgroup.clone_children", false },
446 /* cgroup v2, aka unified */
447 static const struct Attribute unified_attributes
[] = {
448 { "cgroup.procs", true },
449 { "cgroup.subtree_control", true },
450 { "cgroup.threads", false },
451 { "memory.oom.group", false },
452 { "memory.reclaim", false },
/* Indexed by the boolean "is unified" flag below. */
456 static const struct Attribute
* const attributes
[] = {
457 [false] = legacy_attributes
,
458 [true] = unified_attributes
,
461 _cleanup_free_
char *fs
= NULL
;
462 const struct Attribute
*i
;
/* Nothing to do if neither a uid nor a gid was specified. */
467 if (uid
== UID_INVALID
&& gid
== GID_INVALID
)
470 unified
= cg_unified_controller(controller
);
474 /* Configure access to the cgroup itself */
475 r
= cg_get_path(controller
, path
, NULL
, &fs
);
479 r
= chmod_and_chown(fs
, 0755, uid
, gid
);
483 /* Configure access to the cgroup's attributes */
484 for (i
= attributes
[unified
]; i
->name
; i
++) {
487 r
= cg_get_path(controller
, path
, i
->name
, &fs
);
491 r
= chmod_and_chown(fs
, 0644, uid
, gid
);
496 log_debug_errno(r
, "Failed to set access on cgroup %s, ignoring: %m", fs
);
500 if (streq(controller
, SYSTEMD_CGROUP_CONTROLLER
)) {
501 r
= cg_hybrid_unified();
505 /* Always propagate access mode from unified to legacy controller */
506 r
= cg_set_access(SYSTEMD_CGROUP_CONTROLLER_LEGACY
, path
, uid
, gid
);
508 log_debug_errno(r
, "Failed to set access on compatibility systemd cgroup %s, ignoring: %m", path
);
/* Per-inode state for the recursive chown below (fields mostly outside this
 * excerpt; 'uid', 'gid' and 'error' are referenced by the callback). */
515 struct access_callback_data
{
/* recurse_dir() callback: chowns every visited directory/entry to d->uid /
 * d->gid. Since the walk hands out O_PATH fds (which fchown() rejects), the
 * chown goes through /proc/self/fd/<fd>. Errors are debug-logged and the
 * last one is stashed in d->error; recursion always continues.
 * NOTE(review): fragmented excerpt, interior lines missing — comments only. */
521 static int access_callback(
522 RecurseDirEvent event
,
526 const struct dirent
*de
,
527 const struct statx
*sx
,
530 struct access_callback_data
*d
= ASSERT_PTR(userdata
);
532 if (!IN_SET(event
, RECURSE_DIR_ENTER
, RECURSE_DIR_ENTRY
))
533 return RECURSE_DIR_CONTINUE
;
535 assert(inode_fd
>= 0);
537 /* fchown() doesn't support O_PATH fds, hence we use the /proc/self/fd/ trick */
538 if (chown(FORMAT_PROC_FD_PATH(inode_fd
), d
->uid
, d
->gid
) < 0) {
539 log_debug_errno(errno
, "Failed to change ownership of '%s', ignoring: %m", ASSERT_PTR(path
));
541 if (d
->error
== 0) /* Return last error to caller */
545 return RECURSE_DIR_CONTINUE
;
/* Recursively chowns an entire cgroup subtree to uid/gid using recurse_dir
 * (same mount only, inode fds, including the top-level dir) with
 * access_callback(). Unlike cg_set_access() this touches *all* files, not
 * just the delegation allowlist. NOTE(review): fragmented excerpt, interior
 * lines missing — comments only. */
548 int cg_set_access_recursive(
549 const char *controller
,
554 _cleanup_close_
int fd
= -EBADF
;
555 _cleanup_free_
char *fs
= NULL
;
558 /* A recursive version of cg_set_access(). But note that this one changes ownership of *all* files,
559 * not just the allowlist that cg_set_access() uses. Use cg_set_access() on the cgroup you want to
560 * delegate, and cg_set_access_recursive() for any subcrgoups you might want to create below it. */
/* No-op if neither uid nor gid is valid. */
562 if (!uid_is_valid(uid
) && !gid_is_valid(gid
))
565 r
= cg_get_path(controller
, path
, NULL
, &fs
);
569 fd
= open(fs
, O_DIRECTORY
|O_CLOEXEC
|O_RDONLY
);
573 struct access_callback_data d
= {
581 /* n_depth_max= */ UINT_MAX
,
582 RECURSE_DIR_SAME_MOUNT
|RECURSE_DIR_INODE_FD
|RECURSE_DIR_TOPLEVEL
,
/* Migration loop: repeatedly enumerates the PIDs in the source cgroup
 * (cfrom/pfrom) and cg_attach()es each to the destination (cto/pto), tracking
 * already-moved PIDs in a Set to avoid loops. Skips our own PID when
 * CGROUP_IGNORE_SELF is set, and kernel threads in the root cgroup. Errors
 * are accumulated via RET_GATHER. NOTE(review): the function's opening line
 * is missing from this excerpt (presumably cg_migrate — confirm against the
 * full file); comments only, code untouched. */
599 _cleanup_set_free_ Set
*s
= NULL
;
608 _cleanup_fclose_
FILE *f
= NULL
;
613 r
= cg_enumerate_processes(cfrom
, pfrom
, &f
);
615 return RET_GATHER(ret
, r
);
617 while ((r
= cg_read_pid(f
, &pid
)) > 0) {
618 /* This might do weird stuff if we aren't a single-threaded program. However, we
619 * luckily know we are. */
620 if (FLAGS_SET(flags
, CGROUP_IGNORE_SELF
) && pid
== getpid_cached())
/* Already migrated in an earlier pass? Skip. */
623 if (set_contains(s
, PID_TO_PTR(pid
)))
626 /* Ignore kernel threads. Since they can only exist in the root cgroup, we only
627 * check for them there. */
628 if (cfrom
&& empty_or_root(pfrom
) &&
629 pid_is_kernel_thread(pid
) > 0)
632 r
= cg_attach(cto
, pto
, pid
);
/* Remember this PID so we do not try to move it again. */
641 r
= set_ensure_put(&s
, /* hash_ops = */ NULL
, PID_TO_PTR(pid
));
643 return RET_GATHER(ret
, r
);
646 return RET_GATHER(ret
, r
);
/* Recursively migrates all processes from the source cgroup subtree to the
 * destination: migrates the top level, then recurses into each subgroup
 * (joined via path_join), and finally rmdir()s the source cgroup when
 * CGROUP_REMOVE is in flags (ENOENT/EBUSY tolerated). The first error is
 * remembered in 'ret' while iteration continues. NOTE(review): fragmented
 * excerpt, interior lines missing — comments only. */
652 int cg_migrate_recursive(
659 _cleanup_closedir_
DIR *d
= NULL
;
668 ret
= cg_migrate(cfrom
, pfrom
, cto
, pto
, flags
);
670 r
= cg_enumerate_subgroups(cfrom
, pfrom
, &d
);
672 if (ret
>= 0 && r
!= -ENOENT
)
678 while ((r
= cg_read_subgroup(d
, &fn
)) > 0) {
679 _cleanup_free_
char *p
= NULL
;
681 p
= path_join(empty_to_root(pfrom
), fn
);
686 r
= cg_migrate_recursive(cfrom
, p
, cto
, pto
, flags
);
687 if (r
!= 0 && ret
>= 0)
691 if (r
< 0 && ret
>= 0)
694 if (flags
& CGROUP_REMOVE
) {
695 r
= cg_rmdir(cfrom
, pfrom
);
696 if (r
< 0 && ret
>= 0 && !IN_SET(r
, -ENOENT
, -EBUSY
))
/* Like cg_migrate_recursive(), but if migration to the given destination
 * fails, falls back to trying every prefix of the destination path (via
 * PATH_FOREACH_PREFIX over a stack copy). NOTE(review): fragmented excerpt,
 * interior lines missing — comments only. */
703 int cg_migrate_recursive_fallback(
717 r
= cg_migrate_recursive(cfrom
, pfrom
, cto
, pto
, flags
);
/* VLA sized to hold any prefix of the destination path. */
719 char prefix
[strlen(pto
) + 1];
721 /* This didn't work? Then let's try all prefixes of the destination */
723 PATH_FOREACH_PREFIX(prefix
, pto
) {
726 q
= cg_migrate_recursive(cfrom
, pfrom
, cto
, prefix
, flags
);
/* Creates 'path' in the systemd hierarchy and duplicates it in every v1
 * controller hierarchy selected by 'mask' (restricted to 'supported'); on a
 * fully unified setup only the systemd hierarchy is touched. Joined
 * controllers are handled once via the 'done' mask. NOTE(review): fragmented
 * excerpt, interior lines missing — comments only. */
735 int cg_create_everywhere(CGroupMask supported
, CGroupMask mask
, const char *path
) {
741 /* This one will create a cgroup in our private tree, but also
742 * duplicate it in the trees specified in mask, and remove it
745 * Returns 0 if the group already existed in the systemd hierarchy,
746 * 1 on success, negative otherwise.
749 /* First create the cgroup in our own hierarchy. */
750 r
= cg_create(SYSTEMD_CGROUP_CONTROLLER
, path
);
755 /* If we are in the unified hierarchy, we are done now */
756 r
= cg_all_unified();
/* Only v1 controllers remain relevant; extend the mask to joined controllers. */
762 supported
&= CGROUP_MASK_V1
;
763 mask
= CGROUP_MASK_EXTEND_JOINED(mask
);
766 /* Otherwise, do the same in the other hierarchies */
767 for (c
= 0; c
< _CGROUP_CONTROLLER_MAX
; c
++) {
768 CGroupMask bit
= CGROUP_CONTROLLER_TO_MASK(c
);
771 if (!FLAGS_SET(supported
, bit
))
774 if (FLAGS_SET(done
, bit
))
777 n
= cgroup_controller_to_string(c
);
778 if (FLAGS_SET(mask
, bit
))
779 (void) cg_create(n
, path
);
781 done
|= CGROUP_MASK_EXTEND_JOINED(bit
);
/* Attaches 'pid' to 'path' in the systemd hierarchy, and (unless fully
 * unified) best-effort attaches it in every supported v1 controller
 * hierarchy, letting 'path_callback' remap the destination path per
 * controller. Joined controllers are deduplicated via 'done'. NOTE(review):
 * fragmented excerpt, interior lines missing — comments only. */
787 int cg_attach_everywhere(CGroupMask supported
, const char *path
, pid_t pid
, cg_migrate_callback_t path_callback
, void *userdata
) {
790 r
= cg_attach(SYSTEMD_CGROUP_CONTROLLER
, path
, pid
);
794 r
= cg_all_unified();
800 supported
&= CGROUP_MASK_V1
;
803 for (CGroupController c
= 0; c
< _CGROUP_CONTROLLER_MAX
; c
++) {
804 CGroupMask bit
= CGROUP_CONTROLLER_TO_MASK(c
);
805 const char *p
= NULL
;
807 if (!FLAGS_SET(supported
, bit
))
810 if (FLAGS_SET(done
, bit
))
/* Let the caller remap the destination path for this controller. */
814 p
= path_callback(bit
, userdata
);
818 (void) cg_attach_fallback(cgroup_controller_to_string(c
), p
, pid
);
819 done
|= CGROUP_MASK_EXTEND_JOINED(bit
);
/* For every supported v1 controller selected by 'mask', recursively migrates
 * processes from the systemd hierarchy path 'from' into that controller's
 * hierarchy, letting 'to_callback' pick the destination path. The first error
 * is remembered while iteration continues; joined controllers deduplicated
 * via 'done'. NOTE(review): fragmented excerpt, interior lines missing —
 * comments only. */
825 int cg_migrate_v1_controllers(CGroupMask supported
, CGroupMask mask
, const char *from
, cg_migrate_callback_t to_callback
, void *userdata
) {
832 supported
&= CGROUP_MASK_V1
;
833 mask
= CGROUP_MASK_EXTEND_JOINED(mask
);
836 for (c
= 0; c
< _CGROUP_CONTROLLER_MAX
; c
++) {
837 CGroupMask bit
= CGROUP_CONTROLLER_TO_MASK(c
);
838 const char *to
= NULL
;
840 if (!FLAGS_SET(supported
, bit
))
843 if (FLAGS_SET(done
, bit
))
846 if (!FLAGS_SET(mask
, bit
))
849 to
= to_callback(bit
, userdata
);
851 /* Remember first error and try continuing */
852 q
= cg_migrate_recursive_fallback(SYSTEMD_CGROUP_CONTROLLER
, from
, cgroup_controller_to_string(c
), to
, 0);
855 done
|= CGROUP_MASK_EXTEND_JOINED(bit
);
/* Trims 'path' in the systemd hierarchy, then (unless fully unified) trims it
 * in all supported v1 controller hierarchies via cg_trim_v1_controllers().
 * NOTE(review): fragmented excerpt, interior lines missing — comments only. */
861 int cg_trim_everywhere(CGroupMask supported
, const char *path
, bool delete_root
) {
864 r
= cg_trim(SYSTEMD_CGROUP_CONTROLLER
, path
, delete_root
);
868 q
= cg_all_unified();
874 return cg_trim_v1_controllers(supported
, _CGROUP_MASK_ALL
, path
, delete_root
);
/* Trims 'path' in every supported v1 controller hierarchy selected by 'mask';
 * the first error is remembered while iteration continues, and joined
 * controllers are handled once via 'done'. NOTE(review): fragmented excerpt,
 * interior lines missing — comments only. */
877 int cg_trim_v1_controllers(CGroupMask supported
, CGroupMask mask
, const char *path
, bool delete_root
) {
882 supported
&= CGROUP_MASK_V1
;
883 mask
= CGROUP_MASK_EXTEND_JOINED(mask
);
886 for (c
= 0; c
< _CGROUP_CONTROLLER_MAX
; c
++) {
887 CGroupMask bit
= CGROUP_CONTROLLER_TO_MASK(c
);
889 if (!FLAGS_SET(supported
, bit
))
892 if (FLAGS_SET(done
, bit
))
895 if (FLAGS_SET(mask
, bit
)) {
896 /* Remember first error and try continuing */
897 q
= cg_trim(cgroup_controller_to_string(c
), path
, delete_root
);
900 done
|= CGROUP_MASK_EXTEND_JOINED(bit
);
/* Enables/disables v2 controllers for a cgroup by writing "+name"/"-name"
 * entries into its cgroup.subtree_control file, and reports the resulting
 * controller mask via *ret_result_mask. On a pure-legacy setup this is a
 * no-op that claims full success (see the long comment below). NOTE(review):
 * fragmented excerpt; interior lines are missing and the function continues
 * past the end of this chunk — comments only, code untouched. */
906 int cg_enable_everywhere(
907 CGroupMask supported
,
910 CGroupMask
*ret_result_mask
) {
912 _cleanup_fclose_
FILE *f
= NULL
;
913 _cleanup_free_
char *fs
= NULL
;
/* Nothing supported => nothing to enable; report an empty result mask. */
920 if (supported
== 0) {
922 *ret_result_mask
= 0;
926 r
= cg_all_unified();
930 /* On the legacy hierarchy there's no concept of "enabling" controllers in cgroups defined. Let's claim
931 * complete success right away. (If you wonder why we return the full mask here, rather than zero: the
932 * caller tends to use the returned mask later on to compare if all controllers where properly joined,
933 * and if not requeues realization. This use is the primary purpose of the return value, hence let's
934 * minimize surprises here and reduce triggers for re-realization by always saying we fully
937 *ret_result_mask
= mask
& supported
& CGROUP_MASK_V2
; /* If you wonder why we mask this with
938 * CGROUP_MASK_V2: The 'supported' mask
939 * might contain pure-V1 or BPF
940 * controllers, and we never want to
941 * claim that we could enable those with
942 * cgroup.subtree_control */
946 r
= cg_get_path(SYSTEMD_CGROUP_CONTROLLER
, p
, "cgroup.subtree_control", &fs
);
950 for (c
= 0; c
< _CGROUP_CONTROLLER_MAX
; c
++) {
951 CGroupMask bit
= CGROUP_CONTROLLER_TO_MASK(c
);
954 if (!FLAGS_SET(CGROUP_MASK_V2
, bit
))
957 if (!FLAGS_SET(supported
, bit
))
960 n
= cgroup_controller_to_string(c
);
/* VLA holding "+<name>" or "-<name>" plus NUL. */
962 char s
[1 + strlen(n
) + 1];
964 s
[0] = FLAGS_SET(mask
, bit
) ? '+' : '-';
970 return log_debug_errno(errno
, "Failed to open cgroup.subtree_control file of %s: %m", p
);
973 r
= write_string_stream(f
, s
, WRITE_STRING_FILE_DISABLE_BUFFER
);
975 log_debug_errno(r
, "Failed to %s controller %s for %s (%s): %m",
976 FLAGS_SET(mask
, bit
) ? "enable" : "disable", n
, p
, fs
);
979 /* If we can't turn off a controller, leave it on in the reported resulting mask. This
980 * happens for example when we attempt to turn off a controller up in the tree that is
981 * used down in the tree. */
982 if (!FLAGS_SET(mask
, bit
) && r
== -EBUSY
) /* You might wonder why we check for EBUSY
983 * only here, and not follow the same logic
984 * for other errors such as EINVAL or
985 * EOPNOTSUPP or anything else. That's
986 * because EBUSY indicates that the
987 * controllers is currently enabled and
988 * cannot be disabled because something down
989 * the hierarchy is still using it. Any other
990 * error most likely means something like "I
991 * never heard of this controller" or
992 * similar. In the former case it's hence
993 * safe to assume the controller is still on
994 * after the failed operation, while in the
995 * latter case it's safer to assume the
996 * controller is unknown and hence certainly
1000 /* Otherwise, if we managed to turn on a controller, set the bit reflecting that. */
1001 if (FLAGS_SET(mask
, bit
))
1007 /* Let's return the precise set of controllers now enabled for the cgroup. */
1008 if (ret_result_mask
)
1009 *ret_result_mask
= ret
;